/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/vnode.h>
#include <sys/time.h>
#include <sys/priv.h>

#include <sys/mount_internal.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/protosw.h> /* for net_uptime2timeval() */

#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread_call.h>
#if CONFIG_MACF
#include <security/mac_framework.h>
#endif
#include <IOKit/IOBSD.h>

#define HZ	100	/* XXX */

/* simple lock used to access timezone, tz structure */
lck_spin_t * tz_slock;
lck_grp_t * tz_slock_grp;
lck_attr_t * tz_slock_attr;
lck_grp_attr_t *tz_slock_grp_attr;

static void setthetime(
	struct timeval *tv);

void time_zone_slock_init(void);
static boolean_t timeval_fixusec(struct timeval *t1);

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
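
/*
 * For orientation, userspace reaches these entry points through the usual
 * libc wrappers.  A minimal userspace sketch (illustrative only, not part
 * of this file): arm a recurring one-second ITIMER_REAL, which
 * realitexpire() below services by delivering SIGALRM:
 *
 *	struct itimerval itv = {
 *		.it_interval = { .tv_sec = 1, .tv_usec = 0 },
 *		.it_value    = { .tv_sec = 1, .tv_usec = 0 },
 *	};
 *	setitimer(ITIMER_REAL, &itv, NULL);
 */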
/* ARGSUSED */
int
gettimeofday(
	struct proc *p,
	struct gettimeofday_args *uap,
	__unused int32_t *retval)
{
	int error = 0;
	struct timezone ltz; /* local copy */
	clock_sec_t secs;
	clock_usec_t usecs;
	uint64_t mach_time;

	if (uap->tp || uap->mach_absolute_time) {
		clock_gettimeofday_and_absolute_time(&secs, &usecs, &mach_time);
	}

	if (uap->tp) {
		/* Casting secs through a uint32_t to match arm64 commpage */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timeval user_atv = {};
			user_atv.tv_sec = (uint32_t)secs;
			user_atv.tv_usec = usecs;
			error = copyout(&user_atv, uap->tp, sizeof(user_atv));
		} else {
			struct user32_timeval user_atv = {};
			user_atv.tv_sec = (uint32_t)secs;
			user_atv.tv_usec = usecs;
			error = copyout(&user_atv, uap->tp, sizeof(user_atv));
		}
		if (error) {
			return error;
		}
	}

	if (uap->tzp) {
		lck_spin_lock(tz_slock);
		ltz = tz;
		lck_spin_unlock(tz_slock);

		error = copyout((caddr_t)&ltz, CAST_USER_ADDR_T(uap->tzp), sizeof(tz));
	}

	if (error == 0 && uap->mach_absolute_time) {
		error = copyout(&mach_time, uap->mach_absolute_time, sizeof(mach_time));
	}

	return error;
}

/*
 * XXX Y2038 bug because of setthetime() argument
 */
/* ARGSUSED */
int
settimeofday(struct proc *p, struct settimeofday_args *uap, __unused int32_t *retval)
{
	struct timeval atv;
	struct timezone atz;
	int error;

	bzero(&atv, sizeof(atv));

	/* Check that this task is entitled to set the time or it is root */
	if (!IOTaskHasEntitlement(current_task(), SETTIME_ENTITLEMENT)) {

#if CONFIG_MACF
		error = mac_system_check_settime(kauth_cred_get());
		if (error)
			return (error);
#endif
#ifndef CONFIG_EMBEDDED
		if ((error = suser(kauth_cred_get(), &p->p_acflag)))
			return (error);
#endif
	}

	/* Verify all parameters before changing time */
	if (uap->tv) {
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timeval user_atv;
			error = copyin(uap->tv, &user_atv, sizeof(user_atv));
			atv.tv_sec = user_atv.tv_sec;
			atv.tv_usec = user_atv.tv_usec;
		} else {
			struct user32_timeval user_atv;
			error = copyin(uap->tv, &user_atv, sizeof(user_atv));
			atv.tv_sec = user_atv.tv_sec;
			atv.tv_usec = user_atv.tv_usec;
		}
		if (error)
			return (error);
	}
	if (uap->tzp && (error = copyin(uap->tzp, (caddr_t)&atz, sizeof(atz))))
		return (error);
	if (uap->tv) {
		/* only positive values of sec/usec are accepted */
		if (atv.tv_sec < 0 || atv.tv_usec < 0)
			return (EPERM);
		if (!timeval_fixusec(&atv))
			return (EPERM);
		setthetime(&atv);
	}
	if (uap->tzp) {
		lck_spin_lock(tz_slock);
		tz = atz;
		lck_spin_unlock(tz_slock);
	}
	return (0);
}

static void
setthetime(
	struct timeval *tv)
{
	clock_set_calendar_microtime(tv->tv_sec, tv->tv_usec);
}

/*
 * Verify the calendar value.  If negative,
 * reset to zero (the epoch).
 */
void
inittodr(
	__unused time_t base)
{
	struct timeval tv;

	/*
	 * Assertion:
	 * The calendar has already been
	 * set up from the platform clock.
	 *
	 * The value returned by microtime()
	 * is gotten from the calendar.
	 */
	microtime(&tv);

	if (tv.tv_sec < 0 || tv.tv_usec < 0) {
		printf("WARNING: preposterous time in Real Time Clock");
		tv.tv_sec = 0;	/* the UNIX epoch */
		tv.tv_usec = 0;
		setthetime(&tv);
		printf(" -- CHECK AND RESET THE DATE!\n");
	}
}

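/*
 * Return the calendar time at boot, in seconds.  (The nanoseconds half
 * of the boot time is fetched but intentionally discarded here.)
 */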
time_t
boottime_sec(void)
{
	clock_sec_t secs;
	clock_nsec_t nanosecs;

	clock_get_boottime_nanotime(&secs, &nanosecs);
	return (secs);
}

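/*
 * Return the calendar time at boot as a timeval, at microsecond resolution.
 */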
void
boottime_timeval(struct timeval *tv)
{
	clock_sec_t secs;
	clock_usec_t microsecs;

	clock_get_boottime_microtime(&secs, &microsecs);

	tv->tv_sec = secs;
	tv->tv_usec = microsecs;
}

/*
 * Get the value of an interval timer.  The process virtual and
 * profiling virtual time timers are kept internally in the
 * way they are specified externally: in time until they expire.
 *
 * The real time interval timer expiration time (p_rtime)
 * is kept as an absolute time rather than as a delta, so that
 * it is easy to keep periodic real-time signals from drifting.
 *
 * The real time timer is processed by a callout routine.
 * Since a callout may be delayed in real time due to
 * other processing in the system, it is possible for the real
 * time callout routine (realitexpire, given below) to be delayed
 * in real time past when it is supposed to occur.  It does not
 * suffice, therefore, to reload the real time .it_value from the
 * real time .it_interval.  Rather, we compute the next time in
 * absolute time when the timer should go off.
 *
 * Returns:	0			Success
 *		EINVAL			Invalid argument
 *	copyout:EFAULT			Bad address
 */
/* ARGSUSED */
int
getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval)
{
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF)
		return (EINVAL);

	bzero(&aitv, sizeof(aitv));

	proc_spinlock(p);
	switch (uap->which) {

	case ITIMER_REAL:
		/*
		 * If the time for the real time timer has passed, return 0;
		 * else return the difference between the current time and
		 * the time the timer is set to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&p->p_rtime)) {
			struct timeval now;

			microuptime(&now);
			if (timercmp(&p->p_rtime, &now, <))
				timerclear(&aitv.it_value);
			else {
				aitv.it_value = p->p_rtime;
				timevalsub(&aitv.it_value, &now);
			}
		}
		else
			timerclear(&aitv.it_value);
		break;

	case ITIMER_VIRTUAL:
		aitv = p->p_vtimer_user;
		break;

	case ITIMER_PROF:
		aitv = p->p_vtimer_prof;
		break;
	}

	proc_spinunlock(p);

	if (IS_64BIT_PROCESS(p)) {
		struct user64_itimerval user_itv;
		bzero(&user_itv, sizeof (user_itv));
		user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
		user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
		user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
		user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
		return (copyout((caddr_t)&user_itv, uap->itv, sizeof (user_itv)));
	} else {
		struct user32_itimerval user_itv;
		bzero(&user_itv, sizeof (user_itv));
		user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
		user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
		user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
		user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
		return (copyout((caddr_t)&user_itv, uap->itv, sizeof (user_itv)));
	}
}

/*
 * Returns:	0			Success
 *		EINVAL			Invalid argument
 *	copyin:EFAULT			Bad address
 *	getitimer:EINVAL		Invalid argument
 *	getitimer:EFAULT		Bad address
 */
/* ARGSUSED */
int
setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval)
{
	struct itimerval aitv;
	user_addr_t itvp;
	int error;

	bzero(&aitv, sizeof(aitv));

	if (uap->which > ITIMER_PROF)
		return (EINVAL);
	if ((itvp = uap->itv)) {
		if (IS_64BIT_PROCESS(p)) {
			struct user64_itimerval user_itv;
			if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof (user_itv))))
				return (error);
			aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec;
			aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
			aitv.it_value.tv_sec = user_itv.it_value.tv_sec;
			aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
		} else {
			struct user32_itimerval user_itv;
			if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof (user_itv))))
				return (error);
			aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec;
			aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
			aitv.it_value.tv_sec = user_itv.it_value.tv_sec;
			aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
		}
	}
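
	/*
	 * If the caller asked for the previous value (oitv != NULL), reuse
	 * the argument block so getitimer() copies the current setting out
	 * to that address before the new one is installed.
	 */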
	if ((uap->itv = uap->oitv) && (error = getitimer(p, (struct getitimer_args *)uap, retval)))
		return (error);
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);

	switch (uap->which) {

	case ITIMER_REAL:
		proc_spinlock(p);
		if (timerisset(&aitv.it_value)) {
			microuptime(&p->p_rtime);
			timevaladd(&p->p_rtime, &aitv.it_value);
			p->p_realtimer = aitv;
			if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL,
				tvtoabstime(&p->p_rtime), 0, THREAD_CALL_DELAY_USER_NORMAL))
				p->p_ractive++;
		} else {
			timerclear(&p->p_rtime);
			p->p_realtimer = aitv;
			if (thread_call_cancel(p->p_rcall))
				p->p_ractive--;
		}
		proc_spinunlock(p);

		break;

	case ITIMER_VIRTUAL:
		if (timerisset(&aitv.it_value))
			task_vtimer_set(p->task, TASK_VTIMER_USER);
		else
			task_vtimer_clear(p->task, TASK_VTIMER_USER);

		proc_spinlock(p);
		p->p_vtimer_user = aitv;
		proc_spinunlock(p);
		break;

	case ITIMER_PROF:
		if (timerisset(&aitv.it_value))
			task_vtimer_set(p->task, TASK_VTIMER_PROF);
		else
			task_vtimer_clear(p->task, TASK_VTIMER_PROF);

		proc_spinlock(p);
		p->p_vtimer_prof = aitv;
		proc_spinunlock(p);
		break;
	}

	return (0);
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If time is not set up to reload, then just return.
 * Else compute the next time the timer should go off, which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
void
realitexpire(
	struct proc *p)
{
	struct proc *r;
	struct timeval t;

	r = proc_find(p->p_pid);

	proc_spinlock(p);

	assert(p->p_ractive > 0);

	if (--p->p_ractive > 0 || r != p) {
		/*
		 * Bail, because either the proc is exiting
		 * or there's another active thread call.
		 */
		proc_spinunlock(p);

		if (r != NULL)
			proc_rele(r);
		return;
	}

	if (!timerisset(&p->p_realtimer.it_interval)) {
		/*
		 * p_realtimer was cleared while this call was pending;
		 * send one last SIGALRM, but don't re-arm.
		 */
		timerclear(&p->p_rtime);
		proc_spinunlock(p);

		psignal(p, SIGALRM);
		proc_rele(p);
		return;
	}

	proc_spinunlock(p);

	/*
	 * Send the signal before re-arming the next thread call,
	 * so that if psignal blocks, we won't create yet another thread call.
	 */

	psignal(p, SIGALRM);

	proc_spinlock(p);

	/* Should we still re-arm the next thread call? */
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_rtime);
		proc_spinunlock(p);

		proc_rele(p);
		return;
	}

	microuptime(&t);
	timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);

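	/*
	 * The callout may have fired late.  If the next expiration time has
	 * already passed, catch up: step forward one interval at a time when
	 * we are within about two seconds of the present, otherwise jump
	 * directly to one interval past the current time.
	 */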
	if (timercmp(&p->p_rtime, &t, <=)) {
		if ((p->p_rtime.tv_sec + 2) >= t.tv_sec) {
			for (;;) {
				timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
				if (timercmp(&p->p_rtime, &t, >))
					break;
			}
		} else {
			p->p_rtime = p->p_realtimer.it_interval;
			timevaladd(&p->p_rtime, &t);
		}
	}

	assert(p->p_rcall != NULL);

	if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL, tvtoabstime(&p->p_rtime), 0,
		THREAD_CALL_DELAY_USER_NORMAL)) {
		p->p_ractive++;
	}

	proc_spinunlock(p);

	proc_rele(p);
}

/*
 * Called once from proc_exit to clean up after an armed or pending realitexpire.
 *
 * This will only be called after the proc refcount is drained,
 * so realitexpire cannot currently be holding a proc ref;
 * i.e. its proc_find call will return (or has already returned) PROC_NULL.
 */
void
proc_free_realitimer(proc_t p)
{
	proc_spinlock(p);

	assert(p->p_rcall != NULL);
	assert(p->p_refcount == 0);

	timerclear(&p->p_realtimer.it_interval);

	if (thread_call_cancel(p->p_rcall)) {
		assert(p->p_ractive > 0);
		p->p_ractive--;
	}

	while (p->p_ractive > 0) {
		proc_spinunlock(p);

		delay(1);

		proc_spinlock(p);
	}

	thread_call_t call = p->p_rcall;
	p->p_rcall = NULL;

	proc_spinunlock(p);

	thread_call_free(call);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(
	struct timeval *tv)
{

	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	return (0);
}

int
timespec_is_valid(const struct timespec *ts)
{
	/* The INT32_MAX limit ensures the timespec is safe for clock_*() functions
	 * which accept 32-bit ints.  tv_nsec must lie in [0, NSEC_PER_SEC). */
	if (ts->tv_sec < 0 || ts->tv_sec > INT32_MAX ||
	    ts->tv_nsec < 0 || (unsigned long long)ts->tv_nsec >= NSEC_PER_SEC) {
		return 0;
	}
	return 1;
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
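/*
 * Worked example of the carry-over (values chosen for illustration):
 * with it_value = {0, 300}, it_interval = {1, 0} and usec = 500, the
 * timer expires with 200us of the decrement left over, so it reloads
 * to {0, 999800} rather than a full {1, 0}, keeping the long-run rate
 * steady.  Note the carry is only applied when the reloaded value has
 * tv_sec > 0; sub-second intervals absorb the remainder instead.
 */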
int
itimerdecr(proc_t p,
	struct itimerval *itp, int usec)
{

	proc_spinlock(p);

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		proc_spinunlock(p);
		return (1);
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		if (itp->it_value.tv_sec > 0) {
			itp->it_value.tv_usec -= usec;
			if (itp->it_value.tv_usec < 0) {
				itp->it_value.tv_usec += 1000000;
				itp->it_value.tv_sec--;
			}
		}
	} else
		itp->it_value.tv_usec = 0;	/* sec is already 0 */
	proc_spinunlock(p);
	return (0);
}

/*
 * Add and subtract routines for timevals.
 * N.B.: the subtract routine doesn't deal with
 * results which are before the beginning;
 * it just gets very confused in this case.
 * Caveat emptor.
 */
void
timevaladd(
	struct timeval *t1,
	struct timeval *t2)
{

	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(
	struct timeval *t1,
	struct timeval *t2)
{

	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

void
timevalfix(
	struct timeval *t1)
{

	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

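/*
 * Normalize a timeval whose tv_usec may hold an arbitrary non-negative
 * count of microseconds: fold whole seconds into tv_sec, leaving tv_usec
 * in [0, 1000000).  Returns FALSE if tv_sec would overflow.
 */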
static boolean_t
timeval_fixusec(
	struct timeval *t1)
{
	assert(t1->tv_usec >= 0);
	assert(t1->tv_sec >= 0);

	if (t1->tv_usec >= 1000000) {
		if (os_add_overflow(t1->tv_sec, t1->tv_usec / 1000000, &t1->tv_sec))
			return FALSE;
		t1->tv_usec = t1->tv_usec % 1000000;
	}

	return TRUE;
}

/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points.
 */
void
microtime(
	struct timeval *tvp)
{
	clock_sec_t tv_sec;
	clock_usec_t tv_usec;

	clock_get_calendar_microtime(&tv_sec, &tv_usec);

	tvp->tv_sec = tv_sec;
	tvp->tv_usec = tv_usec;
}

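/*
 * As microtime(), but also return the mach absolute time sampled
 * alongside the calendar time.
 */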
void
microtime_with_abstime(
	struct timeval *tvp, uint64_t *abstime)
{
	clock_sec_t tv_sec;
	clock_usec_t tv_usec;

	clock_get_calendar_absolute_and_microtime(&tv_sec, &tv_usec, abstime);

	tvp->tv_sec = tv_sec;
	tvp->tv_usec = tv_usec;
}

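/*
 * As microtime(), but for the system uptime clock (monotonic since boot)
 * rather than the calendar clock.
 */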
void
microuptime(
	struct timeval *tvp)
{
	clock_sec_t tv_sec;
	clock_usec_t tv_usec;

	clock_get_system_microtime(&tv_sec, &tv_usec);

	tvp->tv_sec = tv_sec;
	tvp->tv_usec = tv_usec;
}

/*
 * Ditto for timespec.
 */
void
nanotime(
	struct timespec *tsp)
{
	clock_sec_t tv_sec;
	clock_nsec_t tv_nsec;

	clock_get_calendar_nanotime(&tv_sec, &tv_nsec);

	tsp->tv_sec = tv_sec;
	tsp->tv_nsec = tv_nsec;
}

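/*
 * Ditto for the system uptime clock, at nanosecond resolution.
 */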
void
nanouptime(
	struct timespec *tsp)
{
	clock_sec_t tv_sec;
	clock_nsec_t tv_nsec;

	clock_get_system_nanotime(&tv_sec, &tv_nsec);

	tsp->tv_sec = tv_sec;
	tsp->tv_nsec = tv_nsec;
}

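/*
 * Convert a timeval to mach absolute time units, converting the seconds
 * and microseconds parts separately and summing the results.
 */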
uint64_t
tvtoabstime(
	struct timeval *tvp)
{
	uint64_t result, usresult;

	clock_interval_to_absolutetime_interval(
		tvp->tv_sec, NSEC_PER_SEC, &result);
	clock_interval_to_absolutetime_interval(
		tvp->tv_usec, NSEC_PER_USEC, &usresult);

	return (result + usresult);
}

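/*
 * Ditto for a timespec, with the fractional part scaled from nanoseconds.
 */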
uint64_t
tstoabstime(struct timespec *ts)
{
	uint64_t abstime_s, abstime_ns;
	clock_interval_to_absolutetime_interval(ts->tv_sec, NSEC_PER_SEC, &abstime_s);
	clock_interval_to_absolutetime_interval(ts->tv_nsec, 1, &abstime_ns);
	return abstime_s + abstime_ns;
}

#if NETWORKING
/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	net_uptime2timeval(&tv);
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * The check for 0,0 ensures the message will be seen at least once,
	 * even if the interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return (rv);
}

/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	net_uptime2timeval(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * Check for 0,0 so that the message will be seen at least once.
	 * If more than one second has passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * We increment *curpps even when *curpps < maxpps, since some
	 * callers may want to use *curpps for statistics as well.
	 */
	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0)
		rv = 1;
	else if (*curpps < maxpps)
		rv = 1;
	else
		rv = 0;

#if 1 /* DIAGNOSTIC? */
	/* be careful about wrap-around */
	if (*curpps + 1 > 0)
		*curpps = *curpps + 1;
#else
	/*
	 * Assume that there are not too many calls to this function.
	 * It is not certain the assumption holds, as it depends on the
	 * *caller's* behavior rather than the behavior of this function.
	 * Since it is wrong to make assumptions about the caller's
	 * behavior, the above #if is #if 1, not #ifdef DIAGNOSTIC.
	 */
	*curpps = *curpps + 1;
#endif

	return (rv);
}
#endif /* NETWORKING */

void
time_zone_slock_init(void)
{
	/* Allocate the lock group attribute and group */
	tz_slock_grp_attr = lck_grp_attr_alloc_init();

	tz_slock_grp = lck_grp_alloc_init("tzlock", tz_slock_grp_attr);

	/* Allocate the lock attribute */
	tz_slock_attr = lck_attr_alloc_init();

	/* Allocate the spin lock */
	tz_slock = lck_spin_alloc_init(tz_slock_grp, tz_slock_attr);
}