/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/param.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/vnode.h>
#include <sys/time.h>
#include <sys/priv.h>

#include <sys/mount_internal.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/protosw.h> /* for net_uptime2timeval() */

#include <kern/clock.h>
#include <kern/task.h>
#include <kern/thread_call.h>
#if CONFIG_MACF
#include <security/mac_framework.h>
#endif
#include <IOKit/IOBSD.h>
#include <sys/time.h>
#include <kern/remote_time.h>

#define HZ	100	/* XXX */

/* simple lock used to access timezone, tz structure */
static LCK_GRP_DECLARE(tz_slock_grp, "tzlock");
static LCK_SPIN_DECLARE(tz_slock, &tz_slock_grp);

static void setthetime(
	struct timeval	*tv);

static boolean_t timeval_fixusec(struct timeval *t1);

/*
 * Time of day and interval timer support.
 *
 * These routines provide the kernel entry points to get and set
 * the time-of-day and per-process interval timers.  Subroutines
 * here provide support for adding and subtracting timeval structures
 * and decrementing interval timers, optionally reloading the interval
 * timers when they expire.
 */
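/*
 * For illustration only: a minimal user-space sketch of how the
 * gettimeofday entry point below is typically driven.  Everything in the
 * sketch is an assumption about a caller, not part of this file.
 *
 *	#include <sys/time.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct timeval tv;
 *
 *		if (gettimeofday(&tv, NULL) != 0) {
 *			perror("gettimeofday");
 *			return 1;
 *		}
 *		printf("%ld.%06d\n", (long)tv.tv_sec, (int)tv.tv_usec);
 *		return 0;
 *	}
 */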
/* ARGSUSED */
int
gettimeofday(
	struct proc *p,
	struct gettimeofday_args *uap,
	__unused int32_t *retval)
{
	int error = 0;
	struct timezone ltz; /* local copy */
	clock_sec_t secs;
	clock_usec_t usecs;
	uint64_t mach_time;

	if (uap->tp || uap->mach_absolute_time) {
		clock_gettimeofday_and_absolute_time(&secs, &usecs, &mach_time);
	}

	if (uap->tp) {
		/* Casting secs through a uint32_t to match arm64 commpage */
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timeval user_atv = {};
			user_atv.tv_sec = (uint32_t)secs;
			user_atv.tv_usec = usecs;
			error = copyout(&user_atv, uap->tp, sizeof(user_atv));
		} else {
			struct user32_timeval user_atv = {};
			user_atv.tv_sec = (uint32_t)secs;
			user_atv.tv_usec = usecs;
			error = copyout(&user_atv, uap->tp, sizeof(user_atv));
		}
		if (error) {
			return error;
		}
	}

	if (uap->tzp) {
		lck_spin_lock(&tz_slock);
		ltz = tz;
		lck_spin_unlock(&tz_slock);

		error = copyout((caddr_t)&ltz, CAST_USER_ADDR_T(uap->tzp), sizeof(tz));
	}

	if (error == 0 && uap->mach_absolute_time) {
		error = copyout(&mach_time, uap->mach_absolute_time, sizeof(mach_time));
	}

	return error;
}

/*
 * XXX Y2038 bug because of setthetime() argument
 */
/* ARGSUSED */
int
settimeofday(__unused struct proc *p, struct settimeofday_args *uap, __unused int32_t *retval)
{
	struct timeval atv;
	struct timezone atz;
	int error;

	bzero(&atv, sizeof(atv));

	/* Check that this task is entitled to set the time or it is root */
	if (!IOCurrentTaskHasEntitlement(SETTIME_ENTITLEMENT)) {
#if CONFIG_MACF
		error = mac_system_check_settime(kauth_cred_get());
		if (error) {
			return error;
		}
#endif
#if defined(XNU_TARGET_OS_OSX)
		if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
			return error;
		}
#endif
	}

	/* Verify all parameters before changing time */
	if (uap->tv) {
		if (IS_64BIT_PROCESS(p)) {
			struct user64_timeval user_atv;
			error = copyin(uap->tv, &user_atv, sizeof(user_atv));
			atv.tv_sec = (__darwin_time_t)user_atv.tv_sec;
			atv.tv_usec = user_atv.tv_usec;
		} else {
			struct user32_timeval user_atv;
			error = copyin(uap->tv, &user_atv, sizeof(user_atv));
			atv.tv_sec = user_atv.tv_sec;
			atv.tv_usec = user_atv.tv_usec;
		}
		if (error) {
			return error;
		}
	}
	if (uap->tzp && (error = copyin(uap->tzp, (caddr_t)&atz, sizeof(atz)))) {
		return error;
	}
	if (uap->tv) {
		/* only positive values of sec/usec are accepted */
		if (atv.tv_sec < 0 || atv.tv_usec < 0) {
			return EPERM;
		}
		if (!timeval_fixusec(&atv)) {
			return EPERM;
		}
		setthetime(&atv);
	}
	if (uap->tzp) {
		lck_spin_lock(&tz_slock);
		tz = atz;
		lck_spin_unlock(&tz_slock);
	}
	return 0;
}

static void
setthetime(
	struct timeval	*tv)
{
	clock_set_calendar_microtime(tv->tv_sec, tv->tv_usec);
}

/*
 * Verify the calendar value.  If negative,
 * reset to zero (the epoch).
 */
void
inittodr(
	__unused time_t base)
{
	struct timeval tv;

	/*
	 * Assertion:
	 *	The calendar has already been
	 *	set up from the platform clock.
	 *
	 *	The value returned by microtime()
	 *	is gotten from the calendar.
	 */
	microtime(&tv);

	if (tv.tv_sec < 0 || tv.tv_usec < 0) {
		printf("WARNING: preposterous time in Real Time Clock");
		tv.tv_sec = 0;		/* the UNIX epoch */
		tv.tv_usec = 0;
		setthetime(&tv);
		printf(" -- CHECK AND RESET THE DATE!\n");
	}
}

time_t
boottime_sec(void)
{
	clock_sec_t secs;
	clock_nsec_t nanosecs;

	clock_get_boottime_nanotime(&secs, &nanosecs);
	return secs;
}

void
boottime_timeval(struct timeval *tv)
{
	clock_sec_t secs;
	clock_usec_t microsecs;

	clock_get_boottime_microtime(&secs, &microsecs);

	tv->tv_sec = secs;
	tv->tv_usec = microsecs;
}

289/*
290 * Get value of an interval timer. The process virtual and
291 * profiling virtual time timers are kept internally in the
292 * way they are specified externally: in time until they expire.
293 *
294 * The real time interval timer expiration time (p_rtime)
295 * is kept as an absolute time rather than as a delta, so that
296 * it is easy to keep periodic real-time signals from drifting.
297 *
298 * The real time timer is processed by a callout routine.
299 * Since a callout may be delayed in real time due to
300 * other processing in the system, it is possible for the real
301 * time callout routine (realitexpire, given below), to be delayed
302 * in real time past when it is supposed to occur. It does not
303 * suffice, therefore, to reload the real time .it_value from the
304 * real time .it_interval. Rather, we compute the next time in
305 * absolute time when the timer should go off.
306 *
307 * Returns: 0 Success
308 * EINVAL Invalid argument
309 * copyout:EFAULT Bad address
310 */
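/*
 * Worked example (illustrative numbers): with it_interval = 2s and
 * p_rtime = 10s of uptime, a callout that fires 0.5s late (at 10.5s)
 * re-arms for the absolute time 12s, keeping expirations on the 2s grid.
 * Reloading a relative .it_value instead would re-arm for 12.5s, and every
 * late callout would push subsequent expirations further out.
 */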
/* ARGSUSED */
int
getitimer(struct proc *p, struct getitimer_args *uap, __unused int32_t *retval)
{
	struct itimerval aitv;

	if (uap->which > ITIMER_PROF) {
		return EINVAL;
	}

	bzero(&aitv, sizeof(aitv));

	proc_spinlock(p);
	switch (uap->which) {
	case ITIMER_REAL:
		/*
		 * If time for real time timer has passed return 0,
		 * else return difference between current time and
		 * time for the timer to go off.
		 */
		aitv = p->p_realtimer;
		if (timerisset(&p->p_rtime)) {
			struct timeval now;

			microuptime(&now);
			if (timercmp(&p->p_rtime, &now, <)) {
				timerclear(&aitv.it_value);
			} else {
				aitv.it_value = p->p_rtime;
				timevalsub(&aitv.it_value, &now);
			}
		} else {
			timerclear(&aitv.it_value);
		}
		break;

	case ITIMER_VIRTUAL:
		aitv = p->p_vtimer_user;
		break;

	case ITIMER_PROF:
		aitv = p->p_vtimer_prof;
		break;
	}

	proc_spinunlock(p);

	if (IS_64BIT_PROCESS(p)) {
		struct user64_itimerval user_itv;
		bzero(&user_itv, sizeof(user_itv));
		user_itv.it_interval.tv_sec = aitv.it_interval.tv_sec;
		user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
		user_itv.it_value.tv_sec = aitv.it_value.tv_sec;
		user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
		return copyout((caddr_t)&user_itv, uap->itv, sizeof(user_itv));
	} else {
		struct user32_itimerval user_itv;
		bzero(&user_itv, sizeof(user_itv));
		user_itv.it_interval.tv_sec = (user32_time_t)aitv.it_interval.tv_sec;
		user_itv.it_interval.tv_usec = aitv.it_interval.tv_usec;
		user_itv.it_value.tv_sec = (user32_time_t)aitv.it_value.tv_sec;
		user_itv.it_value.tv_usec = aitv.it_value.tv_usec;
		return copyout((caddr_t)&user_itv, uap->itv, sizeof(user_itv));
	}
}

/*
 * Returns:	0			Success
 *		EINVAL			Invalid argument
 *	copyin:EFAULT			Bad address
 *	getitimer:EINVAL		Invalid argument
 *	getitimer:EFAULT		Bad address
 */
/* ARGSUSED */
int
setitimer(struct proc *p, struct setitimer_args *uap, int32_t *retval)
{
	struct itimerval aitv;
	user_addr_t itvp;
	int error;

	bzero(&aitv, sizeof(aitv));

	if (uap->which > ITIMER_PROF) {
		return EINVAL;
	}
	if ((itvp = uap->itv)) {
		if (IS_64BIT_PROCESS(p)) {
			struct user64_itimerval user_itv;
			if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof(user_itv)))) {
				return error;
			}
			aitv.it_interval.tv_sec = (__darwin_time_t)user_itv.it_interval.tv_sec;
			aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
			aitv.it_value.tv_sec = (__darwin_time_t)user_itv.it_value.tv_sec;
			aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
		} else {
			struct user32_itimerval user_itv;
			if ((error = copyin(itvp, (caddr_t)&user_itv, sizeof(user_itv)))) {
				return error;
			}
			aitv.it_interval.tv_sec = user_itv.it_interval.tv_sec;
			aitv.it_interval.tv_usec = user_itv.it_interval.tv_usec;
			aitv.it_value.tv_sec = user_itv.it_value.tv_sec;
			aitv.it_value.tv_usec = user_itv.it_value.tv_usec;
		}
	}
	if ((uap->itv = uap->oitv) && (error = getitimer(p, (struct getitimer_args *)uap, retval))) {
		return error;
	}
	if (itvp == 0) {
		return 0;
	}
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval)) {
		return EINVAL;
	}

	switch (uap->which) {
	case ITIMER_REAL:
		proc_spinlock(p);
		if (timerisset(&aitv.it_value)) {
			microuptime(&p->p_rtime);
			timevaladd(&p->p_rtime, &aitv.it_value);
			p->p_realtimer = aitv;
			if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL,
			    tvtoabstime(&p->p_rtime), 0, THREAD_CALL_DELAY_USER_NORMAL)) {
				p->p_ractive++;
			}
		} else {
			timerclear(&p->p_rtime);
			p->p_realtimer = aitv;
			if (thread_call_cancel(p->p_rcall)) {
				p->p_ractive--;
			}
		}
		proc_spinunlock(p);

		break;


	case ITIMER_VIRTUAL:
		if (timerisset(&aitv.it_value)) {
			task_vtimer_set(proc_task(p), TASK_VTIMER_USER);
		} else {
			task_vtimer_clear(proc_task(p), TASK_VTIMER_USER);
		}

		proc_spinlock(p);
		p->p_vtimer_user = aitv;
		proc_spinunlock(p);
		break;

	case ITIMER_PROF:
		if (timerisset(&aitv.it_value)) {
			task_vtimer_set(proc_task(p), TASK_VTIMER_PROF);
		} else {
			task_vtimer_clear(proc_task(p), TASK_VTIMER_PROF);
		}

		proc_spinlock(p);
		p->p_vtimer_prof = aitv;
		proc_spinunlock(p);
		break;
	}

	return 0;
}

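/*
 * For illustration only: a minimal user-space sketch that arms the
 * periodic ITIMER_REAL path handled above.  The handler name is
 * hypothetical; none of this is part of this file.
 *
 *	#include <sys/time.h>
 *	#include <signal.h>
 *
 *	static void
 *	on_alarm(int sig)
 *	{
 *		(void)sig;
 *	}
 *
 *	...
 *	struct itimerval itv = {
 *		.it_value    = { .tv_sec = 0, .tv_usec = 250000 },
 *		.it_interval = { .tv_sec = 0, .tv_usec = 250000 },
 *	};
 *	signal(SIGALRM, on_alarm);
 *	setitimer(ITIMER_REAL, &itv, NULL);
 */
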
void
proc_inherit_itimers(struct proc *old_proc, struct proc *new_proc)
{
	struct itimerval real_itv, vuser_itv, vprof_itv;

	/* Snapshot the old timer values */
	proc_spinlock(old_proc);
	real_itv = old_proc->p_realtimer;
	vuser_itv = old_proc->p_vtimer_user;
	vprof_itv = old_proc->p_vtimer_prof;
	proc_spinunlock(old_proc);

	if (timerisset(&vuser_itv.it_value)) {
		task_vtimer_set(proc_task(new_proc), TASK_VTIMER_USER);
	} else {
		task_vtimer_clear(proc_task(new_proc), TASK_VTIMER_USER);
	}

	if (timerisset(&vprof_itv.it_value)) {
		task_vtimer_set(proc_task(new_proc), TASK_VTIMER_PROF);
	} else {
		task_vtimer_clear(proc_task(new_proc), TASK_VTIMER_PROF);
	}

	/* Update the timer values on new proc */
	proc_spinlock(new_proc);

	if (timerisset(&real_itv.it_value)) {
		microuptime(&new_proc->p_rtime);
		timevaladd(&new_proc->p_rtime, &real_itv.it_value);
		new_proc->p_realtimer = real_itv;
		if (!thread_call_enter_delayed_with_leeway(new_proc->p_rcall, NULL,
		    tvtoabstime(&new_proc->p_rtime), 0, THREAD_CALL_DELAY_USER_NORMAL)) {
			new_proc->p_ractive++;
		}
	} else {
		timerclear(&new_proc->p_rtime);
		new_proc->p_realtimer = real_itv;
	}

	new_proc->p_vtimer_user = vuser_itv;
	new_proc->p_vtimer_prof = vprof_itv;

	proc_spinunlock(new_proc);
}

/*
 * Real interval timer expired:
 * send the process whose timer expired an alarm signal.
 * If the timer is not set up to reload, just return;
 * else compute the next time the timer should go off, which is > current time.
 * This is where delay in processing this timeout causes multiple
 * SIGALRM calls to be compressed into one.
 */
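/*
 * Illustrative numbers: with it_interval = 1s and p_rtime = 10s, a callout
 * held off until uptime 13.2s delivers a single SIGALRM and re-arms for
 * 14s; the expirations notionally due at 11s, 12s, and 13s are folded into
 * that one signal rather than queued individually.
 */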
void
realitexpire(
	struct proc *p,
	__unused void *p2)
{
	struct proc *r;
	struct timeval t;

	r = proc_find(proc_getpid(p));

	proc_spinlock(p);

	assert(p->p_ractive > 0);

	if (--p->p_ractive > 0 || r != p) {
		/*
		 * bail, because either proc is exiting
		 * or there's another active thread call
		 */
		proc_spinunlock(p);

		if (r != NULL) {
			proc_rele(r);
		}
		return;
	}

	if (!timerisset(&p->p_realtimer.it_interval)) {
		/*
		 * p_realtimer was cleared while this call was pending,
		 * send one last SIGALRM, but don't re-arm
		 */
		timerclear(&p->p_rtime);
		proc_spinunlock(p);

		psignal(p, SIGALRM);
		proc_rele(p);
		return;
	}

	proc_spinunlock(p);

	/*
	 * Send the signal before re-arming the next thread call,
	 * so in case psignal blocks, we won't create yet another thread call.
	 */

	psignal(p, SIGALRM);

	proc_spinlock(p);

	/* Should we still re-arm the next thread call? */
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_rtime);
		proc_spinunlock(p);

		proc_rele(p);
		return;
	}

	microuptime(&t);
	timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);

	if (timercmp(&p->p_rtime, &t, <=)) {
		if ((p->p_rtime.tv_sec + 2) >= t.tv_sec) {
			for (;;) {
				timevaladd(&p->p_rtime, &p->p_realtimer.it_interval);
				if (timercmp(&p->p_rtime, &t, >)) {
					break;
				}
			}
		} else {
			p->p_rtime = p->p_realtimer.it_interval;
			timevaladd(&p->p_rtime, &t);
		}
	}

	assert(p->p_rcall != NULL);

	if (!thread_call_enter_delayed_with_leeway(p->p_rcall, NULL, tvtoabstime(&p->p_rtime), 0,
	    THREAD_CALL_DELAY_USER_NORMAL)) {
		p->p_ractive++;
	}

	proc_spinunlock(p);

	proc_rele(p);
}

/*
 * Called once in proc_exit to clean up after an armed or pending realitexpire
 *
 * This will only be called after the proc refcount is drained,
 * so realitexpire cannot be currently holding a proc ref.
 * i.e. it will/has gotten PROC_NULL from proc_find.
 */
void
proc_free_realitimer(proc_t p)
{
	proc_spinlock(p);

	assert(p->p_rcall != NULL);
	assert(proc_list_exited(p));

	timerclear(&p->p_realtimer.it_interval);

	if (thread_call_cancel(p->p_rcall)) {
		assert(p->p_ractive > 0);
		p->p_ractive--;
	}

	while (p->p_ractive > 0) {
		proc_spinunlock(p);

		delay(1);

		proc_spinlock(p);
	}

	thread_call_t call = p->p_rcall;
	p->p_rcall = NULL;

	proc_spinunlock(p);

	thread_call_free(call);
}

/*
 * Check that a proposed value to load into the .it_value or
 * .it_interval part of an interval timer is acceptable.
 */
int
itimerfix(
	struct timeval *tv)
{
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000 ||
	    tv->tv_usec < 0 || tv->tv_usec >= 1000000) {
		return EINVAL;
	}
	return 0;
}

int
timespec_is_valid(const struct timespec *ts)
{
	/* The INT32_MAX limit ensures the timespec is safe for clock_*() functions
	 * which accept 32-bit ints. */
	if (ts->tv_sec < 0 || ts->tv_sec > INT32_MAX ||
	    ts->tv_nsec < 0 || (unsigned long long)ts->tv_nsec > NSEC_PER_SEC) {
		return 0;
	}
	return 1;
}

/*
 * Decrement an interval timer by a specified number
 * of microseconds, which must be less than a second,
 * i.e. < 1000000.  If the timer expires, then reload
 * it.  In this case, carry over (usec - old value) to
 * reduce the value reloaded into the timer so that
 * the timer does not drift.  This routine assumes
 * that it is called in a context where the timers
 * on which it is operating cannot change in value.
 */
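/*
 * Worked example (illustrative numbers): with it_value = {0, 300} and
 * usec = 500, the timer expires 200us into the next period.  Reloading
 * from it_interval = {1, 0} then yields {0, 999800}: the 200us overshoot
 * is deducted so the expiration grid does not drift.  (When the reloaded
 * interval has tv_sec == 0, the deduction is skipped, since it could
 * otherwise drive the value negative.)
 */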
int
itimerdecr(proc_t p,
    struct itimerval *itp, int usec)
{
	proc_spinlock(p);

	if (itp->it_value.tv_usec < usec) {
		if (itp->it_value.tv_sec == 0) {
			/* expired, and already in next interval */
			usec -= itp->it_value.tv_usec;
			goto expire;
		}
		itp->it_value.tv_usec += 1000000;
		itp->it_value.tv_sec--;
	}
	itp->it_value.tv_usec -= usec;
	usec = 0;
	if (timerisset(&itp->it_value)) {
		proc_spinunlock(p);
		return 1;
	}
	/* expired, exactly at end of interval */
expire:
	if (timerisset(&itp->it_interval)) {
		itp->it_value = itp->it_interval;
		if (itp->it_value.tv_sec > 0) {
			itp->it_value.tv_usec -= usec;
			if (itp->it_value.tv_usec < 0) {
				itp->it_value.tv_usec += 1000000;
				itp->it_value.tv_sec--;
			}
		}
	} else {
		itp->it_value.tv_usec = 0; /* sec is already 0 */
	}
	proc_spinunlock(p);
	return 0;
}

/*
 * Add and subtract routines for timevals.
 * N.B.: subtract routine doesn't deal with
 * results which are before the beginning,
 * it just gets very confused in this case.
 * Caveat emptor.
 */
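/*
 * For example, timevalsub() on t1 = {1, 0} and t2 = {2, 500000} leaves
 * t1 = {-2, 500000} after timevalfix(); callers must not rely on a
 * meaningful result when t2 is later than t1.
 */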
void
timevaladd(
	struct timeval *t1,
	struct timeval *t2)
{
	t1->tv_sec += t2->tv_sec;
	t1->tv_usec += t2->tv_usec;
	timevalfix(t1);
}

void
timevalsub(
	struct timeval *t1,
	struct timeval *t2)
{
	t1->tv_sec -= t2->tv_sec;
	t1->tv_usec -= t2->tv_usec;
	timevalfix(t1);
}

void
timevalfix(
	struct timeval *t1)
{
	if (t1->tv_usec < 0) {
		t1->tv_sec--;
		t1->tv_usec += 1000000;
	}
	if (t1->tv_usec >= 1000000) {
		t1->tv_sec++;
		t1->tv_usec -= 1000000;
	}
}

static boolean_t
timeval_fixusec(
	struct timeval *t1)
{
	assert(t1->tv_usec >= 0);
	assert(t1->tv_sec >= 0);

	if (t1->tv_usec >= 1000000) {
		if (os_add_overflow(t1->tv_sec, t1->tv_usec / 1000000, &t1->tv_sec)) {
			return FALSE;
		}
		t1->tv_usec = t1->tv_usec % 1000000;
	}

	return TRUE;
}

/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points.
 */
void
microtime(
	struct timeval *tvp)
{
	clock_sec_t tv_sec;
	clock_usec_t tv_usec;

	clock_get_calendar_microtime(&tv_sec, &tv_usec);

	tvp->tv_sec = tv_sec;
	tvp->tv_usec = tv_usec;
}

void
microtime_with_abstime(
	struct timeval *tvp, uint64_t *abstime)
{
	clock_sec_t tv_sec;
	clock_usec_t tv_usec;

	clock_get_calendar_absolute_and_microtime(&tv_sec, &tv_usec, abstime);

	tvp->tv_sec = tv_sec;
	tvp->tv_usec = tv_usec;
}

void
microuptime(
	struct timeval *tvp)
{
	clock_sec_t tv_sec;
	clock_usec_t tv_usec;

	clock_get_system_microtime(&tv_sec, &tv_usec);

	tvp->tv_sec = tv_sec;
	tvp->tv_usec = tv_usec;
}

/*
 * Ditto for timespec.
 */
void
nanotime(
	struct timespec *tsp)
{
	clock_sec_t tv_sec;
	clock_nsec_t tv_nsec;

	clock_get_calendar_nanotime(&tv_sec, &tv_nsec);

	tsp->tv_sec = tv_sec;
	tsp->tv_nsec = tv_nsec;
}

void
nanouptime(
	struct timespec *tsp)
{
	clock_sec_t tv_sec;
	clock_nsec_t tv_nsec;

	clock_get_system_nanotime(&tv_sec, &tv_nsec);

	tsp->tv_sec = tv_sec;
	tsp->tv_nsec = tv_nsec;
}

uint64_t
tvtoabstime(
	struct timeval *tvp)
{
	uint64_t result, usresult;

	clock_interval_to_absolutetime_interval(
		(uint32_t)tvp->tv_sec, NSEC_PER_SEC, &result);
	clock_interval_to_absolutetime_interval(
		tvp->tv_usec, NSEC_PER_USEC, &usresult);

	return result + usresult;
}

uint64_t
tstoabstime(struct timespec *ts)
{
	uint64_t abstime_s, abstime_ns;
	clock_interval_to_absolutetime_interval((uint32_t)ts->tv_sec, NSEC_PER_SEC, &abstime_s);
	clock_interval_to_absolutetime_interval((uint32_t)ts->tv_nsec, 1, &abstime_ns);
	return abstime_s + abstime_ns;
}

#if NETWORKING
/*
 * ratecheck(): simple time-based rate-limit checking.
 */
int
ratecheck(struct timeval *lasttime, const struct timeval *mininterval)
{
	struct timeval tv, delta;
	int rv = 0;

	net_uptime2timeval(&tv);
	delta = tv;
	timevalsub(&delta, lasttime);

	/*
	 * The check for 0,0 ensures that the message will be seen at
	 * least once, even if the interval is huge.
	 */
	if (timevalcmp(&delta, mininterval, >=) ||
	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
		*lasttime = tv;
		rv = 1;
	}

	return rv;
}

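/*
 * Typical use (illustrative; the static variables persist across calls
 * and are an assumption about the caller):
 *
 *	static struct timeval lasttime;
 *	static const struct timeval mininterval = { 10, 0 };
 *
 *	if (ratecheck(&lasttime, &mininterval)) {
 *		printf("something noteworthy happened\n");
 *	}
 */
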
/*
 * ppsratecheck(): packets (or events) per second limitation.
 */
int
ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
	struct timeval tv, delta;
	int rv;

	net_uptime2timeval(&tv);

	timersub(&tv, lasttime, &delta);

	/*
	 * Check for 0,0 so that the message will be seen at least once.
	 * If more than one second has passed since the last update of
	 * lasttime, reset the counter.
	 *
	 * We increment *curpps even when *curpps < maxpps, since some
	 * callers may use *curpps for statistics as well.
	 */
	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
	    delta.tv_sec >= 1) {
		*lasttime = tv;
		*curpps = 0;
		rv = 1;
	} else if (maxpps < 0) {
		rv = 1;
	} else if (*curpps < maxpps) {
		rv = 1;
	} else {
		rv = 0;
	}

	/* be careful about wrap-around */
	if (*curpps < INT_MAX) {
		*curpps = *curpps + 1;
	}

	return rv;
}
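/*
 * Typical use (illustrative; handle_event() is a hypothetical caller-side
 * function): allow at most 100 events per second, with the counters
 * persisting across calls.
 *
 *	static struct timeval pps_lasttime;
 *	static int pps_count;
 *
 *	if (ppsratecheck(&pps_lasttime, &pps_count, 100)) {
 *		handle_event();
 *	}
 */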
#endif /* NETWORKING */

int
__mach_bridge_remote_time(__unused struct proc *p, struct __mach_bridge_remote_time_args *mbrt_args, uint64_t *retval)
{
	*retval = mach_bridge_remote_time(mbrt_args->local_timestamp);
	return 0;
}