1/*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 */
33/*-
34 * Copyright (c) 1982, 1986, 1993
35 * The Regents of the University of California. All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 4. Neither the name of the University nor the names of its contributors
46 * may be used to endorse or promote products derived from this software
47 * without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 *
61 * @(#)time.h 8.5 (Berkeley) 5/4/95
62 * $FreeBSD$
63 */
64
65#include <mach/mach_types.h>
66
67#include <kern/spl.h>
68#include <kern/sched_prim.h>
69#include <kern/thread.h>
70#include <kern/clock.h>
71#include <kern/host_notify.h>
72#include <kern/thread_call.h>
73#include <libkern/OSAtomic.h>
74
75#include <IOKit/IOPlatformExpert.h>
76
77#include <machine/commpage.h>
78#include <machine/config.h>
79#include <machine/machine_routines.h>
80
81#include <mach/mach_traps.h>
82#include <mach/mach_time.h>
83
84#include <sys/kdebug.h>
85#include <sys/timex.h>
86#include <kern/arithmetic_128.h>
87#include <os/log.h>
88
89#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
90// On ARM64, the hwclock keeps ticking across a normal S2R so we use it to reset the
91// system clock after a normal wake. However, on hibernation we cut power to the hwclock,
92// so we have to add an offset to the hwclock to compute continuous_time after hibernate resume.
93uint64_t hwclock_conttime_offset = 0;
94#endif /* HIBERNATION && HAS_CONTINUOUS_HWCLOCK */
95
96#if HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK
97#define ENABLE_LEGACY_CLOCK_CODE 1
98#endif /* HIBERNATION_USES_LEGACY_CLOCK || !HAS_CONTINUOUS_HWCLOCK */
99
100#if HIBERNATION_USES_LEGACY_CLOCK
101#include <IOKit/IOHibernatePrivate.h>
102#endif /* HIBERNATION_USES_LEGACY_CLOCK */
103
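/*
 * Length of one BSD scheduler tick expressed in mach absolute time units.
 * Initialized to 1 here; set to the equivalent of 10ms in
 * clock_timebase_init() once the timebase is known.
 */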
104uint32_t hz_tick_interval = 1;
105#if ENABLE_LEGACY_CLOCK_CODE
106static uint64_t has_monotonic_clock = 0;
107#endif /* ENABLE_LEGACY_CLOCK_CODE */
108
109lck_ticket_t clock_lock;
110LCK_GRP_DECLARE(clock_lock_grp, "clock");
111
112static LCK_GRP_DECLARE(settime_lock_grp, "settime");
113static LCK_MTX_DECLARE(settime_lock, &settime_lock_grp);
114
115#define clock_lock() \
116 lck_ticket_lock(&clock_lock, &clock_lock_grp)
117
118#define clock_unlock() \
119 lck_ticket_unlock(&clock_lock)
120
121boolean_t
122kdp_clock_is_locked()
123{
124 return kdp_lck_ticket_is_acquired(&clock_lock);
125}
126
127struct bintime {
128 time_t sec;
129 uint64_t frac;
130};
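/*
 * A bintime represents sec + frac/2^64 seconds as a binary fixed-point value,
 * so 1 ns corresponds to roughly 2^64 / 10^9 = 18446744073 units of frac and
 * 1 us to 2^64 / 10^6 = 18446744073709 units (the constants used by the
 * helpers below). Carries and borrows on frac are detected by comparing the
 * result against the original value.
 */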
131
132static __inline void
133bintime_addx(struct bintime *_bt, uint64_t _x)
134{
135 uint64_t _u;
136
137 _u = _bt->frac;
138 _bt->frac += _x;
139 if (_u > _bt->frac) {
140 _bt->sec++;
141 }
142}
143
144static __inline void
145bintime_subx(struct bintime *_bt, uint64_t _x)
146{
147 uint64_t _u;
148
149 _u = _bt->frac;
150 _bt->frac -= _x;
151 if (_u < _bt->frac) {
152 _bt->sec--;
153 }
154}
155
156static __inline void
157bintime_addns(struct bintime *bt, uint64_t ns)
158{
159 bt->sec += ns / (uint64_t)NSEC_PER_SEC;
160 ns = ns % (uint64_t)NSEC_PER_SEC;
161 if (ns) {
162 /* 18446744073 = int(2^64 / NSEC_PER_SEC) */
163 ns = ns * (uint64_t)18446744073LL;
164 bintime_addx(bt, ns);
165 }
166}
167
168static __inline void
169bintime_subns(struct bintime *bt, uint64_t ns)
170{
171 bt->sec -= ns / (uint64_t)NSEC_PER_SEC;
172 ns = ns % (uint64_t)NSEC_PER_SEC;
173 if (ns) {
174 /* 18446744073 = int(2^64 / NSEC_PER_SEC) */
175 ns = ns * (uint64_t)18446744073LL;
176 bintime_subx(bt, ns);
177 }
178}
179
180static __inline void
181bintime_addxns(struct bintime *bt, uint64_t a, int64_t xns)
182{
183 uint64_t uxns = (xns > 0)?(uint64_t)xns:(uint64_t)-xns;
184 uint64_t ns = multi_overflow(a, uxns);
185 if (xns > 0) {
186 if (ns) {
187 bintime_addns(bt, ns);
188 }
189 ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
190 bintime_addx(bt, ns);
191 } else {
192 if (ns) {
193 bintime_subns(bt, ns);
194 }
195 ns = (a * uxns) / (uint64_t)NSEC_PER_SEC;
196 bintime_subx(bt, ns);
197 }
198}
199
200
201static __inline void
202bintime_add(struct bintime *_bt, const struct bintime *_bt2)
203{
204 uint64_t _u;
205
206 _u = _bt->frac;
207 _bt->frac += _bt2->frac;
208 if (_u > _bt->frac) {
209 _bt->sec++;
210 }
211 _bt->sec += _bt2->sec;
212}
213
214static __inline void
215bintime_sub(struct bintime *_bt, const struct bintime *_bt2)
216{
217 uint64_t _u;
218
219 _u = _bt->frac;
220 _bt->frac -= _bt2->frac;
221 if (_u < _bt->frac) {
222 _bt->sec--;
223 }
224 _bt->sec -= _bt2->sec;
225}
226
227static __inline void
228clock2bintime(const clock_sec_t *secs, const clock_usec_t *microsecs, struct bintime *_bt)
229{
230 _bt->sec = *secs;
231 /* 18446744073709 = int(2^64 / 1000000) */
232 _bt->frac = *microsecs * (uint64_t)18446744073709LL;
233}
234
235static __inline void
236bintime2usclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *microsecs)
237{
238 *secs = _bt->sec;
239 *microsecs = ((uint64_t)USEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
240}
241
242static __inline void
243bintime2nsclock(const struct bintime *_bt, clock_sec_t *secs, clock_usec_t *nanosecs)
244{
245 *secs = _bt->sec;
246 *nanosecs = ((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32;
247}
248
249#if ENABLE_LEGACY_CLOCK_CODE
250static __inline void
251bintime2absolutetime(const struct bintime *_bt, uint64_t *abs)
252{
253 uint64_t nsec;
254 nsec = (uint64_t) _bt->sec * (uint64_t)NSEC_PER_SEC + (((uint64_t)NSEC_PER_SEC * (uint32_t)(_bt->frac >> 32)) >> 32);
255 nanoseconds_to_absolutetime(nsec, abs);
256}
257
258struct latched_time {
259 uint64_t monotonic_time_usec;
260 uint64_t mach_time;
261};
262
263extern int
264kernel_sysctlbyname(const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
265
266#endif /* ENABLE_LEGACY_CLOCK_CODE */
267/*
268 * Time of day (calendar) variables.
269 *
270 * Algorithm:
271 *
272 * TOD <- bintime + delta*scale
273 *
274 * where :
275 * bintime is a cumulative offset that includes boottime and the scaled time elapsed between boottime and the last scale update.
276 * delta is ticks elapsed since last scale update.
277 * scale is computed according to an adjustment provided by ntp_kern.
278 */
279static struct clock_calend {
280 uint64_t s_scale_ns; /* scale applied to each elapsed second; converts seconds to ns */
281 int64_t s_adj_nsx; /* additional adjustment applied to each elapsed second, expressed as a 64 bit fraction of a ns */
282 uint64_t tick_scale_x; /* scale applied to each elapsed tick; converts ticks to a 64 bit fraction of a second */
283 uint64_t offset_count; /* abs time from which the current scales apply */
284 struct bintime offset; /* cumulative offset expressed as (sec, 64 bit fraction of a second) */
285 struct bintime bintime; /* cumulative offset (including boottime) expressed as (sec, 64 bit fraction of a second) */
286 struct bintime boottime; /* boot time expressed as (sec, 64 bit fraction of a second) */
287#if ENABLE_LEGACY_CLOCK_CODE
288 struct bintime basesleep;
289#endif /* ENABLE_LEGACY_CLOCK_CODE */
290} clock_calend;
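/*
 * Illustrative sketch (not a compiled helper) of how the calendar time is
 * assembled from the fields above, matching get_scaled_time() and the
 * readers below:
 *
 *   delta = mach_absolute_time() - clock_calend.offset_count;
 *   TOD   = clock_calend.bintime
 *           + scale_delta(delta, tick_scale_x, s_scale_ns, s_adj_nsx);
 */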
291
292static uint64_t ticks_per_sec; /* ticks in a second (expressed in abs time) */
293
294#if DEVELOPMENT || DEBUG
295extern int g_should_log_clock_adjustments;
296
297static void print_all_clock_variables(const char*, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* calend_cp);
298static void print_all_clock_variables_internal(const char *, struct clock_calend* calend_cp);
299#else
300#define print_all_clock_variables(...) do { } while (0)
301#define print_all_clock_variables_internal(...) do { } while (0)
302#endif
303
304#if CONFIG_DTRACE
305
306
307/*
308 * Unlocked calendar flipflop; this is used to track a clock_calend such
309 * that we can safely access a snapshot of a valid clock_calend structure
310 * without needing to take any locks to do it.
311 *
312 * The trick is to use a generation count and set the low bit when it is
313 * being updated/read; by doing this, we guarantee, through use of the
314 * os_atomic functions, that the generation is incremented when the bit
315 * is cleared atomically (by using a 1 bit add).
316 */
317static struct unlocked_clock_calend {
318 struct clock_calend calend; /* copy of calendar */
319 uint32_t gen; /* generation count */
320} flipflop[2];
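/*
 * Writers (clock_track_calend_nowait) set the low bit of gen, copy the
 * calendar, then increment gen; readers (clock_get_calendar_nanotime_nowait)
 * take a snapshot and retry against the other element if the generation
 * indicates an update was in flight.
 */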
321
322static void clock_track_calend_nowait(void);
323
324#endif
325
326void _clock_delay_until_deadline(uint64_t interval, uint64_t deadline);
327void _clock_delay_until_deadline_with_leeway(uint64_t interval, uint64_t deadline, uint64_t leeway);
328
329/* Boottime variables*/
330static uint64_t clock_boottime;
331static uint32_t clock_boottime_usec;
332
333#define TIME_ADD(rsecs, secs, rfrac, frac, unit) \
334MACRO_BEGIN \
335 if (((rfrac) += (frac)) >= (unit)) { \
336 (rfrac) -= (unit); \
337 (rsecs) += 1; \
338 } \
339 (rsecs) += (secs); \
340MACRO_END
341
342#define TIME_SUB(rsecs, secs, rfrac, frac, unit) \
343MACRO_BEGIN \
344 if ((int)((rfrac) -= (frac)) < 0) { \
345 (rfrac) += (unit); \
346 (rsecs) -= 1; \
347 } \
348 (rsecs) -= (secs); \
349MACRO_END
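/*
 * These macros add/subtract a (seconds, fraction) pair into the result pair
 * with a single carry/borrow step, e.g.
 * TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC)
 * leaves UTC - sys in utc_offset_{secs,microsecs}. The fraction operands are
 * assumed to already be less than 'unit'.
 */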
350
351/*
352 * clock_config:
353 *
354 * Called once at boot to configure the clock subsystem.
355 */
356void
357clock_config(void)
358{
359 lck_ticket_init(&clock_lock, &clock_lock_grp);
360
361 clock_oldconfig();
362
363 ntp_init();
364
365 nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
366}
367
368/*
369 * clock_init:
370 *
371 * Called on a processor each time started.
372 */
373void
374clock_init(void)
375{
376 clock_oldinit();
377}
378
379/*
380 * clock_timebase_init:
381 *
382 * Called by machine dependent code
383 * to initialize areas dependent on the
384 * timebase value. May be called multiple
385 * times during start up.
386 */
387void
388clock_timebase_init(void)
389{
390 uint64_t abstime;
391
392 /*
393 * BSD expects a tick to represent 10ms.
394 */
395 nanoseconds_to_absolutetime(NSEC_PER_SEC / 100, &abstime);
396 hz_tick_interval = (uint32_t)abstime;
397
398 sched_timebase_init();
399}
400
401/*
402 * mach_timebase_info_trap:
403 *
404 * User trap returns timebase constant.
405 */
406kern_return_t
407mach_timebase_info_trap(
408 struct mach_timebase_info_trap_args *args)
409{
410 mach_vm_address_t out_info_addr = args->info;
411 mach_timebase_info_data_t info = {};
412
413 clock_timebase_info(&info);
414
415 copyout((void *)&info, out_info_addr, sizeof(info));
416
417 return KERN_SUCCESS;
418}
419
420/*
421 * Calendar routines.
422 */
423
424/*
425 * clock_get_calendar_microtime:
426 *
427 * Returns the current calendar value,
428 * microseconds as the fraction.
429 */
430void
431clock_get_calendar_microtime(
432 clock_sec_t *secs,
433 clock_usec_t *microsecs)
434{
435 clock_get_calendar_absolute_and_microtime(secs, microsecs, NULL);
436}
437
438/*
439 * get_scale_factors_from_adj:
440 *
441 * computes scale factors from the value given in adjustment.
442 *
443 * Part of the code has been taken from tc_windup of FreeBSD
444 * written by Poul-Henning Kamp <phk@FreeBSD.ORG>, Julien Ridoux and
445 * Konstantin Belousov.
446 * https://github.com/freebsd/freebsd/blob/master/sys/kern/kern_tc.c
447 */
448static void
449get_scale_factors_from_adj(int64_t adjustment, uint64_t* tick_scale_x, uint64_t* s_scale_ns, int64_t* s_adj_nsx)
450{
451 uint64_t scale;
452 int64_t nano, frac;
453
454 /*-
455 * Calculating the scaling factor. We want the number of 1/2^64
456 * fractions of a second per period of the hardware counter, taking
457 * into account the th_adjustment factor which the NTP PLL/adjtime(2)
458 * processing provides us with.
459 *
460 * The th_adjustment is nanoseconds per second with 32 bit binary
461 * fraction and we want 64 bit binary fraction of second:
462 *
463 * x = a * 2^32 / 10^9 = a * 4.294967296
464 *
465 * The range of th_adjustment is +/- 5000PPM so inside a 64bit int
466 * we can only multiply by about 850 without overflowing, that
467 * leaves no suitably precise fractions for multiply before divide.
468 *
469 * Divide before multiply with a fraction of 2199/512 results in a
470 * systematic undercompensation of 10PPM of th_adjustment. On a
471 * 5000PPM adjustment this is a 0.05PPM error. This is acceptable.
472 *
473 * We happily sacrifice the lowest of the 64 bits of our result
474 * to the goddess of code clarity.
475 *
476 */
477 scale = (uint64_t)1 << 63;
478 scale += (adjustment / 1024) * 2199;
479 scale /= ticks_per_sec;
480 *tick_scale_x = scale * 2;
481
482 /*
483 * hi part of adj
484 * it contains ns (without fraction) to add to the next sec.
485 * Get ns scale factor for the next sec.
486 */
487 nano = (adjustment > 0)? adjustment >> 32 : -((-adjustment) >> 32);
488 scale = (uint64_t) NSEC_PER_SEC;
489 scale += nano;
490 *s_scale_ns = scale;
491
492 /*
493 * lo part of adj
494 * it contains 32 bit frac of ns to add to the next sec.
495 * Keep it as additional adjustment for the next sec.
496 */
497 frac = (adjustment > 0)? ((uint32_t) adjustment) : -((uint32_t) (-adjustment));
498 *s_adj_nsx = (frac > 0)? ((uint64_t) frac) << 32 : -(((uint64_t) (-frac)) << 32);
499
500 return;
501}
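/*
 * To summarize the outputs above: tick_scale_x converts one elapsed tick into
 * a 64-bit binary fraction of a second, s_scale_ns is the number of whole
 * nanoseconds a nominal second maps to, and s_adj_nsx carries the remaining
 * sub-nanosecond adjustment (a 64-bit fraction of a ns) applied per second by
 * scale_delta().
 */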
502
503/*
504 * scale_delta:
505 *
506 * returns a bintime struct representing delta scaled according to the
507 * scale factors provided to this function.
508 */
509static struct bintime
510scale_delta(uint64_t delta, uint64_t tick_scale_x, uint64_t s_scale_ns, int64_t s_adj_nsx)
511{
512 uint64_t sec, new_ns, over;
513 struct bintime bt;
514
515 bt.sec = 0;
516 bt.frac = 0;
517
518 /*
519 * If more than one second has elapsed,
520 * scale fully elapsed seconds using scale factors for seconds.
521 * s_scale_ns -> scales sec to ns.
522 * s_adj_nsx -> additional adj expressed in 64 bit frac of ns to apply to each sec.
523 */
524 if (delta > ticks_per_sec) {
525 sec = (delta / ticks_per_sec);
526 new_ns = sec * s_scale_ns;
527 bintime_addns(&bt, new_ns);
528 if (s_adj_nsx) {
529 if (sec == 1) {
530 /* shortcut, no overflow can occur */
531 if (s_adj_nsx > 0) {
532 bintime_addx(&bt, (uint64_t)s_adj_nsx / (uint64_t)NSEC_PER_SEC);
533 } else {
534 bintime_subx(&bt, (uint64_t)-s_adj_nsx / (uint64_t)NSEC_PER_SEC);
535 }
536 } else {
537 /*
538 * s_adj_nsx is 64 bit frac of ns.
539 * sec*s_adj_nsx might overflow in int64_t.
540 * use bintime_addxns to not lose overflowed ns.
541 */
542 bintime_addxns(&bt, sec, s_adj_nsx);
543 }
544 }
545 delta = (delta % ticks_per_sec);
546 }
547
548 over = multi_overflow(tick_scale_x, delta);
549 if (over) {
550 bt.sec += over;
551 }
552
553 /*
554 * scale elapsed ticks using the scale factor for ticks.
555 */
556 bintime_addx(&bt, delta * tick_scale_x);
557
558 return bt;
559}
560
561/*
562 * get_scaled_time:
563 *
564 * returns the time elapsed since the last scale-factor update,
565 * scaled according to the current scale factors.
566 */
567static struct bintime
568get_scaled_time(uint64_t now)
569{
570 uint64_t delta;
571
572 /*
573 * Compute ticks elapsed since last scale update.
574 * This time will be scaled according to the value given by ntp kern.
575 */
576 delta = now - clock_calend.offset_count;
577
578 return scale_delta(delta, clock_calend.tick_scale_x, clock_calend.s_scale_ns, clock_calend.s_adj_nsx);
579}
580
581static void
582clock_get_calendar_absolute_and_microtime_locked(
583 clock_sec_t *secs,
584 clock_usec_t *microsecs,
585 uint64_t *abstime)
586{
587 uint64_t now;
588 struct bintime bt;
589
590 now = mach_absolute_time();
591 if (abstime) {
592 *abstime = now;
593 }
594
595 bt = get_scaled_time(now);
596 bintime_add(&bt, &clock_calend.bintime);
597 bintime2usclock(&bt, secs, microsecs);
598}
599
600static void
601clock_get_calendar_absolute_and_nanotime_locked(
602 clock_sec_t *secs,
603 clock_usec_t *nanosecs,
604 uint64_t *abstime)
605{
606 uint64_t now;
607 struct bintime bt;
608
609 now = mach_absolute_time();
610 if (abstime) {
611 *abstime = now;
612 }
613
614 bt = get_scaled_time(now);
615 bintime_add(&bt, &clock_calend.bintime);
616 bintime2nsclock(&bt, secs, nanosecs);
617}
618
619/*
620 * clock_get_calendar_absolute_and_microtime:
621 *
622 * Returns the current calendar value,
623 * microseconds as the fraction. Also
624 * returns mach_absolute_time if abstime
625 * is not NULL.
626 */
627void
628clock_get_calendar_absolute_and_microtime(
629 clock_sec_t *secs,
630 clock_usec_t *microsecs,
631 uint64_t *abstime)
632{
633 spl_t s;
634
635 s = splclock();
636 clock_lock();
637
638 clock_get_calendar_absolute_and_microtime_locked(secs, microsecs, abstime);
639
640 clock_unlock();
641 splx(s);
642}
643
644/*
645 * clock_get_calendar_nanotime:
646 *
647 * Returns the current calendar value,
648 * nanoseconds as the fraction.
649 *
650 * Since we do not have an interface to
651 * set the calendar with resolution greater
652 * than a microsecond, we honor that here.
653 */
654void
655clock_get_calendar_nanotime(
656 clock_sec_t *secs,
657 clock_nsec_t *nanosecs)
658{
659 spl_t s;
660
661 s = splclock();
662 clock_lock();
663
664 clock_get_calendar_absolute_and_nanotime_locked(secs, nanosecs, NULL);
665
666 clock_unlock();
667 splx(s);
668}
669
670/*
671 * clock_gettimeofday:
672 *
673 * Kernel interface for commpage implementation of
674 * gettimeofday() syscall.
675 *
676 * Returns the current calendar value, and updates the
677 * commpage info as appropriate. Because most calls to
678 * gettimeofday() are handled in user mode by the commpage,
679 * this routine should be used infrequently.
680 */
681void
682clock_gettimeofday(
683 clock_sec_t *secs,
684 clock_usec_t *microsecs)
685{
686 clock_gettimeofday_and_absolute_time(secs, microsecs, NULL);
687}
688
689void
690clock_gettimeofday_and_absolute_time(
691 clock_sec_t *secs,
692 clock_usec_t *microsecs,
693 uint64_t *mach_time)
694{
695 uint64_t now;
696 spl_t s;
697 struct bintime bt;
698
699 s = splclock();
700 clock_lock();
701
702 now = mach_absolute_time();
703 bt = get_scaled_time(now);
704 bintime_add(&bt, &clock_calend.bintime);
705 bintime2usclock(&bt, secs, microsecs);
706
707 clock_gettimeofday_set_commpage(now, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);
708
709 clock_unlock();
710 splx(s);
711
712 if (mach_time) {
713 *mach_time = now;
714 }
715}
716
717/*
718 * clock_set_calendar_microtime:
719 *
720 * Sets the current calendar value by
721 * recalculating the epoch and offset
722 * from the system clock.
723 *
724 * Also adjusts the boottime to keep the
725 * value consistent, writes the new
726 * calendar value to the platform clock,
727 * and sends calendar change notifications.
728 */
729void
730clock_set_calendar_microtime(
731 clock_sec_t secs,
732 clock_usec_t microsecs)
733{
734 uint64_t absolutesys;
735 clock_sec_t newsecs;
736 clock_sec_t oldsecs;
737 clock_usec_t newmicrosecs;
738 clock_usec_t oldmicrosecs;
739 uint64_t commpage_value;
740 spl_t s;
741 struct bintime bt;
742 clock_sec_t deltasecs;
743 clock_usec_t deltamicrosecs;
744
745 newsecs = secs;
746 newmicrosecs = microsecs;
747
748 /*
749 * The settime_lock mutex prevents racing settimeofday() calls from updating the wall clock
750 * and the platform clock concurrently.
751 *
752 * clock_lock cannot be used to close this race because it is acquired from interrupt context
753 * and must be held with interrupts disabled, whereas updating the platform clock has to be
754 * done with interrupts enabled.
755 */
756 lck_mtx_lock(&settime_lock);
757
758 s = splclock();
759 clock_lock();
760
761#if DEVELOPMENT || DEBUG
762 struct clock_calend clock_calend_cp = clock_calend;
763#endif
764 commpage_disable_timestamp();
765
766 /*
767 * Adjust the boottime based on the delta.
768 */
769 clock_get_calendar_absolute_and_microtime_locked(&oldsecs, &oldmicrosecs, &absolutesys);
770
771#if DEVELOPMENT || DEBUG
772 if (g_should_log_clock_adjustments) {
773 os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
774 __func__, (unsigned long)oldsecs, oldmicrosecs, absolutesys);
775 os_log(OS_LOG_DEFAULT, "%s requested %lu s %d u\n",
776 __func__, (unsigned long)secs, microsecs );
777 }
778#endif
779
780 if (oldsecs < secs || (oldsecs == secs && oldmicrosecs < microsecs)) {
781 // moving forwards
782 deltasecs = secs;
783 deltamicrosecs = microsecs;
784
785 TIME_SUB(deltasecs, oldsecs, deltamicrosecs, oldmicrosecs, USEC_PER_SEC);
786
787 TIME_ADD(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
788 clock2bintime(&deltasecs, &deltamicrosecs, &bt);
789 bintime_add(&clock_calend.boottime, &bt);
790 } else {
791 // moving backwards
792 deltasecs = oldsecs;
793 deltamicrosecs = oldmicrosecs;
794
795 TIME_SUB(deltasecs, secs, deltamicrosecs, microsecs, USEC_PER_SEC);
796
797 TIME_SUB(clock_boottime, deltasecs, clock_boottime_usec, deltamicrosecs, USEC_PER_SEC);
798 clock2bintime(&deltasecs, &deltamicrosecs, &bt);
799 bintime_sub(&clock_calend.boottime, &bt);
800 }
801
802 clock_calend.bintime = clock_calend.boottime;
803 bintime_add(&clock_calend.bintime, &clock_calend.offset);
804
805 clock2bintime((clock_sec_t *) &secs, (clock_usec_t *) &microsecs, &bt);
806
807 clock_gettimeofday_set_commpage(absolutesys, bt.sec, bt.frac, clock_calend.tick_scale_x, ticks_per_sec);
808
809#if DEVELOPMENT || DEBUG
810 struct clock_calend clock_calend_cp1 = clock_calend;
811#endif
812
813 commpage_value = clock_boottime * USEC_PER_SEC + clock_boottime_usec;
814
815 clock_unlock();
816 splx(s);
817
818 /*
819 * Set the new value for the platform clock.
820 * This call might block, so interrupts must be enabled.
821 */
822#if DEVELOPMENT || DEBUG
823 uint64_t now_b = mach_absolute_time();
824#endif
825
826 PESetUTCTimeOfDay(newsecs, newmicrosecs);
827
828#if DEVELOPMENT || DEBUG
829 uint64_t now_a = mach_absolute_time();
830 if (g_should_log_clock_adjustments) {
831 os_log(OS_LOG_DEFAULT, "%s mach bef PESet %llu mach aft %llu \n", __func__, now_b, now_a);
832 }
833#endif
834
835 print_all_clock_variables_internal(__func__, &clock_calend_cp);
836 print_all_clock_variables_internal(__func__, &clock_calend_cp1);
837
838 commpage_update_boottime(commpage_value);
839
840 /*
841 * Send host notifications.
842 */
843 host_notify_calendar_change();
844 host_notify_calendar_set();
845
846#if CONFIG_DTRACE
847 clock_track_calend_nowait();
848#endif
849
850 lck_mtx_unlock(&settime_lock);
851}
852
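/*
 * Total mach_absolute_time ticks spent asleep since boot, and the length of
 * the most recent sleep; updated by the wakeup paths below and exported via
 * the commpage for mach_continuous_time().
 */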
853uint64_t mach_absolutetime_asleep = 0;
854uint64_t mach_absolutetime_last_sleep = 0;
855
856void
857clock_get_calendar_uptime(clock_sec_t *secs)
858{
859 uint64_t now;
860 spl_t s;
861 struct bintime bt;
862
863 s = splclock();
864 clock_lock();
865
866 now = mach_absolute_time();
867
868 bt = get_scaled_time(now);
869 bintime_add(&bt, &clock_calend.offset);
870
871 *secs = bt.sec;
872
873 clock_unlock();
874 splx(s);
875}
876
877
878/*
879 * clock_update_calendar:
880 *
881 * called by ntp timer to update scale factors.
882 */
883void
884clock_update_calendar(void)
885{
886 uint64_t now, delta;
887 struct bintime bt;
888 spl_t s;
889 int64_t adjustment;
890
891 s = splclock();
892 clock_lock();
893
894 now = mach_absolute_time();
895
896 /*
897 * scale the time elapsed since the last update and
898 * add it to offset.
899 */
900 bt = get_scaled_time(now);
901 bintime_add(&clock_calend.offset, &bt);
902
903 /*
904 * update the base from which the next scale factors apply.
905 */
906 delta = now - clock_calend.offset_count;
907 clock_calend.offset_count += delta;
908
909 clock_calend.bintime = clock_calend.offset;
910 bintime_add(&clock_calend.bintime, &clock_calend.boottime);
911
912 /*
913 * recompute next adjustment.
914 */
915 ntp_update_second(&adjustment, clock_calend.bintime.sec);
916
917#if DEVELOPMENT || DEBUG
918 if (g_should_log_clock_adjustments) {
919 os_log(OS_LOG_DEFAULT, "%s adjustment %lld\n", __func__, adjustment);
920 }
921#endif
922
923 /*
924 * recompute the scale factors.
925 */
926 get_scale_factors_from_adj(adjustment, &clock_calend.tick_scale_x, &clock_calend.s_scale_ns, &clock_calend.s_adj_nsx);
927
928 clock_gettimeofday_set_commpage(now, clock_calend.bintime.sec, clock_calend.bintime.frac, clock_calend.tick_scale_x, ticks_per_sec);
929
930#if DEVELOPMENT || DEBUG
931 struct clock_calend calend_cp = clock_calend;
932#endif
933
934 clock_unlock();
935 splx(s);
936
937 print_all_clock_variables(__func__, NULL, NULL, NULL, NULL, &calend_cp);
938}
939
940
941#if DEVELOPMENT || DEBUG
942
943void
944print_all_clock_variables_internal(const char* func, struct clock_calend* clock_calend_cp)
945{
946 clock_sec_t offset_secs;
947 clock_usec_t offset_microsecs;
948 clock_sec_t bintime_secs;
949 clock_usec_t bintime_microsecs;
950 clock_sec_t bootime_secs;
951 clock_usec_t bootime_microsecs;
952
953 if (!g_should_log_clock_adjustments) {
954 return;
955 }
956
957 bintime2usclock(&clock_calend_cp->offset, &offset_secs, &offset_microsecs);
958 bintime2usclock(&clock_calend_cp->bintime, &bintime_secs, &bintime_microsecs);
959 bintime2usclock(&clock_calend_cp->boottime, &bootime_secs, &bootime_microsecs);
960
961 os_log(OS_LOG_DEFAULT, "%s s_scale_ns %llu s_adj_nsx %lld tick_scale_x %llu offset_count %llu\n",
962 func, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx,
963 clock_calend_cp->tick_scale_x, clock_calend_cp->offset_count);
964 os_log(OS_LOG_DEFAULT, "%s offset.sec %ld offset.frac %llu offset_secs %lu offset_microsecs %d\n",
965 func, clock_calend_cp->offset.sec, clock_calend_cp->offset.frac,
966 (unsigned long)offset_secs, offset_microsecs);
967 os_log(OS_LOG_DEFAULT, "%s bintime.sec %ld bintime.frac %llu bintime_secs %lu bintime_microsecs %d\n",
968 func, clock_calend_cp->bintime.sec, clock_calend_cp->bintime.frac,
969 (unsigned long)bintime_secs, bintime_microsecs);
970 os_log(OS_LOG_DEFAULT, "%s bootime.sec %ld bootime.frac %llu bootime_secs %lu bootime_microsecs %d\n",
971 func, clock_calend_cp->boottime.sec, clock_calend_cp->boottime.frac,
972 (unsigned long)bootime_secs, bootime_microsecs);
973
974#if !HAS_CONTINUOUS_HWCLOCK
975 clock_sec_t basesleep_secs;
976 clock_usec_t basesleep_microsecs;
977
978 bintime2usclock(&clock_calend_cp->basesleep, &basesleep_secs, &basesleep_microsecs);
979 os_log(OS_LOG_DEFAULT, "%s basesleep.sec %ld basesleep.frac %llu basesleep_secs %lu basesleep_microsecs %d\n",
980 func, clock_calend_cp->basesleep.sec, clock_calend_cp->basesleep.frac,
981 (unsigned long)basesleep_secs, basesleep_microsecs);
982#endif
983}
984
985
986void
987print_all_clock_variables(const char* func, clock_sec_t* pmu_secs, clock_usec_t* pmu_usec, clock_sec_t* sys_secs, clock_usec_t* sys_usec, struct clock_calend* clock_calend_cp)
988{
989 if (!g_should_log_clock_adjustments) {
990 return;
991 }
992
993 struct bintime bt;
994 clock_sec_t wall_secs;
995 clock_usec_t wall_microsecs;
996 uint64_t now;
997 uint64_t delta;
998
999 if (pmu_secs) {
1000 os_log(OS_LOG_DEFAULT, "%s PMU %lu s %d u \n", func, (unsigned long)*pmu_secs, *pmu_usec);
1001 }
1002 if (sys_secs) {
1003 os_log(OS_LOG_DEFAULT, "%s sys %lu s %d u \n", func, (unsigned long)*sys_secs, *sys_usec);
1004 }
1005
1006 print_all_clock_variables_internal(func, clock_calend_cp);
1007
1008 now = mach_absolute_time();
1009 delta = now - clock_calend_cp->offset_count;
1010
1011 bt = scale_delta(delta, clock_calend_cp->tick_scale_x, clock_calend_cp->s_scale_ns, clock_calend_cp->s_adj_nsx);
1012 bintime_add(&bt, &clock_calend_cp->bintime);
1013 bintime2usclock(&bt, &wall_secs, &wall_microsecs);
1014
1015 os_log(OS_LOG_DEFAULT, "%s wall %lu s %d u computed with %llu abs\n",
1016 func, (unsigned long)wall_secs, wall_microsecs, now);
1017}
1018
1019
1020#endif /* DEVELOPMENT || DEBUG */
1021
1022
1023/*
1024 * clock_initialize_calendar:
1025 *
1026 * Set the calendar and related clocks
1027 * from the platform clock at boot.
1028 *
1029 * Also sends host notifications.
1030 */
1031void
1032clock_initialize_calendar(void)
1033{
1034 clock_sec_t sys; // sleepless time since boot in seconds
1035 clock_sec_t secs; // Current UTC time
1036 clock_sec_t utc_offset_secs; // Difference between current UTC time and sleepless time since boot
1037 clock_usec_t microsys;
1038 clock_usec_t microsecs;
1039 clock_usec_t utc_offset_microsecs;
1040 spl_t s;
1041 struct bintime bt;
1042#if ENABLE_LEGACY_CLOCK_CODE
1043 struct bintime monotonic_bt;
1044 struct latched_time monotonic_time;
1045 uint64_t monotonic_usec_total;
1046 clock_sec_t sys2, monotonic_sec;
1047 clock_usec_t microsys2, monotonic_usec;
1048 size_t size;
1049
1050#endif /* ENABLE_LEGACY_CLOCK_CODE */
1051 // Get the UTC time and the corresponding sys time
1052 PEGetUTCTimeOfDay(&secs, &microsecs);
1053 clock_get_system_microtime(&sys, &microsys);
1054
1055#if ENABLE_LEGACY_CLOCK_CODE
1056 /*
1057 * If the platform has a monotonic clock, use kern.monotonicclock_usecs
1058 * to estimate the sleep/wake time, otherwise use the UTC time to estimate
1059 * the sleep time.
1060 */
1061 size = sizeof(monotonic_time);
1062 if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
1063 has_monotonic_clock = 0;
1064 os_log(OS_LOG_DEFAULT, "%s system does not have monotonic clock\n", __func__);
1065 } else {
1066 has_monotonic_clock = 1;
1067 monotonic_usec_total = monotonic_time.monotonic_time_usec;
1068 absolutetime_to_microtime(monotonic_time.mach_time, &sys2, &microsys2);
1069 os_log(OS_LOG_DEFAULT, "%s system has monotonic clock\n", __func__);
1070 }
1071#endif /* ENABLE_LEGACY_CLOCK_CODE */
1072
1073 s = splclock();
1074 clock_lock();
1075
1076 commpage_disable_timestamp();
1077
1078 utc_offset_secs = secs;
1079 utc_offset_microsecs = microsecs;
1080
1081 /*
1082 * We normally expect the UTC clock to be always-on and produce
1083 * greater readings than the tick counter. There may be corner cases
1084 * due to differing clock resolutions (UTC clock is likely lower) and
1085 * errors reading the UTC clock (some implementations return 0
1086 * on error) in which that doesn't hold true. Bring the UTC measurements
1087 * in-line with the tick counter measurements as a best effort in that case.
1088 */
1089 if ((sys > secs) || ((sys == secs) && (microsys > microsecs))) {
1090 os_log(OS_LOG_DEFAULT, "%s WARNING: UTC time is less than sys time, (%lu s %d u) UTC (%lu s %d u) sys\n",
1091 __func__, (unsigned long) secs, microsecs, (unsigned long)sys, microsys);
1092 secs = utc_offset_secs = sys;
1093 microsecs = utc_offset_microsecs = microsys;
1094 }
1095
1096 // UTC - sys
1097 // This macro stores the subtraction result in utc_offset_secs and utc_offset_microsecs
1098 TIME_SUB(utc_offset_secs, sys, utc_offset_microsecs, microsys, USEC_PER_SEC);
1099 // This function converts utc_offset_secs and utc_offset_microsecs to bintime
1100 clock2bintime(&utc_offset_secs, &utc_offset_microsecs, &bt);
1101
1102 /*
1103 * Initialize the boot time based on the platform clock.
1104 */
1105 clock_boottime = secs;
1106 clock_boottime_usec = microsecs;
1107 commpage_update_boottime(clock_boottime * USEC_PER_SEC + clock_boottime_usec);
1108
1109 nanoseconds_to_absolutetime((uint64_t)NSEC_PER_SEC, &ticks_per_sec);
1110 clock_calend.boottime = bt;
1111 clock_calend.bintime = bt;
1112 clock_calend.offset.sec = 0;
1113 clock_calend.offset.frac = 0;
1114
1115 clock_calend.tick_scale_x = (uint64_t)1 << 63;
1116 clock_calend.tick_scale_x /= ticks_per_sec;
1117 clock_calend.tick_scale_x *= 2;
1118
1119 clock_calend.s_scale_ns = NSEC_PER_SEC;
1120 clock_calend.s_adj_nsx = 0;
1121
1122#if ENABLE_LEGACY_CLOCK_CODE
1123 if (has_monotonic_clock) {
1124 OS_ANALYZER_SUPPRESS("82347749") monotonic_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
1125 monotonic_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
1126
1127 // monotonic clock - sys
1128 // This macro stores the subtraction result in monotonic_sec and monotonic_usec
1129 TIME_SUB(monotonic_sec, sys2, monotonic_usec, microsys2, USEC_PER_SEC);
1130 clock2bintime(&monotonic_sec, &monotonic_usec, &monotonic_bt);
1131
1132 // set basesleep as the difference between monotonic clock - sys
1133 clock_calend.basesleep = monotonic_bt;
1134 }
1135#endif /* ENABLE_LEGACY_CLOCK_CODE */
1136 commpage_update_mach_continuous_time(mach_absolutetime_asleep);
1137
1138#if DEVELOPMENT || DEBUG
1139 struct clock_calend clock_calend_cp = clock_calend;
1140#endif
1141
1142 clock_unlock();
1143 splx(s);
1144
1145 print_all_clock_variables(__func__, &secs, &microsecs, &sys, &microsys, &clock_calend_cp);
1146
1147 /*
1148 * Send host notifications.
1149 */
1150 host_notify_calendar_change();
1151
1152#if CONFIG_DTRACE
1153 clock_track_calend_nowait();
1154#endif
1155}
1156
1157#if HAS_CONTINUOUS_HWCLOCK
1158
1159static void
1160scale_sleep_time(void)
1161{
1162 /* Apply the current NTP frequency adjustment to the time slept.
1163 * The frequency adjustment remains stable between calls to ntp_adjtime(),
1164 * and should thus provide a reasonable approximation of the total adjustment
1165 * required for the time slept. */
1166 struct bintime sleep_time;
1167 uint64_t tick_scale_x, s_scale_ns;
1168 int64_t s_adj_nsx;
1169 int64_t sleep_adj = ntp_get_freq();
1170 if (sleep_adj) {
1171 get_scale_factors_from_adj(sleep_adj, &tick_scale_x, &s_scale_ns, &s_adj_nsx);
1172 sleep_time = scale_delta(mach_absolutetime_last_sleep, tick_scale_x, s_scale_ns, s_adj_nsx);
1173 } else {
1174 tick_scale_x = (uint64_t)1 << 63;
1175 tick_scale_x /= ticks_per_sec;
1176 tick_scale_x *= 2;
1177 sleep_time.sec = mach_absolutetime_last_sleep / ticks_per_sec;
1178 sleep_time.frac = (mach_absolutetime_last_sleep % ticks_per_sec) * tick_scale_x;
1179 }
1180 bintime_add(&clock_calend.offset, &sleep_time);
1181 bintime_add(&clock_calend.bintime, &sleep_time);
1182}
1183
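/*
 * Wakeup path for platforms whose hardware clock keeps counting across sleep:
 * the time slept is the growth of (mach_continuous_time() -
 * mach_absolute_time()), which is then folded into the calendar after an
 * NTP-frequency correction by scale_sleep_time().
 */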
1184static void
1185clock_wakeup_calendar_hwclock(void)
1186{
1187 spl_t s;
1188
1189 s = splclock();
1190 clock_lock();
1191
1192 commpage_disable_timestamp();
1193
1194 uint64_t abstime = mach_absolute_time();
1195 uint64_t total_sleep_time = mach_continuous_time() - abstime;
1196
1197 mach_absolutetime_last_sleep = total_sleep_time - mach_absolutetime_asleep;
1198 mach_absolutetime_asleep = total_sleep_time;
1199
1200 scale_sleep_time();
1201
1202 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
1203 (uintptr_t)mach_absolutetime_last_sleep,
1204 (uintptr_t)mach_absolutetime_asleep,
1205 (uintptr_t)(mach_absolutetime_last_sleep >> 32),
1206 (uintptr_t)(mach_absolutetime_asleep >> 32));
1207
1208 commpage_update_mach_continuous_time(mach_absolutetime_asleep);
1209#if HIBERNATION
1210 commpage_update_mach_continuous_time_hw_offset(hwclock_conttime_offset);
1211#endif
1212 adjust_cont_time_thread_calls();
1213
1214 clock_unlock();
1215 splx(s);
1216
1217 host_notify_calendar_change();
1218
1219#if CONFIG_DTRACE
1220 clock_track_calend_nowait();
1221#endif
1222}
1223
1224#endif /* HAS_CONTINUOUS_HWCLOCK */
1225
1226#if ENABLE_LEGACY_CLOCK_CODE
1227
1228static void
1229clock_wakeup_calendar_legacy(void)
1230{
1231 clock_sec_t wake_sys_sec;
1232 clock_usec_t wake_sys_usec;
1233 clock_sec_t wake_sec;
1234 clock_usec_t wake_usec;
1235 clock_sec_t wall_time_sec;
1236 clock_usec_t wall_time_usec;
1237 clock_sec_t diff_sec;
1238 clock_usec_t diff_usec;
1239 clock_sec_t var_s;
1240 clock_usec_t var_us;
1241 spl_t s;
1242 struct bintime bt, last_sleep_bt;
1243 struct latched_time monotonic_time;
1244 uint64_t monotonic_usec_total;
1245 uint64_t wake_abs;
1246 size_t size;
1247
1248 /*
1249 * If the platform has the monotonic clock use that to
1250 * compute the sleep time. The monotonic clock does not have an offset
1251 * that can be modified, so neither the kernel nor userspace can change the time
1252 * of this clock; it can only increase monotonically over time.
1253 * During sleep mach_absolute_time (sys time) does not tick,
1254 * so the sleep time is the difference between the current monotonic time
1255 * less the absolute time and the previous difference stored at wake time.
1256 *
1257 * basesleep = (monotonic - sys) ---> computed at last wake
1258 * sleep_time = (monotonic - sys) - basesleep
1259 *
1260 * If the platform does not support monotonic clock we set the wall time to what the
1261 * UTC clock returns us.
1262 * Setting the wall time to UTC time implies that we lose all the adjustments
1263 * done while awake through adjtime/ntp_adjtime.
1264 * The UTC time is the monotonic clock + an offset that can be set
1265 * by kernel.
1266 * The time slept in this case is the difference between wall time and UTC
1267 * at wake.
1268 *
1269 * IMPORTANT:
1270 * We assume that only the kernel is setting the offset of the PMU/RTC and that
1271 * it is doing it only through the settimeofday interface.
1272 */
1273 if (has_monotonic_clock) {
1274#if DEVELOPMENT || DEBUG
1275 /*
1276 * Just for debugging, get the wake UTC time.
1277 */
1278 PEGetUTCTimeOfDay(&var_s, &var_us);
1279#endif
1280 /*
1281 * Get monotonic time with corresponding sys time
1282 */
1283 size = sizeof(monotonic_time);
1284 if (kernel_sysctlbyname("kern.monotonicclock_usecs", &monotonic_time, &size, NULL, 0) != 0) {
1285 panic("%s: could not call kern.monotonicclock_usecs", __func__);
1286 }
1287 wake_abs = monotonic_time.mach_time;
1288 absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
1289
1290 monotonic_usec_total = monotonic_time.monotonic_time_usec;
1291 wake_sec = monotonic_usec_total / (clock_sec_t)USEC_PER_SEC;
1292 wake_usec = monotonic_usec_total % (clock_usec_t)USEC_PER_SEC;
1293 } else {
1294 /*
1295 * Get UTC time and corresponding sys time
1296 */
1297 PEGetUTCTimeOfDay(&wake_sec, &wake_usec);
1298 wake_abs = mach_absolute_time();
1299 absolutetime_to_microtime(wake_abs, &wake_sys_sec, &wake_sys_usec);
1300 }
1301
1302#if DEVELOPMENT || DEBUG
1303 os_log(OS_LOG_DEFAULT, "time at wake %lu s %d u from %s clock, abs %llu\n", (unsigned long)wake_sec, wake_usec, (has_monotonic_clock)?"monotonic":"UTC", wake_abs);
1304 if (has_monotonic_clock) {
1305 OS_ANALYZER_SUPPRESS("82347749") os_log(OS_LOG_DEFAULT, "UTC time %lu s %d u\n", (unsigned long)var_s, var_us);
1306 }
1307#endif /* DEVELOPMENT || DEBUG */
1308
1309 s = splclock();
1310 clock_lock();
1311
1312 commpage_disable_timestamp();
1313
1314#if DEVELOPMENT || DEBUG
1315 struct clock_calend clock_calend_cp1 = clock_calend;
1316#endif /* DEVELOPMENT || DEBUG */
1317
1318 /*
1319 * We normally expect the UTC/monotonic clock to be always-on and produce
1320 * greater readings than the sys counter. There may be corner cases
1321 * due to differing clock resolutions (UTC/monotonic clock is likely lower) and
1322 * errors reading the UTC/monotonic clock (some implementations return 0
1323 * on error) in which that doesn't hold true.
1324 */
1325 if ((wake_sys_sec > wake_sec) || ((wake_sys_sec == wake_sec) && (wake_sys_usec > wake_usec))) {
1326 os_log_error(OS_LOG_DEFAULT, "WARNING: %s clock is less than sys clock at wake: %lu s %d u vs %lu s %d u, defaulting sleep time to zero\n", (has_monotonic_clock)?"monotonic":"UTC", (unsigned long)wake_sec, wake_usec, (unsigned long)wake_sys_sec, wake_sys_usec);
1327 mach_absolutetime_last_sleep = 0;
1328 goto done;
1329 }
1330
1331 if (has_monotonic_clock) {
1332 /*
1333 * compute the difference monotonic - sys
1334 * we already checked that monotonic time is
1335 * greater than sys.
1336 */
1337 diff_sec = wake_sec;
1338 diff_usec = wake_usec;
1339 // This macro stores the subtraction result in diff_sec and diff_usec
1340 TIME_SUB(diff_sec, wake_sys_sec, diff_usec, wake_sys_usec, USEC_PER_SEC);
1341 // This function converts diff_sec and diff_usec to bintime
1342 clock2bintime(&diff_sec, &diff_usec, &bt);
1343
1344 /*
1345 * Safety belt: the monotonic clock will likely have a lower resolution than the sys counter.
1346 * It's also possible that the device didn't fully transition to the powered-off state on
1347 * the most recent sleep, so the sys counter may not have reset or may have only briefly
1348 * turned off. In that case it's possible for the difference between the monotonic clock and the
1349 * sys counter to be less than the previously recorded value in clock_calend.basesleep.
1350 * In that case simply record that we slept for 0 ticks.
1351 */
1352 if ((bt.sec > clock_calend.basesleep.sec) ||
1353 ((bt.sec == clock_calend.basesleep.sec) && (bt.frac > clock_calend.basesleep.frac))) {
1354 //last_sleep is the difference between (current monotonic - abs) and (last wake monotonic - abs)
1355 last_sleep_bt = bt;
1356 bintime_sub(&last_sleep_bt, &clock_calend.basesleep);
1357
1358 bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
1359 mach_absolutetime_asleep += mach_absolutetime_last_sleep;
1360
1361 //set basesleep to current monotonic - abs
1362 clock_calend.basesleep = bt;
1363
1364 //update wall time
1365 bintime_add(&clock_calend.offset, &last_sleep_bt);
1366 bintime_add(&clock_calend.bintime, &last_sleep_bt);
1367
1368 bintime2usclock(&last_sleep_bt, &var_s, &var_us);
1369 os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long) var_s, var_us);
1370 } else {
1371 bintime2usclock(&clock_calend.basesleep, &var_s, &var_us);
1372 os_log_error(OS_LOG_DEFAULT, "WARNING: last wake monotonic-sys time (%lu s %d u) is greater than current monotonic-sys time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) var_s, var_us, (unsigned long) diff_sec, diff_usec);
1373
1374 mach_absolutetime_last_sleep = 0;
1375 }
1376 } else {
1377 /*
1378 * set the wall time to UTC value
1379 */
1380 bt = get_scaled_time(wake_abs);
1381 bintime_add(&bt, &clock_calend.bintime);
1382 bintime2usclock(&bt, &wall_time_sec, &wall_time_usec);
1383
1384 if (wall_time_sec > wake_sec || (wall_time_sec == wake_sec && wall_time_usec > wake_usec)) {
1385 os_log(OS_LOG_DEFAULT, "WARNING: wall time (%lu s %d u) is greater than current UTC time (%lu s %d u), defaulting sleep time to zero\n", (unsigned long) wall_time_sec, wall_time_usec, (unsigned long) wake_sec, wake_usec);
1386
1387 mach_absolutetime_last_sleep = 0;
1388 } else {
1389 diff_sec = wake_sec;
1390 diff_usec = wake_usec;
1391 // This macro stores the subtraction result in diff_sec and diff_usec
1392 TIME_SUB(diff_sec, wall_time_sec, diff_usec, wall_time_usec, USEC_PER_SEC);
1393 // This function converts diff_sec and diff_usec to bintime
1394 clock2bintime(&diff_sec, &diff_usec, &bt);
1395
1396 //time slept in this case is the difference between PMU/RTC and wall time
1397 last_sleep_bt = bt;
1398
1399 bintime2absolutetime(&last_sleep_bt, &mach_absolutetime_last_sleep);
1400 mach_absolutetime_asleep += mach_absolutetime_last_sleep;
1401
1402 //update wall time
1403 bintime_add(&clock_calend.offset, &last_sleep_bt);
1404 bintime_add(&clock_calend.bintime, &last_sleep_bt);
1405
1406 bintime2usclock(&last_sleep_bt, &var_s, &var_us);
1407 os_log(OS_LOG_DEFAULT, "time_slept (%lu s %d u)\n", (unsigned long)var_s, var_us);
1408 }
1409 }
1410done:
1411 KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_CLOCK, MACH_EPOCH_CHANGE),
1412 (uintptr_t)mach_absolutetime_last_sleep,
1413 (uintptr_t)mach_absolutetime_asleep,
1414 (uintptr_t)(mach_absolutetime_last_sleep >> 32),
1415 (uintptr_t)(mach_absolutetime_asleep >> 32));
1416
1417 commpage_update_mach_continuous_time(mach_absolutetime_asleep);
1418 adjust_cont_time_thread_calls();
1419
1420#if DEVELOPMENT || DEBUG
1421 struct clock_calend clock_calend_cp = clock_calend;
1422#endif
1423
1424 clock_unlock();
1425 splx(s);
1426
1427#if DEVELOPMENT || DEBUG
1428 if (g_should_log_clock_adjustments) {
1429 print_all_clock_variables("clock_wakeup_calendar: BEFORE", NULL, NULL, NULL, NULL, &clock_calend_cp1);
1430 print_all_clock_variables("clock_wakeup_calendar: AFTER", NULL, NULL, NULL, NULL, &clock_calend_cp);
1431 }
1432#endif /* DEVELOPMENT || DEBUG */
1433
1434 host_notify_calendar_change();
1435
1436#if CONFIG_DTRACE
1437 clock_track_calend_nowait();
1438#endif
1439}
1440
1441#endif /* ENABLE_LEGACY_CLOCK_CODE */
1442
1443void
1444clock_wakeup_calendar(void)
1445{
1446#if HAS_CONTINUOUS_HWCLOCK
1447#if HIBERNATION_USES_LEGACY_CLOCK
1448 if (gIOHibernateState) {
1449 // if we're resuming from hibernation, we have to take the legacy wakeup path
1450 return clock_wakeup_calendar_legacy();
1451 }
1452#endif /* HIBERNATION_USES_LEGACY_CLOCK */
1453 // use the hwclock wakeup path
1454 return clock_wakeup_calendar_hwclock();
1455#elif ENABLE_LEGACY_CLOCK_CODE
1456 return clock_wakeup_calendar_legacy();
1457#else
1458#error "can't determine which clock code to run"
1459#endif
1460}
1461
1462/*
1463 * clock_get_boottime_nanotime:
1464 *
1465 * Return the boottime, used by sysctl.
1466 */
1467void
1468clock_get_boottime_nanotime(
1469 clock_sec_t *secs,
1470 clock_nsec_t *nanosecs)
1471{
1472 spl_t s;
1473
1474 s = splclock();
1475 clock_lock();
1476
1477 *secs = (clock_sec_t)clock_boottime;
1478 *nanosecs = (clock_nsec_t)clock_boottime_usec * NSEC_PER_USEC;
1479
1480 clock_unlock();
1481 splx(s);
1482}
1483
1484/*
1485 * clock_get_boottime_microtime:
1486 *
1487 * Return the boottime, used by sysctl.
1488 */
1489void
1490clock_get_boottime_microtime(
1491 clock_sec_t *secs,
1492 clock_usec_t *microsecs)
1493{
1494 spl_t s;
1495
1496 s = splclock();
1497 clock_lock();
1498
1499 *secs = (clock_sec_t)clock_boottime;
1500 *microsecs = (clock_usec_t)clock_boottime_usec;
1501
1502 clock_unlock();
1503 splx(s);
1504}
1505
1506
1507/*
1508 * Wait / delay routines.
1509 */
1510static void
1511mach_wait_until_continue(
1512 __unused void *parameter,
1513 wait_result_t wresult)
1514{
1515 thread_syscall_return((wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS);
1516 /*NOTREACHED*/
1517}
1518
1519/*
1520 * mach_wait_until_trap: Suspend execution of calling thread until the specified time has passed
1521 *
1522 * Parameters: args->deadline Mach absolute time to wait until
1523 *
1524 * Returns: 0 Success
1525 * !0 Not success
1526 *
1527 */
1528kern_return_t
1529mach_wait_until_trap(
1530 struct mach_wait_until_trap_args *args)
1531{
1532 uint64_t deadline = args->deadline;
1533 wait_result_t wresult;
1534
1535
1536 wresult = assert_wait_deadline_with_leeway((event_t)mach_wait_until_trap, THREAD_ABORTSAFE,
1537 TIMEOUT_URGENCY_USER_NORMAL, deadline, 0);
1538 if (wresult == THREAD_WAITING) {
1539 wresult = thread_block(mach_wait_until_continue);
1540 }
1541
1542 return (wresult == THREAD_INTERRUPTED)? KERN_ABORTED: KERN_SUCCESS;
1543}
1544
1545void
1546clock_delay_until(
1547 uint64_t deadline)
1548{
1549 uint64_t now = mach_absolute_time();
1550
1551 if (now >= deadline) {
1552 return;
1553 }
1554
1555 _clock_delay_until_deadline(deadline - now, deadline);
1556}
1557
1558/*
1559 * Preserve the original precise interval that the client
1560 * requested for comparison to the spin threshold.
1561 */
1562void
1563_clock_delay_until_deadline(
1564 uint64_t interval,
1565 uint64_t deadline)
1566{
1567 _clock_delay_until_deadline_with_leeway(interval, deadline, 0);
1568}
1569
1570/*
1571 * Like _clock_delay_until_deadline, but it accepts a
1572 * leeway value.
1573 */
1574void
1575_clock_delay_until_deadline_with_leeway(
1576 uint64_t interval,
1577 uint64_t deadline,
1578 uint64_t leeway)
1579{
1580 if (interval == 0) {
1581 return;
1582 }
1583
1584 if (ml_delay_should_spin(interval) ||
1585 get_preemption_level() != 0 ||
1586 ml_get_interrupts_enabled() == FALSE) {
1587 machine_delay_until(interval, deadline);
1588 } else {
1589 /*
1590 * For now, assume a leeway request of 0 means the client does not want a leeway
1591 * value. We may want to change this interpretation in the future.
1592 */
1593
1594 if (leeway) {
1595 assert_wait_deadline_with_leeway((event_t)clock_delay_until, THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
1596 } else {
1597 assert_wait_deadline((event_t)clock_delay_until, THREAD_UNINT, deadline);
1598 }
1599
1600 thread_block(THREAD_CONTINUE_NULL);
1601 }
1602}
1603
1604void
1605delay_for_interval(
1606 uint32_t interval,
1607 uint32_t scale_factor)
1608{
1609 uint64_t abstime;
1610
1611 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1612
1613 _clock_delay_until_deadline(abstime, mach_absolute_time() + abstime);
1614}
1615
1616void
1617delay_for_interval_with_leeway(
1618 uint32_t interval,
1619 uint32_t leeway,
1620 uint32_t scale_factor)
1621{
1622 uint64_t abstime_interval;
1623 uint64_t abstime_leeway;
1624
1625 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime_interval);
1626 clock_interval_to_absolutetime_interval(leeway, scale_factor, &abstime_leeway);
1627
1628 _clock_delay_until_deadline_with_leeway(abstime_interval, mach_absolute_time() + abstime_interval, abstime_leeway);
1629}
1630
1631void
1632delay(
1633 int usec)
1634{
1635 delay_for_interval((usec < 0)? -usec: usec, NSEC_PER_USEC);
1636}
1637
1638/*
1639 * Miscellaneous routines.
1640 */
1641void
1642clock_interval_to_deadline(
1643 uint32_t interval,
1644 uint32_t scale_factor,
1645 uint64_t *result)
1646{
1647 uint64_t abstime;
1648
1649 clock_interval_to_absolutetime_interval(interval, scale_factor, &abstime);
1650
1651 if (os_add_overflow(mach_absolute_time(), abstime, result)) {
1652 *result = UINT64_MAX;
1653 }
1654}
1655
1656void
1657nanoseconds_to_deadline(
1658 uint64_t interval,
1659 uint64_t *result)
1660{
1661 uint64_t abstime;
1662
1663 nanoseconds_to_absolutetime(interval, &abstime);
1664
1665 if (os_add_overflow(mach_absolute_time(), abstime, result)) {
1666 *result = UINT64_MAX;
1667 }
1668}
1669
1670void
1671clock_absolutetime_interval_to_deadline(
1672 uint64_t abstime,
1673 uint64_t *result)
1674{
1675 if (os_add_overflow(mach_absolute_time(), abstime, result)) {
1676 *result = UINT64_MAX;
1677 }
1678}
1679
1680void
1681clock_continuoustime_interval_to_deadline(
1682 uint64_t conttime,
1683 uint64_t *result)
1684{
1685 if (os_add_overflow(mach_continuous_time(), conttime, result)) {
1686 *result = UINT64_MAX;
1687 }
1688}
1689
1690void
1691clock_get_uptime(
1692 uint64_t *result)
1693{
1694 *result = mach_absolute_time();
1695}
1696
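/*
 * clock_deadline_for_periodic_event:
 *
 * Advance *deadline by interval; if the result is already in the past
 * relative to abstime (and then to the current time), realign it to
 * interval past that reference instead. All additions saturate at
 * UINT64_MAX.
 */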
1697void
1698clock_deadline_for_periodic_event(
1699 uint64_t interval,
1700 uint64_t abstime,
1701 uint64_t *deadline)
1702{
1703 assert(interval != 0);
1704
1705 // *deadline += interval;
1706 if (os_add_overflow(*deadline, interval, deadline)) {
1707 *deadline = UINT64_MAX;
1708 }
1709
1710 if (*deadline <= abstime) {
1711 // *deadline = abstime + interval;
1712 if (os_add_overflow(abstime, interval, deadline)) {
1713 *deadline = UINT64_MAX;
1714 }
1715
1716 abstime = mach_absolute_time();
1717 if (*deadline <= abstime) {
1718 // *deadline = abstime + interval;
1719 if (os_add_overflow(abstime, interval, deadline)) {
1720 *deadline = UINT64_MAX;
1721 }
1722 }
1723 }
1724}
1725
1726uint64_t
1727mach_continuous_time(void)
1728{
1729#if HIBERNATION && HAS_CONTINUOUS_HWCLOCK
1730 return ml_get_hwclock() + hwclock_conttime_offset;
1731#elif HAS_CONTINUOUS_HWCLOCK
1732 return ml_get_hwclock();
1733#else
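	/*
	 * Without an always-on hardware clock, continuous time is
	 * mach_absolute_time() plus the accumulated sleep time. Re-read
	 * mach_absolutetime_asleep around the sample and retry if it moved,
	 * so the sum stays consistent even if a wake updates it concurrently.
	 */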
1734 while (1) {
1735 uint64_t read1 = mach_absolutetime_asleep;
1736 uint64_t absolute = mach_absolute_time();
1737 OSMemoryBarrier();
1738 uint64_t read2 = mach_absolutetime_asleep;
1739
1740 if (__builtin_expect(read1 == read2, 1)) {
1741 return absolute + read1;
1742 }
1743 }
1744#endif
1745}
1746
1747uint64_t
1748mach_continuous_approximate_time(void)
1749{
1750#if HAS_CONTINUOUS_HWCLOCK
1751 return mach_continuous_time();
1752#else
1753 while (1) {
1754 uint64_t read1 = mach_absolutetime_asleep;
1755 uint64_t absolute = mach_approximate_time();
1756 OSMemoryBarrier();
1757 uint64_t read2 = mach_absolutetime_asleep;
1758
1759 if (__builtin_expect(read1 == read2, 1)) {
1760 return absolute + read1;
1761 }
1762 }
1763#endif
1764}
1765
1766/*
1767 * continuoustime_to_absolutetime
1768 * Must be called with interrupts disabled
1769 * Returned value is only valid until the next update to
1770 * mach_continuous_time
1771 */
1772uint64_t
1773continuoustime_to_absolutetime(uint64_t conttime)
1774{
1775 if (conttime <= mach_absolutetime_asleep) {
1776 return 0;
1777 } else {
1778 return conttime - mach_absolutetime_asleep;
1779 }
1780}
1781
1782/*
1783 * absolutetime_to_continuoustime
1784 * Must be called with interrupts disabled
1785 * Returned value is only valid until the next update to
1786 * mach_continuous_time
1787 */
1788uint64_t
1789absolutetime_to_continuoustime(uint64_t abstime)
1790{
1791 return abstime + mach_absolutetime_asleep;
1792}
1793
1794#if CONFIG_DTRACE
1795
1796/*
1797 * clock_get_calendar_nanotime_nowait
1798 *
1799 * Description: Non-blocking version of clock_get_calendar_nanotime()
1800 *
1801 * Notes: This function operates by separately tracking calendar time
1802 * updates using a two element structure to copy the calendar
1803 * state, which may be asynchronously modified. It utilizes
1804 * barrier instructions in the tracking process and in the local
1805 * stable snapshot process in order to ensure that a consistent
1806 * snapshot is used to perform the calculation.
1807 */
1808void
1809clock_get_calendar_nanotime_nowait(
1810 clock_sec_t *secs,
1811 clock_nsec_t *nanosecs)
1812{
1813 int i = 0;
1814 uint64_t now;
1815 struct unlocked_clock_calend stable;
1816 struct bintime bt;
1817
1818 for (;;) {
1819 stable = flipflop[i]; /* take snapshot */
1820
1821 /*
1822 * Use barrier instructions to ensure atomicity. We AND
1823 * off the "in progress" bit to get the current generation
1824 * count.
1825 */
1826 os_atomic_andnot(&stable.gen, 1, relaxed);
1827
1828 /*
1829 * If an update _is_ in progress, the generation count will be
1830 * off by one, if it _was_ in progress, it will be off by two,
1831 * and if we caught it at a good time, it will be equal (and
1832 * our snapshot is therefore stable).
1833 */
1834 if (flipflop[i].gen == stable.gen) {
1835 break;
1836 }
1837
1838 /* Switch to the other element of the flipflop, and try again. */
1839 i ^= 1;
1840 }
1841
1842 now = mach_absolute_time();
1843
1844 bt = get_scaled_time(now);
1845
1846 bintime_add(&bt, &clock_calend.bintime);
1847
1848 bintime2nsclock(&bt, secs, nanosecs);
1849}
1850
1851static void
1852clock_track_calend_nowait(void)
1853{
1854 int i;
1855
1856 for (i = 0; i < 2; i++) {
1857 struct clock_calend tmp = clock_calend;
1858
1859 /*
1860 * Set the low bit of the generation count; since we use a
1861 * barrier instruction to do this, we are guaranteed that this
1862 * will flag an update in progress to an async caller trying
1863 * to examine the contents.
1864 */
1865 os_atomic_or(&flipflop[i].gen, 1, relaxed);
1866
1867 flipflop[i].calend = tmp;
1868
1869 /*
1870 * Increment the generation count to clear the low bit to
1871 * signal completion. If a caller compares the generation
1872 * count after taking a copy while in progress, the count
1873 * will be off by two.
1874 */
1875 os_atomic_inc(&flipflop[i].gen, relaxed);
1876 }
1877}
1878
1879#endif /* CONFIG_DTRACE */
1880