/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>
#include <pexpert/pexpert.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <mach/machine.h>

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
#endif

extern uint64_t mach_absolutetime_asleep;

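/*
 * Default (no-op) implementations of the scheduler performance-control
 * callouts. These remain installed until an external performance controller
 * registers its own callbacks via sched_perfcontrol_register_callbacks().
 */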
static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
    __unused struct perfcontrol_thread_data *oncore,
    __unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
    __unused void *unused)
{
}

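/*
 * Callout function pointers, initialized to the no-op defaults above and
 * replaced (or reset) by sched_perfcontrol_register_callbacks().
 */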
sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;

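/*
 * Install a performance controller's callback table. Newer callouts are only
 * honored when callbacks->version is recent enough; a NULL callback entry, or
 * a NULL callbacks pointer, restores the corresponding default(s).
 */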
void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
    assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

    if (size_of_state > sizeof(struct perfcontrol_state)) {
        panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
    }

    if (callbacks) {
        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
            if (callbacks->work_interval_ctl != NULL) {
                sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
            } else {
                sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
            }
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
            if (callbacks->csw != NULL) {
                sched_perfcontrol_csw = callbacks->csw;
            } else {
                sched_perfcontrol_csw = sched_perfcontrol_csw_default;
            }

            if (callbacks->state_update != NULL) {
                sched_perfcontrol_state_update = callbacks->state_update;
            } else {
                sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
            }
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
            if (callbacks->deadline_passed != NULL) {
                sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
            } else {
                sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
            }
        }

        if (callbacks->offcore != NULL) {
            sched_perfcontrol_offcore = callbacks->offcore;
        } else {
            sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
        }

        if (callbacks->context_switch != NULL) {
            sched_perfcontrol_switch = callbacks->context_switch;
        } else {
            sched_perfcontrol_switch = sched_perfcontrol_switch_default;
        }

        if (callbacks->oncore != NULL) {
            sched_perfcontrol_oncore = callbacks->oncore;
        } else {
            sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
        }

        if (callbacks->max_runnable_latency != NULL) {
            sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
        } else {
            sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
        }

        if (callbacks->work_interval_notify != NULL) {
            sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
        } else {
            sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
        }
    } else {
        /* reset to defaults */
        sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
        sched_perfcontrol_switch = sched_perfcontrol_switch_default;
        sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
        sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
        sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
        sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
        sched_perfcontrol_csw = sched_perfcontrol_csw_default;
        sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
    }
}

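/*
 * Fill out the per-thread data block handed to the performance controller on
 * context-switch and state-update callouts.
 */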
static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
    thread_t thread,
    uint64_t same_pri_latency)
{
    bzero(data, sizeof(struct perfcontrol_thread_data));
    data->perfctl_class = thread_get_perfcontrol_class(thread);
    data->energy_estimate_nj = 0;
    data->thread_id = thread->thread_id;
    data->scheduling_latency_at_same_basepri = same_pri_latency;
    data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}

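/*
 * Snapshot the fixed CPU counters (instructions and cycles) for the
 * context-switch callout; zeroed when MONOTONIC is not built in.
 */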
static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
    mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* !MONOTONIC */
    cpu_counters->instructions = 0;
    cpu_counters->cycles = 0;
#endif /* MONOTONIC */
}

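/*
 * Optional accounting of the cycles/instructions spent inside the
 * performance-controller callouts themselves, gated by
 * perfcontrol_callout_stats_enabled.
 */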
int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];

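/*
 * Sample the fixed counters immediately before and after a callout; the
 * per-callout-type deltas are accumulated in perfcontrol_callout_stats.
 */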
#if MONOTONIC
static inline
bool perfcontrol_callout_counters_begin(uint64_t *counters)
{
    if (!perfcontrol_callout_stats_enabled) {
        return false;
    }
    mt_fixed_counts(counters);
    return true;
}

static inline
void perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
{
    uint64_t end_counters[MT_CORE_NFIXED];
    mt_fixed_counts(end_counters);
    atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
        end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], memory_order_relaxed);
#ifdef MT_CORE_INSTRS
    atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
        end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], memory_order_relaxed);
#endif /* defined(MT_CORE_INSTRS) */
    atomic_fetch_add_explicit(&perfcontrol_callout_count[type], 1, memory_order_relaxed);
}
#endif /* MONOTONIC */

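/*
 * Average per-callout value of the requested statistic (cycles or
 * instructions) for the given callout type.
 */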
uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
{
    if (!perfcontrol_callout_stats_enabled) {
        return 0;
    }
    return (perfcontrol_callout_stats[type][stat] / perfcontrol_callout_count[type]);
}

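/*
 * Context-switch callout: notify the performance controller that 'old' is
 * going off-core and 'new' is going on-core, and credit any energy estimates
 * it reports back to the respective threads (arm64 only).
 */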
void
machine_switch_perfcontrol_context(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    uint64_t new_thread_same_pri_latency,
    thread_t old,
    thread_t new)
{
    if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
        perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
        perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
        sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
    }

    if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
        uint32_t cpu_id = (uint32_t)cpu_number();
        struct perfcontrol_cpu_counters cpu_counters;
        struct perfcontrol_thread_data offcore, oncore;
        machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
        machine_switch_populate_perfcontrol_thread_data(&oncore, new,
            new_thread_same_pri_latency);
        machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if MONOTONIC
        uint64_t counters[MT_CORE_NFIXED];
        bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
        sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
            &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
        if (ctrs_enabled) {
            perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
        }
#endif /* MONOTONIC */

#if __arm64__
        old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
        new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
#endif
    }
}

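/*
 * Per-thread state-update callout; credits any reported energy estimate to
 * the thread on arm64.
 */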
void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    thread_t thread)
{
    if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
        return;
    }
    uint32_t cpu_id = (uint32_t)cpu_number();
    struct perfcontrol_thread_data data;
    machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
        &data, NULL);
#if MONOTONIC
    if (ctrs_enabled) {
        perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
    }
#endif /* MONOTONIC */

#if __arm64__
    thread->machine.energy_estimate_nj += data.energy_estimate_nj;
#endif
}

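/*
 * Notify the performance controller that new_thread is about to run,
 * including its QoS class, urgency, and recent scheduling latencies.
 */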
void
machine_thread_going_on_core(thread_t new_thread,
    int urgency,
    uint64_t sched_latency,
    uint64_t same_pri_latency,
    uint64_t timestamp)
{
    if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
        return;
    }
    struct going_on_core on_core;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

    on_core.thread_id = new_thread->thread_id;
    on_core.energy_estimate_nj = 0;
    on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
    on_core.urgency = urgency;
    on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
    on_core.is_kernel_thread = new_thread->task == kernel_task;
    on_core.scheduling_latency = sched_latency;
    on_core.start_time = timestamp;
    on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
    if (ctrs_enabled) {
        perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
    }
#endif /* MONOTONIC */

#if __arm64__
    new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
#endif
}

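/*
 * Notify the performance controller that old_thread is leaving the CPU
 * (possibly because it is terminating).
 */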
void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, uint64_t last_dispatch)
{
    if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
        return;
    }
    struct going_off_core off_core;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

    off_core.thread_id = old_thread->thread_id;
    off_core.energy_estimate_nj = 0;
    off_core.end_time = last_dispatch;

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
    if (ctrs_enabled) {
        perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
    }
#endif /* MONOTONIC */

#if __arm64__
    old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
#endif
}

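/*
 * Report the maximum runnable (scheduling) latencies per thread urgency class
 * to the performance controller.
 */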
void
machine_max_runnable_latency(uint64_t bg_max_latency,
    uint64_t default_max_latency,
    uint64_t realtime_max_latency)
{
    if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
        return;
    }
    struct perfcontrol_max_runnable_latency latencies = {
        .max_scheduling_latencies = {
            [THREAD_URGENCY_NONE] = 0,
            [THREAD_URGENCY_BACKGROUND] = bg_max_latency,
            [THREAD_URGENCY_NORMAL] = default_max_latency,
            [THREAD_URGENCY_REAL_TIME] = realtime_max_latency
        }
    };

    sched_perfcontrol_max_runnable_latency(&latencies);
}

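/*
 * Forward a work interval notification (start/finish/deadline timing reported
 * through the work interval mechanism) to the performance controller.
 */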
void
machine_work_interval_notify(thread_t thread,
    struct kern_work_interval_args *kwi_args)
{
    if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
        return;
    }
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
    struct perfcontrol_work_interval work_interval = {
        .thread_id = thread->thread_id,
        .qos_class = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
        .urgency = kwi_args->urgency,
        .flags = kwi_args->notify_flags,
        .work_interval_id = kwi_args->work_interval_id,
        .start = kwi_args->start,
        .finish = kwi_args->finish,
        .deadline = kwi_args->deadline,
        .next_start = kwi_args->next_start,
        .create_flags = kwi_args->create_flags,
    };
    sched_perfcontrol_work_interval_notify(state, &work_interval);
}

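/* Notify the performance controller that the given deadline has passed. */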
void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
    if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
        sched_perfcontrol_deadline_passed(deadline);
    }
}

#if INTERRUPT_MASKED_DEBUG
/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms. The alarm will go off if interrupts are held
 * disabled for too long, measured starting from now.
 */
void
ml_spin_debug_reset(thread_t thread)
{
    thread->machine.intmask_timestamp = mach_absolute_time();
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp on a thread that has been unscheduled
 * to avoid false alarms.
 */
void
ml_spin_debug_clear(thread_t thread)
{
    thread->machine.intmask_timestamp = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms.
 */
void
ml_spin_debug_clear_self(void)
{
    ml_spin_debug_clear(current_thread());
}

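/*
 * ml_check_interrupts_disabled_duration()
 * Panic if the current interrupts-disabled window (measured from the thread's
 * intmask_timestamp) has exceeded interrupt_masked_timeout, scaled by the
 * debug CPU performance degradation factor.
 */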
void
ml_check_interrupts_disabled_duration(thread_t thread)
{
    uint64_t start;
    uint64_t now;

    start = thread->machine.intmask_timestamp;
    if (start != 0) {
        now = mach_absolute_time();

        if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) {
            mach_timebase_info_data_t timebase;
            clock_timebase_info(&timebase);

#ifndef KASAN
            /*
             * Disable the actual panic for KASAN due to the overhead of KASAN itself;
             * leave the rest of the mechanism enabled so that KASAN can catch any bugs
             * in the mechanism itself.
             */
            panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom));
#endif
        }
    }

    return;
}
#endif // INTERRUPT_MASKED_DEBUG

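/*
 * Routine: ml_set_interrupts_enabled
 * Function: Enable or mask IRQ/FIQ delivery; returns TRUE if interrupts were
 * previously enabled. When enabling at preemption level 0, any pending urgent
 * AST is handled before interrupts are actually unmasked.
 */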
boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
    thread_t thread;
    uint64_t state;

#if __arm__
#define INTERRUPT_MASK PSR_IRQF
    state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
    state = __builtin_arm_rsr("DAIF");
#endif
    if (enable && (state & INTERRUPT_MASK)) {
#if INTERRUPT_MASKED_DEBUG
        if (interrupt_masked_debug) {
            // Interrupts are currently masked, we will enable them (after finishing this check)
            thread = current_thread();
            ml_check_interrupts_disabled_duration(thread);
            thread->machine.intmask_timestamp = 0;
        }
#endif // INTERRUPT_MASKED_DEBUG
        if (get_preemption_level() == 0) {
            thread = current_thread();
            while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
                uintptr_t up = arm_user_protect_begin(thread);
#endif
                ast_taken_kernel();
#if __ARM_USER_PROTECT__
                arm_user_protect_end(thread, up, FALSE);
#endif
            }
        }
#if __arm__
        __asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
        __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
    } else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
        __asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
        __builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
        if (interrupt_masked_debug) {
            // Interrupts were enabled, we just masked them
            current_thread()->machine.intmask_timestamp = mach_absolute_time();
        }
#endif
    }
    return ((state & INTERRUPT_MASK) == 0);
}

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
    /* Do not use a stack-based check here, as the top-level exception handler
     * is free to use some other stack besides the per-CPU interrupt stack.
     * Interrupts should always be disabled if we're at interrupt context.
     * Check that first, as we may be in a preemptible non-interrupt context, in
     * which case we could be migrated to a different CPU between obtaining
     * the per-cpu data pointer and loading cpu_int_state. We then might end
     * up checking the interrupt state of a different CPU, resulting in a false
     * positive. But if interrupts are disabled, we also know we cannot be
     * preempted. */
    return (!ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL));
}

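/*
 * Routine: ml_stack_remaining
 * Function: Return the number of bytes left on the current stack (the per-CPU
 * interrupt stack if we are on it, otherwise the thread's kernel stack).
 */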
vm_offset_t
ml_stack_remaining(void)
{
    uintptr_t local = (uintptr_t) &local;
    vm_offset_t intstack_top_ptr;

    /* Since this is a stack-based check, we don't need to worry about
     * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
     * then the sp should never be within any CPU's interrupt stack unless
     * something has gone horribly wrong. */
    intstack_top_ptr = getCpuDatap()->intstack_top;
    if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
        return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE));
    } else {
        return (local - current_thread()->kernel_stack);
    }
}

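/*
 * Quiescing flag; set and queried only with interrupts disabled (see the
 * asserts below).
 */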
static boolean_t ml_quiescing;

void
ml_set_is_quiescing(boolean_t quiescing)
{
    assert(FALSE == ml_get_interrupts_enabled());
    ml_quiescing = quiescing;
}

boolean_t
ml_is_quiescing(void)
{
    assert(FALSE == ml_get_interrupts_enabled());
    return (ml_quiescing);
}

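/*
 * Booter-reported memory size: BootArgs->memSizeActual when set, otherwise
 * BootArgs->memSize rounded up to a 512MB (or 256MB, for smaller
 * configurations) boundary, minus memSize itself.
 */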
uint64_t
ml_get_booter_memory_size(void)
{
    uint64_t size;
    uint64_t roundsize = 512 * 1024 * 1024ULL;
    size = BootArgs->memSizeActual;
    if (!size) {
        size = BootArgs->memSize;
        if (size < (2 * roundsize)) {
            roundsize >>= 1;
        }
        size = (size + roundsize - 1) & ~(roundsize - 1);
        size -= BootArgs->memSize;
    }
    return (size);
}

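/* Offset of mach_absolute_time from the raw hardware timebase. */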
uint64_t
ml_get_abstime_offset(void)
{
    return rtclock_base_abstime;
}

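/*
 * Offset of mach_continuous_time from the raw hardware timebase: the absolute
 * time offset plus the time spent asleep.
 */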
uint64_t
ml_get_conttime_offset(void)
{
    return (rtclock_base_abstime + mach_absolutetime_asleep);
}

uint64_t
ml_get_time_since_reset(void)
{
    /* The timebase resets across S2R, so just return the raw value. */
    return ml_get_hwclock();
}

uint64_t
ml_get_conttime_wake_time(void)
{
    /* The wake time is simply our continuous time offset. */
    return ml_get_conttime_offset();
}