1 | /* |
2 | * Copyright (c) 2007-2017 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <arm64/proc_reg.h> |
30 | #include <arm/machine_cpu.h> |
31 | #include <arm/cpu_internal.h> |
32 | #include <arm/cpuid.h> |
33 | #include <arm/io_map_entries.h> |
34 | #include <arm/cpu_data.h> |
35 | #include <arm/cpu_data_internal.h> |
36 | #include <arm/caches_internal.h> |
37 | #include <arm/misc_protos.h> |
38 | #include <arm/machdep_call.h> |
39 | #include <arm/rtclock.h> |
40 | #include <console/serial_protos.h> |
41 | #include <kern/machine.h> |
42 | #include <prng/random.h> |
43 | #include <kern/startup.h> |
44 | #include <kern/thread.h> |
45 | #include <mach/machine.h> |
46 | #include <machine/atomic.h> |
47 | #include <vm/pmap.h> |
48 | #include <vm/vm_page.h> |
49 | #include <sys/kdebug.h> |
50 | #include <kern/coalition.h> |
51 | #include <pexpert/device_tree.h> |
52 | |
53 | #include <IOKit/IOPlatformExpert.h> |
54 | |
55 | #if defined(KERNEL_INTEGRITY_KTRR) |
56 | #include <libkern/kernel_mach_header.h> |
57 | #endif |
58 | |
59 | #include <libkern/section_keywords.h> |
60 | |
61 | #if KPC |
62 | #include <kern/kpc.h> |
63 | #endif |
64 | |
65 | |
66 | static int max_cpus_initialized = 0; |
67 | #define MAX_CPUS_SET 0x1 |
68 | #define MAX_CPUS_WAIT 0x2 |
69 | |
70 | uint32_t LockTimeOut; |
71 | uint32_t LockTimeOutUsec; |
72 | uint64_t MutexSpin; |
73 | boolean_t is_clock_configured = FALSE; |
74 | |
75 | extern int mach_assert; |
76 | extern volatile uint32_t debug_enabled; |
77 | |
extern vm_offset_t segEXTRADATA;
79 | extern vm_offset_t segLOWESTTEXT; |
80 | extern vm_offset_t segLASTB; |
81 | extern unsigned long segSizeLAST; |
82 | |
83 | |
84 | void machine_conf(void); |
85 | |
86 | thread_t Idle_context(void); |
87 | |
88 | SECURITY_READ_ONLY_LATE(static uint32_t) cpu_phys_ids[MAX_CPUS] = {[0 ... MAX_CPUS - 1] = (uint32_t)-1}; |
89 | SECURITY_READ_ONLY_LATE(static unsigned int) avail_cpus = 0; |
90 | SECURITY_READ_ONLY_LATE(static int) boot_cpu = -1; |
91 | SECURITY_READ_ONLY_LATE(static int) max_cpu_number = 0; |
92 | SECURITY_READ_ONLY_LATE(cluster_type_t) boot_cluster = CLUSTER_TYPE_SMP; |
93 | |
94 | SECURITY_READ_ONLY_LATE(static uint32_t) fiq_eventi = UINT32_MAX; |
95 | |
96 | lockdown_handler_t lockdown_handler; |
97 | void *lockdown_this; |
98 | lck_mtx_t lockdown_handler_lck; |
99 | lck_grp_t *lockdown_handler_grp; |
100 | int lockdown_done; |
101 | |
102 | void ml_lockdown_init(void); |
103 | void ml_lockdown_run_handler(void); |
104 | uint32_t get_arm_cpu_version(void); |
105 | |
106 | |
void ml_cpu_signal(unsigned int cpu_id __unused)
{
	panic("Platform does not support ACC Fast IPI");
}

void ml_cpu_signal_deferred_adjust_timer(uint64_t nanosecs) {
	(void)nanosecs;
	panic("Platform does not support ACC Fast IPI");
}

uint64_t ml_cpu_signal_deferred_get_timer() {
	return 0;
}

void ml_cpu_signal_deferred(unsigned int cpu_id __unused)
{
	panic("Platform does not support ACC Fast IPI deferral");
}

void ml_cpu_signal_retract(unsigned int cpu_id __unused)
{
	panic("Platform does not support ACC Fast IPI retraction");
}
130 | |
131 | void machine_idle(void) |
132 | { |
133 | __asm__ volatile ("msr DAIFSet, %[mask]" ::[mask] "i" (DAIFSC_IRQF | DAIFSC_FIQF)); |
134 | Idle_context(); |
135 | __asm__ volatile ("msr DAIFClr, %[mask]" ::[mask] "i" (DAIFSC_IRQF | DAIFSC_FIQF)); |
136 | } |
137 | |
138 | void init_vfp(void) |
139 | { |
140 | return; |
141 | } |
142 | |
143 | boolean_t get_vfp_enabled(void) |
144 | { |
145 | return TRUE; |
146 | } |
147 | |
148 | void OSSynchronizeIO(void) |
149 | { |
150 | __builtin_arm_dsb(DSB_SY); |
151 | } |
152 | |
153 | uint64_t get_aux_control(void) |
154 | { |
155 | uint64_t value; |
156 | |
157 | MRS(value, "ACTLR_EL1" ); |
158 | return value; |
159 | } |
160 | |
161 | uint64_t get_mmu_control(void) |
162 | { |
163 | uint64_t value; |
164 | |
165 | MRS(value, "SCTLR_EL1" ); |
166 | return value; |
167 | } |
168 | |
169 | uint64_t get_tcr(void) |
170 | { |
171 | uint64_t value; |
172 | |
173 | MRS(value, "TCR_EL1" ); |
174 | return value; |
175 | } |
176 | |
177 | boolean_t ml_get_interrupts_enabled(void) |
178 | { |
179 | uint64_t value; |
180 | |
181 | MRS(value, "DAIF" ); |
182 | if (value & DAIF_IRQF) |
183 | return FALSE; |
184 | return TRUE; |
185 | } |
186 | |
187 | pmap_paddr_t get_mmu_ttb(void) |
188 | { |
189 | pmap_paddr_t value; |
190 | |
191 | MRS(value, "TTBR0_EL1" ); |
192 | return value; |
193 | } |
194 | |
195 | static uint32_t get_midr_el1(void) |
196 | { |
197 | uint64_t value; |
198 | |
199 | MRS(value, "MIDR_EL1" ); |
200 | |
201 | /* This is a 32-bit register. */ |
202 | return (uint32_t) value; |
203 | } |
204 | |
205 | uint32_t get_arm_cpu_version(void) |
206 | { |
207 | uint32_t value = get_midr_el1(); |
208 | |
209 | /* Compose the register values into 8 bits; variant[7:4], revision[3:0]. */ |
210 | return ((value & MIDR_EL1_REV_MASK) >> MIDR_EL1_REV_SHIFT) | ((value & MIDR_EL1_VAR_MASK) >> (MIDR_EL1_VAR_SHIFT - 4)); |
211 | } |
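
/*
 * Worked example (illustrative, value hypothetical): a MIDR_EL1 of
 * 0x412FD071 has variant 0x2 (bits [23:20]) and revision 0x1 (bits
 * [3:0]), so get_arm_cpu_version() returns 0x21.
 */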
212 | |
213 | /* |
214 | * user_cont_hwclock_allowed() |
215 | * |
216 | * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0) |
217 | * as a continuous time source (e.g. from mach_continuous_time) |
218 | */ |
219 | boolean_t user_cont_hwclock_allowed(void) |
220 | { |
221 | return FALSE; |
222 | } |
223 | |
224 | /* |
225 | * user_timebase_allowed() |
226 | * |
227 | * Indicates whether we allow EL0 to read the physical timebase (CNTPCT_EL0). |
228 | */ |
229 | boolean_t user_timebase_allowed(void) |
230 | { |
231 | return TRUE; |
232 | } |
233 | |
234 | boolean_t arm64_wfe_allowed(void) |
235 | { |
236 | return TRUE; |
237 | } |
238 | |
239 | #if defined(KERNEL_INTEGRITY_KTRR) |
240 | |
uint64_t rorgn_begin __attribute__((section("__DATA, __const"))) = 0;
uint64_t rorgn_end __attribute__((section("__DATA, __const"))) = 0;
243 | vm_offset_t amcc_base; |
244 | |
245 | static void assert_unlocked(void); |
246 | static void assert_amcc_cache_disabled(void); |
247 | static void lock_amcc(void); |
248 | static void lock_mmu(uint64_t begin, uint64_t end); |
249 | |
250 | void rorgn_stash_range(void) |
251 | { |
252 | |
253 | #if DEVELOPMENT || DEBUG |
254 | boolean_t rorgn_disable = FALSE; |
255 | |
256 | PE_parse_boot_argn("-unsafe_kernel_text" , &rorgn_disable, sizeof(rorgn_disable)); |
257 | |
258 | if (rorgn_disable) { |
259 | /* take early out if boot arg present, don't query any machine registers to avoid |
260 | * dependency on amcc DT entry |
261 | */ |
262 | return; |
263 | } |
264 | #endif |
265 | |
	/* Get the AMCC values and stash them into rorgn_begin, rorgn_end.
	 * gPhysBase is the base of DRAM managed by xnu. We need DRAM_BASE
	 * because the AMCC RO region begin/end registers are in units of
	 * 16KB page numbers from DRAM_BASE, so we truncate gPhysBase to a
	 * 512MB granule and assert that the value is the canonical DRAM_BASE
	 * PA of 0x8_0000_0000 for arm64.
	 */
272 | |
273 | uint64_t dram_base = gPhysBase & ~0x1FFFFFFFULL; /* 512MB */ |
274 | assert(dram_base == 0x800000000ULL); |
275 | |
276 | #if defined(KERNEL_INTEGRITY_KTRR) |
277 | uint64_t soc_base = 0; |
278 | DTEntry entryP = NULL; |
279 | uintptr_t *reg_prop = NULL; |
280 | uint32_t prop_size = 0; |
281 | int rc; |
282 | |
283 | soc_base = pe_arm_get_soc_base_phys(); |
284 | rc = DTFindEntry("name" , "mcc" , &entryP); |
285 | assert(rc == kSuccess); |
286 | rc = DTGetProperty(entryP, "reg" , (void **)®_prop, &prop_size); |
287 | assert(rc == kSuccess); |
288 | amcc_base = ml_io_map(soc_base + *reg_prop, *(reg_prop + 1)); |
289 | #else |
290 | #error "KERNEL_INTEGRITY config error" |
291 | #endif |
292 | |
293 | #if defined(KERNEL_INTEGRITY_KTRR) |
294 | assert(rRORGNENDADDR > rRORGNBASEADDR); |
295 | rorgn_begin = (rRORGNBASEADDR << AMCC_PGSHIFT) + dram_base; |
296 | rorgn_end = (rRORGNENDADDR << AMCC_PGSHIFT) + dram_base; |
297 | #else |
298 | #error KERNEL_INTEGRITY config error |
299 | #endif /* defined (KERNEL_INTEGRITY_KTRR) */ |
300 | } |
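
/*
 * Worked example (illustrative, register value hypothetical): with
 * AMCC_PGSHIFT == 14 (16KB pages, per the comment above) and
 * dram_base == 0x8_0000_0000, an rRORGNBASEADDR reading of 0x100 yields
 * rorgn_begin == (0x100 << 14) + 0x800000000 == 0x800400000.
 */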
301 | |
302 | static void assert_unlocked() { |
303 | uint64_t ktrr_lock = 0; |
304 | uint32_t rorgn_lock = 0; |
305 | |
306 | assert(amcc_base); |
307 | #if defined(KERNEL_INTEGRITY_KTRR) |
308 | rorgn_lock = rRORGNLOCK; |
309 | ktrr_lock = __builtin_arm_rsr64(ARM64_REG_KTRR_LOCK_EL1); |
310 | #else |
311 | #error KERNEL_INTEGRITY config error |
312 | #endif /* defined(KERNEL_INTEGRITY_KTRR) */ |
313 | |
314 | assert(!ktrr_lock); |
315 | assert(!rorgn_lock); |
316 | } |
317 | |
318 | static void lock_amcc() { |
319 | #if defined(KERNEL_INTEGRITY_KTRR) |
320 | rRORGNLOCK = 1; |
321 | __builtin_arm_isb(ISB_SY); |
322 | #else |
323 | #error KERNEL_INTEGRITY config error |
324 | #endif |
325 | } |
326 | |
327 | static void lock_mmu(uint64_t begin, uint64_t end) { |
328 | |
329 | #if defined(KERNEL_INTEGRITY_KTRR) |
330 | |
331 | __builtin_arm_wsr64(ARM64_REG_KTRR_LOWER_EL1, begin); |
332 | __builtin_arm_wsr64(ARM64_REG_KTRR_UPPER_EL1, end); |
333 | __builtin_arm_wsr64(ARM64_REG_KTRR_LOCK_EL1, 1ULL); |
334 | |
335 | /* flush TLB */ |
336 | |
337 | __builtin_arm_isb(ISB_SY); |
338 | flush_mmu_tlb(); |
339 | |
340 | #else |
341 | #error KERNEL_INTEGRITY config error |
342 | #endif |
343 | |
344 | } |
345 | |
346 | static void assert_amcc_cache_disabled() { |
347 | #if defined(KERNEL_INTEGRITY_KTRR) |
348 | assert((rMCCGEN & 1) == 0); /* assert M$ disabled or LLC clean will be unreliable */ |
349 | #else |
350 | #error KERNEL_INTEGRITY config error |
351 | #endif |
352 | } |
353 | |
354 | /* |
355 | * void rorgn_lockdown(void) |
356 | * |
357 | * Lock the MMU and AMCC RORegion within lower and upper boundaries if not already locked |
358 | * |
359 | * [ ] - ensure this is being called ASAP on secondary CPUs: KTRR programming and lockdown handled in |
360 | * start.s:start_cpu() for subsequent wake/resume of all cores |
361 | */ |
362 | void rorgn_lockdown(void) |
363 | { |
364 | vm_offset_t ktrr_begin, ktrr_end; |
365 | unsigned long last_segsz; |
366 | |
367 | #if DEVELOPMENT || DEBUG |
368 | boolean_t ktrr_disable = FALSE; |
369 | |
370 | PE_parse_boot_argn("-unsafe_kernel_text" , &ktrr_disable, sizeof(ktrr_disable)); |
371 | |
372 | if (ktrr_disable) { |
373 | /* |
374 | * take early out if boot arg present, since we may not have amcc DT entry present |
375 | * we can't assert that iboot hasn't programmed the RO region lockdown registers |
376 | */ |
377 | goto out; |
378 | } |
379 | #endif /* DEVELOPMENT || DEBUG */ |
380 | |
381 | assert_unlocked(); |
382 | |
383 | /* [x] - Use final method of determining all kernel text range or expect crashes */ |
384 | ktrr_begin = segEXTRADATA; |
385 | assert(ktrr_begin && gVirtBase && gPhysBase); |
386 | |
387 | ktrr_begin = kvtophys(ktrr_begin); |
388 | |
389 | ktrr_end = kvtophys(segLASTB); |
390 | last_segsz = segSizeLAST; |
391 | #if defined(KERNEL_INTEGRITY_KTRR) |
392 | /* __LAST is not part of the MMU KTRR region (it is however part of the AMCC KTRR region) */ |
393 | ktrr_end = (ktrr_end - 1) & ~AMCC_PGMASK; |
394 | /* ensure that iboot and xnu agree on the ktrr range */ |
395 | assert(rorgn_begin == ktrr_begin && rorgn_end == (ktrr_end + last_segsz)); |
396 | /* assert that __LAST segment containing privileged insns is only a single page */ |
397 | assert(last_segsz == PAGE_SIZE); |
398 | #endif |
399 | |
400 | |
401 | #if DEBUG || DEVELOPMENT |
402 | printf("KTRR Begin: %p End: %p, setting lockdown\n" , (void *)ktrr_begin, (void *)ktrr_end); |
403 | #endif |
404 | |
405 | /* [x] - ensure all in flight writes are flushed to AMCC before enabling RO Region Lock */ |
406 | |
407 | assert_amcc_cache_disabled(); |
408 | |
409 | CleanPoC_DcacheRegion_Force(phystokv(ktrr_begin), |
410 | (unsigned)((ktrr_end + last_segsz) - ktrr_begin + AMCC_PGMASK)); |
411 | |
412 | lock_amcc(); |
413 | |
414 | lock_mmu(ktrr_begin, ktrr_end); |
415 | |
416 | #if DEVELOPMENT || DEBUG |
417 | out: |
418 | #endif |
419 | |
420 | /* now we can run lockdown handler */ |
421 | ml_lockdown_run_handler(); |
422 | } |
423 | |
424 | #endif /* defined(KERNEL_INTEGRITY_KTRR)*/ |
425 | |
426 | void |
427 | machine_startup(__unused boot_args * args) |
428 | { |
429 | int boot_arg; |
430 | |
431 | |
432 | PE_parse_boot_argn("assert" , &mach_assert, sizeof (mach_assert)); |
433 | |
434 | if (PE_parse_boot_argn("preempt" , &boot_arg, sizeof (boot_arg))) { |
435 | default_preemption_rate = boot_arg; |
436 | } |
437 | if (PE_parse_boot_argn("bg_preempt" , &boot_arg, sizeof (boot_arg))) { |
438 | default_bg_preemption_rate = boot_arg; |
439 | } |
440 | |
441 | machine_conf(); |
442 | |
443 | /* |
444 | * Kick off the kernel bootstrap. |
445 | */ |
446 | kernel_bootstrap(); |
447 | /* NOTREACHED */ |
448 | } |
449 | |
450 | void machine_lockdown_preflight(void) |
451 | { |
452 | #if CONFIG_KERNEL_INTEGRITY |
453 | |
454 | #if defined(KERNEL_INTEGRITY_KTRR) |
455 | rorgn_stash_range(); |
456 | #endif |
457 | |
458 | #endif |
459 | } |
460 | |
461 | void machine_lockdown(void) |
462 | { |
463 | #if CONFIG_KERNEL_INTEGRITY |
464 | #if KERNEL_INTEGRITY_WT |
465 | /* Watchtower |
466 | * |
467 | * Notify the monitor about the completion of early kernel bootstrap. |
468 | * From this point forward it will enforce the integrity of kernel text, |
469 | * rodata and page tables. |
470 | */ |
471 | |
472 | #ifdef MONITOR |
473 | monitor_call(MONITOR_LOCKDOWN, 0, 0, 0); |
474 | #endif |
475 | #endif /* KERNEL_INTEGRITY_WT */ |
476 | |
477 | |
478 | #if defined(KERNEL_INTEGRITY_KTRR) |
479 | /* KTRR |
480 | * |
481 | * Lock physical KTRR region. KTRR region is read-only. Memory outside |
482 | * the region is not executable at EL1. |
483 | */ |
484 | |
485 | rorgn_lockdown(); |
486 | #endif /* defined(KERNEL_INTEGRITY_KTRR)*/ |
487 | |
488 | |
489 | #endif /* CONFIG_KERNEL_INTEGRITY */ |
490 | } |
491 | |
492 | char * |
493 | machine_boot_info( |
494 | __unused char *buf, |
495 | __unused vm_size_t size) |
496 | { |
497 | return (PE_boot_args()); |
498 | } |
499 | |
500 | void |
501 | machine_conf(void) |
502 | { |
503 | /* |
504 | * This is known to be inaccurate. mem_size should always be capped at 2 GB |
505 | */ |
506 | machine_info.memory_size = (uint32_t)mem_size; |
507 | } |
508 | |
509 | void |
510 | machine_init(void) |
511 | { |
512 | debug_log_init(); |
513 | clock_config(); |
514 | is_clock_configured = TRUE; |
515 | if (debug_enabled) |
516 | pmap_map_globals(); |
517 | } |
518 | |
519 | void |
520 | slave_machine_init(__unused void *param) |
521 | { |
522 | cpu_machine_init(); /* Initialize the processor */ |
523 | clock_init(); /* Init the clock */ |
524 | } |
525 | |
526 | /* |
527 | * Routine: machine_processor_shutdown |
528 | * Function: |
529 | */ |
530 | thread_t |
531 | machine_processor_shutdown( |
532 | __unused thread_t thread, |
533 | void (*doshutdown) (processor_t), |
534 | processor_t processor) |
535 | { |
536 | return (Shutdown_context(doshutdown, processor)); |
537 | } |
538 | |
539 | /* |
540 | * Routine: ml_init_max_cpus |
541 | * Function: |
542 | */ |
543 | void |
544 | ml_init_max_cpus(unsigned int max_cpus) |
545 | { |
546 | boolean_t current_state; |
547 | |
548 | current_state = ml_set_interrupts_enabled(FALSE); |
549 | if (max_cpus_initialized != MAX_CPUS_SET) { |
550 | machine_info.max_cpus = max_cpus; |
551 | machine_info.physical_cpu_max = max_cpus; |
552 | machine_info.logical_cpu_max = max_cpus; |
553 | if (max_cpus_initialized == MAX_CPUS_WAIT) |
554 | thread_wakeup((event_t) & max_cpus_initialized); |
555 | max_cpus_initialized = MAX_CPUS_SET; |
556 | } |
557 | (void) ml_set_interrupts_enabled(current_state); |
558 | } |
559 | |
560 | /* |
561 | * Routine: ml_get_max_cpus |
562 | * Function: |
563 | */ |
564 | unsigned int |
565 | ml_get_max_cpus(void) |
566 | { |
567 | boolean_t current_state; |
568 | |
569 | current_state = ml_set_interrupts_enabled(FALSE); |
570 | if (max_cpus_initialized != MAX_CPUS_SET) { |
571 | max_cpus_initialized = MAX_CPUS_WAIT; |
572 | assert_wait((event_t) & max_cpus_initialized, THREAD_UNINT); |
573 | (void) thread_block(THREAD_CONTINUE_NULL); |
574 | } |
575 | (void) ml_set_interrupts_enabled(current_state); |
576 | return (machine_info.max_cpus); |
577 | } |
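
/*
 * Usage sketch (illustrative, not compiled into this file): callers can
 * simply ask for the count; if ml_init_max_cpus() has not run yet, the
 * assert_wait/thread_block pair above parks them until the platform
 * expert publishes it. The helper name is hypothetical.
 */
#if 0
static void
example_report_cpus(void)
{
	unsigned int ncpus = ml_get_max_cpus();	/* may block during early boot */

	kprintf("platform reports %u CPUs\n", ncpus);
}
#endif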
578 | |
579 | /* |
580 | * Routine: ml_init_lock_timeout |
581 | * Function: |
582 | */ |
583 | void |
584 | ml_init_lock_timeout(void) |
585 | { |
586 | uint64_t abstime; |
587 | uint64_t mtxspin; |
588 | uint64_t default_timeout_ns = NSEC_PER_SEC>>2; |
589 | uint32_t slto; |
590 | |
591 | if (PE_parse_boot_argn("slto_us" , &slto, sizeof (slto))) |
592 | default_timeout_ns = slto * NSEC_PER_USEC; |
593 | |
594 | nanoseconds_to_absolutetime(default_timeout_ns, &abstime); |
	LockTimeOutUsec = (uint32_t)(default_timeout_ns / NSEC_PER_USEC);
596 | LockTimeOut = (uint32_t)abstime; |
597 | |
598 | if (PE_parse_boot_argn("mtxspin" , &mtxspin, sizeof (mtxspin))) { |
599 | if (mtxspin > USEC_PER_SEC>>4) |
600 | mtxspin = USEC_PER_SEC>>4; |
601 | nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime); |
602 | } else { |
603 | nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime); |
604 | } |
605 | MutexSpin = abstime; |
606 | } |
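
/*
 * Worked example (illustrative): booting with "slto_us=500000" sets
 * default_timeout_ns to 500000 * NSEC_PER_USEC == 500ms before the
 * nanoseconds_to_absolutetime() conversion; with no "mtxspin" boot-arg,
 * MutexSpin corresponds to 10us of spinning before a mutex blocks.
 */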
607 | |
608 | /* |
609 | * This is called from the machine-independent routine cpu_up() |
610 | * to perform machine-dependent info updates. |
611 | */ |
612 | void |
613 | ml_cpu_up(void) |
614 | { |
615 | hw_atomic_add(&machine_info.physical_cpu, 1); |
616 | hw_atomic_add(&machine_info.logical_cpu, 1); |
617 | } |
618 | |
619 | /* |
620 | * This is called from the machine-independent routine cpu_down() |
621 | * to perform machine-dependent info updates. |
622 | */ |
623 | void |
624 | ml_cpu_down(void) |
625 | { |
626 | cpu_data_t *cpu_data_ptr; |
627 | |
628 | hw_atomic_sub(&machine_info.physical_cpu, 1); |
629 | hw_atomic_sub(&machine_info.logical_cpu, 1); |
630 | |
631 | /* |
	 * If we want to deal with outstanding IPIs, we need to do
	 * so relatively early in the processor_doshutdown path,
634 | * as we pend decrementer interrupts using the IPI |
635 | * mechanism if we cannot immediately service them (if |
636 | * IRQ is masked). Do so now. |
637 | * |
638 | * We aren't on the interrupt stack here; would it make |
639 | * more sense to disable signaling and then enable |
640 | * interrupts? It might be a bit cleaner. |
641 | */ |
642 | cpu_data_ptr = getCpuDatap(); |
643 | cpu_data_ptr->cpu_running = FALSE; |
644 | cpu_signal_handler_internal(TRUE); |
645 | } |
646 | |
647 | /* |
648 | * Routine: ml_cpu_get_info |
649 | * Function: |
650 | */ |
651 | void |
652 | ml_cpu_get_info(ml_cpu_info_t * ml_cpu_info) |
653 | { |
654 | cache_info_t *cpuid_cache_info; |
655 | |
656 | cpuid_cache_info = cache_info(); |
657 | ml_cpu_info->vector_unit = 0; |
658 | ml_cpu_info->cache_line_size = cpuid_cache_info->c_linesz; |
659 | ml_cpu_info->l1_icache_size = cpuid_cache_info->c_isize; |
660 | ml_cpu_info->l1_dcache_size = cpuid_cache_info->c_dsize; |
661 | |
662 | #if (__ARM_ARCH__ >= 7) |
663 | ml_cpu_info->l2_settings = 1; |
664 | ml_cpu_info->l2_cache_size = cpuid_cache_info->c_l2size; |
665 | #else |
666 | ml_cpu_info->l2_settings = 0; |
667 | ml_cpu_info->l2_cache_size = 0xFFFFFFFF; |
668 | #endif |
669 | ml_cpu_info->l3_settings = 0; |
670 | ml_cpu_info->l3_cache_size = 0xFFFFFFFF; |
671 | } |
672 | |
673 | unsigned int |
674 | ml_get_machine_mem(void) |
675 | { |
676 | return (machine_info.memory_size); |
677 | } |
678 | |
679 | __attribute__((noreturn)) |
680 | void |
681 | halt_all_cpus(boolean_t reboot) |
682 | { |
	if (reboot) {
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);
	} else {
		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);
	}
690 | while (1); |
691 | } |
692 | |
693 | __attribute__((noreturn)) |
694 | void |
695 | halt_cpu(void) |
696 | { |
697 | halt_all_cpus(FALSE); |
698 | } |
699 | |
700 | /* |
701 | * Routine: machine_signal_idle |
702 | * Function: |
703 | */ |
704 | void |
705 | machine_signal_idle( |
706 | processor_t processor) |
707 | { |
708 | cpu_signal(processor_to_cpu_datap(processor), SIGPnop, (void *)NULL, (void *)NULL); |
709 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); |
710 | } |
711 | |
712 | void |
713 | machine_signal_idle_deferred( |
714 | processor_t processor) |
715 | { |
716 | cpu_signal_deferred(processor_to_cpu_datap(processor)); |
717 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_DEFERRED_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); |
718 | } |
719 | |
720 | void |
721 | machine_signal_idle_cancel( |
722 | processor_t processor) |
723 | { |
724 | cpu_signal_cancel(processor_to_cpu_datap(processor)); |
725 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_CANCEL_AST), processor->cpu_id, 0 /* nop */, 0, 0, 0); |
726 | } |
727 | |
728 | /* |
729 | * Routine: ml_install_interrupt_handler |
730 | * Function: Initialize Interrupt Handler |
731 | */ |
732 | void |
733 | ml_install_interrupt_handler( |
734 | void *nub, |
735 | int source, |
736 | void *target, |
737 | IOInterruptHandler handler, |
738 | void *refCon) |
739 | { |
740 | cpu_data_t *cpu_data_ptr; |
741 | boolean_t current_state; |
742 | |
743 | current_state = ml_set_interrupts_enabled(FALSE); |
744 | cpu_data_ptr = getCpuDatap(); |
745 | |
746 | cpu_data_ptr->interrupt_nub = nub; |
747 | cpu_data_ptr->interrupt_source = source; |
748 | cpu_data_ptr->interrupt_target = target; |
749 | cpu_data_ptr->interrupt_handler = handler; |
750 | cpu_data_ptr->interrupt_refCon = refCon; |
751 | |
752 | cpu_data_ptr->interrupts_enabled = TRUE; |
753 | (void) ml_set_interrupts_enabled(current_state); |
754 | |
755 | initialize_screen(NULL, kPEAcquireScreen); |
756 | } |
757 | |
758 | /* |
759 | * Routine: ml_init_interrupt |
760 | * Function: Initialize Interrupts |
761 | */ |
762 | void |
763 | ml_init_interrupt(void) |
764 | { |
765 | } |
766 | |
767 | /* |
768 | * Routine: ml_init_timebase |
 * Function: register and setup Timebase, Decrementer services
770 | */ |
771 | void ml_init_timebase( |
772 | void *args, |
773 | tbd_ops_t tbd_funcs, |
774 | vm_offset_t int_address, |
775 | vm_offset_t int_value __unused) |
776 | { |
777 | cpu_data_t *cpu_data_ptr; |
778 | |
779 | cpu_data_ptr = (cpu_data_t *)args; |
780 | |
781 | if ((cpu_data_ptr == &BootCpuData) |
782 | && (rtclock_timebase_func.tbd_fiq_handler == (void *)NULL)) { |
783 | rtclock_timebase_func = *tbd_funcs; |
784 | rtclock_timebase_addr = int_address; |
785 | } |
786 | } |
787 | |
788 | void |
789 | ml_parse_cpu_topology(void) |
790 | { |
791 | DTEntry entry, child __unused; |
792 | OpaqueDTEntryIterator iter; |
793 | uint32_t cpu_boot_arg; |
794 | int err; |
795 | |
796 | cpu_boot_arg = MAX_CPUS; |
797 | |
798 | PE_parse_boot_argn("cpus" , &cpu_boot_arg, sizeof(cpu_boot_arg)); |
799 | |
800 | err = DTLookupEntry(NULL, "/cpus" , &entry); |
801 | assert(err == kSuccess); |
802 | |
803 | err = DTInitEntryIterator(entry, &iter); |
804 | assert(err == kSuccess); |
805 | |
	while (kSuccess == DTIterateEntries(&iter, &child)) {
		unsigned int propSize;
		void *prop = NULL;
		int cpu_id = avail_cpus++;

		if (kSuccess == DTGetProperty(child, "cpu-id", &prop, &propSize))
			cpu_id = *((int32_t*)prop);

		assert(cpu_id < MAX_CPUS);
		assert(cpu_phys_ids[cpu_id] == (uint32_t)-1);

		if (boot_cpu == -1) {
			if (kSuccess != DTGetProperty(child, "state", &prop, &propSize))
				panic("unable to retrieve state for cpu %d", cpu_id);

			if (strncmp((char*)prop, "running", propSize) == 0) {
				boot_cpu = cpu_id;
			}
		}
		if (kSuccess != DTGetProperty(child, "reg", &prop, &propSize))
			panic("unable to retrieve physical ID for cpu %d", cpu_id);

		cpu_phys_ids[cpu_id] = *((uint32_t*)prop);

		if ((cpu_id > max_cpu_number) && ((cpu_id == boot_cpu) || (avail_cpus <= cpu_boot_arg)))
			max_cpu_number = cpu_id;
	}
833 | |
	if (avail_cpus > cpu_boot_arg)
		avail_cpus = cpu_boot_arg;

	if (avail_cpus == 0)
		panic("No cpus found!");

	if (boot_cpu == -1)
		panic("unable to determine boot cpu!");
842 | |
843 | /* |
844 | * Set TPIDRRO_EL0 to indicate the correct cpu number, as we may |
845 | * not be booting from cpu 0. Userspace will consume the current |
846 | * CPU number through this register. For non-boot cores, this is |
847 | * done in start.s (start_cpu) using the cpu_number field of the |
848 | * per-cpu data object. |
849 | */ |
850 | assert(__builtin_arm_rsr64("TPIDRRO_EL0" ) == 0); |
851 | __builtin_arm_wsr64("TPIDRRO_EL0" , (uint64_t)boot_cpu); |
852 | } |
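
/*
 * Illustrative device-tree shape consumed by ml_parse_cpu_topology()
 * (names and values hypothetical): each child of /cpus carries a "reg"
 * physical ID, an optional "cpu-id" logical ID, and a "state" string
 * that reads "running" only for the boot CPU:
 *
 *   /cpus/cpu0:  reg = <0x0>,  cpu-id = <0>,  state = "running"
 *   /cpus/cpu1:  reg = <0x1>,  cpu-id = <1>,  state = "waiting"
 */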
853 | |
854 | unsigned int |
855 | ml_get_cpu_count(void) |
856 | { |
857 | return avail_cpus; |
858 | } |
859 | |
860 | int |
861 | ml_get_boot_cpu_number(void) |
862 | { |
863 | return boot_cpu; |
864 | } |
865 | |
866 | cluster_type_t |
867 | ml_get_boot_cluster(void) |
868 | { |
869 | return boot_cluster; |
870 | } |
871 | |
872 | int |
873 | ml_get_cpu_number(uint32_t phys_id) |
874 | { |
875 | for (int log_id = 0; log_id <= ml_get_max_cpu_number(); ++log_id) { |
876 | if (cpu_phys_ids[log_id] == phys_id) |
877 | return log_id; |
878 | } |
879 | return -1; |
880 | } |
881 | |
882 | int |
883 | ml_get_max_cpu_number(void) |
884 | { |
885 | return max_cpu_number; |
886 | } |
887 | |
888 | |
void ml_lockdown_init() {
	lockdown_handler_grp = lck_grp_alloc_init("lockdown_handler", NULL);
	assert(lockdown_handler_grp != NULL);

	lck_mtx_init(&lockdown_handler_lck, lockdown_handler_grp, NULL);
}
896 | |
897 | kern_return_t |
898 | ml_lockdown_handler_register(lockdown_handler_t f, void *this) |
899 | { |
900 | if (lockdown_handler || !f) { |
901 | return KERN_FAILURE; |
902 | } |
903 | |
904 | lck_mtx_lock(&lockdown_handler_lck); |
905 | lockdown_handler = f; |
906 | lockdown_this = this; |
907 | |
908 | #if !(defined(KERNEL_INTEGRITY_KTRR)) |
	lockdown_done = 1;
910 | lockdown_handler(this); |
911 | #else |
912 | if (lockdown_done) { |
913 | lockdown_handler(this); |
914 | } |
915 | #endif |
916 | lck_mtx_unlock(&lockdown_handler_lck); |
917 | |
918 | return KERN_SUCCESS; |
919 | } |
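
/*
 * Usage sketch (illustrative, not compiled into this file): a client that
 * must defer work until the lockdown point registers a handler. Without
 * KERNEL_INTEGRITY_KTRR the handler fires immediately; with it, the
 * handler runs from ml_lockdown_run_handler(). Names are hypothetical.
 */
#if 0
static void
example_lockdown_cb(void *ctx)
{
	kprintf("lockdown complete, ctx %p\n", ctx);
}

static void
example_register_lockdown_cb(void)
{
	kern_return_t kr = ml_lockdown_handler_register(example_lockdown_cb, NULL);

	assert(kr == KERN_SUCCESS);	/* KERN_FAILURE if a handler is already registered */
}
#endif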
920 | |
921 | void ml_lockdown_run_handler() { |
922 | lck_mtx_lock(&lockdown_handler_lck); |
923 | assert(!lockdown_done); |
924 | |
925 | lockdown_done = 1; |
926 | if (lockdown_handler) { |
927 | lockdown_handler(lockdown_this); |
928 | } |
929 | lck_mtx_unlock(&lockdown_handler_lck); |
930 | } |
931 | |
932 | kern_return_t |
933 | ml_processor_register( |
934 | ml_processor_info_t * in_processor_info, |
935 | processor_t * processor_out, |
936 | ipi_handler_t * ipi_handler) |
937 | { |
938 | cpu_data_t *this_cpu_datap; |
939 | processor_set_t pset; |
940 | boolean_t is_boot_cpu; |
941 | static unsigned int reg_cpu_count = 0; |
942 | |
943 | if (in_processor_info->log_id > (uint32_t)ml_get_max_cpu_number()) |
944 | return KERN_FAILURE; |
945 | |
	if ((unsigned int)OSIncrementAtomic((SInt32*)&reg_cpu_count) >= avail_cpus)
947 | return KERN_FAILURE; |
948 | |
949 | if (in_processor_info->log_id != (uint32_t)ml_get_boot_cpu_number()) { |
950 | is_boot_cpu = FALSE; |
951 | this_cpu_datap = cpu_data_alloc(FALSE); |
952 | cpu_data_init(this_cpu_datap); |
953 | } else { |
954 | this_cpu_datap = &BootCpuData; |
955 | is_boot_cpu = TRUE; |
956 | } |
957 | |
958 | assert(in_processor_info->log_id < MAX_CPUS); |
959 | |
960 | this_cpu_datap->cpu_id = in_processor_info->cpu_id; |
961 | |
962 | this_cpu_datap->cpu_console_buf = console_cpu_alloc(is_boot_cpu); |
963 | if (this_cpu_datap->cpu_console_buf == (void *)(NULL)) |
964 | goto processor_register_error; |
965 | |
966 | if (!is_boot_cpu) { |
967 | this_cpu_datap->cpu_number = in_processor_info->log_id; |
968 | |
969 | if (cpu_data_register(this_cpu_datap) != KERN_SUCCESS) |
970 | goto processor_register_error; |
971 | } |
972 | |
973 | this_cpu_datap->cpu_idle_notify = (void *) in_processor_info->processor_idle; |
974 | this_cpu_datap->cpu_cache_dispatch = in_processor_info->platform_cache_dispatch; |
975 | nanoseconds_to_absolutetime((uint64_t) in_processor_info->powergate_latency, &this_cpu_datap->cpu_idle_latency); |
976 | this_cpu_datap->cpu_reset_assist = kvtophys(in_processor_info->powergate_stub_addr); |
977 | |
978 | this_cpu_datap->idle_timer_notify = (void *) in_processor_info->idle_timer; |
979 | this_cpu_datap->idle_timer_refcon = in_processor_info->idle_timer_refcon; |
980 | |
981 | this_cpu_datap->platform_error_handler = (void *) in_processor_info->platform_error_handler; |
982 | this_cpu_datap->cpu_regmap_paddr = in_processor_info->regmap_paddr; |
983 | this_cpu_datap->cpu_phys_id = in_processor_info->phys_id; |
984 | this_cpu_datap->cpu_l2_access_penalty = in_processor_info->l2_access_penalty; |
985 | |
986 | this_cpu_datap->cpu_cluster_type = in_processor_info->cluster_type; |
987 | this_cpu_datap->cpu_cluster_id = in_processor_info->cluster_id; |
988 | this_cpu_datap->cpu_l2_id = in_processor_info->l2_cache_id; |
989 | this_cpu_datap->cpu_l2_size = in_processor_info->l2_cache_size; |
990 | this_cpu_datap->cpu_l3_id = in_processor_info->l3_cache_id; |
991 | this_cpu_datap->cpu_l3_size = in_processor_info->l3_cache_size; |
992 | |
993 | this_cpu_datap->cluster_master = is_boot_cpu; |
994 | |
995 | pset = pset_find(in_processor_info->cluster_id, processor_pset(master_processor)); |
996 | assert(pset != NULL); |
997 | kprintf("%s>cpu_id %p cluster_id %d cpu_number %d is type %d\n" , __FUNCTION__, in_processor_info->cpu_id, in_processor_info->cluster_id, this_cpu_datap->cpu_number, in_processor_info->cluster_type); |
998 | |
999 | if (!is_boot_cpu) { |
1000 | processor_init((struct processor *)this_cpu_datap->cpu_processor, |
1001 | this_cpu_datap->cpu_number, pset); |
1002 | |
1003 | if (this_cpu_datap->cpu_l2_access_penalty) { |
1004 | /* |
1005 | * Cores that have a non-zero L2 access penalty compared |
1006 | * to the boot processor should be de-prioritized by the |
1007 | * scheduler, so that threads use the cores with better L2 |
1008 | * preferentially. |
1009 | */ |
1010 | processor_set_primary(this_cpu_datap->cpu_processor, |
1011 | master_processor); |
1012 | } |
1013 | } |
1014 | |
1015 | *processor_out = this_cpu_datap->cpu_processor; |
1016 | *ipi_handler = cpu_signal_handler; |
1017 | if (in_processor_info->idle_tickle != (idle_tickle_t *) NULL) |
1018 | *in_processor_info->idle_tickle = (idle_tickle_t) cpu_idle_tickle; |
1019 | |
1020 | #if KPC |
1021 | if (kpc_register_cpu(this_cpu_datap) != TRUE) |
1022 | goto processor_register_error; |
1023 | #endif |
1024 | |
1025 | if (!is_boot_cpu) { |
1026 | early_random_cpu_init(this_cpu_datap->cpu_number); |
1027 | // now let next CPU register itself |
1028 | OSIncrementAtomic((SInt32*)&real_ncpus); |
1029 | } |
1030 | |
1031 | return KERN_SUCCESS; |
1032 | |
1033 | processor_register_error: |
1034 | #if KPC |
1035 | kpc_unregister_cpu(this_cpu_datap); |
1036 | #endif |
1037 | if (!is_boot_cpu) |
1038 | cpu_data_free(this_cpu_datap); |
1039 | |
1040 | return KERN_FAILURE; |
1041 | } |
1042 | |
1043 | void |
1044 | ml_init_arm_debug_interface( |
1045 | void * in_cpu_datap, |
1046 | vm_offset_t virt_address) |
1047 | { |
1048 | ((cpu_data_t *)in_cpu_datap)->cpu_debug_interface_map = virt_address; |
1049 | do_debugid(); |
1050 | } |
1051 | |
1052 | /* |
1053 | * Routine: init_ast_check |
1054 | * Function: |
1055 | */ |
1056 | void |
1057 | init_ast_check( |
1058 | __unused processor_t processor) |
1059 | { |
1060 | } |
1061 | |
1062 | /* |
1063 | * Routine: cause_ast_check |
1064 | * Function: |
1065 | */ |
1066 | void |
1067 | cause_ast_check( |
1068 | processor_t processor) |
1069 | { |
1070 | if (current_processor() != processor) { |
1071 | cpu_signal(processor_to_cpu_datap(processor), SIGPast, (void *)NULL, (void *)NULL); |
1072 | KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_REMOTE_AST), processor->cpu_id, 1 /* ast */, 0, 0, 0); |
1073 | } |
1074 | } |
1075 | |
1076 | extern uint32_t cpu_idle_count; |
1077 | |
1078 | void ml_get_power_state(boolean_t *icp, boolean_t *pidlep) { |
1079 | *icp = ml_at_interrupt_context(); |
1080 | *pidlep = (cpu_idle_count == real_ncpus); |
1081 | } |
1082 | |
1083 | /* |
1084 | * Routine: ml_cause_interrupt |
1085 | * Function: Generate a fake interrupt |
1086 | */ |
1087 | void |
1088 | ml_cause_interrupt(void) |
1089 | { |
1090 | return; /* BS_XXX */ |
1091 | } |
1092 | |
1093 | /* Map memory map IO space */ |
1094 | vm_offset_t |
1095 | ml_io_map( |
1096 | vm_offset_t phys_addr, |
1097 | vm_size_t size) |
1098 | { |
1099 | return (io_map(phys_addr, size, VM_WIMG_IO)); |
1100 | } |
1101 | |
1102 | vm_offset_t |
1103 | ml_io_map_wcomb( |
1104 | vm_offset_t phys_addr, |
1105 | vm_size_t size) |
1106 | { |
1107 | return (io_map(phys_addr, size, VM_WIMG_WCOMB)); |
1108 | } |
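
/*
 * Usage sketch (illustrative, not compiled into this file): mapping a
 * hypothetical device register window as device memory and reading a
 * 32-bit register. EXAMPLE_DEV_PA and EXAMPLE_REG_OFFSET are made up.
 */
#if 0
#define EXAMPLE_DEV_PA		0x20e000000ULL	/* hypothetical physical base */
#define EXAMPLE_REG_OFFSET	0x10		/* hypothetical register offset */

static uint32_t
example_read_device_reg(void)
{
	vm_offset_t base = ml_io_map(EXAMPLE_DEV_PA, PAGE_SIZE);

	return *(volatile uint32_t *)(base + EXAMPLE_REG_OFFSET);
}
#endif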
1109 | |
1110 | /* boot memory allocation */ |
1111 | vm_offset_t |
1112 | ml_static_malloc( |
1113 | __unused vm_size_t size) |
1114 | { |
1115 | return ((vm_offset_t) NULL); |
1116 | } |
1117 | |
1118 | vm_map_address_t |
1119 | ml_map_high_window( |
1120 | vm_offset_t phys_addr, |
1121 | vm_size_t len) |
1122 | { |
1123 | return pmap_map_high_window_bd(phys_addr, len, VM_PROT_READ | VM_PROT_WRITE); |
1124 | } |
1125 | |
1126 | vm_offset_t |
1127 | ml_static_ptovirt( |
1128 | vm_offset_t paddr) |
1129 | { |
1130 | return phystokv(paddr); |
1131 | } |
1132 | |
1133 | vm_offset_t |
1134 | ml_static_slide( |
1135 | vm_offset_t vaddr) |
1136 | { |
1137 | return phystokv(vaddr + vm_kernel_slide - gVirtBase + gPhysBase); |
1138 | } |
1139 | |
1140 | vm_offset_t |
1141 | ml_static_unslide( |
1142 | vm_offset_t vaddr) |
1143 | { |
1144 | return (ml_static_vtop(vaddr) - gPhysBase + gVirtBase - vm_kernel_slide) ; |
1145 | } |
1146 | |
1147 | extern tt_entry_t *arm_kva_to_tte(vm_offset_t va); |
1148 | |
1149 | kern_return_t |
1150 | ml_static_protect( |
1151 | vm_offset_t vaddr, /* kernel virtual address */ |
1152 | vm_size_t size, |
1153 | vm_prot_t new_prot) |
1154 | { |
1155 | pt_entry_t arm_prot = 0; |
1156 | pt_entry_t arm_block_prot = 0; |
1157 | vm_offset_t vaddr_cur; |
1158 | ppnum_t ppn; |
1159 | kern_return_t result = KERN_SUCCESS; |
1160 | |
	if (vaddr < VM_MIN_KERNEL_ADDRESS) {
		panic("ml_static_protect(): %p < %p", (void *) vaddr, (void *) VM_MIN_KERNEL_ADDRESS);
		return KERN_FAILURE;
	}

	assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */

	if ((new_prot & VM_PROT_WRITE) && (new_prot & VM_PROT_EXECUTE)) {
		panic("ml_static_protect(): WX request on %p", (void *) vaddr);
	}
1171 | |
1172 | /* Set up the protection bits, and block bits so we can validate block mappings. */ |
1173 | if (new_prot & VM_PROT_WRITE) { |
1174 | arm_prot |= ARM_PTE_AP(AP_RWNA); |
1175 | arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RWNA); |
1176 | } else { |
1177 | arm_prot |= ARM_PTE_AP(AP_RONA); |
1178 | arm_block_prot |= ARM_TTE_BLOCK_AP(AP_RONA); |
1179 | } |
1180 | |
1181 | arm_prot |= ARM_PTE_NX; |
1182 | arm_block_prot |= ARM_TTE_BLOCK_NX; |
1183 | |
1184 | if (!(new_prot & VM_PROT_EXECUTE)) { |
1185 | arm_prot |= ARM_PTE_PNX; |
1186 | arm_block_prot |= ARM_TTE_BLOCK_PNX; |
1187 | } |
1188 | |
1189 | for (vaddr_cur = vaddr; |
1190 | vaddr_cur < trunc_page_64(vaddr + size); |
1191 | vaddr_cur += PAGE_SIZE) { |
1192 | ppn = pmap_find_phys(kernel_pmap, vaddr_cur); |
1193 | if (ppn != (vm_offset_t) NULL) { |
1194 | tt_entry_t *tte2; |
1195 | pt_entry_t *pte_p; |
1196 | pt_entry_t ptmp; |
1197 | |
1198 | |
1199 | tte2 = arm_kva_to_tte(vaddr_cur); |
1200 | |
1201 | if (((*tte2) & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) { |
1202 | if ((((*tte2) & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) && |
1203 | ((*tte2 & (ARM_TTE_BLOCK_NXMASK | ARM_TTE_BLOCK_PNXMASK | ARM_TTE_BLOCK_APMASK)) == arm_block_prot)) { |
1204 | /* |
1205 | * We can support ml_static_protect on a block mapping if the mapping already has |
1206 | * the desired protections. We still want to run checks on a per-page basis. |
1207 | */ |
1208 | continue; |
1209 | } |
1210 | |
1211 | result = KERN_FAILURE; |
1212 | break; |
1213 | } |
1214 | |
1215 | pte_p = (pt_entry_t *)&((tt_entry_t*)(phystokv((*tte2) & ARM_TTE_TABLE_MASK)))[(((vaddr_cur) & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT)]; |
1216 | ptmp = *pte_p; |
1217 | |
1218 | if ((ptmp & ARM_PTE_HINT_MASK) && ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot)) { |
1219 | /* |
1220 | * The contiguous hint is similar to a block mapping for ml_static_protect; if the existing |
1221 | * protections do not match the desired protections, then we will fail (as we cannot update |
1222 | * this mapping without updating other mappings as well). |
1223 | */ |
1224 | result = KERN_FAILURE; |
1225 | break; |
1226 | } |
1227 | |
1228 | __unreachable_ok_push |
1229 | if (TEST_PAGE_RATIO_4) { |
1230 | { |
1231 | unsigned int i; |
1232 | pt_entry_t *ptep_iter; |
1233 | |
1234 | ptep_iter = pte_p; |
1235 | for (i=0; i<4; i++, ptep_iter++) { |
1236 | /* Note that there is a hole in the HINT sanity checking here. */ |
1237 | ptmp = *ptep_iter; |
1238 | |
1239 | /* We only need to update the page tables if the protections do not match. */ |
1240 | if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) { |
1241 | ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot; |
1242 | *ptep_iter = ptmp; |
1243 | } |
1244 | } |
1245 | } |
1246 | #ifndef __ARM_L1_PTW__ |
1247 | FlushPoC_DcacheRegion( trunc_page_32(pte_p), 4*sizeof(*pte_p)); |
1248 | #endif |
1249 | } else { |
1250 | ptmp = *pte_p; |
1251 | |
1252 | /* We only need to update the page tables if the protections do not match. */ |
1253 | if ((ptmp & (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) != arm_prot) { |
1254 | ptmp = (ptmp & ~(ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)) | arm_prot; |
1255 | *pte_p = ptmp; |
1256 | } |
1257 | |
1258 | #ifndef __ARM_L1_PTW__ |
1259 | FlushPoC_DcacheRegion( trunc_page_32(pte_p), sizeof(*pte_p)); |
1260 | #endif |
1261 | } |
1262 | __unreachable_ok_pop |
1263 | } |
1264 | } |
1265 | |
1266 | if (vaddr_cur > vaddr) { |
1267 | assert(((vaddr_cur - vaddr) & 0xFFFFFFFF00000000ULL) == 0); |
1268 | flush_mmu_tlb_region(vaddr, (uint32_t)(vaddr_cur - vaddr)); |
1269 | } |
1270 | |
1271 | |
1272 | return result; |
1273 | } |
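
/*
 * Usage sketch (illustrative, not compiled into this file): sealing a
 * page-aligned, already-mapped kernel range read-only once it has been
 * initialized; W+X requests panic above. The helper is hypothetical.
 */
#if 0
static void
example_seal_region(vm_offset_t va, vm_size_t size)
{
	kern_return_t kr;

	assert((va & PAGE_MASK) == 0);	/* ml_static_protect requires page alignment */
	kr = ml_static_protect(va, size, VM_PROT_READ);
	assert(kr == KERN_SUCCESS);
}
#endif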
1274 | |
1275 | /* |
1276 | * Routine: ml_static_mfree |
1277 | * Function: |
1278 | */ |
1279 | void |
1280 | ml_static_mfree( |
1281 | vm_offset_t vaddr, |
1282 | vm_size_t size) |
1283 | { |
1284 | vm_offset_t vaddr_cur; |
1285 | ppnum_t ppn; |
1286 | uint32_t freed_pages = 0; |
1287 | |
1288 | /* It is acceptable (if bad) to fail to free. */ |
1289 | if (vaddr < VM_MIN_KERNEL_ADDRESS) |
1290 | return; |
1291 | |
1292 | assert((vaddr & (PAGE_SIZE - 1)) == 0); /* must be page aligned */ |
1293 | |
1294 | for (vaddr_cur = vaddr; |
1295 | vaddr_cur < trunc_page_64(vaddr + size); |
1296 | vaddr_cur += PAGE_SIZE) { |
1297 | |
1298 | ppn = pmap_find_phys(kernel_pmap, vaddr_cur); |
1299 | if (ppn != (vm_offset_t) NULL) { |
1300 | /* |
1301 | * It is not acceptable to fail to update the protections on a page |
1302 | * we will release to the VM. We need to either panic or continue. |
1303 | * For now, we'll panic (to help flag if there is memory we can |
1304 | * reclaim). |
1305 | */ |
1306 | if (ml_static_protect(vaddr_cur, PAGE_SIZE, VM_PROT_WRITE | VM_PROT_READ) != KERN_SUCCESS) { |
1307 | panic("Failed ml_static_mfree on %p" , (void *) vaddr_cur); |
1308 | } |
1309 | |
1310 | #if 0 |
1311 | /* |
1312 | * Must NOT tear down the "V==P" mapping for vaddr_cur as the zone alias scheme |
1313 | * relies on the persistence of these mappings for all time. |
1314 | */ |
1315 | // pmap_remove(kernel_pmap, (addr64_t) vaddr_cur, (addr64_t) (vaddr_cur + PAGE_SIZE)); |
1316 | #endif |
1317 | |
1318 | vm_page_create(ppn, (ppn + 1)); |
1319 | freed_pages++; |
1320 | } |
1321 | } |
1322 | vm_page_lockspin_queues(); |
1323 | vm_page_wire_count -= freed_pages; |
1324 | vm_page_wire_count_initial -= freed_pages; |
1325 | vm_page_unlock_queues(); |
1326 | #if DEBUG |
1327 | kprintf("ml_static_mfree: Released 0x%x pages at VA %p, size:0x%llx, last ppn: 0x%x\n" , freed_pages, (void *)vaddr, (uint64_t)size, ppn); |
1328 | #endif |
1329 | } |
1330 | |
1331 | |
1332 | /* virtual to physical on wired pages */ |
1333 | vm_offset_t |
1334 | ml_vtophys(vm_offset_t vaddr) |
1335 | { |
1336 | return kvtophys(vaddr); |
1337 | } |
1338 | |
1339 | /* |
1340 | * Routine: ml_nofault_copy |
1341 | * Function: Perform a physical mode copy if the source and destination have |
1342 | * valid translations in the kernel pmap. If translations are present, they are |
 * assumed to be wired; i.e., no attempt is made to guarantee that the
1344 | * translations obtained remain valid for the duration of the copy process. |
1345 | */ |
1346 | vm_size_t |
1347 | ml_nofault_copy(vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size) |
1348 | { |
1349 | addr64_t cur_phys_dst, cur_phys_src; |
1350 | vm_size_t count, nbytes = 0; |
1351 | |
1352 | while (size > 0) { |
1353 | if (!(cur_phys_src = kvtophys(virtsrc))) |
1354 | break; |
1355 | if (!(cur_phys_dst = kvtophys(virtdst))) |
1356 | break; |
1357 | if (!pmap_valid_address(trunc_page_64(cur_phys_dst)) || |
1358 | !pmap_valid_address(trunc_page_64(cur_phys_src))) |
1359 | break; |
1360 | count = PAGE_SIZE - (cur_phys_src & PAGE_MASK); |
1361 | if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK))) |
1362 | count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK); |
1363 | if (count > size) |
1364 | count = size; |
1365 | |
1366 | bcopy_phys(cur_phys_src, cur_phys_dst, count); |
1367 | |
1368 | nbytes += count; |
1369 | virtsrc += count; |
1370 | virtdst += count; |
1371 | size -= count; |
1372 | } |
1373 | |
1374 | return nbytes; |
1375 | } |
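
/*
 * Usage sketch (illustrative, not compiled into this file): since the
 * copy stops at the first page without a valid translation, a short
 * return value signals failure. The helper is hypothetical.
 */
#if 0
static boolean_t
example_copy_if_mapped(vm_offset_t src, vm_offset_t dst, vm_size_t len)
{
	return (ml_nofault_copy(src, dst, len) == len) ? TRUE : FALSE;
}
#endif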
1376 | |
1377 | /* |
1378 | * Routine: ml_validate_nofault |
 * Function: Validate that this address range has valid translations
 *           in the kernel pmap. If translations are present, they are
 *           assumed to be wired; i.e., no attempt is made to guarantee
 *           that the translations persist after the check.
1383 | * Returns: TRUE if the range is mapped and will not cause a fault, |
1384 | * FALSE otherwise. |
1385 | */ |
1386 | |
1387 | boolean_t ml_validate_nofault( |
1388 | vm_offset_t virtsrc, vm_size_t size) |
1389 | { |
1390 | addr64_t cur_phys_src; |
1391 | uint32_t count; |
1392 | |
1393 | while (size > 0) { |
1394 | if (!(cur_phys_src = kvtophys(virtsrc))) |
1395 | return FALSE; |
1396 | if (!pmap_valid_address(trunc_page_64(cur_phys_src))) |
1397 | return FALSE; |
1398 | count = (uint32_t)(PAGE_SIZE - (cur_phys_src & PAGE_MASK)); |
1399 | if (count > size) |
1400 | count = (uint32_t)size; |
1401 | |
1402 | virtsrc += count; |
1403 | size -= count; |
1404 | } |
1405 | |
1406 | return TRUE; |
1407 | } |
1408 | |
1409 | void |
1410 | ml_get_bouncepool_info(vm_offset_t * phys_addr, vm_size_t * size) |
1411 | { |
1412 | *phys_addr = 0; |
1413 | *size = 0; |
1414 | } |
1415 | |
1416 | void |
1417 | active_rt_threads(__unused boolean_t active) |
1418 | { |
1419 | } |
1420 | |
1421 | static void cpu_qos_cb_default(__unused int urgency, __unused uint64_t qos_param1, __unused uint64_t qos_param2) { |
1422 | return; |
1423 | } |
1424 | |
1425 | cpu_qos_update_t cpu_qos_update = cpu_qos_cb_default; |
1426 | |
1427 | void cpu_qos_update_register(cpu_qos_update_t cpu_qos_cb) { |
1428 | if (cpu_qos_cb != NULL) { |
1429 | cpu_qos_update = cpu_qos_cb; |
1430 | } else { |
1431 | cpu_qos_update = cpu_qos_cb_default; |
1432 | } |
1433 | } |
1434 | |
1435 | void |
1436 | thread_tell_urgency(int urgency, uint64_t rt_period, uint64_t rt_deadline, uint64_t sched_latency __unused, __unused thread_t nthread) |
1437 | { |
1438 | SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_START, urgency, rt_period, rt_deadline, sched_latency, 0); |
1439 | |
1440 | cpu_qos_update(urgency, rt_period, rt_deadline); |
1441 | |
1442 | SCHED_DEBUG_PLATFORM_KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED,MACH_URGENCY) | DBG_FUNC_END, urgency, rt_period, rt_deadline, 0, 0); |
1443 | } |
1444 | |
1445 | void |
1446 | machine_run_count(__unused uint32_t count) |
1447 | { |
1448 | } |
1449 | |
1450 | processor_t |
1451 | machine_choose_processor(__unused processor_set_t pset, processor_t processor) |
1452 | { |
1453 | return (processor); |
1454 | } |
1455 | |
1456 | #if KASAN |
1457 | vm_offset_t ml_stack_base(void); |
1458 | vm_size_t ml_stack_size(void); |
1459 | |
1460 | vm_offset_t |
1461 | ml_stack_base(void) |
1462 | { |
1463 | uintptr_t local = (uintptr_t) &local; |
1464 | vm_offset_t intstack_top_ptr; |
1465 | |
1466 | intstack_top_ptr = getCpuDatap()->intstack_top; |
1467 | if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) { |
1468 | return intstack_top_ptr - INTSTACK_SIZE; |
1469 | } else { |
1470 | return current_thread()->kernel_stack; |
1471 | } |
1472 | } |
1473 | vm_size_t |
1474 | ml_stack_size(void) |
1475 | { |
1476 | uintptr_t local = (uintptr_t) &local; |
1477 | vm_offset_t intstack_top_ptr; |
1478 | |
1479 | intstack_top_ptr = getCpuDatap()->intstack_top; |
1480 | if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) { |
1481 | return INTSTACK_SIZE; |
1482 | } else { |
1483 | return kernel_stack_size; |
1484 | } |
1485 | } |
1486 | #endif |
1487 | |
1488 | boolean_t machine_timeout_suspended(void) { |
1489 | return FALSE; |
1490 | } |
1491 | |
1492 | kern_return_t |
1493 | ml_interrupt_prewarm(__unused uint64_t deadline) |
1494 | { |
1495 | return KERN_FAILURE; |
1496 | } |
1497 | |
1498 | /* |
1499 | * Assumes fiq, irq disabled. |
1500 | */ |
1501 | void |
1502 | ml_set_decrementer(uint32_t dec_value) |
1503 | { |
1504 | cpu_data_t *cdp = getCpuDatap(); |
1505 | |
1506 | assert(ml_get_interrupts_enabled() == FALSE); |
1507 | cdp->cpu_decrementer = dec_value; |
1508 | |
1509 | if (cdp->cpu_set_decrementer_func) { |
1510 | ((void (*)(uint32_t))cdp->cpu_set_decrementer_func)(dec_value); |
1511 | } else { |
1512 | __asm__ volatile("msr CNTP_TVAL_EL0, %0" : : "r" ((uint64_t)dec_value)); |
1513 | } |
1514 | } |
1515 | |
1516 | uint64_t ml_get_hwclock() |
1517 | { |
1518 | uint64_t timebase; |
1519 | |
1520 | // ISB required by ARMV7C.b section B8.1.2 & ARMv8 section D6.1.2 |
1521 | // "Reads of CNTPCT[_EL0] can occur speculatively and out of order relative |
1522 | // to other instructions executed on the same processor." |
1523 | __asm__ volatile("isb\n" |
1524 | "mrs %0, CNTPCT_EL0" |
1525 | : "=r" (timebase)); |
1526 | |
1527 | return timebase; |
1528 | } |
1529 | |
1530 | uint64_t |
1531 | ml_get_timebase() |
1532 | { |
1533 | return (ml_get_hwclock() + getCpuDatap()->cpu_base_timebase); |
1534 | } |
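
/*
 * Usage sketch (illustrative, not compiled into this file): timing a code
 * region with the continuous timebase and converting ticks to wall time.
 * The helper is hypothetical.
 */
#if 0
static uint64_t
example_time_region(void (*fn)(void))
{
	uint64_t start = ml_get_timebase();
	uint64_t elapsed_ns;

	fn();
	absolutetime_to_nanoseconds(ml_get_timebase() - start, &elapsed_ns);
	return elapsed_ns;
}
#endif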
1535 | |
1536 | uint32_t |
1537 | ml_get_decrementer() |
1538 | { |
1539 | cpu_data_t *cdp = getCpuDatap(); |
1540 | uint32_t dec; |
1541 | |
1542 | assert(ml_get_interrupts_enabled() == FALSE); |
1543 | |
1544 | if (cdp->cpu_get_decrementer_func) { |
1545 | dec = ((uint32_t (*)(void))cdp->cpu_get_decrementer_func)(); |
1546 | } else { |
1547 | uint64_t wide_val; |
1548 | |
1549 | __asm__ volatile("mrs %0, CNTP_TVAL_EL0" : "=r" (wide_val)); |
1550 | dec = (uint32_t)wide_val; |
1551 | assert(wide_val == (uint64_t)dec); |
1552 | } |
1553 | |
1554 | return dec; |
1555 | } |
1556 | |
1557 | boolean_t |
1558 | ml_get_timer_pending() |
1559 | { |
1560 | uint64_t cntp_ctl; |
1561 | |
1562 | __asm__ volatile("mrs %0, CNTP_CTL_EL0" : "=r" (cntp_ctl)); |
1563 | return ((cntp_ctl & CNTP_CTL_EL0_ISTATUS) != 0) ? TRUE : FALSE; |
1564 | } |
1565 | |
1566 | boolean_t |
1567 | ml_wants_panic_trap_to_debugger(void) |
1568 | { |
1569 | boolean_t result = FALSE; |
1570 | return result; |
1571 | } |
1572 | |
1573 | static void |
1574 | cache_trap_error(thread_t thread, vm_map_address_t fault_addr) |
1575 | { |
1576 | mach_exception_data_type_t exc_data[2]; |
1577 | arm_saved_state_t *regs = get_user_regs(thread); |
1578 | |
1579 | set_saved_state_far(regs, fault_addr); |
1580 | |
1581 | exc_data[0] = KERN_INVALID_ADDRESS; |
1582 | exc_data[1] = fault_addr; |
1583 | |
1584 | exception_triage(EXC_BAD_ACCESS, exc_data, 2); |
1585 | } |
1586 | |
1587 | static void |
1588 | cache_trap_recover() |
1589 | { |
1590 | vm_map_address_t fault_addr; |
1591 | |
1592 | __asm__ volatile("mrs %0, FAR_EL1" : "=r" (fault_addr)); |
1593 | |
1594 | cache_trap_error(current_thread(), fault_addr); |
1595 | } |
1596 | |
1597 | static void |
1598 | dcache_flush_trap(vm_map_address_t start, vm_map_size_t size) |
1599 | { |
1600 | vm_map_address_t end = start + size; |
1601 | thread_t thread = current_thread(); |
1602 | vm_offset_t old_recover = thread->recover; |
1603 | |
1604 | /* Check bounds */ |
1605 | if (task_has_64Bit_addr(current_task())) { |
1606 | if (end > MACH_VM_MAX_ADDRESS) { |
1607 | cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1)); |
1608 | } |
1609 | } else { |
1610 | if (end > VM_MAX_ADDRESS) { |
1611 | cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1)); |
1612 | } |
1613 | } |
1614 | |
1615 | if (start > end) { |
1616 | cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1)); |
1617 | } |
1618 | |
1619 | /* Set recovery function */ |
1620 | thread->recover = (vm_address_t)cache_trap_recover; |
1621 | |
1622 | /* |
1623 | * We're coherent on Apple ARM64 CPUs, so this could be a nop. However, |
1624 | * if the region given us is bad, it would be good to catch it and |
1625 | * crash, ergo we still do the flush. |
1626 | */ |
1627 | FlushPoC_DcacheRegion(start, (uint32_t)size); |
1628 | |
1629 | /* Restore recovery function */ |
1630 | thread->recover = old_recover; |
1631 | |
1632 | /* Return (caller does exception return) */ |
1633 | } |
1634 | |
1635 | static void |
1636 | icache_invalidate_trap(vm_map_address_t start, vm_map_size_t size) |
1637 | { |
1638 | vm_map_address_t end = start + size; |
1639 | thread_t thread = current_thread(); |
1640 | vm_offset_t old_recover = thread->recover; |
1641 | |
1642 | /* Check bounds */ |
1643 | if (task_has_64Bit_addr(current_task())) { |
1644 | if (end > MACH_VM_MAX_ADDRESS) { |
1645 | cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1)); |
1646 | } |
1647 | } else { |
1648 | if (end > VM_MAX_ADDRESS) { |
1649 | cache_trap_error(thread, end & ((1 << ARM64_CLINE_SHIFT) - 1)); |
1650 | } |
1651 | } |
1652 | |
1653 | if (start > end) { |
1654 | cache_trap_error(thread, start & ((1 << ARM64_CLINE_SHIFT) - 1)); |
1655 | } |
1656 | |
1657 | /* Set recovery function */ |
1658 | thread->recover = (vm_address_t)cache_trap_recover; |
1659 | |
1660 | CleanPoU_DcacheRegion(start, (uint32_t) size); |
1661 | |
1662 | /* Invalidate iCache to point of unification */ |
1663 | #if __ARM_IC_NOALIAS_ICACHE__ |
1664 | InvalidatePoU_IcacheRegion(start, (uint32_t)size); |
1665 | #else |
1666 | InvalidatePoU_Icache(); |
1667 | #endif |
1668 | |
1669 | /* Restore recovery function */ |
1670 | thread->recover = old_recover; |
1671 | |
1672 | /* Return (caller does exception return) */ |
1673 | } |
1674 | |
1675 | __attribute__((noreturn)) |
1676 | void |
1677 | platform_syscall(arm_saved_state_t *state) |
1678 | { |
1679 | uint32_t code; |
1680 | |
1681 | #define platform_syscall_kprintf(x...) /* kprintf("platform_syscall: " x) */ |
1682 | |
1683 | code = (uint32_t)get_saved_state_reg(state, 3); |
	switch (code) {
	case 0:
		/* I-Cache flush */
		platform_syscall_kprintf("icache flush requested.\n");
		icache_invalidate_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1));
		break;
	case 1:
		/* D-Cache flush */
		platform_syscall_kprintf("dcache flush requested.\n");
		dcache_flush_trap(get_saved_state_reg(state, 0), get_saved_state_reg(state, 1));
		break;
	case 2:
		/* set cthread */
		platform_syscall_kprintf("set cthread self.\n");
		thread_set_cthread_self(get_saved_state_reg(state, 0));
		break;
	case 3:
		/* get cthread */
		platform_syscall_kprintf("get cthread self.\n");
		set_saved_state_reg(state, 0, thread_get_cthread_self());
		break;
	default:
		platform_syscall_kprintf("unknown: %d\n", code);
		break;
	}
1709 | |
1710 | thread_exception_return(); |
1711 | } |
1712 | |
1713 | static void |
1714 | _enable_timebase_event_stream(uint32_t bit_index) |
1715 | { |
1716 | uint64_t cntkctl; /* One wants to use 32 bits, but "mrs" prefers it this way */ |
1717 | |
1718 | if (bit_index >= 64) { |
1719 | panic("%s: invalid bit index (%u)" , __FUNCTION__, bit_index); |
1720 | } |
1721 | |
1722 | __asm__ volatile ("mrs %0, CNTKCTL_EL1" : "=r" (cntkctl)); |
1723 | |
1724 | cntkctl |= (bit_index << CNTKCTL_EL1_EVENTI_SHIFT); |
1725 | cntkctl |= CNTKCTL_EL1_EVNTEN; |
1726 | cntkctl |= CNTKCTL_EL1_EVENTDIR; /* 1->0; why not? */ |
1727 | |
1728 | /* |
1729 | * If the SOC supports it (and it isn't broken), enable |
1730 | * EL0 access to the physical timebase register. |
1731 | */ |
1732 | if (user_timebase_allowed()) { |
1733 | cntkctl |= CNTKCTL_EL1_PL0PCTEN; |
1734 | } |
1735 | |
1736 | __asm__ volatile ("msr CNTKCTL_EL1, %0" : : "r" (cntkctl)); |
1737 | } |
1738 | |
1739 | /* |
1740 | * Turn timer on, unmask that interrupt. |
1741 | */ |
1742 | static void |
1743 | _enable_virtual_timer(void) |
1744 | { |
1745 | uint64_t cntvctl = CNTP_CTL_EL0_ENABLE; /* One wants to use 32 bits, but "mrs" prefers it this way */ |
1746 | |
1747 | __asm__ volatile ("msr CNTP_CTL_EL0, %0" : : "r" (cntvctl)); |
1748 | } |
1749 | |
1750 | void |
1751 | fiq_context_init(boolean_t enable_fiq __unused) |
1752 | { |
1753 | _enable_timebase_event_stream(fiq_eventi); |
1754 | |
1755 | /* Interrupts still disabled. */ |
1756 | assert(ml_get_interrupts_enabled() == FALSE); |
1757 | _enable_virtual_timer(); |
1758 | } |
1759 | |
1760 | void |
1761 | fiq_context_bootstrap(boolean_t enable_fiq) |
1762 | { |
1763 | #if defined(APPLE_ARM64_ARCH_FAMILY) || defined(BCM2837) |
1764 | /* Could fill in our own ops here, if we needed them */ |
1765 | uint64_t ticks_per_sec, ticks_per_event, events_per_sec; |
1766 | uint32_t bit_index; |
1767 | |
1768 | ticks_per_sec = gPEClockFrequencyInfo.timebase_frequency_hz; |
1769 | #if defined(ARM_BOARD_WFE_TIMEOUT_NS) |
1770 | events_per_sec = 1000000000 / ARM_BOARD_WFE_TIMEOUT_NS; |
1771 | #else |
1772 | /* Default to 1usec (or as close as we can get) */ |
1773 | events_per_sec = 1000000; |
1774 | #endif |
1775 | ticks_per_event = ticks_per_sec / events_per_sec; |
1776 | bit_index = flsll(ticks_per_event) - 1; /* Highest bit set */ |
1777 | |
1778 | /* Round up to power of two */ |
1779 | if ((ticks_per_event & ((1 << bit_index) - 1)) != 0) |
1780 | bit_index++; |
1781 | |
1782 | /* |
1783 | * The timer can only trigger on rising or falling edge, |
1784 | * not both; we don't care which we trigger on, but we |
1785 | * do need to adjust which bit we are interested in to |
1786 | * account for this. |
1787 | */ |
1788 | if (bit_index != 0) |
1789 | bit_index--; |
1790 | |
1791 | fiq_eventi = bit_index; |
1792 | #else |
1793 | #error Need a board configuration. |
1794 | #endif |
1795 | fiq_context_init(enable_fiq); |
1796 | } |
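
/*
 * Worked example (illustrative): with a 24MHz timebase and the default
 * target of 1,000,000 events/sec, ticks_per_event == 24, so
 * flsll(24) - 1 == 4; since 24 is not a power of two the index rounds
 * up to 5 (one event per 32 ticks), and the single-edge adjustment
 * then selects bit 4.
 */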
1797 | |
1798 | boolean_t |
1799 | ml_delay_should_spin(uint64_t interval) |
1800 | { |
1801 | cpu_data_t *cdp = getCpuDatap(); |
1802 | |
1803 | if (cdp->cpu_idle_latency) { |
1804 | return (interval < cdp->cpu_idle_latency) ? TRUE : FALSE; |
1805 | } else { |
1806 | /* |
1807 | * Early boot, latency is unknown. Err on the side of blocking, |
1808 | * which should always be safe, even if slow |
1809 | */ |
1810 | return FALSE; |
1811 | } |
1812 | } |
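
/*
 * Usage sketch (illustrative, not compiled into this file): a delay
 * routine can consult ml_delay_should_spin() to pick a strategy.
 */
#if 0
static void
example_delay(uint64_t interval_abs)
{
	if (ml_delay_should_spin(interval_abs)) {
		/* interval is shorter than idle entry/exit latency: spin */
	} else {
		/* long interval: block and let the scheduler run other work */
	}
}
#endif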
1813 | |
1814 | boolean_t ml_thread_is64bit(thread_t thread) { |
1815 | return (thread_is_64bit_addr(thread)); |
1816 | } |
1817 | |
1818 | void ml_timer_evaluate(void) { |
1819 | } |
1820 | |
1821 | boolean_t |
1822 | ml_timer_forced_evaluation(void) { |
1823 | return FALSE; |
1824 | } |
1825 | |
1826 | uint64_t |
1827 | ml_energy_stat(thread_t t) { |
1828 | return t->machine.energy_estimate_nj; |
1829 | } |
1830 | |
1831 | |
1832 | void |
1833 | ml_gpu_stat_update(__unused uint64_t gpu_ns_delta) { |
1834 | #if CONFIG_EMBEDDED |
1835 | /* |
1836 | * For now: update the resource coalition stats of the |
1837 | * current thread's coalition |
1838 | */ |
1839 | task_coalition_update_gpu_stats(current_task(), gpu_ns_delta); |
1840 | #endif |
1841 | } |
1842 | |
1843 | uint64_t |
1844 | ml_gpu_stat(__unused thread_t t) { |
1845 | return 0; |
1846 | } |
1847 | |
1848 | #if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME |
1849 | static void |
1850 | timer_state_event(boolean_t switch_to_kernel) |
1851 | { |
1852 | thread_t thread = current_thread(); |
1853 | if (!thread->precise_user_kernel_time) return; |
1854 | |
1855 | processor_data_t *pd = &getCpuDatap()->cpu_processor->processor_data; |
1856 | uint64_t now = ml_get_timebase(); |
1857 | |
1858 | timer_stop(pd->current_state, now); |
1859 | pd->current_state = (switch_to_kernel) ? &pd->system_state : &pd->user_state; |
1860 | timer_start(pd->current_state, now); |
1861 | |
1862 | timer_stop(pd->thread_timer, now); |
1863 | pd->thread_timer = (switch_to_kernel) ? &thread->system_timer : &thread->user_timer; |
1864 | timer_start(pd->thread_timer, now); |
1865 | } |
1866 | |
1867 | void |
1868 | timer_state_event_user_to_kernel(void) |
1869 | { |
1870 | timer_state_event(TRUE); |
1871 | } |
1872 | |
1873 | void |
1874 | timer_state_event_kernel_to_user(void) |
1875 | { |
1876 | timer_state_event(FALSE); |
1877 | } |
1878 | #endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */ |
1879 | |
1880 | /* |
1881 | * The following are required for parts of the kernel |
1882 | * that cannot resolve these functions as inlines: |
1883 | */ |
1884 | extern thread_t current_act(void); |
1885 | thread_t |
1886 | current_act(void) |
1887 | { |
1888 | return current_thread_fast(); |
1889 | } |
1890 | |
1891 | #undef current_thread |
1892 | extern thread_t current_thread(void); |
1893 | thread_t |
1894 | current_thread(void) |
1895 | { |
1896 | return current_thread_fast(); |
1897 | } |
1898 | |
1899 | typedef struct |
1900 | { |
1901 | ex_cb_t cb; |
1902 | void *refcon; |
1903 | } |
1904 | ex_cb_info_t; |
1905 | |
1906 | ex_cb_info_t ex_cb_info[EXCB_CLASS_MAX]; |
1907 | |
1908 | /* |
1909 | * Callback registration |
 * Currently we support only one registered callback per class, but
 * the design could be extended to support more.
1912 | */ |
1913 | kern_return_t ex_cb_register( |
1914 | ex_cb_class_t cb_class, |
1915 | ex_cb_t cb, |
1916 | void *refcon) |
1917 | { |
1918 | ex_cb_info_t *pInfo = &ex_cb_info[cb_class]; |
1919 | |
1920 | if ((NULL == cb) || (cb_class >= EXCB_CLASS_MAX)) |
1921 | { |
1922 | return KERN_INVALID_VALUE; |
1923 | } |
1924 | |
1925 | if (NULL == pInfo->cb) |
1926 | { |
1927 | pInfo->cb = cb; |
1928 | pInfo->refcon = refcon; |
1929 | return KERN_SUCCESS; |
1930 | } |
1931 | return KERN_FAILURE; |
1932 | } |
1933 | |
1934 | /* |
1935 | * Called internally by platform kernel to invoke the registered callback for class |
1936 | */ |
1937 | ex_cb_action_t ex_cb_invoke( |
1938 | ex_cb_class_t cb_class, |
1939 | vm_offset_t far) |
1940 | { |
1941 | ex_cb_info_t *pInfo = &ex_cb_info[cb_class]; |
1942 | ex_cb_state_t state = {far}; |
1943 | |
1944 | if (cb_class >= EXCB_CLASS_MAX) |
1945 | { |
1946 | panic("Invalid exception callback class 0x%x\n" , cb_class); |
1947 | } |
1948 | |
1949 | if (pInfo->cb) |
1950 | { |
1951 | return pInfo->cb(cb_class, pInfo->refcon, &state); |
1952 | } |
1953 | return EXCB_ACTION_NONE; |
1954 | } |
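
/*
 * Usage sketch (illustrative, not compiled into this file): registering an
 * exception callback. EXCB_CLASS_EXAMPLE and the callback body are
 * hypothetical; real class values come from the ex_cb_class_t definition.
 */
#if 0
static ex_cb_action_t
example_ex_cb(ex_cb_class_t cb_class, void *refcon, const ex_cb_state_t *state)
{
	/* Inspect the faulting address captured in the callback state. */
	kprintf("exception class 0x%x, FAR %p\n", (unsigned)cb_class, (void *)state->far);
	return EXCB_ACTION_NONE;	/* take no special action */
}

static void
example_install_ex_cb(void)
{
	kern_return_t kr = ex_cb_register(EXCB_CLASS_EXAMPLE, example_ex_cb, NULL);

	assert(kr == KERN_SUCCESS);	/* KERN_FAILURE if the class already has a callback */
}
#endif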
1955 | |
1956 | |