/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: arm/cpu_common.c
 *
 * cpu routines common to all supported arm variants
 */

#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <mach/processor_info.h>
#include <machine/atomic.h>
#include <machine/config.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/protos.h>
#include <pexpert/device_tree.h>
#include <sys/kdebug.h>
#include <arm/machine_routines.h>
#include <libkern/OSAtomic.h>

#if KPERF
void kperf_signal_handler(unsigned int cpu_number);
#endif

struct processor BootProcessor;

unsigned int real_ncpus = 1;
boolean_t idle_enable = FALSE;
uint64_t wake_abstime = 0x0ULL;

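/*
 *	Routine:	cpu_datap
 *	Function:	Return the cpu_data_t registered for the given cpu slot,
 *			or NULL if no CPU data has been registered there.
 */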
cpu_data_t *
cpu_datap(int cpu)
{
	assert(cpu < MAX_CPUS);
	return (CpuDataEntries[cpu].cpu_data_vaddr);
}

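/*
 *	Routine:	cpu_control
 *	Function:	Processor control interface; not implemented on ARM, so
 *			the request is logged and KERN_FAILURE returned.
 */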
kern_return_t
cpu_control(int slot_num,
            processor_info_t info,
            unsigned int count)
{
	printf("cpu_control(%d,%p,%d) not implemented\n",
	       slot_num, info, count);
	return (KERN_FAILURE);
}

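/*
 *	Routine:	cpu_info_count
 *	Function:	Report how many processor_info words cpu_info returns for
 *			a given flavor; only PROCESSOR_CPU_STAT is supported.
 */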
kern_return_t
cpu_info_count(processor_flavor_t flavor,
               unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
		*count = PROCESSOR_CPU_STAT_COUNT;
		return (KERN_SUCCESS);

	default:
		*count = 0;
		return (KERN_FAILURE);
	}
}

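/*
 *	Routine:	cpu_info
 *	Function:	Copy the per-CPU exception and interrupt counters for the
 *			given slot into the caller's buffer.  Only
 *			PROCESSOR_CPU_STAT is supported, and *count must be at
 *			least PROCESSOR_CPU_STAT_COUNT on entry.
 */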
kern_return_t
cpu_info(processor_flavor_t flavor,
         int slot_num,
         processor_info_t info,
         unsigned int *count)
{
	switch (flavor) {
	case PROCESSOR_CPU_STAT:
	{
		processor_cpu_stat_t cpu_stat;
		cpu_data_t *cpu_data_ptr = CpuDataEntries[slot_num].cpu_data_vaddr;

		if (*count < PROCESSOR_CPU_STAT_COUNT)
			return (KERN_FAILURE);

		cpu_stat = (processor_cpu_stat_t) info;
		cpu_stat->irq_ex_cnt = cpu_data_ptr->cpu_stat.irq_ex_cnt;
		cpu_stat->ipi_cnt = cpu_data_ptr->cpu_stat.ipi_cnt;
		cpu_stat->timer_cnt = cpu_data_ptr->cpu_stat.timer_cnt;
		cpu_stat->undef_ex_cnt = cpu_data_ptr->cpu_stat.undef_ex_cnt;
		cpu_stat->unaligned_cnt = cpu_data_ptr->cpu_stat.unaligned_cnt;
		cpu_stat->vfp_cnt = cpu_data_ptr->cpu_stat.vfp_cnt;
		cpu_stat->vfp_shortv_cnt = 0;
		cpu_stat->data_ex_cnt = cpu_data_ptr->cpu_stat.data_ex_cnt;
		cpu_stat->instr_ex_cnt = cpu_data_ptr->cpu_stat.instr_ex_cnt;

		*count = PROCESSOR_CPU_STAT_COUNT;

		return (KERN_SUCCESS);
	}

	default:
		return (KERN_FAILURE);
	}
}

/*
 *	Routine:	cpu_doshutdown
 *	Function:	Invoke the supplied shutdown handler for the given
 *			processor.
 */
void
cpu_doshutdown(void (*doshutdown) (processor_t),
               processor_t processor)
{
	doshutdown(processor);
}

/*
 *	Routine:	cpu_idle_tickle
 *	Function:	With interrupts disabled, invoke the per-CPU idle timer
 *			notify callback (if one is registered) so it can request
 *			a new idle timer deadline, then resync timer deadlines.
 */
void
cpu_idle_tickle(void)
{
	boolean_t intr;
	cpu_data_t *cpu_data_ptr;
	uint64_t new_idle_timeout_ticks = 0x0ULL;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr->idle_timer_notify != (void *)NULL) {
		((idle_timer_t)cpu_data_ptr->idle_timer_notify)(cpu_data_ptr->idle_timer_refcon, &new_idle_timeout_ticks);
		if (new_idle_timeout_ticks != 0x0ULL) {
			/* if a new idle timeout was requested set the new idle timer deadline */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		} else {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		}
		timer_resync_deadlines();
	}
	(void) ml_set_interrupts_enabled(intr);
}

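/*
 *	Routine:	cpu_handle_xcall
 *	Function:	Consume the cross call pending on this CPU: once the
 *			sender has published both call parameters, capture and
 *			clear them, clear SIGPxcall, and invoke the callback.
 */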
static void
cpu_handle_xcall(cpu_data_t *cpu_data_ptr)
{
	broadcastFunc xfunc;
	void *xparam;

	__c11_atomic_thread_fence(memory_order_acquire_smp);
	/* Come back around if cpu_signal_internal is running on another CPU and has just
	 * added SIGPxcall to the pending mask, but hasn't yet assigned the call params. */
	if (cpu_data_ptr->cpu_xcall_p0 != NULL && cpu_data_ptr->cpu_xcall_p1 != NULL) {
		xfunc = cpu_data_ptr->cpu_xcall_p0;
		xparam = cpu_data_ptr->cpu_xcall_p1;
		cpu_data_ptr->cpu_xcall_p0 = NULL;
		cpu_data_ptr->cpu_xcall_p1 = NULL;
		__c11_atomic_thread_fence(memory_order_acq_rel_smp);
		hw_atomic_and_noret(&cpu_data_ptr->cpu_signal, ~SIGPxcall);
		xfunc(xparam);
	}
}

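/*
 *	Routine:	cpu_broadcast_xcall
 *	Function:	Signal every other started CPU to run func(parm) via
 *			SIGPxcall, optionally running it on the calling CPU too
 *			(self_xcall), and return the number of CPUs on which the
 *			call was dispatched.  If synch is non-NULL it is set to
 *			real_ncpus and used as a rendezvous counter: the caller
 *			subtracts its own share and blocks until the count hits
 *			zero, so the broadcast function is expected to decrement
 *			*synch (and wake the waiter) on each CPU that runs it.
 */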
unsigned int
cpu_broadcast_xcall(uint32_t *synch,
                    boolean_t self_xcall,
                    broadcastFunc func,
                    void *parm)
{
	boolean_t intr;
	cpu_data_t *cpu_data_ptr;
	cpu_data_t *target_cpu_datap;
	unsigned int failsig;
	int cpu;
	int max_cpu;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	failsig = 0;

	if (synch != NULL) {
		*synch = real_ncpus;
		assert_wait((event_t)synch, THREAD_UNINT);
	}

	max_cpu = ml_get_max_cpu_number();
	for (cpu = 0; cpu <= max_cpu; cpu++) {
		target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;

		if ((target_cpu_datap == NULL) || (target_cpu_datap == cpu_data_ptr))
			continue;

		if (KERN_SUCCESS != cpu_signal(target_cpu_datap, SIGPxcall, (void *)func, parm)) {
			failsig++;
		}
	}

	if (self_xcall) {
		func(parm);
	}

	(void) ml_set_interrupts_enabled(intr);

	if (synch != NULL) {
		if (hw_atomic_sub(synch, (!self_xcall) ? failsig + 1 : failsig) == 0)
			clear_wait(current_thread(), THREAD_AWAKENED);
		else
			thread_block(THREAD_CONTINUE_NULL);
	}

	if (!self_xcall)
		return (real_ncpus - failsig - 1);
	else
		return (real_ncpus - failsig);
}

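/*
 *	Routine:	cpu_xcall
 *	Function:	Deliver a SIGPxcall to the CPU identified by cpu_number
 *			so that it invokes func(param) from its signal handler.
 *			Returns KERN_INVALID_ARGUMENT for an out-of-range slot or
 *			one with no cpu data, otherwise the result of cpu_signal().
 */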
kern_return_t
cpu_xcall(int cpu_number, broadcastFunc func, void *param)
{
	cpu_data_t *target_cpu_datap;

	if ((cpu_number < 0) || (cpu_number > ml_get_max_cpu_number()))
		return KERN_INVALID_ARGUMENT;

	target_cpu_datap = (cpu_data_t*)CpuDataEntries[cpu_number].cpu_data_vaddr;
	if (target_cpu_datap == NULL)
		return KERN_INVALID_ARGUMENT;

	return cpu_signal(target_cpu_datap, SIGPxcall, (void*)func, param);
}

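/*
 *	Routine:	cpu_signal_internal
 *	Function:	Atomically set the requested signal bit(s) in the target
 *			CPU's pending mask and, unless the target has signalling
 *			disabled, dispatch an IPI through the platform expert.
 *			For SIGPxcall the call parameters are also published; the
 *			sender spins (draining its own pending xcalls to avoid
 *			deadlock) until any earlier xcall to the target has been
 *			consumed.  Returns KERN_FAILURE if the target is disabled.
 */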
static kern_return_t
cpu_signal_internal(cpu_data_t *target_proc,
                    unsigned int signal,
                    void *p0,
                    void *p1,
                    boolean_t defer)
{
	unsigned int Check_SIGPdisabled;
	int current_signals;
	Boolean swap_success;
	boolean_t interruptible = ml_set_interrupts_enabled(FALSE);
	cpu_data_t *current_proc = getCpuDatap();

	/* We'll mandate that only IPIs meant to kick a core out of idle may ever be deferred. */
	if (defer) {
		assert(signal == SIGPnop);
	}

	if (current_proc != target_proc)
		Check_SIGPdisabled = SIGPdisabled;
	else
		Check_SIGPdisabled = 0;

	if (signal == SIGPxcall) {
		do {
			current_signals = target_proc->cpu_signal;
			if ((current_signals & SIGPdisabled) == SIGPdisabled) {
#if DEBUG || DEVELOPMENT
				target_proc->failed_signal = SIGPxcall;
				target_proc->failed_xcall = p0;
				OSIncrementAtomicLong(&target_proc->failed_signal_count);
#endif
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}
			swap_success = OSCompareAndSwap(current_signals & (~SIGPxcall), current_signals | SIGPxcall,
			    &target_proc->cpu_signal);

			/* Drain pending xcalls on this cpu; the CPU we're trying to xcall may in turn
			 * be trying to xcall us.  With interrupts disabled that would deadlock, so
			 * break the deadlock by draining our own pending xcalls. */
			if (!swap_success && (current_proc->cpu_signal & SIGPxcall))
				cpu_handle_xcall(current_proc);

		} while (!swap_success);

		target_proc->cpu_xcall_p0 = p0;
		target_proc->cpu_xcall_p1 = p1;
	} else {
		do {
			current_signals = target_proc->cpu_signal;
			if ((Check_SIGPdisabled != 0) && (current_signals & Check_SIGPdisabled) == SIGPdisabled) {
#if DEBUG || DEVELOPMENT
				target_proc->failed_signal = signal;
				OSIncrementAtomicLong(&target_proc->failed_signal_count);
#endif
				ml_set_interrupts_enabled(interruptible);
				return KERN_FAILURE;
			}

			swap_success = OSCompareAndSwap(current_signals, current_signals | signal,
			    &target_proc->cpu_signal);
		} while (!swap_success);
	}

	/*
	 * Issue DSB here to guarantee: 1) prior stores to pending signal mask and xcall params
	 * will be visible to other cores when the IPI is dispatched, and 2) subsequent
	 * instructions to signal the other cores will not execute until after the barrier.
	 * DMB would be sufficient to guarantee 1) but not 2).
	 */
	__builtin_arm_dsb(DSB_ISH);

	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		if (defer) {
			PE_cpu_signal_deferred(getCpuDatap()->cpu_id, target_proc->cpu_id);
		} else {
			PE_cpu_signal(getCpuDatap()->cpu_id, target_proc->cpu_id);
		}
	}

	ml_set_interrupts_enabled(interruptible);
	return (KERN_SUCCESS);
}

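/*
 *	Routine:	cpu_signal
 *	Function:	Send the given signal to target_proc immediately (the
 *			non-deferred form of cpu_signal_internal).
 */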
kern_return_t
cpu_signal(cpu_data_t *target_proc,
           unsigned int signal,
           void *p0,
           void *p1)
{
	return cpu_signal_internal(target_proc, signal, p0, p1, FALSE);
}

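/*
 *	Routine:	cpu_signal_deferred
 *	Function:	Post a SIGPnop to target_proc via the platform's deferred
 *			IPI path; per cpu_signal_internal, deferral is reserved
 *			for IPIs that merely kick a core out of idle.
 */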
kern_return_t
cpu_signal_deferred(cpu_data_t *target_proc)
{
	return cpu_signal_internal(target_proc, SIGPnop, NULL, NULL, TRUE);
}

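/*
 *	Routine:	cpu_signal_cancel
 *	Function:	Ask the platform expert to squash any deferred IPI still
 *			pending for target_proc, unless the target has signalling
 *			disabled.
 */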
void
cpu_signal_cancel(cpu_data_t *target_proc)
{
	/* TODO: Should we care about the state of a core as far as squashing deferred IPIs goes? */
	if (!(target_proc->cpu_signal & SIGPdisabled)) {
		PE_cpu_signal_cancel(getCpuDatap()->cpu_id, target_proc->cpu_id);
	}
}

void
cpu_signal_handler(void)
{
	cpu_signal_handler_internal(FALSE);
}

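/*
 *	Routine:	cpu_signal_handler_internal
 *	Function:	IPI handler.  Bumps the IPI statistics, sets or clears
 *			SIGPdisabled according to disable_signal, then loops
 *			draining every pending signal: timer (SIGPdec), kperf
 *			timer, cross calls, ASTs, debugger requests and, on
 *			ARMv7 SMP, lightweight cache flush/clean requests.
 */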
void
cpu_signal_handler_internal(boolean_t disable_signal)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	unsigned int cpu_signal;

	cpu_data_ptr->cpu_stat.ipi_cnt++;
	cpu_data_ptr->cpu_stat.ipi_cnt_wake++;

	SCHED_STATS_IPI(current_processor());

	cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0);

	if ((!(cpu_signal & SIGPdisabled)) && (disable_signal == TRUE))
		(void)hw_atomic_or(&cpu_data_ptr->cpu_signal, SIGPdisabled);
	else if ((cpu_signal & SIGPdisabled) && (disable_signal == FALSE))
		(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdisabled);

	while (cpu_signal & ~SIGPdisabled) {
		if (cpu_signal & SIGPdec) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdec);
			rtclock_intr(FALSE);
		}
#if KPERF
		if (cpu_signal & SIGPkptimer) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPkptimer);
			kperf_signal_handler((unsigned int)cpu_data_ptr->cpu_number);
		}
#endif
		if (cpu_signal & SIGPxcall) {
			cpu_handle_xcall(cpu_data_ptr);
		}
		if (cpu_signal & SIGPast) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPast);
			ast_check(cpu_data_ptr->cpu_processor);
		}
		if (cpu_signal & SIGPdebug) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPdebug);
			DebuggerXCall(cpu_data_ptr->cpu_int_state);
		}
#if __ARM_SMP__ && defined(ARMA7)
		if (cpu_signal & SIGPLWFlush) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWFlush);
			cache_xcall_handler(LWFlush);
		}
		if (cpu_signal & SIGPLWClean) {
			(void)hw_atomic_and(&cpu_data_ptr->cpu_signal, ~SIGPLWClean);
			cache_xcall_handler(LWClean);
		}
#endif

		cpu_signal = hw_atomic_or(&cpu_data_ptr->cpu_signal, 0);
	}
}

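/*
 *	Routine:	cpu_exit_wait
 *	Function:	Spin until the given CPU (other than the boot CPU) has
 *			written ARM_CPU_ON_SLEEP_PATH into its cpu_sleep_token,
 *			indicating it has reached the sleep/offline path.
 */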
void
cpu_exit_wait(int cpu)
{
	if (cpu != master_cpu) {
		cpu_data_t *cpu_data_ptr;

		cpu_data_ptr = CpuDataEntries[cpu].cpu_data_vaddr;
		while (!((*(volatile unsigned int*)&cpu_data_ptr->cpu_sleep_token) == ARM_CPU_ON_SLEEP_PATH)) {};
	}
}

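/*
 *	Routine:	cpu_machine_init
 *	Function:	Per-CPU machine initialization: set up the platform cache
 *			dispatch if one is registered, call PE_cpu_machine_init
 *			(passing whether this is the CPU's first start), mark the
 *			CPU as started and initialize its interrupts.
 */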
void
cpu_machine_init(void)
{
	static boolean_t started = FALSE;
	cpu_data_t *cpu_data_ptr;

	cpu_data_ptr = getCpuDatap();
	started = ((cpu_data_ptr->cpu_flags & StartedState) == StartedState);
	if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
		platform_cache_init();
	PE_cpu_machine_init(cpu_data_ptr->cpu_id, !started);
	cpu_data_ptr->cpu_flags |= StartedState;
	ml_init_interrupt();
}

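/*
 *	Routine:	cpu_processor_alloc
 *	Function:	Return the static BootProcessor for the boot CPU;
 *			otherwise allocate and zero a processor structure,
 *			returning NULL if the allocation fails.
 */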
processor_t
cpu_processor_alloc(boolean_t is_boot_cpu)
{
	processor_t proc;

	if (is_boot_cpu)
		return &BootProcessor;

	proc = kalloc(sizeof(*proc));
	if (!proc)
		return NULL;

	bzero((void *) proc, sizeof(*proc));
	return proc;
}

void
cpu_processor_free(processor_t proc)
{
	if (proc != NULL && proc != &BootProcessor)
		kfree((void *) proc, sizeof(*proc));
}

processor_t
current_processor(void)
{
	return getCpuDatap()->cpu_processor;
}

processor_t
cpu_to_processor(int cpu)
{
	cpu_data_t *cpu_data = cpu_datap(cpu);
	if (cpu_data != NULL)
		return cpu_data->cpu_processor;
	else
		return NULL;
}

cpu_data_t *
processor_to_cpu_datap(processor_t processor)
{
	cpu_data_t *target_cpu_datap;

	assert(processor->cpu_id < MAX_CPUS);
	assert(CpuDataEntries[processor->cpu_id].cpu_data_vaddr != NULL);

	target_cpu_datap = (cpu_data_t*)CpuDataEntries[processor->cpu_id].cpu_data_vaddr;
	assert(target_cpu_datap->cpu_processor == processor);

	return target_cpu_datap;
}

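/*
 *	Routine:	cpu_data_alloc
 *	Function:	Return the static BootCpuData for the boot CPU; for other
 *			CPUs allocate and zero a cpu_data_t, allocate its stacks,
 *			and attach a freshly allocated processor structure.
 *			Panics instead of returning on allocation failure.
 */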
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t *cpu_data_ptr = NULL;

	if (is_boot_cpu)
		cpu_data_ptr = &BootCpuData;
	else {
		if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS)
			goto cpu_data_alloc_error;

		bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));

		cpu_stack_alloc(cpu_data_ptr);
	}

	cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
	if (cpu_data_ptr->cpu_processor == (struct processor *)NULL)
		goto cpu_data_alloc_error;

	return cpu_data_ptr;

cpu_data_alloc_error:
	panic("cpu_data_alloc() failed\n");
	return (cpu_data_t *)NULL;
}

ast_t *
ast_pending(void)
{
	return (&getCpuDatap()->cpu_pending_ast);
}

cpu_type_t
slot_type(int slot_num)
{
	return (cpu_datap(slot_num)->cpu_type);
}

cpu_subtype_t
slot_subtype(int slot_num)
{
	return (cpu_datap(slot_num)->cpu_subtype);
}

cpu_threadtype_t
slot_threadtype(int slot_num)
{
	return (cpu_datap(slot_num)->cpu_threadtype);
}

cpu_type_t
cpu_type(void)
{
	return (getCpuDatap()->cpu_type);
}

cpu_subtype_t
cpu_subtype(void)
{
	return (getCpuDatap()->cpu_subtype);
}

cpu_threadtype_t
cpu_threadtype(void)
{
	return (getCpuDatap()->cpu_threadtype);
}

int
cpu_number(void)
{
	return (getCpuDatap()->cpu_number);
}

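/*
 *	Routine:	ml_get_wake_timebase
 *	Function:	Return wake_abstime, the timebase value recorded at the
 *			last wake from sleep (set elsewhere in the sleep/wake
 *			path, not in this file).
 */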
uint64_t
ml_get_wake_timebase(void)
{
	return wake_abstime;
}