1/*
2 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29extern "C" {
30#include <machine/machine_routines.h>
31#include <pexpert/pexpert.h>
32#include <kern/cpu_number.h>
33extern void kperf_kernel_configure(char *);
34}
35
36#include <IOKit/IOLib.h>
37#include <IOKit/IOPlatformExpert.h>
38#include <IOKit/pwr_mgt/RootDomain.h>
39#include <IOKit/pwr_mgt/IOPMPrivate.h>
40#include <IOKit/IOUserClient.h>
41#include <IOKit/IOKitKeysPrivate.h>
42#include <IOKit/IOCPU.h>
43#include "IOKitKernelInternal.h"
44
45/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
46#include <kern/queue.h>
47
48extern "C" void console_suspend();
49extern "C" void console_resume();
50extern "C" void sched_override_recommended_cores_for_sleep(void);
51extern "C" void sched_restore_recommended_cores_after_sleep(void);
52
/*
 * Signature for a platform action callback.  refcon0/refcon1 are the values
 * captured at registration time; priority is the (absolute) priority the
 * action was matched at; param1..param3 are per-invocation arguments.
 */
typedef kern_return_t (*iocpu_platform_action_t)(void * refcon0, void * refcon1, uint32_t priority,
                                                 void * param1, void * param2, void * param3,
                                                 const char * name);

/*
 * One registered platform action, linked into a gActionQueues[] list in
 * ascending |priority| order (see iocpu_add_platform_action).
 */
struct iocpu_platform_action_entry
{
    queue_chain_t link;                 // linkage on its gActionQueues[] list
    iocpu_platform_action_t action;     // callback to invoke
    int32_t priority;                   // negative => installed in reverse order (wake/active)
    const char * name;                  // service name, for logging
    void * refcon0;                     // usually the IOService that owns the action
    void * refcon1;                     // usually the platform-function OSSymbol
    boolean_t callout_in_progress;      // guards against nested invocation (panic path)
    struct iocpu_platform_action_entry * alloc_list;
};
typedef struct iocpu_platform_action_entry iocpu_platform_action_entry_t;
69
70/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
71
72static IOLock *gIOCPUsLock;
73static OSArray *gIOCPUs;
74static const OSSymbol *gIOCPUStateKey;
75static OSString *gIOCPUStateNames[kIOCPUStateCount];
76
/* Indices into gActionQueues[] / gActionSymbols[], one per action type. */
enum
{
    kQueueSleep       = 0,
    kQueueWake        = 1,
    kQueueQuiesce     = 2,
    kQueueActive      = 3,
    kQueueHaltRestart = 4,
    kQueuePanic       = 5,
    kQueueCount       = 6
};
87
88const OSSymbol * gIOPlatformSleepActionKey;
89const OSSymbol * gIOPlatformWakeActionKey;
90const OSSymbol * gIOPlatformQuiesceActionKey;
91const OSSymbol * gIOPlatformActiveActionKey;
92const OSSymbol * gIOPlatformHaltRestartActionKey;
93const OSSymbol * gIOPlatformPanicActionKey;
94
95static queue_head_t gActionQueues[kQueueCount];
96static const OSSymbol * gActionSymbols[kQueueCount];
97
98/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
99
/*
 * Insert an entry into a platform-action queue, keeping the queue sorted in
 * ascending `priority` order.  An entry with equal priority to existing ones
 * is placed after them; the highest-priority entry ends up at the tail.
 */
static void
iocpu_add_platform_action(queue_head_t * queue, iocpu_platform_action_entry_t * entry)
{
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        if (next->priority > entry->priority)
        {
            queue_insert_before(queue, entry, next, iocpu_platform_action_entry_t *, link);
            return;
        }
    }
    queue_enter(queue, entry, iocpu_platform_action_entry_t *, link); // at tail
}
115
/* Unlink an entry from whichever action queue it is on (does not free it). */
static void
iocpu_remove_platform_action(iocpu_platform_action_entry_t * entry)
{
    remque(&entry->link);
}
121
/*
 * Invoke every action on `queue` whose absolute priority lies within
 * [first_priority, last_priority], in queue order.
 *
 * Entries installed with a negative priority (wake/active actions) are
 * compared and invoked using |priority|.  When allow_nested_callouts is
 * FALSE (the panic path), an entry already marked callout_in_progress is
 * silently skipped so a panic taken inside an action cannot re-enter it.
 *
 * Returns the first non-KERN_SUCCESS result from any action, or
 * KERN_SUCCESS; all matching actions are attempted regardless of failures.
 */
static kern_return_t
iocpu_run_platform_actions(queue_head_t * queue, uint32_t first_priority, uint32_t last_priority,
                           void * param1, void * param2, void * param3, boolean_t allow_nested_callouts)
{
    kern_return_t ret = KERN_SUCCESS;
    kern_return_t result = KERN_SUCCESS;
    iocpu_platform_action_entry_t * next;

    queue_iterate(queue, next, iocpu_platform_action_entry_t *, link)
    {
        uint32_t pri = (next->priority < 0) ? -next->priority : next->priority;
        if ((pri >= first_priority) && (pri <= last_priority))
        {
            //kprintf("[%p]", next->action);
            if (!allow_nested_callouts && !next->callout_in_progress)
            {
                next->callout_in_progress = TRUE;
                ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
                next->callout_in_progress = FALSE;
            }
            else if (allow_nested_callouts)
            {
                ret = (*next->action)(next->refcon0, next->refcon1, pri, param1, param2, param3, next->name);
            }
        }
        // Latch the first failure; later successes must not overwrite it.
        if (KERN_SUCCESS == result)
            result = ret;
    }
    return (result);
}
152
153/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
154
155extern "C" kern_return_t
156IOCPURunPlatformQuiesceActions(void)
157{
158 return (iocpu_run_platform_actions(&gActionQueues[kQueueQuiesce], 0, 0U-1,
159 NULL, NULL, NULL, TRUE));
160}
161
162extern "C" kern_return_t
163IOCPURunPlatformActiveActions(void)
164{
165 return (iocpu_run_platform_actions(&gActionQueues[kQueueActive], 0, 0U-1,
166 NULL, NULL, NULL, TRUE));
167}
168
169extern "C" kern_return_t
170IOCPURunPlatformHaltRestartActions(uint32_t message)
171{
172 if (!gActionQueues[kQueueHaltRestart].next) return (kIOReturnNotReady);
173 return (iocpu_run_platform_actions(&gActionQueues[kQueueHaltRestart], 0, 0U-1,
174 (void *)(uintptr_t) message, NULL, NULL, TRUE));
175}
176
/*
 * Run every registered panic action with `message` as param1.  Nested
 * callouts are disallowed so a panic taken inside a panic action cannot
 * recurse.  Returns kIOReturnNotReady before IOCPUInitialize() has run.
 */
extern "C" kern_return_t
IOCPURunPlatformPanicActions(uint32_t message)
{
    // Don't allow nested calls of panic actions
    if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
    return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
                                       (void *)(uintptr_t) message, NULL, NULL, FALSE));
}
185
186
/*
 * Run the panic actions with a kPEPanicSync request, handing each one a
 * PE_panic_save_context_t describing the region [addr+offset, +len] to
 * flush.  Like IOCPURunPlatformPanicActions, nested callouts are refused.
 *
 * NOTE: `context` lives on this stack frame; actions must not retain it
 * past the call.
 */
extern "C" kern_return_t
IOCPURunPlatformPanicSyncAction(void *addr, uint32_t offset, uint32_t len)
{
    PE_panic_save_context_t context = {
        .psc_buffer = addr,
        .psc_offset = offset,
        .psc_length = len
    };

    // Don't allow nested calls of panic actions
    if (!gActionQueues[kQueuePanic].next) return (kIOReturnNotReady);
    return (iocpu_run_platform_actions(&gActionQueues[kQueuePanic], 0, 0U-1,
                                       (void *)(uintptr_t)(kPEPanicSync), &context, NULL, FALSE));

}
202
203/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
204
205static kern_return_t
206IOServicePlatformAction(void * refcon0, void * refcon1, uint32_t priority,
207 void * param1, void * param2, void * param3,
208 const char * service_name)
209{
210 IOReturn ret;
211 IOService * service = (IOService *) refcon0;
212 const OSSymbol * function = (const OSSymbol *) refcon1;
213
214 kprintf("%s -> %s\n", function->getCStringNoCopy(), service_name);
215
216 ret = service->callPlatformFunction(function, false,
217 (void *)(uintptr_t) priority, param1, param2, param3);
218
219 return (ret);
220}
221
222static void
223IOInstallServicePlatformAction(IOService * service, uint32_t qidx)
224{
225 iocpu_platform_action_entry_t * entry;
226 OSNumber * num;
227 uint32_t priority;
228 const OSSymbol * key = gActionSymbols[qidx];
229 queue_head_t * queue = &gActionQueues[qidx];
230 bool reverse;
231 bool uniq;
232
233 num = OSDynamicCast(OSNumber, service->getProperty(key));
234 if (!num) return;
235
236 reverse = false;
237 uniq = false;
238 switch (qidx)
239 {
240 case kQueueWake:
241 case kQueueActive:
242 reverse = true;
243 break;
244 case kQueueHaltRestart:
245 case kQueuePanic:
246 uniq = true;
247 break;
248 }
249 if (uniq)
250 {
251 queue_iterate(queue, entry, iocpu_platform_action_entry_t *, link)
252 {
253 if (service == entry->refcon0) return;
254 }
255 }
256
257 entry = IONew(iocpu_platform_action_entry_t, 1);
258 entry->action = &IOServicePlatformAction;
259 entry->name = service->getName();
260 priority = num->unsigned32BitValue();
261 if (reverse)
262 entry->priority = -priority;
263 else
264 entry->priority = priority;
265 entry->refcon0 = service;
266 entry->refcon1 = (void *) key;
267 entry->callout_in_progress = FALSE;
268
269 iocpu_add_platform_action(queue, entry);
270}
271
272/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
273
/*
 * One-time setup for the IOCPU subsystem: allocates the CPU list and its
 * lock, initializes the six platform-action queues, and interns the
 * OSSymbol/OSString constants used for CPU state and action properties.
 * Must run before any IOCPU registers or any platform action is installed.
 */
void
IOCPUInitialize(void)
{
    gIOCPUsLock = IOLockAlloc();
    gIOCPUs     = OSArray::withCapacity(1);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
    {
        queue_init(&gActionQueues[qidx]);
    }

    gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");

    // Human-readable names indexed by kIOCPUState* for the IOCPUState property.
    gIOCPUStateNames[kIOCPUStateUnregistered] =
        OSString::withCStringNoCopy("Unregistered");
    gIOCPUStateNames[kIOCPUStateUninitalized] =
        OSString::withCStringNoCopy("Uninitalized");
    gIOCPUStateNames[kIOCPUStateStopped] =
        OSString::withCStringNoCopy("Stopped");
    gIOCPUStateNames[kIOCPUStateRunning] =
        OSString::withCStringNoCopy("Running");

    gIOPlatformSleepActionKey = gActionSymbols[kQueueSleep]
        = OSSymbol::withCStringNoCopy(kIOPlatformSleepActionKey);
    gIOPlatformWakeActionKey = gActionSymbols[kQueueWake]
        = OSSymbol::withCStringNoCopy(kIOPlatformWakeActionKey);
    gIOPlatformQuiesceActionKey = gActionSymbols[kQueueQuiesce]
        = OSSymbol::withCStringNoCopy(kIOPlatformQuiesceActionKey);
    gIOPlatformActiveActionKey = gActionSymbols[kQueueActive]
        = OSSymbol::withCStringNoCopy(kIOPlatformActiveActionKey);
    gIOPlatformHaltRestartActionKey = gActionSymbols[kQueueHaltRestart]
        = OSSymbol::withCStringNoCopy(kIOPlatformHaltRestartActionKey);
    gIOPlatformPanicActionKey = gActionSymbols[kQueuePanic]
        = OSSymbol::withCStringNoCopy(kIOPlatformPanicActionKey);
}
309
310IOReturn
311IOInstallServicePlatformActions(IOService * service)
312{
313 IOLockLock(gIOCPUsLock);
314
315 IOInstallServicePlatformAction(service, kQueueHaltRestart);
316 IOInstallServicePlatformAction(service, kQueuePanic);
317
318 IOLockUnlock(gIOCPUsLock);
319
320 return (kIOReturnSuccess);
321}
322
/*
 * Remove and free every platform-action entry registered for `service`
 * across all queues.  Uses the advance-before-delete idiom: `next` is read
 * before `entry` may be unlinked and freed, so iteration stays valid.
 */
IOReturn
IORemoveServicePlatformActions(IOService * service)
{
    iocpu_platform_action_entry_t * entry;
    iocpu_platform_action_entry_t * next;

    IOLockLock(gIOCPUsLock);

    for (uint32_t qidx = kQueueSleep; qidx < kQueueCount; qidx++)
    {
        next = (typeof(entry)) queue_first(&gActionQueues[qidx]);
        while (!queue_end(&gActionQueues[qidx], &next->link))
        {
            entry = next;
            next = (typeof(entry)) queue_next(&entry->link);
            if (service == entry->refcon0)
            {
                iocpu_remove_platform_action(entry);
                IODelete(entry, iocpu_platform_action_entry_t, 1);
            }
        }
    }

    IOLockUnlock(gIOCPUsLock);

    return (kIOReturnSuccess);
}
350
351
352/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
353
354kern_return_t PE_cpu_start(cpu_id_t target,
355 vm_offset_t start_paddr, vm_offset_t arg_paddr)
356{
357 IOCPU *targetCPU = (IOCPU *)target;
358
359 if (targetCPU == NULL) return KERN_FAILURE;
360 return targetCPU->startCPU(start_paddr, arg_paddr);
361}
362
363void PE_cpu_halt(cpu_id_t target)
364{
365 IOCPU *targetCPU = (IOCPU *)target;
366
367 targetCPU->haltCPU();
368}
369
370void PE_cpu_signal(cpu_id_t source, cpu_id_t target)
371{
372 IOCPU *sourceCPU = (IOCPU *)source;
373 IOCPU *targetCPU = (IOCPU *)target;
374
375 sourceCPU->signalCPU(targetCPU);
376}
377
/* Platform-expert bridge: send a deferred (coalescable) IPI from `source`
 * to `target`; falls back to an immediate IPI if the IOCPU subclass does
 * not override signalCPUDeferred. */
void PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
{
    IOCPU *sourceCPU = (IOCPU *)source;
    IOCPU *targetCPU = (IOCPU *)target;

    sourceCPU->signalCPUDeferred(targetCPU);
}
385
386void PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
387{
388 IOCPU *sourceCPU = (IOCPU *)source;
389 IOCPU *targetCPU = (IOCPU *)target;
390
391 sourceCPU->signalCPUCancel(targetCPU);
392}
393
/*
 * Platform-expert bridge: machine-level init of a CPU.  `bootb` is TRUE on
 * the cold-boot path.  Panics on a non-IOCPU target.  On ARM, clears the
 * "quiescing" flag once the master CPU re-initializes after sleep.
 */
void PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
    IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

    if (targetCPU == NULL)
        panic("%s: invalid target CPU %p", __func__, target);

    targetCPU->initCPU(bootb);
#if defined(__arm__) || defined(__arm64__)
    if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) ml_set_is_quiescing(false);
#endif /* defined(__arm__) || defined(__arm64__) */
}
406
/*
 * Platform-expert bridge: quiesce a CPU for sleep.  On ARM, sets the
 * "quiescing" flag before the master CPU quiesces (cleared again in
 * PE_cpu_machine_init on wake).
 */
void PE_cpu_machine_quiesce(cpu_id_t target)
{
    IOCPU *targetCPU = (IOCPU*)target;
#if defined(__arm__) || defined(__arm64__)
    if (targetCPU->getCPUNumber() == (UInt32)master_cpu) ml_set_is_quiescing(true);
#endif /* defined(__arm__) || defined(__arm64__) */
    targetCPU->quiesceCPU();
}
415
#if defined(__arm__) || defined(__arm64__)
/* Single registered performance-monitor interrupt (PMI) handler. */
static perfmon_interrupt_handler_func pmi_handler = 0;

/* Record the PMI handler to be attached when PE_cpu_perfmon_interrupt_enable
 * is called.  Always succeeds; a later call replaces the previous handler. */
kern_return_t PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
    pmi_handler = handler;

    return KERN_SUCCESS;
}

/*
 * Enable or disable the PMI (interrupt source 1) on `target`'s provider.
 * Enabling registers the previously installed handler first.
 * NOTE(review): the disable path only disables the interrupt; it does not
 * unregister the handler — confirm that is intended.
 */
void PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
    IOCPU *targetCPU = (IOCPU*)target;

    if (targetCPU == nullptr) {
        return;
    }

    if (enable) {
        targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)pmi_handler, 0);
        targetCPU->getProvider()->enableInterrupt(1);
    } else {
        targetCPU->getProvider()->disableInterrupt(1);
    }
}
#endif
442
443/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
444
/* IOCPU is an abstract IOService subclass; reserve padding slots for
 * future binary-compatible expansion of the vtable. */
#define super IOService

OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);
456
457/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
458
/*
 * System-sleep entry point.  Runs on the boot CPU:
 *   1. harvest sleep/wake/quiesce/active action properties from every
 *      service in the plane and install them on the action queues;
 *   2. run the sleep actions;
 *   3. halt all non-boot CPUs, then the boot CPU last;
 *   -- system sleeps; execution resumes below on wake --
 *   4. run the wake actions, free the per-sleep action entries, and
 *      restart the secondary CPUs.
 */
void IOCPUSleepKernel(void)
{
    long cnt, numCPUs;
    IOCPU *target;
    IOCPU *bootCPU = NULL;
    IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

    kprintf("IOCPUSleepKernel\n");
#if defined(__arm64__)
    sched_override_recommended_cores_for_sleep();
#endif

    IORegistryIterator * iter;
    OSOrderedSet * all;
    IOService * service;

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );

    iter = IORegistryIterator::iterateOver( gIOServicePlane,
                                            kIORegistryIterateRecursively );
    if( iter)
    {
        all = 0;
        do
        {
            // Re-iterate until the registry was not modified mid-walk.
            if (all)
                all->release();
            all = iter->iterateAll();
        }
        while (!iter->isValid());
        iter->release();

        if (all)
        {
            while((service = (IOService *) all->getFirstObject()))
            {
                // Install only the per-sleep queues (sleep..active);
                // halt/restart and panic entries are persistent.
                for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
                {
                    IOInstallServicePlatformAction(service, qidx);
                }
                all->removeObject(service);
            }
            all->release();
        }
    }

    iocpu_run_platform_actions(&gActionQueues[kQueueSleep], 0, 0U-1,
                               NULL, NULL, NULL, TRUE);

    rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

    numCPUs = gIOCPUs->getCount();
    // Sleep the CPUs.
    cnt = numCPUs;
    while (cnt--)
    {
        // NOTE(review): the OSDynamicCast result is dereferenced without a
        // NULL check; assumes gIOCPUs contains only IOCPU objects.
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // We make certain that the bootCPU is the last to sleep
        // We'll skip it for now, and halt it after finishing the
        // non-boot CPU's.
        if (target->getCPUNumber() == (UInt32)master_cpu)
        {
            bootCPU = target;
        } else if (target->getCPUState() == kIOCPUStateRunning)
        {
            target->haltCPU();
        }
    }

    assert(bootCPU != NULL);
    assert(cpu_number() == master_cpu);

    console_suspend();

    rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
    rootDomain->stop_watchdog_timer();

    // Now sleep the boot CPU.  Control returns here on wake.
    bootCPU->haltCPU();

    rootDomain->start_watchdog_timer();
    rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );

    console_resume();

    iocpu_run_platform_actions(&gActionQueues[kQueueWake], 0, 0U-1,
                               NULL, NULL, NULL, TRUE);

    // Tear down the per-sleep action entries installed above.
    iocpu_platform_action_entry_t * entry;
    for (uint32_t qidx = kQueueSleep; qidx <= kQueueActive; qidx++)
    {
        while (!(queue_empty(&gActionQueues[qidx])))
        {
            entry = (typeof(entry)) queue_first(&gActionQueues[qidx]);
            iocpu_remove_platform_action(entry);
            IODelete(entry, iocpu_platform_action_entry_t, 1);
        }
    }

    rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

    // Wake the other CPUs.
    for (cnt = 0; cnt < numCPUs; cnt++)
    {
        target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

        // Skip the already-woken boot CPU.
        if (target->getCPUNumber() != (UInt32)master_cpu) {
            // A CPU still "running" across sleep indicates it never slept.
            if (target->getCPUState() == kIOCPUStateRunning)
                panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));

            if (target->getCPUState() == kIOCPUStateStopped)
                processor_start(target->getMachProcessor());
        }
    }

#if defined(__arm64__)
    sched_restore_recommended_cores_after_sleep();
#endif
}
580
581bool IOCPU::start(IOService *provider)
582{
583 OSData *busFrequency, *cpuFrequency, *timebaseFrequency;
584
585 if (!super::start(provider)) return false;
586
587 _cpuGroup = gIOCPUs;
588 cpuNub = provider;
589
590 IOLockLock(gIOCPUsLock);
591 gIOCPUs->setObject(this);
592 IOLockUnlock(gIOCPUsLock);
593
594 // Correct the bus, cpu and timebase frequencies in the device tree.
595 if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
596 busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, 4);
597 } else {
598 busFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.bus_frequency_hz, 8);
599 }
600 provider->setProperty("bus-frequency", busFrequency);
601 busFrequency->release();
602
603 if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
604 cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, 4);
605 } else {
606 cpuFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.cpu_frequency_hz, 8);
607 }
608 provider->setProperty("clock-frequency", cpuFrequency);
609 cpuFrequency->release();
610
611 timebaseFrequency = OSData::withBytesNoCopy((void *)&gPEClockFrequencyInfo.timebase_frequency_hz, 4);
612 provider->setProperty("timebase-frequency", timebaseFrequency);
613 timebaseFrequency->release();
614
615 super::setProperty("IOCPUID", getRegistryEntryID(), sizeof(uint64_t)*8);
616
617 setCPUNumber(0);
618 setCPUState(kIOCPUStateUnregistered);
619
620 return true;
621}
622
623OSObject *IOCPU::getProperty(const OSSymbol *aKey) const
624{
625 if (aKey == gIOCPUStateKey) return gIOCPUStateNames[_cpuState];
626
627 return super::getProperty(aKey);
628}
629
630bool IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
631{
632 if (aKey == gIOCPUStateKey) {
633 return false;
634 }
635
636 return super::setProperty(aKey, anObject);
637}
638
639bool IOCPU::serializeProperties(OSSerialize *serialize) const
640{
641 bool result;
642 OSDictionary *dict = dictionaryWithProperties();
643 if (!dict) return false;
644 dict->setObject(gIOCPUStateKey, gIOCPUStateNames[_cpuState]);
645 result = dict->serialize(serialize);
646 dict->release();
647 return result;
648}
649
/*
 * User-client property setter.  Only an IOCPUState string is recognized,
 * and only for administrator callers.  Note that IOCPU::setProperty
 * rejects gIOCPUStateKey, so the success path here relies on a subclass
 * overriding setProperty to accept state changes.
 */
IOReturn IOCPU::setProperties(OSObject *properties)
{
    OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
    OSString *stateStr;
    IOReturn result;

    if (dict == 0) return kIOReturnUnsupported;

    stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey));
    if (stateStr != 0) {
        // Changing CPU state requires administrator privilege.
        result = IOUserClient::clientHasPrivilege(current_task(), kIOClientPrivilegeAdministrator);
        if (result != kIOReturnSuccess) return result;

        if (setProperty(gIOCPUStateKey, stateStr)) return kIOReturnSuccess;

        return kIOReturnUnsupported;
    }

    return kIOReturnUnsupported;
}
670
/* Send an IPI to `target`; no-op in the abstract base, overridden by
 * platform subclasses. */
void IOCPU::signalCPU(IOCPU */*target*/)
{
}
674
/* Send a deferred (coalescable) IPI to `target`. */
void IOCPU::signalCPUDeferred(IOCPU *target)
{
    // Our CPU may not support deferred IPIs,
    // so send a regular IPI by default
    signalCPU(target);
}
681
/* Cancel a pending deferred IPI to `target`. */
void IOCPU::signalCPUCancel(IOCPU */*target*/)
{
    // Meant to cancel signals sent by
    // signalCPUDeferred; unsupported
    // by default
}
688
/* Enable/disable this CPU's timebase; no-op in the abstract base. */
void IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}
692
/* Return this CPU's logical number (set via setCPUNumber). */
UInt32 IOCPU::getCPUNumber(void)
{
    return _cpuNumber;
}
697
/* Record this CPU's logical number and mirror it as the IOCPUNumber
 * property in the registry. */
void IOCPU::setCPUNumber(UInt32 cpuNumber)
{
    _cpuNumber = cpuNumber;
    super::setProperty("IOCPUNumber", _cpuNumber, 32);
}
703
/* Return the current kIOCPUState* value. */
UInt32 IOCPU::getCPUState(void)
{
    return _cpuState;
}
708
/* Set the CPU state; out-of-range values are silently ignored. */
void IOCPU::setCPUState(UInt32 cpuState)
{
    if (cpuState < kIOCPUStateCount) {
        _cpuState = cpuState;
    }
}
715
/* Return the CPU group this CPU belongs to (gIOCPUs, set in start()). */
OSArray *IOCPU::getCPUGroup(void)
{
    return _cpuGroup;
}
720
/* Return the number of CPUs in this CPU's group. */
UInt32 IOCPU::getCPUGroupSize(void)
{
    return _cpuGroup->getCount();
}
725
/* Return the Mach processor_t backing this IOCPU. */
processor_t IOCPU::getMachProcessor(void)
{
    return machProcessor;
}
730
731
732/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
733
/* IOCPUInterruptController metaclass boilerplate; reserved slots padded
 * for future binary-compatible vtable growth. */
#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);
745
746
747/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
748
/* Convenience initializer: one interrupt source per CPU. */
IOReturn IOCPUInterruptController::initCPUInterruptController(int sources)
{
    return initCPUInterruptController(sources, sources);
}
753
754IOReturn IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
755{
756 int cnt;
757
758 if (!super::init()) return kIOReturnInvalid;
759
760 numSources = sources;
761 numCPUs = cpus;
762
763 vectors = (IOInterruptVector *)IOMalloc(numSources * sizeof(IOInterruptVector));
764 if (vectors == 0) return kIOReturnNoMemory;
765 bzero(vectors, numSources * sizeof(IOInterruptVector));
766
767 // Allocate a lock for each vector
768 for (cnt = 0; cnt < numSources; cnt++) {
769 vectors[cnt].interruptLock = IOLockAlloc();
770 if (vectors[cnt].interruptLock == NULL) {
771 for (cnt = 0; cnt < numSources; cnt++) {
772 if (vectors[cnt].interruptLock != NULL)
773 IOLockFree(vectors[cnt].interruptLock);
774 }
775 return kIOReturnNoResources;
776 }
777 }
778
779 ml_init_max_cpus(numSources);
780
781#if KPERF
782 /*
783 * kperf allocates based on the number of CPUs and requires them to all be
784 * accounted for.
785 */
786 boolean_t found_kperf = FALSE;
787 char kperf_config_str[64];
788 found_kperf = PE_parse_boot_arg_str("kperf", kperf_config_str, sizeof(kperf_config_str));
789 if (found_kperf && kperf_config_str[0] != '\0') {
790 kperf_kernel_configure(kperf_config_str);
791 }
792#endif
793
794 return kIOReturnSuccess;
795}
796
/* Publish this controller in the registry and register it with the
 * platform expert under the global CPU interrupt controller name. */
void IOCPUInterruptController::registerCPUInterruptController(void)
{
    registerService();

    getPlatform()->registerInterruptController(gPlatformInterruptControllerName,
                                               this);
}
804
/*
 * Attach interrupt controller/specifier property arrays to `service`:
 * one entry per source, every entry naming this platform controller, with
 * the source index encoded as an OSData-wrapped long.  No-op if the
 * service already has both properties.
 */
void IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
{
    int cnt;
    OSArray *controller;
    OSArray *specifier;
    OSData *tmpData;
    long tmpLong;

    if ((service->getProperty(gIOInterruptControllersKey) != 0) &&
        (service->getProperty(gIOInterruptSpecifiersKey) != 0))
        return;

    // Create the interrupt specifer array.
    specifier = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        tmpLong = cnt;
        tmpData = OSData::withBytes(&tmpLong, sizeof(tmpLong));
        specifier->setObject(tmpData);
        tmpData->release();
    };

    // Create the interrupt controller array.
    controller = OSArray::withCapacity(numSources);
    for (cnt = 0; cnt < numSources; cnt++) {
        controller->setObject(gPlatformInterruptControllerName);
    }

    // Put the two arrays into the property table.
    service->setProperty(gIOInterruptControllersKey, controller);
    service->setProperty(gIOInterruptSpecifiersKey, specifier);
    controller->release();
    specifier->release();
}
838
/*
 * Hook `cpu` up to this controller's handleInterrupt and count it as
 * enabled.  vectors[0].interruptLock doubles as the lock protecting
 * enabledCPUs; when the last CPU arrives, waiters blocked in
 * registerInterrupt() are woken via thread_wakeup(this).
 */
void IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
{
    IOInterruptHandler handler = OSMemberFunctionCast(
        IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);

    assert(numCPUs > 0);

    ml_install_interrupt_handler(cpu, cpu->getCPUNumber(), this, handler, 0);

    IOTakeLock(vectors[0].interruptLock);
    ++enabledCPUs;

    if (enabledCPUs == numCPUs) {
        IOService::cpusRunning();
        thread_wakeup(this);
    }
    IOUnlock(vectors[0].interruptLock);
}
857
/*
 * Register a client handler on `source`.  The vector starts hard- and
 * soft-disabled.  After registration, blocks the caller until every CPU
 * has called enableCPUInterrupt() (the assert_wait/thread_block pair is
 * paired with the thread_wakeup in enableCPUInterrupt).
 *
 * Returns kIOReturnNoResources for an out-of-range source or one already
 * registered.
 */
IOReturn IOCPUInterruptController::registerInterrupt(IOService *nub,
                                                     int source,
                                                     void *target,
                                                     IOInterruptHandler handler,
                                                     void *refCon)
{
    IOInterruptVector *vector;

    if (source >= numSources) return kIOReturnNoResources;

    vector = &vectors[source];

    // Get the lock for this vector.
    IOTakeLock(vector->interruptLock);

    // Make sure the vector is not in use.
    if (vector->interruptRegistered) {
        IOUnlock(vector->interruptLock);
        return kIOReturnNoResources;
    }

    // Fill in vector with the client's info.
    vector->handler = handler;
    vector->nub     = nub;
    vector->source  = source;
    vector->target  = target;
    vector->refCon  = refCon;

    // Get the vector ready.  It starts hard disabled.
    vector->interruptDisabledHard = 1;
    vector->interruptDisabledSoft = 1;
    vector->interruptRegistered   = 1;

    IOUnlock(vector->interruptLock);

    // vectors[0].interruptLock protects enabledCPUs (see enableCPUInterrupt).
    IOTakeLock(vectors[0].interruptLock);
    if (enabledCPUs != numCPUs) {
        assert_wait(this, THREAD_UNINT);
        IOUnlock(vectors[0].interruptLock);
        thread_block(THREAD_CONTINUE_NULL);
    } else
        IOUnlock(vectors[0].interruptLock);

    return kIOReturnSuccess;
}
903
904IOReturn IOCPUInterruptController::getInterruptType(IOService */*nub*/,
905 int /*source*/,
906 int *interruptType)
907{
908 if (interruptType == 0) return kIOReturnBadArgument;
909
910 *interruptType = kIOInterruptTypeLevel;
911
912 return kIOReturnSuccess;
913}
914
/* Per-source enable is a no-op for CPU interrupts; always succeeds. */
IOReturn IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
                                                   int /*source*/)
{
//  ml_set_interrupts_enabled(true);
    return kIOReturnSuccess;
}
921
/* Per-source disable is a no-op for CPU interrupts; always succeeds. */
IOReturn IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
                                                    int /*source*/)
{
//  ml_set_interrupts_enabled(false);
    return kIOReturnSuccess;
}
928
/* Trigger a software interrupt on the current CPU. */
IOReturn IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
                                                  int /*source*/)
{
    ml_cause_interrupt();
    return kIOReturnSuccess;
}
935
/*
 * Interrupt dispatch: forward `source` to its registered client handler.
 * NOTE(review): unlike registerInterrupt, `source` is not bounds-checked
 * here — presumably it always originates from ml_install_interrupt_handler
 * with a valid CPU number; confirm before relying on external callers.
 */
IOReturn IOCPUInterruptController::handleInterrupt(void */*refCon*/,
                                                   IOService */*nub*/,
                                                   int source)
{
    IOInterruptVector *vector;

    vector = &vectors[source];

    if (!vector->interruptRegistered) return kIOReturnInvalid;

    vector->handler(vector->target, vector->refCon,
                    vector->nub, vector->source);

    return kIOReturnSuccess;
}
951
952/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
953