1/*
2 * Copyright (c) 1999-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#define IOKIT_ENABLE_SHARED_PTR
30
31extern "C" {
32#include <pexpert/pexpert.h>
33#include <kern/cpu_number.h>
34extern void kperf_kernel_configure(char *);
35}
36
37#include <machine/machine_routines.h>
38#include <IOKit/IOLib.h>
39#include <IOKit/IOPlatformExpert.h>
40#include <IOKit/pwr_mgt/RootDomain.h>
41#include <IOKit/pwr_mgt/IOPMPrivate.h>
42#include <libkern/c++/OSSharedPtr.h>
43#include <IOKit/IOUserClient.h>
44#include <IOKit/IOKitKeysPrivate.h>
45#include <IOKit/IOCPU.h>
46#include "IOKitKernelInternal.h"
47
48/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
49
50#include <kern/queue.h>
51#include <kern/sched_prim.h>
52
// Console and scheduler hooks bracketing system sleep (implemented elsewhere
// in the kernel); used only by IOCPUSleepKernel() below.
extern "C" void console_suspend();
extern "C" void console_resume();
extern "C" void sched_override_available_cores_for_sleep(void);
extern "C" void sched_restore_available_cores_after_sleep(void);
57
58/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
59
// Global registry of all IOCPU objects plus the synthetic property key and
// the per-state display names, all created once in IOCPUInitialize().
// gIOCPUsLock guards membership changes to gIOCPUs (see IOCPU::start/detach).
static IOLock *gIOCPUsLock;
static OSSharedPtr<OSArray> gIOCPUs;
static OSSharedPtr<const OSSymbol> gIOCPUStateKey;
static OSSharedPtr<OSString> gIOCPUStateNames[kIOCPUStateCount];
64
65/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
66
67#if !USE_APPLEARMSMP
68
69void
70IOCPUInitialize(void)
71{
72 gIOCPUsLock = IOLockAlloc();
73 gIOCPUs = OSArray::withCapacity(1);
74
75 gIOCPUStateKey = OSSymbol::withCStringNoCopy("IOCPUState");
76
77 gIOCPUStateNames[kIOCPUStateUnregistered] =
78 OSString::withCStringNoCopy("Unregistered");
79 gIOCPUStateNames[kIOCPUStateUninitalized] =
80 OSString::withCStringNoCopy("Uninitalized");
81 gIOCPUStateNames[kIOCPUStateStopped] =
82 OSString::withCStringNoCopy("Stopped");
83 gIOCPUStateNames[kIOCPUStateRunning] =
84 OSString::withCStringNoCopy("Running");
85}
86
87/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
88
89kern_return_t
90PE_cpu_start(cpu_id_t target,
91 vm_offset_t start_paddr, vm_offset_t arg_paddr)
92{
93 IOCPU *targetCPU = (IOCPU *)target;
94
95 if (targetCPU == NULL) {
96 return KERN_FAILURE;
97 }
98 return targetCPU->startCPU(start_paddr, arg_paddr);
99}
100
101void
102PE_cpu_halt(cpu_id_t target)
103{
104 IOCPU *targetCPU = (IOCPU *)target;
105
106 targetCPU->haltCPU();
107}
108
109void
110PE_cpu_signal(cpu_id_t source, cpu_id_t target)
111{
112 IOCPU *sourceCPU = (IOCPU *)source;
113 IOCPU *targetCPU = (IOCPU *)target;
114
115 sourceCPU->signalCPU(targetCPU);
116}
117
118void
119PE_cpu_signal_deferred(cpu_id_t source, cpu_id_t target)
120{
121 IOCPU *sourceCPU = (IOCPU *)source;
122 IOCPU *targetCPU = (IOCPU *)target;
123
124 sourceCPU->signalCPUDeferred(targetCPU);
125}
126
127void
128PE_cpu_signal_cancel(cpu_id_t source, cpu_id_t target)
129{
130 IOCPU *sourceCPU = (IOCPU *)source;
131 IOCPU *targetCPU = (IOCPU *)target;
132
133 sourceCPU->signalCPUCancel(targetCPU);
134}
135
/*
 * Platform-expert entry: run per-CPU machine initialization.  `bootb` is
 * true on the boot-time call; panics rather than proceed with a handle
 * that is not actually an IOCPU.
 */
void
PE_cpu_machine_init(cpu_id_t target, boolean_t bootb)
{
	IOCPU *targetCPU = OSDynamicCast(IOCPU, (OSObject *)target);

	if (targetCPU == NULL) {
		panic("%s: invalid target CPU %p", __func__, target);
	}

	targetCPU->initCPU(bootb);
#if defined(__arm64__)
	// Non-boot path (wake) on the master CPU: the system is no longer
	// quiescing once the master has re-initialized.
	if (!bootb && (targetCPU->getCPUNumber() == (UInt32)master_cpu)) {
		ml_set_is_quiescing(false);
	}
#endif /* defined(__arm64__) */
}
152
/*
 * Platform-expert entry: quiesce the CPU identified by `target`.
 */
void
PE_cpu_machine_quiesce(cpu_id_t target)
{
	IOCPU *targetCPU = (IOCPU*)target;
#if defined(__arm64__)
	// Mark the system as quiescing before the master CPU itself goes down.
	if (targetCPU->getCPUNumber() == (UInt32)master_cpu) {
		ml_set_is_quiescing(true);
	}
#endif /* defined(__arm64__) */
	targetCPU->quiesceCPU();
}
164
165#if defined(__arm64__)
// Single global PMI (performance-monitor interrupt) handler shared by all
// CPUs; installed via PE_cpu_perfmon_interrupt_install_handler() below.
static perfmon_interrupt_handler_func pmi_handler = NULL;
167
/*
 * Record the (single, global) PMI handler.  Always succeeds; the handler is
 * not wired to any interrupt source until PE_cpu_perfmon_interrupt_enable().
 */
kern_return_t
PE_cpu_perfmon_interrupt_install_handler(perfmon_interrupt_handler_func handler)
{
	pmi_handler = handler;

	return KERN_SUCCESS;
}
175
/*
 * Enable or disable the PMI on `target`'s provider (interrupt index 1).
 * NOTE(review): enable registers the handler but disable only disables the
 * interrupt — the handler is never unregistered; confirm this asymmetry is
 * intentional.
 */
void
PE_cpu_perfmon_interrupt_enable(cpu_id_t target, boolean_t enable)
{
	IOCPU *targetCPU = (IOCPU*)target;

	if (targetCPU == nullptr) {
		return;
	}

	if (enable) {
		targetCPU->getProvider()->registerInterrupt(1, targetCPU, (IOInterruptAction)(void (*)(void))pmi_handler, NULL);
		targetCPU->getProvider()->enableInterrupt(1);
	} else {
		targetCPU->getProvider()->disableInterrupt(1);
	}
}
192#endif
193
/*
 * Tell the kernel debugger whether `cpu_id` is powered.  In this legacy
 * (!USE_APPLEARMSMP) path every CPU is always reported as powered.
 */
bool
PE_cpu_power_check_kdp(int cpu_id)
{
	return true;
}
199
200#endif /* !USE_APPLEARMSMP */
201
202/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
203
#define super IOService

OSDefineMetaClassAndAbstractStructors(IOCPU, IOService);
// Reserved vtable slots for future binary-compatible expansion of IOCPU.
OSMetaClassDefineReservedUnused(IOCPU, 0);
OSMetaClassDefineReservedUnused(IOCPU, 1);
OSMetaClassDefineReservedUnused(IOCPU, 2);
OSMetaClassDefineReservedUnused(IOCPU, 3);
OSMetaClassDefineReservedUnused(IOCPU, 4);
OSMetaClassDefineReservedUnused(IOCPU, 5);
OSMetaClassDefineReservedUnused(IOCPU, 6);
OSMetaClassDefineReservedUnused(IOCPU, 7);
215
216/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
217
218#if !USE_APPLEARMSMP
/*
 * Put every CPU in gIOCPUs to sleep, boot CPU last.  The system actually
 * sleeps inside the boot CPU's haltCPU() call; execution resumes there on
 * wake, after which the remaining CPUs are restarted.
 */
void
IOCPUSleepKernel(void)
{
#if defined(__x86_64__)
	extern IOCPU *currentShutdownTarget;
#endif
	unsigned int cnt, numCPUs;
	IOCPU *target;
	IOCPU *bootCPU = NULL;
	IOPMrootDomain *rootDomain = IOService::getPMRootDomain();

	printf("IOCPUSleepKernel enter\n");
	sched_override_available_cores_for_sleep();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformActions );
	IOPlatformActionsPreSleep();
	rootDomain->tracePoint( kIOPMTracePointSleepCPUs );

	numCPUs = gIOCPUs->getCount();
#if defined(__x86_64__)
	currentShutdownTarget = NULL;
#endif

	integer_t old_pri;
	thread_t self = current_thread();

	/*
	 * We need to boost this thread's priority to the maximum kernel priority to
	 * ensure we can urgently preempt ANY thread currently executing on the
	 * target CPU.  Note that realtime threads have their own mechanism to eventually
	 * demote their priority below MAXPRI_KERNEL if they hog the CPU for too long.
	 */
	old_pri = thread_kern_get_pri(self);
	thread_kern_set_pri(self, thread_kern_get_kernel_maxpri());

	// Sleep the CPUs.
	ml_set_is_quiescing(true);
	cnt = numCPUs;
	while (cnt--) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// We make certain that the bootCPU is the last to sleep
		// We'll skip it for now, and halt it after finishing the
		// non-boot CPU's.
		if (target->getCPUNumber() == (UInt32)master_cpu) {
			bootCPU = target;
		} else if (target->getCPUState() == kIOCPUStateRunning) {
#if defined(__x86_64__)
			currentShutdownTarget = target;
#endif
			target->haltCPU();
		}
	}

	// By here only the boot CPU is still running, and we must be on it.
	assert(bootCPU != NULL);
	assert(cpu_number() == master_cpu);

	console_suspend();

	rootDomain->tracePoint( kIOPMTracePointSleepPlatformDriver );
	rootDomain->stop_watchdog_timer();

	/*
	 * Now sleep the boot CPU, including calling the kQueueQuiesce actions.
	 * The system sleeps here.
	 */

	bootCPU->haltCPU();
	ml_set_is_quiescing(false);

	/*
	 * The system is now coming back from sleep on the boot CPU.
	 * The kQueueActive actions have already been called.
	 */

	rootDomain->start_watchdog_timer();

	console_resume();

	rootDomain->tracePoint( kIOPMTracePointWakeCPUs );

	// Wake the other CPUs.
	for (cnt = 0; cnt < numCPUs; cnt++) {
		target = OSDynamicCast(IOCPU, gIOCPUs->getObject(cnt));

		// Skip the already-woken boot CPU.
		if (target->getCPUNumber() != (UInt32)master_cpu) {
			// A non-boot CPU already running at this point never slept.
			if (target->getCPUState() == kIOCPUStateRunning) {
				panic("Spurious wakeup of cpu %u", (unsigned int)(target->getCPUNumber()));
			}

			if (target->getCPUState() == kIOCPUStateStopped) {
				processor_start(target->getMachProcessor());
			}
		}
	}

	rootDomain->tracePoint( kIOPMTracePointWakePlatformActions );
	IOPlatformActionsPostResume();

	sched_restore_available_cores_after_sleep();

	// Drop back to the priority we had on entry.
	thread_kern_set_pri(self, old_pri);
	printf("IOCPUSleepKernel exit\n");
}
324
// The legacy IOCPU sleep/wake path is active (not disabled) when
// USE_APPLEARMSMP is not set.
static bool
is_IOCPU_disabled(void)
{
	return false;
}
330#else /* !USE_APPLEARMSMP */
// On USE_APPLEARMSMP builds the IOCPU class is inert: IOCPU::start() and
// IOCPU::detach() bail out early when this returns true.
static bool
is_IOCPU_disabled(void)
{
	return true;
}
336#endif /* !USE_APPLEARMSMP */
337
338bool
339IOCPU::start(IOService *provider)
340{
341 if (is_IOCPU_disabled()) {
342 return false;
343 }
344
345 if (!super::start(provider)) {
346 return false;
347 }
348
349 _cpuGroup = gIOCPUs;
350 cpuNub = provider;
351
352 IOLockLock(gIOCPUsLock);
353 gIOCPUs->setObject(this);
354 IOLockUnlock(gIOCPUsLock);
355
356 // Correct the bus, cpu and timebase frequencies in the device tree.
357 if (gPEClockFrequencyInfo.bus_frequency_hz < 0x100000000ULL) {
358 OSSharedPtr<OSData> busFrequency = OSData::withBytesNoCopy(bytes: (void *)&gPEClockFrequencyInfo.bus_clock_rate_hz, numBytes: 4);
359 provider->setProperty(aKey: "bus-frequency", anObject: busFrequency.get());
360 } else {
361 OSSharedPtr<OSData> busFrequency = OSData::withBytesNoCopy(bytes: (void *)&gPEClockFrequencyInfo.bus_frequency_hz, numBytes: 8);
362 provider->setProperty(aKey: "bus-frequency", anObject: busFrequency.get());
363 }
364
365 if (gPEClockFrequencyInfo.cpu_frequency_hz < 0x100000000ULL) {
366 OSSharedPtr<OSData> cpuFrequency = OSData::withBytesNoCopy(bytes: (void *)&gPEClockFrequencyInfo.cpu_clock_rate_hz, numBytes: 4);
367 provider->setProperty(aKey: "clock-frequency", anObject: cpuFrequency.get());
368 } else {
369 OSSharedPtr<OSData> cpuFrequency = OSData::withBytesNoCopy(bytes: (void *)&gPEClockFrequencyInfo.cpu_frequency_hz, numBytes: 8);
370 provider->setProperty(aKey: "clock-frequency", anObject: cpuFrequency.get());
371 }
372
373 OSSharedPtr<OSData> timebaseFrequency = OSData::withBytesNoCopy(bytes: (void *)&gPEClockFrequencyInfo.timebase_frequency_hz, numBytes: 4);
374 provider->setProperty(aKey: "timebase-frequency", anObject: timebaseFrequency.get());
375
376 super::setProperty(aKey: "IOCPUID", aValue: getRegistryEntryID(), aNumberOfBits: sizeof(uint64_t) * 8);
377
378 setCPUNumber(0);
379 setCPUState(kIOCPUStateUnregistered);
380
381 return true;
382}
383
384void
385IOCPU::detach(IOService *provider)
386{
387 if (is_IOCPU_disabled()) {
388 return;
389 }
390
391 super::detach(provider);
392 IOLockLock(gIOCPUsLock);
393 unsigned int index = gIOCPUs->getNextIndexOfObject(anObject: this, index: 0);
394 if (index != (unsigned int)-1) {
395 gIOCPUs->removeObject(index);
396 }
397 IOLockUnlock(gIOCPUsLock);
398}
399
/*
 * Intercept reads of the synthetic "IOCPUState" property and return the
 * state-name string directly; every other key defers to IOService.
 */
OSObject *
IOCPU::getProperty(const OSSymbol *aKey) const
{
	if (aKey == gIOCPUStateKey) {
		return gIOCPUStateNames[_cpuState].get();
	}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	return super::getProperty(aKey);
#pragma clang diagnostic pop
}
411
412bool
413IOCPU::setProperty(const OSSymbol *aKey, OSObject *anObject)
414{
415 if (aKey == gIOCPUStateKey) {
416 return false;
417 }
418
419 return super::setProperty(aKey, anObject);
420}
421
422bool
423IOCPU::serializeProperties(OSSerialize *serialize) const
424{
425 bool result;
426 OSSharedPtr<OSDictionary> dict = dictionaryWithProperties();
427 if (!dict) {
428 return false;
429 }
430 dict->setObject(aKey: gIOCPUStateKey.get(), anObject: gIOCPUStateNames[_cpuState].get());
431 result = dict->serialize(serializer: serialize);
432 return result;
433}
434
435IOReturn
436IOCPU::setProperties(OSObject *properties)
437{
438 OSDictionary *dict = OSDynamicCast(OSDictionary, properties);
439 OSString *stateStr;
440 IOReturn result;
441
442 if (dict == NULL) {
443 return kIOReturnUnsupported;
444 }
445
446 stateStr = OSDynamicCast(OSString, dict->getObject(gIOCPUStateKey.get()));
447 if (stateStr != NULL) {
448 result = IOUserClient::clientHasPrivilege(securityToken: current_task(), kIOClientPrivilegeAdministrator);
449 if (result != kIOReturnSuccess) {
450 return result;
451 }
452
453 if (setProperty(aKey: gIOCPUStateKey.get(), anObject: stateStr)) {
454 return kIOReturnSuccess;
455 }
456
457 return kIOReturnUnsupported;
458 }
459
460 return kIOReturnUnsupported;
461}
462
// Send an inter-processor interrupt to `target`.  Default implementation in
// this abstract base class is a no-op.
void
IOCPU::signalCPU(IOCPU */*target*/)
{
}
467
// Send a deferred (coalescable) IPI to `target`.
void
IOCPU::signalCPUDeferred(IOCPU *target)
{
	// Our CPU may not support deferred IPIs,
	// so send a regular IPI by default
	signalCPU(target);
}
475
// Cancel a pending deferred IPI to `target`.
void
IOCPU::signalCPUCancel(IOCPU */*target*/)
{
	// Meant to cancel signals sent by
	// signalCPUDeferred; unsupported
	// by default
}
483
// Enable/disable this CPU's timebase.  Default implementation is a no-op.
void
IOCPU::enableCPUTimeBase(bool /*enable*/)
{
}
488
// Accessor for the cached CPU number (set via setCPUNumber()).
UInt32
IOCPU::getCPUNumber(void)
{
	return _cpuNumber;
}
494
495void
496IOCPU::setCPUNumber(UInt32 cpuNumber)
497{
498 _cpuNumber = cpuNumber;
499 super::setProperty(aKey: "IOCPUNumber", aValue: _cpuNumber, aNumberOfBits: 32);
500}
501
// Accessor for the cached CPU state (one of the kIOCPUState* values).
UInt32
IOCPU::getCPUState(void)
{
	return _cpuState;
}
507
// Update the cached CPU state; out-of-range values are silently ignored.
void
IOCPU::setCPUState(UInt32 cpuState)
{
	if (cpuState < kIOCPUStateCount) {
		_cpuState = cpuState;
	}
}
515
// Return the (non-retained) CPU group array; set to gIOCPUs in start().
OSArray *
IOCPU::getCPUGroup(void)
{
	return _cpuGroup.get();
}
521
// Number of CPUs currently in this CPU's group.
UInt32
IOCPU::getCPUGroupSize(void)
{
	return _cpuGroup->getCount();
}
527
// Accessor for the Mach processor_t backing this IOCPU.
processor_t
IOCPU::getMachProcessor(void)
{
	return machProcessor;
}
533
534
535/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
536
#undef super
#define super IOInterruptController

OSDefineMetaClassAndStructors(IOCPUInterruptController, IOInterruptController);

// Reserved vtable slots (1-5) for future binary-compatible expansion.
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 1);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 2);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 3);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 4);
OSMetaClassDefineReservedUnused(IOCPUInterruptController, 5);
547
548
549
550/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
551
552IOReturn
553IOCPUInterruptController::initCPUInterruptController(int sources)
554{
555 return initCPUInterruptController(sources, cpus: sources);
556}
557
558IOReturn
559IOCPUInterruptController::initCPUInterruptController(int sources, int cpus)
560{
561 int cnt;
562
563 if (!super::init()) {
564 return kIOReturnInvalid;
565 }
566
567 numSources = sources;
568 numCPUs = cpus;
569
570 vectors = (IOInterruptVector *)zalloc_permanent(numSources *
571 sizeof(IOInterruptVector), ZALIGN(IOInterruptVector));
572
573 // Allocate a lock for each vector
574 for (cnt = 0; cnt < numSources; cnt++) {
575 vectors[cnt].interruptLock = IOLockAlloc();
576 if (vectors[cnt].interruptLock == NULL) {
577 for (cnt = 0; cnt < numSources; cnt++) {
578 if (vectors[cnt].interruptLock != NULL) {
579 IOLockFree(lock: vectors[cnt].interruptLock);
580 }
581 }
582 return kIOReturnNoResources;
583 }
584 }
585
586 ml_set_max_cpus(max_cpus: numSources);
587 return kIOReturnSuccess;
588}
589
590void
591IOCPUInterruptController::registerCPUInterruptController(void)
592{
593 setProperty(aKey: gPlatformInterruptControllerName, anObject: kOSBooleanTrue);
594 registerService();
595
596 getPlatform()->registerInterruptController(name: gPlatformInterruptControllerName,
597 interruptController: this);
598}
599
600void
601IOCPUInterruptController::setCPUInterruptProperties(IOService *service)
602{
603 int cnt;
604 OSSharedPtr<OSArray> specifier;
605 OSSharedPtr<OSArray> controller;
606 long tmpLong;
607
608 if ((service->propertyExists(aKey: gIOInterruptControllersKey)) &&
609 (service->propertyExists(aKey: gIOInterruptSpecifiersKey))) {
610 return;
611 }
612
613 // Create the interrupt specifer array.
614 specifier = OSArray::withCapacity(capacity: numSources);
615 for (cnt = 0; cnt < numSources; cnt++) {
616 tmpLong = cnt;
617 OSSharedPtr<OSData> tmpData = OSData::withValue(value: tmpLong);
618 specifier->setObject(tmpData.get());
619 }
620
621 // Create the interrupt controller array.
622 controller = OSArray::withCapacity(capacity: numSources);
623 for (cnt = 0; cnt < numSources; cnt++) {
624 controller->setObject(gPlatformInterruptControllerName);
625 }
626
627 // Put the two arrays into the property table.
628 service->setProperty(aKey: gIOInterruptControllersKey, anObject: controller.get());
629 service->setProperty(aKey: gIOInterruptSpecifiersKey, anObject: specifier.get());
630}
631
632void
633IOCPUInterruptController::enableCPUInterrupt(IOCPU *cpu)
634{
635 IOInterruptHandler handler = OSMemberFunctionCast(
636 IOInterruptHandler, this, &IOCPUInterruptController::handleInterrupt);
637
638 assert(numCPUs > 0);
639
640 ml_install_interrupt_handler(nub: cpu, source: cpu->getCPUNumber(), target: this, handler, NULL);
641
642 IOTakeLock(lock: vectors[0].interruptLock);
643 ++enabledCPUs;
644
645 if (enabledCPUs == numCPUs) {
646 IOService::cpusRunning();
647 thread_wakeup(this);
648 }
649 IOUnlock(lock: vectors[0].interruptLock);
650}
651
652IOReturn
653IOCPUInterruptController::registerInterrupt(IOService *nub,
654 int source,
655 void *target,
656 IOInterruptHandler handler,
657 void *refCon)
658{
659 IOInterruptVector *vector;
660
661 // Interrupts must be enabled, as this can allocate memory.
662 assert(ml_get_interrupts_enabled() == TRUE);
663
664 if (source >= numSources) {
665 return kIOReturnNoResources;
666 }
667
668 vector = &vectors[source];
669
670 // Get the lock for this vector.
671 IOTakeLock(lock: vector->interruptLock);
672
673 // Make sure the vector is not in use.
674 if (vector->interruptRegistered) {
675 IOUnlock(lock: vector->interruptLock);
676 return kIOReturnNoResources;
677 }
678
679 // Fill in vector with the client's info.
680 vector->handler = handler;
681 vector->nub = nub;
682 vector->source = source;
683 vector->target = target;
684 vector->refCon = refCon;
685
686 // Get the vector ready. It starts hard disabled.
687 vector->interruptDisabledHard = 1;
688 vector->interruptDisabledSoft = 1;
689 vector->interruptRegistered = 1;
690
691 IOUnlock(lock: vector->interruptLock);
692
693 IOTakeLock(lock: vectors[0].interruptLock);
694 if (enabledCPUs != numCPUs) {
695 assert_wait(event: this, THREAD_UNINT);
696 IOUnlock(lock: vectors[0].interruptLock);
697 thread_block(THREAD_CONTINUE_NULL);
698 } else {
699 IOUnlock(lock: vectors[0].interruptLock);
700 }
701
702 return kIOReturnSuccess;
703}
704
705IOReturn
706IOCPUInterruptController::getInterruptType(IOService */*nub*/,
707 int /*source*/,
708 int *interruptType)
709{
710 if (interruptType == NULL) {
711 return kIOReturnBadArgument;
712 }
713
714 *interruptType = kIOInterruptTypeLevel;
715
716 return kIOReturnSuccess;
717}
718
// No-op: reports success without touching interrupt state.  The
// commented-out line shows the previous behavior, deliberately disabled.
IOReturn
IOCPUInterruptController::enableInterrupt(IOService */*nub*/,
    int /*source*/)
{
// ml_set_interrupts_enabled(true);
	return kIOReturnSuccess;
}
726
// No-op: reports success without touching interrupt state.  The
// commented-out line shows the previous behavior, deliberately disabled.
IOReturn
IOCPUInterruptController::disableInterrupt(IOService */*nub*/,
    int /*source*/)
{
// ml_set_interrupts_enabled(false);
	return kIOReturnSuccess;
}
734
// Trigger a software interrupt on the current CPU; nub/source are ignored.
IOReturn
IOCPUInterruptController::causeInterrupt(IOService */*nub*/,
    int /*source*/)
{
	ml_cause_interrupt();
	return kIOReturnSuccess;
}
742
743IOReturn
744IOCPUInterruptController::handleInterrupt(void */*refCon*/,
745 IOService */*nub*/,
746 int source)
747{
748 IOInterruptVector *vector;
749
750 vector = &vectors[source];
751
752 if (!vector->interruptRegistered) {
753 return kIOReturnInvalid;
754 }
755
756 vector->handler(vector->target, vector->refCon,
757 vector->nub, vector->source);
758
759 return kIOReturnSuccess;
760}
761
762/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
763