1 | /* |
2 | * Copyright (c) 1998-2010 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <pexpert/pexpert.h> |
30 | #include <IOKit/IOWorkLoop.h> |
31 | #include <IOKit/IOEventSource.h> |
32 | #include <IOKit/IOInterruptEventSource.h> |
33 | #include <IOKit/IOCommandGate.h> |
34 | #include <IOKit/IOCommandPool.h> |
35 | #include <IOKit/IOTimeStamp.h> |
36 | #include <IOKit/IOKitDebug.h> |
37 | #include <libkern/OSDebug.h> |
38 | #include <kern/thread.h> |
39 | |
40 | #define super OSObject |
41 | |
42 | OSDefineMetaClassAndStructors(IOWorkLoop, OSObject); |
43 | |
44 | // Block of unused functions intended for future use |
45 | #if __LP64__ |
46 | OSMetaClassDefineReservedUnused(IOWorkLoop, 0); |
47 | OSMetaClassDefineReservedUnused(IOWorkLoop, 1); |
48 | OSMetaClassDefineReservedUnused(IOWorkLoop, 2); |
49 | #else |
50 | OSMetaClassDefineReservedUsedX86(IOWorkLoop, 0); |
51 | OSMetaClassDefineReservedUsedX86(IOWorkLoop, 1); |
52 | OSMetaClassDefineReservedUsedX86(IOWorkLoop, 2); |
53 | #endif |
54 | OSMetaClassDefineReservedUnused(IOWorkLoop, 3); |
55 | OSMetaClassDefineReservedUnused(IOWorkLoop, 4); |
56 | OSMetaClassDefineReservedUnused(IOWorkLoop, 5); |
57 | OSMetaClassDefineReservedUnused(IOWorkLoop, 6); |
58 | OSMetaClassDefineReservedUnused(IOWorkLoop, 7); |
59 | |
60 | enum IOWorkLoopState { kLoopRestart = 0x1, kLoopTerminate = 0x2 }; |
61 | static inline void |
62 | SETP(void *addr, unsigned int flag) |
63 | { |
64 | unsigned char *num = (unsigned char *) addr; *num |= flag; |
65 | } |
66 | static inline void |
67 | CLRP(void *addr, unsigned int flag) |
68 | { |
69 | unsigned char *num = (unsigned char *) addr; *num &= ~flag; |
70 | } |
71 | static inline bool |
72 | ISSETP(void *addr, unsigned int flag) |
73 | { |
74 | unsigned char *num = (unsigned char *) addr; return (*num & flag) != 0; |
75 | } |
76 | |
77 | #define fFlags loopRestart |
78 | |
79 | #define passiveEventChain reserved->passiveEventChain |
80 | |
81 | #if IOKITSTATS |
82 | |
83 | #define IOStatisticsRegisterCounter() \ |
84 | do { \ |
85 | reserved->counter = IOStatistics::registerWorkLoop(this); \ |
86 | } while(0) |
87 | |
88 | #define IOStatisticsUnregisterCounter() \ |
89 | do { \ |
90 | if (reserved) \ |
91 | IOStatistics::unregisterWorkLoop(reserved->counter); \ |
92 | } while(0) |
93 | |
94 | #define IOStatisticsOpenGate() \ |
95 | do { \ |
96 | IOStatistics::countWorkLoopOpenGate(reserved->counter); \ |
97 | if (reserved->lockInterval) lockTime(); \ |
98 | } while(0) |
99 | #define IOStatisticsCloseGate() \ |
100 | do { \ |
101 | IOStatistics::countWorkLoopCloseGate(reserved->counter); \ |
102 | if (reserved->lockInterval) reserved->lockTime = mach_absolute_time(); \ |
103 | } while(0) |
104 | |
105 | #define IOStatisticsAttachEventSource() \ |
106 | do { \ |
107 | IOStatistics::attachWorkLoopEventSource(reserved->counter, inEvent->reserved->counter); \ |
108 | } while(0) |
109 | |
110 | #define IOStatisticsDetachEventSource() \ |
111 | do { \ |
112 | IOStatistics::detachWorkLoopEventSource(reserved->counter, inEvent->reserved->counter); \ |
113 | } while(0) |
114 | |
115 | #else |
116 | |
117 | #define IOStatisticsRegisterCounter() |
118 | #define IOStatisticsUnregisterCounter() |
119 | #define IOStatisticsOpenGate() |
120 | #define IOStatisticsCloseGate() |
121 | #define IOStatisticsAttachEventSource() |
122 | #define IOStatisticsDetachEventSource() |
123 | |
124 | #endif /* IOKITSTATS */ |
125 | |
126 | bool |
127 | IOWorkLoop::init() |
128 | { |
129 | // The super init and gateLock allocation MUST be done first. |
130 | if (!super::init()) { |
131 | return false; |
132 | } |
133 | |
134 | // Allocate our ExpansionData if it hasn't been allocated already. |
135 | if (!reserved) { |
136 | reserved = IOMallocType(ExpansionData); |
137 | } |
138 | |
139 | if (gateLock == NULL) { |
140 | if (!(gateLock = IORecursiveLockAlloc())) { |
141 | return false; |
142 | } |
143 | } |
144 | |
145 | if (workToDoLock == NULL) { |
146 | if (!(workToDoLock = IOSimpleLockAlloc())) { |
147 | return false; |
148 | } |
		IOSimpleLockInit(workToDoLock);
150 | workToDo = false; |
151 | } |
152 | |
153 | IOStatisticsRegisterCounter(); |
154 | |
155 | if (controlG == NULL) { |
		controlG = IOCommandGate::commandGate(
			this,
			OSMemberFunctionCast(
				IOCommandGate::Action,
				this,
				&IOWorkLoop::_maintRequest));
162 | |
163 | if (!controlG) { |
164 | return false; |
165 | } |
166 | // Point the controlGate at the workLoop. Usually addEventSource |
167 | // does this automatically. The problem is in this case addEventSource |
168 | // uses the control gate and it has to be bootstrapped. |
169 | controlG->setWorkLoop(this); |
		if (addEventSource(controlG) != kIOReturnSuccess) {
171 | return false; |
172 | } |
173 | } |
174 | |
175 | if (workThread == NULL) { |
176 | thread_continue_t cptr = OSMemberFunctionCast( |
177 | thread_continue_t, |
178 | this, |
179 | &IOWorkLoop::threadMain); |
		if (KERN_SUCCESS != kernel_thread_start(cptr, this, &workThread)) {
181 | return false; |
182 | } |
183 | } |
184 | |
	(void) thread_set_tag(workThread, THREAD_TAG_IOWORKLOOP);
186 | return true; |
187 | } |
188 | |
189 | IOWorkLoop * |
190 | IOWorkLoop::workLoop() |
191 | { |
	return IOWorkLoop::workLoopWithOptions(0);
193 | } |
194 | |
195 | IOWorkLoop * |
196 | IOWorkLoop::workLoopWithOptions(IOOptionBits options) |
197 | { |
198 | IOWorkLoop *me = new IOWorkLoop; |
199 | |
200 | if (me && options) { |
201 | me->reserved = IOMallocType(ExpansionData); |
202 | me->reserved->options = options; |
203 | } |
204 | |
205 | if (me && !me->init()) { |
206 | me->release(); |
207 | return NULL; |
208 | } |
209 | |
210 | return me; |
211 | } |
212 | |
213 | void |
214 | IOWorkLoop::releaseEventChain(LIBKERN_CONSUMED IOEventSource *eventChain) |
215 | { |
216 | IOEventSource *event, *next; |
217 | for (event = eventChain; event; event = next) { |
218 | next = event->getNext(); |
219 | #ifdef __clang_analyzer__ |
220 | // Unlike the usual IOKit memory management convention, IOWorkLoop |
		// manages the retain count for the IOEventSource instances in the
		// chain rather than have IOEventSource do that itself. This means
223 | // it is safe to call release() on the result of getNext() while the |
224 | // chain is being torn down. However, the analyzer doesn't |
225 | // realize this. We add an extra retain under analysis to suppress |
226 | // an analyzer diagnostic about violations of the memory management rules. |
227 | if (next) { |
228 | next->retain(); |
229 | } |
230 | #endif |
231 | event->setWorkLoop(NULL); |
232 | event->setNext(NULL); |
233 | event->release(); |
234 | } |
235 | } |
// Free is called twice:
// first, when the atomic retainCount transitions from 1 -> 0;
// second, when the work loop itself is committing hara-kiri.
// Hence each leg of the free must be single threaded.
240 | void |
241 | IOWorkLoop::free() |
242 | { |
243 | if (workThread) { |
244 | IOInterruptState is; |
245 | |
		// If we are here then we must be trying to shut down this work loop.
		// In that case disable all of the event sources, mark the loop
		// as terminating, wake up the work thread itself, and return.
		// Note: we hold the gate across the entire operation mainly for the
		// benefit of our event sources so we can disable them cleanly.
251 | closeGate(); |
252 | |
253 | disableAllEventSources(); |
254 | |
		is = IOSimpleLockLockDisableInterrupt(workToDoLock);
		SETP(&fFlags, kLoopTerminate);
		thread_wakeup_thread((void *) &workToDo, workThread);
		IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
259 | |
260 | openGate(); |
261 | } else { /* !workThread */ |
262 | releaseEventChain(eventChain); |
263 | eventChain = NULL; |
264 | |
265 | releaseEventChain(passiveEventChain); |
266 | passiveEventChain = NULL; |
267 | |
		// Either we have a partial initialization to clean up,
		// or the workThread itself is performing hara-kiri.
		// Either way clean up all of our resources and return.
271 | |
272 | if (controlG) { |
273 | controlG->workLoop = NULL; |
274 | controlG->release(); |
275 | controlG = NULL; |
276 | } |
277 | |
278 | if (workToDoLock) { |
			IOSimpleLockFree(workToDoLock);
280 | workToDoLock = NULL; |
281 | } |
282 | |
283 | if (gateLock) { |
			IORecursiveLockFree(gateLock);
285 | gateLock = NULL; |
286 | } |
287 | |
288 | IOStatisticsUnregisterCounter(); |
289 | |
290 | if (reserved) { |
291 | IOFreeType(reserved, ExpansionData); |
292 | reserved = NULL; |
293 | } |
294 | |
295 | super::free(); |
296 | } |
297 | } |
298 | |
299 | IOReturn |
300 | IOWorkLoop::addEventSource(IOEventSource *newEvent) |
301 | { |
302 | if ((workThread) |
	    && !thread_has_thread_name(workThread)
304 | && (newEvent->owner) |
305 | && !OSDynamicCast(IOCommandPool, newEvent->owner)) { |
		thread_set_thread_name(workThread, newEvent->owner->getMetaClass()->getClassName());
307 | } |
308 | |
	return controlG->runCommand((void *) mAddEvent, (void *) newEvent);
310 | } |
311 | |
312 | IOReturn |
313 | IOWorkLoop::removeEventSource(IOEventSource *toRemove) |
314 | { |
	return controlG->runCommand((void *) mRemoveEvent, (void *) toRemove);
316 | } |
317 | |
318 | void |
319 | IOWorkLoop::enableAllEventSources() const |
320 | { |
321 | IOEventSource *event; |
322 | |
323 | for (event = eventChain; event; event = event->getNext()) { |
324 | event->enable(); |
325 | } |
326 | |
327 | for (event = passiveEventChain; event; event = event->getNext()) { |
328 | event->enable(); |
329 | } |
330 | } |
331 | |
332 | void |
333 | IOWorkLoop::disableAllEventSources() const |
334 | { |
335 | IOEventSource *event; |
336 | |
337 | for (event = eventChain; event; event = event->getNext()) { |
338 | event->disable(); |
339 | } |
340 | |
341 | /* NOTE: controlG is in passiveEventChain since it's an IOCommandGate */ |
342 | for (event = passiveEventChain; event; event = event->getNext()) { |
343 | if (event != controlG) { // Don't disable the control gate |
344 | event->disable(); |
345 | } |
346 | } |
347 | } |
348 | |
349 | void |
350 | IOWorkLoop::enableAllInterrupts() const |
351 | { |
352 | IOEventSource *event; |
353 | |
354 | for (event = eventChain; event; event = event->getNext()) { |
355 | if (OSDynamicCast(IOInterruptEventSource, event)) { |
356 | event->enable(); |
357 | } |
358 | } |
359 | } |
360 | |
361 | void |
362 | IOWorkLoop::disableAllInterrupts() const |
363 | { |
364 | IOEventSource *event; |
365 | |
366 | for (event = eventChain; event; event = event->getNext()) { |
367 | if (OSDynamicCast(IOInterruptEventSource, event)) { |
368 | event->disable(); |
369 | } |
370 | } |
371 | } |
372 | |
373 | |
374 | /* virtual */ bool |
375 | IOWorkLoop::runEventSources() |
376 | { |
377 | bool res = false; |
378 | bool traceWL = (gIOKitTrace & kIOTraceWorkLoops) ? true : false; |
379 | bool traceES = (gIOKitTrace & kIOTraceEventSources) ? true : false; |
380 | |
381 | closeGate(); |
	if (ISSETP(&fFlags, kLoopTerminate)) {
383 | goto abort; |
384 | } |
385 | |
386 | if (traceWL) { |
387 | IOTimeStampStartConstant(IODBG_WORKLOOP(IOWL_WORK), VM_KERNEL_ADDRHIDE(this)); |
388 | } |
389 | |
390 | bool more; |
391 | do { |
		CLRP(&fFlags, kLoopRestart);
		more = false;
		IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
		workToDo = false;
		IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
397 | /* NOTE: only loop over event sources in eventChain. Bypass "passive" event sources for performance */ |
398 | for (IOEventSource *evnt = eventChain; evnt; evnt = evnt->getNext()) { |
399 | if (traceES) { |
400 | IOTimeStampStartConstant(IODBG_WORKLOOP(IOWL_CLIENT), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(evnt)); |
401 | } |
402 | |
403 | more |= evnt->checkForWork(); |
404 | |
405 | if (traceES) { |
406 | IOTimeStampEndConstant(IODBG_WORKLOOP(IOWL_CLIENT), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(evnt)); |
407 | } |
408 | |
			if (ISSETP(&fFlags, kLoopTerminate)) {
410 | goto abort; |
411 | } else if (fFlags & kLoopRestart) { |
412 | more = true; |
413 | break; |
414 | } |
415 | } |
416 | } while (more); |
417 | |
418 | res = true; |
419 | |
420 | if (traceWL) { |
421 | IOTimeStampEndConstant(IODBG_WORKLOOP(IOWL_WORK), VM_KERNEL_ADDRHIDE(this)); |
422 | } |
423 | |
424 | abort: |
425 | openGate(); |
426 | return res; |
427 | } |
428 | |
429 | /* virtual */ void |
430 | IOWorkLoop::threadMain() |
431 | { |
432 | restartThread: |
433 | do { |
434 | if (!runEventSources()) { |
435 | goto exitThread; |
436 | } |
437 | |
		IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
		if (!ISSETP(&fFlags, kLoopTerminate) && !workToDo) {
			assert_wait((void *) &workToDo, false);
			IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
442 | thread_continue_t cptr = NULL; |
443 | if (!reserved || !(kPreciousStack & reserved->options)) { |
444 | cptr = OSMemberFunctionCast( |
445 | thread_continue_t, this, &IOWorkLoop::threadMain); |
446 | } |
			thread_block_parameter(cptr, this);
448 | goto restartThread; |
449 | /* NOTREACHED */ |
450 | } |
451 | |
		// At this point we either have work to do or we need
		// to commit suicide. But no matter:
		// clear the simple lock and restore the interrupt state.
		IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
456 | } while (workToDo); |
457 | |
458 | exitThread: |
459 | closeGate(); |
460 | thread_t thread = workThread; |
461 | workThread = NULL; // Say we don't have a loop and free ourselves |
462 | openGate(); |
463 | |
464 | free(); |
465 | |
466 | thread_deallocate(thread); |
	(void) thread_terminate(thread);
468 | } |
469 | |
470 | IOThread |
471 | IOWorkLoop::getThread() const |
472 | { |
473 | return workThread; |
474 | } |
475 | |
476 | bool |
477 | IOWorkLoop::onThread() const |
478 | { |
479 | return IOThreadSelf() == workThread; |
480 | } |
481 | |
482 | bool |
483 | IOWorkLoop::inGate() const |
484 | { |
	return IORecursiveLockHaveLock(gateLock);
486 | } |
487 | |
488 | // Internal APIs used by event sources to control the thread |
489 | void |
490 | IOWorkLoop::signalWorkAvailable() |
491 | { |
492 | if (workToDoLock) { |
		IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
		workToDo = true;
		thread_wakeup_thread((void *) &workToDo, workThread);
		IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
497 | } |
498 | } |
499 | |
500 | void |
501 | IOWorkLoop::openGate() |
502 | { |
503 | IOStatisticsOpenGate(); |
	IORecursiveLockUnlock(gateLock);
505 | } |
506 | |
507 | void |
508 | IOWorkLoop::closeGate() |
509 | { |
	IORecursiveLockLock(gateLock);
511 | IOStatisticsCloseGate(); |
512 | } |
513 | |
514 | bool |
515 | IOWorkLoop::tryCloseGate() |
516 | { |
	bool res = (IORecursiveLockTryLock(gateLock) != 0);
518 | if (res) { |
519 | IOStatisticsCloseGate(); |
520 | } |
521 | return res; |
522 | } |
523 | |
524 | int |
525 | IOWorkLoop::sleepGate(void *event, UInt32 interuptibleType) |
526 | { |
527 | int res; |
528 | IOStatisticsOpenGate(); |
	res = IORecursiveLockSleep(gateLock, event, interuptibleType);
530 | IOStatisticsCloseGate(); |
531 | return res; |
532 | } |
533 | |
534 | int |
535 | IOWorkLoop::sleepGate(void *event, AbsoluteTime deadline, UInt32 interuptibleType) |
536 | { |
537 | int res; |
538 | IOStatisticsOpenGate(); |
	res = IORecursiveLockSleepDeadline(gateLock, event, deadline, interuptibleType);
540 | IOStatisticsCloseGate(); |
541 | return res; |
542 | } |
543 | |
544 | void |
545 | IOWorkLoop::wakeupGate(void *event, bool oneThread) |
546 | { |
	IORecursiveLockWakeup(gateLock, event, oneThread);
548 | } |
549 | |
550 | static IOReturn |
551 | IOWorkLoopActionToBlock(OSObject *owner, |
552 | void *arg0, void *arg1, |
553 | void *arg2, void *arg3) |
554 | { |
555 | return ((IOWorkLoop::ActionBlock) arg0)(); |
556 | } |
557 | |
558 | IOReturn |
559 | IOWorkLoop::runActionBlock(ActionBlock action) |
560 | { |
	return runAction(&IOWorkLoopActionToBlock, this, action);
562 | } |
563 | |
564 | IOReturn |
565 | IOWorkLoop::runAction(Action inAction, OSObject *target, |
566 | void *arg0, void *arg1, |
567 | void *arg2, void *arg3) |
568 | { |
569 | IOReturn res; |
570 | |
571 | // closeGate is recursive so don't worry if we already hold the lock. |
572 | closeGate(); |
573 | res = (*inAction)(target, arg0, arg1, arg2, arg3); |
574 | openGate(); |
575 | |
576 | return res; |
577 | } |
578 | |
579 | IOReturn |
580 | IOWorkLoop::_maintRequest(void *inC, void *inD, void *, void *) |
581 | { |
582 | maintCommandEnum command = (maintCommandEnum) (uintptr_t) inC; |
583 | IOEventSource *inEvent = (IOEventSource *) inD; |
584 | IOReturn res = kIOReturnSuccess; |
585 | |
586 | switch (command) { |
587 | case mAddEvent: |
588 | if (!inEvent->getWorkLoop()) { |
			SETP(&fFlags, kLoopRestart);
590 | |
591 | inEvent->retain(); |
592 | inEvent->setWorkLoop(this); |
593 | inEvent->setNext(NULL); |
594 | |
595 | /* Check if this is a passive or active event source being added */ |
			if (eventSourcePerformsWork(inEvent)) {
597 | if (!eventChain) { |
598 | eventChain = inEvent; |
599 | } else { |
600 | IOEventSource *event, *next; |
601 | |
602 | for (event = eventChain; (next = event->getNext()); event = next) { |
603 | ; |
604 | } |
605 | event->setNext(inEvent); |
606 | } |
607 | } else { |
608 | if (!passiveEventChain) { |
609 | passiveEventChain = inEvent; |
610 | } else { |
611 | IOEventSource *event, *next; |
612 | |
613 | for (event = passiveEventChain; (next = event->getNext()); event = next) { |
614 | ; |
615 | } |
616 | event->setNext(inEvent); |
617 | } |
618 | } |
619 | IOStatisticsAttachEventSource(); |
620 | } |
621 | break; |
622 | |
623 | case mRemoveEvent: |
624 | if (inEvent->getWorkLoop()) { |
625 | IOStatisticsDetachEventSource(); |
626 | |
			if (eventSourcePerformsWork(inEvent)) {
628 | if (eventChain == inEvent) { |
629 | eventChain = inEvent->getNext(); |
630 | } else { |
631 | IOEventSource *event, *next = NULL; |
632 | |
633 | event = eventChain; |
634 | if (event) { |
635 | while ((next = event->getNext()) && (next != inEvent)) { |
636 | event = next; |
637 | } |
638 | } |
639 | |
640 | if (!next) { |
641 | res = kIOReturnBadArgument; |
642 | break; |
643 | } |
644 | event->setNext(inEvent->getNext()); |
645 | } |
646 | } else { |
647 | if (passiveEventChain == inEvent) { |
648 | passiveEventChain = inEvent->getNext(); |
649 | } else { |
650 | IOEventSource *event, *next = NULL; |
651 | |
652 | event = passiveEventChain; |
653 | if (event) { |
654 | while ((next = event->getNext()) && (next != inEvent)) { |
655 | event = next; |
656 | } |
657 | } |
658 | |
659 | if (!next) { |
660 | res = kIOReturnBadArgument; |
661 | break; |
662 | } |
663 | event->setNext(inEvent->getNext()); |
664 | } |
665 | } |
666 | |
667 | inEvent->setWorkLoop(NULL); |
668 | inEvent->setNext(NULL); |
669 | inEvent->release(); |
			SETP(&fFlags, kLoopRestart);
671 | } |
672 | break; |
673 | |
674 | default: |
675 | return kIOReturnUnsupported; |
676 | } |
677 | |
678 | return res; |
679 | } |
680 | |
681 | bool |
682 | IOWorkLoop::eventSourcePerformsWork(IOEventSource *inEventSource) |
683 | { |
684 | bool result = true; |
685 | |
686 | /* |
687 | * The idea here is to see if the subclass of IOEventSource has overridden checkForWork(). |
688 | * The assumption is that if you override checkForWork(), you need to be |
689 | * active and not passive. |
690 | * |
691 | * We picked a known quantity controlG that does not override |
692 | * IOEventSource::checkForWork(), namely the IOCommandGate associated with |
693 | * the workloop to which this event source is getting attached. |
694 | * |
	 * We do a pointer comparison on the checkForWork() slot in inEventSource's
	 * vtable against the same slot in controlG's vtable. This works because
	 * IOCommandGate's slot for checkForWork() holds the address of
	 * IOEventSource::checkForWork().
	 *
	 * Think of OSMemberFunctionCast as yielding the value at the vtable slot for
	 * checkForWork() here. We're just testing to see whether it's the same or not.
702 | * |
703 | */ |
704 | |
705 | if (IOEventSource::kPassive & inEventSource->flags) { |
706 | result = false; |
707 | } else if (IOEventSource::kActive & inEventSource->flags) { |
708 | result = true; |
709 | } else if (controlG) { |
710 | void * ptr1; |
711 | void * ptr2; |
712 | |
713 | ptr1 = OSMemberFunctionCast(void*, inEventSource, &IOEventSource::checkForWork); |
714 | ptr2 = OSMemberFunctionCast(void*, controlG, &IOEventSource::checkForWork); |
715 | |
716 | if (ptr1 == ptr2) { |
717 | result = false; |
718 | } |
719 | } |
720 | |
721 | return result; |
722 | } |
723 | |
724 | void |
725 | IOWorkLoop::lockTime(void) |
726 | { |
727 | uint64_t time; |
728 | time = mach_absolute_time() - reserved->lockTime; |
729 | if (time > reserved->lockInterval) { |
		absolutetime_to_nanoseconds(time, &time);
		if (kTimeLockPanics & reserved->options) {
			panic("IOWorkLoop %p lock time %qd us", this, time / 1000ULL);
		} else {
			OSReportWithBacktrace("IOWorkLoop %p lock time %qd us", this, time / 1000ULL);
		}
735 | } |
736 | } |
737 | } |
738 | |
739 | void |
740 | IOWorkLoop::setMaximumLockTime(uint64_t interval, uint32_t options) |
741 | { |
	IORecursiveLockLock(gateLock);
	reserved->lockInterval = interval;
	reserved->options = (reserved->options & ~kTimeLockPanics) | (options & kTimeLockPanics);
	IORecursiveLockUnlock(gateLock);
746 | } |
747 | |