/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 */

#include <IOKit/perfcontrol/IOPerfControl.h>

#include <stdatomic.h>

#include <kern/thread_group.h>

#undef super
#define super OSObject
OSDefineMetaClassAndStructors(IOPerfControlClient, OSObject);

static IOPerfControlClient::IOPerfControlClientShared *_Atomic gIOPerfControlClientShared;

bool
IOPerfControlClient::init(IOService *driver, uint64_t maxWorkCapacity)
{
    // TODO: Remove this limit and implement dynamic table growth if workloads are found that exceed this
    if (maxWorkCapacity > kMaxWorkTableNumEntries) {
        maxWorkCapacity = kMaxWorkTableNumEntries;
    }

    if (!super::init()) {
        return false;
    }

    shared = atomic_load_explicit(&gIOPerfControlClientShared, memory_order_acquire);
    if (shared == nullptr) {
        IOPerfControlClient::IOPerfControlClientShared *expected = shared;
        shared = kalloc_type(IOPerfControlClientShared, Z_WAITOK);
        if (!shared) {
            return false;
        }

        atomic_init(&shared->maxDriverIndex, 0);

        shared->interface = PerfControllerInterface{
            .version = PERFCONTROL_INTERFACE_VERSION_NONE,
            .registerDevice = [](IOService *device) {
                return kIOReturnSuccess;
            },
            .unregisterDevice = [](IOService *device) {
                return kIOReturnSuccess;
            },
            .workCanSubmit = [](IOService *device, PerfControllerInterface::WorkState *state, WorkSubmitArgs *args) {
                return false;
            },
            .workSubmit = [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkSubmitArgs *args) {
            },
            .workBegin = [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkBeginArgs *args) {
            },
            .workEnd = [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkEndArgs *args, bool done) {
            },
            .workUpdate = [](IOService *device, uint64_t token, PerfControllerInterface::WorkState *state, WorkUpdateArgs *args) {
            },
        };

        shared->interfaceLock = IOLockAlloc();
        if (!shared->interfaceLock) {
            goto shared_init_error;
        }

        shared->deviceRegistrationList = OSSet::withCapacity(4);
        if (!shared->deviceRegistrationList) {
            goto shared_init_error;
        }

        if (!atomic_compare_exchange_strong_explicit(&gIOPerfControlClientShared, &expected, shared, memory_order_acq_rel,
            memory_order_acquire)) {
            IOLockFree(shared->interfaceLock);
            shared->deviceRegistrationList->release();
            kfree_type(IOPerfControlClientShared, shared);
            shared = expected;
        }
    }
    workTable = NULL;
    workTableLock = NULL;

    // Note: driverIndex is not guaranteed to be unique if maxDriverIndex wraps around. It is intended for debugging only.
    driverIndex = atomic_fetch_add_explicit(&shared->maxDriverIndex, 1, memory_order_relaxed) + 1;

    // + 1 since index 0 is unused for kIOPerfControlClientWorkUntracked
    workTableLength = maxWorkCapacity + 1;
    assertf(workTableLength <= kWorkTableMaxSize, "%zu exceeds max allowed capacity of %zu", workTableLength, kWorkTableMaxSize);
    if (maxWorkCapacity > 0) {
        workTable = kalloc_type(WorkTableEntry, workTableLength, Z_WAITOK_ZERO);
        if (!workTable) {
            goto error;
        }
        workTableNextIndex = 1;

        workTableLock = IOSimpleLockAlloc();
        if (!workTableLock) {
            goto error;
        }
    }

    bzero(&clientData, sizeof(clientData));

    return true;

error:
    if (workTable) {
        kfree_type(WorkTableEntry, workTableLength, workTable);
        workTable = NULL;
    }
    if (workTableLock) {
        IOSimpleLockFree(workTableLock);
        workTableLock = NULL;
    }
    return false;
shared_init_error:
    if (shared) {
        if (shared->interfaceLock) {
            IOLockFree(shared->interfaceLock);
        }
        if (shared->deviceRegistrationList) {
            shared->deviceRegistrationList->release();
        }
        kfree_type(IOPerfControlClientShared, shared);
        shared = nullptr;
    }
    return false;
}

void
IOPerfControlClient::free()
{
    if (workTable) {
        kfree_type(WorkTableEntry, workTableLength, workTable);
    }
    if (workTableLock) {
        IOSimpleLockFree(workTableLock);
    }
    super::free();
}

IOPerfControlClient *
IOPerfControlClient::copyClient(IOService *driver, uint64_t maxWorkCapacity)
{
    IOPerfControlClient *client = new IOPerfControlClient;
    if (!client || !client->init(driver, maxWorkCapacity)) {
        panic("could not create IOPerfControlClient");
    }
    return client;
}

/* Convert the per-driver token into a globally unique token for the performance
 * controller's consumption. This is achieved by setting the driver's unique
 * index onto the high order bits. The performance controller is shared between
 * all drivers and must track all instances separately, while each driver has
 * its own token table, so this step is needed to avoid token collisions between
 * drivers.
 */
inline uint64_t
IOPerfControlClient::tokenToGlobalUniqueToken(uint64_t token)
{
    return token | (static_cast<uint64_t>(driverIndex) << kWorkTableIndexBits);
}
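
/* Illustrative example (not part of the build): assuming kWorkTableIndexBits
 * is 24, two clients with driverIndex 2 and 3 map a per-driver token of 5 to
 * (2ull << 24) | 5 and (3ull << 24) | 5 respectively, so the shared
 * performance controller sees distinct tokens even though both drivers
 * allocated token 5 from their own tables.
 */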

/* With this implementation, tokens returned to the driver differ from tokens
 * passed to the performance controller. This implementation has the nice
 * property that tokens returned to the driver will always be between 1 and
 * the value of maxWorkCapacity passed by the driver to copyClient. The tokens
 * the performance controller sees will match on the lower order bits and have
 * the driver index set on the high order bits.
 */
uint64_t
IOPerfControlClient::allocateToken(thread_group *thread_group)
{
    uint64_t token = kIOPerfControlClientWorkUntracked;

#if CONFIG_THREAD_GROUPS
    auto s = IOSimpleLockLockDisableInterrupt(workTableLock);

    uint64_t num_tries = 0;
    size_t index = workTableNextIndex;
    // - 1 since entry 0 is for kIOPerfControlClientWorkUntracked
    while (num_tries < workTableLength - 1) {
        if (workTable[index].thread_group == nullptr) {
            thread_group_retain(thread_group);
            workTable[index].thread_group = thread_group;
            token = index;
            // next integer between 1 and workTableLength - 1
            workTableNextIndex = (index % (workTableLength - 1)) + 1;
            break;
        }
        // next integer between 1 and workTableLength - 1
        index = (index % (workTableLength - 1)) + 1;
        num_tries += 1;
    }
#if (DEVELOPMENT || DEBUG)
    if (token == kIOPerfControlClientWorkUntracked) {
        /* When investigating a panic here, first check that the driver is not leaking tokens.
         * If the driver is not leaking tokens and the maximum is less than kMaxWorkTableNumEntries,
         * the driver should be modified to pass a larger value to copyClient.
         * If the driver is not leaking tokens and the maximum is equal to kMaxWorkTableNumEntries,
         * this code will have to be modified to support dynamic table growth to support larger
         * numbers of tokens.
         */
        panic("Tokens allocated for this device exceeded maximum of %zu.",
            workTableLength - 1); // - 1 since entry 0 is for kIOPerfControlClientWorkUntracked
    }
#endif

    IOSimpleLockUnlockEnableInterrupt(workTableLock, s);
#endif

    return token;
}
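
/* Illustrative example (not part of the build): with maxWorkCapacity == 4,
 * workTableLength is 5 and valid tokens are 1..4 (slot 0 is reserved for
 * kIOPerfControlClientWorkUntracked). The advance step
 *
 *     index = (index % (workTableLength - 1)) + 1;
 *
 * walks 1 -> 2 -> 3 -> 4 -> 1, skipping slot 0, so at most
 * workTableLength - 1 probes visit every allocatable slot exactly once.
 */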

void
IOPerfControlClient::deallocateToken(uint64_t token)
{
#if CONFIG_THREAD_GROUPS
    assertf(token != kIOPerfControlClientWorkUntracked, "Attempt to deallocate token kIOPerfControlClientWorkUntracked\n");
    assertf(token < workTableLength, "Attempt to deallocate token %llu which is out of bounds for the table size of %zu\n", token, workTableLength);
    auto s = IOSimpleLockLockDisableInterrupt(workTableLock);

    auto &entry = workTable[token];
    auto *thread_group = entry.thread_group;
    bzero(&entry, sizeof(entry));
    workTableNextIndex = token;

    IOSimpleLockUnlockEnableInterrupt(workTableLock, s);

    // This can call into the performance controller if the last reference is dropped here. Are we sure
    // the driver isn't holding any locks? If not, we may want to async this to another context.
    thread_group_release(thread_group);
#endif
}

IOPerfControlClient::WorkTableEntry *
IOPerfControlClient::getEntryForToken(uint64_t token)
{
    if (token == kIOPerfControlClientWorkUntracked) {
        return nullptr;
    }

    if (token >= workTableLength) {
        panic("Invalid work token (%llu): index out of bounds.", token);
    }

    WorkTableEntry *entry = &workTable[token];
    assertf(entry->thread_group, "Invalid work token: %llu", token);
    return entry;
}

void
IOPerfControlClient::markEntryStarted(uint64_t token, bool started)
{
    if (token == kIOPerfControlClientWorkUntracked) {
        return;
    }

    if (token >= workTableLength) {
        panic("Invalid work token (%llu): index out of bounds.", token);
    }

    workTable[token].started = started;
}

#if CONFIG_THREAD_GROUPS

static struct thread_group *
threadGroupForDextService(IOService *device)
{
    assert(device);

    if (!device->hasUserServer()) {
        return NULL;
    }

    // For devices associated with a dext driver, this must be called from dext
    // context to ensure that the thread_group reference is valid.
    thread_t thread = current_thread();
    assert(get_threadtask(thread) != kernel_task);
    struct thread_group *thread_group = thread_group_get(thread);
    assert(thread_group != nullptr);
    return thread_group;
}

#endif /* CONFIG_THREAD_GROUPS */

IOReturn
IOPerfControlClient::registerDevice(IOService *driver, IOService *device)
{
    IOReturn ret = kIOReturnSuccess;
#if CONFIG_THREAD_GROUPS
    IOLockLock(shared->interfaceLock);

    clientData.device = device;

    if (device) {
        struct thread_group *dext_thread_group = threadGroupForDextService(device);
        if (dext_thread_group) {
            if (clientData.driverState.has_target_thread_group) {
                panic("driverState has already been initialized");
            }
            clientData.driverState.has_target_thread_group = true;
            clientData.driverState.target_thread_group_id = thread_group_get_id(dext_thread_group);
            clientData.driverState.target_thread_group_data = thread_group_get_machine_data(dext_thread_group);

            clientData.target_thread_group = dext_thread_group;
            thread_group_retain(dext_thread_group);
        }
    }

    if (shared->interface.version >= PERFCONTROL_INTERFACE_VERSION_3) {
        ret = shared->interface.registerDriverDevice(driver, device, &clientData.driverState);
    } else if (shared->interface.version >= PERFCONTROL_INTERFACE_VERSION_1) {
        ret = shared->interface.registerDevice(device);
    } else {
        shared->deviceRegistrationList->setObject(this);
    }

    IOLockUnlock(shared->interfaceLock);
#endif
    return ret;
}

void
IOPerfControlClient::unregisterDevice(IOService *driver, IOService *device)
{
#if CONFIG_THREAD_GROUPS
    IOLockLock(shared->interfaceLock);

    if (shared->interface.version >= PERFCONTROL_INTERFACE_VERSION_3) {
        shared->interface.unregisterDriverDevice(driver, device, &clientData.driverState);
    } else if (shared->interface.version >= PERFCONTROL_INTERFACE_VERSION_1) {
        shared->interface.unregisterDevice(device);
    } else {
        shared->deviceRegistrationList->removeObject(this);
    }

    if (clientData.driverState.has_target_thread_group) {
        thread_group_release(clientData.target_thread_group);
        clientData.target_thread_group = nullptr;

        clientData.driverState.has_target_thread_group = false;
        clientData.driverState.target_thread_group_id = ~0ull;
        clientData.driverState.target_thread_group_data = nullptr;
    }

    clientData.device = nullptr;

    IOLockUnlock(shared->interfaceLock);
#endif
}

uint64_t
IOPerfControlClient::workSubmit(IOService *device, WorkSubmitArgs *args)
{
#if CONFIG_THREAD_GROUPS
    auto *thread_group = thread_group_get(current_thread());
    if (!thread_group) {
        return kIOPerfControlClientWorkUntracked;
    }

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(thread_group),
        .thread_group_data = thread_group_get_machine_data(thread_group),
        .work_data = nullptr,
        .work_data_size = 0,
        .started = false,
        .driver_state = &clientData.driverState
    };
    if (!shared->interface.workCanSubmit(device, &state, args)) {
        return kIOPerfControlClientWorkUntracked;
    }

    uint64_t token = allocateToken(thread_group);
    if (token != kIOPerfControlClientWorkUntracked) {
        state.work_data = &workTable[token].perfcontrol_data;
        state.work_data_size = sizeof(workTable[token].perfcontrol_data);
        shared->interface.workSubmit(device, tokenToGlobalUniqueToken(token), &state, args);
    }
    return token;
#else
    return kIOPerfControlClientWorkUntracked;
#endif
}

uint64_t
IOPerfControlClient::workSubmitAndBegin(IOService *device, WorkSubmitArgs *submitArgs, WorkBeginArgs *beginArgs)
{
#if CONFIG_THREAD_GROUPS
    auto *thread_group = thread_group_get(current_thread());
    if (!thread_group) {
        return kIOPerfControlClientWorkUntracked;
    }

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(thread_group),
        .thread_group_data = thread_group_get_machine_data(thread_group),
        .work_data = nullptr,
        .work_data_size = 0,
        .started = false,
        .driver_state = &clientData.driverState
    };
    if (!shared->interface.workCanSubmit(device, &state, submitArgs)) {
        return kIOPerfControlClientWorkUntracked;
    }

    uint64_t token = allocateToken(thread_group);
    if (token != kIOPerfControlClientWorkUntracked) {
        auto &entry = workTable[token];
        state.work_data = &entry.perfcontrol_data;
        state.work_data_size = sizeof(workTable[token].perfcontrol_data);
        shared->interface.workSubmit(device, tokenToGlobalUniqueToken(token), &state, submitArgs);
        state.started = true;
        shared->interface.workBegin(device, tokenToGlobalUniqueToken(token), &state, beginArgs);
        markEntryStarted(token, true);
    }
    return token;
#else
    return kIOPerfControlClientWorkUntracked;
#endif
}

void
IOPerfControlClient::workBegin(IOService *device, uint64_t token, WorkBeginArgs *args)
{
#if CONFIG_THREAD_GROUPS
    WorkTableEntry *entry = getEntryForToken(token);
    if (entry == nullptr) {
        return;
    }

    assertf(!entry->started, "Work for token %llu was already started", token);

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(entry->thread_group),
        .thread_group_data = thread_group_get_machine_data(entry->thread_group),
        .work_data = &entry->perfcontrol_data,
        .work_data_size = sizeof(entry->perfcontrol_data),
        .started = true,
        .driver_state = &clientData.driverState
    };
    shared->interface.workBegin(device, tokenToGlobalUniqueToken(token), &state, args);
    markEntryStarted(token, true);
#endif
}

void
IOPerfControlClient::workEnd(IOService *device, uint64_t token, WorkEndArgs *args, bool done)
{
#if CONFIG_THREAD_GROUPS
    WorkTableEntry *entry = getEntryForToken(token);
    if (entry == nullptr) {
        return;
    }

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(entry->thread_group),
        .thread_group_data = thread_group_get_machine_data(entry->thread_group),
        .work_data = &entry->perfcontrol_data,
        .work_data_size = sizeof(entry->perfcontrol_data),
        .started = entry->started,
        .driver_state = &clientData.driverState
    };
    shared->interface.workEnd(device, tokenToGlobalUniqueToken(token), &state, args, done);

    if (done) {
        deallocateToken(token);
    } else {
        markEntryStarted(token, false);
    }
#endif
}
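
/* A minimal sketch (not part of the build) of the token-based flow from a
 * driver's perspective. The driver object and argument values here are
 * hypothetical; the IOPerfControlClient calls are the ones defined in this
 * file.
 *
 *     IOPerfControlClient *perf = IOPerfControlClient::copyClient(this, 64);
 *     perf->registerDevice(this, device);
 *
 *     WorkSubmitArgs submitArgs = {};   // driver-filled timing fields omitted
 *     uint64_t token = perf->workSubmit(device, &submitArgs);
 *
 *     WorkBeginArgs beginArgs = {};
 *     perf->workBegin(device, token, &beginArgs);
 *
 *     WorkEndArgs endArgs = {};
 *     perf->workEnd(device, token, &endArgs, true); // done == true frees the token
 *
 * A kIOPerfControlClientWorkUntracked token is safe to pass back in: workBegin
 * and workEnd treat it as a no-op.
 */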

static _Atomic uint64_t unique_work_context_id = 1ull;

class IOPerfControlWorkContext : public OSObject
{
    OSDeclareDefaultStructors(IOPerfControlWorkContext);

public:
    uint64_t id;
    struct thread_group *thread_group;
    bool started;
    uint8_t perfcontrol_data[32];

    bool init() override;
    void reset();
    void free() override;
};

OSDefineMetaClassAndStructors(IOPerfControlWorkContext, OSObject);

bool
IOPerfControlWorkContext::init()
{
    if (!super::init()) {
        return false;
    }
    id = atomic_fetch_add_explicit(&unique_work_context_id, 1, memory_order_relaxed) + 1;
    reset();
    return true;
}

void
IOPerfControlWorkContext::reset()
{
    thread_group = nullptr;
    started = false;
    bzero(perfcontrol_data, sizeof(perfcontrol_data));
}

void
IOPerfControlWorkContext::free()
{
    assertf(thread_group == nullptr, "IOPerfControlWorkContext ID %llu being released without calling workEnd!\n", id);
    super::free();
}

OSObject *
IOPerfControlClient::copyWorkContext()
{
    IOPerfControlWorkContext *context = new IOPerfControlWorkContext;

    if (context == nullptr) {
        return nullptr;
    }

    if (!context->init()) {
        context->free();
        return nullptr;
    }

    return context;
}

bool
IOPerfControlClient::workSubmitAndBeginWithContext(IOService *device, OSObject *context, WorkSubmitArgs *submitArgs, WorkBeginArgs *beginArgs)
{
#if CONFIG_THREAD_GROUPS

    if (workSubmitWithContext(device, context, submitArgs) == false) {
        return false;
    }

    IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context);

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(work_context->thread_group),
        .thread_group_data = thread_group_get_machine_data(work_context->thread_group),
        .work_data = &work_context->perfcontrol_data,
        .work_data_size = sizeof(work_context->perfcontrol_data),
        .started = true,
        .driver_state = &clientData.driverState
    };

    shared->interface.workBegin(device, work_context->id, &state, beginArgs);

    work_context->started = true;

    return true;
#else
    return false;
#endif
}

bool
IOPerfControlClient::workSubmitWithContext(IOService *device, OSObject *context, WorkSubmitArgs *args)
{
#if CONFIG_THREAD_GROUPS
    IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context);

    if (work_context == nullptr) {
        return false;
    }

    auto *thread_group = thread_group_get(current_thread());
    assert(thread_group != nullptr);

    assertf(!work_context->started, "IOPerfControlWorkContext ID %llu was already started", work_context->id);
    assertf(work_context->thread_group == nullptr, "IOPerfControlWorkContext ID %llu has already taken a refcount on TG 0x%p\n", work_context->id, (void *)(work_context->thread_group));

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(thread_group),
        .thread_group_data = thread_group_get_machine_data(thread_group),
        .work_data = nullptr,
        .work_data_size = 0,
        .started = false,
        .driver_state = &clientData.driverState
    };
    if (!shared->interface.workCanSubmit(device, &state, args)) {
        return false;
    }

    work_context->thread_group = thread_group_retain(thread_group);

    state.work_data = &work_context->perfcontrol_data;
    state.work_data_size = sizeof(work_context->perfcontrol_data);

    shared->interface.workSubmit(device, work_context->id, &state, args);

    return true;
#else
    return false;
#endif
}

void
IOPerfControlClient::workUpdateWithContext(IOService *device, OSObject *context, WorkUpdateArgs *args)
{
#if CONFIG_THREAD_GROUPS
    IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context);

    if (work_context == nullptr) {
        return;
    }

    if (work_context->thread_group == nullptr) {
        // This Work Context has not taken a refcount on a TG
        return;
    }

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(work_context->thread_group),
        .thread_group_data = thread_group_get_machine_data(work_context->thread_group),
        .work_data = &work_context->perfcontrol_data,
        .work_data_size = sizeof(work_context->perfcontrol_data),
        .driver_state = &clientData.driverState
    };
    shared->interface.workUpdate(device, work_context->id, &state, args);
#endif
}

void
IOPerfControlClient::workBeginWithContext(IOService *device, OSObject *context, WorkBeginArgs *args)
{
#if CONFIG_THREAD_GROUPS
    IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context);

    if (work_context == nullptr) {
        return;
    }

    if (work_context->thread_group == nullptr) {
        // This Work Context has not taken a refcount on a TG
        return;
    }

    assertf(!work_context->started, "IOPerfControlWorkContext %llu was already started", work_context->id);

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(work_context->thread_group),
        .thread_group_data = thread_group_get_machine_data(work_context->thread_group),
        .work_data = &work_context->perfcontrol_data,
        .work_data_size = sizeof(work_context->perfcontrol_data),
        .started = true,
        .driver_state = &clientData.driverState
    };
    shared->interface.workBegin(device, work_context->id, &state, args);

    work_context->started = true;
#endif
}

void
IOPerfControlClient::workEndWithContext(IOService *device, OSObject *context, WorkEndArgs *args, bool done)
{
#if CONFIG_THREAD_GROUPS
    IOPerfControlWorkContext *work_context = OSDynamicCast(IOPerfControlWorkContext, context);

    if (work_context == nullptr) {
        return;
    }

    if (work_context->thread_group == nullptr) {
        return;
    }

    PerfControllerInterface::WorkState state{
        .thread_group_id = thread_group_get_id(work_context->thread_group),
        .thread_group_data = thread_group_get_machine_data(work_context->thread_group),
        .work_data = &work_context->perfcontrol_data,
        .work_data_size = sizeof(work_context->perfcontrol_data),
        .started = work_context->started,
        .driver_state = &clientData.driverState
    };

    shared->interface.workEnd(device, work_context->id, &state, args, done);

    if (done) {
        thread_group_release(work_context->thread_group);
        work_context->reset();
    } else {
        work_context->started = false;
    }

    return;
#else
    return;
#endif
}
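
/* A minimal sketch (not part of the build) of the context-based flow, which
 * avoids the fixed-size token table. The driver-side names are hypothetical;
 * the IOPerfControlClient calls are the ones defined above.
 *
 *     OSObject *ctx = perf->copyWorkContext(); // returned retained; reusable
 *
 *     // On the submitting thread, so the context captures its thread group:
 *     if (perf->workSubmitAndBeginWithContext(device, ctx, &submitArgs, &beginArgs)) {
 *         // ... hardware runs the work ...
 *         perf->workEndWithContext(device, ctx, &endArgs, true); // done: drops the TG ref
 *     }
 *
 *     ctx->release(); // only after a final workEnd*, or free() will assert
 */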

IOReturn
IOPerfControlClient::registerPerformanceController(PerfControllerInterface *pci)
{
    IOReturn result = kIOReturnError;

    IOLockLock(shared->interfaceLock);

    if (shared->interface.version == PERFCONTROL_INTERFACE_VERSION_NONE) {
        shared->interface.version = pci->version;

        if (pci->version >= PERFCONTROL_INTERFACE_VERSION_1) {
            assert(pci->registerDevice && pci->unregisterDevice && pci->workCanSubmit && pci->workSubmit && pci->workBegin && pci->workEnd);
            shared->interface.registerDevice = pci->registerDevice;
            shared->interface.unregisterDevice = pci->unregisterDevice;
            shared->interface.workCanSubmit = pci->workCanSubmit;
            shared->interface.workSubmit = pci->workSubmit;
            shared->interface.workBegin = pci->workBegin;
            shared->interface.workEnd = pci->workEnd;
        }

        if (pci->version >= PERFCONTROL_INTERFACE_VERSION_2) {
            if (pci->workUpdate != nullptr) {
                shared->interface.workUpdate = pci->workUpdate;
            }
        }

        if (pci->version >= PERFCONTROL_INTERFACE_VERSION_3) {
            assert(pci->registerDriverDevice && pci->unregisterDriverDevice);
            shared->interface.registerDriverDevice = pci->registerDriverDevice;
            shared->interface.unregisterDriverDevice = pci->unregisterDriverDevice;
        }

        result = kIOReturnSuccess;

        OSObject *obj;
        while ((obj = shared->deviceRegistrationList->getAnyObject())) {
            IOPerfControlClient *client = OSDynamicCast(IOPerfControlClient, obj);
            IOPerfControlClientData *clientData = client->getClientData();
            if (clientData && clientData->device) {
                if (pci->version >= PERFCONTROL_INTERFACE_VERSION_3) {
                    pci->registerDriverDevice(clientData->device->getProvider(), clientData->device, &(clientData->driverState));
                } else if (pci->version >= PERFCONTROL_INTERFACE_VERSION_1) {
                    pci->registerDevice(clientData->device);
                }
            }
            shared->deviceRegistrationList->removeObject(obj);
        }
    }

    IOLockUnlock(shared->interfaceLock);

    return result;
}

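/* A minimal sketch (not part of the build) of how a performance controller
 * might register itself. The callback bodies are hypothetical; the struct
 * fields and registerPerformanceController are as defined above.
 *
 *     PerfControllerInterface pci = {
 *         .version = PERFCONTROL_INTERFACE_VERSION_1,
 *         .registerDevice = ..., .unregisterDevice = ...,
 *         .workCanSubmit = ..., .workSubmit = ...,
 *         .workBegin = ..., .workEnd = ...,
 *     };
 *     client->registerPerformanceController(&pci);
 *
 * Only the first registration succeeds (the shared interface version must
 * still be PERFCONTROL_INTERFACE_VERSION_NONE); any clients queued on
 * deviceRegistrationList before that point are registered with the new
 * controller and drained from the list.
 */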