typedef struct _IODataQueueEntry {
    uint32_t size;
    uint8_t  data[0];
} IODataQueueEntry;

#define DATA_QUEUE_ENTRY_HEADER_SIZE (sizeof(IODataQueueEntry))

typedef struct _IODataQueueMemory {
    volatile uint32_t head;
    volatile uint32_t tail;
    volatile uint8_t  needServicedCallback;
    volatile uint8_t  _resv[119];
    IODataQueueEntry  queue[0];
} IODataQueueMemory;
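
// Layout note (added for exposition, not in the original source): the header
// fields above total 128 bytes (4 + 4 + 1 + 119 reserved), so queue[] begins
// at a cache-line-aligned offset. head and tail are byte offsets into
// queue[]; head == tail means the queue is empty. Roughly:
//
//   offset 0      4      8           9..127    128
//          | head | tail | serviced | _resv  | entry | entry | ... free ... |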

struct IODataQueueDispatchSource_IVars {
    IODataQueueMemory * dataQueue;
    IODataQueueDispatchSource * source;
//  IODispatchQueue * queue;
    IOMemoryDescriptor * memory;
    OSAction * dataAvailableAction;
    OSAction * dataServicedAction;
    uint64_t options;
    uint32_t queueByteCount;

#if !KERNEL
    bool enable;
    bool canceled;
#endif
};

bool
IODataQueueDispatchSource::init()
{
    if (!super::init()) {
        return false;
    }

    ivars = IONewZero(IODataQueueDispatchSource_IVars, 1);
    if (!ivars) {
        return false;
    }
    ivars->source = this;

#if !KERNEL
    kern_return_t ret;

    ret = CopyMemory(&ivars->memory);
    assert(kIOReturnSuccess == ret);

    uint64_t address;
    uint64_t length;

    // Map the queue memory created by the kernel object into this process.
    ret = ivars->memory->Map(0, 0, 0, 0, &address, &length);
    assert(kIOReturnSuccess == ret);
    ivars->dataQueue = (typeof(ivars->dataQueue))(uintptr_t) address;
    ivars->queueByteCount = (uint32_t) length;
#endif

    return true;
}
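
// Note (added for exposition): only the user-space instance maps the queue
// here; on the kernel side, Create_Impl() below allocates the shared buffer
// and points ivars->dataQueue at it directly.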

kern_return_t
IODataQueueDispatchSource::CheckForWork_Impl(
    const IORPC rpc,
    bool synchronous)
{
    IOReturn ret = kIOReturnNotReady;

    return ret;
}

#if KERNEL

kern_return_t
IODataQueueDispatchSource::Create_Impl(
    uint64_t queueByteCount,
    IODispatchQueue * queue,
    IODataQueueDispatchSource ** source)
{
    IODataQueueDispatchSource * inst;
    IOBufferMemoryDescriptor * bmd;

    if (3 & queueByteCount) {
        return kIOReturnBadArgument;
    }
    if (queueByteCount > UINT_MAX) {
        return kIOReturnBadArgument;
    }
    inst = OSTypeAlloc(IODataQueueDispatchSource);
    if (!inst) {
        return kIOReturnNoMemory;
    }
    if (!inst->init()) {
        inst->release();
        return kIOReturnError;
    }

    bmd = IOBufferMemoryDescriptor::withOptions(
        kIODirectionOutIn | kIOMemoryKernelUserShared,
        queueByteCount, page_size);
    if (!bmd) {
        inst->release();
        return kIOReturnNoMemory;
    }
    inst->ivars->memory = bmd;
    inst->ivars->queueByteCount = ((uint32_t) queueByteCount);
    inst->ivars->options = 0;
    inst->ivars->dataQueue = (typeof(inst->ivars->dataQueue)) bmd->getBytesNoCopy();

    *source = inst;

    return kIOReturnSuccess;
}
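
// Illustrative kernel-side call (an assumption, not from this file): a driver
// would typically create the source against its own dispatch queue, e.g.
//
//   IODataQueueDispatchSource * src = NULL;
//   kern_return_t ret = IODataQueueDispatchSource::Create(16 * 4096, queue, &src);
//   // On success, CopyMemory() yields the descriptor a user client can map.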

kern_return_t
IODataQueueDispatchSource::CopyMemory_Impl(
    IOMemoryDescriptor ** memory)
{
    kern_return_t ret;
    IOMemoryDescriptor * result;

    result = ivars->memory;
    if (result) {
        result->retain();
        ret = kIOReturnSuccess;
    } else {
        ret = kIOReturnNotReady;
    }
    *memory = result;

    return ret;
}

kern_return_t
IODataQueueDispatchSource::CopyDataAvailableHandler_Impl(
    OSAction ** action)
{
    kern_return_t ret;
    OSAction * result;

    result = ivars->dataAvailableAction;
    if (result) {
        result->retain();
        ret = kIOReturnSuccess;
    } else {
        ret = kIOReturnNotReady;
    }
    *action = result;

    return ret;
}

kern_return_t
IODataQueueDispatchSource::CopyDataServicedHandler_Impl(
    OSAction ** action)
{
    kern_return_t ret;
    OSAction * result;

    result = ivars->dataServicedAction;
    if (result) {
        result->retain();
        ret = kIOReturnSuccess;
    } else {
        ret = kIOReturnNotReady;
    }
    *action = result;

    return ret;
}

kern_return_t
IODataQueueDispatchSource::SetDataAvailableHandler_Impl(
    OSAction * action)
{
    IOReturn ret;
    OSAction * oldAction;

    oldAction = ivars->dataAvailableAction;
    if (oldAction && OSCompareAndSwapPtr(oldAction, NULL, &ivars->dataAvailableAction)) {
        oldAction->release();
    }
    if (action) {
        action->retain();
        ivars->dataAvailableAction = action;
        if (IsDataAvailable()) {
            DataAvailable(ivars->dataAvailableAction);
        }
    }
    ret = kIOReturnSuccess;

    return ret;
}

kern_return_t
IODataQueueDispatchSource::SetDataServicedHandler_Impl(
    OSAction * action)
{
    IOReturn ret;
    OSAction * oldAction;

    oldAction = ivars->dataServicedAction;
    if (oldAction && OSCompareAndSwapPtr(oldAction, NULL, &ivars->dataServicedAction)) {
        oldAction->release();
    }
    if (action) {
        action->retain();
        ivars->dataServicedAction = action;
    }
    ret = kIOReturnSuccess;

    return ret;
}

#endif /* KERNEL */

void
IODataQueueDispatchSource::SendDataAvailable(void)
{
    IOReturn ret;

    if (!ivars->dataAvailableAction) {
        ret = CopyDataAvailableHandler(&ivars->dataAvailableAction);
        if (kIOReturnSuccess != ret) {
            ivars->dataAvailableAction = NULL;
        }
    }
    if (ivars->dataAvailableAction) {
        DataAvailable(ivars->dataAvailableAction);
    }
}

void
IODataQueueDispatchSource::SendDataServiced(void)
{
    IOReturn ret;

    if (!ivars->dataServicedAction) {
        ret = CopyDataServicedHandler(&ivars->dataServicedAction);
        if (kIOReturnSuccess != ret) {
            ivars->dataServicedAction = NULL;
        }
    }
    if (ivars->dataServicedAction) {
        ivars->dataQueue->needServicedCallback = false;
        DataServiced(ivars->dataServicedAction);
    }
}
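
// Flow note (added for exposition): needServicedCallback implements
// back-pressure. EnqueueWithCoalesce() sets it when the queue overruns and
// fires DataAvailable; the next successful dequeue clears it above and fires
// DataServiced so the producer knows it can retry.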

kern_return_t
IODataQueueDispatchSource::SetEnableWithCompletion_Impl(
    bool enable,
    IODispatchSourceCancelHandler handler)
{
    IOReturn ret;

#if !KERNEL
    ivars->enable = enable;
#endif

    ret = kIOReturnSuccess;
    return ret;
}

void
IODataQueueDispatchSource::free()
{
    OSSafeReleaseNULL(ivars->memory);
    OSSafeReleaseNULL(ivars->dataAvailableAction);
    OSSafeReleaseNULL(ivars->dataServicedAction);
    IOSafeDeleteNULL(ivars, IODataQueueDispatchSource_IVars, 1);
    super::free();
}

kern_return_t
IODataQueueDispatchSource::Cancel_Impl(
    IODispatchSourceCancelHandler handler)
{
    if (handler) {
        handler();
    }
    return kIOReturnSuccess;
}

bool
IODataQueueDispatchSource::IsDataAvailable(void)
{
    IODataQueueMemory * dataQueue = ivars->dataQueue;

    return dataQueue && (dataQueue->head != dataQueue->tail);
}

kern_return_t
IODataQueueDispatchSource::Peek(IODataQueueClientDequeueEntryBlock callback)
{
    IODataQueueEntry * entry = NULL;
    IODataQueueMemory * dataQueue;
    uint32_t callerDataSize;
    uint32_t dataSize;
    uint32_t headOffset;
    uint32_t tailOffset;

    dataQueue = ivars->dataQueue;
    if (!dataQueue) {
        return kIOReturnNoMemory;
    }

    // Read head and tail with acquire barrier
    headOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry * head = NULL;
        uint32_t headSize = 0;
        uint32_t queueSize = ivars->queueByteCount;

        if (headOffset > queueSize) {
            return kIOReturnError;
        }

        head = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + headOffset);
        callerDataSize = head->size;
        if (os_add_overflow(3, callerDataSize, &headSize)) {
            return kIOReturnError;
        }
        headSize &= ~3U;

        // Check if there's enough room before the end of the queue for a header.
        // If there is room, check if there's enough room to hold the header and
        // the data.

        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // No room for the header or the data, wrap to the beginning of the queue.
            // Note: wrapping even with the UINT32_MAX checks, as we have to support
            // a queueSize of UINT32_MAX.
            entry = dataQueue->queue;
            callerDataSize = entry->size;
            if (os_add_overflow(3, callerDataSize, &dataSize)) {
                return kIOReturnError;
            }
            dataSize &= ~3U;

            if ((dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return kIOReturnError;
            }

            callback(&entry->data, callerDataSize);
            return kIOReturnSuccess;
        } else {
            callback(&head->data, callerDataSize);
            return kIOReturnSuccess;
        }
    }

    return kIOReturnUnderrun;
}
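
// The recurring idiom above rounds a payload up to a 4-byte multiple with an
// explicit overflow check; a standalone sketch of the computation:
//
//   uint32_t rounded;
//   if (os_add_overflow(3, size, &rounded)) { return kIOReturnError; }
//   rounded &= ~3U;   // smallest multiple of 4 that is >= size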

kern_return_t
IODataQueueDispatchSource::Dequeue(IODataQueueClientDequeueEntryBlock callback)
{
    kern_return_t ret;
    bool sendDataServiced;

    sendDataServiced = false;
    ret = DequeueWithCoalesce(&sendDataServiced, callback);
    if (sendDataServiced) {
        SendDataServiced();
    }
    return ret;
}
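
// Illustrative consumer loop (an assumption; the block signature follows the
// DriverKit IODataQueueClientDequeueEntryBlock typedef):
//
//   while (kIOReturnSuccess == source->Dequeue(^(const void * data, size_t dataSize) {
//       // copy dataSize bytes out of data; the entry is reclaimed on return
//   })) {}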

kern_return_t
IODataQueueDispatchSource::DequeueWithCoalesce(bool * sendDataServiced,
    IODataQueueClientDequeueEntryBlock callback)
{
    IOReturn retVal = kIOReturnSuccess;
    IODataQueueEntry * entry = NULL;
    IODataQueueMemory * dataQueue;
    uint32_t callerDataSize;
    uint32_t dataSize = 0;
    uint32_t headOffset = 0;
    uint32_t tailOffset = 0;
    uint32_t newHeadOffset = 0;

    dataQueue = ivars->dataQueue;
    if (!dataQueue) {
        return kIOReturnNoMemory;
    }

    // Read head and tail with acquire barrier
    headOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
    tailOffset = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_ACQUIRE);

    if (headOffset != tailOffset) {
        IODataQueueEntry * head = NULL;
        uint32_t headSize = 0;
        uint32_t queueSize = ivars->queueByteCount;

        if (headOffset > queueSize) {
            return kIOReturnError;
        }

        head = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + headOffset);
        callerDataSize = head->size;
        if (os_add_overflow(3, callerDataSize, &headSize)) {
            return kIOReturnError;
        }
        headSize &= ~3U;

        // We wrapped around to the beginning, so read from there:
        // either there was not even room for the header
        if ((headOffset > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize) ||
            // or there was room for the header, but not for the data
            (headOffset + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headSize) ||
            (headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
            // Note: we have to wrap to the beginning even with the UINT32_MAX checks
            // because we have to support a queueSize of UINT32_MAX.
            entry = dataQueue->queue;
            callerDataSize = entry->size;

            if (os_add_overflow(callerDataSize, 3, &dataSize)) {
                return kIOReturnError;
            }
            dataSize &= ~3U;
            if ((dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE > queueSize)) {
                return kIOReturnError;
            }
            newHeadOffset = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        } else {
            // else the entry fits in place at the current head offset
            entry = head;

            if ((headSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
                (headSize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX - headOffset) ||
                (headSize + DATA_QUEUE_ENTRY_HEADER_SIZE + headOffset > queueSize)) {
                return kIOReturnError;
            }
            newHeadOffset = headOffset + headSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
        }
    } else {
        // empty queue
        if (dataQueue->needServicedCallback) {
            *sendDataServiced = true;
        }
        return kIOReturnUnderrun;
    }

    callback(&entry->data, callerDataSize);
    if (dataQueue->needServicedCallback) {
        *sendDataServiced = true;
    }

    __c11_atomic_store((_Atomic uint32_t *)&dataQueue->head, newHeadOffset, __ATOMIC_RELEASE);

    if (newHeadOffset == tailOffset) {
        //
        // If we are making the queue empty, then we need to make sure
        // that either the enqueuer notices, or we notice the enqueue
        // that raced with our making of the queue empty.
        //
        __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
    }

    return retVal;
}

kern_return_t
IODataQueueDispatchSource::Enqueue(uint32_t callerDataSize,
    IODataQueueClientEnqueueEntryBlock callback)
{
    kern_return_t ret;
    bool sendDataAvailable;

    sendDataAvailable = false;
    ret = EnqueueWithCoalesce(callerDataSize, &sendDataAvailable, callback);
    if (sendDataAvailable) {
        SendDataAvailable();
    }
    return ret;
}
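
// Illustrative producer (an assumption; the block signature follows the
// DriverKit IODataQueueClientEnqueueEntryBlock typedef, and MyEvent is
// hypothetical):
//
//   MyEvent event = { ... };
//   kern_return_t ret = source->Enqueue((uint32_t) sizeof(event),
//       ^(void * data, size_t dataSize) {
//           memcpy(data, &event, dataSize);   // dataSize == sizeof(event) here
//       });
//   // kIOReturnOverrun means the queue was full; a DataServiced callback
//   // will follow once a consumer drains it.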

kern_return_t
IODataQueueDispatchSource::EnqueueWithCoalesce(uint32_t callerDataSize,
    bool * sendDataAvailable,
    IODataQueueClientEnqueueEntryBlock callback)
{
    IODataQueueMemory * dataQueue;
    IODataQueueEntry * entry;
    uint32_t head;
    uint32_t tail;
    uint32_t newTail;
    uint32_t dataSize;
    uint32_t queueSize;
    uint32_t entrySize;
    IOReturn retVal = kIOReturnSuccess;

    dataQueue = ivars->dataQueue;
    if (!dataQueue) {
        return kIOReturnNoMemory;
    }
    queueSize = ivars->queueByteCount;

    // Force a single read of head and tail
    tail = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_RELAXED);
    head = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_ACQUIRE);

    // Round the size up to a 4-byte multiple, checking for overflow
    if (os_add_overflow(callerDataSize, 3, &dataSize)) {
        return kIOReturnOverrun;
    }
    dataSize &= ~3U;

    // Check for overflow of entrySize
    if (os_add_overflow(DATA_QUEUE_ENTRY_HEADER_SIZE, dataSize, &entrySize)) {
        return kIOReturnOverrun;
    }

    // Check for underflow of (queueSize - tail)
    if (queueSize < tail || queueSize < head) {
        return kIOReturnUnderrun;
    }

    newTail = tail;
    if (tail >= head) {
        // Is there enough room at the end for the entry?
        if ((entrySize <= (UINT32_MAX - tail)) &&
            ((tail + entrySize) <= queueSize)) {
            entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail);

            callback(&entry->data, callerDataSize);

            entry->size = callerDataSize;

            // The tail can be out of bound when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to queueSize inclusive.

            newTail = tail + entrySize;
        } else if (head > entrySize) { // Is there enough room at the beginning?
            entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue);

            callback(&entry->data, callerDataSize);

            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            entry->size = callerDataSize;

            // We need to make sure that there is enough room to set the size before
            // doing this. The user client checks for this and will look for the size
            // at the beginning if there isn't room for it at the end.

            if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
                ((IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail))->size = dataSize;
            }

            newTail = entrySize;
        } else {
            retVal = kIOReturnOverrun; // queue is full
        }
    } else {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ((head - tail) > entrySize) {
            entry = (IODataQueueEntry *)((uintptr_t)dataQueue->queue + tail);

            callback(&entry->data, callerDataSize);

            entry->size = callerDataSize;

            newTail = tail + entrySize;
        } else {
            retVal = kIOReturnOverrun; // queue is full
        }
    }

    // Send notification (via mach message) that data is available.

    if (retVal == kIOReturnSuccess) {
        // Publish the data we just enqueued
        __c11_atomic_store((_Atomic uint32_t *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

        if (tail != head) {
            //
            // The memory barrier below pairs with the one in dequeue
            // so that either our store to the tail cannot be missed by
            // the next dequeue attempt, or we will observe the dequeuer
            // making the queue empty.
            //
            // Of course, if we already think the queue is empty,
            // there's no point paying this extra cost.
            //
            __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
            head = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_RELAXED);
        }

        if (tail == head) {
            // Send notification that data is now available.
            *sendDataAvailable = true;
            retVal = kIOReturnSuccess;
        }
    } else if (retVal == kIOReturnOverrun) {
        // Ask to be notified of Dequeue()
        dataQueue->needServicedCallback = true;
        *sendDataAvailable = true;
    }

    return retVal;
}
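
// Protocol note (added for exposition): Enqueue* and Dequeue* form a
// single-producer/single-consumer ring. Each side stores only its own index
// (tail for the producer, head for the consumer) with release semantics and
// loads the other's with acquire. The two SEQ_CST fences pair across the
// empty <-> non-empty transition so that either the producer observes the
// queue becoming empty or the consumer observes the new tail; that is what
// keeps a DataAvailable notification from being lost.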

kern_return_t
IODataQueueDispatchSource::CanEnqueueData(uint32_t callerDataSize)
{
    return CanEnqueueData(callerDataSize, 1);
}

kern_return_t
IODataQueueDispatchSource::CanEnqueueData(uint32_t callerDataSize, uint32_t dataCount)
{
    IODataQueueMemory * dataQueue;
    uint32_t head;
    uint32_t tail;
    uint32_t dataSize;
    uint32_t queueSize;
    uint32_t entrySize;

    dataQueue = ivars->dataQueue;
    if (!dataQueue) {
        return kIOReturnNoMemory;
    }
    queueSize = ivars->queueByteCount;

    // Force a single read of head and tail
    tail = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->tail, __ATOMIC_RELAXED);
    head = __c11_atomic_load((_Atomic uint32_t *)&dataQueue->head, __ATOMIC_ACQUIRE);

    // Round the size up to a 4-byte multiple, checking for overflow
    if (os_add_overflow(callerDataSize, 3, &dataSize)) {
        return kIOReturnOverrun;
    }
    dataSize &= ~3U;

    // Check for overflow of entrySize
    if (os_add_overflow(DATA_QUEUE_ENTRY_HEADER_SIZE, dataSize, &entrySize)) {
        return kIOReturnOverrun;
    }

    // Check for underflow of (queueSize - tail)
    if (queueSize < tail || queueSize < head) {
        return kIOReturnError;
    }

    if (tail >= head) {
        uint32_t endSpace = queueSize - tail;
        uint32_t endElements = endSpace / entrySize;
        uint32_t beginElements = head / entrySize;
        if (endElements < dataCount && endElements + beginElements <= dataCount) {
            return kIOReturnOverrun;
        }
    } else {
        // Do not allow the tail to catch up to the head when the queue is full.
        uint32_t space = head - tail - 1;
        uint32_t elements = space / entrySize;
        if (elements < dataCount) {
            return kIOReturnOverrun;
        }
    }

    return kIOReturnSuccess;
}
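
// Usage note (added for exposition): a producer can reserve room for a batch
// before committing it, e.g.
//
//   if (kIOReturnSuccess == source->CanEnqueueData(recordSize, batchCount)) {
//       // batchCount enqueues of recordSize bytes will fit
//   }
//
// The estimate divides the free space into whole entrySize slots, so it is
// conservative rather than an exact packing.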

size_t
IODataQueueDispatchSource::GetDataQueueEntryHeaderSize(void)
{
    return DATA_QUEUE_ENTRY_HEADER_SIZE;
}