/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define IOKIT_ENABLE_SHARED_PTR

#define DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueue.h>

#undef DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/OSAtomic.h>
#include <libkern/c++/OSSharedPtr.h>

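/*
 * Kernel-private state kept per queue: the preallocated Mach message
 * used for data-available notifications, and the payload capacity of
 * the shared region (not counting the IODataQueueMemory header).
 */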
struct IODataQueueInternal {
	mach_msg_header_t msg;
	UInt32            queueSize;
};

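// Some kernel headers (e.g. <kern/queue.h>) may define enqueue/dequeue
// as macros; undefine them so the method names below compile cleanly.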
#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super OSObject

OSDefineMetaClassAndStructors(IODataQueue, OSObject)

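/*
 * Factory: allocate and initialize a queue with `size` bytes of payload
 * capacity. Returns nullptr on allocation or initialization failure.
 *
 * Typical use (illustrative sketch only; `clientPort` and `event` are
 * hypothetical names, not part of this file):
 *
 *   OSSharedPtr<IODataQueue> queue = IODataQueue::withCapacity(16 * 1024);
 *   if (queue) {
 *       queue->setNotificationPort(clientPort);
 *       queue->enqueue(&event, sizeof(event));
 *   }
 */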
OSSharedPtr<IODataQueue>
IODataQueue::withCapacity(UInt32 size)
{
	OSSharedPtr<IODataQueue> dataQueue = OSMakeShared<IODataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithCapacity(size)) {
			return nullptr;
		}
	}

	return dataQueue;
}

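/*
 * Factory: size the queue for `numEntries` entries of `entrySize` bytes
 * each, accounting for the per-entry header. See initWithEntries() for
 * the exact capacity computation.
 */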
OSSharedPtr<IODataQueue>
IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
	OSSharedPtr<IODataQueue> dataQueue = OSMakeShared<IODataQueue>();

	if (dataQueue) {
		if (!dataQueue->initWithEntries(numEntries, entrySize)) {
			return nullptr;
		}
	}

	return dataQueue;
}

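/*
 * Initialize with `size` bytes of payload capacity. The shared region is
 * allocated from kernel_map, page-rounded, and zero-filled (KMA_ZERO), so
 * head and tail start at 0 without explicit initialization.
 */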
Boolean
IODataQueue::initWithCapacity(UInt32 size)
{
	vm_size_t allocSize = 0;
	kern_return_t kr;

	if (!super::init()) {
		return false;
	}

	if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
		return false;
	}

	allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE);

	if (allocSize < size) {
		return false;
	}

	assert(!notifyMsg);
	notifyMsg = IOMallocType(IODataQueueInternal);
	((IODataQueueInternal *)notifyMsg)->queueSize = size;

	kr = kmem_alloc(kernel_map, (vm_offset_t *)&dataQueue, allocSize,
	    (kma_flags_t)(KMA_DATA | KMA_ZERO), IOMemoryTag(kernel_map));
	if (kr != KERN_SUCCESS) {
		return false;
	}

	dataQueue->queueSize = size;
//	dataQueue->head = 0;
//	dataQueue->tail = 0;

	return true;
}

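/*
 * Initialize for (numEntries + 1) slots of (entrySize + header) bytes.
 * The extra slot keeps the tail from ever catching up to the head, so
 * head == tail unambiguously means "empty" and numEntries entries fit.
 */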
Boolean
IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize)
{
	// Checking overflow for (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE):
	//  check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
	if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
	    //  check (numEntries + 1)
	    (numEntries > UINT32_MAX - 1) ||
	    //  check (numEntries + 1)*(entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
	    (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX / (numEntries + 1))) {
		return false;
	}

	return initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize));
}

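/*
 * Release the shared queue region and the internal notification state.
 * Safe against partially initialized instances: each pointer is checked
 * before it is freed.
 */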
void
IODataQueue::free()
{
	if (notifyMsg) {
		if (dataQueue) {
			kmem_free(kernel_map, (vm_offset_t)dataQueue,
			    round_page(((IODataQueueInternal *)notifyMsg)->queueSize +
			    DATA_QUEUE_MEMORY_HEADER_SIZE));
			dataQueue = NULL;
		}

		IOFreeType(notifyMsg, IODataQueueInternal);
		notifyMsg = NULL;
	}

	super::free();

	return;
}

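/*
 * Append one entry to the queue. This is the producer side of a lock-free
 * single-producer/single-consumer ring: only enqueue() writes tail, only
 * the dequeuer writes head, and the release store on tail publishes the
 * entry bytes before the consumer can observe the new tail. Returns false
 * on arithmetic overflow or when the entry does not fit (queue full).
 */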
Boolean
IODataQueue::enqueue(void * data, UInt32 dataSize)
{
	UInt32             head;
	UInt32             tail;
	UInt32             newTail;
	const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
	UInt32             queueSize;
	IODataQueueEntry * entry;

	// Check for overflow of entrySize
	if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
		return false;
	}

	// Force a single read of head and tail
	// See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
	tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
	head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

	// Check for underflow of (dataQueue->queueSize - tail)
	queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
	if ((queueSize < tail) || (queueSize < head)) {
		return false;
	}

	if (tail >= head) {
		// Is there enough room at the end for the entry?
		if ((entrySize <= UINT32_MAX - tail) &&
		    ((tail + entrySize) <= queueSize)) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);

			// The tail can be out of bound when the size of the new entry
			// exactly matches the available space at the end of the queue.
			// The tail can range from 0 to dataQueue->queueSize inclusive.

			newTail = tail + entrySize;
		} else if (head > entrySize) { // Is there enough room at the beginning?
			// Wrap around to the beginning, but do not allow the tail to catch
			// up to the head.

			dataQueue->queue->size = dataSize;

			// We need to make sure that there is enough room to set the size before
			// doing this. The user client checks for this and will look for the size
			// at the beginning if there isn't room for it at the end.

			if ((queueSize - tail) >= DATA_QUEUE_ENTRY_HEADER_SIZE) {
				((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
			}

			__nochk_memcpy(&dataQueue->queue->data, data, dataSize);
			newTail = entrySize;
		} else {
			return false; // queue is full
		}
	} else {
		// Do not allow the tail to catch up to the head when the queue is full.
		// That's why the comparison uses a '>' rather than '>='.

		if ((head - tail) > entrySize) {
			entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

			entry->size = dataSize;
			__nochk_memcpy(&entry->data, data, dataSize);
			newTail = tail + entrySize;
		} else {
			return false; // queue is full
		}
	}

	// Publish the data we just enqueued
	__c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

	if (tail != head) {
		//
		// The memory barrier below pairs with the one in ::dequeue
		// so that either our store to the tail cannot be missed by
		// the next dequeue attempt, or we will observe the dequeuer
		// making the queue empty.
		//
		// Of course, if we already think the queue is empty,
		// there's no point paying this extra cost.
		//
		__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
		head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
	}

	if (tail == head) {
		// Send notification (via mach message) that data is now available.
		sendDataAvailableNotification();
	}
	return true;
}

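/*
 * Record the Mach port to signal when data becomes available. The message
 * header is built once here and reused by every notification send.
 */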
void
IODataQueue::setNotificationPort(mach_port_t port)
{
	mach_msg_header_t * msgh;

	msgh = &((IODataQueueInternal *) notifyMsg)->msg;
	bzero(msgh, sizeof(mach_msg_header_t));
	msgh->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	msgh->msgh_size = sizeof(mach_msg_header_t);
	msgh->msgh_remote_port = port;
}

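/*
 * Fire the data-available notification. MACH_SEND_TIMEOUT with a zero
 * timeout (MACH_MSG_TIMEOUT_NONE) makes the send non-blocking: if a
 * previous notification still occupies the port's message queue, the
 * MACH_SEND_TIMED_OUT result is treated as success, since the client
 * has already been signaled.
 */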
void
IODataQueue::sendDataAvailableNotification()
{
	kern_return_t       kr;
	mach_msg_header_t * msgh;

	msgh = &((IODataQueueInternal *) notifyMsg)->msg;
	if (msgh->msgh_remote_port) {
		kr = mach_msg_send_from_kernel_with_options(msgh, msgh->msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
		switch (kr) {
		case MACH_SEND_TIMED_OUT: // Notification already sent
		case MACH_MSG_SUCCESS:
		case MACH_SEND_NO_BUFFER:
			break;
		default:
			IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/ "IODataQueue", kr);
			break;
		}
	}
}

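/*
 * Wrap the shared queue region (header plus payload area) in an
 * IOMemoryDescriptor, e.g. so a user client can map it into its own
 * address space.
 */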
OSSharedPtr<IOMemoryDescriptor>
IODataQueue::getMemoryDescriptor()
{
	OSSharedPtr<IOMemoryDescriptor> descriptor;
	UInt32 queueSize;

	queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
	if (dataQueue != NULL) {
		descriptor = IOMemoryDescriptor::withAddress(dataQueue, queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn);
	}

	return descriptor;
}