/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueue.h>

#undef DISABLE_DATAQUEUE_WARNING

#include <IOKit/IODataQueueShared.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <libkern/OSAtomic.h>

// Kernel-private state backing the opaque IODataQueue::notifyMsg pointer:
// the preallocated Mach message used for data-available notifications, plus
// the client-visible queue capacity in bytes (excluding the shared header).
struct IODataQueueInternal
{
    mach_msg_header_t msg;
    UInt32            queueSize;
};

// Some kernel headers (e.g. <kern/queue.h>) define enqueue/dequeue as
// macros; undefine them so they cannot mangle the method names below.
#ifdef enqueue
#undef enqueue
#endif

#ifdef dequeue
#undef dequeue
#endif

#define super OSObject

OSDefineMetaClassAndStructors(IODataQueue, OSObject)

IODataQueue *IODataQueue::withCapacity(UInt32 size)
{
    IODataQueue *dataQueue = new IODataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithCapacity(size)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}

IODataQueue *IODataQueue::withEntries(UInt32 numEntries, UInt32 entrySize)
{
    IODataQueue *dataQueue = new IODataQueue;

    if (dataQueue) {
        if (!dataQueue->initWithEntries(numEntries, entrySize)) {
            dataQueue->release();
            dataQueue = 0;
        }
    }

    return dataQueue;
}
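
// Usage sketch for the two factories (illustrative only; MySample and the
// sizes below are hypothetical, not part of this API):
//
//     // Reserve room for up to 128 fixed-size samples...
//     IODataQueue *queue = IODataQueue::withEntries(128, sizeof(MySample));
//
//     // ...or reserve a raw byte capacity for variable-size entries.
//     IODataQueue *rawQueue = IODataQueue::withCapacity(16 * 1024);
//
//     // Either returns NULL if allocation or initialization failed.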

Boolean IODataQueue::initWithCapacity(UInt32 size)
{
    vm_size_t allocSize = 0;

    if (!super::init()) {
        return false;
    }

    if (size > UINT32_MAX - DATA_QUEUE_MEMORY_HEADER_SIZE) {
        return false;
    }

    allocSize = round_page(size + DATA_QUEUE_MEMORY_HEADER_SIZE);

    if (allocSize < size) {
        return false;
    }

    assert(!notifyMsg);
    notifyMsg = IONew(IODataQueueInternal, 1);
    if (!notifyMsg) {
        return false;
    }
    bzero(notifyMsg, sizeof(IODataQueueInternal));
    ((IODataQueueInternal *)notifyMsg)->queueSize = size;

    dataQueue = (IODataQueueMemory *)IOMallocAligned(allocSize, PAGE_SIZE);
    if (dataQueue == 0) {
        return false;
    }
    bzero(dataQueue, allocSize);

    dataQueue->queueSize = size;
    // dataQueue->head and dataQueue->tail start at 0, cleared by the bzero above.

    return true;
}
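
// For reference, the region allocated above is shared with user space and
// begins with the queue header, followed by the entry storage. A sketch of
// the layout declared in IODataQueueShared.h (paraphrased, not verbatim):
//
//     typedef struct _IODataQueueEntry {
//         UInt32 size;            // payload size in bytes, excluding this header
//         UInt32 data[1];         // variable-length payload
//     } IODataQueueEntry;
//
//     typedef struct _IODataQueueMemory {
//         UInt32           queueSize;  // capacity of queue[] in bytes
//         volatile UInt32  head;       // byte offset of the next entry to dequeue
//         volatile UInt32  tail;       // byte offset at which to enqueue next
//         IODataQueueEntry queue[1];
//     } IODataQueueMemory;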

Boolean IODataQueue::initWithEntries(UInt32 numEntries, UInt32 entrySize)
{
    // Checking overflow for (numEntries + 1) * (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE):
    // check (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
    if ((entrySize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) ||
        // check (numEntries + 1)
        (numEntries > UINT32_MAX - 1) ||
        // check (numEntries + 1) * (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE)
        (entrySize + DATA_QUEUE_ENTRY_HEADER_SIZE > UINT32_MAX / (numEntries + 1))) {
        return false;
    }

    return (initWithCapacity((numEntries + 1) * (DATA_QUEUE_ENTRY_HEADER_SIZE + entrySize)));
}
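
// Worked example (assuming the 4-byte per-entry header and 4 KB pages; both
// depend on the shared layout and platform, so the numbers are illustrative):
//
//     initWithEntries(100, 64)
//       capacity  = (100 + 1) * (64 + 4) = 6868 bytes
//       allocSize = round_page(6868 + DATA_QUEUE_MEMORY_HEADER_SIZE) = 8192
//
// One extra entry of slack is reserved because enqueue() never lets the tail
// catch up to the head: head == tail must always mean "empty", so a queue
// sized for exactly numEntries entries could never hold all of them at once.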

void IODataQueue::free()
{
    if (notifyMsg) {
        if (dataQueue) {
            IOFreeAligned(dataQueue, round_page(((IODataQueueInternal *)notifyMsg)->queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE));
            dataQueue = NULL;
        }

        IODelete(notifyMsg, IODataQueueInternal, 1);
        notifyMsg = NULL;
    }

    super::free();

    return;
}

Boolean IODataQueue::enqueue(void * data, UInt32 dataSize)
{
    UInt32             head;
    UInt32             tail;
    UInt32             newTail;
    const UInt32       entrySize = dataSize + DATA_QUEUE_ENTRY_HEADER_SIZE;
    UInt32             queueSize;
    IODataQueueEntry * entry;

    // Check for overflow of entrySize
    if (dataSize > UINT32_MAX - DATA_QUEUE_ENTRY_HEADER_SIZE) {
        return false;
    }

    // Force a single read of head and tail
    // See rdar://problem/40780584 for an explanation of relaxed/acquire barriers
    tail = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->tail, __ATOMIC_RELAXED);
    head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_ACQUIRE);

    // Check for underflow of (dataQueue->queueSize - tail)
    queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
    if ((queueSize < tail) || (queueSize < head)) {
        return false;
    }

    if ( tail >= head )
    {
        // Is there enough room at the end for the entry?
        if ((entrySize <= UINT32_MAX - tail) &&
            ((tail + entrySize) <= queueSize) )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);

            // The tail can be out of bounds when the size of the new entry
            // exactly matches the available space at the end of the queue.
            // The tail can range from 0 to dataQueue->queueSize inclusive.

            newTail = tail + entrySize;
        }
        else if ( head > entrySize )    // Is there enough room at the beginning?
        {
            // Wrap around to the beginning, but do not allow the tail to catch
            // up to the head.

            dataQueue->queue->size = dataSize;

            // Since the entry is being written at the beginning, also record
            // its size at the old tail if the entry header fits there. The
            // user client checks for this and will look for the size at the
            // beginning if there isn't room for it at the end.

            if ( ( queueSize - tail ) >= DATA_QUEUE_ENTRY_HEADER_SIZE )
            {
                ((IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail))->size = dataSize;
            }

            memcpy(&dataQueue->queue->data, data, dataSize);
            newTail = entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }
    else
    {
        // Do not allow the tail to catch up to the head when the queue is full.
        // That's why the comparison uses a '>' rather than '>='.

        if ( (head - tail) > entrySize )
        {
            entry = (IODataQueueEntry *)((UInt8 *)dataQueue->queue + tail);

            entry->size = dataSize;
            memcpy(&entry->data, data, dataSize);
            newTail = tail + entrySize;
        }
        else
        {
            return false;    // queue is full
        }
    }

    // Publish the data we just enqueued
    __c11_atomic_store((_Atomic UInt32 *)&dataQueue->tail, newTail, __ATOMIC_RELEASE);

    if (tail != head) {
        //
        // The memory barrier below pairs with the one in ::dequeue
        // so that either our store to the tail cannot be missed by
        // the next dequeue attempt, or we will observe the dequeuer
        // making the queue empty.
        //
        // Of course, if we already think the queue is empty,
        // there's no point paying this extra cost.
        //
        __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
        head = __c11_atomic_load((_Atomic UInt32 *)&dataQueue->head, __ATOMIC_RELAXED);
    }

    if (tail == head) {
        // Send notification (via mach message) that data is now available.
        sendDataAvailableNotification();
    }
    return true;
}
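
// Producer-side usage sketch (hypothetical names; a real driver must also
// serialize its own enqueuers, since this path assumes a single producer):
//
//     MySample sample;
//     fillSample(&sample);
//     if (!fQueue->enqueue(&sample, sizeof(sample))) {
//         // Queue full: drop the sample or record an overrun.
//     }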

void IODataQueue::setNotificationPort(mach_port_t port)
{
    mach_msg_header_t * msgh;

    // Prepare an empty message addressed to the client's notification port;
    // sendDataAvailableNotification() reuses it for every notification.
    msgh = &((IODataQueueInternal *) notifyMsg)->msg;
    bzero(msgh, sizeof(mach_msg_header_t));
    msgh->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
    msgh->msgh_size = sizeof(mach_msg_header_t);
    msgh->msgh_remote_port = port;
}

void IODataQueue::sendDataAvailableNotification()
{
    kern_return_t       kr;
    mach_msg_header_t * msgh;

    msgh = &((IODataQueueInternal *) notifyMsg)->msg;
    if (msgh->msgh_remote_port) {
        kr = mach_msg_send_from_kernel_with_options(msgh, msgh->msgh_size, MACH_SEND_TIMEOUT, MACH_MSG_TIMEOUT_NONE);
        switch (kr) {
        case MACH_SEND_TIMED_OUT:    // Notification already sent
        case MACH_MSG_SUCCESS:
        case MACH_SEND_NO_BUFFER:
            break;
        default:
            IOLog("%s: dataAvailableNotification failed - msg_send returned: %d\n", /*getName()*/ "IODataQueue", kr);
            break;
        }
    }
}

IOMemoryDescriptor *IODataQueue::getMemoryDescriptor()
{
    IOMemoryDescriptor *descriptor = 0;
    UInt32              queueSize;

    queueSize = ((IODataQueueInternal *) notifyMsg)->queueSize;
    if (dataQueue != 0) {
        descriptor = IOMemoryDescriptor::withAddress(dataQueue, queueSize + DATA_QUEUE_MEMORY_HEADER_SIZE, kIODirectionOutIn);
    }

    return descriptor;
}
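
// Typical consumer hookup (sketch; MyUserClient and kQueueMemoryType are
// hypothetical): a user client hands this descriptor to user space from
// IOUserClient::clientMemoryForType(), and the client maps it to read the
// shared IODataQueueMemory region directly.
//
//     IOReturn MyUserClient::clientMemoryForType(UInt32 type,
//         IOOptionBits *options, IOMemoryDescriptor **memory)
//     {
//         if (type != kQueueMemoryType) return kIOReturnBadArgument;
//         *memory = fDataQueue->getMemoryDescriptor();  // retained for the caller
//         return (*memory != NULL) ? kIOReturnSuccess : kIOReturnNoMemory;
//     }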