1/*
2 * Copyright (c) 1998-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#define IOKIT_ENABLE_SHARED_PTR
30
31#if !defined(__LP64__)
32
33#include <IOKit/IOCommandQueue.h>
34#include <IOKit/IOWorkLoop.h>
35#include <IOKit/IOTimeStamp.h>
36#include <IOKit/IOKitDebug.h>
37#include <libkern/c++/OSSharedPtr.h>
38
39#include <mach/sync_policy.h>
40
41#if IOKITSTATS
42
43#define IOStatisticsInitializeCounter() \
44 IOStatistics::setCounterType(reserved->counter, kIOStatisticsCommandQueueCounter)
45
46#define IOStatisticsActionCall() \
47 IOStatistics::countCommandQueueActionCall(reserved->counter)
48
49#else
50
51#define IOStatisticsInitializeCounter()
52#define IOStatisticsActionCall()
53
54#endif /* IOKITSTATS */
55
/* Each queued command is simply four opaque pointer-sized fields that are
 * handed, unchanged, to the action routine when the command is consumed. */
#define NUM_FIELDS_IN_COMMAND 4
typedef struct commandEntryTag {
	void *f[NUM_FIELDS_IN_COMMAND];
} commandEntryT;

#define super IOEventSource

OSDefineMetaClassAndStructors(IOCommandQueue, IOEventSource)
64
65/*[
66 * Instance Methods
67 *
68 * initWithNext:owner:action:size:
69 * - initWithNext: (IOEventSource *) inNext
70 * owner: (id) inOwner
71 * action: (SEL) inAction
72 * size: (int) inSize;
73 *
74 * Primary initialiser for the IOCommandQueue class. Returns an
75 * IOCommandQueue object that is initialised with the next object in
76 * the chain and the owner and action. On return the signalWorkAvailableIMP
77 * has been cached for this function.
78 *
79 * If the object fails to initialise for some reason then [self free] will
80 * be called and nil will be returned.
81 *
82 * See also: initWithNext:owner:action:(IOEventSource)
83 * ]*/
84bool
85IOCommandQueue::init(OSObject *inOwner,
86 IOCommandQueueAction inAction,
87 int inSize)
88{
89 if (!super::init(inOwner, (IOEventSourceAction) inAction)) {
90 return false;
91 }
92
93 if (KERN_SUCCESS
94 != semaphore_create(kernel_task, &producerSema, SYNC_POLICY_FIFO, inSize)) {
95 return false;
96 }
97
98 size = inSize + 1; /* Allocate one more entry than needed */
99
100 queue = (void *)kalloc_type(commandEntryT, size, Z_WAITOK_ZERO);
101 if (!queue) {
102 return false;
103 }
104
105 producerLock = IOLockAlloc();
106 if (!producerLock) {
107 return false;
108 }
109
110 producerIndex = consumerIndex = 0;
111
112 IOStatisticsInitializeCounter();
113
114 return true;
115}
116
117OSSharedPtr<IOCommandQueue>
118IOCommandQueue::commandQueue(OSObject *inOwner,
119 IOCommandQueueAction inAction,
120 int inSize)
121{
122 OSSharedPtr<IOCommandQueue> me = OSMakeShared<IOCommandQueue>();
123
124 if (me && !me->init(inOwner, inAction, inSize)) {
125 me.reset();
126 return nullptr;
127 }
128
129 return me;
130}
131
132/*[
133 * free
134 * - free
135 *
 * Mandatory free of the object independent of the current retain count.
 * Releases the command ring, producer semaphore, and producer lock.
138 * ]*/
139void
140IOCommandQueue::free()
141{
142 if (queue) {
143 kfree_type(commandEntryT, size, queue);
144 }
145 if (producerSema) {
146 semaphore_destroy(kernel_task, producerSema);
147 }
148 if (producerLock) {
149 IOLockFree(producerLock);
150 }
151
152 super::free();
153}
154
155#if NUM_FIELDS_IN_COMMAND != 4
156#error IOCommandQueue::checkForWork needs to be updated for new command size
157#endif
158
/*
 * checkForWork - workloop callback; runs on the workloop thread, which
 * is the single consumer of the ring buffer.
 *
 * Pops one command and invokes the configured action with its four
 * fields.  Returns true while more entries remain so the workloop
 * calls back for the next one.
 */
bool
IOCommandQueue::checkForWork()
{
	void *field0, *field1, *field2, *field3;
	bool trace = (gIOKitTrace & kIOTraceCommandGates) ? true : false;

	// Nothing to do if the event source is disabled or the ring is empty.
	if (!enabled || consumerIndex == producerIndex) {
		return false;
	}

	{
		commandEntryT *q = (commandEntryT *) queue;
		int localIndex = consumerIndex;

		// Copy the entry out of the ring BEFORE signalling the producer
		// semaphore: once the signal is delivered a producer may reclaim
		// this slot, so the copy must already be complete.  Do not
		// reorder these statements.
		field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1];
		field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3];
		semaphore_signal(producerSema);
	}

	// Advance the consumer index, wrapping around the ring.
	if (++consumerIndex >= size) {
		consumerIndex = 0;
	}

	if (trace) {
		IOTimeStampStartConstant(IODBG_CMDQ(IOCMDQ_ACTION),
		    VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner));
	}

	IOStatisticsActionCall();
	(*(IOCommandQueueAction) action)(owner, field0, field1, field2, field3);

	if (trace) {
		IOTimeStampEndConstant(IODBG_CMDQ(IOCMDQ_ACTION),
		    VM_KERNEL_ADDRHIDE(action), VM_KERNEL_ADDRHIDE(owner));
	}

	// Ask the workloop to call again if entries are still pending.
	return consumerIndex != producerIndex;
}
197
198/*[
199 * enqueueSleep:command:
200 * - (kern_return_t) enqueueSleepRaw: (BOOL) gotoSleep
201 * field0: (void *) field0 field1: (void *) field1
202 * field2: (void *) field2 field3: (void *) field3;
203 *
204 * Key method that enqueues the four input fields onto the command queue
205 * and calls signalWorkAvailable to indicate that work is available to the
206 * consumer. This routine is safe against multiple threaded producers.
207 *
208 * A family of convenience functions have been provided to assist with the
209 * enqueueing of an method selector and an integer tag. This relies on the
210 * IODevice rawCommandOccurred... command to forward on the requests.
211 *
212 * See also: signalWorkAvailable, checkForWork
213 * ]*/
214#if NUM_FIELDS_IN_COMMAND != 4
215#error IOCommandQueue::enqueueCommand needs to be updated
216#endif
217
/*
 * enqueueCommand - enqueue four opaque fields and wake the workloop.
 *
 * Safe against multiple concurrent producer threads (serialised by
 * producerLock).  A slot must first be reserved via the producer
 * semaphore, which was created with inSize counts in init().
 *
 * gotoSleep: true  - block in semaphore_wait until a slot is free
 *                    (retried up to 5 times on transient failures);
 *            false - poll with a zero timeout and fail immediately
 *                    (typically KERN_OPERATION_TIMED_OUT) if full.
 *
 * Returns KERN_SUCCESS once the command is enqueued, otherwise the
 * error from the semaphore wait.
 */
kern_return_t
IOCommandQueue::enqueueCommand(bool gotoSleep,
    void *field0, void *field1,
    void *field2, void *field3)
{
	kern_return_t rtn = KERN_SUCCESS;
	int retry;

	/* Make sure there is room in the queue before doing anything else */

	if (gotoSleep) {
		retry = 0;
		// Retry transient wait failures, but give up immediately on
		// timeout/destroyed/terminated, and after 5 attempts total.
		do{
			rtn = semaphore_wait(producerSema);
		} while ((KERN_SUCCESS != rtn)
		    && (KERN_OPERATION_TIMED_OUT != rtn)
		    && (KERN_SEMAPHORE_DESTROYED != rtn)
		    && (KERN_TERMINATED != rtn)
		    && ((retry++) < 4));
	} else {
		// Non-blocking reservation: a zero timeout makes this a poll.
		rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO);
	}

	if (KERN_SUCCESS != rtn) {
		return rtn;
	}

	/* Block other producers */
	IOTakeLock(producerLock);

	/*
	 * Make sure that we update the current producer entry before we
	 * increment the producer pointer. This avoids a nasty race as the
	 * test for work is producerIndex != consumerIndex and a signal.
	 */
	{
		commandEntryT *q = (commandEntryT *) queue;
		int localIndex = producerIndex;

		q[localIndex].f[0] = field0; q[localIndex].f[1] = field1;
		q[localIndex].f[2] = field2; q[localIndex].f[3] = field3;
	}
	// Publish the entry by advancing (and wrapping) the producer index.
	if (++producerIndex >= size) {
		producerIndex = 0;
	}

	/* Clear to allow other producers to go now */
	IOUnlock(producerLock);

	/*
	 * Right we have created some new work, we had better make sure that
	 * we notify the work loop that it has to test producerIndex.
	 */
	signalWorkAvailable();
	return rtn;
}
274
275int
276IOCommandQueue::performAndFlush(OSObject *target,
277 IOCommandQueueAction inAction)
278{
279 int numEntries;
280 kern_return_t rtn;
281
282 // Set the defaults if necessary
283 if (!target) {
284 target = owner;
285 }
286 if (!inAction) {
287 inAction = (IOCommandQueueAction) action;
288 }
289
290 // Lock out the producers first
291 do {
292 rtn = semaphore_timedwait(producerSema, MACH_TIMESPEC_ZERO);
293 } while (rtn == KERN_SUCCESS);
294
295 // now step over all remaining entries in the command queue
296 for (numEntries = 0; consumerIndex != producerIndex;) {
297 void *field0, *field1, *field2, *field3;
298
299 {
300 commandEntryT *q = (commandEntryT *) queue;
301 int localIndex = consumerIndex;
302
303 field0 = q[localIndex].f[0]; field1 = q[localIndex].f[1];
304 field2 = q[localIndex].f[2]; field3 = q[localIndex].f[3];
305 }
306
307 if (++consumerIndex >= size) {
308 consumerIndex = 0;
309 }
310
311 (*inAction)(target, field0, field1, field2, field3);
312 }
313
314 // finally refill the producer semaphore to size - 1
315 for (int i = 1; i < size; i++) {
316 semaphore_signal(producerSema);
317 }
318
319 return numEntries;
320}
321
322#endif /* !defined(__LP64__) */
323