/*
 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/sysctl.h>
#include <sys/mcache.h> /* for VERIFY() */

int evh_debug = 0;

SYSCTL_NODE(_kern, OID_AUTO, eventhandler, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "Eventhandler");
SYSCTL_INT(_kern_eventhandler, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &evh_debug, 0, "Eventhandler debug mode");

struct eventhandler_entry_arg eventhandler_entry_dummy_arg = { .ee_fm_uuid = { 0 }, .ee_fr_uuid = { 0 } };

/* List of 'slow' lists */
static struct eventhandler_lists_ctxt evthdlr_lists_ctxt_glb;
static LCK_GRP_DECLARE(eventhandler_mutex_grp, "eventhandler");

LCK_GRP_DECLARE(el_lock_grp, "eventhandler list");
LCK_ATTR_DECLARE(el_lock_attr, 0, 0);

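/*
 * Generic registration record: wraps the caller-supplied function pointer
 * together with the common eventhandler_entry header that links it into a
 * list and carries its argument and priority.
 */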
struct eventhandler_entry_generic {
	struct eventhandler_entry ee;
	void *func;
};

static struct eventhandler_list *_eventhandler_find_list(
	struct eventhandler_lists_ctxt *evthdlr_lists_ctxt, const char *name);

void
eventhandler_lists_ctxt_init(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt)
{
	VERIFY(evthdlr_lists_ctxt != NULL);

	TAILQ_INIT(&evthdlr_lists_ctxt->eventhandler_lists);
	evthdlr_lists_ctxt->eventhandler_lists_initted = 1;
	lck_mtx_init(&evthdlr_lists_ctxt->eventhandler_mutex,
	    &eventhandler_mutex_grp, LCK_ATTR_NULL);
}
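
/*
 * Per-object list contexts are set up with eventhandler_lists_ctxt_init()
 * and torn down with eventhandler_lists_ctxt_destroy(); passing a NULL
 * context to the registration/lookup routines below falls back to the
 * global evthdlr_lists_ctxt_glb.  A minimal sketch of an owner embedding
 * its own context (the object and field names are illustrative only):
 *
 *	struct my_object {
 *		struct eventhandler_lists_ctxt evh_ctxt;
 *		// ... other per-object state ...
 *	};
 *
 *	// at object creation
 *	eventhandler_lists_ctxt_init(&objp->evh_ctxt);
 *
 *	// once the object is defunct and every entry has been deregistered
 *	eventhandler_lists_ctxt_destroy(&objp->evh_ctxt);
 */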

/*
 * Initialize the eventhandler list.
 */
void
eventhandler_init(void)
{
	eventhandler_lists_ctxt_init(&evthdlr_lists_ctxt_glb);
}

/*
 * Insertion is O(n) due to the priority scan, but optimises to O(1)
 * if all priorities are identical.
 */
static eventhandler_tag
eventhandler_register_internal(
	struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
	struct eventhandler_list *list,
	const char *name, eventhandler_tag epn)
{
	struct eventhandler_list *__single new_list;
	struct eventhandler_entry *__single ep;

	VERIFY(strlen(name) <= (sizeof(new_list->el_name) - 1));

	if (evthdlr_lists_ctxt == NULL) {
		evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb;
	}

	VERIFY(evthdlr_lists_ctxt->eventhandler_lists_initted); /* eventhandler registered too early */
	VERIFY(epn != NULL); /* cannot register NULL event */

	/* lock the eventhandler lists */
	lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex);

	/* Do we need to find/create the (slow) list? */
	if (list == NULL) {
		/* look for a matching, existing list */
		list = _eventhandler_find_list(evthdlr_lists_ctxt, name);

		/* Do we need to create the list? */
		if (list == NULL) {
			lck_mtx_convert_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
			new_list = kalloc_type(struct eventhandler_list, Z_WAITOK_ZERO);
			evhlog((LOG_DEBUG, "%s: creating list \"%s\"", __func__, name));
			list = new_list;
			list->el_flags = 0;
			list->el_runcount = 0;
			bzero(&list->el_lock, sizeof(list->el_lock));
			(void) snprintf(list->el_name, sizeof(list->el_name), "%s", name);
			TAILQ_INSERT_HEAD(&evthdlr_lists_ctxt->eventhandler_lists, list, el_link);
		}
	}
	if (!(list->el_flags & EHL_INITTED)) {
		TAILQ_INIT(&list->el_entries);
		EHL_LOCK_INIT(list);
		list->el_flags |= EHL_INITTED;
	}
	lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);

	KASSERT(epn->ee_priority != EHE_DEAD_PRIORITY,
	    ("%s: handler for %s registered with dead priority", __func__, name));

	/* sort it into the list */
	evhlog((LOG_DEBUG, "%s: adding item %p (function %p) to \"%s\"", __func__, (void *)VM_KERNEL_ADDRPERM(epn),
	    (void *)VM_KERNEL_UNSLIDE(((struct eventhandler_entry_generic *)epn)->func), name));
	EHL_LOCK(list);
	TAILQ_FOREACH(ep, &list->el_entries, ee_link) {
		if (ep->ee_priority != EHE_DEAD_PRIORITY &&
		    epn->ee_priority < ep->ee_priority) {
			TAILQ_INSERT_BEFORE(ep, epn, ee_link);
			break;
		}
	}
	if (ep == NULL) {
		TAILQ_INSERT_TAIL(&list->el_entries, epn, ee_link);
	}
	EHL_UNLOCK(list);
	return epn;
}

eventhandler_tag
eventhandler_register(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
    struct eventhandler_list *list, const char *name,
    void *func, struct eventhandler_entry_arg arg, int priority)
{
	struct eventhandler_entry_generic *__single eg;

	/* allocate an entry for this handler, populate it */
	eg = kalloc_type(struct eventhandler_entry_generic, Z_WAITOK_ZERO);
	eg->func = func;
	eg->ee.ee_arg = arg;
	eg->ee.ee_priority = priority;

	return eventhandler_register_internal(evthdlr_lists_ctxt, list, name, &eg->ee);
}
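
/*
 * A minimal registration sketch.  The event name, handler and its signature
 * below are illustrative only; callers normally go through the EVENTHANDLER_*
 * macros in <sys/eventhandler.h> rather than calling these functions directly.
 *
 *	static void
 *	my_event_handler(struct eventhandler_entry_arg arg, int value)
 *	{
 *		// react to the event
 *	}
 *
 *	// A NULL context selects the global context and a NULL list selects
 *	// (or creates) the named "slow" list.  Entries are kept sorted by
 *	// ascending ee_priority, so a lower value runs earlier.
 *	eventhandler_tag tag = eventhandler_register(NULL, NULL, "my_event",
 *	    (void *)my_event_handler, eventhandler_entry_dummy_arg, 0);
 */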

void
eventhandler_deregister(struct eventhandler_list *list, eventhandler_tag tag)
{
	struct eventhandler_entry *__single ep = tag;

	EHL_LOCK_ASSERT(list, LCK_MTX_ASSERT_OWNED);
	if (ep != NULL) {
		/* remove just this entry */
		if (list->el_runcount == 0) {
			evhlog((LOG_DEBUG, "%s: removing item %p from \"%s\"", __func__, (void *)VM_KERNEL_ADDRPERM(ep),
			    list->el_name));
			/*
			 * We may have purged the list because of certain events.
			 * Make sure that is not the case when a specific entry
			 * is being removed.
			 */
			if (!TAILQ_EMPTY(&list->el_entries)) {
				TAILQ_REMOVE(&list->el_entries, ep, ee_link);
			}
			EHL_LOCK_CONVERT(list);
			kfree_type(struct eventhandler_entry, ep);
		} else {
			evhlog((LOG_DEBUG, "%s: marking item %p from \"%s\" as dead", __func__,
			    (void *)VM_KERNEL_ADDRPERM(ep), list->el_name));
			ep->ee_priority = EHE_DEAD_PRIORITY;
		}
	} else {
		/* remove entire list */
		if (list->el_runcount == 0) {
			evhlog((LOG_DEBUG, "%s: removing all items from \"%s\"", __func__,
			    list->el_name));
			EHL_LOCK_CONVERT(list);
			while (!TAILQ_EMPTY(&list->el_entries)) {
				ep = TAILQ_FIRST(&list->el_entries);
				TAILQ_REMOVE(&list->el_entries, ep, ee_link);
				kfree_type(struct eventhandler_entry, ep);
			}
		} else {
			evhlog((LOG_DEBUG, "%s: marking all items from \"%s\" as dead",
			    __func__, list->el_name));
			TAILQ_FOREACH(ep, &list->el_entries, ee_link)
				ep->ee_priority = EHE_DEAD_PRIORITY;
		}
	}
	while (list->el_runcount > 0) {
		msleep((caddr_t)list, &list->el_lock, PSPIN, "evhrm", 0);
	}
	EHL_UNLOCK(list);
}
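
/*
 * Locking protocol note: eventhandler_deregister() must be entered with the
 * list lock held (see the EHL_LOCK_ASSERT above) and it drops the lock before
 * returning.  A hedged sketch, assuming the tag came from a registration on
 * the same named list as in the example above:
 *
 *	struct eventhandler_list *list;
 *
 *	if ((list = eventhandler_find_list(NULL, "my_event")) != NULL) {
 *		// eventhandler_find_list() returns with the list locked
 *		eventhandler_deregister(list, tag);	// unlocks the list
 *	}
 */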

/*
 * Internal version for use when eventhandler list is already locked.
 */
static struct eventhandler_list *
_eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
    const char *name)
{
	struct eventhandler_list *__single list;

	VERIFY(evthdlr_lists_ctxt != NULL);

	LCK_MTX_ASSERT(&evthdlr_lists_ctxt->eventhandler_mutex, LCK_MTX_ASSERT_OWNED);
	TAILQ_FOREACH(list, &evthdlr_lists_ctxt->eventhandler_lists, el_link) {
		if (!strlcmp(list->el_name, name, EVENTHANDLER_MAX_NAME)) {
			break;
		}
	}
	return list;
}

/*
 * Lookup a "slow" list by name.  Returns with the list locked.
 */
struct eventhandler_list *
eventhandler_find_list(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt,
    const char *name)
{
	struct eventhandler_list *__single list;

	if (evthdlr_lists_ctxt == NULL) {
		evthdlr_lists_ctxt = &evthdlr_lists_ctxt_glb;
	}

	if (!evthdlr_lists_ctxt->eventhandler_lists_initted) {
		return NULL;
	}

	/* scan looking for the requested list */
	lck_mtx_lock_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
	list = _eventhandler_find_list(evthdlr_lists_ctxt, name);
	if (list != NULL) {
		lck_mtx_convert_spin(&evthdlr_lists_ctxt->eventhandler_mutex);
		EHL_LOCK_SPIN(list);
	}
	lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);

	return list;
}
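
/*
 * Invocation is driven from <sys/eventhandler.h>: the EVENTHANDLER_INVOKE()
 * macro is expected to look the list up via eventhandler_find_list(), bump
 * el_runcount while it calls the live entries, skip entries already marked
 * EHE_DEAD_PRIORITY, and hand dead entries to eventhandler_prune_list() once
 * the run count drains.  (Sketch of the contract only; the header holds the
 * authoritative definition.)
 */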

/*
 * Prune "dead" entries from an eventhandler list.
 */
void
eventhandler_prune_list(struct eventhandler_list *list)
{
	struct eventhandler_entry *__single ep, *__single en;
	int pruned = 0;

	evhlog((LOG_DEBUG, "%s: pruning list \"%s\"", __func__, list->el_name));
	EHL_LOCK_ASSERT(list, LCK_MTX_ASSERT_OWNED);
	TAILQ_FOREACH_SAFE(ep, &list->el_entries, ee_link, en) {
		if (ep->ee_priority == EHE_DEAD_PRIORITY) {
			TAILQ_REMOVE(&list->el_entries, ep, ee_link);
			kfree_type(struct eventhandler_entry, ep);
			pruned++;
		}
	}
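	/*
	 * Wake any thread parked in eventhandler_deregister(), which sleeps
	 * on the list address while el_runcount is still non-zero.
	 */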
	if (pruned > 0) {
		wakeup(list);
	}
}

/*
 * This should be called when the last reference to an object
 * is being released.
 * The individual event type lists must be purged when the object
 * becomes defunct.
 */
void
eventhandler_lists_ctxt_destroy(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt)
{
	struct eventhandler_list *__single list = NULL;
	struct eventhandler_list *__single list_next = NULL;

	lck_mtx_lock(&evthdlr_lists_ctxt->eventhandler_mutex);
	TAILQ_FOREACH_SAFE(list, &evthdlr_lists_ctxt->eventhandler_lists,
	    el_link, list_next) {
		VERIFY(TAILQ_EMPTY(&list->el_entries));
		EHL_LOCK_DESTROY(list);
		kfree_type(struct eventhandler_list, list);
	}
	lck_mtx_unlock(&evthdlr_lists_ctxt->eventhandler_mutex);
	lck_mtx_destroy(&evthdlr_lists_ctxt->eventhandler_mutex,
	    &eventhandler_mutex_grp);
}