1 | /* |
2 | * Copyright (c) 2003-2021 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <sys/kpi_socketfilter.h> |
30 | |
31 | #include <sys/socket.h> |
32 | #include <sys/param.h> |
33 | #include <sys/errno.h> |
34 | #include <sys/malloc.h> |
35 | #include <sys/protosw.h> |
36 | #include <sys/domain.h> |
37 | #include <sys/proc.h> |
38 | #include <kern/locks.h> |
39 | #include <kern/thread.h> |
40 | #include <kern/debug.h> |
41 | #include <net/kext_net.h> |
42 | #include <net/if.h> |
43 | #include <net/net_api_stats.h> |
44 | #if SKYWALK && defined(XNU_TARGET_OS_OSX) |
45 | #include <skywalk/lib/net_filter_event.h> |
46 | #endif /* SKYWALK && XNU_TARGET_OS_OSX */ |
47 | #include <netinet/in_var.h> |
48 | #include <netinet/ip.h> |
49 | #include <netinet/ip_var.h> |
50 | #include <netinet/tcp.h> |
51 | #include <netinet/tcp_var.h> |
52 | #include <netinet/udp.h> |
53 | #include <netinet/udp_var.h> |
54 | |
55 | #include <libkern/libkern.h> |
56 | #include <libkern/OSAtomic.h> |
57 | |
58 | #include <libkern/sysctl.h> |
59 | #include <libkern/OSDebug.h> |
60 | |
61 | #include <os/refcnt.h> |
62 | |
63 | #include <stdbool.h> |
64 | #include <string.h> |
65 | |
66 | #if SKYWALK |
67 | #include <skywalk/core/skywalk_var.h> |
68 | #endif /* SKYWALK */ |
69 | |
70 | #define SFEF_ATTACHED 0x1 /* SFE is on socket list */ |
71 | #define SFEF_NODETACH 0x2 /* Detach should not be called */ |
72 | #define SFEF_NOSOCKET 0x4 /* Socket is gone */ |
73 | |
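/*
 * A socket_filter_entry ties one registered filter to one socket.  Each
 * entry is linked on up to three singly-linked lists: the per-socket list
 * (so->so_filt, via sfe_next_onsocket), the per-filter list (via
 * sfe_next_onfilter), and, once its last reference is dropped, the cleanup
 * list drained by sflt_cleanup_thread (via sfe_next_oncleanup).
 */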
74 | struct socket_filter_entry { |
75 | struct socket_filter_entry *sfe_next_onsocket; |
76 | struct socket_filter_entry *sfe_next_onfilter; |
77 | struct socket_filter_entry *sfe_next_oncleanup; |
78 | |
79 | struct socket_filter *sfe_filter; |
80 | struct socket *sfe_socket; |
81 | void *sfe_cookie; |
82 | |
83 | uint32_t sfe_flags; |
84 | struct os_refcnt sfe_refcount; |
85 | }; |
86 | |
87 | struct socket_filter { |
88 | TAILQ_ENTRY(socket_filter) sf_protosw_next; |
89 | TAILQ_ENTRY(socket_filter) sf_global_next; |
90 | struct socket_filter_entry *sf_entry_head; |
91 | |
92 | struct protosw *sf_proto; |
93 | struct sflt_filter sf_filter; |
94 | struct os_refcnt sf_refcount; |
95 | uint32_t sf_flags; |
96 | }; |
97 | |
98 | #define SFF_INTERNAL 0x1 |
99 | |
100 | TAILQ_HEAD(socket_filter_list, socket_filter); |
101 | |
static LCK_GRP_DECLARE(sock_filter_lock_grp, "socket filter lock");
103 | static LCK_RW_DECLARE(sock_filter_lock, &sock_filter_lock_grp); |
104 | static LCK_MTX_DECLARE(sock_filter_cleanup_lock, &sock_filter_lock_grp); |
105 | |
106 | static struct socket_filter_list sock_filter_head = |
107 | TAILQ_HEAD_INITIALIZER(sock_filter_head); |
108 | static struct socket_filter_entry *sock_filter_cleanup_entries = NULL; |
109 | static thread_t sock_filter_cleanup_thread = NULL; |
110 | |
111 | static void sflt_cleanup_thread(void *, wait_result_t); |
112 | static void sflt_detach_locked(struct socket_filter_entry *entry); |
113 | |
114 | #undef sflt_register |
115 | static errno_t sflt_register_common(const struct sflt_filter *filter, int domain, |
116 | int type, int protocol, bool is_internal); |
117 | errno_t sflt_register(const struct sflt_filter *filter, int domain, |
118 | int type, int protocol); |
119 | |
120 | #if SKYWALK && defined(XNU_TARGET_OS_OSX) |
121 | static bool net_check_compatible_sfltr(void); |
122 | bool net_check_compatible_alf(void); |
123 | static bool net_check_compatible_parental_controls(void); |
124 | #endif /* SKYWALK && XNU_TARGET_OS_OSX */ |
125 | |
126 | #pragma mark -- Internal State Management -- |
127 | |
128 | __private_extern__ int |
129 | sflt_permission_check(struct inpcb *inp) |
130 | { |
131 | /* Only IPv4 or IPv6 sockets can bypass filters */ |
132 | if (!(inp->inp_vflag & INP_IPV4) && |
133 | !(inp->inp_vflag & INP_IPV6)) { |
134 | return 0; |
135 | } |
	/* Sockets that have intcoproc or management entitlements bypass socket filters. */
137 | if (INP_INTCOPROC_ALLOWED(inp) || INP_MANAGEMENT_ALLOWED(inp)) { |
138 | return 1; |
139 | } |
140 | /* Sockets bound to an intcoproc or management interface bypass socket filters. */ |
141 | if ((inp->inp_flags & INP_BOUND_IF) && |
142 | (IFNET_IS_INTCOPROC(inp->inp_boundifp) || |
143 | IFNET_IS_MANAGEMENT(inp->inp_boundifp))) { |
144 | return 1; |
145 | } |
146 | #if NECP |
147 | /* |
148 | * Make sure that the NECP policy is populated. |
149 | * If result is not populated, the policy ID will be |
150 | * NECP_KERNEL_POLICY_ID_NONE. Note that if the result |
151 | * is populated, but there was no match, it will be |
152 | * NECP_KERNEL_POLICY_ID_NO_MATCH. |
153 | * Do not call inp_update_necp_policy() to avoid scoping |
154 | * a socket prior to calls to bind(). |
155 | */ |
156 | if (inp->inp_policyresult.policy_id == NECP_KERNEL_POLICY_ID_NONE) { |
		necp_socket_find_policy_match(inp, NULL, NULL, 0);
158 | } |
159 | |
160 | /* If the filter unit is marked to be "no filter", bypass filters */ |
161 | if (inp->inp_policyresult.results.filter_control_unit == |
162 | NECP_FILTER_UNIT_NO_FILTER) { |
163 | return 1; |
164 | } |
165 | #endif /* NECP */ |
166 | return 0; |
167 | } |
168 | |
169 | static void |
170 | sflt_retain_locked(struct socket_filter *filter) |
171 | { |
	os_ref_retain_locked(&filter->sf_refcount);
173 | } |
174 | |
175 | static void |
176 | sflt_release_locked(struct socket_filter *filter) |
177 | { |
	if (os_ref_release_locked(&filter->sf_refcount) == 0) {
179 | /* Call the unregistered function */ |
180 | if (filter->sf_filter.sf_unregistered) { |
			lck_rw_unlock_exclusive(&sock_filter_lock);
182 | filter->sf_filter.sf_unregistered( |
183 | filter->sf_filter.sf_handle); |
			lck_rw_lock_exclusive(&sock_filter_lock);
185 | } |
186 | |
187 | /* Free the entry */ |
188 | kfree_type(struct socket_filter, filter); |
189 | } |
190 | } |
191 | |
192 | static void |
193 | sflt_entry_retain(struct socket_filter_entry *entry) |
194 | { |
	os_ref_retain(&entry->sfe_refcount);
196 | } |
197 | |
198 | static void |
199 | sflt_entry_release(struct socket_filter_entry *entry) |
200 | { |
	if (os_ref_release(&entry->sfe_refcount) == 0) {
202 | /* That was the last reference */ |
203 | |
204 | /* Take the cleanup lock */ |
		lck_mtx_lock(&sock_filter_cleanup_lock);
206 | |
207 | /* Put this item on the cleanup list */ |
208 | entry->sfe_next_oncleanup = sock_filter_cleanup_entries; |
209 | sock_filter_cleanup_entries = entry; |
210 | |
211 | /* If the item is the first item in the list */ |
212 | if (entry->sfe_next_oncleanup == NULL) { |
213 | if (sock_filter_cleanup_thread == NULL) { |
214 | /* Create a thread */ |
				kernel_thread_start(sflt_cleanup_thread,
				    NULL, &sock_filter_cleanup_thread);
217 | } else { |
218 | /* Wakeup the thread */ |
				wakeup(&sock_filter_cleanup_entries);
220 | } |
221 | } |
222 | |
223 | /* Drop the cleanup lock */ |
		lck_mtx_unlock(&sock_filter_cleanup_lock);
225 | } |
226 | } |
227 | |
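/*
 * Dedicated cleanup thread.  sflt_entry_release() may run while the shared
 * sock_filter_lock is held, but unlinking an entry requires the exclusive
 * lock and sf_detach must be called with no locks held, so the actual
 * teardown is deferred to this thread.
 */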
228 | __attribute__((noreturn)) |
229 | static void |
230 | sflt_cleanup_thread(void *blah, wait_result_t blah2) |
231 | { |
232 | #pragma unused(blah, blah2) |
233 | while (1) { |
		lck_mtx_lock(&sock_filter_cleanup_lock);
235 | while (sock_filter_cleanup_entries == NULL) { |
236 | /* Sleep until we've got something better to do */ |
			msleep(&sock_filter_cleanup_entries,
			    &sock_filter_cleanup_lock, PWAIT,
			    "sflt_cleanup", NULL);
240 | } |
241 | |
242 | /* Pull the current list of dead items */ |
243 | struct socket_filter_entry *dead = sock_filter_cleanup_entries; |
244 | sock_filter_cleanup_entries = NULL; |
245 | |
246 | /* Drop the lock */ |
		lck_mtx_unlock(&sock_filter_cleanup_lock);
248 | |
249 | /* Take the socket filter lock */ |
		lck_rw_lock_exclusive(&sock_filter_lock);
251 | |
252 | /* Cleanup every dead item */ |
253 | struct socket_filter_entry *entry; |
254 | for (entry = dead; entry; entry = dead) { |
255 | struct socket_filter_entry **nextpp; |
256 | |
257 | dead = entry->sfe_next_oncleanup; |
258 | |
259 | /* Call detach function if necessary - drop the lock */ |
260 | if ((entry->sfe_flags & SFEF_NODETACH) == 0 && |
261 | entry->sfe_filter->sf_filter.sf_detach) { |
262 | entry->sfe_flags |= SFEF_NODETACH; |
				lck_rw_unlock_exclusive(&sock_filter_lock);
264 | |
265 | /* |
266 | * Warning - passing a potentially |
267 | * dead socket may be bad |
268 | */ |
269 | entry->sfe_filter->sf_filter.sf_detach( |
270 | entry->sfe_cookie, entry->sfe_socket); |
271 | |
				lck_rw_lock_exclusive(&sock_filter_lock);
273 | } |
274 | |
275 | /* |
276 | * Pull entry off the socket list -- |
277 | * if the socket still exists |
278 | */ |
279 | if ((entry->sfe_flags & SFEF_NOSOCKET) == 0) { |
280 | for (nextpp = &entry->sfe_socket->so_filt; |
281 | *nextpp; |
282 | nextpp = &(*nextpp)->sfe_next_onsocket) { |
283 | if (*nextpp == entry) { |
284 | *nextpp = |
285 | entry->sfe_next_onsocket; |
286 | break; |
287 | } |
288 | } |
289 | } |
290 | |
291 | /* Pull entry off the filter list */ |
292 | for (nextpp = &entry->sfe_filter->sf_entry_head; |
293 | *nextpp; nextpp = &(*nextpp)->sfe_next_onfilter) { |
294 | if (*nextpp == entry) { |
295 | *nextpp = entry->sfe_next_onfilter; |
296 | break; |
297 | } |
298 | } |
299 | |
300 | /* |
301 | * Release the filter -- may drop lock, but that's okay |
302 | */ |
			sflt_release_locked(entry->sfe_filter);
304 | entry->sfe_socket = NULL; |
305 | entry->sfe_filter = NULL; |
306 | kfree_type(struct socket_filter_entry, entry); |
307 | } |
308 | |
309 | /* Drop the socket filter lock */ |
		lck_rw_unlock_exclusive(&sock_filter_lock);
311 | } |
312 | /* NOTREACHED */ |
313 | } |
314 | |
315 | static int |
316 | sflt_attach_locked(struct socket *so, struct socket_filter *filter, |
317 | int socklocked) |
318 | { |
319 | int error = 0; |
320 | struct socket_filter_entry *entry = NULL; |
321 | |
322 | if (sflt_permission_check(sotoinpcb(so))) { |
323 | return 0; |
324 | } |
325 | |
326 | if (filter == NULL) { |
327 | return ENOENT; |
328 | } |
329 | |
330 | for (entry = so->so_filt; entry; entry = entry->sfe_next_onfilter) { |
331 | if (entry->sfe_filter->sf_filter.sf_handle == |
332 | filter->sf_filter.sf_handle) { |
333 | return EEXIST; |
334 | } |
335 | } |
336 | /* allocate the socket filter entry */ |
337 | entry = kalloc_type(struct socket_filter_entry, Z_WAITOK | Z_NOFAIL); |
338 | |
339 | /* Initialize the socket filter entry */ |
340 | entry->sfe_cookie = NULL; |
341 | entry->sfe_flags = SFEF_ATTACHED; |
342 | os_ref_init(&entry->sfe_refcount, NULL); /* corresponds to SFEF_ATTACHED flag set */ |
343 | |
344 | /* Put the entry in the filter list */ |
345 | sflt_retain_locked(filter); |
346 | entry->sfe_filter = filter; |
347 | entry->sfe_next_onfilter = filter->sf_entry_head; |
348 | filter->sf_entry_head = entry; |
349 | |
350 | /* Put the entry on the socket filter list */ |
351 | entry->sfe_socket = so; |
352 | entry->sfe_next_onsocket = so->so_filt; |
353 | so->so_filt = entry; |
354 | |
355 | if (entry->sfe_filter->sf_filter.sf_attach) { |
356 | /* Retain the entry while we call attach */ |
357 | sflt_entry_retain(entry); |
358 | |
359 | /* |
360 | * Release the filter lock -- |
361 | * callers must be aware we will do this |
362 | */ |
		lck_rw_unlock_exclusive(&sock_filter_lock);
364 | |
365 | /* Unlock the socket */ |
366 | if (socklocked) { |
			socket_unlock(so, 0);
368 | } |
369 | |
370 | /* It's finally safe to call the filter function */ |
371 | error = entry->sfe_filter->sf_filter.sf_attach( |
372 | &entry->sfe_cookie, so); |
373 | |
374 | /* Lock the socket again */ |
375 | if (socklocked) { |
			socket_lock(so, 0);
377 | } |
378 | |
379 | /* Lock the filters again */ |
		lck_rw_lock_exclusive(&sock_filter_lock);
381 | |
382 | /* |
383 | * If the attach function returns an error, |
384 | * this filter must be detached |
385 | */ |
386 | if (error) { |
387 | /* don't call sf_detach */ |
388 | entry->sfe_flags |= SFEF_NODETACH; |
389 | sflt_detach_locked(entry); |
390 | } |
391 | |
392 | /* Release the retain we held through the attach call */ |
393 | sflt_entry_release(entry); |
394 | } |
395 | |
396 | return error; |
397 | } |
398 | |
399 | errno_t |
400 | sflt_attach_internal(socket_t socket, sflt_handle handle) |
401 | { |
402 | if (socket == NULL || handle == 0) { |
403 | return EINVAL; |
404 | } |
405 | |
406 | int result = EINVAL; |
407 | |
	lck_rw_lock_exclusive(&sock_filter_lock);
409 | |
410 | struct socket_filter *filter = NULL; |
411 | TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) { |
412 | if (filter->sf_filter.sf_handle == handle) { |
413 | break; |
414 | } |
415 | } |
416 | |
417 | if (filter) { |
		result = sflt_attach_locked(socket, filter, 1);
419 | } |
420 | |
	lck_rw_unlock_exclusive(&sock_filter_lock);
422 | |
423 | return result; |
424 | } |
425 | |
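/*
 * Drops the SFEF_ATTACHED reference.  The entry is not torn down here;
 * releasing the last reference queues it for sflt_cleanup_thread.
 */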
426 | static void |
427 | sflt_detach_locked(struct socket_filter_entry *entry) |
428 | { |
429 | if ((entry->sfe_flags & SFEF_ATTACHED) != 0) { |
430 | entry->sfe_flags &= ~SFEF_ATTACHED; |
431 | sflt_entry_release(entry); |
432 | } |
433 | } |
434 | |
435 | #pragma mark -- Socket Layer Hooks -- |
436 | |
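/*
 * sflt_initsock
 *
 * Attaches all global filters registered for the socket's protocol.
 */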
437 | __private_extern__ void |
438 | sflt_initsock(struct socket *so) |
439 | { |
440 | /* |
441 | * Can only register socket filter for internet protocols |
442 | */ |
443 | if (SOCK_DOM(so) != PF_INET && SOCK_DOM(so) != PF_INET6) { |
444 | return; |
445 | } |
446 | |
447 | /* |
448 | * Point to the real protosw, as so_proto might have been |
449 | * pointed to a modified version. |
450 | */ |
451 | struct protosw *proto = so->so_proto->pr_protosw; |
452 | |
	lck_rw_lock_shared(&sock_filter_lock);
454 | if (TAILQ_FIRST(&proto->pr_filter_head) != NULL) { |
455 | /* Promote lock to exclusive */ |
		if (!lck_rw_lock_shared_to_exclusive(&sock_filter_lock)) {
			lck_rw_lock_exclusive(&sock_filter_lock);
458 | } |
459 | |
460 | /* |
461 | * Warning: A filter unregistering will be pulled out of |
462 | * the list. This could happen while we drop the lock in |
		 * sflt_attach_locked or sflt_release_locked. For this
464 | * reason we retain a reference on the filter (or next_filter) |
465 | * while calling this function. This protects us from a panic, |
466 | * but it could result in a socket being created without all |
467 | * of the global filters if we're attaching a filter as it |
468 | * is removed, if that's possible. |
469 | */ |
470 | struct socket_filter *filter = |
471 | TAILQ_FIRST(&proto->pr_filter_head); |
472 | |
473 | sflt_retain_locked(filter); |
474 | |
475 | while (filter) { |
476 | struct socket_filter *filter_next; |
477 | /* |
			 * Warning: sflt_attach_locked
479 | * will drop the lock |
480 | */ |
			sflt_attach_locked(so, filter, 0);
482 | |
483 | filter_next = TAILQ_NEXT(filter, sf_protosw_next); |
484 | if (filter_next) { |
				sflt_retain_locked(filter_next);
486 | } |
487 | |
488 | /* |
			 * Warning: sflt_release_locked may remove
490 | * the filter from the queue |
491 | */ |
492 | sflt_release_locked(filter); |
493 | filter = filter_next; |
494 | } |
495 | } |
	lck_rw_done(&sock_filter_lock);
497 | } |
498 | |
499 | /* |
500 | * sflt_termsock |
501 | * |
502 | * Detaches all filters from the socket. |
503 | */ |
504 | __private_extern__ void |
505 | sflt_termsock(struct socket *so) |
506 | { |
507 | /* |
508 | * Fast path to avoid taking the lock |
509 | */ |
510 | if (so->so_filt == NULL) { |
511 | return; |
512 | } |
513 | |
	lck_rw_lock_exclusive(&sock_filter_lock);
515 | |
516 | struct socket_filter_entry *entry; |
517 | |
518 | while ((entry = so->so_filt) != NULL) { |
519 | /* Pull filter off the socket */ |
520 | so->so_filt = entry->sfe_next_onsocket; |
521 | entry->sfe_flags |= SFEF_NOSOCKET; |
522 | |
523 | /* Call detach */ |
524 | sflt_detach_locked(entry); |
525 | |
526 | /* |
527 | * On sflt_termsock, we can't return until the detach function |
528 | * has been called. Call the detach function - this is gross |
529 | * because the socket filter entry could be freed when we drop |
530 | * the lock, so we make copies on the stack and retain |
531 | * everything we need before dropping the lock. |
532 | */ |
533 | if ((entry->sfe_flags & SFEF_NODETACH) == 0 && |
534 | entry->sfe_filter->sf_filter.sf_detach) { |
535 | void *sfe_cookie = entry->sfe_cookie; |
536 | struct socket_filter *sfe_filter = entry->sfe_filter; |
537 | |
538 | /* Retain the socket filter */ |
			sflt_retain_locked(sfe_filter);
540 | |
541 | /* Mark that we've called the detach function */ |
542 | entry->sfe_flags |= SFEF_NODETACH; |
543 | |
544 | /* Drop the lock before calling the detach function */ |
			lck_rw_unlock_exclusive(&sock_filter_lock);
			sfe_filter->sf_filter.sf_detach(sfe_cookie, so);
			lck_rw_lock_exclusive(&sock_filter_lock);
548 | |
549 | /* Release the filter */ |
550 | sflt_release_locked(filter: sfe_filter); |
551 | } |
552 | } |
553 | |
	lck_rw_unlock_exclusive(&sock_filter_lock);
555 | } |
556 | |
557 | |
558 | static void |
559 | sflt_notify_internal(struct socket *so, sflt_event_t event, void *param, |
560 | sflt_handle handle) |
561 | { |
562 | if (so->so_filt == NULL) { |
563 | return; |
564 | } |
565 | |
566 | struct socket_filter_entry *entry; |
567 | int unlocked = 0; |
568 | |
	lck_rw_lock_shared(&sock_filter_lock);
570 | for (entry = so->so_filt; entry; entry = entry->sfe_next_onsocket) { |
571 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
572 | entry->sfe_filter->sf_filter.sf_notify && |
573 | ((handle && entry->sfe_filter->sf_filter.sf_handle != |
574 | handle) || !handle)) { |
575 | /* |
576 | * Retain the filter entry and release |
577 | * the socket filter lock |
578 | */ |
579 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
581 | |
582 | /* If the socket isn't already unlocked, unlock it */ |
583 | if (unlocked == 0) { |
584 | unlocked = 1; |
				socket_unlock(so, 0);
586 | } |
587 | |
588 | /* Finally call the filter */ |
589 | entry->sfe_filter->sf_filter.sf_notify( |
590 | entry->sfe_cookie, so, event, param); |
591 | |
592 | /* |
593 | * Take the socket filter lock again |
594 | * and release the entry |
595 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
597 | sflt_entry_release(entry); |
598 | } |
599 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
601 | |
602 | if (unlocked != 0) { |
		socket_lock(so, 0);
604 | } |
605 | } |
606 | |
607 | __private_extern__ void |
608 | sflt_notify(struct socket *so, sflt_event_t event, void *param) |
609 | { |
	sflt_notify_internal(so, event, param, 0);
611 | } |
612 | |
613 | static void |
614 | sflt_notify_after_register(struct socket *so, sflt_event_t event, |
615 | sflt_handle handle) |
616 | { |
617 | sflt_notify_internal(so, event, NULL, handle); |
618 | } |
619 | |
620 | __private_extern__ int |
621 | sflt_ioctl(struct socket *so, u_long cmd, caddr_t data) |
622 | { |
623 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
624 | return 0; |
625 | } |
626 | |
627 | struct socket_filter_entry *entry; |
628 | int unlocked = 0; |
629 | int error = 0; |
630 | |
	lck_rw_lock_shared(&sock_filter_lock);
632 | for (entry = so->so_filt; entry && error == 0; |
633 | entry = entry->sfe_next_onsocket) { |
634 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
635 | entry->sfe_filter->sf_filter.sf_ioctl) { |
636 | /* |
637 | * Retain the filter entry and release |
638 | * the socket filter lock |
639 | */ |
640 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
642 | |
643 | /* If the socket isn't already unlocked, unlock it */ |
644 | if (unlocked == 0) { |
				socket_unlock(so, 0);
646 | unlocked = 1; |
647 | } |
648 | |
649 | /* Call the filter */ |
650 | error = entry->sfe_filter->sf_filter.sf_ioctl( |
651 | entry->sfe_cookie, so, cmd, data); |
652 | |
653 | /* |
654 | * Take the socket filter lock again |
655 | * and release the entry |
656 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
658 | sflt_entry_release(entry); |
659 | } |
660 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
662 | |
663 | if (unlocked) { |
		socket_lock(so, 0);
665 | } |
666 | |
667 | return error; |
668 | } |
669 | |
670 | __private_extern__ int |
671 | sflt_bind(struct socket *so, const struct sockaddr *nam) |
672 | { |
673 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
674 | return 0; |
675 | } |
676 | |
677 | struct socket_filter_entry *entry; |
678 | int unlocked = 0; |
679 | int error = 0; |
680 | |
	lck_rw_lock_shared(&sock_filter_lock);
682 | for (entry = so->so_filt; entry && error == 0; |
683 | entry = entry->sfe_next_onsocket) { |
684 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
685 | entry->sfe_filter->sf_filter.sf_bind) { |
686 | /* |
687 | * Retain the filter entry and |
688 | * release the socket filter lock |
689 | */ |
690 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
692 | |
693 | /* If the socket isn't already unlocked, unlock it */ |
694 | if (unlocked == 0) { |
				socket_unlock(so, 0);
696 | unlocked = 1; |
697 | } |
698 | |
699 | /* Call the filter */ |
700 | error = entry->sfe_filter->sf_filter.sf_bind( |
701 | entry->sfe_cookie, so, nam); |
702 | |
703 | /* |
704 | * Take the socket filter lock again and |
705 | * release the entry |
706 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
708 | sflt_entry_release(entry); |
709 | } |
710 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
712 | |
713 | if (unlocked) { |
		socket_lock(so, 0);
715 | } |
716 | |
717 | return error; |
718 | } |
719 | |
720 | __private_extern__ int |
721 | sflt_listen(struct socket *so) |
722 | { |
723 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
724 | return 0; |
725 | } |
726 | |
727 | struct socket_filter_entry *entry; |
728 | int unlocked = 0; |
729 | int error = 0; |
730 | |
	lck_rw_lock_shared(&sock_filter_lock);
732 | for (entry = so->so_filt; entry && error == 0; |
733 | entry = entry->sfe_next_onsocket) { |
734 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
735 | entry->sfe_filter->sf_filter.sf_listen) { |
736 | /* |
737 | * Retain the filter entry and release |
738 | * the socket filter lock |
739 | */ |
740 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
742 | |
743 | /* If the socket isn't already unlocked, unlock it */ |
744 | if (unlocked == 0) { |
				socket_unlock(so, 0);
746 | unlocked = 1; |
747 | } |
748 | |
749 | /* Call the filter */ |
750 | error = entry->sfe_filter->sf_filter.sf_listen( |
751 | entry->sfe_cookie, so); |
752 | |
753 | /* |
754 | * Take the socket filter lock again |
755 | * and release the entry |
756 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
758 | sflt_entry_release(entry); |
759 | } |
760 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
762 | |
763 | if (unlocked) { |
		socket_lock(so, 0);
765 | } |
766 | |
767 | return error; |
768 | } |
769 | |
770 | __private_extern__ int |
771 | sflt_accept(struct socket *head, struct socket *so, |
772 | const struct sockaddr *local, const struct sockaddr *remote) |
773 | { |
774 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
775 | return 0; |
776 | } |
777 | |
778 | struct socket_filter_entry *entry; |
779 | int unlocked = 0; |
780 | int error = 0; |
781 | |
	lck_rw_lock_shared(&sock_filter_lock);
783 | for (entry = so->so_filt; entry && error == 0; |
784 | entry = entry->sfe_next_onsocket) { |
785 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
786 | entry->sfe_filter->sf_filter.sf_accept) { |
787 | /* |
788 | * Retain the filter entry and |
789 | * release the socket filter lock |
790 | */ |
791 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
793 | |
794 | /* If the socket isn't already unlocked, unlock it */ |
795 | if (unlocked == 0) { |
				socket_unlock(so, 0);
797 | unlocked = 1; |
798 | } |
799 | |
800 | /* Call the filter */ |
801 | error = entry->sfe_filter->sf_filter.sf_accept( |
802 | entry->sfe_cookie, head, so, local, remote); |
803 | |
804 | /* |
805 | * Take the socket filter lock again |
806 | * and release the entry |
807 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
809 | sflt_entry_release(entry); |
810 | } |
811 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
813 | |
814 | if (unlocked) { |
		socket_lock(so, 0);
816 | } |
817 | |
818 | return error; |
819 | } |
820 | |
821 | __private_extern__ int |
822 | sflt_getsockname(struct socket *so, struct sockaddr **local) |
823 | { |
824 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
825 | return 0; |
826 | } |
827 | |
828 | struct socket_filter_entry *entry; |
829 | int unlocked = 0; |
830 | int error = 0; |
831 | |
	lck_rw_lock_shared(&sock_filter_lock);
833 | for (entry = so->so_filt; entry && error == 0; |
834 | entry = entry->sfe_next_onsocket) { |
835 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
836 | entry->sfe_filter->sf_filter.sf_getsockname) { |
837 | /* |
838 | * Retain the filter entry and |
839 | * release the socket filter lock |
840 | */ |
841 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
843 | |
844 | /* If the socket isn't already unlocked, unlock it */ |
845 | if (unlocked == 0) { |
				socket_unlock(so, 0);
847 | unlocked = 1; |
848 | } |
849 | |
850 | /* Call the filter */ |
851 | error = entry->sfe_filter->sf_filter.sf_getsockname( |
852 | entry->sfe_cookie, so, local); |
853 | |
854 | /* |
855 | * Take the socket filter lock again |
856 | * and release the entry |
857 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
859 | sflt_entry_release(entry); |
860 | } |
861 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
863 | |
864 | if (unlocked) { |
		socket_lock(so, 0);
866 | } |
867 | |
868 | return error; |
869 | } |
870 | |
871 | __private_extern__ int |
872 | sflt_getpeername(struct socket *so, struct sockaddr **remote) |
873 | { |
874 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
875 | return 0; |
876 | } |
877 | |
878 | struct socket_filter_entry *entry; |
879 | int unlocked = 0; |
880 | int error = 0; |
881 | |
	lck_rw_lock_shared(&sock_filter_lock);
883 | for (entry = so->so_filt; entry && error == 0; |
884 | entry = entry->sfe_next_onsocket) { |
885 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
886 | entry->sfe_filter->sf_filter.sf_getpeername) { |
887 | /* |
888 | * Retain the filter entry and release |
889 | * the socket filter lock |
890 | */ |
891 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
893 | |
894 | /* If the socket isn't already unlocked, unlock it */ |
895 | if (unlocked == 0) { |
				socket_unlock(so, 0);
897 | unlocked = 1; |
898 | } |
899 | |
900 | /* Call the filter */ |
901 | error = entry->sfe_filter->sf_filter.sf_getpeername( |
902 | entry->sfe_cookie, so, remote); |
903 | |
904 | /* |
905 | * Take the socket filter lock again |
906 | * and release the entry |
907 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
909 | sflt_entry_release(entry); |
910 | } |
911 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
913 | |
914 | if (unlocked) { |
		socket_lock(so, 0);
916 | } |
917 | |
918 | return error; |
919 | } |
920 | |
921 | __private_extern__ int |
922 | sflt_connectin(struct socket *so, const struct sockaddr *remote) |
923 | { |
924 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
925 | return 0; |
926 | } |
927 | |
928 | struct socket_filter_entry *entry; |
929 | int unlocked = 0; |
930 | int error = 0; |
931 | |
	lck_rw_lock_shared(&sock_filter_lock);
933 | for (entry = so->so_filt; entry && error == 0; |
934 | entry = entry->sfe_next_onsocket) { |
935 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
936 | entry->sfe_filter->sf_filter.sf_connect_in) { |
937 | /* |
938 | * Retain the filter entry and release |
939 | * the socket filter lock |
940 | */ |
941 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
943 | |
944 | /* If the socket isn't already unlocked, unlock it */ |
945 | if (unlocked == 0) { |
				socket_unlock(so, 0);
947 | unlocked = 1; |
948 | } |
949 | |
950 | /* Call the filter */ |
951 | error = entry->sfe_filter->sf_filter.sf_connect_in( |
952 | entry->sfe_cookie, so, remote); |
953 | |
954 | /* |
955 | * Take the socket filter lock again |
956 | * and release the entry |
957 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
959 | sflt_entry_release(entry); |
960 | } |
961 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
963 | |
964 | if (unlocked) { |
		socket_lock(so, 0);
966 | } |
967 | |
968 | return error; |
969 | } |
970 | |
971 | static int |
972 | sflt_connectout_common(struct socket *so, const struct sockaddr *nam) |
973 | { |
974 | struct socket_filter_entry *entry; |
975 | int unlocked = 0; |
976 | int error = 0; |
977 | |
	lck_rw_lock_shared(&sock_filter_lock);
979 | for (entry = so->so_filt; entry && error == 0; |
980 | entry = entry->sfe_next_onsocket) { |
981 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
982 | entry->sfe_filter->sf_filter.sf_connect_out) { |
983 | /* |
984 | * Retain the filter entry and release |
985 | * the socket filter lock |
986 | */ |
987 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
989 | |
990 | /* If the socket isn't already unlocked, unlock it */ |
991 | if (unlocked == 0) { |
				socket_unlock(so, 0);
993 | unlocked = 1; |
994 | } |
995 | |
996 | /* Call the filter */ |
997 | error = entry->sfe_filter->sf_filter.sf_connect_out( |
998 | entry->sfe_cookie, so, nam); |
999 | |
1000 | /* |
1001 | * Take the socket filter lock again |
1002 | * and release the entry |
1003 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
1005 | sflt_entry_release(entry); |
1006 | } |
1007 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
1009 | |
1010 | if (unlocked) { |
		socket_lock(so, 0);
1012 | } |
1013 | |
1014 | return error; |
1015 | } |
1016 | |
1017 | __private_extern__ int |
1018 | sflt_connectout(struct socket *so, const struct sockaddr *nam) |
1019 | { |
1020 | char buf[SOCK_MAXADDRLEN]; |
1021 | struct sockaddr *sa; |
1022 | int error; |
1023 | |
1024 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
1025 | return 0; |
1026 | } |
1027 | |
1028 | /* |
1029 | * Workaround for rdar://23362120 |
1030 | * Always pass a buffer that can hold an IPv6 socket address |
1031 | */ |
	bzero(buf, sizeof(buf));
	bcopy(nam, buf, nam->sa_len);
1034 | sa = (struct sockaddr *)buf; |
1035 | |
	error = sflt_connectout_common(so, sa);
1037 | if (error != 0) { |
1038 | return error; |
1039 | } |
1040 | |
1041 | /* |
1042 | * If the address was modified, copy it back |
1043 | */ |
	if (bcmp(sa, nam, nam->sa_len) != 0) {
		bcopy(sa, (struct sockaddr *)(uintptr_t)nam, nam->sa_len);
1046 | } |
1047 | |
1048 | return 0; |
1049 | } |
1050 | |
1051 | __private_extern__ int |
1052 | sflt_setsockopt(struct socket *so, struct sockopt *sopt) |
1053 | { |
1054 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
1055 | return 0; |
1056 | } |
1057 | |
1058 | /* Socket-options are checked at the MPTCP-layer */ |
1059 | if (so->so_flags & SOF_MP_SUBFLOW) { |
1060 | return 0; |
1061 | } |
1062 | |
1063 | struct socket_filter_entry *entry; |
1064 | int unlocked = 0; |
1065 | int error = 0; |
1066 | |
	lck_rw_lock_shared(&sock_filter_lock);
1068 | for (entry = so->so_filt; entry && error == 0; |
1069 | entry = entry->sfe_next_onsocket) { |
1070 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
1071 | entry->sfe_filter->sf_filter.sf_setoption) { |
1072 | /* |
1073 | * Retain the filter entry and release |
1074 | * the socket filter lock |
1075 | */ |
1076 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
1078 | |
1079 | /* If the socket isn't already unlocked, unlock it */ |
1080 | if (unlocked == 0) { |
				socket_unlock(so, 0);
1082 | unlocked = 1; |
1083 | } |
1084 | |
1085 | /* Call the filter */ |
1086 | error = entry->sfe_filter->sf_filter.sf_setoption( |
1087 | entry->sfe_cookie, so, sopt); |
1088 | |
1089 | /* |
1090 | * Take the socket filter lock again |
1091 | * and release the entry |
1092 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
1094 | sflt_entry_release(entry); |
1095 | } |
1096 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
1098 | |
1099 | if (unlocked) { |
		socket_lock(so, 0);
1101 | } |
1102 | |
1103 | return error; |
1104 | } |
1105 | |
1106 | __private_extern__ int |
1107 | sflt_getsockopt(struct socket *so, struct sockopt *sopt) |
1108 | { |
1109 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
1110 | return 0; |
1111 | } |
1112 | |
1113 | /* Socket-options are checked at the MPTCP-layer */ |
1114 | if (so->so_flags & SOF_MP_SUBFLOW) { |
1115 | return 0; |
1116 | } |
1117 | |
1118 | struct socket_filter_entry *entry; |
1119 | int unlocked = 0; |
1120 | int error = 0; |
1121 | |
	lck_rw_lock_shared(&sock_filter_lock);
1123 | for (entry = so->so_filt; entry && error == 0; |
1124 | entry = entry->sfe_next_onsocket) { |
1125 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
1126 | entry->sfe_filter->sf_filter.sf_getoption) { |
1127 | /* |
1128 | * Retain the filter entry and release |
1129 | * the socket filter lock |
1130 | */ |
1131 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
1133 | |
1134 | /* If the socket isn't already unlocked, unlock it */ |
1135 | if (unlocked == 0) { |
				socket_unlock(so, 0);
1137 | unlocked = 1; |
1138 | } |
1139 | |
1140 | /* Call the filter */ |
1141 | error = entry->sfe_filter->sf_filter.sf_getoption( |
1142 | entry->sfe_cookie, so, sopt); |
1143 | |
1144 | /* |
1145 | * Take the socket filter lock again |
1146 | * and release the entry |
1147 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
1149 | sflt_entry_release(entry); |
1150 | } |
1151 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
1153 | |
1154 | if (unlocked) { |
		socket_lock(so, 0);
1156 | } |
1157 | |
1158 | return error; |
1159 | } |
1160 | |
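/*
 * Data path hooks.  Per the KPI contract in <sys/kpi_socketfilter.h>, a
 * filter's sf_data_out/sf_data_in callback may modify the mbuf chain,
 * return EJUSTRETURN after taking ownership of the data, or return any
 * other non-zero error to have the caller drop the chain.
 */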
1161 | __private_extern__ int |
1162 | sflt_data_out(struct socket *so, const struct sockaddr *to, mbuf_t *data, |
1163 | mbuf_t *control, sflt_data_flag_t flags) |
1164 | { |
1165 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
1166 | return 0; |
1167 | } |
1168 | |
	/* Data on MPTCP subflow sockets is handled at the MPTCP layer */
1170 | if (so->so_flags & SOF_MP_SUBFLOW) { |
1171 | return 0; |
1172 | } |
1173 | |
1174 | struct socket_filter_entry *entry; |
1175 | int unlocked = 0; |
1176 | int setsendthread = 0; |
1177 | int error = 0; |
1178 | |
	lck_rw_lock_shared(&sock_filter_lock);
1180 | for (entry = so->so_filt; entry && error == 0; |
1181 | entry = entry->sfe_next_onsocket) { |
1182 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
1183 | entry->sfe_filter->sf_filter.sf_data_out) { |
1184 | /* |
1185 | * Retain the filter entry and |
1186 | * release the socket filter lock |
1187 | */ |
1188 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
1190 | |
1191 | /* If the socket isn't already unlocked, unlock it */ |
1192 | if (unlocked == 0) { |
1193 | if (so->so_send_filt_thread == NULL) { |
1194 | setsendthread = 1; |
1195 | so->so_send_filt_thread = |
1196 | current_thread(); |
1197 | } |
				socket_unlock(so, 0);
1199 | unlocked = 1; |
1200 | } |
1201 | |
1202 | /* Call the filter */ |
1203 | error = entry->sfe_filter->sf_filter.sf_data_out( |
1204 | entry->sfe_cookie, so, to, data, control, flags); |
1205 | |
1206 | /* |
1207 | * Take the socket filter lock again |
1208 | * and release the entry |
1209 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
1211 | sflt_entry_release(entry); |
1212 | } |
1213 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
1215 | |
1216 | if (unlocked) { |
		socket_lock(so, 0);
1218 | if (setsendthread) { |
1219 | so->so_send_filt_thread = NULL; |
1220 | } |
1221 | } |
1222 | |
1223 | return error; |
1224 | } |
1225 | |
1226 | __private_extern__ int |
1227 | sflt_data_in(struct socket *so, const struct sockaddr *from, mbuf_t *data, |
1228 | mbuf_t *control, sflt_data_flag_t flags) |
1229 | { |
1230 | if (so->so_filt == NULL || sflt_permission_check(sotoinpcb(so))) { |
1231 | return 0; |
1232 | } |
1233 | |
	/* Data on MPTCP subflow sockets is handled at the MPTCP layer */
1235 | if (so->so_flags & SOF_MP_SUBFLOW) { |
1236 | return 0; |
1237 | } |
1238 | |
1239 | struct socket_filter_entry *entry; |
1240 | int error = 0; |
1241 | int unlocked = 0; |
1242 | |
	lck_rw_lock_shared(&sock_filter_lock);
1244 | |
1245 | for (entry = so->so_filt; entry && (error == 0); |
1246 | entry = entry->sfe_next_onsocket) { |
1247 | if ((entry->sfe_flags & SFEF_ATTACHED) && |
1248 | entry->sfe_filter->sf_filter.sf_data_in) { |
1249 | /* |
1250 | * Retain the filter entry and |
1251 | * release the socket filter lock |
1252 | */ |
1253 | sflt_entry_retain(entry); |
			lck_rw_unlock_shared(&sock_filter_lock);
1255 | |
1256 | /* If the socket isn't already unlocked, unlock it */ |
1257 | if (unlocked == 0) { |
1258 | unlocked = 1; |
				socket_unlock(so, 0);
1260 | } |
1261 | |
1262 | /* Call the filter */ |
1263 | error = entry->sfe_filter->sf_filter.sf_data_in( |
1264 | entry->sfe_cookie, so, from, data, control, flags); |
1265 | |
1266 | /* |
1267 | * Take the socket filter lock again |
1268 | * and release the entry |
1269 | */ |
			lck_rw_lock_shared(&sock_filter_lock);
1271 | sflt_entry_release(entry); |
1272 | } |
1273 | } |
	lck_rw_unlock_shared(&sock_filter_lock);
1275 | |
1276 | if (unlocked) { |
		socket_lock(so, 0);
1278 | } |
1279 | |
1280 | return error; |
1281 | } |
1282 | |
1283 | #pragma mark -- KPI -- |
1284 | |
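/*
 * Illustrative sketch of kext-side use of this KPI.  The callback names and
 * handle value below are hypothetical; sflt_register(), sflt_unregister()
 * and struct sflt_filter are the real entry points declared in
 * <sys/kpi_socketfilter.h>.
 *
 *	static errno_t
 *	example_attach(void **cookie, socket_t so)
 *	{
 *		*cookie = NULL;         // per-socket state could go here
 *		return 0;               // non-zero rejects the attach
 *	}
 *
 *	static void
 *	example_detach(void *cookie, socket_t so)
 *	{
 *		// free any per-socket state
 *	}
 *
 *	static const struct sflt_filter example_filter = {
 *		.sf_handle = 0x455846,          // hypothetical handle
 *		.sf_flags  = SFLT_GLOBAL,       // attach to all new sockets
 *		.sf_name   = "com.example.filter",
 *		.sf_attach = example_attach,
 *		.sf_detach = example_detach,
 *	};
 *
 *	// Attach to every new IPv4 TCP socket:
 *	errno_t err = sflt_register(&example_filter, PF_INET,
 *	    SOCK_STREAM, IPPROTO_TCP);
 *	...
 *	err = sflt_unregister(example_filter.sf_handle);
 */
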
1285 | errno_t |
1286 | sflt_attach(socket_t socket, sflt_handle handle) |
1287 | { |
	socket_lock(socket, 1);
	errno_t result = sflt_attach_internal(socket, handle);
	socket_unlock(socket, 1);
1291 | return result; |
1292 | } |
1293 | |
1294 | errno_t |
1295 | sflt_detach(socket_t socket, sflt_handle handle) |
1296 | { |
1297 | struct socket_filter_entry *entry; |
1298 | errno_t result = 0; |
1299 | |
1300 | if (socket == NULL || handle == 0) { |
1301 | return EINVAL; |
1302 | } |
1303 | |
	lck_rw_lock_exclusive(&sock_filter_lock);
1305 | for (entry = socket->so_filt; entry; entry = entry->sfe_next_onsocket) { |
1306 | if (entry->sfe_filter->sf_filter.sf_handle == handle && |
1307 | (entry->sfe_flags & SFEF_ATTACHED) != 0) { |
1308 | break; |
1309 | } |
1310 | } |
1311 | |
1312 | if (entry != NULL) { |
1313 | sflt_detach_locked(entry); |
1314 | } |
	lck_rw_unlock_exclusive(&sock_filter_lock);
1316 | |
1317 | return result; |
1318 | } |
1319 | |
1320 | struct solist { |
1321 | struct solist *next; |
1322 | struct socket *so; |
1323 | }; |
1324 | |
1325 | static errno_t |
1326 | sflt_register_common(const struct sflt_filter *filter, int domain, int type, |
1327 | int protocol, bool is_internal) |
1328 | { |
1329 | struct socket_filter *sock_filt = NULL; |
1330 | struct socket_filter *match = NULL; |
1331 | int error = 0; |
1332 | struct protosw *pr; |
1333 | unsigned int len; |
1334 | struct socket *so; |
1335 | struct inpcb *inp; |
1336 | struct solist *solisthead = NULL, *solist = NULL; |
1337 | |
1338 | if ((domain != PF_INET) && (domain != PF_INET6)) { |
1339 | return ENOTSUP; |
1340 | } |
1341 | |
	pr = pffindproto(domain, protocol, type);
1343 | if (pr == NULL) { |
1344 | return ENOENT; |
1345 | } |
1346 | |
1347 | if (filter->sf_attach == NULL || filter->sf_detach == NULL || |
1348 | filter->sf_handle == 0 || filter->sf_name == NULL) { |
1349 | return EINVAL; |
1350 | } |
1351 | |
1352 | /* Allocate the socket filter */ |
1353 | sock_filt = kalloc_type(struct socket_filter, |
1354 | Z_WAITOK | Z_ZERO | Z_NOFAIL); |
1355 | |
1356 | /* Legacy sflt_filter length; current structure minus extended */ |
1357 | len = sizeof(*filter) - sizeof(struct sflt_filter_ext); |
1358 | /* |
1359 | * Include extended fields if filter defines SFLT_EXTENDED. |
1360 | * We've zeroed out our internal sflt_filter placeholder, |
1361 | * so any unused portion would have been taken care of. |
1362 | */ |
1363 | if (filter->sf_flags & SFLT_EXTENDED) { |
1364 | unsigned int ext_len = filter->sf_len; |
1365 | |
1366 | if (ext_len > sizeof(struct sflt_filter_ext)) { |
1367 | ext_len = sizeof(struct sflt_filter_ext); |
1368 | } |
1369 | |
1370 | len += ext_len; |
1371 | } |
	bcopy(filter, &sock_filt->sf_filter, len);
1373 | |
	lck_rw_lock_exclusive(&sock_filter_lock);
1375 | /* Look for an existing entry */ |
1376 | TAILQ_FOREACH(match, &sock_filter_head, sf_global_next) { |
1377 | if (match->sf_filter.sf_handle == |
1378 | sock_filt->sf_filter.sf_handle) { |
1379 | break; |
1380 | } |
1381 | } |
1382 | |
1383 | /* Add the entry only if there was no existing entry */ |
1384 | if (match == NULL) { |
1385 | TAILQ_INSERT_TAIL(&sock_filter_head, sock_filt, sf_global_next); |
1386 | if ((sock_filt->sf_filter.sf_flags & SFLT_GLOBAL) != 0) { |
1387 | TAILQ_INSERT_TAIL(&pr->pr_filter_head, sock_filt, |
1388 | sf_protosw_next); |
1389 | sock_filt->sf_proto = pr; |
1390 | } |
1391 | os_ref_init(&sock_filt->sf_refcount, NULL); |
1392 | |
		OSIncrementAtomic64(&net_api_stats.nas_sfltr_register_count);
1394 | INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_total); |
1395 | if (is_internal) { |
1396 | sock_filt->sf_flags |= SFF_INTERNAL; |
			OSIncrementAtomic64(&net_api_stats.nas_sfltr_register_os_count);
1398 | INC_ATOMIC_INT64_LIM(net_api_stats.nas_sfltr_register_os_total); |
1399 | } |
1400 | } |
1401 | #if SKYWALK && defined(XNU_TARGET_OS_OSX) |
	net_filter_event_mark(NET_FILTER_EVENT_SOCKET,
	    net_check_compatible_sfltr());
	net_filter_event_mark(NET_FILTER_EVENT_ALF,
	    net_check_compatible_alf());
	net_filter_event_mark(NET_FILTER_EVENT_PARENTAL_CONTROLS,
	    net_check_compatible_parental_controls());
1408 | #endif /* SKYWALK && XNU_TARGET_OS_OSX */ |
1409 | |
	lck_rw_unlock_exclusive(&sock_filter_lock);
1411 | |
1412 | if (match != NULL) { |
1413 | kfree_type(struct socket_filter, sock_filt); |
1414 | return EEXIST; |
1415 | } |
1416 | |
1417 | if (!(filter->sf_flags & SFLT_EXTENDED_REGISTRY)) { |
1418 | return error; |
1419 | } |
1420 | |
1421 | /* |
1422 | * Setup the filter on the TCP and UDP sockets already created. |
1423 | */ |
1424 | #define SOLIST_ADD(_so) do { \ |
1425 | solist->next = solisthead; \ |
1426 | sock_retain((_so)); \ |
1427 | solist->so = (_so); \ |
1428 | solisthead = solist; \ |
1429 | } while (0) |
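/*
 * Walk the protocol's pcb list under the pcbinfo lock, retaining each
 * matching socket on a local list; the filter is attached and notified
 * below, after the pcbinfo lock has been dropped.
 */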
1430 | if (protocol == IPPROTO_TCP) { |
		lck_rw_lock_shared(&tcbinfo.ipi_lock);
1432 | LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) { |
1433 | so = inp->inp_socket; |
1434 | if (so == NULL || (so->so_state & SS_DEFUNCT) || |
1435 | (!(so->so_flags & SOF_MP_SUBFLOW) && |
1436 | (so->so_state & SS_NOFDREF)) || |
1437 | !SOCK_CHECK_DOM(so, domain) || |
1438 | !SOCK_CHECK_TYPE(so, type)) { |
1439 | continue; |
1440 | } |
1441 | solist = kalloc_type(struct solist, Z_NOWAIT); |
1442 | if (!solist) { |
1443 | continue; |
1444 | } |
1445 | SOLIST_ADD(so); |
1446 | } |
		lck_rw_done(&tcbinfo.ipi_lock);
1448 | } else if (protocol == IPPROTO_UDP) { |
		lck_rw_lock_shared(&udbinfo.ipi_lock);
1450 | LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) { |
1451 | so = inp->inp_socket; |
1452 | if (so == NULL || (so->so_state & SS_DEFUNCT) || |
1453 | (!(so->so_flags & SOF_MP_SUBFLOW) && |
1454 | (so->so_state & SS_NOFDREF)) || |
1455 | !SOCK_CHECK_DOM(so, domain) || |
1456 | !SOCK_CHECK_TYPE(so, type)) { |
1457 | continue; |
1458 | } |
1459 | solist = kalloc_type(struct solist, Z_NOWAIT); |
1460 | if (!solist) { |
1461 | continue; |
1462 | } |
1463 | SOLIST_ADD(so); |
1464 | } |
		lck_rw_done(&udbinfo.ipi_lock);
1466 | } |
1467 | /* XXX it's possible to walk the raw socket list as well */ |
1468 | #undef SOLIST_ADD |
1469 | |
1470 | while (solisthead) { |
1471 | sflt_handle handle = filter->sf_handle; |
1472 | |
1473 | so = solisthead->so; |
		socket_lock(so, 0);
1475 | sflt_initsock(so); |
1476 | if (so->so_state & SS_ISCONNECTING) { |
			sflt_notify_after_register(so, sock_evt_connecting,
			    handle);
		} else if (so->so_state & SS_ISCONNECTED) {
			sflt_notify_after_register(so, sock_evt_connected,
			    handle);
		} else if ((so->so_state &
		    (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) ==
		    (SS_ISDISCONNECTING | SS_CANTRCVMORE | SS_CANTSENDMORE)) {
			sflt_notify_after_register(so, sock_evt_disconnecting,
			    handle);
		} else if ((so->so_state &
		    (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) ==
		    (SS_CANTRCVMORE | SS_CANTSENDMORE | SS_ISDISCONNECTED)) {
			sflt_notify_after_register(so, sock_evt_disconnected,
			    handle);
		} else if (so->so_state & SS_CANTSENDMORE) {
			sflt_notify_after_register(so, sock_evt_cantsendmore,
			    handle);
		} else if (so->so_state & SS_CANTRCVMORE) {
			sflt_notify_after_register(so, sock_evt_cantrecvmore,
			    handle);
1498 | } |
		socket_unlock(so, 0);
1500 | /* XXX no easy way to post the sock_evt_closing event */ |
1501 | sock_release(so); |
1502 | solist = solisthead; |
1503 | solisthead = solisthead->next; |
1504 | kfree_type(struct solist, solist); |
1505 | } |
1506 | |
1507 | return error; |
1508 | } |
1509 | |
1510 | errno_t |
1511 | sflt_register_internal(const struct sflt_filter *filter, int domain, int type, |
1512 | int protocol) |
1513 | { |
1514 | return sflt_register_common(filter, domain, type, protocol, true); |
1515 | } |
1516 | |
1517 | #define MAX_NUM_FRAMES 5 |
1518 | |
1519 | errno_t |
1520 | sflt_register(const struct sflt_filter *filter, int domain, int type, |
1521 | int protocol) |
1522 | { |
1523 | return sflt_register_common(filter, domain, type, protocol, false); |
1524 | } |
1525 | |
1526 | errno_t |
1527 | sflt_unregister(sflt_handle handle) |
1528 | { |
1529 | struct socket_filter *filter; |
	lck_rw_lock_exclusive(&sock_filter_lock);
1531 | |
1532 | /* Find the entry by the handle */ |
1533 | TAILQ_FOREACH(filter, &sock_filter_head, sf_global_next) { |
1534 | if (filter->sf_filter.sf_handle == handle) { |
1535 | break; |
1536 | } |
1537 | } |
1538 | |
1539 | if (filter) { |
1540 | if (filter->sf_flags & SFF_INTERNAL) { |
1541 | VERIFY(OSDecrementAtomic64(&net_api_stats.nas_sfltr_register_os_count) > 0); |
1542 | } |
1543 | VERIFY(OSDecrementAtomic64(&net_api_stats.nas_sfltr_register_count) > 0); |
1544 | |
1545 | /* Remove it from the global list */ |
1546 | TAILQ_REMOVE(&sock_filter_head, filter, sf_global_next); |
1547 | |
1548 | /* Remove it from the protosw list */ |
1549 | if ((filter->sf_filter.sf_flags & SFLT_GLOBAL) != 0) { |
1550 | TAILQ_REMOVE(&filter->sf_proto->pr_filter_head, |
1551 | filter, sf_protosw_next); |
1552 | } |
1553 | |
1554 | /* Detach from any sockets */ |
1555 | struct socket_filter_entry *entry = NULL; |
1556 | |
1557 | for (entry = filter->sf_entry_head; entry; |
1558 | entry = entry->sfe_next_onfilter) { |
1559 | sflt_detach_locked(entry); |
1560 | } |
1561 | |
1562 | /* Release the filter */ |
1563 | sflt_release_locked(filter); |
1564 | } |
1565 | #if SKYWALK && defined(XNU_TARGET_OS_OSX) |
	net_filter_event_mark(NET_FILTER_EVENT_SOCKET,
	    net_check_compatible_sfltr());
	net_filter_event_mark(NET_FILTER_EVENT_ALF,
	    net_check_compatible_alf());
	net_filter_event_mark(NET_FILTER_EVENT_PARENTAL_CONTROLS,
	    net_check_compatible_parental_controls());
1572 | #endif /* SKYWALK && XNU_TARGET_OS_OSX */ |
1573 | |
	lck_rw_unlock_exclusive(&sock_filter_lock);
1575 | |
1576 | if (filter == NULL) { |
1577 | return ENOENT; |
1578 | } |
1579 | |
1580 | return 0; |
1581 | } |
1582 | |
1583 | errno_t |
1584 | sock_inject_data_in(socket_t so, const struct sockaddr *from, mbuf_t data, |
1585 | mbuf_t control, sflt_data_flag_t flags) |
1586 | { |
1587 | int error = 0; |
1588 | |
1589 | if (so == NULL || data == NULL) { |
1590 | return EINVAL; |
1591 | } |
1592 | |
1593 | if (flags & sock_data_filt_flag_oob) { |
1594 | return ENOTSUP; |
1595 | } |
1596 | |
	socket_lock(so, 1);
1598 | |
1599 | /* reject if this is a subflow socket */ |
1600 | if (so->so_flags & SOF_MP_SUBFLOW) { |
1601 | error = ENOTSUP; |
1602 | goto done; |
1603 | } |
1604 | |
1605 | if (from) { |
		if (sbappendaddr(&so->so_rcv,
		    (struct sockaddr *)(uintptr_t)from, data, control, NULL)) {
1608 | sorwakeup(so); |
1609 | } |
1610 | goto done; |
1611 | } |
1612 | |
1613 | if (control) { |
		if (sbappendcontrol(&so->so_rcv, data, control, NULL)) {
1615 | sorwakeup(so); |
1616 | } |
1617 | goto done; |
1618 | } |
1619 | |
1620 | if (flags & sock_data_filt_flag_record) { |
1621 | if (control || from) { |
1622 | error = EINVAL; |
1623 | goto done; |
1624 | } |
		if (sbappendrecord(&so->so_rcv, (struct mbuf *)data)) {
1626 | sorwakeup(so); |
1627 | } |
1628 | goto done; |
1629 | } |
1630 | |
	if (sbappend(&so->so_rcv, data)) {
1632 | sorwakeup(so); |
1633 | } |
1634 | done: |
	socket_unlock(so, 1);
1636 | return error; |
1637 | } |
1638 | |
1639 | errno_t |
1640 | sock_inject_data_out(socket_t so, const struct sockaddr *to, mbuf_t data, |
1641 | mbuf_t control, sflt_data_flag_t flags) |
1642 | { |
1643 | int sosendflags = 0; |
1644 | int error = 0; |
1645 | |
1646 | /* reject if this is a subflow socket */ |
1647 | if (so->so_flags & SOF_MP_SUBFLOW) { |
1648 | return ENOTSUP; |
1649 | } |
1650 | |
1651 | if (flags & sock_data_filt_flag_oob) { |
1652 | sosendflags = MSG_OOB; |
1653 | } |
1654 | |
1655 | #if SKYWALK |
1656 | sk_protect_t protect = sk_async_transmit_protect(); |
1657 | #endif /* SKYWALK */ |
1658 | |
	error = sosend(so, (struct sockaddr *)(uintptr_t)to, NULL,
	    data, control, sosendflags);
1661 | |
1662 | #if SKYWALK |
1663 | sk_async_transmit_unprotect(protect); |
1664 | #endif /* SKYWALK */ |
1665 | |
1666 | return error; |
1667 | } |
1668 | |
1669 | sockopt_dir |
1670 | sockopt_direction(sockopt_t sopt) |
1671 | { |
1672 | return (sopt->sopt_dir == SOPT_GET) ? sockopt_get : sockopt_set; |
1673 | } |
1674 | |
1675 | int |
1676 | sockopt_level(sockopt_t sopt) |
1677 | { |
1678 | return sopt->sopt_level; |
1679 | } |
1680 | |
1681 | int |
1682 | sockopt_name(sockopt_t sopt) |
1683 | { |
1684 | return sopt->sopt_name; |
1685 | } |
1686 | |
1687 | size_t |
1688 | sockopt_valsize(sockopt_t sopt) |
1689 | { |
1690 | return sopt->sopt_valsize; |
1691 | } |
1692 | |
1693 | errno_t |
1694 | sockopt_copyin(sockopt_t sopt, void *data, size_t len) |
1695 | { |
	return sooptcopyin(sopt, data, len, len);
1697 | } |
1698 | |
1699 | errno_t |
1700 | sockopt_copyout(sockopt_t sopt, void *data, size_t len) |
1701 | { |
1702 | return sooptcopyout(sopt, data, len); |
1703 | } |
1704 | |
1705 | #if SKYWALK && defined(XNU_TARGET_OS_OSX) |
1706 | static bool |
1707 | net_check_compatible_sfltr(void) |
1708 | { |
1709 | if (net_api_stats.nas_sfltr_register_count > net_api_stats.nas_sfltr_register_os_count) { |
1710 | return false; |
1711 | } |
1712 | return true; |
1713 | } |
1714 | |
1715 | bool |
1716 | net_check_compatible_alf(void) |
1717 | { |
1718 | int alf_perm; |
1719 | size_t len = sizeof(alf_perm); |
1720 | errno_t error; |
1721 | |
1722 | error = kernel_sysctlbyname("net.alf.perm" , &alf_perm, &len, NULL, 0); |
1723 | if (error == 0) { |
1724 | if (alf_perm != 0) { |
1725 | return false; |
1726 | } |
1727 | } |
1728 | return true; |
1729 | } |
1730 | |
1731 | static bool |
1732 | net_check_compatible_parental_controls(void) |
1733 | { |
1734 | /* |
1735 | * Assumes the first 4 OS socket filters are for ALF and additional |
1736 | * OS filters are for Parental Controls web content filter |
1737 | */ |
1738 | if (net_api_stats.nas_sfltr_register_os_count > 4) { |
1739 | return false; |
1740 | } |
1741 | return true; |
1742 | } |
1743 | #endif /* SKYWALK && XNU_TARGET_OS_OSX */ |
1744 | |