1 | /* |
2 | * Copyright (c) 2013-2022 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. Please obtain a copy of the License at |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this |
11 | * file. |
12 | * |
13 | * The Original Code and all software distributed under the License are |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
18 | * Please see the License for the specific language governing rights and |
19 | * limitations under the License. |
20 | * |
21 | * @APPLE_LICENSE_HEADER_END@ |
22 | */ |
23 | |
24 | /* |
25 | * THEORY OF OPERATION |
26 | * |
27 | * The socket content filter subsystem provides a way for user space agents to |
28 | * make filtering decisions based on the content of the data being sent and |
29 | * received by INET/INET6 sockets. |
30 | * |
31 | * A content filter user space agent gets a copy of the data and the data is |
32 | * also kept in a kernel buffer until the user space agent makes a pass or drop |
33 | * decision. This unidirectional flow of content avoids unnecessary data copies |
34 | * back to the kernel. |
35 | * |
36 | * A user space filter agent opens a kernel control socket with the name |
37 | * CONTENT_FILTER_CONTROL_NAME to attach to the socket content filter subsystem. |
38 | * When connected, a "struct content_filter" is created and set as the |
39 | * "unitinfo" of the corresponding kernel control socket instance. |
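 *
 * For illustration, a user space filter agent typically attaches along
 * these lines (a minimal sketch; error handling and privilege checks are
 * omitted, and the declarations come from <sys/kern_control.h> and
 * <net/content_filter.h>):
 *
 *	struct ctl_info info;
 *	struct sockaddr_ctl addr;
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *	memset(&info, 0, sizeof(info));
 *	strlcpy(info.ctl_name, CONTENT_FILTER_CONTROL_NAME, sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);              // resolve control name to id
 *
 *	memset(&addr, 0, sizeof(addr));
 *	addr.sc_len = sizeof(addr);
 *	addr.sc_family = AF_SYSTEM;
 *	addr.ss_sysaddr = AF_SYS_CONTROL;
 *	addr.sc_id = info.ctl_id;
 *	addr.sc_unit = 0;                           // 0 lets the kernel pick a unit
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));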
40 | * |
41 | * The socket content filter subsystem exchanges messages with the user space |
42 | * filter agent until an ultimate pass or drop decision is made by the |
43 | * user space filter agent. |
44 | * |
45 | * It should be noted that messages about many INET/INET6 sockets can be multiplexed |
46 | * over a single kernel control socket. |
47 | * |
48 | * Notes: |
49 | * - The current implementation supports all INET/INET6 sockets (i.e. TCP, |
50 | * UDP, ICMP, etc). |
51 | * - The current implementation supports up to two simultaneous content filters |
52 | * for iOS devices and eight simultaneous content filters for OSX. |
53 | * |
54 | * |
55 | * NECP FILTER CONTROL UNIT |
56 | * |
57 | * A user space filter agent uses the Network Extension Control Policy (NECP) |
58 | * database to specify which INET/INET6 sockets need to be filtered. The NECP |
59 | * criteria may be based on a variety of properties like user ID or proc UUID. |
60 | * |
61 | * The NECP "filter control unit" is used by the socket content filter subsystem |
62 | * to deliver the relevant INET/INET6 content information to the appropriate |
63 | * user space filter agent via its kernel control socket instance. |
64 | * This works as follows: |
65 | * |
66 | * 1) The user space filter agent specifies an NECP filter control unit when |
67 | * it adds its filtering rules to the NECP database. |
68 | * |
69 | * 2) The user space filter agent also sets its NECP filter control unit on the |
70 | * content filter kernel control socket via the socket option |
71 | * CFIL_OPT_NECP_CONTROL_UNIT. |
72 | * |
73 | * 3) The NECP database is consulted to find out if a given INET/INET6 socket |
74 | * needs to be subjected to content filtering and returns the corresponding |
75 | * NECP filter control unit -- the NECP filter control unit is actually |
76 | * stored in the INET/INET6 socket structure so the NECP lookup is really simple. |
77 | * |
78 | * 4) The NECP filter control unit is then used to find the corresponding |
79 | * kernel control socket instance. |
80 | * |
81 | * Note: NECP currently supports a single filter control unit per INET/INET6 socket |
82 | * but this restriction may soon be lifted. |
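 *
 * For step 2) above, the option is set on the connected kernel control
 * socket roughly as follows (sketch; "fd" is the kernel control socket from
 * the example above and "unit" is the same filter control unit used in the
 * agent's NECP rules):
 *
 *	uint32_t unit = filter_control_unit;   // hypothetical variable
 *	setsockopt(fd, SYSPROTO_CONTROL, CFIL_OPT_NECP_CONTROL_UNIT,
 *	    &unit, sizeof(unit));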
83 | * |
84 | * |
85 | * THE MESSAGING PROTOCOL |
86 | * |
87 | * The socket content filter subsystem and a user space filter agent |
88 | * communicate over the kernel control socket via an asynchronous |
89 | * messaging protocol (this is not a request-response protocol). |
90 | * The socket content filter subsystem sends event messages to the user |
91 | * space filter agent about the INET/INET6 sockets it is interested to filter. |
92 | * The user space filter agent sends action messages to either allow |
93 | * data to pass or to disallow the data flow (and drop the connection). |
94 | * |
95 | * All messages over a content filter kernel control socket share the same |
96 | * common header of type "struct cfil_msg_hdr". The message type tells if |
97 | * it's an event message "CFM_TYPE_EVENT" or an action message "CFM_TYPE_ACTION". |
98 | * The message header field "cfm_sock_id" identifies a given INET/INET6 flow. |
99 | * For TCP, flows are per-socket. For UDP and other datagram protocols, there |
100 | * could be multiple flows per socket. |
101 | * |
102 | * Note the message header length field may be padded for alignment and can |
103 | * be larger than the actual content of the message. |
104 | * The field "cfm_op" describes the kind of event or action. |
105 | * |
106 | * Here are the kinds of content filter events: |
107 | * - CFM_OP_SOCKET_ATTACHED: a new INET/INET6 socket is being filtered |
108 | * - CFM_OP_SOCKET_CLOSED: An INET/INET6 socket is closed |
109 | * - CFM_OP_DATA_OUT: A span of data is being sent on an INET/INET6 socket |
110 | * - CFM_OP_DATA_IN: A span of data is being received on an INET/INET6 socket |
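 *
 * For illustration, the receive side of a filter agent might dispatch on the
 * common header along these lines (a sketch; the message structures are
 * declared in <net/content_filter.h>):
 *
 *	struct cfil_msg_hdr *hdr = (struct cfil_msg_hdr *)buffer;
 *
 *	if (hdr->cfm_type == CFM_TYPE_EVENT) {
 *		switch (hdr->cfm_op) {
 *		case CFM_OP_SOCKET_ATTACHED:
 *		case CFM_OP_SOCKET_CLOSED:
 *		case CFM_OP_DATA_OUT:
 *		case CFM_OP_DATA_IN:
 *			// hdr->cfm_sock_id identifies the flow and
 *			// hdr->cfm_len gives the (possibly padded) length
 *			break;
 *		default:
 *			break;
 *		}
 *	}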
111 | * |
112 | * |
113 | * EVENT MESSAGES |
114 | * |
115 | * The CFM_OP_DATA_OUT and CFM_OP_DATA_IN event messages contain a span of |
116 | * data that is being sent or received. The position of this span of data |
117 | * in the data flow is described by a set of start and end offsets. These |
118 | * are absolute 64-bit offsets. The first byte sent (or received) starts |
119 | * at offset 0 and ends at offset 1. The length of the content data |
120 | * is given by the difference between the end offset and the start offset. |
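 *
 * For example, a data event whose start offset is 4096 and whose end offset
 * is 5120 describes the 1024 bytes at stream positions 4096..5119. In a
 * filter agent this is typically computed as (sketch; field names per the
 * data event structure in <net/content_filter.h>):
 *
 *	const struct cfil_msg_data_event *evt =
 *	    (const struct cfil_msg_data_event *)msg;  // msg: received event buffer
 *	uint64_t span_len = evt->cfd_end_offset - evt->cfd_start_offset;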
121 | * |
122 | * After a CFM_OP_SOCKET_ATTACHED is delivered, CFM_OP_DATA_OUT and |
123 | * CFM_OP_DATA_IN events are not delivered until a CFM_OP_DATA_UPDATE |
124 | * action message is sent by the user space filter agent. |
125 | * |
126 | * Note: absolute 64-bit offsets should be large enough for the foreseeable |
127 | * future. A 64-bit counter will wrap after 468 years at 10 Gbit/sec: |
128 | * 2^64 / ((10 * 10^9 / 8) * 60 * 60 * 24 * 365.25) = 467.63 |
129 | * |
130 | * There are two kinds of primary content filter actions: |
131 | * - CFM_OP_DATA_UPDATE: to update pass or peek offsets for each direction. |
132 | * - CFM_OP_DROP: to shut down the socket and disallow further data flow |
133 | * |
134 | * There is also an action to mark a given client flow as already filtered |
135 | * at a higher level, CFM_OP_BLESS_CLIENT. |
136 | * |
137 | * |
138 | * ACTION MESSAGES |
139 | * |
140 | * The CFM_OP_DATA_UPDATE action messages let the user space filter |
141 | * agent allow data to flow up to the specified pass offset -- there |
142 | * is a pass offset for outgoing data and a pass offset for incoming data. |
143 | * When a new INET/INET6 socket is attached to the content filter and a flow is |
144 | * created, each pass offset is initially set to 0 so no data is allowed to pass by |
145 | * default. When the pass offset is set to CFM_MAX_OFFSET via a CFM_OP_DATA_UPDATE |
146 | * then the data flow becomes unrestricted. |
147 | * |
148 | * Note that pass offsets can only be incremented. A CFM_OP_DATA_UPDATE message |
149 | * with a pass offset smaller than the pass offset of a previous |
150 | * CFM_OP_DATA_UPDATE message is silently ignored. |
151 | * |
152 | * A user space filter agent also uses CFM_OP_DATA_UPDATE action messages |
153 | * to tell the kernel how much data it wants to see by using the peek offsets. |
154 | * Just like pass offsets, there is a peek offset for each direction. |
155 | * When a new INET/INET6 flow is created, each peek offset is initially set to 0 |
156 | * so no CFM_OP_DATA_OUT and CFM_OP_DATA_IN event messages are dispatched by default |
157 | * until a CFM_OP_DATA_UPDATE action message with a greater than 0 peek offset is sent |
158 | * by the user space filter agent. When the peek offset is set to CFM_MAX_OFFSET via |
159 | * a CFM_OP_DATA_UPDATE then the flow of data events becomes unrestricted. |
160 | * |
161 | * Note that peek offsets cannot be smaller than the corresponding pass offset. |
162 | * Also a peek offset cannot be smaller than the corresponding end offset |
163 | * of the last CFM_OP_DATA_OUT/CFM_OP_DATA_IN message dispatched. Attempts |
164 | * to set a peek offset that is too small are silently ignored. |
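 *
 * For illustration, an agent that has inspected the first 1 KB of outgoing
 * data, wants to let it pass, and wants to see up to 8 KB more could send
 * an update along these lines (a sketch; field names follow the action
 * message structure declared in <net/content_filter.h>):
 *
 *	struct cfil_msg_action action;
 *
 *	memset(&action, 0, sizeof(action));
 *	action.cfa_msghdr.cfm_len = sizeof(action);
 *	action.cfa_msghdr.cfm_version = CFM_VERSION_CURRENT;
 *	action.cfa_msghdr.cfm_type = CFM_TYPE_ACTION;
 *	action.cfa_msghdr.cfm_op = CFM_OP_DATA_UPDATE;
 *	action.cfa_msghdr.cfm_sock_id = sock_id;      // flow being updated
 *	action.cfa_out_pass_offset = 1024;            // bytes [0, 1024) may pass
 *	action.cfa_out_peek_offset = 1024 + 8192;     // deliver events up to here
 *	send(fd, &action, sizeof(action), 0);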
165 | * |
166 | * |
167 | * PER FLOW "struct cfil_info" |
168 | * |
169 | * As soon as an INET/INET6 socket gets attached to a content filter, a |
170 | * "struct cfil_info" is created to hold the content filtering state for this |
171 | * socket. For UDP and other datagram protocols, as soon as traffic is seen for |
172 | * each new flow identified by its 4-tuple of source address/port and destination |
173 | * address/port, a "struct cfil_info" is created. Each datagram socket may |
174 | * have multiple flows maintained in a hash table of "struct cfil_info" entries. |
175 | * |
176 | * The content filtering state is made of the following information |
177 | * for each direction: |
178 | * - The current pass offset; |
179 | * - The first and last offsets of the data pending, waiting for a filtering |
180 | * decision; |
181 | * - The inject queue for data that passed the filters and that needs |
182 | * to be re-injected; |
183 | * - A content filter specific state in a set of "struct cfil_entry" |
184 | * |
185 | * |
186 | * CONTENT FILTER STATE "struct cfil_entry" |
187 | * |
188 | * The "struct cfil_entry" maintains the information most relevant to the |
189 | * message handling over a kernel control socket with a user space filter agent. |
190 | * |
191 | * The "struct cfil_entry" holds the NECP filter control unit that corresponds |
192 | * to the kernel control socket unit and also has a pointer |
193 | * to the corresponding "struct content_filter". |
194 | * |
195 | * For each direction, "struct cfil_entry" maintains the following information: |
196 | * - The pass offset |
197 | * - The peek offset |
198 | * - The offset of the last data peeked at by the filter |
199 | * - A queue of data that's waiting to be delivered to the user space filter |
200 | * agent on the kernel control socket |
201 | * - A queue of data for which event messages have been sent on the kernel |
202 | * control socket and are pending for a filtering decision. |
203 | * |
204 | * |
205 | * CONTENT FILTER QUEUES |
206 | * |
207 | * Data that is being filtered is steered away from the INET/INET6 socket buffer |
208 | * and instead will sit in one of three content filter queues until the data |
209 | * can be re-injected into the INET/INET6 socket buffer. |
210 | * |
211 | * A content filter queue is represented by "struct cfil_queue" that contains |
212 | * a list of mbufs and the start and end offset of the data span of |
213 | * the list of mbufs. |
214 | * |
215 | * The data moves into the three content filter queues according to this |
216 | * sequence: |
217 | * a) The "cfe_ctl_q" of "struct cfil_entry" |
218 | * b) The "cfe_pending_q" of "struct cfil_entry" |
219 | * c) The "cfi_inject_q" of "struct cfil_info" |
220 | * |
221 | * Note: The sequence (a),(b) may be repeated several times if there is more |
222 | * than one content filter attached to the INET/INET6 socket. |
223 | * |
224 | * The "cfe_ctl_q" queue holds data that cannot be delivered to the |
225 | * kernel control socket for two reasons: |
226 | * - The peek offset is less than the end offset of the mbuf data |
227 | * - The kernel control socket is flow controlled |
228 | * |
229 | * The "cfe_pending_q" queue holds data for which CFM_OP_DATA_OUT or |
230 | * CFM_OP_DATA_IN have been successfully dispatched to the kernel control |
231 | * socket and are waiting for a pass action message from the user space |
232 | * filter agent. An mbuf length must be fully allowed to pass to be removed |
233 | * from the cfe_pending_q. |
234 | * |
235 | * The "cfi_inject_q" queue holds data that has been fully allowed to pass |
236 | * by the user space filter agent and that needs to be re-injected into the |
237 | * INET/INET6 socket. |
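 *
 * For illustration, with two content filters attached, outgoing data moves
 * through the queues roughly as follows:
 *
 *	socket buffer
 *	  -> filter 1 "cfe_ctl_q" -> filter 1 "cfe_pending_q"
 *	  -> filter 2 "cfe_ctl_q" -> filter 2 "cfe_pending_q"
 *	  -> "cfi_inject_q" -> re-injected into the INET/INET6 socket buffer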
238 | * |
239 | * |
240 | * IMPACT ON FLOW CONTROL |
241 | * |
242 | * An essential aspect of the content filter subsystem is to minimize the |
243 | * impact on flow control of the INET/INET6 sockets being filtered. |
244 | * |
245 | * The processing overhead of the content filtering may have an effect on |
246 | * flow control by adding noticeable delays and cannot be eliminated -- |
247 | * care must be taken by the user space filter agent to minimize the |
248 | * processing delays. |
249 | * |
250 | * The amount of data being filtered is kept in buffers while waiting for |
251 | * a decision by the user space filter agent. This amount of data pending |
252 | * needs to be subtracted from the amount of data available in the |
253 | * corresponding INET/INET6 socket buffer. This is done by modifying |
254 | * sbspace() and tcp_sbspace() to account for amount of data pending |
255 | * in the content filter. |
256 | * |
257 | * |
258 | * LOCKING STRATEGY |
259 | * |
260 | * The global state of content filter subsystem is protected by a single |
261 | * read-write lock "cfil_lck_rw". The data flow can be done with the |
262 | * cfil read-write lock held as shared so it can be re-entered from multiple |
263 | * threads. |
264 | * |
265 | * The per INET/INET6 socket content filter state -- "struct cfil_info" -- is |
266 | * protected by the socket lock. |
267 | * |
268 | * An INET/INET6 socket lock cannot be taken while the cfil read-write lock |
269 | * is held. That's why we have some sequences where we drop the cfil read-write |
270 | * lock before taking the INET/INET6 socket lock. |
271 | * |
272 | * It is also important to lock the INET/INET6 socket buffer while the content |
273 | * filter is modifying the amount of pending data. Otherwise the calculations |
274 | * in sbspace() and tcp_sbspace() could be wrong. |
275 | * |
276 | * The "cfil_lck_rw" protects "struct content_filter" and also the fields |
277 | * "cfe_link" and "cfe_filter" of "struct cfil_entry". |
278 | * |
279 | * Actually "cfe_link" and "cfe_filter" are protected both by |
280 | * "cfil_lck_rw" and the socket lock: they may be modified only when |
281 | * "cfil_lck_rw" is exclusive and the socket is locked. |
282 | * |
283 | * To read the other fields of "struct content_filter" we have to take |
284 | * "cfil_lck_rw" in shared mode. |
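 *
 * For illustration, a code path that needs both locks typically follows the
 * ordering described above (sketch):
 *
 *	cfil_rw_lock_shared(&cfil_lck_rw);
 *	// ... look up state protected by the cfil read-write lock ...
 *	cfil_rw_unlock_shared(&cfil_lck_rw);
 *
 *	socket_lock(so, 1);
 *	// ... update the per-socket "struct cfil_info" state ...
 *	socket_unlock(so, 1);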
285 | * |
286 | * DATAGRAM SPECIFICS: |
287 | * |
288 | * The socket content filter supports all INET/INET6 protocols. However |
289 | * the treatments for TCP sockets and for datagram (UDP, ICMP, etc) sockets |
290 | * are slightly different. |
291 | * |
292 | * Each datagram socket may have multiple flows. Each flow is identified |
293 | * by the flow's source address/port and destination address/port tuple |
294 | * and is represented as a "struct cfil_info" entry. For each socket, |
295 | * a hash table is used to maintain the collection of flows under that socket. |
296 | * |
297 | * Each datagram flow is uniquely identified by its "struct cfil_info" cfi_sock_id. |
298 | * The highest 32 bits of the cfi_sock_id contain the socket's so_gencnt. This portion |
299 | * of the cfi_sock_id is used to locate the socket during socket lookup. The lowest 32 bits |
300 | * of the cfi_sock_id contain a hash of the flow's 4-tuple. This portion of the cfi_sock_id |
301 | * is used as the hash value for the flow hash table lookup within the parent socket. |
302 | * |
303 | * Since datagram sockets may not be connected, flow states may not be maintained in the |
304 | * socket structures and thus have to be saved for each packet. These saved states will be |
305 | * used for both outgoing and incoming reinjections. For outgoing packets, destination |
306 | * address/port as well as the current socket states will be saved. During reinjection, |
307 | * these saved states will be used instead. For incoming packets, control and address |
308 | * mbufs will be chained to the data. During reinjection, the whole chain will be queued |
309 | * onto the incoming socket buffer. |
310 | * |
311 | * LIMITATIONS |
312 | * |
313 | * - Supports all INET/INET6 sockets, such as TCP, UDP, ICMP, etc. |
314 | * |
315 | * - Does not support TCP unordered messages |
316 | */ |
317 | |
318 | /* |
319 | * TO DO LIST |
320 | * |
321 | * Deal with OOB |
322 | * |
323 | */ |
324 | |
325 | #include <sys/types.h> |
326 | #include <sys/kern_control.h> |
327 | #include <sys/queue.h> |
328 | #include <sys/domain.h> |
329 | #include <sys/protosw.h> |
330 | #include <sys/syslog.h> |
331 | #include <sys/systm.h> |
332 | #include <sys/param.h> |
333 | #include <sys/mbuf.h> |
334 | |
335 | #include <kern/locks.h> |
336 | #include <kern/zalloc.h> |
337 | #include <kern/debug.h> |
338 | |
339 | #include <net/ntstat.h> |
340 | #include <net/content_filter.h> |
341 | #include <net/content_filter_crypto.h> |
342 | |
343 | #define _IP_VHL |
344 | #include <netinet/ip.h> |
345 | #include <netinet/in_pcb.h> |
346 | #include <netinet/tcp.h> |
347 | #include <netinet/tcp_var.h> |
348 | #include <netinet/udp.h> |
349 | #include <netinet/udp_var.h> |
350 | #include <kern/socket_flows.h> |
351 | |
352 | #include <string.h> |
353 | #include <libkern/libkern.h> |
354 | #include <kern/sched_prim.h> |
355 | #include <kern/task.h> |
356 | #include <mach/task_info.h> |
357 | |
358 | #include <net/sockaddr_utils.h> |
359 | |
360 | #define MAX_CONTENT_FILTER 8 |
361 | |
362 | extern int tcp_msl; |
363 | extern struct inpcbinfo ripcbinfo; |
364 | struct cfil_entry; |
365 | |
366 | /* |
367 | * The structure content_filter represents a user space content filter |
368 | * It's created and associated with a kernel control socket instance |
369 | */ |
370 | struct content_filter { |
371 | kern_ctl_ref cf_kcref; |
372 | u_int32_t cf_kcunit; |
373 | u_int32_t cf_flags; |
374 | |
375 | uint32_t cf_necp_control_unit; |
376 | |
377 | uint32_t cf_sock_count; |
378 | TAILQ_HEAD(, cfil_entry) cf_sock_entries; |
379 | |
380 | cfil_crypto_state_t cf_crypto_state; |
381 | }; |
382 | |
383 | #define CFF_ACTIVE 0x01 |
384 | #define CFF_DETACHING 0x02 |
385 | #define CFF_FLOW_CONTROLLED 0x04 |
386 | #define CFF_PRESERVE_CONNECTIONS 0x08 |
387 | |
388 | struct content_filter *content_filters[MAX_CONTENT_FILTER]; |
389 | uint32_t cfil_active_count = 0; /* Number of active content filters */ |
390 | uint32_t cfil_sock_attached_count = 0; /* Number of socket attachments */ |
391 | uint32_t cfil_sock_attached_stats_count = 0; /* Number of sockets that requested periodic stats reports */ |
392 | uint32_t cfil_close_wait_timeout = 1000; /* in milliseconds */ |
393 | |
394 | static kern_ctl_ref cfil_kctlref = NULL; |
395 | |
396 | static LCK_GRP_DECLARE(cfil_lck_grp, "content filter" ); |
397 | static LCK_RW_DECLARE(cfil_lck_rw, &cfil_lck_grp); |
398 | |
399 | #define CFIL_RW_LCK_MAX 8 |
400 | |
401 | int cfil_rw_nxt_lck = 0; |
402 | void* cfil_rw_lock_history[CFIL_RW_LCK_MAX]; |
403 | |
404 | int cfil_rw_nxt_unlck = 0; |
405 | void* cfil_rw_unlock_history[CFIL_RW_LCK_MAX]; |
406 | |
407 | static KALLOC_TYPE_DEFINE(content_filter_zone, struct content_filter, NET_KT_DEFAULT); |
408 | |
409 | MBUFQ_HEAD(cfil_mqhead); |
410 | |
411 | struct cfil_queue { |
412 | uint64_t q_start; /* offset of first byte in queue */ |
413 | uint64_t q_end; /* offset of last byte in queue */ |
414 | struct cfil_mqhead q_mq; |
415 | }; |
416 | |
417 | /* |
418 | * struct cfil_entry |
419 | * |
420 | * There is one entry per content filter |
421 | */ |
422 | struct cfil_entry { |
423 | TAILQ_ENTRY(cfil_entry) cfe_link; |
424 | SLIST_ENTRY(cfil_entry) cfe_order_link; |
425 | struct content_filter *cfe_filter; |
426 | |
427 | struct cfil_info *cfe_cfil_info; |
428 | uint32_t cfe_flags; |
429 | uint32_t cfe_necp_control_unit; |
430 | struct timeval cfe_last_event; /* To user space */ |
431 | struct timeval cfe_last_action; /* From user space */ |
432 | uint64_t cfe_byte_inbound_count_reported; /* stats already been reported */ |
433 | uint64_t cfe_byte_outbound_count_reported; /* stats already been reported */ |
434 | struct timeval cfe_stats_report_ts; /* Timestamp for last stats report */ |
435 | uint32_t cfe_stats_report_frequency; /* Interval for stats report in msecs */ |
436 | boolean_t cfe_laddr_sent; |
437 | |
438 | struct cfe_buf { |
439 | /* |
440 | * cfe_pending_q holds data that has been delivered to |
441 | * the filter and for which we are waiting for an action |
442 | */ |
443 | struct cfil_queue cfe_pending_q; |
444 | /* |
445 | * This queue is for data that has not been delivered to |
446 | * the content filter (new data, pass peek or flow control) |
447 | */ |
448 | struct cfil_queue cfe_ctl_q; |
449 | |
450 | uint64_t cfe_pass_offset; |
451 | uint64_t cfe_peek_offset; |
452 | uint64_t cfe_peeked; |
453 | } cfe_snd, cfe_rcv; |
454 | }; |
455 | |
456 | #define CFEF_CFIL_ATTACHED 0x0001 /* was attached to filter */ |
457 | #define CFEF_SENT_SOCK_ATTACHED 0x0002 /* sock attach event was sent */ |
458 | #define CFEF_DATA_START 0x0004 /* can send data event */ |
459 | #define CFEF_FLOW_CONTROLLED 0x0008 /* wait for flow control lift */ |
460 | #define CFEF_SENT_DISCONNECT_IN 0x0010 /* event was sent */ |
461 | #define CFEF_SENT_DISCONNECT_OUT 0x0020 /* event was sent */ |
462 | #define CFEF_SENT_SOCK_CLOSED 0x0040 /* closed event was sent */ |
463 | #define CFEF_CFIL_DETACHED 0x0080 /* filter was detached */ |
464 | |
465 | |
466 | #define CFI_ADD_TIME_LOG(cfil, t1, t0, op) \ |
467 | struct timeval64 _tdiff; \ |
468 | if ((cfil)->cfi_op_list_ctr < CFI_MAX_TIME_LOG_ENTRY) { \ |
469 | timersub(t1, t0, &_tdiff); \ |
470 | (cfil)->cfi_op_time[(cfil)->cfi_op_list_ctr] = (uint32_t)(_tdiff.tv_sec * 1000 + _tdiff.tv_usec / 1000);\ |
471 | (cfil)->cfi_op_list[(cfil)->cfi_op_list_ctr] = (unsigned char)op; \ |
472 | (cfil)->cfi_op_list_ctr ++; \ |
473 | } |
474 | |
475 | /* |
476 | * struct cfil_info |
477 | * |
478 | * There is a struct cfil_info per socket |
479 | */ |
480 | struct cfil_info { |
481 | TAILQ_ENTRY(cfil_info) cfi_link; |
482 | TAILQ_ENTRY(cfil_info) cfi_link_stats; |
483 | struct socket *cfi_so; |
484 | uint64_t cfi_flags; |
485 | uint64_t cfi_sock_id; |
486 | struct timeval64 cfi_first_event; |
487 | uint32_t cfi_op_list_ctr; |
488 | uint32_t cfi_op_time[CFI_MAX_TIME_LOG_ENTRY]; /* time interval in microseconds since first event */ |
489 | unsigned char cfi_op_list[CFI_MAX_TIME_LOG_ENTRY]; |
490 | union sockaddr_in_4_6 cfi_so_attach_faddr; /* faddr at the time of attach */ |
491 | union sockaddr_in_4_6 cfi_so_attach_laddr; /* laddr at the time of attach */ |
492 | |
493 | int cfi_dir; |
494 | uint64_t cfi_byte_inbound_count; |
495 | uint64_t cfi_byte_outbound_count; |
496 | |
497 | boolean_t cfi_isSignatureLatest; /* Indicates if signature covers latest flow attributes */ |
498 | u_int32_t cfi_filter_control_unit; |
499 | u_int32_t cfi_debug; |
500 | struct cfi_buf { |
501 | /* |
502 | * cfi_pending_first and cfi_pending_last describe the total |
503 | * amount of data outstanding for all the filters on |
504 | * this socket and data in the flow queue |
505 | * cfi_pending_mbcnt counts in sballoc() "chars of mbufs used" |
506 | */ |
507 | uint64_t cfi_pending_first; |
508 | uint64_t cfi_pending_last; |
509 | uint32_t cfi_pending_mbcnt; |
510 | uint32_t cfi_pending_mbnum; |
511 | uint32_t cfi_tail_drop_cnt; |
512 | /* |
513 | * cfi_pass_offset is the minimum of all the filters |
514 | */ |
515 | uint64_t cfi_pass_offset; |
516 | /* |
517 | * cfi_inject_q holds data that needs to be re-injected |
518 | * into the socket after filtering and that can |
519 | * be queued because of flow control |
520 | */ |
521 | struct cfil_queue cfi_inject_q; |
522 | } cfi_snd, cfi_rcv; |
523 | |
524 | struct cfil_entry cfi_entries[MAX_CONTENT_FILTER]; |
525 | struct soflow_hash_entry *cfi_hash_entry; |
526 | SLIST_HEAD(, cfil_entry) cfi_ordered_entries; |
527 | os_refcnt_t cfi_ref_count; |
528 | } __attribute__((aligned(8))); |
529 | |
530 | #define CFIF_DROP 0x0001 /* drop action applied */ |
531 | #define CFIF_CLOSE_WAIT 0x0002 /* waiting for filter to close */ |
532 | #define CFIF_SOCK_CLOSED 0x0004 /* socket is closed */ |
533 | #define CFIF_RETRY_INJECT_IN 0x0010 /* inject in failed */ |
534 | #define CFIF_RETRY_INJECT_OUT 0x0020 /* inject out failed */ |
535 | #define CFIF_SHUT_WR 0x0040 /* shutdown write */ |
536 | #define CFIF_SHUT_RD 0x0080 /* shutdown read */ |
537 | #define CFIF_SOCKET_CONNECTED 0x0100 /* socket is connected */ |
538 | #define CFIF_INITIAL_VERDICT 0x0200 /* received initial verdict */ |
539 | #define CFIF_NO_CLOSE_WAIT 0x0400 /* do not wait to close */ |
540 | #define CFIF_SO_DELAYED_DEAD 0x0800 /* Delayed socket DEAD marking */ |
541 | #define CFIF_SO_DELAYED_TCP_TIME_WAIT 0x1000 /* Delayed TCP FIN TIME WAIT */ |
542 | |
543 | #define CFI_MASK_GENCNT 0xFFFFFFFF00000000 /* upper 32 bits */ |
544 | #define CFI_SHIFT_GENCNT 32 |
545 | #define CFI_MASK_FLOWHASH 0x00000000FFFFFFFF /* lower 32 bits */ |
546 | #define CFI_SHIFT_FLOWHASH 0 |
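
/*
 * For illustration, given a datagram flow's cfi_sock_id, the owning socket's
 * generation count and the flow hash can be recovered as follows (sketch):
 *
 *	so_gencnt = (cfi_sock_id & CFI_MASK_GENCNT) >> CFI_SHIFT_GENCNT;
 *	flowhash  = (uint32_t)((cfi_sock_id & CFI_MASK_FLOWHASH) >> CFI_SHIFT_FLOWHASH);
 */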
547 | |
548 | #define CFI_ENTRY_KCUNIT(i, e) ((uint32_t)(((e) - &((i)->cfi_entries[0])) + 1)) |
549 | |
550 | static KALLOC_TYPE_DEFINE(cfil_info_zone, struct cfil_info, NET_KT_DEFAULT); |
551 | |
552 | TAILQ_HEAD(cfil_sock_head, cfil_info) cfil_sock_head; |
553 | TAILQ_HEAD(cfil_sock_head_stats, cfil_info) cfil_sock_head_stats; |
554 | |
555 | #define CFIL_QUEUE_VERIFY(x) if (cfil_debug) cfil_queue_verify(x) |
556 | #define CFIL_INFO_VERIFY(x) if (cfil_debug) cfil_info_verify(x) |
557 | |
558 | /* |
559 | * UDP Socket Support |
560 | */ |
561 | #define IS_ICMP(so) (so && (SOCK_CHECK_TYPE(so, SOCK_RAW) || SOCK_CHECK_TYPE(so, SOCK_DGRAM)) && \ |
562 | (SOCK_CHECK_PROTO(so, IPPROTO_ICMP) || SOCK_CHECK_PROTO(so, IPPROTO_ICMPV6))) |
563 | #define IS_RAW(so) (so && SOCK_CHECK_TYPE(so, SOCK_RAW) && SOCK_CHECK_PROTO(so, IPPROTO_RAW)) |
564 | |
565 | #define OPTIONAL_IP_HEADER(so) (!IS_TCP(so) && !IS_UDP(so)) |
566 | #define GET_SO_PROTOCOL(so) (so ? SOCK_PROTO(so) : IPPROTO_IP) |
567 | #define GET_SO_INP_PROTOCOL(so) ((so && sotoinpcb(so)) ? sotoinpcb(so)->inp_ip_p : IPPROTO_IP) |
568 | #define GET_SO_PROTO(so) ((GET_SO_PROTOCOL(so) != IPPROTO_IP) ? GET_SO_PROTOCOL(so) : GET_SO_INP_PROTOCOL(so)) |
569 | #define IS_INP_V6(inp) (inp && (inp->inp_vflag & INP_IPV6)) |
570 | |
571 | #define UNCONNECTED(inp) (inp && (((inp->inp_vflag & INP_IPV4) && (inp->inp_faddr.s_addr == INADDR_ANY)) || \ |
572 | ((inp->inp_vflag & INP_IPV6) && IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_faddr)))) |
573 | #define IS_ENTRY_ATTACHED(cfil_info, kcunit) (cfil_info != NULL && (kcunit <= MAX_CONTENT_FILTER) && \ |
574 | cfil_info->cfi_entries[kcunit - 1].cfe_filter != NULL) |
575 | #define IS_DNS(local, remote) (check_port(local, 53) || check_port(remote, 53) || check_port(local, 5353) || check_port(remote, 5353)) |
576 | #define IS_INITIAL_TFO_DATA(so) (so && (so->so_flags1 & SOF1_PRECONNECT_DATA) && (so->so_state & SS_ISCONNECTING)) |
577 | #define NULLADDRESS(addr) ((addr.sa.sa_len == 0) || \ |
578 | (addr.sa.sa_family == AF_INET && addr.sin.sin_addr.s_addr == 0) || \ |
579 | (addr.sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr.sin6.sin6_addr))) |
580 | |
581 | #define SKIP_FILTER_FOR_TCP_SOCKET(so) \ |
582 | (so == NULL || \ |
583 | (!SOCK_CHECK_DOM(so, PF_INET) && !SOCK_CHECK_DOM(so, PF_INET6)) || \ |
584 | !SOCK_CHECK_TYPE(so, SOCK_STREAM) || \ |
585 | !SOCK_CHECK_PROTO(so, IPPROTO_TCP) || \ |
586 | (so->so_flags & SOF_MP_SUBFLOW) != 0 || \ |
587 | (so->so_flags1 & SOF1_CONTENT_FILTER_SKIP) != 0) |
588 | |
589 | /* |
590 | * Special handling for 0.0.0.0-faddr TCP flows. These flows will be changed to loopback addr by TCP and |
591 | * may result in an immediate TCP RESET and socket close. This leads to CFIL blocking the owner thread for |
592 | * 1 sec waiting for ack from user-space provider (ack received by CFIL but socket already removed from |
593 | * global socket list). To avoid this, identify these flows and do not perform the close-wait blocking. |
594 | * These flows are identified as destined to the loopback address and were disconnected shortly after connect |
595 | * (before initial-verdict received). |
596 | */ |
597 | #define IS_LOOPBACK_FADDR(inp) \ |
598 | (inp && ((IS_INP_V6(inp) && IN6_IS_ADDR_LOOPBACK(&inp->in6p_faddr)) || (ntohl(inp->inp_faddr.s_addr) == INADDR_LOOPBACK))) |
599 | |
600 | #define SET_NO_CLOSE_WAIT(inp, cfil_info) \ |
601 | if (inp && cfil_info && !(cfil_info->cfi_flags & CFIF_INITIAL_VERDICT) && IS_LOOPBACK_FADDR(inp)) { \ |
602 | cfil_info->cfi_flags |= CFIF_NO_CLOSE_WAIT; \ |
603 | } |
604 | |
605 | #define IS_NO_CLOSE_WAIT(cfil_info) (cfil_info && (cfil_info->cfi_flags & CFIF_NO_CLOSE_WAIT)) |
606 | |
607 | os_refgrp_decl(static, cfil_refgrp, "CFILRefGroup" , NULL); |
608 | |
609 | #define CFIL_INFO_FREE(cfil_info) \ |
610 | if (cfil_info && (os_ref_release(&cfil_info->cfi_ref_count) == 0)) { \ |
611 | cfil_info_free(cfil_info); \ |
612 | } |
613 | |
614 | #define SOCKET_PID(so) ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid) |
615 | #define MATCH_PID(so) (so && (cfil_log_pid == SOCKET_PID(so))) |
616 | #define MATCH_PORT(inp, local, remote) \ |
617 | ((inp && ntohs(inp->inp_lport) == cfil_log_port) || (inp && ntohs(inp->inp_fport) == cfil_log_port) || \ |
618 | check_port(local, cfil_log_port) || check_port(remote, cfil_log_port)) |
619 | #define MATCH_PROTO(so) (GET_SO_PROTO(so) == cfil_log_proto) |
620 | |
621 | #define DEBUG_FLOW(inp, so, local, remote) \ |
622 | ((cfil_log_port && MATCH_PORT(inp, local, remote)) || (cfil_log_pid && MATCH_PID(so)) || (cfil_log_proto && MATCH_PROTO(so))) |
623 | |
624 | #define SO_DELAYED_DEAD_SET(so, set) \ |
625 | if (so->so_cfil) { \ |
626 | if (set) { \ |
627 | so->so_cfil->cfi_flags |= CFIF_SO_DELAYED_DEAD; \ |
628 | } else { \ |
629 | so->so_cfil->cfi_flags &= ~CFIF_SO_DELAYED_DEAD; \ |
630 | } \ |
631 | } else if (so->so_flow_db) { \ |
632 | if (set) { \ |
633 | so->so_flow_db->soflow_db_flags |= SOFLOWF_SO_DELAYED_DEAD; \ |
634 | } else { \ |
635 | so->so_flow_db->soflow_db_flags &= ~SOFLOWF_SO_DELAYED_DEAD; \ |
636 | } \ |
637 | } |
638 | |
639 | #define SO_DELAYED_DEAD_GET(so) \ |
640 | (so->so_cfil ? (so->so_cfil->cfi_flags & CFIF_SO_DELAYED_DEAD) : \ |
641 | (so->so_flow_db) ? (so->so_flow_db->soflow_db_flags & SOFLOWF_SO_DELAYED_DEAD) : false) |
642 | |
643 | #define SO_DELAYED_TCP_TIME_WAIT_SET(so, set) \ |
644 | if (so->so_cfil) { \ |
645 | if (set) { \ |
646 | so->so_cfil->cfi_flags |= CFIF_SO_DELAYED_TCP_TIME_WAIT; \ |
647 | } else { \ |
648 | so->so_cfil->cfi_flags &= ~CFIF_SO_DELAYED_TCP_TIME_WAIT; \ |
649 | } \ |
650 | } |
651 | |
652 | #define SO_DELAYED_TCP_TIME_WAIT_GET(so) \ |
653 | (so->so_cfil ? (so->so_cfil->cfi_flags & CFIF_SO_DELAYED_TCP_TIME_WAIT) : false) |
654 | |
655 | /* |
656 | * Periodic Statistics Report: |
657 | */ |
658 | static struct thread *cfil_stats_report_thread; |
659 | #define CFIL_STATS_REPORT_INTERVAL_MIN_MSEC 500 // Highest report frequency |
660 | #define CFIL_STATS_REPORT_RUN_INTERVAL_NSEC (CFIL_STATS_REPORT_INTERVAL_MIN_MSEC * NSEC_PER_MSEC) |
661 | #define CFIL_STATS_REPORT_MAX_COUNT 50 // Max stats to be reported per run |
662 | |
663 | /* This buffer must have same layout as struct cfil_msg_stats_report */ |
664 | struct cfil_stats_report_buffer { |
665 | struct cfil_msg_hdr msghdr; |
666 | uint32_t count; |
667 | struct cfil_msg_sock_stats stats[CFIL_STATS_REPORT_MAX_COUNT]; |
668 | }; |
669 | static struct cfil_stats_report_buffer *global_cfil_stats_report_buffers[MAX_CONTENT_FILTER]; |
670 | static uint32_t global_cfil_stats_counts[MAX_CONTENT_FILTER]; |
671 | |
672 | /* |
673 | * UDP Garbage Collection: |
674 | */ |
675 | #define UDP_FLOW_GC_ACTION_TO 10 // Flow Action Timeout (no action from user space) in seconds |
676 | #define UDP_FLOW_GC_MAX_COUNT 100 // Max UDP flows to be handled per run |
677 | |
678 | /* |
679 | * UDP flow queue thresholds |
680 | */ |
681 | #define UDP_FLOW_GC_MBUF_CNT_MAX (2 << MBSHIFT) // Max mbuf byte count in flow queue (2MB) |
682 | #define UDP_FLOW_GC_MBUF_NUM_MAX (UDP_FLOW_GC_MBUF_CNT_MAX >> MCLSHIFT) // Max mbuf count in flow queue (1K) |
683 | #define UDP_FLOW_GC_MBUF_SHIFT 5 // Shift to get 1/32 of platform limits |
684 | /* |
685 | * UDP flow queue threshold globals: |
686 | */ |
687 | static unsigned int cfil_udp_gc_mbuf_num_max = UDP_FLOW_GC_MBUF_NUM_MAX; |
688 | static unsigned int cfil_udp_gc_mbuf_cnt_max = UDP_FLOW_GC_MBUF_CNT_MAX; |
689 | |
690 | /* |
691 | * CFIL specific mbuf tag: |
692 | * Save state of socket at the point of data entry into cfil. |
693 | * Use saved state for reinjection at protocol layer. |
694 | */ |
695 | struct cfil_tag { |
696 | union sockaddr_in_4_6 cfil_faddr; |
697 | uint32_t cfil_so_state_change_cnt; |
698 | uint32_t cfil_so_options; |
699 | int cfil_inp_flags; |
700 | }; |
701 | |
702 | /* |
703 | * Global behavior flags: |
704 | */ |
705 | #define CFIL_BEHAVIOR_FLAG_PRESERVE_CONNECTIONS 0x00000001 |
706 | static uint32_t cfil_behavior_flags = 0; |
707 | |
708 | #define DO_PRESERVE_CONNECTIONS (cfil_behavior_flags & CFIL_BEHAVIOR_FLAG_PRESERVE_CONNECTIONS) |
709 | |
710 | /* |
711 | * Statistics |
712 | */ |
713 | |
714 | struct cfil_stats cfil_stats; |
715 | |
716 | /* |
717 | * For troubleshooting |
718 | */ |
719 | int cfil_log_level = LOG_ERR; |
720 | int cfil_log_port = 0; |
721 | int cfil_log_pid = 0; |
722 | int cfil_log_proto = 0; |
723 | int cfil_log_data = 0; |
724 | int cfil_log_stats = 0; |
725 | int cfil_debug = 1; |
726 | |
727 | /* |
728 | * Sysctls for logs and statistics |
729 | */ |
730 | static int sysctl_cfil_filter_list(struct sysctl_oid *, void *, int, |
731 | struct sysctl_req *); |
732 | static int sysctl_cfil_sock_list(struct sysctl_oid *, void *, int, |
733 | struct sysctl_req *); |
734 | |
735 | SYSCTL_NODE(_net, OID_AUTO, cfil, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "cfil" ); |
736 | |
737 | SYSCTL_INT(_net_cfil, OID_AUTO, log, CTLFLAG_RW | CTLFLAG_LOCKED, |
738 | &cfil_log_level, 0, "" ); |
739 | |
740 | SYSCTL_INT(_net_cfil, OID_AUTO, log_port, CTLFLAG_RW | CTLFLAG_LOCKED, |
741 | &cfil_log_port, 0, "" ); |
742 | |
743 | SYSCTL_INT(_net_cfil, OID_AUTO, log_pid, CTLFLAG_RW | CTLFLAG_LOCKED, |
744 | &cfil_log_pid, 0, "" ); |
745 | |
746 | SYSCTL_INT(_net_cfil, OID_AUTO, log_proto, CTLFLAG_RW | CTLFLAG_LOCKED, |
747 | &cfil_log_proto, 0, "" ); |
748 | |
749 | SYSCTL_INT(_net_cfil, OID_AUTO, log_data, CTLFLAG_RW | CTLFLAG_LOCKED, |
750 | &cfil_log_data, 0, "" ); |
751 | |
752 | SYSCTL_INT(_net_cfil, OID_AUTO, log_stats, CTLFLAG_RW | CTLFLAG_LOCKED, |
753 | &cfil_log_stats, 0, "" ); |
754 | |
755 | SYSCTL_INT(_net_cfil, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, |
756 | &cfil_debug, 0, "" ); |
757 | |
758 | SYSCTL_UINT(_net_cfil, OID_AUTO, sock_attached_count, CTLFLAG_RD | CTLFLAG_LOCKED, |
759 | &cfil_sock_attached_count, 0, "" ); |
760 | |
761 | SYSCTL_UINT(_net_cfil, OID_AUTO, active_count, CTLFLAG_RD | CTLFLAG_LOCKED, |
762 | &cfil_active_count, 0, "" ); |
763 | |
764 | SYSCTL_UINT(_net_cfil, OID_AUTO, close_wait_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, |
765 | &cfil_close_wait_timeout, 0, "" ); |
766 | |
767 | SYSCTL_UINT(_net_cfil, OID_AUTO, behavior_flags, CTLFLAG_RW | CTLFLAG_LOCKED, |
768 | &cfil_behavior_flags, 0, "" ); |
769 | |
770 | static int cfil_sbtrim = 1; |
771 | SYSCTL_UINT(_net_cfil, OID_AUTO, sbtrim, CTLFLAG_RW | CTLFLAG_LOCKED, |
772 | &cfil_sbtrim, 0, "" ); |
773 | |
774 | SYSCTL_PROC(_net_cfil, OID_AUTO, filter_list, CTLFLAG_RD | CTLFLAG_LOCKED, |
775 | 0, 0, sysctl_cfil_filter_list, "S,cfil_filter_stat" , "" ); |
776 | |
777 | SYSCTL_PROC(_net_cfil, OID_AUTO, sock_list, CTLFLAG_RD | CTLFLAG_LOCKED, |
778 | 0, 0, sysctl_cfil_sock_list, "S,cfil_sock_stat" , "" ); |
779 | |
780 | SYSCTL_STRUCT(_net_cfil, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED, |
781 | &cfil_stats, cfil_stats, "" ); |
782 | |
783 | /* |
784 | * Forward declaration to appease the compiler |
785 | */ |
786 | static int cfil_action_data_pass(struct socket *, struct cfil_info *, uint32_t, int, |
787 | uint64_t, uint64_t); |
788 | static int cfil_action_drop(struct socket *, struct cfil_info *, uint32_t); |
789 | static int cfil_action_bless_client(uint32_t, struct cfil_msg_hdr *); |
790 | static int cfil_action_set_crypto_key(uint32_t, struct cfil_msg_hdr *); |
791 | static int cfil_dispatch_closed_event(struct socket *, struct cfil_info *, int); |
792 | static int cfil_data_common(struct socket *, struct cfil_info *, int, struct sockaddr *, |
793 | struct mbuf *, struct mbuf *, uint32_t); |
794 | static int cfil_data_filter(struct socket *, struct cfil_info *, uint32_t, int, |
795 | struct mbuf *, uint32_t); |
796 | static void fill_ip_sockaddr_4_6(union sockaddr_in_4_6 *, |
797 | struct in_addr, u_int16_t); |
798 | static void fill_ip6_sockaddr_4_6(union sockaddr_in_4_6 *, |
799 | struct in6_addr *, u_int16_t, uint32_t); |
800 | |
801 | static int cfil_dispatch_attach_event(struct socket *, struct cfil_info *, uint32_t, int); |
802 | static void cfil_info_free(struct cfil_info *); |
803 | static struct cfil_info * cfil_info_alloc(struct socket *, struct soflow_hash_entry *); |
804 | static int cfil_info_attach_unit(struct socket *, uint32_t, struct cfil_info *); |
805 | static struct socket * cfil_socket_from_sock_id(cfil_sock_id_t, bool); |
806 | static struct socket * cfil_socket_from_client_uuid(uuid_t, bool *); |
807 | static int cfil_service_pending_queue(struct socket *, struct cfil_info *, uint32_t, int); |
808 | static int cfil_data_service_ctl_q(struct socket *, struct cfil_info *, uint32_t, int); |
809 | static void cfil_info_verify(struct cfil_info *); |
810 | static int cfil_update_data_offsets(struct socket *, struct cfil_info *, uint32_t, int, |
811 | uint64_t, uint64_t); |
812 | static int cfil_acquire_sockbuf(struct socket *, struct cfil_info *, int); |
813 | static void cfil_release_sockbuf(struct socket *, int); |
814 | static int cfil_filters_attached(struct socket *); |
815 | |
816 | static void cfil_rw_lock_exclusive(lck_rw_t *); |
817 | static void cfil_rw_unlock_exclusive(lck_rw_t *); |
818 | static void cfil_rw_lock_shared(lck_rw_t *); |
819 | static void cfil_rw_unlock_shared(lck_rw_t *); |
820 | static boolean_t cfil_rw_lock_shared_to_exclusive(lck_rw_t *); |
821 | static void cfil_rw_lock_exclusive_to_shared(lck_rw_t *); |
822 | |
823 | static unsigned int cfil_data_length(struct mbuf *, int *, int *); |
824 | static struct cfil_info *cfil_sock_udp_get_info(struct socket *, uint32_t, bool, struct soflow_hash_entry *, struct sockaddr *, struct sockaddr *); |
825 | static errno_t cfil_sock_udp_handle_data(bool, struct socket *, struct sockaddr *, struct sockaddr *, |
826 | struct mbuf *, struct mbuf *, uint32_t, struct soflow_hash_entry *); |
827 | static int32_t cfil_sock_udp_data_pending(struct sockbuf *, bool); |
828 | static void cfil_sock_udp_is_closed(struct socket *); |
829 | static int cfil_sock_udp_notify_shutdown(struct socket *, int, int, int); |
830 | static int cfil_sock_udp_shutdown(struct socket *, int *); |
831 | static void cfil_sock_udp_close_wait(struct socket *); |
832 | static void cfil_sock_udp_buf_update(struct sockbuf *); |
833 | static int cfil_filters_udp_attached(struct socket *, bool); |
834 | static void cfil_get_flow_address_v6(struct soflow_hash_entry *, struct inpcb *, |
835 | struct in6_addr **, struct in6_addr **, |
836 | u_int16_t *, u_int16_t *); |
837 | static void cfil_get_flow_address(struct soflow_hash_entry *, struct inpcb *, |
838 | struct in_addr *, struct in_addr *, |
839 | u_int16_t *, u_int16_t *); |
840 | static void cfil_info_log(int, struct cfil_info *, const char *); |
841 | void cfil_filter_show(u_int32_t); |
842 | void cfil_info_show(void); |
843 | bool cfil_info_action_timed_out(struct cfil_info *, int); |
844 | bool cfil_info_buffer_threshold_exceeded(struct cfil_info *); |
845 | struct m_tag *cfil_dgram_save_socket_state(struct cfil_info *, struct mbuf *); |
846 | boolean_t cfil_dgram_peek_socket_state(struct mbuf *m, int *inp_flags); |
847 | static void cfil_sock_received_verdict(struct socket *so); |
848 | static void cfil_fill_event_msg_addresses(struct soflow_hash_entry *, struct inpcb *, |
849 | union sockaddr_in_4_6 *, union sockaddr_in_4_6 *, |
850 | boolean_t, boolean_t); |
851 | static void cfil_stats_report_thread_func(void *, wait_result_t); |
852 | static void cfil_stats_report(void *v, wait_result_t w); |
853 | static bool cfil_dgram_gc_needed(struct socket *, struct soflow_hash_entry *, u_int64_t); |
854 | static bool cfil_dgram_gc_perform(struct socket *, struct soflow_hash_entry *); |
855 | static bool cfil_dgram_detach_entry(struct socket *, struct soflow_hash_entry *); |
856 | static bool cfil_dgram_detach_db(struct socket *, struct soflow_db *); |
857 | bool check_port(struct sockaddr *, u_short); |
858 | |
859 | /* |
860 | * Content filter global read write lock |
861 | */ |
862 | |
863 | static void |
864 | cfil_rw_lock_exclusive(lck_rw_t *lck) |
865 | { |
866 | void *lr_saved; |
867 | |
868 | lr_saved = __builtin_return_address(0); |
869 | |
870 | lck_rw_lock_exclusive(lck); |
871 | |
872 | cfil_rw_lock_history[cfil_rw_nxt_lck] = lr_saved; |
873 | cfil_rw_nxt_lck = (cfil_rw_nxt_lck + 1) % CFIL_RW_LCK_MAX; |
874 | } |
875 | |
876 | static void |
877 | cfil_rw_unlock_exclusive(lck_rw_t *lck) |
878 | { |
879 | void *lr_saved; |
880 | |
881 | lr_saved = __builtin_return_address(0); |
882 | |
883 | lck_rw_unlock_exclusive(lck); |
884 | |
885 | cfil_rw_unlock_history[cfil_rw_nxt_unlck] = lr_saved; |
886 | cfil_rw_nxt_unlck = (cfil_rw_nxt_unlck + 1) % CFIL_RW_LCK_MAX; |
887 | } |
888 | |
889 | static void |
890 | cfil_rw_lock_shared(lck_rw_t *lck) |
891 | { |
892 | void *lr_saved; |
893 | |
894 | lr_saved = __builtin_return_address(0); |
895 | |
896 | lck_rw_lock_shared(lck); |
897 | |
898 | cfil_rw_lock_history[cfil_rw_nxt_lck] = lr_saved; |
899 | cfil_rw_nxt_lck = (cfil_rw_nxt_lck + 1) % CFIL_RW_LCK_MAX; |
900 | } |
901 | |
902 | static void |
903 | cfil_rw_unlock_shared(lck_rw_t *lck) |
904 | { |
905 | void *lr_saved; |
906 | |
907 | lr_saved = __builtin_return_address(0); |
908 | |
909 | lck_rw_unlock_shared(lck); |
910 | |
911 | cfil_rw_unlock_history[cfil_rw_nxt_unlck] = lr_saved; |
912 | cfil_rw_nxt_unlck = (cfil_rw_nxt_unlck + 1) % CFIL_RW_LCK_MAX; |
913 | } |
914 | |
915 | static boolean_t |
916 | cfil_rw_lock_shared_to_exclusive(lck_rw_t *lck) |
917 | { |
918 | void *lr_saved; |
919 | boolean_t upgraded; |
920 | |
921 | lr_saved = __builtin_return_address(0); |
922 | |
923 | upgraded = lck_rw_lock_shared_to_exclusive(lck); |
924 | if (upgraded) { |
925 | cfil_rw_unlock_history[cfil_rw_nxt_unlck] = lr_saved; |
926 | cfil_rw_nxt_unlck = (cfil_rw_nxt_unlck + 1) % CFIL_RW_LCK_MAX; |
927 | } |
928 | return upgraded; |
929 | } |
930 | |
931 | static void |
932 | cfil_rw_lock_exclusive_to_shared(lck_rw_t *lck) |
933 | { |
934 | void *lr_saved; |
935 | |
936 | lr_saved = __builtin_return_address(0); |
937 | |
938 | lck_rw_lock_exclusive_to_shared(lck); |
939 | |
940 | cfil_rw_lock_history[cfil_rw_nxt_lck] = lr_saved; |
941 | cfil_rw_nxt_lck = (cfil_rw_nxt_lck + 1) % CFIL_RW_LCK_MAX; |
942 | } |
943 | |
944 | static void |
945 | cfil_rw_lock_assert_held(lck_rw_t *lck, int exclusive) |
946 | { |
947 | #if !MACH_ASSERT |
948 | #pragma unused(lck, exclusive) |
949 | #endif |
950 | LCK_RW_ASSERT(lck, |
951 | exclusive ? LCK_RW_ASSERT_EXCLUSIVE : LCK_RW_ASSERT_HELD); |
952 | } |
953 | |
954 | /* |
955 | * Return the number of bytes in the mbuf chain using the same |
956 | * method as m_length() or sballoc() |
957 | * |
958 | * Returns data len - starting from PKT start |
959 | * - retmbcnt - optional param to get total mbuf bytes in chain |
960 | * - retmbnum - optional param to get number of mbufs in chain |
961 | */ |
962 | static unsigned int |
963 | cfil_data_length(struct mbuf *m, int *retmbcnt, int *retmbnum) |
964 | { |
965 | struct mbuf *m0; |
966 | unsigned int pktlen = 0; |
967 | int mbcnt; |
968 | int mbnum; |
969 | |
970 | // Locate M_PKTHDR and mark as start of data if present |
971 | for (m0 = m; m0 != NULL; m0 = m0->m_next) { |
972 | if (m0->m_flags & M_PKTHDR) { |
973 | m = m0; |
974 | break; |
975 | } |
976 | } |
977 | |
978 | if (retmbcnt == NULL && retmbnum == NULL) { |
979 | return m_length(m); |
980 | } |
981 | |
982 | pktlen = 0; |
983 | mbcnt = 0; |
984 | mbnum = 0; |
985 | for (m0 = m; m0 != NULL; m0 = m0->m_next) { |
986 | pktlen += m0->m_len; |
987 | mbnum++; |
988 | mbcnt += _MSIZE; |
989 | if (m0->m_flags & M_EXT) { |
990 | mbcnt += m0->m_ext.ext_size; |
991 | } |
992 | } |
993 | if (retmbcnt) { |
994 | *retmbcnt = mbcnt; |
995 | } |
996 | if (retmbnum) { |
997 | *retmbnum = mbnum; |
998 | } |
999 | return pktlen; |
1000 | } |
1001 | |
1002 | static struct mbuf * |
1003 | cfil_data_start(struct mbuf *m) |
1004 | { |
1005 | struct mbuf *m0; |
1006 | |
1007 | // Locate M_PKTHDR and use it as start of data if present |
1008 | for (m0 = m; m0 != NULL; m0 = m0->m_next) { |
1009 | if (m0->m_flags & M_PKTHDR) { |
1010 | return m0; |
1011 | } |
1012 | } |
1013 | return m; |
1014 | } |
1015 | |
1016 | /* |
1017 | * Common mbuf queue utilities |
1018 | */ |
1019 | |
1020 | static inline void |
1021 | cfil_queue_init(struct cfil_queue *cfq) |
1022 | { |
1023 | cfq->q_start = 0; |
1024 | cfq->q_end = 0; |
1025 | MBUFQ_INIT(&cfq->q_mq); |
1026 | } |
1027 | |
1028 | static inline uint64_t |
1029 | cfil_queue_drain(struct cfil_queue *cfq) |
1030 | { |
1031 | uint64_t drained = cfq->q_end - cfq->q_start; |
1032 | cfq->q_start = 0; |
1033 | cfq->q_end = 0; |
1034 | MBUFQ_DRAIN(&cfq->q_mq); |
1035 | |
1036 | return drained; |
1037 | } |
1038 | |
1039 | /* Return 1 when empty, 0 otherwise */ |
1040 | static inline int |
1041 | cfil_queue_empty(struct cfil_queue *cfq) |
1042 | { |
1043 | return MBUFQ_EMPTY(&cfq->q_mq); |
1044 | } |
1045 | |
1046 | static inline uint64_t |
1047 | cfil_queue_offset_first(struct cfil_queue *cfq) |
1048 | { |
1049 | return cfq->q_start; |
1050 | } |
1051 | |
1052 | static inline uint64_t |
1053 | cfil_queue_offset_last(struct cfil_queue *cfq) |
1054 | { |
1055 | return cfq->q_end; |
1056 | } |
1057 | |
1058 | static inline uint64_t |
1059 | cfil_queue_len(struct cfil_queue *cfq) |
1060 | { |
1061 | return cfq->q_end - cfq->q_start; |
1062 | } |
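
/*
 * For illustration, the queue offsets are absolute stream positions: if a
 * queue currently holds bytes 1000..1499 of a flow, then q_start == 1000,
 * q_end == 1500 and cfil_queue_len() returns 500. This is a sketch of the
 * invariant checked by cfil_queue_verify() below.
 */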
1063 | |
1064 | /* |
1065 | * Routines to verify some fundamental assumptions |
1066 | */ |
1067 | |
1068 | static void |
1069 | cfil_queue_verify(struct cfil_queue *cfq) |
1070 | { |
1071 | mbuf_t chain; |
1072 | mbuf_t m; |
1073 | mbuf_t n; |
1074 | uint64_t queuesize = 0; |
1075 | |
1076 | /* Verify offsets are ordered */ |
1077 | VERIFY(cfq->q_start <= cfq->q_end); |
1078 | |
1079 | /* |
1080 | * When the queue is empty, the offsets are equal; otherwise the offsets |
1081 | * are different |
1082 | */ |
1083 | VERIFY((MBUFQ_EMPTY(&cfq->q_mq) && cfq->q_start == cfq->q_end) || |
1084 | (!MBUFQ_EMPTY(&cfq->q_mq) && |
1085 | cfq->q_start != cfq->q_end)); |
1086 | |
1087 | MBUFQ_FOREACH(chain, &cfq->q_mq) { |
1088 | size_t chainsize = 0; |
1089 | m = chain; |
1090 | unsigned int mlen = cfil_data_length(m, NULL, NULL); |
1091 | // skip the addr and control stuff if present |
1092 | m = cfil_data_start(m); |
1093 | |
1094 | if (m == NULL || |
1095 | m == (void *)M_TAG_FREE_PATTERN || |
1096 | m->m_next == (void *)M_TAG_FREE_PATTERN || |
1097 | m->m_nextpkt == (void *)M_TAG_FREE_PATTERN) { |
1098 | panic("%s - mq %p is free at %p" , __func__, |
1099 | &cfq->q_mq, m); |
1100 | } |
1101 | for (n = m; n != NULL; n = n->m_next) { |
1102 | if (!m_has_mtype(n, MTF_DATA | MTF_HEADER | MTF_OOBDATA)) { |
1103 | panic("%s - %p unsupported type %u" , __func__, |
1104 | n, n->m_type); |
1105 | } |
1106 | chainsize += n->m_len; |
1107 | } |
1108 | if (mlen != chainsize) { |
1109 | panic("%s - %p m_length() %u != chainsize %lu" , |
1110 | __func__, m, mlen, chainsize); |
1111 | } |
1112 | queuesize += chainsize; |
1113 | } |
1114 | OS_ANALYZER_SUPPRESS("81031590" ) if (queuesize != cfq->q_end - cfq->q_start) { |
1115 | panic("%s - %p queuesize %llu != offsetdiffs %llu" , __func__, |
1116 | m, queuesize, cfq->q_end - cfq->q_start); |
1117 | } |
1118 | } |
1119 | |
1120 | static void |
1121 | cfil_queue_enqueue(struct cfil_queue *cfq, mbuf_t m, size_t len) |
1122 | { |
1123 | CFIL_QUEUE_VERIFY(cfq); |
1124 | |
1125 | MBUFQ_ENQUEUE(&cfq->q_mq, m); |
1126 | cfq->q_end += len; |
1127 | |
1128 | CFIL_QUEUE_VERIFY(cfq); |
1129 | } |
1130 | |
1131 | static void |
1132 | cfil_queue_remove(struct cfil_queue *cfq, mbuf_t m, size_t len) |
1133 | { |
1134 | CFIL_QUEUE_VERIFY(cfq); |
1135 | |
1136 | VERIFY(cfil_data_length(m, NULL, NULL) == len); |
1137 | |
1138 | MBUFQ_REMOVE(&cfq->q_mq, m); |
1139 | MBUFQ_NEXT(m) = NULL; |
1140 | cfq->q_start += len; |
1141 | |
1142 | CFIL_QUEUE_VERIFY(cfq); |
1143 | } |
1144 | |
1145 | static mbuf_t |
1146 | cfil_queue_first(struct cfil_queue *cfq) |
1147 | { |
1148 | return MBUFQ_FIRST(&cfq->q_mq); |
1149 | } |
1150 | |
1151 | static mbuf_t |
1152 | cfil_queue_next(struct cfil_queue *cfq, mbuf_t m) |
1153 | { |
1154 | #pragma unused(cfq) |
1155 | return MBUFQ_NEXT(m); |
1156 | } |
1157 | |
1158 | static void |
1159 | cfil_entry_buf_verify(struct cfe_buf *cfe_buf) |
1160 | { |
1161 | CFIL_QUEUE_VERIFY(&cfe_buf->cfe_ctl_q); |
1162 | CFIL_QUEUE_VERIFY(&cfe_buf->cfe_pending_q); |
1163 | |
1164 | /* Verify the queues are ordered so that pending is before ctl */ |
1165 | VERIFY(cfe_buf->cfe_ctl_q.q_start >= cfe_buf->cfe_pending_q.q_end); |
1166 | |
1167 | /* The peek offset cannot be less than the pass offset */ |
1168 | VERIFY(cfe_buf->cfe_peek_offset >= cfe_buf->cfe_pass_offset); |
1169 | |
1170 | /* Make sure we've updated the offset we peeked at */ |
1171 | VERIFY(cfe_buf->cfe_ctl_q.q_start <= cfe_buf->cfe_peeked); |
1172 | } |
1173 | |
1174 | static void |
1175 | cfil_entry_verify(struct cfil_entry *entry) |
1176 | { |
1177 | cfil_entry_buf_verify(&entry->cfe_snd); |
1178 | cfil_entry_buf_verify(&entry->cfe_rcv); |
1179 | } |
1180 | |
1181 | static void |
1182 | cfil_info_buf_verify(struct cfi_buf *cfi_buf) |
1183 | { |
1184 | CFIL_QUEUE_VERIFY(&cfi_buf->cfi_inject_q); |
1185 | |
1186 | VERIFY(cfi_buf->cfi_pending_first <= cfi_buf->cfi_pending_last); |
1187 | } |
1188 | |
1189 | static void |
1190 | cfil_info_verify(struct cfil_info *cfil_info) |
1191 | { |
1192 | int i; |
1193 | |
1194 | if (cfil_info == NULL) { |
1195 | return; |
1196 | } |
1197 | |
1198 | cfil_info_buf_verify(&cfil_info->cfi_snd); |
1199 | cfil_info_buf_verify(&cfil_info->cfi_rcv); |
1200 | |
1201 | for (i = 0; i < MAX_CONTENT_FILTER; i++) { |
1202 | cfil_entry_verify(&cfil_info->cfi_entries[i]); |
1203 | } |
1204 | } |
1205 | |
1206 | static void |
1207 | verify_content_filter(struct content_filter *cfc) |
1208 | { |
1209 | struct cfil_entry *entry; |
1210 | uint32_t count = 0; |
1211 | |
1212 | VERIFY(cfc->cf_sock_count >= 0); |
1213 | |
1214 | TAILQ_FOREACH(entry, &cfc->cf_sock_entries, cfe_link) { |
1215 | count++; |
1216 | VERIFY(cfc == entry->cfe_filter); |
1217 | } |
1218 | VERIFY(count == cfc->cf_sock_count); |
1219 | } |
1220 | |
1221 | /* |
1222 | * Kernel control socket callbacks |
1223 | */ |
1224 | static errno_t |
1225 | cfil_ctl_connect(kern_ctl_ref kctlref, struct sockaddr_ctl *sac, |
1226 | void **unitinfo) |
1227 | { |
1228 | errno_t error = 0; |
1229 | struct content_filter *cfc = NULL; |
1230 | |
1231 | CFIL_LOG(LOG_NOTICE, "" ); |
1232 | |
1233 | cfc = zalloc_flags(content_filter_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL); |
1234 | |
1235 | cfil_rw_lock_exclusive(&cfil_lck_rw); |
1236 | |
1237 | if (sac->sc_unit == 0 || sac->sc_unit > MAX_CONTENT_FILTER) { |
1238 | CFIL_LOG(LOG_ERR, "bad sc_unit %u" , sac->sc_unit); |
1239 | error = EINVAL; |
1240 | } else if (content_filters[sac->sc_unit - 1] != NULL) { |
1241 | CFIL_LOG(LOG_ERR, "sc_unit %u in use" , sac->sc_unit); |
1242 | error = EADDRINUSE; |
1243 | } else { |
1244 | /* |
1245 | * kernel control socket kcunit numbers start at 1 |
1246 | */ |
1247 | content_filters[sac->sc_unit - 1] = cfc; |
1248 | |
1249 | cfc->cf_kcref = kctlref; |
1250 | cfc->cf_kcunit = sac->sc_unit; |
1251 | TAILQ_INIT(&cfc->cf_sock_entries); |
1252 | |
1253 | *unitinfo = cfc; |
1254 | cfil_active_count++; |
1255 | |
1256 | if (cfil_active_count == 1) { |
1257 | soflow_feat_set_functions(cfil_dgram_gc_needed, cfil_dgram_gc_perform, |
1258 | cfil_dgram_detach_entry, cfil_dgram_detach_db); |
1259 | } |
1260 | |
1261 | // Allocate periodic stats buffer for this filter |
1262 | if (global_cfil_stats_report_buffers[cfc->cf_kcunit - 1] == NULL) { |
1263 | cfil_rw_unlock_exclusive(&cfil_lck_rw); |
1264 | |
1265 | struct cfil_stats_report_buffer *buf; |
1266 | |
1267 | buf = kalloc_type(struct cfil_stats_report_buffer, |
1268 | Z_WAITOK | Z_ZERO | Z_NOFAIL); |
1269 | |
1270 | cfil_rw_lock_exclusive(&cfil_lck_rw); |
1271 | |
1272 | /* Another thread may have won the race */ |
1273 | if (global_cfil_stats_report_buffers[cfc->cf_kcunit - 1] != NULL) { |
1274 | kfree_type(struct cfil_stats_report_buffer, buf); |
1275 | } else { |
1276 | global_cfil_stats_report_buffers[cfc->cf_kcunit - 1] = buf; |
1277 | } |
1278 | } |
1279 | } |
1280 | cfil_rw_unlock_exclusive(&cfil_lck_rw); |
1281 | |
1282 | if (error != 0 && cfc != NULL) { |
1283 | zfree(content_filter_zone, cfc); |
1284 | } |
1285 | |
1286 | if (error == 0) { |
1287 | OSIncrementAtomic(&cfil_stats.cfs_ctl_connect_ok); |
1288 | } else { |
1289 | OSIncrementAtomic(&cfil_stats.cfs_ctl_connect_fail); |
1290 | } |
1291 | |
1292 | CFIL_LOG(LOG_INFO, "return %d cfil_active_count %u kcunit %u" , |
1293 | error, cfil_active_count, sac->sc_unit); |
1294 | |
1295 | return error; |
1296 | } |
1297 | |
1298 | static void |
1299 | cfil_update_behavior_flags(void) |
1300 | { |
1301 | struct content_filter *cfc = NULL; |
1302 | |
1303 | // Update global flag |
1304 | bool preserve_connections = false; |
1305 | for (int i = 0; i < MAX_CONTENT_FILTER; i++) { |
1306 | cfc = content_filters[i]; |
1307 | if (cfc != NULL) { |
1308 | if (cfc->cf_flags & CFF_PRESERVE_CONNECTIONS) { |
1309 | preserve_connections = true; |
1310 | } else { |
1311 | preserve_connections = false; |
1312 | break; |
1313 | } |
1314 | } |
1315 | } |
1316 | if (preserve_connections == true) { |
1317 | cfil_behavior_flags |= CFIL_BEHAVIOR_FLAG_PRESERVE_CONNECTIONS; |
1318 | } else { |
1319 | cfil_behavior_flags &= ~CFIL_BEHAVIOR_FLAG_PRESERVE_CONNECTIONS; |
1320 | } |
1321 | CFIL_LOG(LOG_INFO, "CFIL Preserve Connections - %s" , |
1322 | (cfil_behavior_flags & CFIL_BEHAVIOR_FLAG_PRESERVE_CONNECTIONS) ? "On" : "Off" ); |
1323 | } |
1324 | |
1325 | static errno_t |
1326 | cfil_ctl_disconnect(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo) |
1327 | { |
1328 | #pragma unused(kctlref) |
1329 | errno_t error = 0; |
1330 | struct content_filter *cfc; |
1331 | struct cfil_entry *entry; |
1332 | uint64_t sock_flow_id = 0; |
1333 | |
1334 | CFIL_LOG(LOG_NOTICE, "" ); |
1335 | |
1336 | if (kcunit > MAX_CONTENT_FILTER) { |
1337 | CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)" , |
1338 | kcunit, MAX_CONTENT_FILTER); |
1339 | error = EINVAL; |
1340 | goto done; |
1341 | } |
1342 | |
1343 | cfc = (struct content_filter *)unitinfo; |
1344 | if (cfc == NULL) { |
1345 | goto done; |
1346 | } |
1347 | |
1348 | cfil_rw_lock_exclusive(&cfil_lck_rw); |
1349 | if (content_filters[kcunit - 1] != cfc || cfc->cf_kcunit != kcunit) { |
1350 | CFIL_LOG(LOG_ERR, "bad unit info %u)" , |
1351 | kcunit); |
1352 | cfil_rw_unlock_exclusive(&cfil_lck_rw); |
1353 | goto done; |
1354 | } |
1355 | cfc->cf_flags |= CFF_DETACHING; |
1356 | /* |
1357 | * Remove all sockets from the filter |
1358 | */ |
1359 | while ((entry = TAILQ_FIRST(&cfc->cf_sock_entries)) != NULL) { |
1360 | cfil_rw_lock_assert_held(&cfil_lck_rw, 1); |
1361 | |
1362 | verify_content_filter(cfc); |
1363 | /* |
		 * Accept all outstanding data by pushing it to the next filter
		 * or back to the socket
		 *
		 * TBD: Actually we should make sure all data has been pushed
		 * back to the socket
1369 | */ |
1370 | if (entry->cfe_cfil_info && entry->cfe_cfil_info->cfi_so) { |
1371 | struct cfil_info *cfil_info = entry->cfe_cfil_info; |
1372 | struct socket *so = cfil_info->cfi_so; |
1373 | sock_flow_id = cfil_info->cfi_sock_id; |
1374 | |
1375 | /* Need to let data flow immediately */ |
1376 | entry->cfe_flags |= CFEF_SENT_SOCK_ATTACHED | |
1377 | CFEF_DATA_START; |
1378 | |
			// Before we release the global lock, retain the cfil_info:
			// we attempt to retain a valid cfil_info to prevent any deallocation until
			// we are done. Abort the retain if cfil_info has already entered the free code path.
1382 | if (cfil_info == NULL || os_ref_retain_try(rc: &cfil_info->cfi_ref_count) == false) { |
1383 | // Failing to retain cfil_info means detach is in progress already, |
1384 | // remove entry from filter list and move on. |
1385 | entry->cfe_filter = NULL; |
1386 | entry->cfe_necp_control_unit = 0; |
1387 | TAILQ_REMOVE(&cfc->cf_sock_entries, entry, cfe_link); |
1388 | cfc->cf_sock_count--; |
1389 | continue; |
1390 | } |
1391 | |
1392 | /* |
1393 | * Respect locking hierarchy |
1394 | */ |
1395 | cfil_rw_unlock_exclusive(lck: &cfil_lck_rw); |
1396 | |
1397 | // Search for socket from cfil_info sock_flow_id and lock so |
1398 | so = cfil_socket_from_sock_id(sock_flow_id, false); |
1399 | if (so == NULL || so != cfil_info->cfi_so) { |
1400 | cfil_rw_lock_exclusive(lck: &cfil_lck_rw); |
1401 | |
1402 | // Socket has already been disconnected and removed from socket list. |
1403 | // Remove entry from filter list and move on. |
1404 | if (entry == TAILQ_FIRST(&cfc->cf_sock_entries)) { |
1405 | entry->cfe_filter = NULL; |
1406 | entry->cfe_necp_control_unit = 0; |
1407 | TAILQ_REMOVE(&cfc->cf_sock_entries, entry, cfe_link); |
1408 | cfc->cf_sock_count--; |
1409 | } |
1410 | |
1411 | goto release_cfil_info; |
1412 | } |
1413 | |
1414 | /* |
1415 | * When cfe_filter is NULL the filter is detached |
1416 | * and the entry has been removed from cf_sock_entries |
1417 | */ |
1418 | if ((so->so_cfil == NULL && so->so_flow_db == NULL) || entry->cfe_filter == NULL) { |
1419 | cfil_rw_lock_exclusive(lck: &cfil_lck_rw); |
1420 | goto release; |
1421 | } |
1422 | |
1423 | (void) cfil_action_data_pass(so, cfil_info, kcunit, 1, |
1424 | CFM_MAX_OFFSET, |
1425 | CFM_MAX_OFFSET); |
1426 | |
1427 | (void) cfil_action_data_pass(so, cfil_info, kcunit, 0, |
1428 | CFM_MAX_OFFSET, |
1429 | CFM_MAX_OFFSET); |
1430 | |
1431 | cfil_rw_lock_exclusive(lck: &cfil_lck_rw); |
1432 | |
1433 | /* |
			 * Check again to make sure the cfil_info is still valid
			 * as the socket may have been unlocked when calling
			 * cfil_acquire_sockbuf()
1437 | */ |
1438 | if (entry->cfe_filter == NULL || |
1439 | (so->so_cfil == NULL && soflow_db_get_feature_context(so->so_flow_db, sock_flow_id) == NULL)) { |
1440 | goto release; |
1441 | } |
1442 | |
1443 | /* The filter is now detached */ |
1444 | entry->cfe_flags |= CFEF_CFIL_DETACHED; |
1445 | |
1446 | if (cfil_info->cfi_debug) { |
1447 | cfil_info_log(LOG_ERR, cfil_info, "CFIL: FILTER DISCONNECTED" ); |
1448 | } |
1449 | |
1450 | CFIL_LOG(LOG_NOTICE, "so %llx detached %u" , |
1451 | (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); |
1452 | if ((cfil_info->cfi_flags & CFIF_CLOSE_WAIT) && |
1453 | cfil_filters_attached(so) == 0) { |
1454 | CFIL_LOG(LOG_NOTICE, "so %llx waking" , |
1455 | (uint64_t)VM_KERNEL_ADDRPERM(so)); |
1456 | wakeup(chan: (caddr_t)cfil_info); |
1457 | } |
1458 | |
1459 | /* |
1460 | * Remove the filter entry from the content filter |
1461 | * but leave the rest of the state intact as the queues |
1462 | * may not be empty yet |
1463 | */ |
1464 | entry->cfe_filter = NULL; |
1465 | entry->cfe_necp_control_unit = 0; |
1466 | |
1467 | TAILQ_REMOVE(&cfc->cf_sock_entries, entry, cfe_link); |
1468 | cfc->cf_sock_count--; |
1469 | |
			// This is the last filter disconnecting, clear the cfil_info
			// saved control unit so we will be able to drop this flow if
			// a new filter gets installed.
1473 | if (cfil_active_count == 1) { |
1474 | cfil_info->cfi_filter_control_unit = 0; |
1475 | } |
1476 | release: |
1477 | socket_unlock(so, refcount: 1); |
1478 | |
1479 | release_cfil_info: |
1480 | /* |
1481 | * Release reference on cfil_info. To avoid double locking, |
1482 | * temporarily unlock in case it has been detached and we |
1483 | * end up freeing it which will take the global lock again. |
1484 | */ |
1485 | cfil_rw_unlock_exclusive(lck: &cfil_lck_rw); |
1486 | CFIL_INFO_FREE(cfil_info); |
1487 | cfil_rw_lock_exclusive(lck: &cfil_lck_rw); |
1488 | } |
1489 | } |
1490 | verify_content_filter(cfc); |
1491 | |
1492 | /* Free the stats buffer for this filter */ |
1493 | if (global_cfil_stats_report_buffers[cfc->cf_kcunit - 1] != NULL) { |
1494 | kfree_type(struct cfil_stats_report_buffer, |
1495 | global_cfil_stats_report_buffers[cfc->cf_kcunit - 1]); |
1496 | global_cfil_stats_report_buffers[cfc->cf_kcunit - 1] = NULL; |
1497 | } |
1498 | VERIFY(cfc->cf_sock_count == 0); |
1499 | |
1500 | /* |
1501 | * Make filter inactive |
1502 | */ |
1503 | content_filters[kcunit - 1] = NULL; |
1504 | cfil_active_count--; |
1505 | cfil_update_behavior_flags(); |
1506 | cfil_rw_unlock_exclusive(lck: &cfil_lck_rw); |
1507 | |
1508 | if (cfc->cf_crypto_state != NULL) { |
1509 | cfil_crypto_cleanup_state(state: cfc->cf_crypto_state); |
1510 | cfc->cf_crypto_state = NULL; |
1511 | } |
1512 | |
1513 | zfree(content_filter_zone, cfc); |
1514 | done: |
1515 | if (error == 0) { |
1516 | OSIncrementAtomic(&cfil_stats.cfs_ctl_disconnect_ok); |
1517 | } else { |
1518 | OSIncrementAtomic(&cfil_stats.cfs_ctl_disconnect_fail); |
1519 | } |
1520 | |
1521 | CFIL_LOG(LOG_INFO, "return %d cfil_active_count %u kcunit %u" , |
1522 | error, cfil_active_count, kcunit); |
1523 | |
1524 | return error; |
1525 | } |
1526 | |
1527 | /* |
1528 | * cfil_acquire_sockbuf() |
1529 | * |
1530 | * Prevent any other thread from acquiring the sockbuf |
1531 | * We use sb_cfil_thread as a semaphore to prevent other threads from |
1532 | * messing with the sockbuf -- see sblock() |
1533 | * Note: We do not set SB_LOCK here because the thread may check or modify |
1534 | * SB_LOCK several times until it calls cfil_release_sockbuf() -- currently |
1535 | * sblock(), sbunlock() or sodefunct() |
1536 | */ |
1537 | static int |
1538 | cfil_acquire_sockbuf(struct socket *so, struct cfil_info *cfil_info, int outgoing) |
1539 | { |
1540 | thread_t tp = current_thread(); |
1541 | struct sockbuf *sb = outgoing ? &so->so_snd : &so->so_rcv; |
1542 | lck_mtx_t *mutex_held; |
1543 | int error = 0; |
1544 | |
1545 | /* |
1546 | * Wait until no thread is holding the sockbuf and other content |
1547 | * filter threads have released the sockbuf |
1548 | */ |
1549 | while ((sb->sb_flags & SB_LOCK) || |
1550 | (sb->sb_cfil_thread != NULL && sb->sb_cfil_thread != tp)) { |
1551 | if (so->so_proto->pr_getlock != NULL) { |
1552 | mutex_held = (*so->so_proto->pr_getlock)(so, PR_F_WILLUNLOCK); |
1553 | } else { |
1554 | mutex_held = so->so_proto->pr_domain->dom_mtx; |
1555 | } |
1556 | |
1557 | LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); |
1558 | |
1559 | sb->sb_wantlock++; |
1560 | VERIFY(sb->sb_wantlock != 0); |
1561 | |
1562 | msleep(chan: &sb->sb_flags, mtx: mutex_held, PSOCK, wmesg: "cfil_acquire_sockbuf" , |
1563 | NULL); |
1564 | |
1565 | VERIFY(sb->sb_wantlock != 0); |
1566 | sb->sb_wantlock--; |
1567 | } |
1568 | /* |
	 * Use a reference count for repeated calls on the same thread
1570 | */ |
1571 | if (sb->sb_cfil_refs == 0) { |
1572 | VERIFY(sb->sb_cfil_thread == NULL); |
1573 | VERIFY((sb->sb_flags & SB_LOCK) == 0); |
1574 | |
1575 | sb->sb_cfil_thread = tp; |
1576 | sb->sb_flags |= SB_LOCK; |
1577 | } |
1578 | sb->sb_cfil_refs++; |
1579 | |
	/* We acquire the socket buffer when we need to clean up */
1581 | if (cfil_info == NULL) { |
1582 | CFIL_LOG(LOG_ERR, "so %llx cfil detached" , |
1583 | (uint64_t)VM_KERNEL_ADDRPERM(so)); |
1584 | error = 0; |
1585 | } else if (cfil_info->cfi_flags & CFIF_DROP) { |
1586 | CFIL_LOG(LOG_ERR, "so %llx drop set" , |
1587 | (uint64_t)VM_KERNEL_ADDRPERM(so)); |
1588 | error = EPIPE; |
1589 | } |
1590 | |
1591 | return error; |
1592 | } |
1593 | |
1594 | static void |
1595 | cfil_release_sockbuf(struct socket *so, int outgoing) |
1596 | { |
1597 | struct sockbuf *sb = outgoing ? &so->so_snd : &so->so_rcv; |
1598 | thread_t tp = current_thread(); |
1599 | |
1600 | socket_lock_assert_owned(so); |
1601 | |
1602 | if (sb->sb_cfil_thread != NULL && sb->sb_cfil_thread != tp) { |
1603 | panic("%s sb_cfil_thread %p not current %p" , __func__, |
1604 | sb->sb_cfil_thread, tp); |
1605 | } |
1606 | /* |
1607 | * Don't panic if we are defunct because SB_LOCK has |
1608 | * been cleared by sodefunct() |
1609 | */ |
1610 | if (!(so->so_flags & SOF_DEFUNCT) && !(sb->sb_flags & SB_LOCK)) { |
1611 | panic("%s SB_LOCK not set on %p" , __func__, |
1612 | sb); |
1613 | } |
1614 | /* |
1615 | * We can unlock when the thread unwinds to the last reference |
1616 | */ |
1617 | sb->sb_cfil_refs--; |
1618 | if (sb->sb_cfil_refs == 0) { |
1619 | sb->sb_cfil_thread = NULL; |
1620 | sb->sb_flags &= ~SB_LOCK; |
1621 | |
1622 | if (sb->sb_wantlock > 0) { |
1623 | wakeup(chan: &sb->sb_flags); |
1624 | } |
1625 | } |
1626 | } |
1627 | |
1628 | cfil_sock_id_t |
1629 | cfil_sock_id_from_socket(struct socket *so) |
1630 | { |
1631 | if ((so->so_flags & SOF_CONTENT_FILTER) && so->so_cfil) { |
1632 | return so->so_cfil->cfi_sock_id; |
1633 | } else { |
1634 | return CFIL_SOCK_ID_NONE; |
1635 | } |
1636 | } |
1637 | |
1638 | /* |
1639 | * cfil_socket_safe_lock - |
1640 | * This routine attempts to lock the socket safely. |
1641 | * |
1642 | * The passed in pcbinfo is assumed to be locked and must be unlocked once the |
1643 | * inp state is safeguarded and before we attempt to lock/unlock the socket. |
1644 | * This is to prevent getting blocked by socket_lock() while holding the pcbinfo |
1645 | * lock, avoiding potential deadlock with other processes contending for the same |
 * resources. This also avoids double locking the pcbinfo for raw IP sockets,
 * since rip_unlock() will lock ripcbinfo if it needs to dispose of the inpcb
 * when so_usecount is 0.
1649 | */ |
1650 | static bool |
1651 | cfil_socket_safe_lock(struct inpcb *inp, struct inpcbinfo *pcbinfo) |
1652 | { |
1653 | struct socket *so = NULL; |
1654 | |
1655 | VERIFY(pcbinfo != NULL); |
1656 | |
1657 | if (in_pcb_checkstate(inp, WNT_ACQUIRE, 0) != WNT_STOPUSING) { |
1658 | // Safeguarded the inp state, unlock pcbinfo before locking socket. |
1659 | lck_rw_done(lck: &pcbinfo->ipi_lock); |
1660 | |
1661 | so = inp->inp_socket; |
1662 | socket_lock(so, refcount: 1); |
1663 | if (in_pcb_checkstate(inp, WNT_RELEASE, 1) != WNT_STOPUSING) { |
1664 | return true; |
1665 | } |
1666 | } else { |
		// Failed to safeguard the inp state, unlock pcbinfo and abort.
1668 | lck_rw_done(lck: &pcbinfo->ipi_lock); |
1669 | } |
1670 | |
1671 | if (so) { |
1672 | socket_unlock(so, refcount: 1); |
1673 | } |
1674 | return false; |
1675 | } |
1676 | |
1677 | static struct socket * |
1678 | cfil_socket_from_sock_id(cfil_sock_id_t cfil_sock_id, bool udp_only) |
1679 | { |
1680 | struct socket *so = NULL; |
1681 | u_int64_t gencnt = cfil_sock_id >> 32; |
1682 | u_int32_t flowhash = (u_int32_t)(cfil_sock_id & 0x0ffffffff); |
1683 | struct inpcb *inp = NULL; |
1684 | struct inpcbinfo *pcbinfo = NULL; |
1685 | |
1686 | if (udp_only) { |
1687 | goto find_udp; |
1688 | } |
1689 | |
1690 | pcbinfo = &tcbinfo; |
1691 | lck_rw_lock_shared(lck: &pcbinfo->ipi_lock); |
1692 | LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { |
1693 | if (inp->inp_state != INPCB_STATE_DEAD && |
1694 | inp->inp_socket != NULL && |
1695 | inp->inp_flowhash == flowhash && |
1696 | (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt && |
1697 | inp->inp_socket->so_cfil != NULL) { |
1698 | if (cfil_socket_safe_lock(inp, pcbinfo)) { |
1699 | so = inp->inp_socket; |
1700 | } |
1701 | /* pcbinfo is already unlocked, we are done. */ |
1702 | goto done; |
1703 | } |
1704 | } |
1705 | lck_rw_done(lck: &pcbinfo->ipi_lock); |
1706 | if (so != NULL) { |
1707 | goto done; |
1708 | } |
1709 | |
1710 | find_udp: |
1711 | |
1712 | pcbinfo = &udbinfo; |
1713 | lck_rw_lock_shared(lck: &pcbinfo->ipi_lock); |
1714 | LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { |
1715 | if (inp->inp_state != INPCB_STATE_DEAD && |
1716 | inp->inp_socket != NULL && |
1717 | inp->inp_socket->so_flow_db != NULL && |
1718 | (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt) { |
1719 | if (cfil_socket_safe_lock(inp, pcbinfo)) { |
1720 | so = inp->inp_socket; |
1721 | } |
1722 | /* pcbinfo is already unlocked, we are done. */ |
1723 | goto done; |
1724 | } |
1725 | } |
1726 | lck_rw_done(lck: &pcbinfo->ipi_lock); |
1727 | if (so != NULL) { |
1728 | goto done; |
1729 | } |
1730 | |
1731 | pcbinfo = &ripcbinfo; |
1732 | lck_rw_lock_shared(lck: &pcbinfo->ipi_lock); |
1733 | LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { |
1734 | if (inp->inp_state != INPCB_STATE_DEAD && |
1735 | inp->inp_socket != NULL && |
1736 | inp->inp_socket->so_flow_db != NULL && |
1737 | (inp->inp_socket->so_gencnt & 0x0ffffffff) == gencnt) { |
1738 | if (cfil_socket_safe_lock(inp, pcbinfo)) { |
1739 | so = inp->inp_socket; |
1740 | } |
1741 | /* pcbinfo is already unlocked, we are done. */ |
1742 | goto done; |
1743 | } |
1744 | } |
1745 | lck_rw_done(lck: &pcbinfo->ipi_lock); |
1746 | |
1747 | done: |
1748 | if (so == NULL) { |
1749 | OSIncrementAtomic(&cfil_stats.cfs_sock_id_not_found); |
1750 | CFIL_LOG(LOG_DEBUG, |
1751 | "no socket for sock_id %llx gencnt %llx flowhash %x" , |
1752 | cfil_sock_id, gencnt, flowhash); |
1753 | } |
1754 | |
1755 | return so; |
1756 | } |
1757 | |
1758 | static struct socket * |
1759 | cfil_socket_from_client_uuid(uuid_t necp_client_uuid, bool *cfil_attached) |
1760 | { |
1761 | struct socket *so = NULL; |
1762 | struct inpcb *inp = NULL; |
1763 | struct inpcbinfo *pcbinfo = &tcbinfo; |
1764 | |
1765 | lck_rw_lock_shared(lck: &pcbinfo->ipi_lock); |
1766 | LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { |
1767 | if (inp->inp_state != INPCB_STATE_DEAD && |
1768 | inp->inp_socket != NULL && |
1769 | uuid_compare(uu1: inp->necp_client_uuid, uu2: necp_client_uuid) == 0) { |
1770 | *cfil_attached = (inp->inp_socket->so_cfil != NULL); |
1771 | if (cfil_socket_safe_lock(inp, pcbinfo)) { |
1772 | so = inp->inp_socket; |
1773 | } |
1774 | /* pcbinfo is already unlocked, we are done. */ |
1775 | goto done; |
1776 | } |
1777 | } |
1778 | lck_rw_done(lck: &pcbinfo->ipi_lock); |
1779 | if (so != NULL) { |
1780 | goto done; |
1781 | } |
1782 | |
1783 | pcbinfo = &udbinfo; |
1784 | lck_rw_lock_shared(lck: &pcbinfo->ipi_lock); |
1785 | LIST_FOREACH(inp, pcbinfo->ipi_listhead, inp_list) { |
1786 | if (inp->inp_state != INPCB_STATE_DEAD && |
1787 | inp->inp_socket != NULL && |
1788 | uuid_compare(uu1: inp->necp_client_uuid, uu2: necp_client_uuid) == 0) { |
1789 | *cfil_attached = (inp->inp_socket->so_flow_db != NULL); |
1790 | if (cfil_socket_safe_lock(inp, pcbinfo)) { |
1791 | so = inp->inp_socket; |
1792 | } |
1793 | /* pcbinfo is already unlocked, we are done. */ |
1794 | goto done; |
1795 | } |
1796 | } |
1797 | lck_rw_done(lck: &pcbinfo->ipi_lock); |
1798 | |
1799 | done: |
1800 | return so; |
1801 | } |
1802 | |
1803 | static void |
1804 | cfil_info_stats_toggle(struct cfil_info *cfil_info, struct cfil_entry *entry, uint32_t report_frequency) |
1805 | { |
1806 | struct cfil_info *cfil = NULL; |
1807 | Boolean found = FALSE; |
1808 | int kcunit; |
1809 | |
1810 | if (cfil_info == NULL) { |
1811 | return; |
1812 | } |
1813 | |
1814 | if (report_frequency) { |
1815 | if (entry == NULL) { |
1816 | return; |
1817 | } |
1818 | |
1819 | // Update stats reporting frequency. |
1820 | if (entry->cfe_stats_report_frequency != report_frequency) { |
1821 | entry->cfe_stats_report_frequency = report_frequency; |
1822 | if (entry->cfe_stats_report_frequency < CFIL_STATS_REPORT_INTERVAL_MIN_MSEC) { |
1823 | entry->cfe_stats_report_frequency = CFIL_STATS_REPORT_INTERVAL_MIN_MSEC; |
1824 | } |
1825 | microuptime(tv: &entry->cfe_stats_report_ts); |
1826 | |
1827 | // Insert cfil_info into list only if it is not in yet. |
1828 | TAILQ_FOREACH(cfil, &cfil_sock_head_stats, cfi_link_stats) { |
1829 | if (cfil == cfil_info) { |
1830 | return; |
1831 | } |
1832 | } |
1833 | |
1834 | TAILQ_INSERT_TAIL(&cfil_sock_head_stats, cfil_info, cfi_link_stats); |
1835 | |
1836 | // Wake up stats thread if this is first flow added |
1837 | if (cfil_sock_attached_stats_count == 0) { |
1838 | thread_wakeup((caddr_t)&cfil_sock_attached_stats_count); |
1839 | } |
1840 | cfil_sock_attached_stats_count++; |
1841 | |
1842 | if (cfil_info->cfi_debug && cfil_log_stats) { |
1843 | CFIL_LOG(LOG_ERR, "CFIL: VERDICT RECEIVED - STATS FLOW INSERTED: <so %llx sockID %llu <%llx>> stats frequency %d msecs" , |
1844 | cfil_info->cfi_so ? (uint64_t)VM_KERNEL_ADDRPERM(cfil_info->cfi_so) : 0, |
1845 | cfil_info->cfi_sock_id, cfil_info->cfi_sock_id, |
1846 | entry->cfe_stats_report_frequency); |
1847 | } |
1848 | } |
1849 | } else { |
1850 | // Turn off stats reporting for this filter. |
1851 | if (entry != NULL) { |
1852 | // Already off, no change. |
1853 | if (entry->cfe_stats_report_frequency == 0) { |
1854 | return; |
1855 | } |
1856 | |
1857 | entry->cfe_stats_report_frequency = 0; |
1858 | // If cfil_info still has filter(s) asking for stats, no need to remove from list. |
1859 | for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { |
1860 | if (cfil_info->cfi_entries[kcunit - 1].cfe_stats_report_frequency > 0) { |
1861 | return; |
1862 | } |
1863 | } |
1864 | } |
1865 | |
1866 | // No more filter asking for stats for this cfil_info, remove from list. |
1867 | if (!TAILQ_EMPTY(&cfil_sock_head_stats)) { |
1868 | found = FALSE; |
1869 | TAILQ_FOREACH(cfil, &cfil_sock_head_stats, cfi_link_stats) { |
1870 | if (cfil == cfil_info) { |
1871 | found = TRUE; |
1872 | break; |
1873 | } |
1874 | } |
1875 | if (found) { |
1876 | cfil_sock_attached_stats_count--; |
1877 | TAILQ_REMOVE(&cfil_sock_head_stats, cfil_info, cfi_link_stats); |
1878 | if (cfil_info->cfi_debug && cfil_log_stats) { |
1879 | CFIL_LOG(LOG_ERR, "CFIL: VERDICT RECEIVED - STATS FLOW DELETED: <so %llx sockID %llu <%llx>> stats frequency reset" , |
1880 | cfil_info->cfi_so ? (uint64_t)VM_KERNEL_ADDRPERM(cfil_info->cfi_so) : 0, |
1881 | cfil_info->cfi_sock_id, cfil_info->cfi_sock_id); |
1882 | } |
1883 | } |
1884 | } |
1885 | } |
1886 | } |
1887 | |
1888 | static errno_t |
1889 | cfil_ctl_send(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, mbuf_t m, |
1890 | int flags) |
1891 | { |
1892 | #pragma unused(kctlref, flags) |
1893 | errno_t error = 0; |
1894 | struct cfil_msg_hdr *msghdr; |
1895 | struct content_filter *cfc = (struct content_filter *)unitinfo; |
1896 | struct socket *so; |
1897 | struct cfil_msg_action *action_msg; |
1898 | struct cfil_entry *entry; |
1899 | struct cfil_info *cfil_info = NULL; |
1900 | unsigned int data_len = 0; |
1901 | |
1902 | CFIL_LOG(LOG_INFO, "" ); |
1903 | |
1904 | if (cfc == NULL) { |
1905 | CFIL_LOG(LOG_ERR, "no unitinfo" ); |
1906 | error = EINVAL; |
1907 | goto done; |
1908 | } |
1909 | |
1910 | if (kcunit > MAX_CONTENT_FILTER) { |
1911 | CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)" , |
1912 | kcunit, MAX_CONTENT_FILTER); |
1913 | error = EINVAL; |
1914 | goto done; |
1915 | } |
1916 | if (m == NULL) { |
1917 | CFIL_LOG(LOG_ERR, "null mbuf" ); |
1918 | error = EINVAL; |
1919 | goto done; |
1920 | } |
1921 | data_len = m_length(m); |
1922 | |
1923 | if (data_len < sizeof(struct cfil_msg_hdr)) { |
1924 | CFIL_LOG(LOG_ERR, "too short %u" , data_len); |
1925 | error = EINVAL; |
1926 | goto done; |
1927 | } |
1928 | msghdr = (struct cfil_msg_hdr *)mbuf_data(mbuf: m); |
1929 | if (msghdr->cfm_version != CFM_VERSION_CURRENT) { |
1930 | CFIL_LOG(LOG_ERR, "bad version %u" , msghdr->cfm_version); |
1931 | error = EINVAL; |
1932 | goto done; |
1933 | } |
1934 | if (msghdr->cfm_type != CFM_TYPE_ACTION) { |
1935 | CFIL_LOG(LOG_ERR, "bad type %u" , msghdr->cfm_type); |
1936 | error = EINVAL; |
1937 | goto done; |
1938 | } |
1939 | if (msghdr->cfm_len > data_len) { |
1940 | CFIL_LOG(LOG_ERR, "bad length %u" , msghdr->cfm_len); |
1941 | error = EINVAL; |
1942 | goto done; |
1943 | } |
1944 | |
1945 | /* Validate action operation */ |
1946 | switch (msghdr->cfm_op) { |
1947 | case CFM_OP_DATA_UPDATE: |
1948 | OSIncrementAtomic( |
1949 | &cfil_stats.cfs_ctl_action_data_update); |
1950 | break; |
1951 | case CFM_OP_DROP: |
1952 | OSIncrementAtomic(&cfil_stats.cfs_ctl_action_drop); |
1953 | break; |
1954 | case CFM_OP_BLESS_CLIENT: |
1955 | if (msghdr->cfm_len != sizeof(struct cfil_msg_bless_client)) { |
1956 | OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_len); |
1957 | error = EINVAL; |
1958 | CFIL_LOG(LOG_ERR, "bad len: %u for op %u" , |
1959 | msghdr->cfm_len, |
1960 | msghdr->cfm_op); |
1961 | goto done; |
1962 | } |
1963 | error = cfil_action_bless_client(kcunit, msghdr); |
1964 | goto done; |
1965 | case CFM_OP_SET_CRYPTO_KEY: |
1966 | if (msghdr->cfm_len != sizeof(struct cfil_msg_set_crypto_key)) { |
1967 | OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_len); |
1968 | error = EINVAL; |
1969 | CFIL_LOG(LOG_ERR, "bad len: %u for op %u" , |
1970 | msghdr->cfm_len, |
1971 | msghdr->cfm_op); |
1972 | goto done; |
1973 | } |
1974 | error = cfil_action_set_crypto_key(kcunit, msghdr); |
1975 | goto done; |
1976 | default: |
1977 | OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_op); |
1978 | CFIL_LOG(LOG_ERR, "bad op %u" , msghdr->cfm_op); |
1979 | error = EINVAL; |
1980 | goto done; |
1981 | } |
1982 | if (msghdr->cfm_len != sizeof(struct cfil_msg_action)) { |
1983 | OSIncrementAtomic(&cfil_stats.cfs_ctl_action_bad_len); |
1984 | error = EINVAL; |
1985 | CFIL_LOG(LOG_ERR, "bad len: %u for op %u" , |
1986 | msghdr->cfm_len, |
1987 | msghdr->cfm_op); |
1988 | goto done; |
1989 | } |
1990 | cfil_rw_lock_shared(lck: &cfil_lck_rw); |
1991 | if (cfc != (void *)content_filters[kcunit - 1]) { |
1992 | CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u" , |
1993 | kcunit); |
1994 | error = EINVAL; |
1995 | cfil_rw_unlock_shared(lck: &cfil_lck_rw); |
1996 | goto done; |
1997 | } |
1998 | cfil_rw_unlock_shared(lck: &cfil_lck_rw); |
1999 | |
2000 | // Search for socket (TCP+UDP and lock so) |
2001 | so = cfil_socket_from_sock_id(cfil_sock_id: msghdr->cfm_sock_id, false); |
2002 | if (so == NULL) { |
2003 | CFIL_LOG(LOG_NOTICE, "bad sock_id %llx" , |
2004 | msghdr->cfm_sock_id); |
2005 | error = EINVAL; |
2006 | goto done; |
2007 | } |
2008 | |
2009 | cfil_info = so->so_flow_db != NULL ? |
2010 | soflow_db_get_feature_context(so->so_flow_db, msghdr->cfm_sock_id) : so->so_cfil; |
2011 | |
	// We should not obtain the global lock here, to avoid a deadlock further down this path.
	// Instead we attempt to retain a valid cfil_info to prevent any deallocation until
	// we are done. Abort the retain if cfil_info has already entered the free code path.
2015 | if (cfil_info && os_ref_retain_try(rc: &cfil_info->cfi_ref_count) == false) { |
2016 | socket_unlock(so, refcount: 1); |
2017 | goto done; |
2018 | } |
2019 | |
2020 | if (cfil_info == NULL) { |
2021 | CFIL_LOG(LOG_NOTICE, "so %llx <id %llu> not attached" , |
2022 | (uint64_t)VM_KERNEL_ADDRPERM(so), msghdr->cfm_sock_id); |
2023 | error = EINVAL; |
2024 | goto unlock; |
2025 | } else if (cfil_info->cfi_flags & CFIF_DROP) { |
2026 | CFIL_LOG(LOG_NOTICE, "so %llx drop set" , |
2027 | (uint64_t)VM_KERNEL_ADDRPERM(so)); |
2028 | error = EINVAL; |
2029 | goto unlock; |
2030 | } |
2031 | |
2032 | if (cfil_info->cfi_debug) { |
2033 | cfil_info_log(LOG_ERR, cfil_info, "CFIL: RECEIVED MSG FROM FILTER" ); |
2034 | } |
2035 | |
2036 | entry = &cfil_info->cfi_entries[kcunit - 1]; |
2037 | if (entry->cfe_filter == NULL) { |
2038 | CFIL_LOG(LOG_NOTICE, "so %llx no filter" , |
2039 | (uint64_t)VM_KERNEL_ADDRPERM(so)); |
2040 | error = EINVAL; |
2041 | goto unlock; |
2042 | } |
2043 | |
2044 | if (entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) { |
2045 | entry->cfe_flags |= CFEF_DATA_START; |
2046 | } else { |
2047 | CFIL_LOG(LOG_ERR, |
2048 | "so %llx attached not sent for %u" , |
2049 | (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); |
2050 | error = EINVAL; |
2051 | goto unlock; |
2052 | } |
2053 | |
2054 | microuptime(tv: &entry->cfe_last_action); |
2055 | CFI_ADD_TIME_LOG(cfil_info, &entry->cfe_last_action, &cfil_info->cfi_first_event, msghdr->cfm_op); |
2056 | |
2057 | action_msg = (struct cfil_msg_action *)msghdr; |
2058 | |
2059 | switch (msghdr->cfm_op) { |
2060 | case CFM_OP_DATA_UPDATE: |
2061 | |
2062 | if (cfil_info->cfi_debug) { |
2063 | cfil_info_log(LOG_ERR, cfil_info, "CFIL: RECEIVED CFM_OP_DATA_UPDATE" ); |
2064 | CFIL_LOG(LOG_ERR, "CFIL: VERDICT RECEIVED: <so %llx sockID %llu <%llx>> <IN peek:%llu pass:%llu, OUT peek:%llu pass:%llu>" , |
2065 | (uint64_t)VM_KERNEL_ADDRPERM(so), |
2066 | cfil_info->cfi_sock_id, cfil_info->cfi_sock_id, |
2067 | action_msg->cfa_in_peek_offset, action_msg->cfa_in_pass_offset, |
2068 | action_msg->cfa_out_peek_offset, action_msg->cfa_out_pass_offset); |
2069 | } |
2070 | |
2071 | /* |
2072 | * Received verdict, at this point we know this |
2073 | * socket connection is allowed. Unblock thread |
2074 | * immediately before proceeding to process the verdict. |
2075 | */ |
2076 | cfil_sock_received_verdict(so); |
2077 | |
2078 | if (action_msg->cfa_out_peek_offset != 0 || |
2079 | action_msg->cfa_out_pass_offset != 0) { |
2080 | error = cfil_action_data_pass(so, cfil_info, kcunit, 1, |
2081 | action_msg->cfa_out_pass_offset, |
2082 | action_msg->cfa_out_peek_offset); |
2083 | } |
2084 | if (error == EJUSTRETURN) { |
2085 | error = 0; |
2086 | } |
2087 | if (error != 0) { |
2088 | break; |
2089 | } |
2090 | if (action_msg->cfa_in_peek_offset != 0 || |
2091 | action_msg->cfa_in_pass_offset != 0) { |
2092 | error = cfil_action_data_pass(so, cfil_info, kcunit, 0, |
2093 | action_msg->cfa_in_pass_offset, |
2094 | action_msg->cfa_in_peek_offset); |
2095 | } |
2096 | if (error == EJUSTRETURN) { |
2097 | error = 0; |
2098 | } |
2099 | |
2100 | // Toggle stats reporting according to received verdict. |
2101 | cfil_rw_lock_exclusive(lck: &cfil_lck_rw); |
2102 | cfil_info_stats_toggle(cfil_info, entry, report_frequency: action_msg->cfa_stats_frequency); |
2103 | cfil_rw_unlock_exclusive(lck: &cfil_lck_rw); |
2104 | |
2105 | break; |
2106 | |
2107 | case CFM_OP_DROP: |
2108 | if (cfil_info->cfi_debug) { |
2109 | cfil_info_log(LOG_ERR, cfil_info, "CFIL: RECEIVED CFM_OP_DROP" ); |
2110 | CFIL_LOG(LOG_ERR, "CFIL: VERDICT DROP RECEIVED: <so %llx sockID %llu <%llx>> <IN peek:%llu pass:%llu, OUT peek:%llu pass:%llu>" , |
2111 | (uint64_t)VM_KERNEL_ADDRPERM(so), |
2112 | cfil_info->cfi_sock_id, cfil_info->cfi_sock_id, |
2113 | action_msg->cfa_in_peek_offset, action_msg->cfa_in_pass_offset, |
2114 | action_msg->cfa_out_peek_offset, action_msg->cfa_out_pass_offset); |
2115 | } |
2116 | |
2117 | error = cfil_action_drop(so, cfil_info, kcunit); |
2118 | cfil_sock_received_verdict(so); |
2119 | break; |
2120 | |
2121 | default: |
2122 | error = EINVAL; |
2123 | break; |
2124 | } |
2125 | unlock: |
2126 | CFIL_INFO_FREE(cfil_info) |
2127 | socket_unlock(so, refcount: 1); |
2128 | done: |
2129 | mbuf_freem(mbuf: m); |
2130 | |
2131 | if (error == 0) { |
2132 | OSIncrementAtomic(&cfil_stats.cfs_ctl_send_ok); |
2133 | } else { |
2134 | OSIncrementAtomic(&cfil_stats.cfs_ctl_send_bad); |
2135 | } |
2136 | |
2137 | return error; |
2138 | } |
2139 | |
2140 | static errno_t |
2141 | cfil_ctl_getopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, |
2142 | int opt, void *data, size_t *len) |
2143 | { |
2144 | #pragma unused(kctlref, opt) |
2145 | struct cfil_info *cfil_info = NULL; |
2146 | errno_t error = 0; |
2147 | struct content_filter *cfc = (struct content_filter *)unitinfo; |
2148 | |
2149 | CFIL_LOG(LOG_NOTICE, "" ); |
2150 | |
2151 | if (cfc == NULL) { |
2152 | CFIL_LOG(LOG_ERR, "no unitinfo" ); |
2153 | return EINVAL; |
2154 | } |
2155 | |
2156 | cfil_rw_lock_shared(lck: &cfil_lck_rw); |
2157 | |
2158 | if (kcunit > MAX_CONTENT_FILTER) { |
2159 | CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)" , |
2160 | kcunit, MAX_CONTENT_FILTER); |
2161 | error = EINVAL; |
2162 | goto done; |
2163 | } |
2164 | if (cfc != (void *)content_filters[kcunit - 1]) { |
2165 | CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u" , |
2166 | kcunit); |
2167 | error = EINVAL; |
2168 | goto done; |
2169 | } |
2170 | switch (opt) { |
2171 | case CFIL_OPT_NECP_CONTROL_UNIT: |
2172 | if (*len < sizeof(uint32_t)) { |
2173 | CFIL_LOG(LOG_ERR, "len too small %lu" , *len); |
2174 | error = EINVAL; |
2175 | goto done; |
2176 | } |
2177 | if (data != NULL) { |
2178 | *(uint32_t *)data = cfc->cf_necp_control_unit; |
2179 | } |
2180 | break; |
2181 | case CFIL_OPT_PRESERVE_CONNECTIONS: |
2182 | if (*len < sizeof(uint32_t)) { |
2183 | CFIL_LOG(LOG_ERR, "CFIL_OPT_PRESERVE_CONNECTIONS len too small %lu" , *len); |
2184 | error = EINVAL; |
2185 | goto done; |
2186 | } |
2187 | if (data != NULL) { |
2188 | *(uint32_t *)data = (cfc->cf_flags & CFF_PRESERVE_CONNECTIONS) ? true : false; |
2189 | } |
2190 | break; |
2191 | case CFIL_OPT_GET_SOCKET_INFO: |
2192 | if (*len != sizeof(struct cfil_opt_sock_info)) { |
2193 | CFIL_LOG(LOG_ERR, "len does not match %lu" , *len); |
2194 | error = EINVAL; |
2195 | goto done; |
2196 | } |
2197 | if (data == NULL) { |
2198 | CFIL_LOG(LOG_ERR, "data not passed" ); |
2199 | error = EINVAL; |
2200 | goto done; |
2201 | } |
2202 | |
2203 | struct cfil_opt_sock_info *sock_info = |
2204 | (struct cfil_opt_sock_info *) data; |
2205 | |
2206 | // Unlock here so that we never hold both cfil_lck_rw and the |
2207 | // socket_lock at the same time. Otherwise, this can deadlock |
2208 | // because soclose() takes the socket_lock and then exclusive |
2209 | // cfil_lck_rw and we require the opposite order. |
2210 | |
2211 | // WARNING: Be sure to never use anything protected |
2212 | // by cfil_lck_rw beyond this point. |
2213 | // WARNING: Be sure to avoid fallthrough and |
2214 | // goto return_already_unlocked from this branch. |
2215 | cfil_rw_unlock_shared(lck: &cfil_lck_rw); |
2216 | |
2217 | // Search (TCP+UDP) and lock socket |
2218 | struct socket *sock = |
2219 | cfil_socket_from_sock_id(cfil_sock_id: sock_info->cfs_sock_id, false); |
2220 | if (sock == NULL) { |
2221 | CFIL_LOG(LOG_ERR, "CFIL: GET_SOCKET_INFO failed: bad sock_id %llu" , |
2222 | sock_info->cfs_sock_id); |
2223 | error = ENOENT; |
2224 | goto return_already_unlocked; |
2225 | } |
2226 | |
2227 | cfil_info = (sock->so_flow_db != NULL) ? |
2228 | soflow_db_get_feature_context(sock->so_flow_db, sock_info->cfs_sock_id) : sock->so_cfil; |
2229 | |
2230 | if (cfil_info == NULL) { |
2231 | CFIL_LOG(LOG_INFO, "CFIL: GET_SOCKET_INFO failed: so %llx not attached, cannot fetch info" , |
2232 | (uint64_t)VM_KERNEL_ADDRPERM(sock)); |
2233 | error = EINVAL; |
2234 | socket_unlock(so: sock, refcount: 1); |
2235 | goto return_already_unlocked; |
2236 | } |
2237 | |
2238 | if (sock->so_proto == NULL || sock->so_proto->pr_domain == NULL) { |
2239 | CFIL_LOG(LOG_INFO, "CFIL: GET_SOCKET_INFO failed: so %llx NULL so_proto / pr_domain" , |
2240 | (uint64_t)VM_KERNEL_ADDRPERM(sock)); |
2241 | error = EINVAL; |
2242 | socket_unlock(so: sock, refcount: 1); |
2243 | goto return_already_unlocked; |
2244 | } |
2245 | |
2246 | // Fill out family, type, and protocol |
2247 | sock_info->cfs_sock_family = SOCK_DOM(sock); |
2248 | sock_info->cfs_sock_type = SOCK_TYPE(sock); |
2249 | sock_info->cfs_sock_protocol = GET_SO_PROTO(sock); |
2250 | |
2251 | // Source and destination addresses |
2252 | struct inpcb *inp = sotoinpcb(sock); |
2253 | if (inp->inp_vflag & INP_IPV6) { |
2254 | struct in6_addr *laddr = NULL, *faddr = NULL; |
2255 | u_int16_t lport = 0, fport = 0; |
2256 | |
2257 | cfil_get_flow_address_v6(cfil_info->cfi_hash_entry, inp, |
2258 | &laddr, &faddr, &lport, &fport); |
2259 | fill_ip6_sockaddr_4_6(&sock_info->cfs_local, laddr, lport, inp->inp_lifscope); |
2260 | fill_ip6_sockaddr_4_6(&sock_info->cfs_remote, faddr, fport, inp->inp_fifscope); |
2261 | } else if (inp->inp_vflag & INP_IPV4) { |
2262 | struct in_addr laddr = {.s_addr = 0}, faddr = {.s_addr = 0}; |
2263 | u_int16_t lport = 0, fport = 0; |
2264 | |
2265 | cfil_get_flow_address(cfil_info->cfi_hash_entry, inp, |
2266 | &laddr, &faddr, &lport, &fport); |
2267 | fill_ip_sockaddr_4_6(&sock_info->cfs_local, laddr, lport); |
2268 | fill_ip_sockaddr_4_6(&sock_info->cfs_remote, faddr, fport); |
2269 | } |
2270 | |
2271 | // Set the pid info |
2272 | sock_info->cfs_pid = sock->last_pid; |
2273 | memcpy(dst: sock_info->cfs_uuid, src: sock->last_uuid, n: sizeof(uuid_t)); |
2274 | |
2275 | if (sock->so_flags & SOF_DELEGATED) { |
2276 | sock_info->cfs_e_pid = sock->e_pid; |
2277 | memcpy(dst: sock_info->cfs_e_uuid, src: sock->e_uuid, n: sizeof(uuid_t)); |
2278 | } else { |
2279 | sock_info->cfs_e_pid = sock->last_pid; |
2280 | memcpy(dst: sock_info->cfs_e_uuid, src: sock->last_uuid, n: sizeof(uuid_t)); |
2281 | } |
2282 | |
2283 | socket_unlock(so: sock, refcount: 1); |
2284 | |
2285 | goto return_already_unlocked; |
2286 | default: |
2287 | error = ENOPROTOOPT; |
2288 | break; |
2289 | } |
2290 | done: |
2291 | cfil_rw_unlock_shared(lck: &cfil_lck_rw); |
2292 | |
2293 | return error; |
2294 | |
2295 | return_already_unlocked: |
2296 | |
2297 | return error; |
2298 | } |
2299 | |
2300 | static errno_t |
2301 | cfil_ctl_setopt(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, |
2302 | int opt, void *data, size_t len) |
2303 | { |
2304 | #pragma unused(kctlref, opt) |
2305 | errno_t error = 0; |
2306 | struct content_filter *cfc = (struct content_filter *)unitinfo; |
2307 | |
2308 | CFIL_LOG(LOG_NOTICE, "" ); |
2309 | |
2310 | if (cfc == NULL) { |
2311 | CFIL_LOG(LOG_ERR, "no unitinfo" ); |
2312 | return EINVAL; |
2313 | } |
2314 | |
2315 | cfil_rw_lock_exclusive(lck: &cfil_lck_rw); |
2316 | |
2317 | if (kcunit > MAX_CONTENT_FILTER) { |
2318 | CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)" , |
2319 | kcunit, MAX_CONTENT_FILTER); |
2320 | error = EINVAL; |
2321 | goto done; |
2322 | } |
2323 | if (cfc != (void *)content_filters[kcunit - 1]) { |
2324 | CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u" , |
2325 | kcunit); |
2326 | error = EINVAL; |
2327 | goto done; |
2328 | } |
2329 | switch (opt) { |
2330 | case CFIL_OPT_NECP_CONTROL_UNIT: |
2331 | if (len < sizeof(uint32_t)) { |
2332 | CFIL_LOG(LOG_ERR, "CFIL_OPT_NECP_CONTROL_UNIT " |
2333 | "len too small %lu" , len); |
2334 | error = EINVAL; |
2335 | goto done; |
2336 | } |
2337 | if (cfc->cf_necp_control_unit != 0) { |
2338 | CFIL_LOG(LOG_ERR, "CFIL_OPT_NECP_CONTROL_UNIT " |
2339 | "already set %u" , |
2340 | cfc->cf_necp_control_unit); |
2341 | error = EINVAL; |
2342 | goto done; |
2343 | } |
2344 | cfc->cf_necp_control_unit = *(uint32_t *)data; |
2345 | break; |
2346 | case CFIL_OPT_PRESERVE_CONNECTIONS: |
2347 | if (len < sizeof(uint32_t)) { |
2348 | CFIL_LOG(LOG_ERR, "CFIL_OPT_PRESERVE_CONNECTIONS " |
2349 | "len too small %lu" , len); |
2350 | error = EINVAL; |
2351 | goto done; |
2352 | } |
2353 | uint32_t preserve_connections = *((uint32_t *)data); |
2354 | CFIL_LOG(LOG_INFO, "CFIL_OPT_PRESERVE_CONNECTIONS got %d (kcunit %d)" , preserve_connections, kcunit); |
2355 | if (preserve_connections) { |
2356 | cfc->cf_flags |= CFF_PRESERVE_CONNECTIONS; |
2357 | } else { |
2358 | cfc->cf_flags &= ~CFF_PRESERVE_CONNECTIONS; |
2359 | } |
2360 | |
2361 | cfil_update_behavior_flags(); |
2362 | break; |
2363 | default: |
2364 | error = ENOPROTOOPT; |
2365 | break; |
2366 | } |
2367 | done: |
2368 | cfil_rw_unlock_exclusive(lck: &cfil_lck_rw); |
2369 | |
2370 | return error; |
2371 | } |
2372 | |
2373 | |
2374 | static void |
2375 | cfil_ctl_rcvd(kern_ctl_ref kctlref, u_int32_t kcunit, void *unitinfo, int flags) |
2376 | { |
2377 | #pragma unused(kctlref, flags) |
2378 | struct content_filter *cfc = (struct content_filter *)unitinfo; |
2379 | struct socket *so = NULL; |
2380 | int error; |
2381 | struct cfil_entry *entry; |
2382 | struct cfil_info *cfil_info = NULL; |
2383 | |
2384 | CFIL_LOG(LOG_INFO, "" ); |
2385 | |
2386 | if (cfc == NULL) { |
2387 | CFIL_LOG(LOG_ERR, "no unitinfo" ); |
2388 | OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_bad); |
2389 | return; |
2390 | } |
2391 | |
2392 | if (kcunit > MAX_CONTENT_FILTER) { |
2393 | CFIL_LOG(LOG_ERR, "kcunit %u > MAX_CONTENT_FILTER (%d)" , |
2394 | kcunit, MAX_CONTENT_FILTER); |
2395 | OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_bad); |
2396 | return; |
2397 | } |
2398 | cfil_rw_lock_shared(lck: &cfil_lck_rw); |
2399 | if (cfc != (void *)content_filters[kcunit - 1]) { |
2400 | CFIL_LOG(LOG_ERR, "unitinfo does not match for kcunit %u" , |
2401 | kcunit); |
2402 | OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_bad); |
2403 | goto done; |
2404 | } |
2405 | /* Let's assume the flow control is lifted */ |
2406 | if (cfc->cf_flags & CFF_FLOW_CONTROLLED) { |
2407 | if (!cfil_rw_lock_shared_to_exclusive(lck: &cfil_lck_rw)) { |
2408 | cfil_rw_lock_exclusive(lck: &cfil_lck_rw); |
2409 | } |
2410 | |
2411 | cfc->cf_flags &= ~CFF_FLOW_CONTROLLED; |
2412 | |
2413 | cfil_rw_lock_exclusive_to_shared(lck: &cfil_lck_rw); |
2414 | LCK_RW_ASSERT(&cfil_lck_rw, LCK_RW_ASSERT_SHARED); |
2415 | } |
2416 | /* |
2417 | * Flow control will be raised again as soon as an entry cannot enqueue |
2418 | * to the kernel control socket |
2419 | */ |
2420 | while ((cfc->cf_flags & CFF_FLOW_CONTROLLED) == 0) { |
2421 | verify_content_filter(cfc); |
2422 | |
2423 | cfil_rw_lock_assert_held(lck: &cfil_lck_rw, exclusive: 0); |
2424 | |
2425 | /* Find an entry that is flow controlled */ |
2426 | TAILQ_FOREACH(entry, &cfc->cf_sock_entries, cfe_link) { |
2427 | if (entry->cfe_cfil_info == NULL || |
2428 | entry->cfe_cfil_info->cfi_so == NULL) { |
2429 | continue; |
2430 | } |
2431 | if ((entry->cfe_flags & CFEF_FLOW_CONTROLLED) == 0) { |
2432 | continue; |
2433 | } |
2434 | } |
2435 | if (entry == NULL) { |
2436 | break; |
2437 | } |
2438 | |
2439 | OSIncrementAtomic(&cfil_stats.cfs_ctl_rcvd_flow_lift); |
2440 | |
2441 | cfil_info = entry->cfe_cfil_info; |
2442 | so = cfil_info->cfi_so; |
2443 | |
2444 | if (cfil_info == NULL || os_ref_retain_try(rc: &cfil_info->cfi_ref_count) == false) { |
2445 | break; |
2446 | } |
2447 | |
2448 | cfil_rw_unlock_shared(lck: &cfil_lck_rw); |
2449 | socket_lock(so, refcount: 1); |
2450 | |
2451 | do { |
2452 | error = cfil_acquire_sockbuf(so, cfil_info, outgoing: 1); |
2453 | if (error == 0) { |
2454 | error = cfil_data_service_ctl_q(so, cfil_info, kcunit, 1); |
2455 | } |
2456 | cfil_release_sockbuf(so, outgoing: 1); |
2457 | if (error != 0) { |
2458 | break; |
2459 | } |
2460 | |
2461 | error = cfil_acquire_sockbuf(so, cfil_info, outgoing: 0); |
2462 | if (error == 0) { |
2463 | error = cfil_data_service_ctl_q(so, cfil_info, kcunit, 0); |
2464 | } |
2465 | cfil_release_sockbuf(so, outgoing: 0); |
2466 | } while (0); |
2467 | |
2468 | CFIL_INFO_FREE(cfil_info); |
2469 | socket_lock_assert_owned(so); |
2470 | socket_unlock(so, refcount: 1); |
2471 | |
2472 | cfil_rw_lock_shared(lck: &cfil_lck_rw); |
2473 | } |
2474 | done: |
2475 | cfil_rw_unlock_shared(lck: &cfil_lck_rw); |
2476 | } |
2477 | |
2478 | struct cflil_tag_container { |
2479 | struct m_tag cfil_m_tag; |
2480 | struct cfil_tag cfil_tag; |
2481 | }; |
2482 | |
2483 | static struct m_tag * |
2484 | m_tag_kalloc_cfil_udp(u_int32_t id, u_int16_t type, uint16_t len, int wait) |
2485 | { |
2486 | struct cflil_tag_container *tag_container; |
2487 | struct m_tag *tag = NULL; |
2488 | |
2489 | assert3u(id, ==, KERNEL_MODULE_TAG_ID); |
2490 | assert3u(type, ==, KERNEL_TAG_TYPE_CFIL_UDP); |
2491 | assert3u(len, ==, sizeof(struct cfil_tag)); |
2492 | |
2493 | if (len != sizeof(struct cfil_tag)) { |
2494 | return NULL; |
2495 | } |
2496 | |
2497 | tag_container = kalloc_type(struct cflil_tag_container, wait | M_ZERO); |
2498 | if (tag_container != NULL) { |
2499 | tag = &tag_container->cfil_m_tag; |
2500 | |
2501 | assert3p(tag, ==, tag_container); |
2502 | |
2503 | M_TAG_INIT(tag, id, type, len, &tag_container->cfil_tag, NULL); |
2504 | } |
2505 | |
2506 | return tag; |
2507 | } |
2508 | |
2509 | static void |
2510 | m_tag_kfree_cfil_udp(struct m_tag *tag) |
2511 | { |
2512 | struct cflil_tag_container *tag_container = (struct cflil_tag_container *)tag; |
2513 | |
2514 | kfree_type(struct cflil_tag_container, tag_container); |
2515 | } |
2516 | |
2517 | void |
2518 | cfil_register_m_tag(void) |
2519 | { |
2520 | errno_t error = 0; |
2521 | |
2522 | error = m_register_internal_tag_type(type: KERNEL_TAG_TYPE_CFIL_UDP, len: sizeof(struct cfil_tag), |
2523 | alloc_func: m_tag_kalloc_cfil_udp, free_func: m_tag_kfree_cfil_udp); |
2524 | |
2525 | assert3u(error, ==, 0); |
2526 | } |
2527 | |
2528 | void |
2529 | cfil_init(void) |
2530 | { |
2531 | struct kern_ctl_reg kern_ctl; |
2532 | errno_t error = 0; |
2533 | unsigned int mbuf_limit = 0; |
2534 | |
2535 | CFIL_LOG(LOG_NOTICE, "" ); |
2536 | |
2537 | /* |
2538 | * Compile time verifications |
2539 | */ |
2540 | _CASSERT(CFIL_MAX_FILTER_COUNT == MAX_CONTENT_FILTER); |
2541 | _CASSERT(sizeof(struct cfil_filter_stat) % sizeof(uint32_t) == 0); |
2542 | _CASSERT(sizeof(struct cfil_entry_stat) % sizeof(uint32_t) == 0); |
2543 | _CASSERT(sizeof(struct cfil_sock_stat) % sizeof(uint32_t) == 0); |
2544 | |
2545 | /* |
	 * Runtime verifications
2547 | */ |
2548 | VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_in_enqueued, |
2549 | sizeof(uint32_t))); |
2550 | VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_out_enqueued, |
2551 | sizeof(uint32_t))); |
2552 | VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_in_peeked, |
2553 | sizeof(uint32_t))); |
2554 | VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_ctl_q_out_peeked, |
2555 | sizeof(uint32_t))); |
2556 | |
2557 | VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_pending_q_in_enqueued, |
2558 | sizeof(uint32_t))); |
2559 | VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_pending_q_out_enqueued, |
2560 | sizeof(uint32_t))); |
2561 | |
2562 | VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_in_enqueued, |
2563 | sizeof(uint32_t))); |
2564 | VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_out_enqueued, |
2565 | sizeof(uint32_t))); |
2566 | VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_in_passed, |
2567 | sizeof(uint32_t))); |
2568 | VERIFY(IS_P2ALIGNED(&cfil_stats.cfs_inject_q_out_passed, |
2569 | sizeof(uint32_t))); |
2570 | |
2571 | /* |
	 * Initialize the global lists of attached flows
2573 | */ |
2574 | TAILQ_INIT(&cfil_sock_head); |
2575 | TAILQ_INIT(&cfil_sock_head_stats); |
2576 | |
2577 | /* |
2578 | * Register kernel control |
2579 | */ |
2580 | bzero(s: &kern_ctl, n: sizeof(kern_ctl)); |
2581 | strlcpy(dst: kern_ctl.ctl_name, CONTENT_FILTER_CONTROL_NAME, |
2582 | n: sizeof(kern_ctl.ctl_name)); |
2583 | kern_ctl.ctl_flags = CTL_FLAG_PRIVILEGED | CTL_FLAG_REG_EXTENDED; |
2584 | kern_ctl.ctl_sendsize = 512 * 1024; /* enough? */ |
2585 | kern_ctl.ctl_recvsize = 512 * 1024; /* enough? */ |
2586 | kern_ctl.ctl_connect = cfil_ctl_connect; |
2587 | kern_ctl.ctl_disconnect = cfil_ctl_disconnect; |
2588 | kern_ctl.ctl_send = cfil_ctl_send; |
2589 | kern_ctl.ctl_getopt = cfil_ctl_getopt; |
2590 | kern_ctl.ctl_setopt = cfil_ctl_setopt; |
2591 | kern_ctl.ctl_rcvd = cfil_ctl_rcvd; |
2592 | error = ctl_register(userkctl: &kern_ctl, kctlref: &cfil_kctlref); |
2593 | if (error != 0) { |
2594 | CFIL_LOG(LOG_ERR, "ctl_register failed: %d" , error); |
2595 | return; |
2596 | } |
2597 | |
2598 | // Spawn thread for statistics reporting |
2599 | if (kernel_thread_start(continuation: cfil_stats_report_thread_func, NULL, |
2600 | new_thread: &cfil_stats_report_thread) != KERN_SUCCESS) { |
2601 | panic_plain("%s: Can't create statistics report thread" , __func__); |
2602 | /* NOTREACHED */ |
2603 | } |
2604 | /* this must not fail */ |
2605 | VERIFY(cfil_stats_report_thread != NULL); |
2606 | |
2607 | // Set UDP per-flow mbuf thresholds to 1/32 of platform max |
2608 | mbuf_limit = MAX(UDP_FLOW_GC_MBUF_CNT_MAX, (nmbclusters << MCLSHIFT) >> UDP_FLOW_GC_MBUF_SHIFT); |
2609 | cfil_udp_gc_mbuf_num_max = (mbuf_limit >> MCLSHIFT); |
2610 | cfil_udp_gc_mbuf_cnt_max = mbuf_limit; |
2611 | |
2612 | memset(s: &global_cfil_stats_report_buffers, c: 0, n: sizeof(global_cfil_stats_report_buffers)); |
2613 | } |
2614 | |
2615 | struct cfil_info * |
2616 | cfil_info_alloc(struct socket *so, struct soflow_hash_entry *hash_entry) |
2617 | { |
2618 | int kcunit; |
2619 | struct cfil_info *cfil_info = NULL; |
2620 | struct inpcb *inp = sotoinpcb(so); |
2621 | |
2622 | CFIL_LOG(LOG_INFO, "" ); |
2623 | |
2624 | socket_lock_assert_owned(so); |
2625 | |
2626 | cfil_info = zalloc_flags(cfil_info_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL); |
2627 | os_ref_init(&cfil_info->cfi_ref_count, &cfil_refgrp); |
2628 | |
2629 | cfil_queue_init(cfq: &cfil_info->cfi_snd.cfi_inject_q); |
2630 | cfil_queue_init(cfq: &cfil_info->cfi_rcv.cfi_inject_q); |
2631 | |
2632 | for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { |
2633 | struct cfil_entry *entry; |
2634 | |
2635 | entry = &cfil_info->cfi_entries[kcunit - 1]; |
2636 | entry->cfe_cfil_info = cfil_info; |
2637 | |
2638 | /* Initialize the filter entry */ |
2639 | entry->cfe_filter = NULL; |
2640 | entry->cfe_flags = 0; |
2641 | entry->cfe_necp_control_unit = 0; |
2642 | entry->cfe_snd.cfe_pass_offset = 0; |
2643 | entry->cfe_snd.cfe_peek_offset = 0; |
2644 | entry->cfe_snd.cfe_peeked = 0; |
2645 | entry->cfe_rcv.cfe_pass_offset = 0; |
2646 | entry->cfe_rcv.cfe_peek_offset = 0; |
2647 | entry->cfe_rcv.cfe_peeked = 0; |
2648 | /* |
		 * Timestamp the last action to avoid prematurely
2650 | * triggering garbage collection |
2651 | */ |
2652 | microuptime(tv: &entry->cfe_last_action); |
2653 | |
2654 | cfil_queue_init(cfq: &entry->cfe_snd.cfe_pending_q); |
2655 | cfil_queue_init(cfq: &entry->cfe_rcv.cfe_pending_q); |
2656 | cfil_queue_init(cfq: &entry->cfe_snd.cfe_ctl_q); |
2657 | cfil_queue_init(cfq: &entry->cfe_rcv.cfe_ctl_q); |
2658 | } |
2659 | |
2660 | cfil_rw_lock_exclusive(lck: &cfil_lck_rw); |
2661 | |
2662 | /* |
2663 | * Create a cfi_sock_id that's not the socket pointer! |
2664 | */ |
2665 | |
2666 | if (hash_entry == NULL) { |
2667 | // This is the TCP case, cfil_info is tracked per socket |
2668 | if (inp->inp_flowhash == 0) { |
2669 | inp_calc_flowhash(inp); |
2670 | ASSERT(inp->inp_flowhash != 0); |
2671 | } |
2672 | |
2673 | so->so_cfil = cfil_info; |
2674 | cfil_info->cfi_so = so; |
2675 | cfil_info->cfi_sock_id = |
2676 | ((so->so_gencnt << 32) | inp->inp_flowhash); |
2677 | } else { |
2678 | // This is the UDP case, cfil_info is tracked in per-socket hash |
2679 | cfil_info->cfi_so = so; |
2680 | cfil_info->cfi_hash_entry = hash_entry; |
2681 | cfil_info->cfi_sock_id = ((so->so_gencnt << 32) | (hash_entry->soflow_flowhash & 0xffffffff)); |
2682 | } |
2683 | |
2684 | TAILQ_INSERT_TAIL(&cfil_sock_head, cfil_info, cfi_link); |
2685 | SLIST_INIT(&cfil_info->cfi_ordered_entries); |
2686 | |
2687 | cfil_sock_attached_count++; |
2688 | |
2689 | cfil_rw_unlock_exclusive(lck: &cfil_lck_rw); |
2690 | |
2691 | if (cfil_info != NULL) { |
2692 | OSIncrementAtomic(&cfil_stats.cfs_cfi_alloc_ok); |
2693 | } else { |
2694 | OSIncrementAtomic(&cfil_stats.cfs_cfi_alloc_fail); |
2695 | } |
2696 | |
2697 | return cfil_info; |
2698 | } |
2699 | |
2700 | int |
2701 | cfil_info_attach_unit(struct socket *so, uint32_t filter_control_unit, struct cfil_info *cfil_info) |
2702 | { |
2703 | int kcunit; |
2704 | int attached = 0; |
2705 | |
2706 | CFIL_LOG(LOG_INFO, "" ); |
2707 | |
2708 | socket_lock_assert_owned(so); |
2709 | |
2710 | cfil_rw_lock_exclusive(lck: &cfil_lck_rw); |
2711 | |
2712 | for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { |
2713 | struct content_filter *cfc = content_filters[kcunit - 1]; |
2714 | struct cfil_entry *entry; |
2715 | struct cfil_entry *iter_entry; |
2716 | struct cfil_entry *iter_prev; |
2717 | |
2718 | if (cfc == NULL) { |
2719 | continue; |
2720 | } |
2721 | if (!(cfc->cf_necp_control_unit & filter_control_unit)) { |
2722 | continue; |
2723 | } |
2724 | |
2725 | entry = &cfil_info->cfi_entries[kcunit - 1]; |
2726 | |
2727 | entry->cfe_filter = cfc; |
2728 | entry->cfe_necp_control_unit = cfc->cf_necp_control_unit; |
2729 | TAILQ_INSERT_TAIL(&cfc->cf_sock_entries, entry, cfe_link); |
2730 | cfc->cf_sock_count++; |
2731 | |
2732 | /* Insert the entry into the list ordered by control unit */ |
2733 | iter_prev = NULL; |
2734 | SLIST_FOREACH(iter_entry, &cfil_info->cfi_ordered_entries, cfe_order_link) { |
2735 | if (entry->cfe_necp_control_unit < iter_entry->cfe_necp_control_unit) { |
2736 | break; |
2737 | } |
2738 | iter_prev = iter_entry; |
2739 | } |
2740 | |
2741 | if (iter_prev == NULL) { |
2742 | SLIST_INSERT_HEAD(&cfil_info->cfi_ordered_entries, entry, cfe_order_link); |
2743 | } else { |
2744 | SLIST_INSERT_AFTER(iter_prev, entry, cfe_order_link); |
2745 | } |
2746 | |
2747 | verify_content_filter(cfc); |
2748 | attached = 1; |
2749 | entry->cfe_flags |= CFEF_CFIL_ATTACHED; |
2750 | } |
2751 | |
2752 | cfil_rw_unlock_exclusive(lck: &cfil_lck_rw); |
2753 | |
2754 | return attached; |
2755 | } |
2756 | |
2757 | static void |
2758 | cfil_info_free(struct cfil_info *cfil_info) |
2759 | { |
2760 | int kcunit; |
2761 | uint64_t in_drain = 0; |
2762 | uint64_t out_drained = 0; |
2763 | |
2764 | if (cfil_info == NULL) { |
2765 | return; |
2766 | } |
2767 | |
2768 | CFIL_LOG(LOG_INFO, "" ); |
2769 | |
2770 | cfil_rw_lock_exclusive(lck: &cfil_lck_rw); |
2771 | |
2772 | if (cfil_info->cfi_debug) { |
2773 | cfil_info_log(LOG_ERR, cfil_info, "CFIL: FREEING CFIL_INFO" ); |
2774 | } |
2775 | |
2776 | for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { |
2777 | struct cfil_entry *entry; |
2778 | struct content_filter *cfc; |
2779 | |
2780 | entry = &cfil_info->cfi_entries[kcunit - 1]; |
2781 | |
2782 | /* Don't be silly and try to detach twice */ |
2783 | if (entry->cfe_filter == NULL) { |
2784 | continue; |
2785 | } |
2786 | |
2787 | cfc = content_filters[kcunit - 1]; |
2788 | |
2789 | VERIFY(cfc == entry->cfe_filter); |
2790 | |
2791 | entry->cfe_filter = NULL; |
2792 | entry->cfe_necp_control_unit = 0; |
2793 | TAILQ_REMOVE(&cfc->cf_sock_entries, entry, cfe_link); |
2794 | cfc->cf_sock_count--; |
2795 | |
2796 | verify_content_filter(cfc); |
2797 | } |
2798 | |
2799 | cfil_sock_attached_count--; |
2800 | TAILQ_REMOVE(&cfil_sock_head, cfil_info, cfi_link); |
2801 | |
2802 | // Turn off stats reporting for cfil_info. |
2803 | cfil_info_stats_toggle(cfil_info, NULL, report_frequency: 0); |
2804 | |
2805 | out_drained += cfil_queue_drain(cfq: &cfil_info->cfi_snd.cfi_inject_q); |
2806 | in_drain += cfil_queue_drain(cfq: &cfil_info->cfi_rcv.cfi_inject_q); |
2807 | |
2808 | for (kcunit = 1; kcunit <= MAX_CONTENT_FILTER; kcunit++) { |
2809 | struct cfil_entry *entry; |
2810 | |
2811 | entry = &cfil_info->cfi_entries[kcunit - 1]; |
2812 | out_drained += cfil_queue_drain(cfq: &entry->cfe_snd.cfe_pending_q); |
2813 | in_drain += cfil_queue_drain(cfq: &entry->cfe_rcv.cfe_pending_q); |
2814 | out_drained += cfil_queue_drain(cfq: &entry->cfe_snd.cfe_ctl_q); |
2815 | in_drain += cfil_queue_drain(cfq: &entry->cfe_rcv.cfe_ctl_q); |
2816 | } |
2817 | cfil_rw_unlock_exclusive(lck: &cfil_lck_rw); |
2818 | |
2819 | if (out_drained) { |
2820 | OSIncrementAtomic(&cfil_stats.cfs_flush_out_free); |
2821 | } |
2822 | if (in_drain) { |
2823 | OSIncrementAtomic(&cfil_stats.cfs_flush_in_free); |
2824 | } |
2825 | |
2826 | zfree(cfil_info_zone, cfil_info); |
2827 | } |
2828 | |
2829 | /* |
2830 | * Received a verdict from userspace for a socket. |
2831 | * Perform any delayed operation if needed. |
2832 | */ |
2833 | static void |
2834 | cfil_sock_received_verdict(struct socket *so) |
2835 | { |
2836 | if (so == NULL || so->so_cfil == NULL) { |
2837 | return; |
2838 | } |
2839 | |
2840 | so->so_cfil->cfi_flags |= CFIF_INITIAL_VERDICT; |
2841 | |
2842 | /* |
	 * If the socket has already been connected, trigger
2844 | * soisconnected now. |
2845 | */ |
2846 | if (so->so_cfil->cfi_flags & CFIF_SOCKET_CONNECTED) { |
2847 | so->so_cfil->cfi_flags &= ~CFIF_SOCKET_CONNECTED; |
2848 | soisconnected(so); |
2849 | return; |
2850 | } |
2851 | } |
2852 | |
2853 | /* |
2854 | * Entry point from Sockets layer |
2855 | * The socket is locked. |
2856 | * |
2857 | * Checks if a connected socket is subject to filter and |
2858 | * pending the initial verdict. |
2859 | */ |
2860 | boolean_t |
2861 | cfil_sock_connected_pending_verdict(struct socket *so) |
2862 | { |
2863 | if (so == NULL || so->so_cfil == NULL) { |
2864 | return false; |
2865 | } |
2866 | |
2867 | if (so->so_cfil->cfi_flags & CFIF_INITIAL_VERDICT) { |
2868 | return false; |
2869 | } else { |
2870 | /* |
2871 | * Remember that this protocol is already connected, so |
2872 | * we will trigger soisconnected() upon receipt of |
2873 | * initial verdict later. |
2874 | */ |
2875 | so->so_cfil->cfi_flags |= CFIF_SOCKET_CONNECTED; |
2876 | return true; |
2877 | } |
2878 | } |
2879 | |
2880 | /* |
2881 | * Entry point from Flow Divert |
2882 | * The socket is locked. |
2883 | * |
2884 | * Mark socket as DEAD if all CFIL data has been processed by filter(s). |
2885 | * Otherwise, delay the marking until all data has been processed. |
2886 | */ |
2887 | boolean_t |
2888 | cfil_sock_is_dead(struct socket *so) |
2889 | { |
2890 | struct inpcb *inp = NULL; |
2891 | |
2892 | if (so == NULL) { |
2893 | return false; |
2894 | } |
2895 | |
2896 | socket_lock_assert_owned(so); |
2897 | |
2898 | if ((so->so_flags & SOF_CONTENT_FILTER) != 0) { |
2899 | int32_t pending_snd = cfil_sock_data_pending(sb: &so->so_snd); |
2900 | int32_t pending_rcv = cfil_sock_data_pending(sb: &so->so_rcv); |
2901 | if (pending_snd || pending_rcv) { |
2902 | SO_DELAYED_DEAD_SET(so, true) |
2903 | return false; |
2904 | } |
2905 | } |
2906 | |
2907 | inp = sotoinpcb(so); |
2908 | if (inp != NULL) { |
2909 | inp->inp_state = INPCB_STATE_DEAD; |
2910 | inpcb_gc_sched(inp->inp_pcbinfo, type: INPCB_TIMER_FAST); |
2911 | SO_DELAYED_DEAD_SET(so, false) |
2912 | return true; |
2913 | } |
2914 | return false; |
2915 | } |
2916 | |
2917 | /* |
2918 | * Entry point from tcp_timer.c |
2919 | * The socket is locked. |
2920 | * |
2921 | * Perform TCP FIN time wait handling if all CFIL data has been processed by filter(s). |
2922 | * Otherwise, delay until all data has been processed. |
2923 | */ |
2924 | boolean_t |
2925 | cfil_sock_tcp_add_time_wait(struct socket *so) |
2926 | { |
2927 | struct inpcb *inp = NULL; |
2928 | struct tcpcb *tp = NULL; |
2929 | |
2930 | // Only handle TCP sockets |
2931 | if (so == NULL || !IS_TCP(so)) { |
2932 | return false; |
2933 | } |
2934 | |
2935 | socket_lock_assert_owned(so); |
2936 | |
2937 | if ((so->so_flags & SOF_CONTENT_FILTER) != 0) { |
2938 | int32_t pending_snd = cfil_sock_data_pending(sb: &so->so_snd); |
2939 | int32_t pending_rcv = cfil_sock_data_pending(sb: &so->so_rcv); |
2940 | if (pending_snd || pending_rcv) { |
2941 | SO_DELAYED_TCP_TIME_WAIT_SET(so, true) |
2942 | return false; |
2943 | } |
2944 | } |
2945 | |
2946 | inp = sotoinpcb(so); |
2947 | tp = inp ? intotcpcb(inp) : NULL; |
2948 | if (tp != NULL) { |
2949 | add_to_time_wait_now(tp, delay: 2 * tcp_msl); |
2950 | SO_DELAYED_TCP_TIME_WAIT_SET(so, false) |
2951 | return true; |
2952 | } |
2953 | return false; |
2954 | } |
2955 | |
2956 | boolean_t |
2957 | cfil_filter_present(void) |
2958 | { |
2959 | return cfil_active_count > 0; |
2960 | } |
2961 | |
2962 | /* |
2963 | * Entry point from Sockets layer |
2964 | * The socket is locked. |
2965 | */ |
2966 | errno_t |
2967 | cfil_sock_attach(struct socket *so, struct sockaddr *local, struct sockaddr *remote, int dir) |
2968 | { |
2969 | errno_t error = 0; |
2970 | uint32_t filter_control_unit; |
2971 | int debug = 0; |
2972 | |
2973 | socket_lock_assert_owned(so); |
2974 | |
2975 | if (so->so_flags1 & SOF1_FLOW_DIVERT_SKIP) { |
2976 | /* |
2977 | * This socket has already been evaluated (and ultimately skipped) by |
2978 | * flow divert, so it has also already been through content filter if there |
2979 | * is one. |
2980 | */ |
2981 | goto done; |
2982 | } |
2983 | |
	/* Limit ourselves to TCP sockets that are not MPTCP subflows */
2985 | if (SKIP_FILTER_FOR_TCP_SOCKET(so)) { |
2986 | goto done; |
2987 | } |
2988 | |
2989 | debug = DEBUG_FLOW(sotoinpcb(so), so, local, remote); |
2990 | if (debug) { |
2991 | CFIL_LOG(LOG_INFO, "CFIL: TCP (dir %d) - debug flow with port %d" , dir, cfil_log_port); |
2992 | } |
2993 | |
2994 | filter_control_unit = necp_socket_get_content_filter_control_unit(so); |
2995 | if (filter_control_unit == 0) { |
2996 | goto done; |
2997 | } |
2998 | |
2999 | if (filter_control_unit == NECP_FILTER_UNIT_NO_FILTER) { |
3000 | goto done; |
3001 | } |
3002 | if ((filter_control_unit & NECP_MASK_USERSPACE_ONLY) != 0) { |
3003 | OSIncrementAtomic(&cfil_stats.cfs_sock_userspace_only); |
3004 | goto done; |
3005 | } |
3006 | if (cfil_active_count == 0) { |
3007 | OSIncrementAtomic(&cfil_stats.cfs_sock_attach_in_vain); |
3008 | goto done; |
3009 | } |
3010 | if (so->so_cfil != NULL) { |
3011 | OSIncrementAtomic(&cfil_stats.cfs_sock_attach_already); |
3012 | CFIL_LOG(LOG_ERR, "already attached"); |
3013 | goto done; |
3014 | } else { |
3015 | cfil_info_alloc(so, NULL); |
3016 | if (so->so_cfil == NULL) { |
3017 | error = ENOMEM; |
3018 | OSIncrementAtomic(&cfil_stats.cfs_sock_attach_no_mem); |
3019 | goto done; |
3020 | } |
3021 | so->so_cfil->cfi_dir = dir; |
3022 | so->so_cfil->cfi_filter_control_unit = filter_control_unit; |
3023 | so->so_cfil->cfi_debug = debug; |
3024 | } |
3025 | if (cfil_info_attach_unit(so, filter_control_unit, so->so_cfil) == 0) { |
3026 | CFIL_LOG(LOG_ERR, "cfil_info_attach_unit(%u) failed", |
3027 | filter_control_unit); |
3028 | OSIncrementAtomic(&cfil_stats.cfs_sock_attach_failed); |
3029 | goto done; |
3030 | } |
3031 | CFIL_LOG(LOG_INFO, "so %llx filter_control_unit %u sockID %llu <%llx>", |
3032 | (uint64_t)VM_KERNEL_ADDRPERM(so), |
3033 | filter_control_unit, so->so_cfil->cfi_sock_id, so->so_cfil->cfi_sock_id); |
3034 | |
3035 | so->so_flags |= SOF_CONTENT_FILTER; |
3036 | OSIncrementAtomic(&cfil_stats.cfs_sock_attached); |
3037 | |
3038 | /* Hold a reference on the socket */ |
3039 | so->so_usecount++; |
3040 | |
3041 | /* |
3042 | * Save passed addresses for attach event msg (in case a resend |
3043 | * is needed). |
3044 | */ |
3045 | if (remote != NULL && (remote->sa_len <= sizeof(union sockaddr_in_4_6))) { |
3046 | memcpy(&so->so_cfil->cfi_so_attach_faddr, remote, remote->sa_len); |
3047 | } |
3048 | if (local != NULL && (local->sa_len <= sizeof(union sockaddr_in_4_6))) { |
3049 | memcpy(&so->so_cfil->cfi_so_attach_laddr, local, local->sa_len); |
3050 | } |
3051 | |
3052 | error = cfil_dispatch_attach_event(so, so->so_cfil, 0, dir); |
3053 | /* We can recover from flow control or out of memory errors */ |
3054 | if (error == ENOBUFS || error == ENOMEM) { |
3055 | error = 0; |
3056 | } else if (error != 0) { |
3057 | goto done; |
3058 | } |
3059 | |
3060 | CFIL_INFO_VERIFY(so->so_cfil); |
3061 | done: |
3062 | return error; |
3063 | } |
3064 | |
3065 | /* |
3066 | * Entry point from Sockets layer |
3067 | * The socket is locked. |
3068 | */ |
3069 | errno_t |
3070 | cfil_sock_detach(struct socket *so) |
3071 | { |
3072 | if (NEED_DGRAM_FLOW_TRACKING(so)) { |
3073 | return 0; |
3074 | } |
3075 | |
3076 | if (so->so_cfil) { |
3077 | if (so->so_flags & SOF_CONTENT_FILTER) { |
3078 | so->so_flags &= ~SOF_CONTENT_FILTER; |
3079 | VERIFY(so->so_usecount > 0); |
3080 | so->so_usecount--; |
3081 | } |
3082 | CFIL_INFO_FREE(so->so_cfil); |
3083 | so->so_cfil = NULL; |
3084 | OSIncrementAtomic(&cfil_stats.cfs_sock_detached); |
3085 | } |
3086 | return 0; |
3087 | } |
3088 | |
3089 | /* |
3090 | * Fill in the address info of an event message from either |
3091 | * the socket or passed in address info. |
3092 | */ |
3093 | static void |
3094 | cfil_fill_event_msg_addresses(struct soflow_hash_entry *entry, struct inpcb *inp, |
3095 | union sockaddr_in_4_6 *sin_src, union sockaddr_in_4_6 *sin_dst, |
3096 | boolean_t isIPv4, boolean_t outgoing) |
3097 | { |
3098 | if (isIPv4) { |
3099 | struct in_addr laddr = {0}, faddr = {0}; |
3100 | u_int16_t lport = 0, fport = 0; |
3101 | |
3102 | cfil_get_flow_address(entry, inp, &laddr, &faddr, &lport, &fport); |
3103 | |
3104 | if (outgoing) { |
3105 | fill_ip_sockaddr_4_6(sin_src, laddr, lport); |
3106 | fill_ip_sockaddr_4_6(sin_dst, faddr, fport); |
3107 | } else { |
3108 | fill_ip_sockaddr_4_6(sin_src, faddr, fport); |
3109 | fill_ip_sockaddr_4_6(sin_dst, laddr, lport); |
3110 | } |
3111 | } else { |
3112 | struct in6_addr *laddr = NULL, *faddr = NULL; |
3113 | u_int16_t lport = 0, fport = 0; |
3114 | const u_int32_t lifscope = inp ? inp->inp_lifscope : IFSCOPE_UNKNOWN; |
3115 | const u_int32_t fifscope = inp ? inp->inp_fifscope : IFSCOPE_UNKNOWN; |
3116 | |
3117 | cfil_get_flow_address_v6(entry, inp, &laddr, &faddr, &lport, &fport); |
3118 | if (outgoing) { |
3119 | fill_ip6_sockaddr_4_6(sin_src, laddr, lport, lifscope); |
3120 | fill_ip6_sockaddr_4_6(sin_dst, faddr, fport, fifscope); |
3121 | } else { |
3122 | fill_ip6_sockaddr_4_6(sin_src, faddr, fport, fifscope); |
3123 | fill_ip6_sockaddr_4_6(sin_dst, laddr, lport, lifscope); |
3124 | } |
3125 | } |
3126 | } |
3127 | |
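/*
 * Sign the attach event with the filter's crypto state so the user space
 * agent can verify the flow metadata (sock id, direction, pids, UUIDs,
 * protocol, addresses and, when known, the remote domain name).
 */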
3128 | static boolean_t |
3129 | cfil_dispatch_attach_event_sign(cfil_crypto_state_t crypto_state, |
3130 | struct cfil_info *cfil_info, |
3131 | struct cfil_msg_sock_attached *msg) |
3132 | { |
3133 | struct cfil_crypto_data data = {}; |
3134 | struct iovec extra_data[1] = { { NULL, 0 } }; |
3135 | |
3136 | if (crypto_state == NULL || msg == NULL || cfil_info == NULL) { |
3137 | return false; |
3138 | } |
3139 | |
3140 | data.sock_id = msg->cfs_msghdr.cfm_sock_id; |
3141 | data.direction = msg->cfs_conn_dir; |
3142 | |
3143 | data.pid = msg->cfs_pid; |
3144 | data.effective_pid = msg->cfs_e_pid; |
3145 | uuid_copy(data.uuid, msg->cfs_uuid); |
3146 | uuid_copy(data.effective_uuid, msg->cfs_e_uuid); |
3147 | data.socketProtocol = msg->cfs_sock_protocol; |
3148 | if (data.direction == CFS_CONNECTION_DIR_OUT) { |
3149 | data.remote.sin6 = msg->cfs_dst.sin6; |
3150 | data.local.sin6 = msg->cfs_src.sin6; |
3151 | } else { |
3152 | data.remote.sin6 = msg->cfs_src.sin6; |
3153 | data.local.sin6 = msg->cfs_dst.sin6; |
3154 | } |
3155 | |
3156 | if (strlen(msg->cfs_remote_domain_name) > 0) { |
3157 | extra_data[0].iov_base = msg->cfs_remote_domain_name; |
3158 | extra_data[0].iov_len = strlen(msg->cfs_remote_domain_name); |
3159 | } |
3160 | |
3161 | // At attach time, if the local address is already present, subsequent data messages do not need to be re-signed. |
3162 | if (!NULLADDRESS(data.local)) { |
3163 | cfil_info->cfi_isSignatureLatest = true; |
3164 | } |
3165 | |
3166 | msg->cfs_signature_length = sizeof(cfil_crypto_signature); |
3167 | if (cfil_crypto_sign_data(crypto_state, &data, extra_data, sizeof(extra_data) / sizeof(extra_data[0]), msg->cfs_signature, &msg->cfs_signature_length) != 0) { |
3168 | msg->cfs_signature_length = 0; |
3169 | CFIL_LOG(LOG_ERR, "CFIL: Failed to sign attached msg <sockID %llu <%llx>>", |
3170 | msg->cfs_msghdr.cfm_sock_id, msg->cfs_msghdr.cfm_sock_id); |
3171 | return false; |
3172 | } |
3173 | |
3174 | return true; |
3175 | } |
3176 | |
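/*
 * Context for necp_with_inp_domain_name(): the callback below signs the
 * prepared crypto data, adding the flow's domain name (when available) as
 * extra signed data, and stores the signature and its length.
 */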
3177 | struct cfil_sign_parameters { |
3178 | cfil_crypto_state_t csp_state; |
3179 | struct cfil_crypto_data *csp_data; |
3180 | uint8_t *csp_signature; |
3181 | uint32_t *csp_signature_size; |
3182 | }; |
3183 | |
3184 | static void |
3185 | cfil_sign_with_domain_name(char *domain_name, void *ctx) |
3186 | { |
3187 | struct cfil_sign_parameters *parameters = (struct cfil_sign_parameters *)ctx; |
3188 | struct iovec extra_data[1] = { { NULL, 0 } }; |
3189 | |
3190 | if (parameters == NULL) { |
3191 | return; |
3192 | } |
3193 | |
3194 | if (domain_name != NULL) { |
3195 | extra_data[0].iov_base = domain_name; |
3196 | extra_data[0].iov_len = strlen(domain_name); |
3197 | } |
3198 | |
3199 | *(parameters->csp_signature_size) = sizeof(cfil_crypto_signature); |
3200 | if (cfil_crypto_sign_data(parameters->csp_state, parameters->csp_data, |
3201 | extra_data, sizeof(extra_data) / sizeof(extra_data[0]), |
3202 | parameters->csp_signature, parameters->csp_signature_size) != 0) { |
3203 | *(parameters->csp_signature_size) = 0; |
3204 | } |
3205 | } |
3206 | |
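/*
 * Sign a data event message. The signed data covers the flow identity
 * (sock id, direction, pids, UUIDs, protocol) and the addresses carried in
 * the message, plus the domain name when NECP knows it.
 */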
3207 | static boolean_t |
3208 | cfil_dispatch_data_event_sign(cfil_crypto_state_t crypto_state, |
3209 | struct socket *so, struct cfil_info *cfil_info, |
3210 | struct cfil_msg_data_event *msg) |
3211 | { |
3212 | struct cfil_crypto_data data = {}; |
3213 | |
3214 | if (crypto_state == NULL || msg == NULL || |
3215 | so == NULL || cfil_info == NULL) { |
3216 | return false; |
3217 | } |
3218 | |
3219 | data.sock_id = cfil_info->cfi_sock_id; |
3220 | data.direction = cfil_info->cfi_dir; |
3221 | data.pid = so->last_pid; |
3222 | memcpy(data.uuid, so->last_uuid, sizeof(uuid_t)); |
3223 | if (so->so_flags & SOF_DELEGATED) { |
3224 | data.effective_pid = so->e_pid; |
3225 | memcpy(data.effective_uuid, so->e_uuid, sizeof(uuid_t)); |
3226 | } else { |
3227 | data.effective_pid = so->last_pid; |
3228 | memcpy(data.effective_uuid, so->last_uuid, sizeof(uuid_t)); |
3229 | } |
3230 | data.socketProtocol = GET_SO_PROTO(so); |
3231 | |
3232 | if (data.direction == CFS_CONNECTION_DIR_OUT) { |
3233 | data.remote.sin6 = msg->cfc_dst.sin6; |
3234 | data.local.sin6 = msg->cfc_src.sin6; |
3235 | } else { |
3236 | data.remote.sin6 = msg->cfc_src.sin6; |
3237 | data.local.sin6 = msg->cfc_dst.sin6; |
3238 | } |
3239 | |
3240 | // At the first data event, the local address may become available for the first time; |
3241 | // update the address cache so subsequent data messages need not be re-signed. |
3242 | if (!NULLADDRESS(data.local)) { |
3243 | memcpy(&cfil_info->cfi_so_attach_laddr, &data.local, data.local.sa.sa_len); |
3244 | cfil_info->cfi_isSignatureLatest = true; |
3245 | } |
3246 | |
3247 | struct cfil_sign_parameters parameters = { |
3248 | .csp_state = crypto_state, |
3249 | .csp_data = &data, |
3250 | .csp_signature = msg->cfd_signature, |
3251 | .csp_signature_size = &msg->cfd_signature_length, |
3252 | }; |
3253 | necp_with_inp_domain_name(so, &parameters, cfil_sign_with_domain_name); |
3254 | |
3255 | if (msg->cfd_signature_length == 0) { |
3256 | CFIL_LOG(LOG_ERR, "CFIL: Failed to sign data msg <sockID %llu <%llx>>", |
3257 | msg->cfd_msghdr.cfm_sock_id, msg->cfd_msghdr.cfm_sock_id); |
3258 | return false; |
3259 | } |
3260 | |
3261 | return true; |
3262 | } |
3263 | |
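/*
 * Sign a closed event message. In addition to the flow identity and
 * addresses, the signed data includes the final inbound and outbound byte
 * counts for the flow.
 */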
3264 | static boolean_t |
3265 | cfil_dispatch_closed_event_sign(cfil_crypto_state_t crypto_state, |
3266 | struct socket *so, struct cfil_info *cfil_info, |
3267 | struct cfil_msg_sock_closed *msg) |
3268 | { |
3269 | struct cfil_crypto_data data = {}; |
3270 | struct soflow_hash_entry hash_entry = {}; |
3271 | struct soflow_hash_entry *hash_entry_ptr = NULL; |
3272 | struct inpcb *inp = (struct inpcb *)so->so_pcb; |
3273 | |
3274 | if (crypto_state == NULL || msg == NULL || |
3275 | so == NULL || inp == NULL || cfil_info == NULL) { |
3276 | return false; |
3277 | } |
3278 | |
3279 | data.sock_id = cfil_info->cfi_sock_id; |
3280 | data.direction = cfil_info->cfi_dir; |
3281 | |
3282 | data.pid = so->last_pid; |
3283 | memcpy(data.uuid, so->last_uuid, sizeof(uuid_t)); |
3284 | if (so->so_flags & SOF_DELEGATED) { |
3285 | data.effective_pid = so->e_pid; |
3286 | memcpy(data.effective_uuid, so->e_uuid, sizeof(uuid_t)); |
3287 | } else { |
3288 | data.effective_pid = so->last_pid; |
3289 | memcpy(data.effective_uuid, so->last_uuid, sizeof(uuid_t)); |
3290 | } |
3291 | data.socketProtocol = GET_SO_PROTO(so); |
3292 | |
3293 | /* |
3294 | * Fill in address info: |
3295 | * For UDP, use the cfil_info hash entry directly. |
3296 | * For TCP, compose a hash entry with the saved addresses. |
3297 | */ |
3298 | if (cfil_info->cfi_hash_entry != NULL) { |
3299 | hash_entry_ptr = cfil_info->cfi_hash_entry; |
3300 | } else if (cfil_info->cfi_so_attach_faddr.sa.sa_len > 0 || |
3301 | cfil_info->cfi_so_attach_laddr.sa.sa_len > 0) { |
3302 | soflow_fill_hash_entry_from_address(&hash_entry, TRUE, SA(&cfil_info->cfi_so_attach_laddr.sa), FALSE); |
3303 | soflow_fill_hash_entry_from_address(&hash_entry, FALSE, SA(&cfil_info->cfi_so_attach_faddr.sa), FALSE); |
3304 | hash_entry_ptr = &hash_entry; |
3305 | } |
3306 | if (hash_entry_ptr != NULL) { |
3307 | boolean_t outgoing = (cfil_info->cfi_dir == CFS_CONNECTION_DIR_OUT); |
3308 | union sockaddr_in_4_6 *src = outgoing ? &data.local : &data.remote; |
3309 | union sockaddr_in_4_6 *dst = outgoing ? &data.remote : &data.local; |
3310 | cfil_fill_event_msg_addresses(hash_entry_ptr, inp, src, dst, !IS_INP_V6(inp), outgoing); |
3311 | } |
3312 | |
3313 | data.byte_count_in = cfil_info->cfi_byte_inbound_count; |
3314 | data.byte_count_out = cfil_info->cfi_byte_outbound_count; |
3315 | |
3316 | struct cfil_sign_parameters parameters = { |
3317 | .csp_state = crypto_state, |
3318 | .csp_data = &data, |
3319 | .csp_signature = msg->cfc_signature, |
3320 | .csp_signature_size = &msg->cfc_signature_length |
3321 | }; |
3322 | necp_with_inp_domain_name(so, &parameters, cfil_sign_with_domain_name); |
3323 | |
3324 | if (msg->cfc_signature_length == 0) { |
3325 | CFIL_LOG(LOG_ERR, "CFIL: Failed to sign closed msg <sockID %llu <%llx>>", |
3326 | msg->cfc_msghdr.cfm_sock_id, msg->cfc_msghdr.cfm_sock_id); |
3327 | return false; |
3328 | } |
3329 | |
3330 | return true; |
3331 | } |
3332 | |
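/*
 * Callback for necp_with_inp_domain_name(): copy the flow's domain name,
 * when known, into the attach event message.
 */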
3333 | static void |
3334 | cfil_populate_attached_msg_domain_name(char *domain_name, void *ctx) |
3335 | { |
3336 | struct cfil_msg_sock_attached *msg_attached = (struct cfil_msg_sock_attached *)ctx; |
3337 | |
3338 | if (msg_attached == NULL) { |
3339 | return; |
3340 | } |
3341 | |
3342 | if (domain_name != NULL) { |
3343 | strlcpy(msg_attached->cfs_remote_domain_name, domain_name, sizeof(msg_attached->cfs_remote_domain_name)); |
3344 | } |
3345 | } |
3346 | |
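/*
 * Look up the audit token of the process identified by pid so it can be
 * reported to the user space agent in the attach event.
 */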
3347 | static bool |
3348 | cfil_copy_audit_token(pid_t pid, audit_token_t *buffer) |
3349 | { |
3350 | bool success = false; |
3351 | proc_t p = proc_find(pid); |
3352 | if (p != PROC_NULL) { |
3353 | task_t t = proc_task(p); |
3354 | if (t != TASK_NULL) { |
3355 | audit_token_t audit_token = {}; |
3356 | mach_msg_type_number_t count = TASK_AUDIT_TOKEN_COUNT; |
3357 | if (task_info(t, TASK_AUDIT_TOKEN, (task_info_t)&audit_token, &count) == KERN_SUCCESS) { |
3358 | memcpy(buffer, &audit_token, sizeof(audit_token_t)); |
3359 | success = true; |
3360 | } |
3361 | } |
3362 | proc_rele(p); |
3363 | } |
3364 | return success; |
3365 | } |
3366 | |
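/*
 * Send the CFM_OP_SOCKET_ATTACHED event to the filter agent over its
 * kernel control socket. A kcunit of 0 means "first filter in the ordered
 * entry list" and is resolved to the actual unit below. ENOBUFS from the
 * enqueue marks the filter as flow controlled so the event can be resent.
 */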
3367 | static int |
3368 | cfil_dispatch_attach_event(struct socket *so, struct cfil_info *cfil_info, |
3369 | uint32_t kcunit, int conn_dir) |
3370 | { |
3371 | errno_t error = 0; |
3372 | struct cfil_entry *entry = NULL; |
3373 | struct cfil_msg_sock_attached *msg_attached; |
3374 | struct content_filter *cfc = NULL; |
3375 | struct inpcb *inp = (struct inpcb *)so->so_pcb; |
3376 | struct soflow_hash_entry *hash_entry_ptr = NULL; |
3377 | struct soflow_hash_entry hash_entry; |
3378 | |
3379 | memset(&hash_entry, 0, sizeof(struct soflow_hash_entry)); |
3380 | |
3381 | socket_lock_assert_owned(so); |
3382 | |
3383 | cfil_rw_lock_shared(&cfil_lck_rw); |
3384 | |
3385 | if (so->so_proto == NULL || so->so_proto->pr_domain == NULL) { |
3386 | error = EINVAL; |
3387 | goto done; |
3388 | } |
3389 | |
3390 | if (kcunit == 0) { |
3391 | entry = SLIST_FIRST(&cfil_info->cfi_ordered_entries); |
3392 | } else { |
3393 | entry = &cfil_info->cfi_entries[kcunit - 1]; |
3394 | } |
3395 | |
3396 | if (entry == NULL) { |
3397 | goto done; |
3398 | } |
3399 | |
3400 | cfc = entry->cfe_filter; |
3401 | if (cfc == NULL) { |
3402 | goto done; |
3403 | } |
3404 | |
3405 | if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED)) { |
3406 | goto done; |
3407 | } |
3408 | |
3409 | if (kcunit == 0) { |
3410 | kcunit = CFI_ENTRY_KCUNIT(cfil_info, entry); |
3411 | } |
3412 | |
3413 | CFIL_LOG(LOG_INFO, "so %llx filter_control_unit %u kcunit %u", |
3414 | (uint64_t)VM_KERNEL_ADDRPERM(so), entry->cfe_necp_control_unit, kcunit); |
3415 | |
3416 | /* Would be wasteful to try when flow controlled */ |
3417 | if (cfc->cf_flags & CFF_FLOW_CONTROLLED) { |
3418 | error = ENOBUFS; |
3419 | goto done; |
3420 | } |
3421 | |
3422 | msg_attached = kalloc_data(sizeof(struct cfil_msg_sock_attached), Z_WAITOK); |
3423 | if (msg_attached == NULL) { |
3424 | error = ENOMEM; |
3425 | goto done; |
3426 | } |
3427 | |
3428 | bzero(msg_attached, sizeof(struct cfil_msg_sock_attached)); |
3429 | msg_attached->cfs_msghdr.cfm_len = sizeof(struct cfil_msg_sock_attached); |
3430 | msg_attached->cfs_msghdr.cfm_version = CFM_VERSION_CURRENT; |
3431 | msg_attached->cfs_msghdr.cfm_type = CFM_TYPE_EVENT; |
3432 | msg_attached->cfs_msghdr.cfm_op = CFM_OP_SOCKET_ATTACHED; |
3433 | msg_attached->cfs_msghdr.cfm_sock_id = entry->cfe_cfil_info->cfi_sock_id; |
3434 | |
3435 | msg_attached->cfs_sock_family = SOCK_DOM(so); |
3436 | msg_attached->cfs_sock_type = SOCK_TYPE(so); |
3437 | msg_attached->cfs_sock_protocol = GET_SO_PROTO(so); |
3438 | msg_attached->cfs_pid = so->last_pid; |
3439 | memcpy(msg_attached->cfs_uuid, so->last_uuid, sizeof(uuid_t)); |
3440 | if (so->so_flags & SOF_DELEGATED) { |
3441 | msg_attached->cfs_e_pid = so->e_pid; |
3442 | memcpy(msg_attached->cfs_e_uuid, so->e_uuid, sizeof(uuid_t)); |
3443 | } else { |
3444 | msg_attached->cfs_e_pid = so->last_pid; |
3445 | memcpy(msg_attached->cfs_e_uuid, so->last_uuid, sizeof(uuid_t)); |
3446 | } |
3447 | |
3448 | /* |
3449 | * Fill in address info: |
3450 | * For UDP, use the cfil_info hash entry directly. |
3451 | * For TCP, compose a hash entry with the saved addresses. |
3452 | */ |
3453 | if (cfil_info->cfi_hash_entry != NULL) { |
3454 | hash_entry_ptr = cfil_info->cfi_hash_entry; |
3455 | } else if (cfil_info->cfi_so_attach_faddr.sa.sa_len > 0 || |
3456 | cfil_info->cfi_so_attach_laddr.sa.sa_len > 0) { |
3457 | soflow_fill_hash_entry_from_address(&hash_entry, TRUE, SA(&cfil_info->cfi_so_attach_laddr.sa), FALSE); |
3458 | soflow_fill_hash_entry_from_address(&hash_entry, FALSE, SA(&cfil_info->cfi_so_attach_faddr.sa), FALSE); |
3459 | hash_entry_ptr = &hash_entry; |
3460 | } |
3461 | if (hash_entry_ptr != NULL) { |
3462 | cfil_fill_event_msg_addresses(hash_entry_ptr, inp, |
3463 | &msg_attached->cfs_src, &msg_attached->cfs_dst, |
3464 | !IS_INP_V6(inp), conn_dir == CFS_CONNECTION_DIR_OUT); |
3465 | } |
3466 | msg_attached->cfs_conn_dir = conn_dir; |
3467 | |
3468 | if (msg_attached->cfs_e_pid != 0) { |
3469 | if (!cfil_copy_audit_token(msg_attached->cfs_e_pid, (audit_token_t *)&msg_attached->cfs_audit_token)) { |
3470 | CFIL_LOG(LOG_ERR, "CFIL: Failed to get effective audit token for <sockID %llu <%llx>> ", |
3471 | entry->cfe_cfil_info->cfi_sock_id, entry->cfe_cfil_info->cfi_sock_id); |
3472 | } |
3473 | } |
3474 | |
3475 | if (msg_attached->cfs_pid != 0) { |
3476 | if (msg_attached->cfs_pid == msg_attached->cfs_e_pid) { |
3477 | memcpy(&msg_attached->cfs_real_audit_token, &msg_attached->cfs_audit_token, sizeof(msg_attached->cfs_real_audit_token)); |
3478 | } else if (!cfil_copy_audit_token(msg_attached->cfs_pid, (audit_token_t *)&msg_attached->cfs_real_audit_token)) { |
3479 | CFIL_LOG(LOG_ERR, "CFIL: Failed to get real audit token for <sockID %llu <%llx>> ", |
3480 | entry->cfe_cfil_info->cfi_sock_id, entry->cfe_cfil_info->cfi_sock_id); |
3481 | } |
3482 | } |
3483 | |
3484 | necp_with_inp_domain_name(so, msg_attached, cfil_populate_attached_msg_domain_name); |
3485 | |
3486 | if (cfil_info->cfi_debug) { |
3487 | cfil_info_log(LOG_ERR, cfil_info, "CFIL: SENDING ATTACH UP"); |
3488 | } |
3489 | |
3490 | cfil_dispatch_attach_event_sign(entry->cfe_filter->cf_crypto_state, cfil_info, msg_attached); |
3491 | |
3492 | error = ctl_enqueuedata(entry->cfe_filter->cf_kcref, |
3493 | entry->cfe_filter->cf_kcunit, |
3494 | msg_attached, |
3495 | sizeof(struct cfil_msg_sock_attached), |
3496 | CTL_DATA_EOR); |
3497 | |
3498 | kfree_data(msg_attached, sizeof(struct cfil_msg_sock_attached)); |
3499 | |
3500 | if (error != 0) { |
3501 | CFIL_LOG(LOG_ERR, "ctl_enqueuedata() failed: %d", error); |
3502 | goto done; |
3503 | } |
3504 | microuptime(&entry->cfe_last_event); |
3505 | cfil_info->cfi_first_event.tv_sec = entry->cfe_last_event.tv_sec; |
3506 | cfil_info->cfi_first_event.tv_usec = entry->cfe_last_event.tv_usec; |
3507 | |
3508 | entry->cfe_flags |= CFEF_SENT_SOCK_ATTACHED; |
3509 | OSIncrementAtomic(&cfil_stats.cfs_attach_event_ok); |
3510 | done: |
3511 | |
3512 | /* We can recover from flow control */ |
3513 | if (error == ENOBUFS) { |
3514 | entry->cfe_flags |= CFEF_FLOW_CONTROLLED; |
3515 | OSIncrementAtomic(&cfil_stats.cfs_attach_event_flow_control); |
3516 | |
3517 | if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) { |
3518 | cfil_rw_lock_exclusive(&cfil_lck_rw); |
3519 | } |
3520 | |
3521 | cfc->cf_flags |= CFF_FLOW_CONTROLLED; |
3522 | |
3523 | cfil_rw_unlock_exclusive(&cfil_lck_rw); |
3524 | } else { |
3525 | if (error != 0) { |
3526 | OSIncrementAtomic(&cfil_stats.cfs_attach_event_fail); |
3527 | } |
3528 | |
3529 | cfil_rw_unlock_shared(&cfil_lck_rw); |
3530 | } |
3531 | return error; |
3532 | } |
3533 | |
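/*
 * Send a CFM_OP_DISCONNECT_OUT or CFM_OP_DISCONNECT_IN event to the filter
 * agent. The event is sent at most once per direction, and the outgoing
 * event is deferred (EBUSY) while data is still queued for delivery to the
 * filter.
 */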
3534 | static int |
3535 | cfil_dispatch_disconnect_event(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing) |
3536 | { |
3537 | errno_t error = 0; |
3538 | struct mbuf *msg = NULL; |
3539 | struct cfil_entry *entry; |
3540 | struct cfe_buf *entrybuf; |
3541 | struct cfil_msg_hdr msg_disconnected; |
3542 | struct content_filter *cfc; |
3543 | |
3544 | socket_lock_assert_owned(so); |
3545 | |
3546 | cfil_rw_lock_shared(&cfil_lck_rw); |
3547 | |
3548 | entry = &cfil_info->cfi_entries[kcunit - 1]; |
3549 | if (outgoing) { |
3550 | entrybuf = &entry->cfe_snd; |
3551 | } else { |
3552 | entrybuf = &entry->cfe_rcv; |
3553 | } |
3554 | |
3555 | cfc = entry->cfe_filter; |
3556 | if (cfc == NULL) { |
3557 | goto done; |
3558 | } |
3559 | |
3560 | // Mark if this flow qualifies for immediate close. |
3561 | SET_NO_CLOSE_WAIT(sotoinpcb(so), cfil_info); |
3562 | |
3563 | CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d", |
3564 | (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); |
3565 | |
3566 | /* |
3567 | * Send the disconnection event once |
3568 | */ |
3569 | if ((outgoing && (entry->cfe_flags & CFEF_SENT_DISCONNECT_OUT)) || |
3570 | (!outgoing && (entry->cfe_flags & CFEF_SENT_DISCONNECT_IN))) { |
3571 | CFIL_LOG(LOG_INFO, "so %llx disconnect already sent", |
3572 | (uint64_t)VM_KERNEL_ADDRPERM(so)); |
3573 | goto done; |
3574 | } |
3575 | |
3576 | /* |
3577 | * We're not disconnected as long as some data is waiting |
3578 | * to be delivered to the filter |
3579 | */ |
3580 | if (outgoing && cfil_queue_empty(&entrybuf->cfe_ctl_q) == 0) { |
3581 | CFIL_LOG(LOG_INFO, "so %llx control queue not empty", |
3582 | (uint64_t)VM_KERNEL_ADDRPERM(so)); |
3583 | error = EBUSY; |
3584 | goto done; |
3585 | } |
3586 | /* Would be wasteful to try when flow controlled */ |
3587 | if (cfc->cf_flags & CFF_FLOW_CONTROLLED) { |
3588 | error = ENOBUFS; |
3589 | goto done; |
3590 | } |
3591 | |
3592 | if (cfil_info->cfi_debug) { |
3593 | cfil_info_log(LOG_ERR, cfil_info, outgoing ? |
3594 | "CFIL: OUT - SENDING DISCONNECT UP" : |
3595 | "CFIL: IN - SENDING DISCONNECT UP"); |
3596 | } |
3597 | |
3598 | bzero(&msg_disconnected, sizeof(struct cfil_msg_hdr)); |
3599 | msg_disconnected.cfm_len = sizeof(struct cfil_msg_hdr); |
3600 | msg_disconnected.cfm_version = CFM_VERSION_CURRENT; |
3601 | msg_disconnected.cfm_type = CFM_TYPE_EVENT; |
3602 | msg_disconnected.cfm_op = outgoing ? CFM_OP_DISCONNECT_OUT : |
3603 | CFM_OP_DISCONNECT_IN; |
3604 | msg_disconnected.cfm_sock_id = entry->cfe_cfil_info->cfi_sock_id; |
3605 | error = ctl_enqueuedata(entry->cfe_filter->cf_kcref, |
3606 | entry->cfe_filter->cf_kcunit, |
3607 | &msg_disconnected, |
3608 | sizeof(struct cfil_msg_hdr), |
3609 | CTL_DATA_EOR); |
3610 | if (error != 0) { |
3611 | CFIL_LOG(LOG_ERR, "ctl_enqueuedata() failed: %d", error); |
3612 | mbuf_freem(msg); |
3613 | goto done; |
3614 | } |
3615 | microuptime(&entry->cfe_last_event); |
3616 | CFI_ADD_TIME_LOG(cfil_info, &entry->cfe_last_event, &cfil_info->cfi_first_event, msg_disconnected.cfm_op); |
3617 | |
3618 | /* Remember we have sent the disconnection message */ |
3619 | if (outgoing) { |
3620 | entry->cfe_flags |= CFEF_SENT_DISCONNECT_OUT; |
3621 | OSIncrementAtomic(&cfil_stats.cfs_disconnect_out_event_ok); |
3622 | } else { |
3623 | entry->cfe_flags |= CFEF_SENT_DISCONNECT_IN; |
3624 | OSIncrementAtomic(&cfil_stats.cfs_disconnect_in_event_ok); |
3625 | } |
3626 | done: |
3627 | if (error == ENOBUFS) { |
3628 | entry->cfe_flags |= CFEF_FLOW_CONTROLLED; |
3629 | OSIncrementAtomic( |
3630 | &cfil_stats.cfs_disconnect_event_flow_control); |
3631 | |
3632 | if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) { |
3633 | cfil_rw_lock_exclusive(&cfil_lck_rw); |
3634 | } |
3635 | |
3636 | cfc->cf_flags |= CFF_FLOW_CONTROLLED; |
3637 | |
3638 | cfil_rw_unlock_exclusive(&cfil_lck_rw); |
3639 | } else { |
3640 | if (error != 0) { |
3641 | OSIncrementAtomic( |
3642 | &cfil_stats.cfs_disconnect_event_fail); |
3643 | } |
3644 | |
3645 | cfil_rw_unlock_shared(&cfil_lck_rw); |
3646 | } |
3647 | return error; |
3648 | } |
3649 | |
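/*
 * Send the CFM_OP_SOCKET_CLOSED event to the filter agent. The message
 * carries the per-flow event time log and byte counts; it is sent only
 * once per filter, and only if the attach event was sent first.
 */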
3650 | int |
3651 | cfil_dispatch_closed_event(struct socket *so, struct cfil_info *cfil_info, int kcunit) |
3652 | { |
3653 | struct cfil_entry *entry; |
3654 | struct cfil_msg_sock_closed msg_closed; |
3655 | errno_t error = 0; |
3656 | struct content_filter *cfc; |
3657 | struct inpcb *inp = NULL; |
3658 | |
3659 | socket_lock_assert_owned(so); |
3660 | |
3661 | cfil_rw_lock_shared(&cfil_lck_rw); |
3662 | |
3663 | entry = &cfil_info->cfi_entries[kcunit - 1]; |
3664 | cfc = entry->cfe_filter; |
3665 | if (cfc == NULL) { |
3666 | goto done; |
3667 | } |
3668 | |
3669 | CFIL_LOG(LOG_INFO, "so %llx kcunit %d", |
3670 | (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit); |
3671 | |
3672 | /* Would be wasteful to try when flow controlled */ |
3673 | if (cfc->cf_flags & CFF_FLOW_CONTROLLED) { |
3674 | error = ENOBUFS; |
3675 | goto done; |
3676 | } |
3677 | /* |
3678 | * Send a single closed message per filter |
3679 | */ |
3680 | if ((entry->cfe_flags & CFEF_SENT_SOCK_CLOSED) != 0) { |
3681 | goto done; |
3682 | } |
3683 | if ((entry->cfe_flags & CFEF_SENT_SOCK_ATTACHED) == 0) { |
3684 | goto done; |
3685 | } |
3686 | |
3687 | microuptime(&entry->cfe_last_event); |
3688 | CFI_ADD_TIME_LOG(cfil_info, &entry->cfe_last_event, &cfil_info->cfi_first_event, CFM_OP_SOCKET_CLOSED); |
3689 | |
3690 | bzero(&msg_closed, sizeof(struct cfil_msg_sock_closed)); |
3691 | msg_closed.cfc_msghdr.cfm_len = sizeof(struct cfil_msg_sock_closed); |
3692 | msg_closed.cfc_msghdr.cfm_version = CFM_VERSION_CURRENT; |
3693 | msg_closed.cfc_msghdr.cfm_type = CFM_TYPE_EVENT; |
3694 | msg_closed.cfc_msghdr.cfm_op = CFM_OP_SOCKET_CLOSED; |
3695 | msg_closed.cfc_msghdr.cfm_sock_id = entry->cfe_cfil_info->cfi_sock_id; |
3696 | msg_closed.cfc_first_event.tv_sec = cfil_info->cfi_first_event.tv_sec; |
3697 | msg_closed.cfc_first_event.tv_usec = cfil_info->cfi_first_event.tv_usec; |
3698 | memcpy(msg_closed.cfc_op_time, cfil_info->cfi_op_time, sizeof(uint32_t) * CFI_MAX_TIME_LOG_ENTRY); |
3699 | memcpy(msg_closed.cfc_op_list, cfil_info->cfi_op_list, sizeof(unsigned char) * CFI_MAX_TIME_LOG_ENTRY); |
3700 | msg_closed.cfc_op_list_ctr = cfil_info->cfi_op_list_ctr; |
3701 | msg_closed.cfc_byte_inbound_count = cfil_info->cfi_byte_inbound_count; |
3702 | msg_closed.cfc_byte_outbound_count = cfil_info->cfi_byte_outbound_count; |
3703 | |
3704 | if (entry->cfe_laddr_sent == false) { |
3705 | /* cache the local address if necessary */ |
3706 | if (cfil_info->cfi_so_attach_laddr.sa.sa_len == 0) { |
3707 | inp = cfil_info->cfi_so ? sotoinpcb(cfil_info->cfi_so) : NULL; |
3708 | if (inp != NULL) { |
3709 | boolean_t outgoing = (cfil_info->cfi_dir == CFS_CONNECTION_DIR_OUT); |
3710 | union sockaddr_in_4_6 *src = outgoing ? &cfil_info->cfi_so_attach_laddr : NULL; |
3711 | union sockaddr_in_4_6 *dst = outgoing ? NULL : &cfil_info->cfi_so_attach_laddr; |
3712 | cfil_fill_event_msg_addresses(cfil_info->cfi_hash_entry, inp, |
3713 | src, dst, !IS_INP_V6(inp), outgoing); |
3714 | } |
3715 | } |
3716 | |
3717 | if (cfil_info->cfi_so_attach_laddr.sa.sa_len != 0) { |
3718 | msg_closed.cfc_laddr.sin6 = cfil_info->cfi_so_attach_laddr.sin6; |
3719 | entry->cfe_laddr_sent = true; |
3720 | } |
3721 | } |
3722 | |
3723 | cfil_dispatch_closed_event_sign(entry->cfe_filter->cf_crypto_state, so, cfil_info, &msg_closed); |
3724 | |
3725 | if (cfil_info->cfi_debug) { |
3726 | cfil_info_log(LOG_ERR, cfil_info, "CFIL: SENDING CLOSED UP"); |
3727 | } |
3728 | |
3729 | /* for debugging |
3730 | * if (msg_closed.cfc_op_list_ctr > CFI_MAX_TIME_LOG_ENTRY) { |
3731 | * msg_closed.cfc_op_list_ctr = CFI_MAX_TIME_LOG_ENTRY; // just in case |
3732 | * } |
3733 | * for (unsigned int i = 0; i < msg_closed.cfc_op_list_ctr ; i++) { |
3734 | * CFIL_LOG(LOG_ERR, "MD: socket %llu event %2u, time + %u msec", msg_closed.cfc_msghdr.cfm_sock_id, (unsigned short)msg_closed.cfc_op_list[i], msg_closed.cfc_op_time[i]); |
3735 | * } |
3736 | */ |
3737 | |
3738 | error = ctl_enqueuedata(entry->cfe_filter->cf_kcref, |
3739 | entry->cfe_filter->cf_kcunit, |
3740 | &msg_closed, |
3741 | sizeof(struct cfil_msg_sock_closed), |
3742 | CTL_DATA_EOR); |
3743 | if (error != 0) { |
3744 | CFIL_LOG(LOG_ERR, "ctl_enqueuedata() failed: %d", |
3745 | error); |
3746 | goto done; |
3747 | } |
3748 | |
3749 | entry->cfe_flags |= CFEF_SENT_SOCK_CLOSED; |
3750 | OSIncrementAtomic(&cfil_stats.cfs_closed_event_ok); |
3751 | done: |
3752 | /* We can recover from flow control */ |
3753 | if (error == ENOBUFS) { |
3754 | entry->cfe_flags |= CFEF_FLOW_CONTROLLED; |
3755 | OSIncrementAtomic(&cfil_stats.cfs_closed_event_flow_control); |
3756 | |
3757 | if (!cfil_rw_lock_shared_to_exclusive(&cfil_lck_rw)) { |
3758 | cfil_rw_lock_exclusive(&cfil_lck_rw); |
3759 | } |
3760 | |
3761 | cfc->cf_flags |= CFF_FLOW_CONTROLLED; |
3762 | |
3763 | cfil_rw_unlock_exclusive(&cfil_lck_rw); |
3764 | } else { |
3765 | if (error != 0) { |
3766 | OSIncrementAtomic(&cfil_stats.cfs_closed_event_fail); |
3767 | } |
3768 | |
3769 | cfil_rw_unlock_shared(&cfil_lck_rw); |
3770 | } |
3771 | |
3772 | return error; |
3773 | } |
3774 | |
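/*
 * Helpers to populate the sockaddr_in_4_6 union carried in event messages
 * from an IPv6 address (with scope handling) or an IPv4 address and port.
 */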
3775 | static void |
3776 | fill_ip6_sockaddr_4_6(union sockaddr_in_4_6 *sin46, |
3777 | struct in6_addr *ip6, u_int16_t port, uint32_t ifscope) |
3778 | { |
3779 | if (sin46 == NULL) { |
3780 | return; |
3781 | } |
3782 | |
3783 | struct sockaddr_in6 *sin6 = &sin46->sin6; |
3784 | |
3785 | sin6->sin6_family = AF_INET6; |
3786 | sin6->sin6_len = sizeof(*sin6); |
3787 | sin6->sin6_port = port; |
3788 | sin6->sin6_addr = *ip6; |
3789 | if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) { |
3790 | sin6->sin6_scope_id = ifscope; |
3791 | if (in6_embedded_scope) { |
3792 | in6_verify_ifscope(&sin6->sin6_addr, sin6->sin6_scope_id); |
3793 | if (sin6->sin6_addr.s6_addr16[1] != 0) { |
3794 | sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]); |
3795 | sin6->sin6_addr.s6_addr16[1] = 0; |
3796 | } |
3797 | } |
3798 | } |
3799 | } |
3800 | |
3801 | static void |
3802 | fill_ip_sockaddr_4_6(union sockaddr_in_4_6 *sin46, |
3803 | struct in_addr ip, u_int16_t port) |
3804 | { |
3805 | if (sin46 == NULL) { |
3806 | return; |
3807 | } |
3808 | |
3809 | struct sockaddr_in *sin = &sin46->sin; |
3810 | |
3811 | sin->sin_family = AF_INET; |
3812 | sin->sin_len = sizeof(*sin); |
3813 | sin->sin_port = port; |
3814 | sin->sin_addr.s_addr = ip.s_addr; |
3815 | } |
3816 | |
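/*
 * Return the flow's local/foreign addresses and ports, preferring the
 * supplied soflow hash entry and falling back to the inpcb.
 */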
3817 | static void |
3818 | cfil_get_flow_address_v6(struct soflow_hash_entry *entry, struct inpcb *inp, |
3819 | struct in6_addr **laddr, struct in6_addr **faddr, |
3820 | u_int16_t *lport, u_int16_t *fport) |
3821 | { |
3822 | if (entry != NULL) { |
3823 | *laddr = &entry->soflow_laddr.addr6; |
3824 | *faddr = &entry->soflow_faddr.addr6; |
3825 | *lport = entry->soflow_lport; |
3826 | *fport = entry->soflow_fport; |
3827 | } else { |
3828 | *laddr = &inp->in6p_laddr; |
3829 | *faddr = &inp->in6p_faddr; |
3830 | *lport = inp->inp_lport; |
3831 | *fport = inp->inp_fport; |
3832 | } |
3833 | } |
3834 | |
3835 | static void |
3836 | cfil_get_flow_address(struct soflow_hash_entry *entry, struct inpcb *inp, |
3837 | struct in_addr *laddr, struct in_addr *faddr, |
3838 | u_int16_t *lport, u_int16_t *fport) |
3839 | { |
3840 | if (entry != NULL) { |
3841 | *laddr = entry->soflow_laddr.addr46.ia46_addr4; |
3842 | *faddr = entry->soflow_faddr.addr46.ia46_addr4; |
3843 | *lport = entry->soflow_lport; |
3844 | *fport = entry->soflow_fport; |
3845 | } else { |
3846 | *laddr = inp->inp_laddr; |
3847 | *faddr = inp->inp_faddr; |
3848 | *lport = inp->inp_lport; |
3849 | *fport = inp->inp_fport; |
3850 | } |
3851 | } |
3852 | |
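/*
 * Send a CFM_OP_DATA_OUT/CFM_OP_DATA_IN event to the filter agent carrying
 * a copy of the socket data (mbuf chain) along with the byte offsets of the
 * copied range within the stream.
 */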
3853 | static int |
3854 | cfil_dispatch_data_event(struct socket *so, struct cfil_info *cfil_info, uint32_t kcunit, int outgoing, |
3855 | struct mbuf *data, unsigned int copyoffset, unsigned int copylen) |
3856 | { |
3857 | errno_t error = 0; |
3858 | struct mbuf *copy = NULL; |
3859 | struct mbuf *msg = NULL; |
3860 | unsigned int one = 1; |
3861 | struct cfil_msg_data_event *data_req; |
3862 | size_t hdrsize; |
3863 | struct inpcb *inp = (struct inpcb *)so->so_pcb; |
3864 | struct cfil_entry *entry; |
3865 | struct cfe_buf *entrybuf; |
3866 | struct content_filter *cfc; |
3867 | struct timeval tv; |
3868 | int inp_flags = 0; |
3869 | |
3870 | cfil_rw_lock_shared(&cfil_lck_rw); |
3871 | |
3872 | entry = &cfil_info->cfi_entries[kcunit - 1]; |
3873 | if (outgoing) { |
3874 | entrybuf = &entry->cfe_snd; |
3875 | } else { |
3876 | entrybuf = &entry->cfe_rcv; |
3877 | } |
3878 | |
3879 | cfc = entry->cfe_filter; |
3880 | if (cfc == NULL) { |
3881 | goto done; |
3882 | } |
3883 | |
3884 | data = cfil_data_start(data); |
3885 | if (data == NULL) { |
3886 | CFIL_LOG(LOG_ERR, "No data start"); |
3887 | goto done; |
3888 | } |
3889 | |
3890 | CFIL_LOG(LOG_INFO, "so %llx kcunit %u outgoing %d", |
3891 | (uint64_t)VM_KERNEL_ADDRPERM(so), kcunit, outgoing); |
3892 | |
3893 | socket_lock_assert_owned(so); |
3894 | |
3895 | /* Would be wasteful to try when flow controlled */ |
3896 | if (cfc->cf_flags & CFF_FLOW_CONTROLLED) { |
3897 | error = ENOBUFS; |
3898 | goto done; |
3899 | } |
3900 | |
3901 | /* Make a copy of the data to pass to kernel control socket */ |
3902 | copy = m_copym_mode(data, copyoffset, copylen, M_DONTWAIT, NULL, NULL, |
3903 | M_COPYM_NOOP_HDR); |
3904 | if (copy == NULL) { |
3905 | CFIL_LOG(LOG_ERR, "m_copym_mode() failed"); |
3906 | error = ENOMEM; |
3907 | goto done; |
3908 | } |
3909 | |
3910 | /* We need an mbuf packet for the message header */ |
3911 | hdrsize = sizeof(struct cfil_msg_data_event); |
3912 | error = mbuf_allocpacket(MBUF_DONTWAIT, hdrsize, &one, &msg); |
3913 | if (error != 0) { |
3914 | CFIL_LOG(LOG_ERR, "mbuf_allocpacket() failed"); |
3915 | m_freem(copy); |
3916 | /* |
3917 | * Use ENOMEM here; ENOBUFS is reserved to indicate flow control. |
3918 | */ |
3919 | error = ENOMEM; |
3920 | goto done; |
3921 | } |
3922 | mbuf_setlen(msg, hdrsize); |
3923 | mbuf_pkthdr_setlen(msg, hdrsize + copylen); |
3924 | msg->m_next = copy; |
3925 | data_req = (struct cfil_msg_data_event *)mbuf_data(msg); |
3926 | bzero(data_req, hdrsize); |
3927 | data_req->cfd_msghdr.cfm_len = (uint32_t)hdrsize + copylen; |
3928 | data_req->cfd_msghdr.cfm_version = 1; |
3929 | data_req->cfd_msghdr.cfm_type = CFM_TYPE_EVENT; |
3930 | data_req->cfd_msghdr.cfm_op = |
3931 | outgoing ? CFM_OP_DATA_OUT : CFM_OP_DATA_IN; |
3932 | data_req->cfd_msghdr.cfm_sock_id = |
3933 | entry->cfe_cfil_info->cfi_sock_id; |
3934 | data_req->cfd_start_offset = entrybuf->cfe_peeked; |
3935 | data_req->cfd_end_offset = entrybuf->cfe_peeked + copylen; |
3936 | |
3937 | data_req->cfd_flags = 0; |
3938 | if (OPTIONAL_IP_HEADER(so)) { |
3939 | /* |
3940 | * For non-UDP/TCP traffic, indicate to filters if optional |
3941 | * IP header is present: |
3942 | * outgoing - indicate according to INP_HDRINCL flag |
3943 | * incoming - For IPv4 only, stripping of IP header is |
3944 | * optional. But for CFIL, we delay stripping |
3945 | * at rip_input. So CFIL always expects IP |
3946 | * frames. IP header will be stripped according |
3947 | * to INP_STRIPHDR flag later at reinjection. |
3948 | */ |
3949 | if ((!outgoing && !IS_INP_V6(inp)) || |
3950 | (outgoing && cfil_dgram_peek_socket_state(data, &inp_flags) && (inp_flags & INP_HDRINCL))) { |
3951 | data_req->cfd_flags |= CFD_DATA_FLAG_IP_HEADER; |
3952 | } |
3953 | } |
3954 | |
3955 | /* |
3956 | * Copy address/port into the event msg. |
3957 | * For non-connected sockets, the addresses must be copied from the |
3958 | * passed-in parameters. |
3959 | */ |
3960 | cfil_fill_event_msg_addresses(cfil_info->cfi_hash_entry, inp, |
3961 | &data_req->cfc_src, &data_req->cfc_dst, |
3962 | <
---|