/*
 * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>

#include <kern/thread_call.h>
#include <kern/zalloc.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/net_api_stats.h>
#include <net/necp.h>
#include <net/network_agent.h>
#include <net/ntstat.h>

#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/mp_pcb.h>
#include <netinet/tcp_cc.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cache.h>
#include <netinet6/in6_var.h>

#include <sys/domain.h>
#include <sys/file_internal.h>
#include <sys/kauth.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/priv.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/types.h>
#include <sys/codesign.h>
#include <libkern/section_keywords.h>
#include <IOKit/IOBSD.h>

#include <os/refcnt.h>

#include <CoreEntitlements/CoreEntitlements.h>

#if SKYWALK
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/flowswitch/flow/flow_var.h>
#include <skywalk/nexus/flowswitch/nx_flowswitch.h>
#endif /* SKYWALK */

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

/*
 * NECP Client Architecture
 * ------------------------------------------------
 * See <net/necp.c> for a discussion on NECP database architecture.
 *
 * Each client of NECP provides a set of parameters for a connection or network state
 * evaluation, on which NECP policy evaluation is run. This produces a policy result
 * which can be accessed by the originating process, along with events for when policy
 * results have changed.
 *
 * ------------------------------------------------
 * NECP Client FD
 * ------------------------------------------------
 * A process opens an NECP file descriptor using necp_open(). This is a very simple
 * file descriptor, upon which the process may do the following operations:
 *   - necp_client_action(...), to add/remove/query clients
 *   - kqueue, to watch for readable events
 *   - close(), to close the client session and release all clients
 *
 * Client objects are allocated structures that hang off of the file descriptor. Each
 * client contains:
 *   - Client ID, a UUID that references the client across the system
 *   - Parameters, a buffer of TLVs that describe the client's connection parameters,
 *     such as the remote and local endpoints, interface requirements, etc.
 *   - Result, a buffer of TLVs containing the current policy evaluation for the client.
 *     This result will be updated whenever a network change occurs that impacts the
 *     policy result for that client.
 *
 *                   +--------------+
 *                   |   NECP fd    |
 *                   +--------------+
 *                          ||
 *           ==================================
 *           ||              ||              ||
 *   +--------------+ +--------------+ +--------------+
 *   |   Client ID  | |   Client ID  | |   Client ID  |
 *   |     ----     | |     ----     | |     ----     |
 *   |  Parameters  | |  Parameters  | |  Parameters  |
 *   |     ----     | |     ----     | |     ----     |
 *   |    Result    | |    Result    | |    Result    |
 *   +--------------+ +--------------+ +--------------+
 *
 * ------------------------------------------------
 * Client Actions
 * ------------------------------------------------
 *   - Add. Input parameters as a buffer of TLVs, and output a client ID. Allocates a
 *     new client structure on the file descriptor.
 *   - Remove. Input a client ID. Removes a client structure from the file descriptor.
 *   - Copy Parameters. Input a client ID, and output parameter TLVs.
 *   - Copy Result. Input a client ID, and output result TLVs. Alternatively, input an
 *     empty client ID and get the next unread client result.
 *   - Copy List. List all client IDs.
 *
 * ------------------------------------------------
 * Client Policy Evaluation
 * ------------------------------------------------
 * Policies are evaluated for clients upon client creation, and upon update events,
 * which are network/agent/policy changes coalesced by a timer.
 *
 * The policy evaluation goes through the following steps:
 *   1. Parse client parameters.
 *   2. Select a scoped interface if applicable. This involves using require/prohibit
 *      parameters, along with the local address, to select the most appropriate
 *      interface if not explicitly set by the client parameters.
 *   3. Run NECP application-level policy evaluation.
 *   4. Set the policy result into the client result buffer.
 *
 * ------------------------------------------------
 * Client Observers
 * ------------------------------------------------
 * If necp_open() is called with the NECP_OPEN_FLAG_OBSERVER flag, and the process
 * passes the necessary privilege check, the fd is allowed to use necp_client_action()
 * to copy client state attached to the file descriptors of other processes, and to
 * list all client IDs on the system.
 */
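
/*
 * Illustrative userspace usage (a hedged sketch, not compiled as part of
 * this file): a process typically drives this interface with the
 * necp_open()/necp_client_action() system calls declared in the private
 * <net/necp.h> header; the exact prototypes and TLV layouts are defined
 * there, so treat the call shapes below as approximate.
 *
 *     int fd = necp_open(0);
 *     uuid_t client_id;
 *     uuid_clear(client_id);
 *
 *     // Add a client: parameter TLVs in, client ID out
 *     necp_client_action(fd, NECP_CLIENT_ACTION_ADD,
 *         client_id, sizeof(uuid_t), parameters_tlvs, parameters_length);
 *
 *     // Copy the current policy result for that client (result TLVs out)
 *     necp_client_action(fd, NECP_CLIENT_ACTION_COPY_RESULT,
 *         client_id, sizeof(uuid_t), result_buffer, sizeof(result_buffer));
 *
 *     // Remove the client explicitly, or just close(fd) to release all clients
 *     necp_client_action(fd, NECP_CLIENT_ACTION_REMOVE,
 *         client_id, sizeof(uuid_t), NULL, 0);
 *     close(fd);
 */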

extern u_int32_t necp_debug;

static int necpop_select(struct fileproc *, int, void *, vfs_context_t);
static int necpop_close(struct fileglob *, vfs_context_t);
static int necpop_kqfilter(struct fileproc *, struct knote *, struct kevent_qos_s *);

// Timer functions
static int necp_timeout_microseconds = 1000 * 100; // 100ms
static int necp_timeout_leeway_microseconds = 1000 * 50; // 50ms
#if SKYWALK
static int necp_collect_stats_timeout_microseconds = 1000 * 1000 * 1; // 1s
static int necp_collect_stats_timeout_leeway_microseconds = 1000 * 500; // 500ms
static int necp_close_arenas_timeout_microseconds = 1000 * 1000 * 10; // 10s
static int necp_close_arenas_timeout_leeway_microseconds = 1000 * 1000 * 1; // 1s
#endif /* SKYWALK */

static int necp_client_fd_count = 0;
static int necp_observer_fd_count = 0;
static int necp_client_count = 0;
static int necp_socket_flow_count = 0;
static int necp_if_flow_count = 0;
static int necp_observer_message_limit = 256;

/*
 * NECP client tracing control -
 *
 * necp_client_tracing_level: 1 for client trace, 2 for flow trace, 3 for parameter details
 * necp_client_tracing_pid: match client with pid
 */
static int necp_client_tracing_level = 0;
static int necp_client_tracing_pid = 0;

#define NECP_CLIENT_TRACE_LEVEL_CLIENT 1
#define NECP_CLIENT_TRACE_LEVEL_FLOW 2
#define NECP_CLIENT_TRACE_LEVEL_PARAMS 3

#define NECP_CLIENT_TRACE_PID_MATCHED(pid) \
    (pid == necp_client_tracing_pid)

#define NECP_ENABLE_CLIENT_TRACE(level) \
    ((necp_client_tracing_level >= level && \
    (!necp_client_tracing_pid || NECP_CLIENT_TRACE_PID_MATCHED(client->proc_pid))) ? necp_client_tracing_level : 0)

#define NECP_CLIENT_LOG(client, fmt, ...) \
    if (client && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_CLIENT)) { \
        uuid_string_t client_uuid_str = { }; \
        uuid_unparse_lower(client->client_id, client_uuid_str); \
        NECPLOG(LOG_NOTICE, "NECP_CLIENT_LOG <pid %d %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, ##__VA_ARGS__); \
    }

#define NECP_CLIENT_FLOW_LOG(client, flow, fmt, ...) \
    if (client && flow && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_FLOW)) { \
        uuid_string_t client_uuid_str = { }; \
        uuid_unparse_lower(client->client_id, client_uuid_str); \
        uuid_string_t flow_uuid_str = { }; \
        uuid_unparse_lower(flow->registration_id, flow_uuid_str); \
        NECPLOG(LOG_NOTICE, "NECP CLIENT FLOW TRACE <pid %d %s> <flow %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, flow_uuid_str, ##__VA_ARGS__); \
    }

#define NECP_CLIENT_PARAMS_LOG(client, fmt, ...) \
    if (client && NECP_ENABLE_CLIENT_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) { \
        uuid_string_t client_uuid_str = { }; \
        uuid_unparse_lower(client->client_id, client_uuid_str); \
        NECPLOG(LOG_NOTICE, "NECP_CLIENT_PARAMS_LOG <pid %d %s>: " fmt "\n", client ? client->proc_pid : 0, client_uuid_str, ##__VA_ARGS__); \
    }

#define NECP_SOCKET_PID(so) \
    ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid)

#define NECP_ENABLE_SOCKET_TRACE(level) \
    ((necp_client_tracing_level >= level && \
    (!necp_client_tracing_pid || NECP_CLIENT_TRACE_PID_MATCHED(NECP_SOCKET_PID(so)))) ? necp_client_tracing_level : 0)

#define NECP_SOCKET_PARAMS_LOG(so, fmt, ...) \
    if (so && NECP_ENABLE_SOCKET_TRACE(NECP_CLIENT_TRACE_LEVEL_PARAMS)) { \
        NECPLOG(LOG_NOTICE, "NECP_SOCKET_PARAMS_LOG <pid %d>: " fmt "\n", NECP_SOCKET_PID(so), ##__VA_ARGS__); \
    }

#define NECP_SOCKET_ATTRIBUTE_LOG(fmt, ...) \
    if (necp_client_tracing_level >= NECP_CLIENT_TRACE_LEVEL_PARAMS) { \
        NECPLOG(LOG_NOTICE, "NECP_SOCKET_ATTRIBUTE_LOG: " fmt "\n", ##__VA_ARGS__); \
    }

#define NECP_CLIENT_TRACKER_LOG(pid, fmt, ...) \
    if (pid) { \
        NECPLOG(LOG_NOTICE, "NECP_CLIENT_TRACKER_LOG <pid %d>: " fmt "\n", pid, ##__VA_ARGS__); \
    }
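
/*
 * Usage note (hedged example): with the sysctl
 * net.necp.necp_client_tracing_level set to 1 or higher (and optionally
 * net.necp.necp_client_tracing_pid set to a pid of interest), call sites
 * in this file can emit per-client traces, for example:
 *
 *     NECP_CLIENT_LOG(client, "Policy result updated");
 *
 * The log only fires when the tracing level and the optional pid filter
 * match, so the macros are cheap enough to leave in place on hot paths.
 */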

#if SKYWALK
static int necp_arena_count = 0;
static int necp_sysctl_arena_count = 0;
static int necp_nexus_flow_count = 0;

/* userspace stats sanity check range, same unit as TCP (see TCP_RTT_SCALE) */
static uint32_t necp_client_stats_rtt_floor = 1; // 32us
static uint32_t necp_client_stats_rtt_ceiling = 1920000; // 60s
const static struct sk_stats_flow ntstat_sk_stats_zero;
#endif /* SKYWALK */

/*
 * Global lock to protect socket inp_necp_attributes across updates.
 * NECP updating these attributes and clients accessing these attributes
 * must take this lock.
 */
static LCK_GRP_DECLARE(necp_socket_attr_lock_grp, "necpSocketAttrGroup");
LCK_MTX_DECLARE(necp_socket_attr_lock, &necp_socket_attr_lock_grp);

os_refgrp_decl(static, necp_client_refgrp, "NECPClientRefGroup", NULL);

SYSCTL_INT(_net_necp, NECPCTL_CLIENT_FD_COUNT, client_fd_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_client_fd_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_FD_COUNT, observer_fd_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_observer_fd_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_COUNT, client_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_client_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_SOCKET_FLOW_COUNT, socket_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_socket_flow_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_IF_FLOW_COUNT, if_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_if_flow_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_OBSERVER_MESSAGE_LIMIT, observer_message_limit, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_observer_message_limit, 256, "");
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_TRACING_LEVEL, necp_client_tracing_level, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_client_tracing_level, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_CLIENT_TRACING_PID, necp_client_tracing_pid, CTLFLAG_LOCKED | CTLFLAG_RW, &necp_client_tracing_pid, 0, "");

#if SKYWALK
SYSCTL_INT(_net_necp, NECPCTL_ARENA_COUNT, arena_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_arena_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_SYSCTL_ARENA_COUNT, sysctl_arena_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_sysctl_arena_count, 0, "");
SYSCTL_INT(_net_necp, NECPCTL_NEXUS_FLOW_COUNT, nexus_flow_count, CTLFLAG_LOCKED | CTLFLAG_RD, &necp_nexus_flow_count, 0, "");
#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_net_necp, OID_AUTO, collect_stats_interval_us, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_collect_stats_timeout_microseconds, 0, "");
SYSCTL_UINT(_net_necp, OID_AUTO, necp_client_stats_rtt_floor, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_rtt_floor, 0, "");
SYSCTL_UINT(_net_necp, OID_AUTO, necp_client_stats_rtt_ceiling, CTLFLAG_RW | CTLFLAG_LOCKED, &necp_client_stats_rtt_ceiling, 0, "");
#endif /* (DEVELOPMENT || DEBUG) */
#endif /* SKYWALK */

#define NECP_MAX_CLIENT_LIST_SIZE 1024 * 1024 // 1MB
#define NECP_MAX_AGENT_ACTION_SIZE 10 * 1024 // 10K

extern int tvtohz(struct timeval *);
extern unsigned int get_maxmtu(struct rtentry *);

// Parsed parameters
#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR 0x00001
#define NECP_PARSED_PARAMETERS_FIELD_REMOTE_ADDR 0x00002
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF 0x00004
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IF 0x00008
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IFTYPE 0x00010
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_IFTYPE 0x00020
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT 0x00040
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT 0x00080
#define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT 0x00100
#define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT 0x00200
#define NECP_PARSED_PARAMETERS_FIELD_REQUIRED_AGENT_TYPE 0x00400
#define NECP_PARSED_PARAMETERS_FIELD_PROHIBITED_AGENT_TYPE 0x00800
#define NECP_PARSED_PARAMETERS_FIELD_PREFERRED_AGENT_TYPE 0x01000
#define NECP_PARSED_PARAMETERS_FIELD_AVOIDED_AGENT_TYPE 0x02000
#define NECP_PARSED_PARAMETERS_FIELD_FLAGS 0x04000
#define NECP_PARSED_PARAMETERS_FIELD_IP_PROTOCOL 0x08000
#define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_PID 0x10000
#define NECP_PARSED_PARAMETERS_FIELD_EFFECTIVE_UUID 0x20000
#define NECP_PARSED_PARAMETERS_FIELD_TRAFFIC_CLASS 0x40000
#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_PORT 0x80000
#define NECP_PARSED_PARAMETERS_FIELD_DELEGATED_UPID 0x100000
#define NECP_PARSED_PARAMETERS_FIELD_ETHERTYPE 0x200000
#define NECP_PARSED_PARAMETERS_FIELD_TRANSPORT_PROTOCOL 0x400000
#define NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR_PREFERENCE 0x800000
#define NECP_PARSED_PARAMETERS_FIELD_ATTRIBUTED_BUNDLE_IDENTIFIER 0x1000000
#define NECP_PARSED_PARAMETERS_FIELD_PARENT_UUID 0x2000000
#define NECP_PARSED_PARAMETERS_FIELD_FLOW_DEMUX_PATTERN 0x4000000
#define NECP_PARSED_PARAMETERS_FIELD_UID 0x8000000
#define NECP_PARSED_PARAMETERS_FIELD_PERSONA_ID 0x10000000


#define NECP_MAX_INTERFACE_PARAMETERS 16
#define NECP_MAX_AGENT_PARAMETERS 4
struct necp_client_parsed_parameters {
    u_int32_t valid_fields;
    u_int32_t flags;
    u_int64_t delegated_upid;
    union necp_sockaddr_union local_addr;
    union necp_sockaddr_union remote_addr;
    u_int32_t required_interface_index;
    char prohibited_interfaces[NECP_MAX_INTERFACE_PARAMETERS][IFXNAMSIZ];
    u_int8_t required_interface_type;
    u_int8_t local_address_preference;
    u_int8_t prohibited_interface_types[NECP_MAX_INTERFACE_PARAMETERS];
    struct necp_client_parameter_netagent_type required_netagent_types[NECP_MAX_AGENT_PARAMETERS];
    struct necp_client_parameter_netagent_type prohibited_netagent_types[NECP_MAX_AGENT_PARAMETERS];
    struct necp_client_parameter_netagent_type preferred_netagent_types[NECP_MAX_AGENT_PARAMETERS];
    struct necp_client_parameter_netagent_type avoided_netagent_types[NECP_MAX_AGENT_PARAMETERS];
    uuid_t required_netagents[NECP_MAX_AGENT_PARAMETERS];
    uuid_t prohibited_netagents[NECP_MAX_AGENT_PARAMETERS];
    uuid_t preferred_netagents[NECP_MAX_AGENT_PARAMETERS];
    uuid_t avoided_netagents[NECP_MAX_AGENT_PARAMETERS];
    u_int8_t ip_protocol;
    u_int8_t transport_protocol;
    u_int16_t ethertype;
    pid_t effective_pid;
    uuid_t effective_uuid;
    uuid_t parent_uuid;
    u_int32_t traffic_class;
    struct necp_demux_pattern demux_patterns[NECP_MAX_DEMUX_PATTERNS];
    u_int8_t demux_pattern_count;
    uid_t uid;
    uid_t persona_id;
};
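
/*
 * Hedged sketch of how valid_fields is meant to be consumed: the parameter
 * parser sets one NECP_PARSED_PARAMETERS_FIELD_* bit for each TLV it
 * successfully parses, and consumers test the bit before trusting the
 * corresponding member. Illustrative only (not a call site in this file):
 *
 *     if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_LOCAL_ADDR) {
 *         // local_addr holds the caller-supplied local endpoint
 *     }
 *     if (parsed_parameters->valid_fields & NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF) {
 *         // required_interface_index identifies the required interface
 *     }
 */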

static bool
necp_find_matching_interface_index(struct necp_client_parsed_parameters *parsed_parameters,
    u_int *return_ifindex, bool *validate_agents);

static bool
necp_ifnet_matches_local_address(struct ifnet *ifp, struct sockaddr *sa);

static bool
necp_ifnet_matches_parameters(struct ifnet *ifp,
    struct necp_client_parsed_parameters *parsed_parameters,
    u_int32_t override_flags,
    u_int32_t *preferred_count,
    bool secondary_interface,
    bool require_scoped_field);

static const struct fileops necp_fd_ops = {
    .fo_type = DTYPE_NETPOLICY,
    .fo_read = fo_no_read,
    .fo_write = fo_no_write,
    .fo_ioctl = fo_no_ioctl,
    .fo_select = necpop_select,
    .fo_close = necpop_close,
    .fo_drain = fo_no_drain,
    .fo_kqfilter = necpop_kqfilter,
};

struct necp_client_assertion {
    LIST_ENTRY(necp_client_assertion) assertion_chain;
    uuid_t asserted_netagent;
};

struct necp_client_flow_header {
    struct necp_tlv_header outer_header;
    struct necp_tlv_header flow_id_tlv_header;
    uuid_t flow_id;
    struct necp_tlv_header flags_tlv_header;
    u_int32_t flags_value;
    struct necp_tlv_header interface_tlv_header;
    struct necp_client_result_interface interface_value;
} __attribute__((__packed__));

struct necp_client_flow_protoctl_event_header {
    struct necp_tlv_header protoctl_tlv_header;
    struct necp_client_flow_protoctl_event protoctl_event;
} __attribute__((__packed__));

struct necp_client_nexus_flow_header {
    struct necp_client_flow_header flow_header;
    struct necp_tlv_header agent_tlv_header;
    struct necp_client_result_netagent agent_value;
    struct necp_tlv_header tfo_cookie_tlv_header;
    u_int8_t tfo_cookie_value[NECP_TFO_COOKIE_LEN_MAX];
} __attribute__((__packed__));

#if SKYWALK
struct necp_arena_info;
#endif

struct necp_client_flow {
    LIST_ENTRY(necp_client_flow) flow_chain;
    unsigned invalid : 1;
    unsigned nexus : 1; // If true, flow is a nexus; if false, flow is attached to socket
    unsigned socket : 1;
    unsigned viable : 1;
    unsigned assigned : 1;
    unsigned has_protoctl_event : 1;
    unsigned check_tcp_heuristics : 1;
    unsigned _reserved : 1;
    union {
        uuid_t nexus_agent;
        struct {
            void *socket_handle;
            necp_client_flow_cb cb;
        };
    } u;
    uint32_t interface_index;
    u_short delegated_interface_index;
    uint32_t interface_flags;
    uint32_t necp_flow_flags;
    struct necp_client_flow_protoctl_event protoctl_event;
    union necp_sockaddr_union local_addr;
    union necp_sockaddr_union remote_addr;

    size_t assigned_results_length;
    u_int8_t *assigned_results;
};

struct necp_client_flow_registration {
    RB_ENTRY(necp_client_flow_registration) fd_link;
    RB_ENTRY(necp_client_flow_registration) global_link;
    RB_ENTRY(necp_client_flow_registration) client_link;
    LIST_ENTRY(necp_client_flow_registration) collect_stats_chain;
    uuid_t registration_id;
    u_int32_t flags;
    unsigned flow_result_read : 1;
    unsigned defunct : 1;
    void *interface_handle;
    necp_client_flow_cb interface_cb;
    struct necp_client *client;
    LIST_HEAD(_necp_registration_flow_list, necp_client_flow) flow_list;
#if SKYWALK
    struct necp_arena_info *stats_arena; /* arena where the stats objects came from */
    void *kstats_kaddr; /* kernel snapshot of untrusted userspace stats, for calculating delta */
    mach_vm_address_t ustats_uaddr; /* userspace stats (untrusted) */
    nstat_userland_context stats_handler_context;
    struct flow_stats *nexus_stats; /* shared stats objects between necp_client and skywalk */
#endif /* SKYWALK */
    u_int64_t last_interface_details __attribute__((aligned(sizeof(u_int64_t))));
};

static int necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1);

RB_HEAD(_necp_client_flow_tree, necp_client_flow_registration);
RB_PROTOTYPE_PREV(_necp_client_flow_tree, necp_client_flow_registration, client_link, necp_client_flow_id_cmp);
RB_GENERATE_PREV(_necp_client_flow_tree, necp_client_flow_registration, client_link, necp_client_flow_id_cmp);

#define NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT 4
#define NECP_CLIENT_MAX_INTERFACE_OPTIONS 32

#define NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT (NECP_CLIENT_MAX_INTERFACE_OPTIONS - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT)

struct necp_client {
    RB_ENTRY(necp_client) link;
    RB_ENTRY(necp_client) global_link;

    decl_lck_mtx_data(, lock);
    decl_lck_mtx_data(, route_lock);
    os_refcnt_t reference_count;

    uuid_t client_id;
    unsigned result_read : 1;
    unsigned group_members_read : 1;
    unsigned allow_multiple_flows : 1;
    unsigned legacy_client_is_flow : 1;

    unsigned platform_binary : 1;
    unsigned validated_parent : 1;

    size_t result_length;
    u_int8_t result[NECP_BASE_CLIENT_RESULT_SIZE];

    necp_policy_id policy_id;
    necp_policy_id skip_policy_id;

    u_int8_t ip_protocol;
    int proc_pid;

    u_int64_t delegated_upid;

    struct _necp_client_flow_tree flow_registrations;
    LIST_HEAD(_necp_client_assertion_list, necp_client_assertion) assertion_list;

    size_t assigned_group_members_length;
    u_int8_t *assigned_group_members;

    struct rtentry *current_route;

    struct necp_client_interface_option interface_options[NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
    struct necp_client_interface_option *extra_interface_options;
    u_int8_t interface_option_count; // Number in interface_options + extra_interface_options

    struct necp_client_result_netagent failed_trigger_agent;

    void *agent_handle;

    uuid_t override_euuid;

#if SKYWALK
    netns_token port_reservation;
    nstat_context nstat_context;
    uuid_t latest_flow_registration_id;
    uuid_t parent_client_id;
    struct necp_client *original_parameters_source;
#endif /* SKYWALK */

    size_t parameters_length;
    u_int8_t *parameters;
};

#define NECP_CLIENT_LOCK(_c) lck_mtx_lock(&_c->lock)
#define NECP_CLIENT_UNLOCK(_c) lck_mtx_unlock(&_c->lock)
#define NECP_CLIENT_ASSERT_LOCKED(_c) LCK_MTX_ASSERT(&_c->lock, LCK_MTX_ASSERT_OWNED)
#define NECP_CLIENT_ASSERT_UNLOCKED(_c) LCK_MTX_ASSERT(&_c->lock, LCK_MTX_ASSERT_NOTOWNED)

#define NECP_CLIENT_ROUTE_LOCK(_c) lck_mtx_lock(&_c->route_lock)
#define NECP_CLIENT_ROUTE_UNLOCK(_c) lck_mtx_unlock(&_c->route_lock)

static void necp_client_retain_locked(struct necp_client *client);
static void necp_client_retain(struct necp_client *client);

static bool necp_client_release_locked(struct necp_client *client);
static bool necp_client_release(struct necp_client *client);

static void
necp_client_add_assertion(struct necp_client *client, uuid_t netagent_uuid);

static bool
necp_client_remove_assertion(struct necp_client *client, uuid_t netagent_uuid);

static int
necp_client_copy_parameters_locked(struct necp_client *client,
    struct necp_client_nexus_parameters *parameters);

LIST_HEAD(_necp_flow_registration_list, necp_client_flow_registration);
static struct _necp_flow_registration_list necp_collect_stats_flow_list;

struct necp_flow_defunct {
    LIST_ENTRY(necp_flow_defunct) chain;

    uuid_t flow_id;
    uuid_t nexus_agent;
    void *agent_handle;
    int proc_pid;
    u_int32_t flags;
    struct necp_client_agent_parameters close_parameters;
    bool has_close_parameters;
};

LIST_HEAD(_necp_flow_defunct_list, necp_flow_defunct);

static int necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1);

RB_HEAD(_necp_client_tree, necp_client);
RB_PROTOTYPE_PREV(_necp_client_tree, necp_client, link, necp_client_id_cmp);
RB_GENERATE_PREV(_necp_client_tree, necp_client, link, necp_client_id_cmp);

RB_HEAD(_necp_client_global_tree, necp_client);
RB_PROTOTYPE_PREV(_necp_client_global_tree, necp_client, global_link, necp_client_id_cmp);
RB_GENERATE_PREV(_necp_client_global_tree, necp_client, global_link, necp_client_id_cmp);

RB_HEAD(_necp_fd_flow_tree, necp_client_flow_registration);
RB_PROTOTYPE_PREV(_necp_fd_flow_tree, necp_client_flow_registration, fd_link, necp_client_flow_id_cmp);
RB_GENERATE_PREV(_necp_fd_flow_tree, necp_client_flow_registration, fd_link, necp_client_flow_id_cmp);

RB_HEAD(_necp_client_flow_global_tree, necp_client_flow_registration);
RB_PROTOTYPE_PREV(_necp_client_flow_global_tree, necp_client_flow_registration, global_link, necp_client_flow_id_cmp);
RB_GENERATE_PREV(_necp_client_flow_global_tree, necp_client_flow_registration, global_link, necp_client_flow_id_cmp);

static struct _necp_client_global_tree necp_client_global_tree;
static struct _necp_client_flow_global_tree necp_client_flow_global_tree;

struct necp_client_update {
    TAILQ_ENTRY(necp_client_update) chain;

    uuid_t client_id;

    size_t update_length;
    struct necp_client_observer_update *update;
};

#if SKYWALK
struct necp_arena_info {
    LIST_ENTRY(necp_arena_info) nai_chain;
    u_int32_t nai_flags;
    pid_t nai_proc_pid;
    struct skmem_arena *nai_arena;
    struct skmem_arena_mmap_info nai_mmap;
    mach_vm_offset_t nai_roff;
    u_int32_t nai_use_count;
};
#endif /* SKYWALK */

#define NAIF_ATTACHED 0x1 // arena is attached to list
#define NAIF_REDIRECT 0x2 // arena mmap has been redirected
#define NAIF_DEFUNCT 0x4 // arena is now defunct

#define NECP_FD_REPORTED_AGENT_COUNT 2

struct necp_fd_reported_agents {
    uuid_t agent_uuid[NECP_FD_REPORTED_AGENT_COUNT];
};

struct necp_fd_data {
    u_int8_t necp_fd_type;
    LIST_ENTRY(necp_fd_data) chain;
    struct _necp_client_tree clients;
    struct _necp_fd_flow_tree flows;
    TAILQ_HEAD(_necp_client_update_list, necp_client_update) update_list;
    int update_count;
    int flags;

    unsigned background : 1;
    unsigned request_in_process_flow_divert : 1;

    int proc_pid;
    decl_lck_mtx_data(, fd_lock);
    struct selinfo si;

    struct necp_fd_reported_agents reported_agents;
#if SKYWALK
    // Arenas and their mmap info for per-process stats. Stats objects are allocated from an active arena
    // that is not redirected/defunct. The stats_arena_active keeps track of such an arena, and it also
    // holds a reference count on the object. Each flow allocating a stats object also holds a reference
    // on the necp_arena_info (where the object got allocated from). During defunct, we redirect the mapping
    // of the arena such that any attempt to access (read/write) will result in getting zero-filled pages.
    // We then go thru all of the flows for the process and free the stats objects associated with them,
    // followed by destroying the skmem region(s) associated with the arena. The stats_arena_list keeps
    // track of all current and defunct stats arenas; there could be more than one arena created for the
    // process as the arena destruction happens when its reference count drops to 0.
    struct necp_arena_info *stats_arena_active;
    LIST_HEAD(_necp_arena_info_list, necp_arena_info) stats_arena_list;
    u_int32_t stats_arena_gencnt;

    struct skmem_arena *sysctl_arena;
    struct skmem_arena_mmap_info sysctl_mmap;
    mach_vm_offset_t system_sysctls_roff;
#endif /* SKYWALK */
};

#define NECP_FD_LOCK(_f) lck_mtx_lock(&_f->fd_lock)
#define NECP_FD_UNLOCK(_f) lck_mtx_unlock(&_f->fd_lock)
#define NECP_FD_ASSERT_LOCKED(_f) LCK_MTX_ASSERT(&_f->fd_lock, LCK_MTX_ASSERT_OWNED)
#define NECP_FD_ASSERT_UNLOCKED(_f) LCK_MTX_ASSERT(&_f->fd_lock, LCK_MTX_ASSERT_NOTOWNED)

static LIST_HEAD(_necp_fd_list, necp_fd_data) necp_fd_list;
static LIST_HEAD(_necp_fd_observer_list, necp_fd_data) necp_fd_observer_list;

#if SKYWALK
static KALLOC_TYPE_DEFINE(necp_arena_info_zone, struct necp_arena_info, NET_KT_DEFAULT);
#endif /* SKYWALK */

static LCK_ATTR_DECLARE(necp_fd_mtx_attr, 0, 0);
static LCK_GRP_DECLARE(necp_fd_mtx_grp, "necp_fd");

static LCK_RW_DECLARE_ATTR(necp_fd_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_observer_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_client_tree_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_flow_tree_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
static LCK_RW_DECLARE_ATTR(necp_collect_stats_list_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);

#define NECP_STATS_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_collect_stats_list_lock)
#define NECP_STATS_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_collect_stats_list_lock)
#define NECP_STATS_LIST_UNLOCK() lck_rw_done(&necp_collect_stats_list_lock)

#define NECP_CLIENT_TREE_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_client_tree_lock)
#define NECP_CLIENT_TREE_LOCK_SHARED() lck_rw_lock_shared(&necp_client_tree_lock)
#define NECP_CLIENT_TREE_UNLOCK() lck_rw_done(&necp_client_tree_lock)
#define NECP_CLIENT_TREE_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_client_tree_lock, LCK_RW_ASSERT_HELD)

#define NECP_FLOW_TREE_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_flow_tree_lock)
#define NECP_FLOW_TREE_LOCK_SHARED() lck_rw_lock_shared(&necp_flow_tree_lock)
#define NECP_FLOW_TREE_UNLOCK() lck_rw_done(&necp_flow_tree_lock)
#define NECP_FLOW_TREE_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_flow_tree_lock, LCK_RW_ASSERT_HELD)

#define NECP_FD_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_fd_lock)
#define NECP_FD_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_fd_lock)
#define NECP_FD_LIST_UNLOCK() lck_rw_done(&necp_fd_lock)
#define NECP_FD_LIST_ASSERT_LOCKED() LCK_RW_ASSERT(&necp_fd_lock, LCK_RW_ASSERT_HELD)

#define NECP_OBSERVER_LIST_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_observer_lock)
#define NECP_OBSERVER_LIST_LOCK_SHARED() lck_rw_lock_shared(&necp_observer_lock)
#define NECP_OBSERVER_LIST_UNLOCK() lck_rw_done(&necp_observer_lock)

// Locking Notes

// Take NECP_FD_LIST_LOCK when accessing or modifying the necp_fd_list
// Take NECP_CLIENT_TREE_LOCK when accessing or modifying the necp_client_global_tree
// Take NECP_FLOW_TREE_LOCK when accessing or modifying the necp_client_flow_global_tree
// Take NECP_STATS_LIST_LOCK when accessing or modifying the necp_collect_stats_flow_list
// Take NECP_FD_LOCK when accessing or modifying an necp_fd_data entry
// Take NECP_CLIENT_LOCK when accessing or modifying a single necp_client
// Take NECP_CLIENT_ROUTE_LOCK when accessing or modifying a client's route

// Precedence, where 1 is the first lock that must be taken
// 1. NECP_FD_LIST_LOCK
// 2. NECP_FD_LOCK (any)
// 3. NECP_CLIENT_TREE_LOCK
// 4. NECP_CLIENT_LOCK (any)
// 5. NECP_FLOW_TREE_LOCK
// 6. NECP_STATS_LIST_LOCK
// 7. NECP_CLIENT_ROUTE_LOCK (any)
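
// Hedged illustration of the precedence rules above: a path that needs both
// an fd and one of its clients takes the locks in the documented order and
// drops them in reverse, for example:
//
//     NECP_FD_LIST_LOCK_SHARED();        // 1
//     NECP_FD_LOCK(fd_data);             // 2
//     NECP_CLIENT_LOCK(client);          // 4 (3 would come first if the global tree were needed)
//     ...
//     NECP_CLIENT_UNLOCK(client);
//     NECP_FD_UNLOCK(fd_data);
//     NECP_FD_LIST_UNLOCK();
//
// Taking a lower-numbered lock while already holding a higher-numbered one
// risks deadlock against the other paths in this file.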

static thread_call_t necp_client_update_tcall;
static uint32_t necp_update_all_clients_sched_cnt = 0;
static uint64_t necp_update_all_clients_sched_abstime = 0;
static LCK_RW_DECLARE_ATTR(necp_update_all_clients_lock, &necp_fd_mtx_grp, &necp_fd_mtx_attr);
#define NECP_UPDATE_ALL_CLIENTS_LOCK_EXCLUSIVE() lck_rw_lock_exclusive(&necp_update_all_clients_lock)
#define NECP_UPDATE_ALL_CLIENTS_SHARED_TO_EXCLUSIVE() lck_rw_lock_shared_to_exclusive(&necp_update_all_clients_lock)
#define NECP_UPDATE_ALL_CLIENTS_SHARED() lck_rw_lock_shared(&necp_update_all_clients_lock)
#define NECP_UPDATE_ALL_CLIENTS_UNLOCK() lck_rw_done(&necp_update_all_clients_lock)

// Array of PIDs that will trigger in-process flow divert, protected by NECP_FD_LIST_LOCK
#define NECP_MAX_FLOW_DIVERT_NEEDED_PIDS 4
static pid_t necp_flow_divert_needed_pids[NECP_MAX_FLOW_DIVERT_NEEDED_PIDS];

#if SKYWALK
static thread_call_t necp_client_collect_stats_tcall;
static thread_call_t necp_close_empty_arenas_tcall;

static void necp_fd_insert_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai);
static void necp_fd_remove_stats_arena(struct necp_fd_data *fd_data, struct necp_arena_info *nai);
static struct necp_arena_info *necp_fd_mredirect_stats_arena(struct necp_fd_data *fd_data, struct proc *proc);

static void necp_arena_info_retain(struct necp_arena_info *nai);
static void necp_arena_info_release(struct necp_arena_info *nai);
static struct necp_arena_info *necp_arena_info_alloc(void);
static void necp_arena_info_free(struct necp_arena_info *nai);

static int necp_arena_initialize(struct necp_fd_data *fd_data, bool locked);
static int necp_stats_initialize(struct necp_fd_data *fd_data, struct necp_client *client,
    struct necp_client_flow_registration *flow_registration, struct necp_stats_bufreq *bufreq);
static int necp_arena_create(struct necp_fd_data *fd_data, size_t obj_size, size_t obj_cnt, struct proc *p);
static int necp_arena_stats_obj_alloc(struct necp_fd_data *fd_data, mach_vm_offset_t *off, struct necp_arena_info **stats_arena, void **kstats_kaddr, boolean_t cansleep);
static void necp_arena_stats_obj_free(struct necp_fd_data *fd_data, struct necp_arena_info *stats_arena, void **kstats_kaddr, mach_vm_address_t *ustats_uaddr);
static void necp_stats_arenas_destroy(struct necp_fd_data *fd_data, boolean_t closing);

static int necp_sysctl_arena_initialize(struct necp_fd_data *fd_data, bool locked);
static void necp_sysctl_arena_destroy(struct necp_fd_data *fd_data);
static void *necp_arena_sysctls_obj(struct necp_fd_data *fd_data, mach_vm_offset_t *off, size_t *size);
#endif /* SKYWALK */

void necp_copy_inp_domain_info(struct inpcb *, struct socket *, nstat_domain_info *);
void necp_with_inp_domain_name(struct socket *so, void *ctx, void (*with_func)(char *domain_name, void *ctx));

static void
necp_lock_socket_attributes(void)
{
    lck_mtx_lock(&necp_socket_attr_lock);
}

static void
necp_unlock_socket_attributes(void)
{
    lck_mtx_unlock(&necp_socket_attr_lock);
}

/// NECP file descriptor functions

static void
necp_fd_notify(struct necp_fd_data *fd_data, bool locked)
{
    struct selinfo *si = &fd_data->si;

    if (!locked) {
        NECP_FD_LOCK(fd_data);
    }

    selwakeup(si);

    // Use a non-zero hint to distinguish this notification from the
    // KNOTE call done in kqueue_scan(), which uses 0
    KNOTE(&si->si_note, 1); // notification

    if (!locked) {
        NECP_FD_UNLOCK(fd_data);
    }
}

static inline bool
necp_client_has_unread_flows(struct necp_client *client)
{
    NECP_CLIENT_ASSERT_LOCKED(client);
    struct necp_client_flow_registration *flow_registration = NULL;
    RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
        if (!flow_registration->flow_result_read) {
            return true;
        }
    }
    return false;
}

static int
necp_fd_poll(struct necp_fd_data *fd_data, int events, void *wql, struct proc *p, int is_kevent)
{
#pragma unused(wql, p, is_kevent)
    u_int revents = 0;

    u_int want_rx = events & (POLLIN | POLLRDNORM);
    if (want_rx) {
        if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
            // Push-mode observers are readable when they have a new update
            if (!TAILQ_EMPTY(&fd_data->update_list)) {
                revents |= want_rx;
            }
        } else {
            // Standard fds are readable when some client is unread
            struct necp_client *client = NULL;
            bool has_unread_clients = FALSE;
            RB_FOREACH(client, _necp_client_tree, &fd_data->clients) {
                NECP_CLIENT_LOCK(client);
                if (!client->result_read || !client->group_members_read || necp_client_has_unread_flows(client)) {
                    has_unread_clients = TRUE;
                }
                NECP_CLIENT_UNLOCK(client);
                if (has_unread_clients) {
                    break;
                }
            }

            if (has_unread_clients || fd_data->request_in_process_flow_divert) {
                revents |= want_rx;
            }
        }
    }

    return revents;
}

static inline void
necp_generate_client_id(uuid_t client_id, bool is_flow)
{
    uuid_generate_random(client_id);

    if (is_flow) {
        client_id[9] |= 0x01;
    } else {
        client_id[9] &= ~0x01;
    }
}

static inline bool
necp_client_id_is_flow(uuid_t client_id)
{
    return client_id[9] & 0x01;
}

static struct necp_client *
necp_find_client_and_lock(uuid_t client_id)
{
    NECP_CLIENT_TREE_ASSERT_LOCKED();

    struct necp_client *client = NULL;

    if (necp_client_id_is_flow(client_id)) {
        NECP_FLOW_TREE_LOCK_SHARED();
        struct necp_client_flow_registration find;
        uuid_copy(find.registration_id, client_id);
        struct necp_client_flow_registration *flow = RB_FIND(_necp_client_flow_global_tree, &necp_client_flow_global_tree, &find);
        if (flow != NULL) {
            client = flow->client;
        }
        NECP_FLOW_TREE_UNLOCK();
    } else {
        struct necp_client find;
        uuid_copy(find.client_id, client_id);
        client = RB_FIND(_necp_client_global_tree, &necp_client_global_tree, &find);
    }

    if (client != NULL) {
        NECP_CLIENT_LOCK(client);
    }

    return client;
}

static struct necp_client_flow_registration *
necp_client_find_flow(struct necp_client *client, uuid_t flow_id)
{
    NECP_CLIENT_ASSERT_LOCKED(client);
    struct necp_client_flow_registration *flow = NULL;

    if (necp_client_id_is_flow(flow_id)) {
        struct necp_client_flow_registration find;
        uuid_copy(find.registration_id, flow_id);
        flow = RB_FIND(_necp_client_flow_tree, &client->flow_registrations, &find);
    } else {
        flow = RB_ROOT(&client->flow_registrations);
    }

    return flow;
}

static struct necp_client *
necp_client_fd_find_client_unlocked(struct necp_fd_data *client_fd, uuid_t client_id)
{
    NECP_FD_ASSERT_LOCKED(client_fd);
    struct necp_client *client = NULL;

    if (necp_client_id_is_flow(client_id)) {
        struct necp_client_flow_registration find;
        uuid_copy(find.registration_id, client_id);
        struct necp_client_flow_registration *flow = RB_FIND(_necp_fd_flow_tree, &client_fd->flows, &find);
        if (flow != NULL) {
            client = flow->client;
        }
    } else {
        struct necp_client find;
        uuid_copy(find.client_id, client_id);
        client = RB_FIND(_necp_client_tree, &client_fd->clients, &find);
    }

    return client;
}

static struct necp_client *
necp_client_fd_find_client_and_lock(struct necp_fd_data *client_fd, uuid_t client_id)
{
    struct necp_client *client = necp_client_fd_find_client_unlocked(client_fd, client_id);
    if (client != NULL) {
        NECP_CLIENT_LOCK(client);
    }

    return client;
}

static inline int
necp_client_id_cmp(struct necp_client *client0, struct necp_client *client1)
{
    return uuid_compare(client0->client_id, client1->client_id);
}

static inline int
necp_client_flow_id_cmp(struct necp_client_flow_registration *flow0, struct necp_client_flow_registration *flow1)
{
    return uuid_compare(flow0->registration_id, flow1->registration_id);
}

static int
necpop_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
{
#pragma unused(fp, which, wql, ctx)
    return 0;
    struct necp_fd_data *fd_data = NULL;
    int revents = 0;
    int events = 0;
    proc_t procp;

    fd_data = (struct necp_fd_data *)fp_get_data(fp);
    if (fd_data == NULL) {
        return 0;
    }

    procp = vfs_context_proc(ctx);

    switch (which) {
    case FREAD: {
        events = POLLIN;
        break;
    }

    default: {
        return 1;
    }
    }

    NECP_FD_LOCK(fd_data);
    revents = necp_fd_poll(fd_data, events, wql, procp, 0);
    NECP_FD_UNLOCK(fd_data);

    return (events & revents) ? 1 : 0;
}

static void
necp_fd_knrdetach(struct knote *kn)
{
    struct necp_fd_data *fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);
    struct selinfo *si = &fd_data->si;

    NECP_FD_LOCK(fd_data);
    KNOTE_DETACH(&si->si_note, kn);
    NECP_FD_UNLOCK(fd_data);
}

static int
necp_fd_knread(struct knote *kn, long hint)
{
#pragma unused(kn, hint)
    return 1; /* assume we are ready */
}

static int
necp_fd_knrprocess(struct knote *kn, struct kevent_qos_s *kev)
{
    struct necp_fd_data *fd_data;
    int revents;
    int res;

    fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);

    NECP_FD_LOCK(fd_data);
    revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
    res = ((revents & POLLIN) != 0);
    if (res) {
        knote_fill_kevent(kn, kev, 0);
    }
    NECP_FD_UNLOCK(fd_data);
    return res;
}

static int
necp_fd_knrtouch(struct knote *kn, struct kevent_qos_s *kev)
{
#pragma unused(kev)
    struct necp_fd_data *fd_data;
    int revents;

    fd_data = (struct necp_fd_data *)knote_kn_hook_get_raw(kn);

    NECP_FD_LOCK(fd_data);
    revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);
    NECP_FD_UNLOCK(fd_data);

    return (revents & POLLIN) != 0;
}

SECURITY_READ_ONLY_EARLY(struct filterops) necp_fd_rfiltops = {
    .f_isfd = 1,
    .f_detach = necp_fd_knrdetach,
    .f_event = necp_fd_knread,
    .f_touch = necp_fd_knrtouch,
    .f_process = necp_fd_knrprocess,
};

static int
necpop_kqfilter(struct fileproc *fp, struct knote *kn,
    __unused struct kevent_qos_s *kev)
{
    struct necp_fd_data *fd_data = NULL;
    int revents;

    if (kn->kn_filter != EVFILT_READ) {
        NECPLOG(LOG_ERR, "bad filter request %d", kn->kn_filter);
        knote_set_error(kn, EINVAL);
        return 0;
    }

    fd_data = (struct necp_fd_data *)fp_get_data(fp);
    if (fd_data == NULL) {
        NECPLOG0(LOG_ERR, "No channel for kqfilter");
        knote_set_error(kn, ENOENT);
        return 0;
    }

    NECP_FD_LOCK(fd_data);
    kn->kn_filtid = EVFILTID_NECP_FD;
    knote_kn_hook_set_raw(kn, fd_data);
    KNOTE_ATTACH(&fd_data->si.si_note, kn);

    revents = necp_fd_poll(fd_data, POLLIN, NULL, current_proc(), 1);

    NECP_FD_UNLOCK(fd_data);

    return (revents & POLLIN) != 0;
}

#define INTERFACE_FLAGS_SHIFT 32
#define INTERFACE_FLAGS_MASK 0xffffffff
#define INTERFACE_INDEX_SHIFT 0
#define INTERFACE_INDEX_MASK 0xffffffff

static uint64_t
combine_interface_details(uint32_t interface_index, uint32_t interface_flags)
{
    return ((uint64_t)interface_flags & INTERFACE_FLAGS_MASK) << INTERFACE_FLAGS_SHIFT |
           ((uint64_t)interface_index & INTERFACE_INDEX_MASK) << INTERFACE_INDEX_SHIFT;
}
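
/*
 * Worked example (illustrative values): combine_interface_details(7, 0x2)
 * packs the interface index into the low 32 bits and the flags into the
 * high 32 bits, producing 0x0000000200000007; split_interface_details()
 * below reverses the packing. Packing both values into one 64-bit word
 * lets last_interface_details be published with a single atomic store.
 */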

#if SKYWALK

static void
split_interface_details(uint64_t combined_details, uint32_t *interface_index, uint32_t *interface_flags)
{
    *interface_index = (combined_details >> INTERFACE_INDEX_SHIFT) & INTERFACE_INDEX_MASK;
    *interface_flags = (combined_details >> INTERFACE_FLAGS_SHIFT) & INTERFACE_FLAGS_MASK;
}

static void
necp_flow_save_current_interface_details(struct necp_client_flow_registration *flow_registration)
{
    struct necp_client_flow *flow = NULL;
    LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
        if (flow->nexus) {
            uint64_t combined_details = combine_interface_details(flow->interface_index, flow->interface_flags);
            os_atomic_store(&flow_registration->last_interface_details, combined_details, release);
            break;
        }
    }
}

static void
necp_client_collect_interface_stats(struct necp_client_flow_registration *flow_registration, struct ifnet_stats_per_flow *ifs)
{
    struct necp_client_flow *flow = NULL;

    if (ifs == NULL || ifs->txpackets == 0 || ifs->rxpackets == 0) {
        return; // App might have crashed without publishing ifs
    }

    // Do malicious stats detection here

    // Fold userspace stats into (trusted) kernel stats (stored in ifp).
    LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
        uint32_t if_idx = flow->interface_index;
        ifnet_t ifp = NULL;
        ifnet_head_lock_shared();
        if (if_idx != IFSCOPE_NONE && if_idx <= (uint32_t)if_index) {
            ifp = ifindex2ifnet[if_idx];
            ifnet_update_stats_per_flow(ifs, ifp);
        }
        ifnet_head_done();

        // Currently there is only one flow that uses the shared necp
        // stats region, so this loop should exit after updating an ifp
        break;
    }
}

static void
necp_client_collect_stats(struct necp_client_flow_registration *flow_registration)
{
    struct necp_all_kstats *kstats = (struct necp_all_kstats *)flow_registration->kstats_kaddr;
    if (kstats == NULL) {
        return;
    }

    // Grab userspace stats delta (untrusted).
    struct necp_tcp_stats *curr_tcpstats = (struct necp_tcp_stats *)kstats->necp_stats_ustats;
    struct necp_tcp_stats *prev_tcpstats = (struct necp_tcp_stats *)&kstats->necp_stats_comm;
#define diff_n_update(field) \
    u_int32_t d_##field = (curr_tcpstats->necp_tcp_counts.necp_stat_##field - prev_tcpstats->necp_tcp_counts.necp_stat_##field); \
    prev_tcpstats->necp_tcp_counts.necp_stat_##field += d_##field;
    diff_n_update(rxpackets);
    diff_n_update(txpackets);
    if (d_rxpackets == 0 && d_txpackets == 0) {
        return; // no activity since last collection, stop here
    }
    diff_n_update(rxbytes);
    diff_n_update(txbytes);
    diff_n_update(rxduplicatebytes);
    diff_n_update(rxoutoforderbytes);
    diff_n_update(txretransmit);
    diff_n_update(connectattempts);
    diff_n_update(connectsuccesses);
    uint32_t rtt = prev_tcpstats->necp_tcp_counts.necp_stat_avg_rtt = curr_tcpstats->necp_tcp_counts.necp_stat_avg_rtt;
    uint32_t rtt_var = prev_tcpstats->necp_tcp_counts.necp_stat_var_rtt = curr_tcpstats->necp_tcp_counts.necp_stat_var_rtt;
#undef diff_n_update

    // Do malicious stats detection with the deltas here.
    // RTT check (not necessarily attacks, might just be not measured since we report stats async periodically).
    if (rtt < necp_client_stats_rtt_floor || rtt > necp_client_stats_rtt_ceiling) {
        rtt = rtt_var = 0; // nstat_route_update to skip 0 rtt
    }

    // Fold userspace stats into (trusted) kernel stats (stored in route).
    NECP_CLIENT_ROUTE_LOCK(flow_registration->client);
    struct rtentry *route = flow_registration->client->current_route;
    if (route != NULL) {
        nstat_route_update(route, d_connectattempts, d_connectsuccesses, d_rxpackets, d_rxbytes, d_rxduplicatebytes,
            d_rxoutoforderbytes, d_txpackets, d_txbytes, d_txretransmit, rtt, rtt_var);
    }
    NECP_CLIENT_ROUTE_UNLOCK(flow_registration->client);
}
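
/*
 * Worked example of the delta folding above (illustrative numbers): if the
 * previously folded value of necp_stat_txpackets is 100 and userspace now
 * reports 130, diff_n_update(txpackets) computes d_txpackets = 30 and
 * advances the folded copy to 130, so only the 30-packet delta is credited
 * to the trusted kernel stats via nstat_route_update(). Because the
 * userspace counters are untrusted, only deltas are folded in, and RTT
 * samples outside [necp_client_stats_rtt_floor, necp_client_stats_rtt_ceiling]
 * are zeroed before being reported.
 */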

// This is called from various places; "closing" here implies the client being closed/removed if true, otherwise being
// defunct. In the former case, the caller must not hold the client lock; in the latter, it must have acquired it.
static void
necp_destroy_flow_stats(struct necp_fd_data *fd_data,
    struct necp_client_flow_registration *flow_registration,
    struct ifnet_stats_per_flow *flow_ifnet_stats,
    boolean_t closing)
{
    NECP_FD_ASSERT_LOCKED(fd_data);

    struct necp_client *client = flow_registration->client;

    if (closing) {
        NECP_CLIENT_ASSERT_UNLOCKED(client);
        NECP_CLIENT_LOCK(client);
    } else {
        NECP_CLIENT_ASSERT_LOCKED(client);
    }

    // The interface stats are independent of the flow stats, hence we check here
    if (flow_ifnet_stats != NULL) {
        necp_client_collect_interface_stats(flow_registration, flow_ifnet_stats);
    }

    if (flow_registration->kstats_kaddr != NULL) {
        NECP_STATS_LIST_LOCK_EXCLUSIVE();
        necp_client_collect_stats(flow_registration);
        const bool destroyed = necp_client_release_locked(client); // Drop the reference held by the stats list
        ASSERT(!destroyed);
        (void)destroyed;
        LIST_REMOVE(flow_registration, collect_stats_chain);
        NECP_STATS_LIST_UNLOCK();
        if (flow_registration->stats_handler_context != NULL) {
            ntstat_userland_stats_close(flow_registration->stats_handler_context);
            flow_registration->stats_handler_context = NULL;
        }
        necp_arena_stats_obj_free(fd_data, flow_registration->stats_arena, &flow_registration->kstats_kaddr, &flow_registration->ustats_uaddr);
        ASSERT(flow_registration->kstats_kaddr == NULL);
        ASSERT(flow_registration->ustats_uaddr == 0);
    }

    if (flow_registration->nexus_stats != NULL) {
        flow_stats_release(flow_registration->nexus_stats);
        flow_registration->nexus_stats = NULL;
    }

    if (closing) {
        NECP_CLIENT_UNLOCK(client);
    }
}

static void
necp_schedule_collect_stats_clients(bool recur)
{
    if (necp_client_collect_stats_tcall == NULL ||
        (!recur && thread_call_isactive(necp_client_collect_stats_tcall))) {
        return;
    }

    uint64_t deadline = 0;
    uint64_t leeway = 0;
    clock_interval_to_deadline(necp_collect_stats_timeout_microseconds, NSEC_PER_USEC, &deadline);
    clock_interval_to_absolutetime_interval(necp_collect_stats_timeout_leeway_microseconds, NSEC_PER_USEC, &leeway);

    thread_call_enter_delayed_with_leeway(necp_client_collect_stats_tcall, NULL,
        deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
}

static void
necp_collect_stats_client_callout(__unused thread_call_param_t dummy,
    __unused thread_call_param_t arg)
{
    struct necp_client_flow_registration *flow_registration;

    net_update_uptime();
    NECP_STATS_LIST_LOCK_SHARED();
    if (LIST_EMPTY(&necp_collect_stats_flow_list)) {
        NECP_STATS_LIST_UNLOCK();
        return;
    }
    LIST_FOREACH(flow_registration, &necp_collect_stats_flow_list, collect_stats_chain) {
        // Collecting stats should be cheap (atomic increments)
        // Values like flow_registration->kstats_kaddr are guaranteed to be valid
        // as long as the flow_registration is in the stats list
        necp_client_collect_stats(flow_registration);
    }
    NECP_STATS_LIST_UNLOCK();

    necp_schedule_collect_stats_clients(TRUE); // recurring collection
}

#endif /* SKYWALK */

static void
necp_defunct_flow_registration(struct necp_client *client,
    struct necp_client_flow_registration *flow_registration,
    struct _necp_flow_defunct_list *defunct_list)
{
    NECP_CLIENT_ASSERT_LOCKED(client);

    if (!flow_registration->defunct) {
        bool needs_defunct = false;
        struct necp_client_flow *search_flow = NULL;
        LIST_FOREACH(search_flow, &flow_registration->flow_list, flow_chain) {
            if (search_flow->nexus &&
                !uuid_is_null(search_flow->u.nexus_agent)) {
                // Save defunct values for the nexus
                if (defunct_list != NULL) {
                    // Sleeping alloc won't fail; copy only what's necessary
                    struct necp_flow_defunct *flow_defunct = kalloc_type(struct necp_flow_defunct,
                        Z_WAITOK | Z_ZERO);
                    uuid_copy(flow_defunct->nexus_agent, search_flow->u.nexus_agent);
                    uuid_copy(flow_defunct->flow_id, ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
                        client->client_id :
                        flow_registration->registration_id));
                    flow_defunct->proc_pid = client->proc_pid;
                    flow_defunct->agent_handle = client->agent_handle;
                    flow_defunct->flags = flow_registration->flags;
#if SKYWALK
                    if (flow_registration->kstats_kaddr != NULL) {
                        struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
                        struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
                        if (quicstats != NULL) {
                            memcpy(flow_defunct->close_parameters.u.close_token, quicstats->necp_quic_extra.ssr_token, sizeof(flow_defunct->close_parameters.u.close_token));
                            flow_defunct->has_close_parameters = true;
                        }
                    }
#endif /* SKYWALK */
                    // Add to the list provided by caller
                    LIST_INSERT_HEAD(defunct_list, flow_defunct, chain);
                }

                needs_defunct = true;
            }
        }

        if (needs_defunct) {
#if SKYWALK
            // Close the stats early
            if (flow_registration->stats_handler_context != NULL) {
                ntstat_userland_stats_event(flow_registration->stats_handler_context,
                    NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT);
            }
#endif /* SKYWALK */

            // Only set defunct if there was some assigned flow
            flow_registration->defunct = true;
        }
    }
}

static void
necp_defunct_client_for_policy(struct necp_client *client,
    struct _necp_flow_defunct_list *defunct_list)
{
    NECP_CLIENT_ASSERT_LOCKED(client);

    struct necp_client_flow_registration *flow_registration = NULL;
    RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
        necp_defunct_flow_registration(client, flow_registration, defunct_list);
    }
}

static void
necp_client_free(struct necp_client *client)
{
    NECP_CLIENT_ASSERT_UNLOCKED(client);

    kfree_data(client->extra_interface_options,
        sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT);
    client->extra_interface_options = NULL;

    kfree_data(client->parameters, client->parameters_length);
    client->parameters = NULL;

    kfree_data(client->assigned_group_members, client->assigned_group_members_length);
    client->assigned_group_members = NULL;

    lck_mtx_destroy(&client->route_lock, &necp_fd_mtx_grp);
    lck_mtx_destroy(&client->lock, &necp_fd_mtx_grp);

    kfree_type(struct necp_client, client);
}

static void
necp_client_retain_locked(struct necp_client *client)
{
    NECP_CLIENT_ASSERT_LOCKED(client);

    os_ref_retain_locked(&client->reference_count);
}

static void
necp_client_retain(struct necp_client *client)
{
    NECP_CLIENT_LOCK(client);
    necp_client_retain_locked(client);
    NECP_CLIENT_UNLOCK(client);
}

static bool
necp_client_release_locked(struct necp_client *client)
{
    NECP_CLIENT_ASSERT_LOCKED(client);

    os_ref_count_t count = os_ref_release_locked(&client->reference_count);
    if (count == 0) {
        NECP_CLIENT_UNLOCK(client);
        necp_client_free(client);
    }

    return count == 0;
}

static bool
necp_client_release(struct necp_client *client)
{
    bool last_ref;

    NECP_CLIENT_LOCK(client);
    if (!(last_ref = necp_client_release_locked(client))) {
        NECP_CLIENT_UNLOCK(client);
    }

    return last_ref;
}
1430
1431static struct necp_client_update *
1432necp_client_update_alloc(const void *data, size_t length)
1433{
1434 struct necp_client_update *client_update;
1435 struct necp_client_observer_update *buffer;
1436 size_t alloc_size;
1437
1438 if (os_add_overflow(length, sizeof(*buffer), &alloc_size)) {
1439 return NULL;
1440 }
1441 buffer = kalloc_data(alloc_size, Z_WAITOK);
1442 if (buffer == NULL) {
1443 return NULL;
1444 }
1445
1446 client_update = kalloc_type(struct necp_client_update,
1447 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1448 client_update->update_length = alloc_size;
1449 client_update->update = buffer;
1450 memcpy(dst: buffer->tlv_buffer, src: data, n: length);
1451 return client_update;
1452}
1453
1454static void
1455necp_client_update_free(struct necp_client_update *client_update)
1456{
1457 kfree_data(client_update->update, client_update->update_length);
1458 kfree_type(struct necp_client_update, client_update);
1459}
1460
1461static void
1462necp_client_update_observer_add_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1463{
1464 struct necp_client_update *client_update;
1465
1466 NECP_FD_LOCK(observer_fd);
1467
1468 if (observer_fd->update_count >= necp_observer_message_limit) {
1469 NECP_FD_UNLOCK(observer_fd);
1470 return;
1471 }
1472
1473 client_update = necp_client_update_alloc(data: client->parameters, length: client->parameters_length);
1474 if (client_update != NULL) {
1475 uuid_copy(dst: client_update->client_id, src: client->client_id);
1476 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_PARAMETERS;
1477 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1478 observer_fd->update_count++;
1479
1480 necp_fd_notify(fd_data: observer_fd, true);
1481 }
1482
1483 NECP_FD_UNLOCK(observer_fd);
1484}
1485
1486static void
1487necp_client_update_observer_update_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1488{
1489 NECP_FD_LOCK(observer_fd);
1490
1491 if (observer_fd->update_count >= necp_observer_message_limit) {
1492 NECP_FD_UNLOCK(observer_fd);
1493 return;
1494 }
1495
1496 struct necp_client_update *client_update = necp_client_update_alloc(data: client->result, length: client->result_length);
1497 if (client_update != NULL) {
1498 uuid_copy(dst: client_update->client_id, src: client->client_id);
1499 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_RESULT;
1500 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1501 observer_fd->update_count++;
1502
1503 necp_fd_notify(fd_data: observer_fd, true);
1504 }
1505
1506 NECP_FD_UNLOCK(observer_fd);
1507}
1508
1509static void
1510necp_client_update_observer_remove_internal(struct necp_fd_data *observer_fd, struct necp_client *client)
1511{
1512 NECP_FD_LOCK(observer_fd);
1513
1514 if (observer_fd->update_count >= necp_observer_message_limit) {
1515 NECP_FD_UNLOCK(observer_fd);
1516 return;
1517 }
1518
1519 struct necp_client_update *client_update = necp_client_update_alloc(NULL, length: 0);
1520 if (client_update != NULL) {
1521 uuid_copy(dst: client_update->client_id, src: client->client_id);
1522 client_update->update->update_type = NECP_CLIENT_UPDATE_TYPE_REMOVE;
1523 TAILQ_INSERT_TAIL(&observer_fd->update_list, client_update, chain);
1524 observer_fd->update_count++;
1525
1526 necp_fd_notify(fd_data: observer_fd, true);
1527 }
1528
1529 NECP_FD_UNLOCK(observer_fd);
1530}
1531
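/*
 * The three wrappers below fan a client event out to every push observer:
 * they take the observer list lock shared, bail immediately if there are no
 * observers, and otherwise call the matching *_internal function above for
 * each observer fd. Each _internal function enqueues at most one update,
 * bounded per fd by necp_observer_message_limit, and wakes the observer via
 * necp_fd_notify().
 */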
1532static void
1533necp_client_update_observer_add(struct necp_client *client)
1534{
1535 NECP_OBSERVER_LIST_LOCK_SHARED();
1536
1537 if (LIST_EMPTY(&necp_fd_observer_list)) {
1538 // No observers, bail
1539 NECP_OBSERVER_LIST_UNLOCK();
1540 return;
1541 }
1542
1543 struct necp_fd_data *observer_fd = NULL;
1544 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1545 necp_client_update_observer_add_internal(observer_fd, client);
1546 }
1547
1548 NECP_OBSERVER_LIST_UNLOCK();
1549}
1550
1551static void
1552necp_client_update_observer_update(struct necp_client *client)
1553{
1554 NECP_OBSERVER_LIST_LOCK_SHARED();
1555
1556 if (LIST_EMPTY(&necp_fd_observer_list)) {
1557 // No observers, bail
1558 NECP_OBSERVER_LIST_UNLOCK();
1559 return;
1560 }
1561
1562 struct necp_fd_data *observer_fd = NULL;
1563 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1564 necp_client_update_observer_update_internal(observer_fd, client);
1565 }
1566
1567 NECP_OBSERVER_LIST_UNLOCK();
1568}
1569
1570static void
1571necp_client_update_observer_remove(struct necp_client *client)
1572{
1573 NECP_OBSERVER_LIST_LOCK_SHARED();
1574
1575 if (LIST_EMPTY(&necp_fd_observer_list)) {
1576 // No observers, bail
1577 NECP_OBSERVER_LIST_UNLOCK();
1578 return;
1579 }
1580
1581 struct necp_fd_data *observer_fd = NULL;
1582 LIST_FOREACH(observer_fd, &necp_fd_observer_list, chain) {
1583 necp_client_update_observer_remove_internal(observer_fd, client);
1584 }
1585
1586 NECP_OBSERVER_LIST_UNLOCK();
1587}
1588
1589static void
1590necp_destroy_client_flow_registration(struct necp_client *client,
1591 struct necp_client_flow_registration *flow_registration,
1592 pid_t pid, bool abort)
1593{
1594 NECP_CLIENT_ASSERT_LOCKED(client);
1595
1596 bool has_close_parameters = false;
1597 struct necp_client_agent_parameters close_parameters = {};
1598 memset(s: close_parameters.u.close_token, c: 0, n: sizeof(close_parameters.u.close_token));
1599#if SKYWALK
1600 if (flow_registration->kstats_kaddr != NULL) {
1601 struct necp_all_stats *ustats_kaddr = ((struct necp_all_kstats *)flow_registration->kstats_kaddr)->necp_stats_ustats;
1602 struct necp_quic_stats *quicstats = (struct necp_quic_stats *)ustats_kaddr;
1603 if (quicstats != NULL &&
1604 quicstats->necp_quic_udp_stats.necp_udp_hdr.necp_stats_type == NECP_CLIENT_STATISTICS_TYPE_QUIC) {
1605 memcpy(dst: close_parameters.u.close_token, src: quicstats->necp_quic_extra.ssr_token, n: sizeof(close_parameters.u.close_token));
1606 has_close_parameters = true;
1607 }
1608 }
1609
1610 // Release reference held on the stats arena
1611 if (flow_registration->stats_arena != NULL) {
1612 necp_arena_info_release(nai: flow_registration->stats_arena);
1613 flow_registration->stats_arena = NULL;
1614 }
1615#endif /* SKYWALK */
1616
1617 struct necp_client_flow *search_flow = NULL;
1618 struct necp_client_flow *temp_flow = NULL;
1619 LIST_FOREACH_SAFE(search_flow, &flow_registration->flow_list, flow_chain, temp_flow) {
1620 if (search_flow->nexus &&
1621 !uuid_is_null(uu: search_flow->u.nexus_agent)) {
1622 // Don't unregister for defunct flows
1623 if (!flow_registration->defunct) {
1624 u_int8_t message_type = (abort ? NETAGENT_MESSAGE_TYPE_ABORT_NEXUS :
1625 NETAGENT_MESSAGE_TYPE_CLOSE_NEXUS);
1626 if (((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) ||
1627 (flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) &&
1628 !(flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) {
1629 message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
1630 }
1631 int netagent_error = netagent_client_message_with_params(agent_uuid: search_flow->u.nexus_agent,
1632 necp_client_uuid: ((flow_registration->flags & NECP_CLIENT_FLOW_FLAGS_USE_CLIENT_ID) ?
1633 client->client_id :
1634 flow_registration->registration_id),
1635 pid, handle: client->agent_handle,
1636 message_type,
1637 parameters: has_close_parameters ? &close_parameters : NULL,
1638 NULL, assigned_results_length: 0);
1639 if (netagent_error != 0 && netagent_error != ENOENT) {
1640 NECPLOG(LOG_ERR, "necp_client_remove close nexus error (%d) MESSAGE TYPE %u", netagent_error, message_type);
1641 }
1642 }
1643 uuid_clear(uu: search_flow->u.nexus_agent);
1644 }
1645 if (search_flow->assigned_results != NULL) {
1646 kfree_data(search_flow->assigned_results, search_flow->assigned_results_length);
1647 search_flow->assigned_results = NULL;
1648 }
1649 LIST_REMOVE(search_flow, flow_chain);
1650#if SKYWALK
1651 if (search_flow->nexus) {
1652 OSDecrementAtomic(&necp_nexus_flow_count);
1653 } else
1654#endif /* SKYWALK */
1655 if (search_flow->socket) {
1656 OSDecrementAtomic(&necp_socket_flow_count);
1657 } else {
1658 OSDecrementAtomic(&necp_if_flow_count);
1659 }
1660 kfree_type(struct necp_client_flow, search_flow);
1661 }
1662
1663 RB_REMOVE(_necp_client_flow_tree, &client->flow_registrations, flow_registration);
1664 flow_registration->client = NULL;
1665
1666 kfree_type(struct necp_client_flow_registration, flow_registration);
1667}
1668
1669static void
1670necp_destroy_client(struct necp_client *client, pid_t pid, bool abort)
1671{
1672 NECP_CLIENT_ASSERT_UNLOCKED(client);
1673
1674#if SKYWALK
1675 if (client->nstat_context != NULL) {
1676 // This is a catch-all that should rarely be used.
1677 nstat_provider_stats_close(nstat_ctx: client->nstat_context);
1678 client->nstat_context = NULL;
1679 }
1680 if (client->original_parameters_source != NULL) {
1681 necp_client_release(client: client->original_parameters_source);
1682 client->original_parameters_source = NULL;
1683 }
1684#endif /* SKYWALK */
1685 necp_client_update_observer_remove(client);
1686
1687 NECP_CLIENT_LOCK(client);
1688
1689 // Free route
1690 NECP_CLIENT_ROUTE_LOCK(client);
1691 if (client->current_route != NULL) {
1692 rtfree(client->current_route);
1693 client->current_route = NULL;
1694 }
1695 NECP_CLIENT_ROUTE_UNLOCK(client);
1696
1697 // Remove flow assignments
1698 struct necp_client_flow_registration *flow_registration = NULL;
1699 struct necp_client_flow_registration *temp_flow_registration = NULL;
1700 RB_FOREACH_SAFE(flow_registration, _necp_client_flow_tree, &client->flow_registrations, temp_flow_registration) {
1701 necp_destroy_client_flow_registration(client, flow_registration, pid, abort);
1702 }
1703
1704#if SKYWALK
1705 // Remove port reservation
1706 if (NETNS_TOKEN_VALID(&client->port_reservation)) {
1707 netns_release(token: &client->port_reservation);
1708 }
1709#endif /* SKYWALK */
1710
1711 // Remove agent assertions
1712 struct necp_client_assertion *search_assertion = NULL;
1713 struct necp_client_assertion *temp_assertion = NULL;
1714 LIST_FOREACH_SAFE(search_assertion, &client->assertion_list, assertion_chain, temp_assertion) {
1715 int netagent_error = netagent_client_message(agent_uuid: search_assertion->asserted_netagent, necp_client_uuid: client->client_id, pid,
1716 handle: client->agent_handle, NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT);
1717 if (netagent_error != 0) {
1718 NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR),
1719 "necp_client_remove unassert agent error (%d)", netagent_error);
1720 }
1721 LIST_REMOVE(search_assertion, assertion_chain);
1722 kfree_type(struct necp_client_assertion, search_assertion);
1723 }
1724
1725 if (!necp_client_release_locked(client)) {
1726 NECP_CLIENT_UNLOCK(client);
1727 }
1728
1729 OSDecrementAtomic(&necp_client_count);
1730}
1731
1732static bool
1733necp_defunct_client_fd_locked_inner(struct necp_fd_data *client_fd, struct _necp_flow_defunct_list *defunct_list, bool destroy_stats);
1734
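/*
 * Defuncting is a two-phase operation: flows are first collected onto a
 * _necp_flow_defunct_list while the relevant client/fd locks are held, and
 * only afterwards does necp_process_defunct_list() walk the list to send the
 * abort (or unassert, for browse/resolve-only flows that were not allowed a
 * nexus) message to each flow's nexus agent, freeing each entry as it goes.
 * This keeps netagent messaging out of the locked sections.
 */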
1735static void
1736necp_process_defunct_list(struct _necp_flow_defunct_list *defunct_list)
1737{
1738 if (!LIST_EMPTY(defunct_list)) {
1739 struct necp_flow_defunct *flow_defunct = NULL;
1740 struct necp_flow_defunct *temp_flow_defunct = NULL;
1741
1742 // For each newly defunct client, send a message to the nexus to remove the flow
1743 LIST_FOREACH_SAFE(flow_defunct, defunct_list, chain, temp_flow_defunct) {
1744 if (!uuid_is_null(uu: flow_defunct->nexus_agent)) {
1745 u_int8_t message_type = NETAGENT_MESSAGE_TYPE_ABORT_NEXUS;
1746 if (((flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_BROWSE) ||
1747 (flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_RESOLVE)) &&
1748 !(flow_defunct->flags & NECP_CLIENT_FLOW_FLAGS_ALLOW_NEXUS)) {
1749 message_type = NETAGENT_MESSAGE_TYPE_CLIENT_UNASSERT;
1750 }
1751 int netagent_error = netagent_client_message_with_params(agent_uuid: flow_defunct->nexus_agent,
1752 necp_client_uuid: flow_defunct->flow_id,
1753 pid: flow_defunct->proc_pid,
1754 handle: flow_defunct->agent_handle,
1755 message_type,
1756 parameters: flow_defunct->has_close_parameters ? &flow_defunct->close_parameters : NULL,
1757 NULL, assigned_results_length: 0);
1758 if (netagent_error != 0) {
1759 char namebuf[MAXCOMLEN + 1];
1760 (void) strlcpy(dst: namebuf, src: "unknown", n: sizeof(namebuf));
1761 proc_name(pid: flow_defunct->proc_pid, buf: namebuf, size: sizeof(namebuf));
1762 NECPLOG((netagent_error == ENOENT ? LOG_DEBUG : LOG_ERR), "necp_update_client abort nexus error (%d) for pid %d %s", netagent_error, flow_defunct->proc_pid, namebuf);
1763 }
1764 }
1765 LIST_REMOVE(flow_defunct, chain);
1766 kfree_type(struct necp_flow_defunct, flow_defunct);
1767 }
1768 }
1769 ASSERT(LIST_EMPTY(defunct_list));
1770}
1771
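/*
 * necpop_close() tears down an NECP client fd: it unlinks the fd from the
 * observer or client fd list, collects any flows that need to be defuncted,
 * detaches flow registrations and clients from the global trees, flushes
 * pending observer updates, destroys the stats/sysctl arenas on SKYWALK
 * builds, and finally destroys each client and processes the defunct list
 * once all locks have been dropped.
 */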
1772static int
1773necpop_close(struct fileglob *fg, vfs_context_t ctx)
1774{
1775#pragma unused(ctx)
1776 struct necp_fd_data *fd_data = NULL;
1777 int error = 0;
1778
1779 fd_data = (struct necp_fd_data *)fg_get_data(fg);
1780 fg_set_data(fg, NULL);
1781
1782 if (fd_data != NULL) {
1783 struct _necp_client_tree clients_to_close;
1784 RB_INIT(&clients_to_close);
1785
1786 // Remove from list quickly
1787 if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
1788 NECP_OBSERVER_LIST_LOCK_EXCLUSIVE();
1789 LIST_REMOVE(fd_data, chain);
1790 NECP_OBSERVER_LIST_UNLOCK();
1791 } else {
1792 NECP_FD_LIST_LOCK_EXCLUSIVE();
1793 LIST_REMOVE(fd_data, chain);
1794 NECP_FD_LIST_UNLOCK();
1795 }
1796
1797 NECP_FD_LOCK(fd_data);
1798 pid_t pid = fd_data->proc_pid;
1799
1800 struct _necp_flow_defunct_list defunct_list;
1801 LIST_INIT(&defunct_list);
1802
1803 (void)necp_defunct_client_fd_locked_inner(client_fd: fd_data, defunct_list: &defunct_list, false);
1804
1805 struct necp_client_flow_registration *flow_registration = NULL;
1806 struct necp_client_flow_registration *temp_flow_registration = NULL;
1807 RB_FOREACH_SAFE(flow_registration, _necp_fd_flow_tree, &fd_data->flows, temp_flow_registration) {
1808#if SKYWALK
1809 necp_destroy_flow_stats(fd_data, flow_registration, NULL, TRUE);
1810#endif /* SKYWALK */
1811 NECP_FLOW_TREE_LOCK_EXCLUSIVE();
1812 RB_REMOVE(_necp_client_flow_global_tree, &necp_client_flow_global_tree, flow_registration);
1813 NECP_FLOW_TREE_UNLOCK();
1814 RB_REMOVE(_necp_fd_flow_tree, &fd_data->flows, flow_registration);
1815 }
1816
1817 struct necp_client *client = NULL;
1818 struct necp_client *temp_client = NULL;
1819 RB_FOREACH_SAFE(client, _necp_client_tree, &fd_data->clients, temp_client) {
1820 // Clear out the agent_handle to avoid dangling pointers back to fd_data
1821 NECP_CLIENT_LOCK(client);
1822 client->agent_handle = NULL;
1823 NECP_CLIENT_UNLOCK(client);
1824
1825 NECP_CLIENT_TREE_LOCK_EXCLUSIVE();
1826 RB_REMOVE(_necp_client_global_tree, &necp_client_global_tree, client);
1827 NECP_CLIENT_TREE_UNLOCK();
1828 RB_REMOVE(_necp_client_tree, &fd_data->clients, client);
1829 RB_INSERT(_necp_client_tree, &clients_to_close, client);
1830 }
1831
1832 struct necp_client_update *client_update = NULL;
1833 struct necp_client_update *temp_update = NULL;
1834 TAILQ_FOREACH_SAFE(client_update, &fd_data->update_list, chain, temp_update) {
1835 // Flush pending updates
1836 TAILQ_REMOVE(&fd_data->update_list, client_update, chain);
1837 necp_client_update_free(client_update);
1838 }
1839 fd_data->update_count = 0;
1840
1841#if SKYWALK
1842 // Cleanup stats arena(s); indicate that we're closing
1843 necp_stats_arenas_destroy(fd_data, TRUE);
1844 ASSERT(fd_data->stats_arena_active == NULL);
1845 ASSERT(LIST_EMPTY(&fd_data->stats_arena_list));
1846
1847 // Cleanup sysctl arena
1848 necp_sysctl_arena_destroy(fd_data);
1849 ASSERT(fd_data->sysctl_arena == NULL);
1850#endif /* SKYWALK */
1851
1852 NECP_FD_UNLOCK(fd_data);
1853
1854 selthreadclear(&fd_data->si);
1855
1856 lck_mtx_destroy(lck: &fd_data->fd_lock, grp: &necp_fd_mtx_grp);
1857
1858 if (fd_data->flags & NECP_OPEN_FLAG_PUSH_OBSERVER) {
1859 OSDecrementAtomic(&necp_observer_fd_count);
1860 } else {
1861 OSDecrementAtomic(&necp_client_fd_count);
1862 }
1863
1864 kfree_type(struct necp_fd_data, fd_data);
1865
1866 RB_FOREACH_SAFE(client, _necp_client_tree, &clients_to_close, temp_client) {
1867 RB_REMOVE(_necp_client_tree, &clients_to_close, client);
1868 necp_destroy_client(client, pid, true);
1869 }
1870
1871 necp_process_defunct_list(defunct_list: &defunct_list);
1872 }
1873
1874 return error;
1875}
1876
1877/// NECP client utilities
1878
1879static inline bool
1880necp_address_is_wildcard(const union necp_sockaddr_union * const addr)
1881{
1882 return (addr->sa.sa_family == AF_INET && addr->sin.sin_addr.s_addr == INADDR_ANY) ||
1883 (addr->sa.sa_family == AF_INET6 && IN6_IS_ADDR_UNSPECIFIED(&addr->sin6.sin6_addr));
1884}
1885
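/*
 * necp_find_fd_data() resolves a file descriptor of type DTYPE_NETPOLICY to
 * its necp_fd_data and only accepts client-type fds. On success the fileproc
 * is left held, so the caller is responsible for dropping it.
 *
 * Caller sketch (illustrative only; actual callers appear later in this file):
 *
 *	struct fileproc *fp = NULL;
 *	struct necp_fd_data *fd_data = NULL;
 *	int error = necp_find_fd_data(p, fd, &fp, &fd_data);
 *	if (error == 0) {
 *		// ... operate on fd_data ...
 *		fp_drop(p, fd, fp, 0);
 *	}
 */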
1886static int
1887necp_find_fd_data(struct proc *p, int fd,
1888 struct fileproc **fpp, struct necp_fd_data **fd_data)
1889{
1890 struct fileproc *fp;
1891 int error = fp_get_ftype(p, fd, ftype: DTYPE_NETPOLICY, ENODEV, fpp: &fp);
1892
1893 if (error == 0) {
1894 *fd_data = (struct necp_fd_data *)fp_get_data(fp);
1895 *fpp = fp;
1896
1897 if ((*fd_data)->necp_fd_type != necp_fd_type_client) {
1898 // Not a client fd, ignore
1899 fp_drop(p, fd, fp, locked: 0);
1900 error = EINVAL;
1901 }
1902 }
1903 return error;
1904}
1905
1906static void
1907necp_client_add_nexus_flow(struct necp_client_flow_registration *flow_registration,
1908 uuid_t nexus_agent,
1909 uint32_t interface_index,
1910 uint32_t interface_flags)
1911{
1912 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1913
1914 new_flow->nexus = TRUE;
1915 uuid_copy(dst: new_flow->u.nexus_agent, src: nexus_agent);
1916 new_flow->interface_index = interface_index;
1917 new_flow->interface_flags = interface_flags;
1918 new_flow->check_tcp_heuristics = TRUE;
1919
1920#if SKYWALK
1921 OSIncrementAtomic(&necp_nexus_flow_count);
1922#endif /* SKYWALK */
1923
1924 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
1925
1926#if SKYWALK
1927 necp_flow_save_current_interface_details(flow_registration);
1928#endif /* SKYWALK */
1929}
1930
1931static void
1932necp_client_add_nexus_flow_if_needed(struct necp_client_flow_registration *flow_registration,
1933 uuid_t nexus_agent,
1934 uint32_t interface_index)
1935{
1936 struct necp_client_flow *flow = NULL;
1937 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
1938 if (flow->nexus &&
1939 uuid_compare(uu1: flow->u.nexus_agent, uu2: nexus_agent) == 0) {
1940 return;
1941 }
1942 }
1943
1944 uint32_t interface_flags = 0;
1945 ifnet_t ifp = NULL;
1946 ifnet_head_lock_shared();
1947 if (interface_index != IFSCOPE_NONE && interface_index <= (u_int32_t)if_index) {
1948 ifp = ifindex2ifnet[interface_index];
1949 if (ifp != NULL) {
1950 ifnet_lock_shared(ifp);
1951 interface_flags = nstat_ifnet_to_flags(ifp);
1952 ifnet_lock_done(ifp);
1953 }
1954 }
1955 ifnet_head_done();
1956 necp_client_add_nexus_flow(flow_registration, nexus_agent, interface_index, interface_flags);
1957}
1958
1959static struct necp_client_flow *
1960necp_client_add_interface_flow(struct necp_client_flow_registration *flow_registration,
1961 uint32_t interface_index)
1962{
1963 struct necp_client_flow *new_flow = kalloc_type(struct necp_client_flow, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1964
1965 // Neither nexus nor socket
1966 new_flow->interface_index = interface_index;
1967 new_flow->u.socket_handle = flow_registration->interface_handle;
1968 new_flow->u.cb = flow_registration->interface_cb;
1969
1970 OSIncrementAtomic(&necp_if_flow_count);
1971
1972 LIST_INSERT_HEAD(&flow_registration->flow_list, new_flow, flow_chain);
1973
1974 return new_flow;
1975}
1976
1977static struct necp_client_flow *
1978necp_client_add_interface_flow_if_needed(struct necp_client *client,
1979 struct necp_client_flow_registration *flow_registration,
1980 uint32_t interface_index)
1981{
1982 if (!client->allow_multiple_flows ||
1983 interface_index == IFSCOPE_NONE) {
1984 // Interface not set, or client not allowed to use this mode
1985 return NULL;
1986 }
1987
1988 struct necp_client_flow *flow = NULL;
1989 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
1990 if (!flow->nexus && !flow->socket && flow->interface_index == interface_index) {
1991 // Already have the flow
1992 flow->invalid = FALSE;
1993 flow->u.socket_handle = flow_registration->interface_handle;
1994 flow->u.cb = flow_registration->interface_cb;
1995 return NULL;
1996 }
1997 }
1998 return necp_client_add_interface_flow(flow_registration, interface_index);
1999}
2000
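/*
 * Interface options are stored in two tiers: a small static array embedded in
 * the client (NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT entries) and an
 * overflow array allocated lazily for up to
 * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT additional entries. The helper
 * below first checks both tiers for a matching interface/agent pair before
 * appending a new entry, and refuses to add anything once
 * NECP_CLIENT_MAX_INTERFACE_OPTIONS is reached.
 */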
2001static void
2002necp_client_add_interface_option_if_needed(struct necp_client *client,
2003 uint32_t interface_index,
2004 uint32_t interface_generation,
2005 uuid_t *nexus_agent,
2006 bool network_provider)
2007{
2008 if ((interface_index == IFSCOPE_NONE && !network_provider) ||
2009 (client->interface_option_count != 0 && !client->allow_multiple_flows)) {
2010 // Interface not set, or client not allowed to use this mode
2011 return;
2012 }
2013
2014 if (client->interface_option_count >= NECP_CLIENT_MAX_INTERFACE_OPTIONS) {
2015 // Cannot take any more interface options
2016 return;
2017 }
2018
2019 // Check if already present
2020 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
2021 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2022 struct necp_client_interface_option *option = &client->interface_options[option_i];
2023 if (option->interface_index == interface_index) {
2024 if (nexus_agent == NULL) {
2025 return;
2026 }
2027 if (uuid_compare(uu1: option->nexus_agent, uu2: *nexus_agent) == 0) {
2028 return;
2029 }
2030 if (uuid_is_null(uu: option->nexus_agent)) {
2031 uuid_copy(dst: option->nexus_agent, src: *nexus_agent);
2032 return;
2033 }
2034 // If we get to this point, this is a new nexus flow
2035 }
2036 } else {
2037 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2038 if (option->interface_index == interface_index) {
2039 if (nexus_agent == NULL) {
2040 return;
2041 }
2042 if (uuid_compare(uu1: option->nexus_agent, uu2: *nexus_agent) == 0) {
2043 return;
2044 }
2045 if (uuid_is_null(uu: option->nexus_agent)) {
2046 uuid_copy(dst: option->nexus_agent, src: *nexus_agent);
2047 return;
2048 }
2049 // If we get to this point, this is a new nexus flow
2050 }
2051 }
2052 }
2053
2054 // Add a new entry
2055 if (client->interface_option_count < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2056 // Add to static
2057 struct necp_client_interface_option *option = &client->interface_options[client->interface_option_count];
2058 option->interface_index = interface_index;
2059 option->interface_generation = interface_generation;
2060 if (nexus_agent != NULL) {
2061 uuid_copy(dst: option->nexus_agent, src: *nexus_agent);
2062 } else {
2063 uuid_clear(uu: option->nexus_agent);
2064 }
2065 client->interface_option_count++;
2066 } else {
2067 // Add to extra
2068 if (client->extra_interface_options == NULL) {
2069 client->extra_interface_options = (struct necp_client_interface_option *)kalloc_data(
2070 sizeof(struct necp_client_interface_option) * NECP_CLIENT_INTERFACE_OPTION_EXTRA_COUNT, Z_WAITOK | Z_ZERO);
2071 }
2072 if (client->extra_interface_options != NULL) {
2073 struct necp_client_interface_option *option = &client->extra_interface_options[client->interface_option_count - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2074 option->interface_index = interface_index;
2075 option->interface_generation = interface_generation;
2076 if (nexus_agent != NULL) {
2077 uuid_copy(dst: option->nexus_agent, src: *nexus_agent);
2078 } else {
2079 uuid_clear(uu: option->nexus_agent);
2080 }
2081 client->interface_option_count++;
2082 }
2083 }
2084}
2085
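/*
 * A flow is considered viable only when the policy match for the client's
 * parameters succeeds on the flow's interface, the result routes to a real
 * interface (not IFSCOPE_NONE), the routing result is not a drop, and no
 * required (registered, non-voluntary, non-specific-use) agent in the result
 * is inactive. As a side effect, the check also records the delegated
 * interface index for the flow's interface, if any.
 */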
2086static bool
2087necp_client_flow_is_viable(proc_t proc, struct necp_client *client,
2088 struct necp_client_flow *flow)
2089{
2090 struct necp_aggregate_result result;
2091 bool ignore_address = (client->allow_multiple_flows && !flow->nexus && !flow->socket);
2092
2093 flow->necp_flow_flags = 0;
2094 int error = necp_application_find_policy_match_internal(proc, parameters: client->parameters,
2095 parameters_size: (u_int32_t)client->parameters_length,
2096 returned_result: &result, flags: &flow->necp_flow_flags, NULL,
2097 required_interface_index: flow->interface_index,
2098 override_local_addr: &flow->local_addr, override_remote_addr: &flow->remote_addr, NULL, NULL,
2099 NULL, ignore_address, true, NULL);
2100
2101 // Check for blocking agents
2102 for (int i = 0; i < NECP_MAX_NETAGENTS; i++) {
2103 if (uuid_is_null(uu: result.netagents[i])) {
2104 // Passed end of valid agents
2105 break;
2106 }
2107 if (result.netagent_use_flags[i] & NECP_AGENT_USE_FLAG_REMOVE) {
2108 // A removed agent, ignore
2109 continue;
2110 }
2111 u_int32_t flags = netagent_get_flags(uuid: result.netagents[i]);
2112 if ((flags & NETAGENT_FLAG_REGISTERED) &&
2113 !(flags & NETAGENT_FLAG_VOLUNTARY) &&
2114 !(flags & NETAGENT_FLAG_ACTIVE) &&
2115 !(flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY)) {
2116 // A required agent is not active, cause the flow to be marked non-viable
2117 return false;
2118 }
2119 }
2120
2121 if (flow->interface_index != IFSCOPE_NONE) {
2122 ifnet_head_lock_shared();
2123
2124 struct ifnet *ifp = ifindex2ifnet[flow->interface_index];
2125 if (ifp != NULL && ifp->if_delegated.ifp != NULL) {
2126 flow->delegated_interface_index = ifp->if_delegated.ifp->if_index;
2127 }
2128
2129 ifnet_head_done();
2130 }
2131
2132 return error == 0 &&
2133 result.routed_interface_index != IFSCOPE_NONE &&
2134 result.routing_result != NECP_KERNEL_POLICY_RESULT_DROP;
2135}
2136
2137static void
2138necp_flow_add_interface_flows(proc_t proc,
2139 struct necp_client *client,
2140 struct necp_client_flow_registration *flow_registration,
2141 bool send_initial)
2142{
2143 // Traverse all interfaces and add a tracking flow if needed
2144 for (u_int32_t option_i = 0; option_i < client->interface_option_count; option_i++) {
2145 if (option_i < NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT) {
2146 struct necp_client_interface_option *option = &client->interface_options[option_i];
2147 struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, interface_index: option->interface_index);
2148 if (flow != NULL && send_initial) {
2149 flow->viable = necp_client_flow_is_viable(proc, client, flow);
2150 if (flow->viable && flow->u.cb) {
2151 bool viable = flow->viable;
2152 flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
2153 flow->viable = viable;
2154 }
2155 }
2156 } else {
2157 struct necp_client_interface_option *option = &client->extra_interface_options[option_i - NECP_CLIENT_INTERFACE_OPTION_STATIC_COUNT];
2158 struct necp_client_flow *flow = necp_client_add_interface_flow_if_needed(client, flow_registration, interface_index: option->interface_index);
2159 if (flow != NULL && send_initial) {
2160 flow->viable = necp_client_flow_is_viable(proc, client, flow);
2161 if (flow->viable && flow->u.cb) {
2162 bool viable = flow->viable;
2163 flow->u.cb(flow_registration->interface_handle, NECP_CLIENT_CBACTION_INITIAL, flow->interface_index, flow->necp_flow_flags, &viable);
2164 flow->viable = viable;
2165 }
2166 }
2167 }
2168 }
2169}
2170
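/*
 * necp_client_update_flows() re-evaluates every flow on every registration of
 * a client: it refreshes viability, invokes the socket/interface callbacks
 * with VIABLE or NONVIABLE actions when something changed, defuncts nexus
 * flows whose agent has disappeared (SKYWALK builds only), and prunes
 * non-nexus flows that are no longer viable and have no assigned data. The
 * return value reports whether any flow changed.
 */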
2171static bool
2172necp_client_update_flows(proc_t proc,
2173 struct necp_client *client,
2174 struct _necp_flow_defunct_list *defunct_list)
2175{
2176 NECP_CLIENT_ASSERT_LOCKED(client);
2177
2178 bool any_client_updated = FALSE;
2179 struct necp_client_flow *flow = NULL;
2180 struct necp_client_flow *temp_flow = NULL;
2181 struct necp_client_flow_registration *flow_registration = NULL;
2182 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
2183 if (flow_registration->interface_cb != NULL) {
2184 // Add any interface flows that are not already tracked
2185 necp_flow_add_interface_flows(proc, client, flow_registration, false);
2186 }
2187
2188 LIST_FOREACH_SAFE(flow, &flow_registration->flow_list, flow_chain, temp_flow) {
2189 bool client_updated = FALSE;
2190
2191 // Check policy result for flow
2192 u_short old_delegated_ifindex = flow->delegated_interface_index;
2193
2194 int old_flags = flow->necp_flow_flags;
2195 bool viable = necp_client_flow_is_viable(proc, client, flow);
2196
2197 // TODO: Defunct nexus flows that are blocked by policy
2198
2199 if (flow->viable != viable) {
2200 flow->viable = viable;
2201 client_updated = TRUE;
2202 }
2203
2204 if ((old_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE) !=
2205 (flow->necp_flow_flags & NECP_CLIENT_RESULT_FLAG_FORCE_UPDATE)) {
2206 client_updated = TRUE;
2207 }
2208
2209 if (flow->delegated_interface_index != old_delegated_ifindex) {
2210 client_updated = TRUE;
2211 }
2212
2213 if (flow->viable && client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
2214 bool flow_viable = flow->viable;
2215 flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_VIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
2216 flow->viable = flow_viable;
2217 }
2218
2219 if (!flow->viable || flow->invalid) {
2220 if (client_updated && (flow->socket || (!flow->socket && !flow->nexus)) && flow->u.cb) {
2221 bool flow_viable = flow->viable;
2222 flow->u.cb(flow->u.socket_handle, NECP_CLIENT_CBACTION_NONVIABLE, flow->interface_index, flow->necp_flow_flags, &flow_viable);
2223 flow->viable = flow_viable;
2224 }
2225 // The callback might change the viable-flag of the
2226 // flow depending on its policy. Thus, we need to
2227 // check the flags again after the callback.
2228 }
2229
2230#if SKYWALK
2231 if (defunct_list != NULL) {
2232 if (flow->invalid && flow->nexus && flow->assigned && !uuid_is_null(uu: flow->u.nexus_agent)) {
2233 // This is a nexus flow that was assigned, but not found on path
2234 u_int32_t flags = netagent_get_flags(uuid: flow->u.nexus_agent);
2235 if (!(flags & NETAGENT_FLAG_REGISTERED)) {
2236 // The agent is no longer registered! Mark defunct.
2237 necp_defunct_flow_registration(client, flow_registration, defunct_list);
2238 client_updated = TRUE;
2239 }
2240 }
2241 }
2242#else /* !SKYWALK */
2243 (void)defunct_list;
2244#endif /* !SKYWALK */
2245
2246 // Handle flows that no longer match
2247 if (!flow->viable || flow->invalid) {
2248 // Drop them as long as they aren't assigned data
2249 if (!flow->nexus && !flow->assigned) {
2250 if (flow->assigned_results != NULL) {
2251 kfree_data(flow->assigned_results, flow->assigned_results_length);
2252 flow->assigned_results = NULL;
2253 client_updated = TRUE;
2254 }
2255 LIST_REMOVE(flow, flow_chain);
2256#if SKYWALK
2257 if (flow->nexus) {
2258 OSDecrementAtomic(&necp_nexus_flow_count);
2259 } else
2260#endif /* SKYWALK */
2261 if (flow->socket) {
2262 OSDecrementAtomic(&necp_socket_flow_count);
2263 } else {
2264 OSDecrementAtomic(&necp_if_flow_count);
2265 }
2266 kfree_type(struct necp_client_flow, flow);
2267 }
2268 }
2269
2270 any_client_updated |= client_updated;
2271 }
2272#if SKYWALK
2273 necp_flow_save_current_interface_details(flow_registration);
2274#endif /* SKYWALK */
2275 }
2276
2277 return any_client_updated;
2278}
2279
2280static void
2281necp_client_mark_all_nonsocket_flows_as_invalid(struct necp_client *client)
2282{
2283 struct necp_client_flow_registration *flow_registration = NULL;
2284 struct necp_client_flow *flow = NULL;
2285 RB_FOREACH(flow_registration, _necp_client_flow_tree, &client->flow_registrations) {
2286 LIST_FOREACH(flow, &flow_registration->flow_list, flow_chain) {
2287 if (!flow->socket) { // Socket flows are not marked as invalid
2288 flow->invalid = TRUE;
2289 }
2290 }
2291 }
2292
2293 // Reset option count every update
2294 client->interface_option_count = 0;
2295}
2296
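/*
 * An agent counts as "requested" if it matches any of the client's required
 * or preferred agent UUIDs, or any of the required/preferred domain/type
 * pairs. The agent's own domain and type are fetched at most once per list
 * via netagent_get_agent_domain_and_type().
 */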
2297static inline bool
2298necp_netagent_is_requested(const struct necp_client_parsed_parameters *parameters,
2299 uuid_t *netagent_uuid)
2300{
2301 // Specific use agents only apply when requested
2302 bool requested = false;
2303 if (parameters != NULL) {
2304 // Check required agent UUIDs
2305 for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
2306 if (uuid_is_null(uu: parameters->required_netagents[i])) {
2307 break;
2308 }
2309 if (uuid_compare(uu1: parameters->required_netagents[i], uu2: *netagent_uuid) == 0) {
2310 requested = true;
2311 break;
2312 }
2313 }
2314
2315 if (!requested) {
2316 // Check required agent types
2317 bool fetched_type = false;
2318 char netagent_domain[NETAGENT_DOMAINSIZE];
2319 char netagent_type[NETAGENT_TYPESIZE];
2320 memset(s: &netagent_domain, c: 0, NETAGENT_DOMAINSIZE);
2321 memset(s: &netagent_type, c: 0, NETAGENT_TYPESIZE);
2322
2323 for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
2324 if (strlen(s: parameters->required_netagent_types[i].netagent_domain) == 0 ||
2325 strlen(s: parameters->required_netagent_types[i].netagent_type) == 0) {
2326 break;
2327 }
2328
2329 if (!fetched_type) {
2330 if (netagent_get_agent_domain_and_type(uuid: *netagent_uuid, domain: netagent_domain, type: netagent_type)) {
2331 fetched_type = TRUE;
2332 } else {
2333 break;
2334 }
2335 }
2336
2337 if ((strlen(s: parameters->required_netagent_types[i].netagent_domain) == 0 ||
2338 strncmp(s1: netagent_domain, s2: parameters->required_netagent_types[i].netagent_domain, NETAGENT_DOMAINSIZE) == 0) &&
2339 (strlen(s: parameters->required_netagent_types[i].netagent_type) == 0 ||
2340 strncmp(s1: netagent_type, s2: parameters->required_netagent_types[i].netagent_type, NETAGENT_TYPESIZE) == 0)) {
2341 requested = true;
2342 break;
2343 }
2344 }
2345 }
2346
2347 // Check preferred agent UUIDs
2348 for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
2349 if (uuid_is_null(uu: parameters->preferred_netagents[i])) {
2350 break;
2351 }
2352 if (uuid_compare(uu1: parameters->preferred_netagents[i], uu2: *netagent_uuid) == 0) {
2353 requested = true;
2354 break;
2355 }
2356 }
2357
2358 if (!requested) {
2359 // Check preferred agent types
2360 bool fetched_type = false;
2361 char netagent_domain[NETAGENT_DOMAINSIZE];
2362 char netagent_type[NETAGENT_TYPESIZE];
2363 memset(s: &netagent_domain, c: 0, NETAGENT_DOMAINSIZE);
2364 memset(s: &netagent_type, c: 0, NETAGENT_TYPESIZE);
2365
2366 for (int i = 0; i < NECP_MAX_AGENT_PARAMETERS; i++) {
2367 if (strlen(s: parameters->preferred_netagent_types[i].netagent_domain) == 0 ||
2368 strlen(s: parameters->preferred_netagent_types[i].netagent_type) == 0) {
2369 break;
2370 }
2371
2372 if (!fetched_type) {
2373 if (netagent_get_agent_domain_and_type(uuid: *netagent_uuid, domain: netagent_domain, type: netagent_type)) {
2374 fetched_type = TRUE;
2375 } else {
2376 break;
2377 }
2378 }
2379
2380 if ((strlen(s: parameters->preferred_netagent_types[i].netagent_domain) == 0 ||
2381 strncmp(s1: netagent_domain, s2: parameters->preferred_netagent_types[i].netagent_domain, NETAGENT_DOMAINSIZE) == 0) &&
2382 (strlen(s: parameters->preferred_netagent_types[i].netagent_type) == 0 ||
2383 strncmp(s1: netagent_type, s2: parameters->preferred_netagent_types[i].netagent_type, NETAGENT_TYPESIZE) == 0)) {
2384 requested = true;
2385 break;
2386 }
2387 }
2388 }
2389 }
2390
2391 return requested;
2392}
2393
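/*
 * necp_netagent_applies_to_client() filters agents for a client: unregistered
 * agents never apply; nexus-provider agents are hidden unless the caller
 * allows nexus use and the agent's nexus flavor (custom ether, custom IP,
 * interpose, or generic) matches what the client's parameter flags asked for;
 * an agent whose trigger previously failed stays hidden until its generation
 * changes; and specific-use-only agents apply only when explicitly requested.
 * On SKYWALK builds, applicable nexus agents are also recorded as interface
 * options for non-browse paths.
 */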
2394static bool
2395necp_netagent_applies_to_client(struct necp_client *client,
2396 const struct necp_client_parsed_parameters *parameters,
2397 uuid_t *netagent_uuid, bool allow_nexus,
2398 uint32_t interface_index, uint32_t interface_generation)
2399{
2400#pragma unused(interface_index, interface_generation)
2401 bool applies = FALSE;
2402 u_int32_t flags = netagent_get_flags(uuid: *netagent_uuid);
2403 if (!(flags & NETAGENT_FLAG_REGISTERED)) {
2404 // Unregistered agents never apply
2405 return applies;
2406 }
2407
2408 const bool is_nexus_agent = ((flags & NETAGENT_FLAG_NEXUS_PROVIDER) ||
2409 (flags & NETAGENT_FLAG_NEXUS_LISTENER) ||
2410 (flags & NETAGENT_FLAG_CUSTOM_ETHER_NEXUS) ||
2411 (flags & NETAGENT_FLAG_CUSTOM_IP_NEXUS) ||
2412 (flags & NETAGENT_FLAG_INTERPOSE_NEXUS));
2413 if (is_nexus_agent) {
2414 if (!allow_nexus) {
2415 // Hide nexus providers unless allowed
2416 // Direct interfaces and direct policies are allowed to use a nexus
2417 // Delegate interfaces or re-scoped interfaces are not allowed
2418 return applies;
2419 }
2420
2421 if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) &&
2422 !(flags & NETAGENT_FLAG_CUSTOM_ETHER_NEXUS)) {
2423 // Client requested a custom ether nexus, but this nexus isn't one
2424 return applies;
2425 }
2426
2427 if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP) &&
2428 !(flags & NETAGENT_FLAG_CUSTOM_IP_NEXUS)) {
2429 // Client requested a custom IP nexus, but this nexus isn't one
2430 return applies;
2431 }
2432
2433 if ((parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) &&
2434 !(flags & NETAGENT_FLAG_INTERPOSE_NEXUS)) {
2435 // Client requested an interpose nexus, but this nexus isn't one
2436 return applies;
2437 }
2438
2439 if (!(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_ETHER) &&
2440 !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_CUSTOM_IP) &&
2441 !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_INTERPOSE) &&
2442 !(flags & NETAGENT_FLAG_NEXUS_PROVIDER)) {
2443 // Client requested default parameters, but this nexus isn't generic
2444 return applies;
2445 }
2446 }
2447
2448 if (uuid_compare(uu1: client->failed_trigger_agent.netagent_uuid, uu2: *netagent_uuid) == 0) {
2449 if (client->failed_trigger_agent.generation == netagent_get_generation(uuid: *netagent_uuid)) {
2450 // If this agent was triggered, and failed, and hasn't changed, keep hiding it
2451 return applies;
2452 } else {
2453 // Mismatch generation, clear out old trigger
2454 uuid_clear(uu: client->failed_trigger_agent.netagent_uuid);
2455 client->failed_trigger_agent.generation = 0;
2456 }
2457 }
2458
2459 if (flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) {
2460 // Specific use agents only apply when requested
2461 applies = necp_netagent_is_requested(parameters, netagent_uuid);
2462 } else {
2463 applies = TRUE;
2464 }
2465
2466#if SKYWALK
2467 // Add nexus agent if it is a nexus, and either is not a listener, or the nexus supports listeners
2468 if (applies && is_nexus_agent &&
2469 !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_BROWSE) && // Don't add for browse paths
2470 ((flags & NETAGENT_FLAG_NEXUS_LISTENER) || !(parameters->flags & NECP_CLIENT_PARAMETER_FLAG_LISTENER))) {
2471 necp_client_add_interface_option_if_needed(client, interface_index,
2472 interface_generation, nexus_agent: netagent_uuid,
2473 network_provider: (flags & NETAGENT_FLAG_NETWORK_PROVIDER));
2474 }
2475#endif /* SKYWALK */
2476
2477 return applies;
2478}
2479
2480static void
2481necp_client_add_agent_interface_options(struct necp_client *client,
2482 const struct necp_client_parsed_parameters *parsed_parameters,
2483 ifnet_t ifp)
2484{
2485 if (ifp != NULL && ifp->if_agentids != NULL) {
2486 for (u_int32_t i = 0; i < ifp->if_agentcount; i++) {
2487 if (uuid_is_null(uu: ifp->if_agentids[i])) {
2488 continue;
2489 }
2490 // Relies on the side effect that nexus agents that apply will create flows
2491 (void)necp_netagent_applies_to_client(client, parameters: parsed_parameters, netagent_uuid: &ifp->if_agentids[i], TRUE,
2492 interface_index: ifp->if_index, interface_generation: ifnet_get_generation(ifp));
2493 }
2494 }
2495}
2496
2497static void
2498necp_client_add_browse_interface_options(struct necp_client *client,
2499 const struct necp_client_parsed_parameters *parsed_parameters,
2500 ifnet_t ifp)
2501{
2502 if (ifp != NULL && ifp->if_agentids != NULL) {
2503 for (u_int32_t i = 0; i < ifp->if_agentcount; i++) {
2504 if (uuid_is_null(uu: ifp->if_agentids[i])) {
2505 continue;
2506 }
2507
2508 u_int32_t flags = netagent_get_flags(uuid: ifp->if_agentids[i]);
2509 if ((flags & NETAGENT_FLAG_REGISTERED) &&
2510 (flags & NETAGENT_FLAG_ACTIVE) &&
2511 (flags & NETAGENT_FLAG_SUPPORTS_BROWSE) &&
2512 (!(flags & NETAGENT_FLAG_SPECIFIC_USE_ONLY) ||
2513 necp_netagent_is_requested(parameters: parsed_parameters, netagent_uuid: &ifp->if_agentids[i]))) {
2514 necp_client_add_interface_option_if_needed(client, interface_index: ifp->if_index, interface_generation: ifnet_get_generation(ifp), nexus_agent: &ifp->if_agentids[i], network_provider: (flags & NETAGENT_FLAG_NETWORK_PROVIDER));
2515
2516 // Finding one is enough
2517 break;
2518 }
2519 }
2520 }
2521}
2522
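/*
 * Address parameters coming from userspace are validated by checking that
 * sa_len matches the sockaddr size implied by sa_family; only AF_INET and
 * AF_INET6 are accepted. The helpers below are used during parameter parsing
 * before any field of an address is trusted.
 */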
2523static inline bool
2524_necp_client_address_is_valid(struct sockaddr *address)
2525{
2526 if (address->sa_family == AF_INET) {
2527 return address->sa_len == sizeof(struct sockaddr_in);
2528 } else if (address->sa_family == AF_INET6) {
2529 return address->sa_len == sizeof(struct sockaddr_in6);
2530 } else {
2531 return FALSE;
2532 }
2533}
2534
2535#define necp_client_address_is_valid(S) _necp_client_address_is_valid(SA(S))
2536
2537static inline bool
2538necp_client_endpoint_is_unspecified(struct necp_client_endpoint *endpoint)
2539{
2540 if (necp_client_address_is_valid(&endpoint->u.sa)) {
2541 if (endpoint->u.sa.sa_family == AF_INET) {
2542 return endpoint->u.sin.sin_addr.s_addr == INADDR_ANY;
2543 } else if (endpoint->u.sa.sa_family == AF_INET6) {
2544 return IN6_IS_ADDR_UNSPECIFIED(&endpoint->u.sin6.sin6_addr);
2545 } else {
2546 return TRUE;
2547 }
2548 } else {
2549 return TRUE;
2550 }
2551}
2552
2553#if SKYWALK
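/*
 * Patches a chosen local port back into the client's stored parameter TLVs:
 * the walk below visits each TLV, bounds-checks its length against the
 * remaining buffer, and rewrites the port in place for any valid
 * LOCAL_ADDRESS or LOCAL_ENDPOINT entry.
 */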
2554static void
2555necp_client_update_local_port_parameters(u_int8_t *parameters,
2556 u_int32_t parameters_size,
2557 uint16_t local_port)
2558{
2559 size_t offset = 0;
2560 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
2561 u_int8_t type = necp_buffer_get_tlv_type(buffer: parameters, tlv_offset: offset);
2562 u_int32_t length = necp_buffer_get_tlv_length(buffer: parameters, tlv_offset: offset);
2563
2564 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
2565 // If the length is larger than what can fit in the remaining parameters size, bail
2566 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
2567 break;
2568 }
2569
2570 if (length > 0) {
2571 u_int8_t *value = necp_buffer_get_tlv_value(buffer: parameters, tlv_offset: offset, NULL);
2572 if (value != NULL) {
2573 switch (type) {
2574 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
2575 if (length >= sizeof(struct necp_policy_condition_addr)) {
2576 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
2577 if (necp_client_address_is_valid(&address_struct->address.sa)) {
2578 if (address_struct->address.sa.sa_family == AF_INET) {
2579 address_struct->address.sin.sin_port = local_port;
2580 } else if (address_struct->address.sa.sa_family == AF_INET6) {
2581 address_struct->address.sin6.sin6_port = local_port;
2582 }
2583 }
2584 }
2585 break;
2586 }
2587 case NECP_CLIENT_PARAMETER_LOCAL_ENDPOINT: {
2588 if (length >= sizeof(struct necp_client_endpoint)) {
2589 struct necp_client_endpoint *endpoint = (struct necp_client_endpoint *)(void *)value;
2590 if (necp_client_address_is_valid(&endpoint->u.sa)) {
2591 if (endpoint->u.sa.sa_family == AF_INET) {
2592 endpoint->u.sin.sin_port = local_port;
2593 } else if (endpoint->u.sa.sa_family == AF_INET6) {
2594 endpoint->u.sin6.sin6_port = local_port;
2595 }
2596 }
2597 }
2598 break;
2599 }
2600 default: {
2601 break;
2602 }
2603 }
2604 }
2605 }
2606
2607 offset += sizeof(struct necp_tlv_header) + length;
2608 }
2609}
2610#endif /* SKYWALK */
2611
2612#define NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH 253
2613
2614static void
2615necp_client_trace_parameter_parsing(struct necp_client *client, u_int8_t type, u_int8_t *value, u_int32_t length)
2616{
2617 uint64_t num = 0;
2618 uint16_t shortBuf;
2619 uint32_t intBuf;
2620 char buffer[NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH + 1];
2621
2622 if (value != NULL && length > 0) {
2623 switch (length) {
2624 case 1:
2625 num = *value;
2626 break;
2627 case 2:
2628 memcpy(dst: &shortBuf, src: value, n: sizeof(shortBuf));
2629 num = shortBuf;
2630 break;
2631 case 4:
2632 memcpy(dst: &intBuf, src: value, n: sizeof(intBuf));
2633 num = intBuf;
2634 break;
2635 case 8:
2636 memcpy(dst: &num, src: value, n: sizeof(num));
2637 break;
2638 default:
2639 num = 0;
2640 break;
2641 }
2642 int len = NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH < length ? NECP_MAX_SOCKET_ATTRIBUTE_STRING_LENGTH : length;
2643 memcpy(dst: buffer, src: value, n: len);
2644 buffer[len] = 0;
2645 NECP_CLIENT_PARAMS_LOG(client, "Parsing param - type %d length %d value <%llu (%llX)> %s", type, length, num, num, buffer);
2646 } else {
2647 NECP_CLIENT_PARAMS_LOG(client, "Parsing param - type %d length %d", type, length);
2648 }
2649}
2650
2651static void
2652necp_client_trace_parsed_parameters(struct necp_client *client, struct necp_client_parsed_parameters *parsed_parameters)
2653{
2654 int i;
2655 char local_buffer[64] = { };
2656 char remote_buffer[64] = { };
2657 uuid_string_t uuid_str = { };
2658 uuid_unparse_lower(uu: parsed_parameters->effective_uuid, out: uuid_str);
2659
2660 switch (parsed_parameters->local_addr.sa.sa_family) {
2661 case AF_INET:
2662 if (parsed_parameters->local_addr.sa.sa_len == sizeof(struct sockaddr_in)) {
2663 struct sockaddr_in *addr = &parsed_parameters->local_addr.sin;
2664 inet_ntop(AF_INET, &(addr->sin_addr), local_buffer, sizeof(local_buffer));
2665 }
2666 break;
2667 case AF_INET6:
2668 if (parsed_parameters->local_addr.sa.sa_len == sizeof(struct sockaddr_in6)) {
2669 struct sockaddr_in6 *addr6 = &parsed_parameters->local_addr.sin6;
2670 inet_ntop(AF_INET6, &(addr6->sin6_addr), local_buffer, sizeof(local_buffer));
2671 }
2672 break;
2673 default:
2674 break;
2675 }
2676
2677 switch (parsed_parameters->remote_addr.sa.sa_family) {
2678 case AF_INET:
2679 if (parsed_parameters->remote_addr.sa.sa_len == sizeof(struct sockaddr_in)) {
2680 struct sockaddr_in *addr = &parsed_parameters->remote_addr.sin;
2681 inet_ntop(AF_INET, &(addr->sin_addr), remote_buffer, sizeof(remote_buffer));
2682 }
2683 break;
2684 case AF_INET6:
2685 if (parsed_parameters->remote_addr.sa.sa_len == sizeof(struct sockaddr_in6)) {
2686 struct sockaddr_in6 *addr6 = &parsed_parameters->remote_addr.sin6;
2687 inet_ntop(AF_INET6, &(addr6->sin6_addr), remote_buffer, sizeof(remote_buffer));
2688 }
2689 break;
2690 default:
2691 break;
2692 }
2693
2694 NECP_CLIENT_PARAMS_LOG(client, "Parsed params - valid_fields %X flags %X delegated_upid %llu local_addr %s remote_addr %s "
2695 "required_interface_index %u required_interface_type %d local_address_preference %d "
2696 "ip_protocol %d transport_protocol %d ethertype %d effective_pid %d effective_uuid %s uid %d persona_id %d traffic_class %d",
2697 parsed_parameters->valid_fields,
2698 parsed_parameters->flags,
2699 parsed_parameters->delegated_upid,
2700 local_buffer, remote_buffer,
2701 parsed_parameters->required_interface_index,
2702 parsed_parameters->required_interface_type,
2703 parsed_parameters->local_address_preference,
2704 parsed_parameters->ip_protocol,
2705 parsed_parameters->transport_protocol,
2706 parsed_parameters->ethertype,
2707 parsed_parameters->effective_pid,
2708 uuid_str,
2709 parsed_parameters->uid,
2710 parsed_parameters->persona_id,
2711 parsed_parameters->traffic_class);
2712
2713 NECP_CLIENT_PARAMS_LOG(client, "Parsed params - tracker flags <known-tracker %X> <non-app-initiated %X> <silent %X> <app-approved %X>",
2714 parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_KNOWN_TRACKER,
2715 parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_NON_APP_INITIATED,
2716 parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_SILENT,
2717 parsed_parameters->flags & NECP_CLIENT_PARAMETER_FLAG_APPROVED_APP_DOMAIN);
2718
2719 for (i = 0; i < NECP_MAX_INTERFACE_PARAMETERS && parsed_parameters->prohibited_interfaces[i][0]; i++) {
2720 NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_interfaces[%d] <%s>", i, parsed_parameters->prohibited_interfaces[i]);
2721 }
2722
2723 for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->required_netagent_types[i].netagent_domain[0]; i++) {
2724 NECP_CLIENT_PARAMS_LOG(client, "Parsed required_netagent_types[%d] <%s> <%s>", i,
2725 parsed_parameters->required_netagent_types[i].netagent_domain,
2726 parsed_parameters->required_netagent_types[i].netagent_type);
2727 }
2728 for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->prohibited_netagent_types[i].netagent_domain[0]; i++) {
2729 NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_netagent_types[%d] <%s> <%s>", i,
2730 parsed_parameters->prohibited_netagent_types[i].netagent_domain,
2731 parsed_parameters->prohibited_netagent_types[i].netagent_type);
2732 }
2733 for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->preferred_netagent_types[i].netagent_domain[0]; i++) {
2734 NECP_CLIENT_PARAMS_LOG(client, "Parsed preferred_netagent_types[%d] <%s> <%s>", i,
2735 parsed_parameters->preferred_netagent_types[i].netagent_domain,
2736 parsed_parameters->preferred_netagent_types[i].netagent_type);
2737 }
2738 for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && parsed_parameters->avoided_netagent_types[i].netagent_domain[0]; i++) {
2739 NECP_CLIENT_PARAMS_LOG(client, "Parsed avoided_netagent_types[%d] <%s> <%s>", i,
2740 parsed_parameters->avoided_netagent_types[i].netagent_domain,
2741 parsed_parameters->avoided_netagent_types[i].netagent_type);
2742 }
2743
2744 for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(uu: parsed_parameters->required_netagents[i]); i++) {
2745 uuid_unparse_lower(uu: parsed_parameters->required_netagents[i], out: uuid_str);
2746 NECP_CLIENT_PARAMS_LOG(client, "Parsed required_netagents[%d] <%s>", i, uuid_str);
2747 }
2748 for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(uu: parsed_parameters->prohibited_netagents[i]); i++) {
2749 uuid_unparse_lower(uu: parsed_parameters->prohibited_netagents[i], out: uuid_str);
2750 NECP_CLIENT_PARAMS_LOG(client, "Parsed prohibited_netagents[%d] <%s>", i, uuid_str);
2751 }
2752 for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(uu: parsed_parameters->preferred_netagents[i]); i++) {
2753 uuid_unparse_lower(uu: parsed_parameters->preferred_netagents[i], out: uuid_str);
2754 NECP_CLIENT_PARAMS_LOG(client, "Parsed preferred_netagents[%d] <%s>", i, uuid_str);
2755 }
2756 for (i = 0; i < NECP_MAX_AGENT_PARAMETERS && !uuid_is_null(uu: parsed_parameters->avoided_netagents[i]); i++) {
2757 uuid_unparse_lower(uu: parsed_parameters->avoided_netagents[i], out: uuid_str);
2758 NECP_CLIENT_PARAMS_LOG(client, "Parsed avoided_netagents[%d] <%s>", i, uuid_str);
2759 }
2760}
2761
2762static bool
2763necp_client_strings_are_equal(const char *string1, size_t string1_length,
2764 const char *string2, size_t string2_length)
2765{
2766 if (string1 == NULL || string2 == NULL) {
2767 return false;
2768 }
2769 const size_t string1_actual_length = strnlen(s: string1, n: string1_length);
2770 const size_t string2_actual_length = strnlen(s: string2, n: string2_length);
2771 if (string1_actual_length != string2_actual_length) {
2772 return false;
2773 }
2774 return strncmp(s1: string1, s2: string2, n: string1_actual_length) == 0;
2775}
2776
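/*
 * necp_client_parse_parameters() walks the client's parameter TLVs and fills
 * in a necp_client_parsed_parameters structure for policy evaluation. Each
 * TLV length is validated against the remaining buffer before its value is
 * read, and string parameters such as the bound interface name are explicitly
 * NUL-terminated before use. The counters declared at the top of the function
 * track how many entries of each repeated parameter type have been accepted.
 */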
2777static int
2778necp_client_parse_parameters(struct necp_client *client, u_int8_t *parameters,
2779 u_int32_t parameters_size,
2780 struct necp_client_parsed_parameters *parsed_parameters)
2781{
2782 int error = 0;
2783 size_t offset = 0;
2784
2785 u_int32_t num_prohibited_interfaces = 0;
2786 u_int32_t num_prohibited_interface_types = 0;
2787 u_int32_t num_required_agents = 0;
2788 u_int32_t num_prohibited_agents = 0;
2789 u_int32_t num_preferred_agents = 0;
2790 u_int32_t num_avoided_agents = 0;
2791 u_int32_t num_required_agent_types = 0;
2792 u_int32_t num_prohibited_agent_types = 0;
2793 u_int32_t num_preferred_agent_types = 0;
2794 u_int32_t num_avoided_agent_types = 0;
2795 u_int8_t *resolver_tag = NULL;
2796 u_int32_t resolver_tag_length = 0;
2797 u_int8_t *client_hostname = NULL;
2798 u_int32_t hostname_length = 0;
2799 uuid_t parent_id = {};
2800
2801 if (parsed_parameters == NULL) {
2802 return EINVAL;
2803 }
2804
2805 memset(s: parsed_parameters, c: 0, n: sizeof(struct necp_client_parsed_parameters));
2806
2807 while ((offset + sizeof(struct necp_tlv_header)) <= parameters_size) {
2808 u_int8_t type = necp_buffer_get_tlv_type(buffer: parameters, tlv_offset: offset);
2809 u_int32_t length = necp_buffer_get_tlv_length(buffer: parameters, tlv_offset: offset);
2810
2811 if (length > (parameters_size - (offset + sizeof(struct necp_tlv_header)))) {
2812 // If the length is larger than what can fit in the remaining parameters size, bail
2813 NECPLOG(LOG_ERR, "Invalid TLV length (%u)", length);
2814 break;
2815 }
2816
2817 if (length > 0) {
2818 u_int8_t *value = necp_buffer_get_tlv_value(buffer: parameters, tlv_offset: offset, NULL);
2819 if (value != NULL) {
2820 switch (type) {
2821 case NECP_CLIENT_PARAMETER_BOUND_INTERFACE: {
2822 if (length <= IFXNAMSIZ && length > 0) {
2823 ifnet_t bound_interface = NULL;
2824 char interface_name[IFXNAMSIZ];
2825 memcpy(dst: interface_name, src: value, n: length);
2826 interface_name[length - 1] = 0; // Make sure the string is NULL terminated
2827 if (ifnet_find_by_name(ifname: interface_name, interface: &bound_interface) == 0) {
2828 parsed_parameters->required_interface_index = bound_interface->if_index;
2829 parsed_parameters->valid_fields |= NECP_PARSED_PARAMETERS_FIELD_REQUIRED_IF;
2830 ifnet_release(interface: bound_interface);
2831 }
2832 }
2833 break;
2834 }
2835 case NECP_CLIENT_PARAMETER_LOCAL_ADDRESS: {
2836 if (length >= sizeof(struct necp_policy_condition_addr)) {
2837 struct necp_policy_condition_addr *address_struct = (struct necp_policy_condition_addr *)(void *)value;
2838 if (necp_client_address_is_valid(&address_struct->address.sa)) {
2839 memcpy(dst: &parsed_parameters->local_addr, src: &address_struct->address, n: sizeof(address_struct->address));
2840 if (!