1 | /* |
2 | * Copyright (c) 2010-2021 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <sys/param.h> |
30 | #include <sys/types.h> |
31 | #include <sys/kpi_mbuf.h> |
32 | #include <sys/socket.h> |
33 | #include <sys/kern_control.h> |
34 | #include <sys/mcache.h> |
35 | #include <sys/socketvar.h> |
36 | #include <sys/sysctl.h> |
37 | #include <sys/queue.h> |
38 | #include <sys/priv.h> |
39 | #include <sys/protosw.h> |
40 | #include <sys/persona.h> |
41 | |
42 | #include <kern/clock.h> |
43 | #include <kern/debug.h> |
44 | |
45 | #include <libkern/libkern.h> |
46 | #include <libkern/OSAtomic.h> |
47 | #include <libkern/locks.h> |
48 | |
49 | #include <net/if.h> |
50 | #include <net/if_var.h> |
51 | #include <net/if_types.h> |
52 | #include <net/route.h> |
53 | #include <net/dlil.h> |
54 | |
// These headers are also included by ntstat.h; pull them in here first so they
// are not compiled under the strict clang diagnostics enabled just below.
57 | #include <netinet/in.h> |
58 | #include <netinet/in_stat.h> |
59 | #include <netinet/tcp.h> |
60 | |
61 | #pragma clang diagnostic push |
62 | #pragma clang diagnostic error "-Wpadded" |
63 | #pragma clang diagnostic error "-Wpacked" |
// This header defines structures shared with user space, so we need to ensure there is
// no compiler-inserted padding in case the user space process isn't using the same
// architecture as the kernel (example: i386 process with x86_64 kernel).
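// For example, a struct holding a u_int32_t followed by a u_int64_t occupies
// 12 bytes when built for i386 (where u_int64_t is 4-byte aligned) but 16
// bytes on x86_64, so an unchecked layout could silently differ across the
// user/kernel boundary.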
67 | #include <net/ntstat.h> |
68 | #pragma clang diagnostic pop |
69 | |
70 | #include <netinet/ip_var.h> |
71 | #include <netinet/in_pcb.h> |
72 | #include <netinet/in_var.h> |
73 | #include <netinet/tcp_var.h> |
74 | #include <netinet/tcp_fsm.h> |
75 | #include <netinet/tcp_cc.h> |
76 | #include <netinet/udp.h> |
77 | #include <netinet/udp_var.h> |
78 | #include <netinet6/in6_pcb.h> |
79 | #include <netinet6/in6_var.h> |
80 | |
81 | #include <net/sockaddr_utils.h> |
82 | |
83 | __private_extern__ int nstat_collect = 1; |
84 | |
85 | #if (DEBUG || DEVELOPMENT) |
86 | SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED, |
    &nstat_collect, 0, "Collect detailed statistics");
88 | #endif /* (DEBUG || DEVELOPMENT) */ |
89 | |
90 | #if !XNU_TARGET_OS_OSX |
91 | static int nstat_privcheck = 1; |
92 | #else /* XNU_TARGET_OS_OSX */ |
93 | static int nstat_privcheck = 0; |
94 | #endif /* XNU_TARGET_OS_OSX */ |
95 | SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED, |
    &nstat_privcheck, 0, "Entitlement check");
97 | |
98 | SYSCTL_NODE(_net, OID_AUTO, stats, |
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");
100 | |
101 | static int nstat_debug = 0; |
102 | SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, |
    &nstat_debug, 0, "");
104 | |
static int nstat_debug_pid = 0; // Only log socket-level debug for the specified pid
106 | SYSCTL_INT(_net_stats, OID_AUTO, debug_pid, CTLFLAG_RW | CTLFLAG_LOCKED, |
    &nstat_debug_pid, 0, "");
108 | |
109 | static int nstat_sendspace = 2048; |
110 | SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED, |
    &nstat_sendspace, 0, "");
112 | |
113 | static int nstat_recvspace = 8192; |
114 | SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED, |
    &nstat_recvspace, 0, "");
116 | |
117 | static struct nstat_stats nstat_stats; |
118 | SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED, |
    &nstat_stats, nstat_stats, "");
120 | |
121 | static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */ |
122 | static u_int32_t nstat_lim_min_tx_pkts = 100; |
123 | static u_int32_t nstat_lim_min_rx_pkts = 100; |
124 | #if (DEBUG || DEVELOPMENT) |
SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
    "Low Internet stat report interval");

SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
    "Low Internet, min transmit packets threshold");

SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
    "Low Internet, min receive packets threshold");
136 | #endif /* DEBUG || DEVELOPMENT */ |
137 | |
138 | static struct net_api_stats net_api_stats_before; |
139 | static u_int64_t net_api_stats_last_report_time; |
140 | #define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */ |
141 | static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL; |
142 | |
143 | #if (DEBUG || DEVELOPMENT) |
144 | SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval, |
    CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
146 | #endif /* DEBUG || DEVELOPMENT */ |
147 | |
148 | #define NSTAT_DEBUG_SOCKET_PID_MATCHED(so) \ |
149 | (so && (nstat_debug_pid == (so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid))) |
150 | |
151 | #define NSTAT_DEBUG_SOCKET_ON(so) \ |
152 | ((nstat_debug && (!nstat_debug_pid || NSTAT_DEBUG_SOCKET_PID_MATCHED(so))) ? nstat_debug : 0) |
153 | |
154 | #define NSTAT_DEBUG_SOCKET_LOG(so, fmt, ...) \ |
155 | if (NSTAT_DEBUG_SOCKET_ON(so)) { \ |
156 | printf("NSTAT_DEBUG_SOCKET <pid %d>: " fmt "\n", (so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid), ##__VA_ARGS__); \ |
157 | } |
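
// Illustrative usage (the variable `error` here is hypothetical):
//	NSTAT_DEBUG_SOCKET_LOG(so, "dropped update, error %d", error);
// This logs only when nstat_debug is set and, if nstat_debug_pid is nonzero,
// only for sockets whose effective pid matches it.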
158 | |
159 | enum{ |
160 | NSTAT_FLAG_CLEANUP = (1 << 0), |
161 | NSTAT_FLAG_REQCOUNTS = (1 << 1), |
162 | NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2), |
163 | NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3), |
164 | }; |
165 | |
166 | #if !XNU_TARGET_OS_OSX |
167 | #define QUERY_CONTINUATION_SRC_COUNT 50 |
168 | #else /* XNU_TARGET_OS_OSX */ |
169 | #define QUERY_CONTINUATION_SRC_COUNT 100 |
170 | #endif /* XNU_TARGET_OS_OSX */ |
171 | |
172 | #ifndef ROUNDUP64 |
173 | #define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t)) |
174 | #endif |
175 | |
176 | #ifndef ADVANCE64 |
177 | #define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n)) |
178 | #endif |
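
// For example, ROUNDUP64(5) == 8 and ROUNDUP64(16) == 16, so ADVANCE64(p, 5)
// advances p by 8 bytes, keeping successive variable-length records 8-byte
// aligned.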
179 | |
180 | typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src; |
181 | typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src; |
182 | |
183 | typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow; |
184 | typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow; |
185 | |
186 | typedef TAILQ_HEAD(, nstat_generic_shadow) tailq_head_generic_shadow; |
187 | typedef TAILQ_ENTRY(nstat_generic_shadow) tailq_entry_generic_shadow; |
188 | |
189 | typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails; |
190 | typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails; |
191 | |
192 | struct nstat_procdetails { |
193 | tailq_entry_procdetails pdet_link; |
194 | int pdet_pid; |
195 | u_int64_t pdet_upid; |
196 | char pdet_procname[64]; |
197 | uuid_t pdet_uuid; |
198 | u_int32_t pdet_refcnt; |
199 | u_int32_t pdet_magic; |
200 | }; |
201 | |
202 | typedef struct nstat_provider_filter { |
203 | u_int64_t npf_flags; |
204 | u_int64_t npf_events; |
205 | u_int64_t npf_extensions; |
206 | pid_t npf_pid; |
207 | uuid_t npf_uuid; |
208 | } nstat_provider_filter; |
209 | |
210 | |
211 | typedef struct nstat_control_state { |
212 | struct nstat_control_state *ncs_next; |
	/* A bitmask of the providers for which NSTAT_MSG_TYPE_ADD_ALL_SRCS has been done */
	u_int32_t ncs_watching;
	/* A bitmask of the providers for which NSTAT_MSG_TYPE_ADD_SRC has been done */
216 | u_int32_t ncs_added_src; |
217 | decl_lck_mtx_data(, ncs_mtx); |
218 | kern_ctl_ref ncs_kctl; |
219 | u_int32_t ncs_unit; |
220 | nstat_src_ref_t ncs_next_srcref; |
221 | tailq_head_nstat_src ncs_src_queue; |
222 | mbuf_t ncs_accumulated; |
223 | u_int32_t ncs_flags; |
224 | nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT]; |
225 | /* state maintained for partial query requests */ |
226 | u_int64_t ncs_context; |
227 | u_int64_t ncs_seq; |
228 | /* For ease of debugging with lldb macros */ |
229 | struct nstat_procdetails *ncs_procdetails; |
230 | } nstat_control_state; |
231 | |
232 | typedef struct nstat_provider { |
233 | struct nstat_provider *next; |
234 | nstat_provider_id_t nstat_provider_id; |
235 | size_t nstat_descriptor_length; |
236 | errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie); |
237 | int (*nstat_gone)(nstat_provider_cookie_t cookie); |
238 | errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone); |
239 | errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req); |
240 | void (*nstat_watcher_remove)(nstat_control_state *state); |
241 | errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, size_t len); |
242 | void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked); |
243 | bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, u_int64_t suppression_flags); |
244 | bool (*nstat_cookie_equal)(nstat_provider_cookie_t cookie1, nstat_provider_cookie_t cookie2); |
245 | size_t (*nstat_copy_extension)(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len); |
246 | } nstat_provider; |
247 | |
248 | typedef struct nstat_src { |
249 | tailq_entry_nstat_src ns_control_link; // All sources for the nstat_control_state, for iterating over. |
250 | nstat_control_state *ns_control; // The nstat_control_state that this is a source for |
251 | nstat_src_ref_t srcref; |
252 | nstat_provider *provider; |
253 | nstat_provider_cookie_t cookie; |
254 | uint32_t filter; |
255 | bool ns_reported; // At least one update/counts/desc message has been sent |
256 | uint64_t seq; |
257 | } nstat_src; |
258 | |
259 | // The merge structures are intended to give a global picture of what may be asked for by the current set of clients |
260 | // This is to avoid taking locks to check them all individually |
261 | typedef struct nstat_merged_provider_filter { |
262 | u_int64_t mf_events; // So far we only merge the events portion of any filters |
263 | } nstat_merged_provider_filter; |
264 | |
265 | typedef struct nstat_merged_provider_filters { |
266 | nstat_merged_provider_filter mpf_filters[NSTAT_PROVIDER_COUNT]; |
267 | } nstat_merged_provider_filters; |
268 | |
269 | static errno_t nstat_control_send_counts(nstat_control_state *, nstat_src *, unsigned long long, u_int16_t, int *); |
270 | static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags); |
271 | static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int64_t event, u_int16_t hdr_flags, int *gone); |
272 | static errno_t nstat_control_send_removed(nstat_control_state *state, nstat_src *src, u_int16_t hdr_flags); |
273 | static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src); |
274 | static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t); |
275 | static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src, u_int64_t suppression_flags); |
276 | static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp); |
277 | static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial); |
278 | static void nstat_ifnet_report_ecn_stats(void); |
279 | static void nstat_ifnet_report_lim_stats(void); |
280 | static void nstat_net_api_report_stats(void); |
static errno_t nstat_set_provider_filter(nstat_control_state *state, nstat_msg_add_all_srcs *req);
282 | static errno_t nstat_control_send_event(nstat_control_state *state, nstat_src *src, u_int64_t event); |
283 | |
284 | static u_int32_t nstat_udp_watchers = 0; |
285 | static u_int32_t nstat_tcp_watchers = 0; |
286 | static nstat_merged_provider_filters merged_filters = {}; |
287 | |
288 | static void nstat_control_register(void); |
289 | |
290 | /* |
291 | * The lock order is as follows: |
292 | * |
293 | * socket_lock (inpcb) |
294 | * nstat_mtx |
295 | * state->ncs_mtx |
296 | */ |
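/*
 * For example (illustrative of the ordering, not a new code path):
 * nstat_pcb_detach() takes nstat_mtx and then each state->ncs_mtx in
 * turn, while nstat_tcp_new_pcb() takes the socket lock before taking
 * nstat_mtx; acquiring these in the opposite order would risk deadlock.
 */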
297 | static nstat_control_state *nstat_controls = NULL; |
298 | static uint64_t nstat_idle_time = 0; |
static LCK_GRP_DECLARE(nstat_lck_grp, "network statistics kctl");
300 | static LCK_MTX_DECLARE(nstat_mtx, &nstat_lck_grp); |
301 | |
302 | |
303 | /* some extern definitions */ |
304 | extern void tcp_report_stats(void); |
305 | |
306 | static void |
307 | nstat_copy_sa_out( |
308 | const struct sockaddr *src, |
309 | struct sockaddr *dst, |
310 | int maxlen) |
311 | { |
312 | if (src->sa_len > maxlen) { |
313 | return; |
314 | } |
315 | |
316 | SOCKADDR_COPY(src, dst, src->sa_len); |
317 | if (src->sa_family == AF_INET6 && |
318 | src->sa_len >= sizeof(struct sockaddr_in6)) { |
319 | struct sockaddr_in6 *sin6 = SIN6(dst); |
320 | if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) { |
321 | sin6->sin6_scope_id = (SIN6(src))->sin6_scope_id; |
322 | if (in6_embedded_scope) { |
323 | in6_verify_ifscope(&sin6->sin6_addr, sin6->sin6_scope_id); |
324 | sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]); |
325 | sin6->sin6_addr.s6_addr16[1] = 0; |
326 | } |
327 | } |
328 | } |
329 | } |
330 | |
331 | static void |
332 | nstat_ip_to_sockaddr( |
333 | const struct in_addr *ip, |
334 | u_int16_t port, |
335 | struct sockaddr_in *sin, |
336 | u_int32_t maxlen) |
337 | { |
338 | if (maxlen < sizeof(struct sockaddr_in)) { |
339 | return; |
340 | } |
341 | |
342 | sin->sin_family = AF_INET; |
343 | sin->sin_len = sizeof(*sin); |
344 | sin->sin_port = port; |
345 | sin->sin_addr = *ip; |
346 | } |
347 | |
348 | u_int32_t |
349 | nstat_ifnet_to_flags( |
350 | struct ifnet *ifp) |
351 | { |
352 | u_int32_t flags = 0; |
353 | u_int32_t functional_type = if_functional_type(ifp, FALSE); |
354 | |
355 | /* Panic if someone adds a functional type without updating ntstat. */ |
356 | VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST); |
357 | |
358 | switch (functional_type) { |
359 | case IFRTYPE_FUNCTIONAL_UNKNOWN: |
360 | flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE; |
361 | break; |
362 | case IFRTYPE_FUNCTIONAL_LOOPBACK: |
363 | flags |= NSTAT_IFNET_IS_LOOPBACK; |
364 | break; |
365 | case IFRTYPE_FUNCTIONAL_WIRED: |
366 | case IFRTYPE_FUNCTIONAL_INTCOPROC: |
367 | case IFRTYPE_FUNCTIONAL_MANAGEMENT: |
368 | flags |= NSTAT_IFNET_IS_WIRED; |
369 | break; |
370 | case IFRTYPE_FUNCTIONAL_WIFI_INFRA: |
371 | flags |= NSTAT_IFNET_IS_WIFI | NSTAT_IFNET_IS_WIFI_INFRA; |
372 | break; |
373 | case IFRTYPE_FUNCTIONAL_WIFI_AWDL: |
374 | flags |= NSTAT_IFNET_IS_WIFI | NSTAT_IFNET_IS_AWDL; |
375 | break; |
376 | case IFRTYPE_FUNCTIONAL_CELLULAR: |
377 | flags |= NSTAT_IFNET_IS_CELLULAR; |
378 | break; |
379 | case IFRTYPE_FUNCTIONAL_COMPANIONLINK: |
380 | flags |= NSTAT_IFNET_IS_COMPANIONLINK; |
381 | break; |
382 | } |
383 | |
384 | if (IFNET_IS_EXPENSIVE(ifp)) { |
385 | flags |= NSTAT_IFNET_IS_EXPENSIVE; |
386 | } |
387 | if (IFNET_IS_CONSTRAINED(ifp)) { |
388 | flags |= NSTAT_IFNET_IS_CONSTRAINED; |
389 | } |
390 | if (ifp->if_xflags & IFXF_LOW_LATENCY) { |
391 | flags |= NSTAT_IFNET_IS_WIFI | NSTAT_IFNET_IS_LLW; |
392 | } |
393 | |
394 | return flags; |
395 | } |
396 | |
397 | static void |
398 | nstat_update_local_flag_from_inpcb_route(const struct inpcb *inp, |
399 | u_int32_t *flags) |
400 | { |
401 | if (inp != NULL && |
402 | ((inp->inp_route.ro_rt != NULL && |
403 | IS_LOCALNET_ROUTE(inp->inp_route.ro_rt)) || |
404 | (inp->inp_flags2 & INP2_LAST_ROUTE_LOCAL))) { |
405 | *flags |= NSTAT_IFNET_IS_LOCAL; |
406 | } else { |
407 | *flags |= NSTAT_IFNET_IS_NON_LOCAL; |
408 | } |
409 | } |
410 | |
411 | static u_int32_t |
412 | nstat_inpcb_to_flags( |
413 | const struct inpcb *inp) |
414 | { |
415 | u_int32_t flags = 0; |
416 | |
417 | if (inp != NULL) { |
418 | if (inp->inp_last_outifp != NULL) { |
419 | struct ifnet *ifp = inp->inp_last_outifp; |
420 | flags = nstat_ifnet_to_flags(ifp); |
421 | |
422 | struct tcpcb *tp = intotcpcb(inp); |
423 | if (tp) { |
424 | if (tp->t_flags & TF_LOCAL) { |
425 | flags |= NSTAT_IFNET_IS_LOCAL; |
426 | } else { |
427 | flags |= NSTAT_IFNET_IS_NON_LOCAL; |
428 | } |
429 | } else { |
				nstat_update_local_flag_from_inpcb_route(inp, &flags);
431 | } |
432 | } else { |
433 | flags = NSTAT_IFNET_IS_UNKNOWN_TYPE; |
			nstat_update_local_flag_from_inpcb_route(inp, &flags);
435 | } |
436 | if (inp->inp_socket != NULL && |
437 | (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) { |
438 | flags |= NSTAT_IFNET_VIA_CELLFALLBACK; |
439 | } |
440 | } |
441 | return flags; |
442 | } |
443 | |
444 | static void |
445 | merge_current_event_filters(void) |
446 | { |
447 | // The nstat_mtx is assumed locked |
448 | nstat_merged_provider_filters new_merge = {}; |
449 | nstat_provider_type_t provider; |
450 | nstat_control_state *state; |
451 | |
452 | for (state = nstat_controls; state; state = state->ncs_next) { |
453 | for (provider = NSTAT_PROVIDER_NONE; provider <= NSTAT_PROVIDER_LAST; provider++) { |
454 | new_merge.mpf_filters[provider].mf_events |= state->ncs_provider_filters[provider].npf_events; |
455 | } |
456 | } |
457 | for (provider = NSTAT_PROVIDER_NONE; provider <= NSTAT_PROVIDER_LAST; provider++) { |
		// Update each 64-bit word individually; these aligned stores are
		// atomic, where a memcpy over the array would give no such guarantee
		merged_filters.mpf_filters[provider].mf_events = new_merge.mpf_filters[provider].mf_events;
460 | } |
461 | } |
462 | |
463 | |
464 | #pragma mark -- Network Statistic Providers -- |
465 | |
466 | static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie); |
467 | struct nstat_provider *nstat_providers = NULL; |
468 | |
469 | static struct nstat_provider* |
470 | nstat_find_provider_by_id( |
471 | nstat_provider_id_t id) |
472 | { |
473 | struct nstat_provider *provider; |
474 | |
475 | for (provider = nstat_providers; provider != NULL; provider = provider->next) { |
476 | if (provider->nstat_provider_id == id) { |
477 | break; |
478 | } |
479 | } |
480 | |
481 | return provider; |
482 | } |
483 | |
484 | static errno_t |
485 | nstat_lookup_entry( |
486 | nstat_provider_id_t id, |
487 | const void *data, |
488 | u_int32_t length, |
489 | nstat_provider **out_provider, |
490 | nstat_provider_cookie_t *out_cookie) |
491 | { |
492 | *out_provider = nstat_find_provider_by_id(id); |
493 | if (*out_provider == NULL) { |
494 | return ENOENT; |
495 | } |
496 | |
497 | return (*out_provider)->nstat_lookup(data, length, out_cookie); |
498 | } |
499 | |
500 | static void |
501 | nstat_control_sanitize_cookie( |
502 | nstat_control_state *state, |
503 | nstat_provider_id_t id, |
504 | nstat_provider_cookie_t cookie) |
505 | { |
506 | nstat_src *src = NULL; |
507 | |
508 | // Scan the source list to find any duplicate entry and remove it. |
	lck_mtx_lock(&state->ncs_mtx);
510 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
511 | { |
512 | nstat_provider *sp = src->provider; |
513 | if (sp->nstat_provider_id == id && |
514 | sp->nstat_cookie_equal != NULL && |
515 | sp->nstat_cookie_equal(src->cookie, cookie)) { |
516 | break; |
517 | } |
518 | } |
519 | if (src) { |
520 | nstat_control_send_goodbye(state, src); |
521 | TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); |
522 | } |
	lck_mtx_unlock(&state->ncs_mtx);
524 | |
525 | if (src) { |
526 | nstat_control_cleanup_source(NULL, src, TRUE); |
527 | } |
528 | } |
529 | |
530 | static void nstat_init_route_provider(void); |
531 | static void nstat_init_tcp_provider(void); |
532 | static void nstat_init_udp_provider(void); |
533 | #if SKYWALK |
534 | static void nstat_init_userland_tcp_provider(void); |
535 | static void nstat_init_userland_udp_provider(void); |
536 | static void nstat_init_userland_quic_provider(void); |
537 | #endif /* SKYWALK */ |
538 | static void nstat_init_userland_conn_provider(void); |
539 | static void nstat_init_udp_subflow_provider(void); |
540 | static void nstat_init_ifnet_provider(void); |
541 | |
542 | __private_extern__ void |
543 | nstat_init(void) |
544 | { |
545 | nstat_init_route_provider(); |
546 | nstat_init_tcp_provider(); |
547 | nstat_init_udp_provider(); |
548 | #if SKYWALK |
549 | nstat_init_userland_tcp_provider(); |
550 | nstat_init_userland_udp_provider(); |
551 | nstat_init_userland_quic_provider(); |
552 | #endif /* SKYWALK */ |
553 | nstat_init_userland_conn_provider(); |
554 | nstat_init_udp_subflow_provider(); |
555 | nstat_init_ifnet_provider(); |
556 | nstat_control_register(); |
557 | } |
558 | |
559 | #pragma mark -- Aligned Buffer Allocation -- |
560 | |
struct align_header {
	u_int32_t offset;
	u_int32_t length;
};
565 | |
566 | static void* |
567 | nstat_malloc_aligned( |
568 | size_t length, |
569 | u_int8_t alignment, |
570 | zalloc_flags_t flags) |
571 | { |
572 | struct align_header *hdr = NULL; |
573 | size_t size = length + sizeof(*hdr) + alignment - 1; |
574 | |
575 | // Arbitrary limit to prevent abuse |
576 | if (length > (64 * 1024)) { |
577 | return NULL; |
578 | } |
579 | u_int8_t *buffer = (u_int8_t *)kalloc_data(size, flags); |
580 | if (buffer == NULL) { |
581 | return NULL; |
582 | } |
583 | |
584 | u_int8_t *aligned = buffer + sizeof(*hdr); |
585 | aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment); |
586 | |
587 | hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr)); |
588 | hdr->offset = aligned - buffer; |
589 | hdr->length = size; |
590 | |
591 | return aligned; |
592 | } |
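
/*
 * Resulting layout (illustrative):
 *
 *	buffer                           aligned (returned to the caller)
 *	v                                v
 *	[ padding ][ struct align_header ][ length usable bytes ... ]
 *
 * hdr->offset records (aligned - buffer) so that nstat_free_aligned() can
 * recover the pointer originally returned by kalloc_data(), and hdr->length
 * records the full allocation size to hand back to kfree_data().
 */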
593 | |
594 | static void |
595 | nstat_free_aligned( |
596 | void *buffer) |
597 | { |
598 | struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr)); |
599 | char *offset_buffer = (char *)buffer - hdr->offset; |
600 | kfree_data(offset_buffer, hdr->length); |
601 | } |
602 | |
603 | #pragma mark -- Utilities -- |
604 | |
605 | #define NSTAT_PROCDETAILS_MAGIC 0xfeedc001 |
606 | #define NSTAT_PROCDETAILS_UNMAGIC 0xdeadc001 |
607 | |
608 | static tailq_head_procdetails nstat_procdetails_head = TAILQ_HEAD_INITIALIZER(nstat_procdetails_head); |
609 | |
610 | static struct nstat_procdetails * |
611 | nstat_retain_curprocdetails(void) |
612 | { |
613 | struct nstat_procdetails *procdetails = NULL; |
614 | uint64_t upid = proc_uniqueid(current_proc()); |
615 | |
	lck_mtx_lock(&nstat_mtx);
617 | |
618 | TAILQ_FOREACH(procdetails, &nstat_procdetails_head, pdet_link) { |
619 | assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC); |
620 | |
621 | if (procdetails->pdet_upid == upid) { |
622 | OSIncrementAtomic(&procdetails->pdet_refcnt); |
623 | break; |
624 | } |
625 | } |
	lck_mtx_unlock(&nstat_mtx);
	if (!procdetails) {
		// No need for paranoia on locking; it would be harmless if
		// duplicate structs briefly coexisted on the list
		procdetails = kalloc_type(struct nstat_procdetails,
		    Z_WAITOK | Z_NOFAIL);
		procdetails->pdet_pid = proc_selfpid();
		procdetails->pdet_upid = upid;
		proc_selfname(procdetails->pdet_procname, sizeof(procdetails->pdet_procname));
		proc_getexecutableuuid(current_proc(), procdetails->pdet_uuid, sizeof(uuid_t));
		procdetails->pdet_refcnt = 1;
		procdetails->pdet_magic = NSTAT_PROCDETAILS_MAGIC;
		lck_mtx_lock(&nstat_mtx);
		TAILQ_INSERT_HEAD(&nstat_procdetails_head, procdetails, pdet_link);
		lck_mtx_unlock(&nstat_mtx);
640 | } |
641 | |
642 | return procdetails; |
643 | } |
644 | |
645 | static void |
646 | nstat_release_procdetails(struct nstat_procdetails *procdetails) |
647 | { |
648 | assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC); |
649 | // These are harvested later to amortize costs |
650 | OSDecrementAtomic(&procdetails->pdet_refcnt); |
651 | } |
652 | |
653 | static void |
654 | nstat_prune_procdetails(void) |
655 | { |
656 | struct nstat_procdetails *procdetails; |
657 | struct nstat_procdetails *tmpdetails; |
658 | tailq_head_procdetails dead_list; |
659 | |
660 | TAILQ_INIT(&dead_list); |
	lck_mtx_lock(&nstat_mtx);
662 | |
663 | TAILQ_FOREACH_SAFE(procdetails, &nstat_procdetails_head, pdet_link, tmpdetails) |
664 | { |
665 | assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC); |
666 | if (procdetails->pdet_refcnt == 0) { |
667 | // Pull it off the list |
668 | TAILQ_REMOVE(&nstat_procdetails_head, procdetails, pdet_link); |
669 | TAILQ_INSERT_TAIL(&dead_list, procdetails, pdet_link); |
670 | } |
671 | } |
	lck_mtx_unlock(&nstat_mtx);
673 | |
674 | while ((procdetails = TAILQ_FIRST(&dead_list))) { |
675 | TAILQ_REMOVE(&dead_list, procdetails, pdet_link); |
676 | procdetails->pdet_magic = NSTAT_PROCDETAILS_UNMAGIC; |
677 | kfree_type(struct nstat_procdetails, procdetails); |
678 | } |
679 | } |
680 | |
681 | #pragma mark -- Route Provider -- |
682 | |
683 | static nstat_provider nstat_route_provider; |
684 | |
685 | static errno_t |
686 | nstat_route_lookup( |
687 | const void *data, |
688 | u_int32_t length, |
689 | nstat_provider_cookie_t *out_cookie) |
690 | { |
691 | struct sockaddr *dst = NULL; |
692 | struct sockaddr *mask = NULL; |
693 | const nstat_route_add_param *param = (const nstat_route_add_param*)data; |
694 | *out_cookie = NULL; |
695 | |
696 | if (length < sizeof(*param)) { |
697 | return EINVAL; |
698 | } |
699 | |
700 | if (param->dst.v4.sin_family == 0 || |
701 | param->dst.v4.sin_family > AF_MAX || |
702 | (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) { |
703 | return EINVAL; |
704 | } |
705 | |
	if (param->dst.v4.sin_len > sizeof(param->dst) ||
	    (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask))) {
		return EINVAL;
	}
710 | if ((param->dst.v4.sin_family == AF_INET && |
711 | param->dst.v4.sin_len < sizeof(struct sockaddr_in)) || |
712 | (param->dst.v6.sin6_family == AF_INET6 && |
713 | param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) { |
714 | return EINVAL; |
715 | } |
716 | |
717 | dst = __DECONST_SA(¶m->dst.v4); |
718 | mask = param->mask.v4.sin_family ? __DECONST_SA(¶m->mask.v4) : NULL; |
719 | |
720 | struct radix_node_head *rnh = rt_tables[dst->sa_family]; |
721 | if (rnh == NULL) { |
722 | return EAFNOSUPPORT; |
723 | } |
724 | |
725 | lck_mtx_lock(rnh_lock); |
726 | struct rtentry *rt = rt_lookup(TRUE, dst, mask, rnh, param->ifindex); |
727 | lck_mtx_unlock(rnh_lock); |
728 | |
729 | if (rt) { |
730 | *out_cookie = (nstat_provider_cookie_t)rt; |
731 | } |
732 | |
733 | return rt ? 0 : ENOENT; |
734 | } |
735 | |
736 | static int |
737 | nstat_route_gone( |
738 | nstat_provider_cookie_t cookie) |
739 | { |
740 | struct rtentry *rt = (struct rtentry*)cookie; |
741 | return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0; |
742 | } |
743 | |
744 | static errno_t |
745 | nstat_route_counts( |
746 | nstat_provider_cookie_t cookie, |
747 | struct nstat_counts *out_counts, |
748 | int *out_gone) |
749 | { |
750 | struct rtentry *rt = (struct rtentry*)cookie; |
751 | struct nstat_counts *rt_stats = rt->rt_stats; |
752 | |
753 | if (out_gone) { |
754 | *out_gone = 0; |
755 | } |
756 | |
757 | if (out_gone && (rt->rt_flags & RTF_UP) == 0) { |
758 | *out_gone = 1; |
759 | } |
760 | |
761 | if (rt_stats) { |
762 | out_counts->nstat_rxpackets = os_atomic_load(&rt_stats->nstat_rxpackets, relaxed); |
763 | out_counts->nstat_rxbytes = os_atomic_load(&rt_stats->nstat_rxbytes, relaxed); |
764 | out_counts->nstat_txpackets = os_atomic_load(&rt_stats->nstat_txpackets, relaxed); |
765 | out_counts->nstat_txbytes = os_atomic_load(&rt_stats->nstat_txbytes, relaxed); |
766 | out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes; |
767 | out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes; |
768 | out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit; |
769 | out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts; |
770 | out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses; |
771 | out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt; |
772 | out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt; |
773 | out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt; |
774 | out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0; |
775 | } else { |
		bzero(out_counts, sizeof(*out_counts));
777 | } |
778 | |
779 | return 0; |
780 | } |
781 | |
782 | static void |
783 | nstat_route_release( |
784 | nstat_provider_cookie_t cookie, |
785 | __unused int locked) |
786 | { |
787 | rtfree((struct rtentry*)cookie); |
788 | } |
789 | |
790 | static u_int32_t nstat_route_watchers = 0; |
791 | |
792 | static int |
793 | nstat_route_walktree_add( |
794 | struct radix_node *rn, |
795 | void *context) |
796 | { |
797 | errno_t result = 0; |
798 | struct rtentry *rt = (struct rtentry *)rn; |
799 | nstat_control_state *state = (nstat_control_state*)context; |
800 | |
801 | LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED); |
802 | |
803 | /* RTF_UP can't change while rnh_lock is held */ |
804 | if ((rt->rt_flags & RTF_UP) != 0) { |
805 | /* Clear RTPRF_OURS if the route is still usable */ |
806 | RT_LOCK(rt); |
807 | if (rt_validate(rt)) { |
808 | RT_ADDREF_LOCKED(rt); |
809 | RT_UNLOCK(rt); |
810 | } else { |
811 | RT_UNLOCK(rt); |
812 | rt = NULL; |
813 | } |
814 | |
815 | /* Otherwise if RTF_CONDEMNED, treat it as if it were down */ |
816 | if (rt == NULL) { |
817 | return 0; |
818 | } |
819 | |
		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
821 | if (result != 0) { |
822 | rtfree_locked(rt); |
823 | } |
824 | } |
825 | |
826 | return result; |
827 | } |
828 | |
829 | static errno_t |
830 | nstat_route_add_watcher( |
831 | nstat_control_state *state, |
832 | nstat_msg_add_all_srcs *req) |
833 | { |
834 | int i; |
835 | errno_t result = 0; |
836 | |
837 | lck_mtx_lock(rnh_lock); |
838 | |
839 | result = nstat_set_provider_filter(state, req); |
840 | if (result == 0) { |
841 | OSIncrementAtomic(&nstat_route_watchers); |
842 | |
843 | for (i = 1; i < AF_MAX; i++) { |
844 | struct radix_node_head *rnh; |
845 | rnh = rt_tables[i]; |
846 | if (!rnh) { |
847 | continue; |
848 | } |
849 | |
850 | result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state); |
851 | if (result != 0) { |
852 | // This is probably resource exhaustion. |
853 | // There currently isn't a good way to recover from this. |
			// The least bad option seems to be to give up on the add-all but leave
855 | // the watcher in place. |
856 | break; |
857 | } |
858 | } |
859 | } |
860 | lck_mtx_unlock(rnh_lock); |
861 | |
862 | return result; |
863 | } |
864 | |
865 | __private_extern__ void |
866 | nstat_route_new_entry( |
867 | struct rtentry *rt) |
868 | { |
869 | if (nstat_route_watchers == 0) { |
870 | return; |
871 | } |
872 | |
	lck_mtx_lock(&nstat_mtx);
874 | if ((rt->rt_flags & RTF_UP) != 0) { |
875 | nstat_control_state *state; |
876 | for (state = nstat_controls; state; state = state->ncs_next) { |
877 | if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) { |
878 | // this client is watching routes |
879 | // acquire a reference for the route |
880 | RT_ADDREF(rt); |
881 | |
882 | // add the source, if that fails, release the reference |
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) {
884 | RT_REMREF(rt); |
885 | } |
886 | } |
887 | } |
888 | } |
	lck_mtx_unlock(&nstat_mtx);
890 | } |
891 | |
892 | static void |
893 | nstat_route_remove_watcher( |
894 | __unused nstat_control_state *state) |
895 | { |
896 | OSDecrementAtomic(&nstat_route_watchers); |
897 | } |
898 | |
899 | static errno_t |
900 | nstat_route_copy_descriptor( |
901 | nstat_provider_cookie_t cookie, |
902 | void *data, |
903 | size_t len) |
904 | { |
905 | nstat_route_descriptor *desc = (nstat_route_descriptor*)data; |
906 | if (len < sizeof(*desc)) { |
907 | return EINVAL; |
908 | } |
	bzero(desc, sizeof(*desc));
910 | |
911 | struct rtentry *rt = (struct rtentry*)cookie; |
912 | desc->id = (uint64_t)VM_KERNEL_ADDRHASH(rt); |
913 | desc->parent_id = (uint64_t)VM_KERNEL_ADDRHASH(rt->rt_parent); |
914 | desc->gateway_id = (uint64_t)VM_KERNEL_ADDRHASH(rt->rt_gwroute); |
915 | |
916 | |
917 | // key/dest |
918 | struct sockaddr *sa; |
919 | if ((sa = rt_key(rt))) { |
		nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
921 | } |
922 | |
923 | // mask |
924 | if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) { |
		memcpy(&desc->mask, sa, sa->sa_len);
926 | } |
927 | |
928 | // gateway |
929 | if ((sa = rt->rt_gateway)) { |
		nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
931 | } |
932 | |
933 | if (rt->rt_ifp) { |
934 | desc->ifindex = rt->rt_ifp->if_index; |
935 | } |
936 | |
937 | desc->flags = rt->rt_flags; |
938 | |
939 | return 0; |
940 | } |
941 | |
942 | static bool |
943 | nstat_route_reporting_allowed( |
944 | nstat_provider_cookie_t cookie, |
945 | nstat_provider_filter *filter, |
946 | __unused u_int64_t suppression_flags) |
947 | { |
948 | bool retval = true; |
949 | |
950 | if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) { |
951 | struct rtentry *rt = (struct rtentry*)cookie; |
952 | struct ifnet *ifp = rt->rt_ifp; |
953 | |
954 | if (ifp) { |
955 | uint32_t interface_properties = nstat_ifnet_to_flags(ifp); |
956 | |
957 | if ((filter->npf_flags & interface_properties) == 0) { |
958 | retval = false; |
959 | } |
960 | } |
961 | } |
962 | return retval; |
963 | } |
964 | |
965 | static bool |
966 | nstat_route_cookie_equal( |
967 | nstat_provider_cookie_t cookie1, |
968 | nstat_provider_cookie_t cookie2) |
969 | { |
970 | struct rtentry *rt1 = (struct rtentry *)cookie1; |
971 | struct rtentry *rt2 = (struct rtentry *)cookie2; |
972 | |
973 | return (rt1 == rt2) ? true : false; |
974 | } |
975 | |
976 | static void |
977 | nstat_init_route_provider(void) |
978 | { |
	bzero(&nstat_route_provider, sizeof(nstat_route_provider));
980 | nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor); |
981 | nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE; |
982 | nstat_route_provider.nstat_lookup = nstat_route_lookup; |
983 | nstat_route_provider.nstat_gone = nstat_route_gone; |
984 | nstat_route_provider.nstat_counts = nstat_route_counts; |
985 | nstat_route_provider.nstat_release = nstat_route_release; |
986 | nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher; |
987 | nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher; |
988 | nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor; |
989 | nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed; |
990 | nstat_route_provider.nstat_cookie_equal = nstat_route_cookie_equal; |
991 | nstat_route_provider.next = nstat_providers; |
992 | nstat_providers = &nstat_route_provider; |
993 | } |
994 | |
995 | #pragma mark -- Route Collection -- |
996 | |
997 | __private_extern__ struct nstat_counts* |
998 | nstat_route_attach( |
999 | struct rtentry *rte) |
1000 | { |
1001 | struct nstat_counts *result = rte->rt_stats; |
1002 | if (result) { |
1003 | return result; |
1004 | } |
1005 | |
	result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t),
	    Z_WAITOK | Z_ZERO);
1008 | if (!result) { |
1009 | return result; |
1010 | } |
1011 | |
1012 | if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) { |
		nstat_free_aligned(result);
1014 | result = rte->rt_stats; |
1015 | } |
1016 | |
1017 | return result; |
1018 | } |
1019 | |
1020 | __private_extern__ void |
1021 | nstat_route_detach( |
1022 | struct rtentry *rte) |
1023 | { |
1024 | if (rte->rt_stats) { |
		nstat_free_aligned(rte->rt_stats);
1026 | rte->rt_stats = NULL; |
1027 | } |
1028 | } |
1029 | |
1030 | __private_extern__ void |
1031 | nstat_route_connect_attempt( |
1032 | struct rtentry *rte) |
1033 | { |
1034 | while (rte) { |
1035 | struct nstat_counts* stats = nstat_route_attach(rte); |
1036 | if (stats) { |
1037 | OSIncrementAtomic(&stats->nstat_connectattempts); |
1038 | } |
1039 | |
1040 | rte = rte->rt_parent; |
1041 | } |
1042 | } |
1043 | |
1044 | __private_extern__ void |
1045 | nstat_route_connect_success( |
1046 | struct rtentry *rte) |
1047 | { |
1048 | // This route |
1049 | while (rte) { |
1050 | struct nstat_counts* stats = nstat_route_attach(rte); |
1051 | if (stats) { |
1052 | OSIncrementAtomic(&stats->nstat_connectsuccesses); |
1053 | } |
1054 | |
1055 | rte = rte->rt_parent; |
1056 | } |
1057 | } |
1058 | |
1059 | __private_extern__ void |
1060 | nstat_route_tx( |
1061 | struct rtentry *rte, |
1062 | u_int32_t packets, |
1063 | u_int32_t bytes, |
1064 | u_int32_t flags) |
1065 | { |
1066 | while (rte) { |
1067 | struct nstat_counts* stats = nstat_route_attach(rte); |
1068 | if (stats) { |
1069 | if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) { |
1070 | OSAddAtomic(bytes, &stats->nstat_txretransmit); |
1071 | } else { |
1072 | OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets); |
1073 | OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes); |
1074 | } |
1075 | } |
1076 | |
1077 | rte = rte->rt_parent; |
1078 | } |
1079 | } |
1080 | |
1081 | __private_extern__ void |
1082 | nstat_route_rx( |
1083 | struct rtentry *rte, |
1084 | u_int32_t packets, |
1085 | u_int32_t bytes, |
1086 | u_int32_t flags) |
1087 | { |
1088 | while (rte) { |
1089 | struct nstat_counts* stats = nstat_route_attach(rte); |
1090 | if (stats) { |
1091 | if (flags == 0) { |
1092 | OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets); |
1093 | OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes); |
1094 | } else { |
1095 | if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) { |
1096 | OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes); |
1097 | } |
1098 | if (flags & NSTAT_RX_FLAG_DUPLICATE) { |
1099 | OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes); |
1100 | } |
1101 | } |
1102 | } |
1103 | |
1104 | rte = rte->rt_parent; |
1105 | } |
1106 | } |
1107 | |
1108 | /* atomically average current value at _val_addr with _new_val and store */ |
1109 | #define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \ |
1110 | volatile uint32_t _old_val; \ |
1111 | volatile uint32_t _avg; \ |
1112 | do { \ |
1113 | _old_val = *_val_addr; \ |
1114 | if (_old_val == 0) \ |
1115 | { \ |
1116 | _avg = _new_val; \ |
1117 | } \ |
1118 | else \ |
1119 | { \ |
1120 | _avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \ |
1121 | } \ |
1122 | if (_old_val == _avg) break; \ |
1123 | } while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \ |
1124 | } while (0); |
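
/*
 * Worked example: with _decay == 3 each new sample carries weight 1/8, i.e.
 * _avg = _old_val - (_old_val >> 3) + (_new_val >> 3). Starting from a stored
 * average of 64 with a new sample of 32: 64 - 8 + 4 == 60.
 */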
1125 | |
1126 | /* atomically compute minimum of current value at _val_addr with _new_val and store */ |
1127 | #define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \ |
1128 | volatile uint32_t _old_val; \ |
1129 | do { \ |
1130 | _old_val = *_val_addr; \ |
1131 | if (_old_val != 0 && _old_val < _new_val) \ |
1132 | { \ |
1133 | break; \ |
1134 | } \ |
1135 | } while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \ |
1136 | } while (0); |
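
/*
 * A stored value of 0 means "no minimum recorded yet": NSTAT_MIN_ATOMIC(&v, 40)
 * stores 40 when v is 0 or 50, and leaves v untouched when v is 35.
 */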
1137 | |
1138 | __private_extern__ void |
1139 | nstat_route_rtt( |
1140 | struct rtentry *rte, |
1141 | u_int32_t rtt, |
1142 | u_int32_t rtt_var) |
1143 | { |
1144 | const uint32_t decay = 3; |
1145 | |
1146 | while (rte) { |
1147 | struct nstat_counts* stats = nstat_route_attach(rte); |
1148 | if (stats) { |
1149 | NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay); |
1150 | NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt); |
1151 | NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay); |
1152 | } |
1153 | rte = rte->rt_parent; |
1154 | } |
1155 | } |
1156 | |
1157 | __private_extern__ void |
1158 | nstat_route_update( |
1159 | struct rtentry *rte, |
1160 | uint32_t connect_attempts, |
1161 | uint32_t connect_successes, |
1162 | uint32_t rx_packets, |
1163 | uint32_t rx_bytes, |
1164 | uint32_t rx_duplicatebytes, |
1165 | uint32_t rx_outoforderbytes, |
1166 | uint32_t tx_packets, |
1167 | uint32_t tx_bytes, |
1168 | uint32_t tx_retransmit, |
1169 | uint32_t rtt, |
1170 | uint32_t rtt_var) |
1171 | { |
1172 | const uint32_t decay = 3; |
1173 | |
1174 | while (rte) { |
1175 | struct nstat_counts* stats = nstat_route_attach(rte); |
1176 | if (stats) { |
1177 | OSAddAtomic(connect_attempts, &stats->nstat_connectattempts); |
1178 | OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses); |
1179 | OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets); |
1180 | OSAddAtomic64((SInt64)tx_bytes, (SInt64*)&stats->nstat_txbytes); |
1181 | OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit); |
1182 | OSAddAtomic64((SInt64)rx_packets, (SInt64*)&stats->nstat_rxpackets); |
1183 | OSAddAtomic64((SInt64)rx_bytes, (SInt64*)&stats->nstat_rxbytes); |
1184 | OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes); |
1185 | OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes); |
1186 | |
1187 | if (rtt != 0) { |
1188 | NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay); |
1189 | NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt); |
1190 | NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay); |
1191 | } |
1192 | } |
1193 | rte = rte->rt_parent; |
1194 | } |
1195 | } |
1196 | |
1197 | #pragma mark -- TCP Kernel Provider -- |
1198 | |
1199 | /* |
1200 | * Due to the way the kernel deallocates a process (the process structure |
1201 | * might be gone by the time we get the PCB detach notification), |
1202 | * we need to cache the process name. Without this, proc_name() would |
1203 | * return null and the process name would never be sent to userland. |
1204 | * |
 * For UDP sockets, we also cache the connection tuples along with
1206 | * the interface index. This is necessary because when UDP sockets are |
1207 | * disconnected, the connection tuples are forever lost from the inpcb, thus |
1208 | * we need to keep track of the last call to connect() in ntstat. |
1209 | */ |
1210 | struct nstat_tucookie { |
1211 | struct inpcb *inp; |
1212 | char pname[MAXCOMLEN + 1]; |
1213 | bool cached; |
1214 | union{ |
1215 | struct sockaddr_in v4; |
1216 | struct sockaddr_in6 v6; |
1217 | } local; |
1218 | union{ |
1219 | struct sockaddr_in v4; |
1220 | struct sockaddr_in6 v6; |
1221 | } remote; |
1222 | unsigned int if_index; |
1223 | uint32_t ifnet_properties; |
1224 | }; |
1225 | |
1226 | static struct nstat_tucookie * |
1227 | nstat_tucookie_alloc_internal( |
1228 | struct inpcb *inp, |
1229 | bool ref, |
1230 | bool locked) |
1231 | { |
1232 | struct nstat_tucookie *cookie; |
1233 | |
1234 | cookie = kalloc_type(struct nstat_tucookie, |
1235 | Z_WAITOK | Z_ZERO | Z_NOFAIL); |
1236 | if (!locked) { |
1237 | LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED); |
1238 | } |
1239 | if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) { |
1240 | kfree_type(struct nstat_tucookie, cookie); |
1241 | return NULL; |
1242 | } |
1243 | cookie->inp = inp; |
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
1246 | /* |
1247 | * We only increment the reference count for UDP sockets because we |
1248 | * only cache UDP socket tuples. |
1249 | */ |
1250 | if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) { |
1251 | OSIncrementAtomic(&inp->inp_nstat_refcnt); |
1252 | } |
1253 | |
1254 | return cookie; |
1255 | } |
1256 | |
1257 | __unused static struct nstat_tucookie * |
1258 | nstat_tucookie_alloc( |
1259 | struct inpcb *inp) |
1260 | { |
1261 | return nstat_tucookie_alloc_internal(inp, false, false); |
1262 | } |
1263 | |
1264 | static struct nstat_tucookie * |
1265 | nstat_tucookie_alloc_ref( |
1266 | struct inpcb *inp) |
1267 | { |
1268 | return nstat_tucookie_alloc_internal(inp, true, false); |
1269 | } |
1270 | |
1271 | static struct nstat_tucookie * |
1272 | nstat_tucookie_alloc_ref_locked( |
1273 | struct inpcb *inp) |
1274 | { |
1275 | return nstat_tucookie_alloc_internal(inp, true, true); |
1276 | } |
1277 | |
1278 | static void |
1279 | nstat_tucookie_release_internal( |
1280 | struct nstat_tucookie *cookie, |
1281 | int inplock) |
1282 | { |
1283 | if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) { |
1284 | OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt); |
1285 | } |
1286 | in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock); |
1287 | kfree_type(struct nstat_tucookie, cookie); |
1288 | } |
1289 | |
1290 | static void |
1291 | nstat_tucookie_release( |
1292 | struct nstat_tucookie *cookie) |
1293 | { |
1294 | nstat_tucookie_release_internal(cookie, false); |
1295 | } |
1296 | |
1297 | static void |
1298 | nstat_tucookie_release_locked( |
1299 | struct nstat_tucookie *cookie) |
1300 | { |
1301 | nstat_tucookie_release_internal(cookie, true); |
1302 | } |
1303 | |
1304 | |
1305 | static size_t |
1306 | nstat_inp_domain_info(struct inpcb *inp, nstat_domain_info *domain_info, size_t len) |
1307 | { |
1308 | // Note, the caller has guaranteed that the buffer has been zeroed, there is no need to clear it again |
1309 | struct socket *so = inp->inp_socket; |
1310 | |
1311 | if (so == NULL) { |
1312 | return 0; |
1313 | } |
1314 | |
	NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: Collecting stats");
1316 | |
1317 | if (domain_info == NULL) { |
1318 | return sizeof(nstat_domain_info); |
1319 | } |
1320 | |
1321 | if (len < sizeof(nstat_domain_info)) { |
1322 | return 0; |
1323 | } |
1324 | |
1325 | necp_copy_inp_domain_info(inp, so, domain_info); |
1326 | |
1327 | NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: <pid %d> Collected stats - domain <%s> owner <%s> ctxt <%s> bundle id <%s> " |
1328 | "is_tracker %d is_non_app_initiated %d is_silent %d" , |
1329 | so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid, |
1330 | domain_info->domain_name, |
1331 | domain_info->domain_owner, |
1332 | domain_info->domain_tracker_ctxt, |
1333 | domain_info->domain_attributed_bundle_id, |
1334 | domain_info->is_tracker, |
1335 | domain_info->is_non_app_initiated, |
1336 | domain_info->is_silent); |
1337 | |
1338 | return sizeof(nstat_domain_info); |
1339 | } |
1340 | |
1341 | |
1342 | static nstat_provider nstat_tcp_provider; |
1343 | |
1344 | static errno_t |
1345 | nstat_tcp_lookup( |
1346 | __unused const void *data, |
1347 | __unused u_int32_t length, |
1348 | __unused nstat_provider_cookie_t *out_cookie) |
1349 | { |
1350 | // Looking up a specific connection is not supported. |
1351 | return ENOTSUP; |
1352 | } |
1353 | |
1354 | static int |
1355 | nstat_tcp_gone( |
1356 | nstat_provider_cookie_t cookie) |
1357 | { |
1358 | struct nstat_tucookie *tucookie = |
1359 | (struct nstat_tucookie *)cookie; |
1360 | struct inpcb *inp; |
1361 | struct tcpcb *tp; |
1362 | |
1363 | return (!(inp = tucookie->inp) || |
1364 | !(tp = intotcpcb(inp)) || |
1365 | inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0; |
1366 | } |
1367 | |
1368 | static errno_t |
1369 | nstat_tcp_counts( |
1370 | nstat_provider_cookie_t cookie, |
1371 | struct nstat_counts *out_counts, |
1372 | int *out_gone) |
1373 | { |
1374 | struct nstat_tucookie *tucookie = |
1375 | (struct nstat_tucookie *)cookie; |
1376 | struct inpcb *inp; |
1377 | |
	bzero(out_counts, sizeof(*out_counts));
1379 | |
1380 | if (out_gone) { |
1381 | *out_gone = 0; |
1382 | } |
1383 | |
1384 | // if the pcb is in the dead state, we should stop using it |
1385 | if (nstat_tcp_gone(cookie)) { |
1386 | if (out_gone) { |
1387 | *out_gone = 1; |
1388 | } |
1389 | if (!(inp = tucookie->inp) || !intotcpcb(inp)) { |
1390 | return EINVAL; |
1391 | } |
1392 | } |
1393 | inp = tucookie->inp; |
1394 | struct tcpcb *tp = intotcpcb(inp); |
1395 | |
1396 | out_counts->nstat_rxpackets = os_atomic_load(&inp->inp_stat->rxpackets, relaxed); |
1397 | out_counts->nstat_rxbytes = os_atomic_load(&inp->inp_stat->rxbytes, relaxed); |
1398 | out_counts->nstat_txpackets = os_atomic_load(&inp->inp_stat->txpackets, relaxed); |
1399 | out_counts->nstat_txbytes = os_atomic_load(&inp->inp_stat->txbytes, relaxed); |
1400 | out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes; |
1401 | out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes; |
1402 | out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes; |
1403 | out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0; |
1404 | out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0; |
1405 | out_counts->nstat_avg_rtt = tp->t_srtt; |
1406 | out_counts->nstat_min_rtt = tp->t_rttbest; |
1407 | out_counts->nstat_var_rtt = tp->t_rttvar; |
1408 | if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) { |
1409 | out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt; |
1410 | } |
1411 | out_counts->nstat_cell_rxbytes = os_atomic_load(&inp->inp_cstat->rxbytes, relaxed); |
1412 | out_counts->nstat_cell_txbytes = os_atomic_load(&inp->inp_cstat->txbytes, relaxed); |
1413 | out_counts->nstat_wifi_rxbytes = os_atomic_load(&inp->inp_wstat->rxbytes, relaxed); |
1414 | out_counts->nstat_wifi_txbytes = os_atomic_load(&inp->inp_wstat->txbytes, relaxed); |
1415 | out_counts->nstat_wired_rxbytes = os_atomic_load(&inp->inp_Wstat->rxbytes, relaxed); |
1416 | out_counts->nstat_wired_txbytes = os_atomic_load(&inp->inp_Wstat->txbytes, relaxed); |
1417 | |
1418 | return 0; |
1419 | } |
1420 | |
1421 | static void |
1422 | nstat_tcp_release( |
1423 | nstat_provider_cookie_t cookie, |
1424 | int locked) |
1425 | { |
1426 | struct nstat_tucookie *tucookie = |
1427 | (struct nstat_tucookie *)cookie; |
1428 | |
	nstat_tucookie_release_internal(tucookie, locked);
1430 | } |
1431 | |
1432 | static errno_t |
1433 | nstat_tcp_add_watcher( |
1434 | nstat_control_state *state, |
1435 | nstat_msg_add_all_srcs *req) |
1436 | { |
1437 | // There is a tricky issue around getting all TCP sockets added once |
1438 | // and only once. nstat_tcp_new_pcb() is called prior to the new item |
1439 | // being placed on any lists where it might be found. |
1440 | // By locking the tcbinfo.ipi_lock prior to marking the state as a watcher, |
1441 | // it should be impossible for a new socket to be added twice. |
1442 | // On the other hand, there is still a timing issue where a new socket |
1443 | // results in a call to nstat_tcp_new_pcb() before this watcher |
1444 | // is instantiated and yet the socket doesn't make it into ipi_listhead |
1445 | // prior to the scan. <rdar://problem/30361716> |
1446 | |
1447 | errno_t result; |
1448 | |
	lck_rw_lock_shared(&tcbinfo.ipi_lock);
1450 | result = nstat_set_provider_filter(state, req); |
1451 | if (result == 0) { |
1452 | OSIncrementAtomic(&nstat_tcp_watchers); |
1453 | |
1454 | // Add all current tcp inpcbs. Ignore those in timewait |
1455 | struct inpcb *inp; |
1456 | struct nstat_tucookie *cookie; |
1457 | LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) |
1458 | { |
1459 | cookie = nstat_tucookie_alloc_ref(inp); |
1460 | if (cookie == NULL) { |
1461 | continue; |
1462 | } |
			if (nstat_control_source_add(0, state, &nstat_tcp_provider,
			    cookie) != 0) {
1465 | nstat_tucookie_release(cookie); |
1466 | break; |
1467 | } |
1468 | } |
1469 | } |
1470 | |
	lck_rw_done(&tcbinfo.ipi_lock);
1472 | |
1473 | return result; |
1474 | } |
1475 | |
1476 | static void |
1477 | nstat_tcp_remove_watcher( |
1478 | __unused nstat_control_state *state) |
1479 | { |
1480 | OSDecrementAtomic(&nstat_tcp_watchers); |
1481 | } |
1482 | |
1483 | __private_extern__ void |
1484 | nstat_tcp_new_pcb( |
1485 | struct inpcb *inp) |
1486 | { |
1487 | struct nstat_tucookie *cookie; |
1488 | |
1489 | inp->inp_start_timestamp = mach_continuous_time(); |
1490 | |
1491 | if (nstat_tcp_watchers == 0) { |
1492 | return; |
1493 | } |
1494 | |
	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
1497 | nstat_control_state *state; |
1498 | for (state = nstat_controls; state; state = state->ncs_next) { |
1499 | if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) { |
1500 | // this client is watching tcp |
1501 | // acquire a reference for it |
1502 | cookie = nstat_tucookie_alloc_ref_locked(inp); |
1503 | if (cookie == NULL) { |
1504 | continue; |
1505 | } |
1506 | // add the source, if that fails, release the reference |
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0) {
1509 | nstat_tucookie_release_locked(cookie); |
1510 | break; |
1511 | } |
1512 | } |
1513 | } |
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
1516 | } |
1517 | |
1518 | __private_extern__ void |
1519 | nstat_pcb_detach(struct inpcb *inp) |
1520 | { |
1521 | nstat_control_state *state; |
1522 | nstat_src *src; |
1523 | tailq_head_nstat_src dead_list; |
1524 | struct nstat_tucookie *tucookie; |
1525 | errno_t result; |
1526 | |
1527 | if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) { |
1528 | return; |
1529 | } |
1530 | |
1531 | TAILQ_INIT(&dead_list); |
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
1535 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
1536 | { |
1537 | nstat_provider_id_t provider_id = src->provider->nstat_provider_id; |
1538 | if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) { |
1539 | tucookie = (struct nstat_tucookie *)src->cookie; |
1540 | if (tucookie->inp == inp) { |
1541 | break; |
1542 | } |
1543 | } |
1544 | } |
1545 | |
1546 | if (src) { |
1547 | result = nstat_control_send_goodbye(state, src); |
1548 | |
1549 | TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); |
1550 | TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link); |
1551 | } |
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
1555 | |
1556 | while ((src = TAILQ_FIRST(&dead_list))) { |
1557 | TAILQ_REMOVE(&dead_list, src, ns_control_link); |
1558 | nstat_control_cleanup_source(NULL, src, TRUE); |
1559 | } |
1560 | } |
1561 | |
1562 | __private_extern__ void |
1563 | nstat_pcb_event(struct inpcb *inp, u_int64_t event) |
1564 | { |
1565 | nstat_control_state *state; |
1566 | nstat_src *src; |
1567 | struct nstat_tucookie *tucookie; |
1568 | errno_t result; |
1569 | nstat_provider_id_t provider_id; |
1570 | |
1571 | if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) { |
1572 | return; |
1573 | } |
1574 | if (((merged_filters.mpf_filters[NSTAT_PROVIDER_TCP_KERNEL].mf_events & event) == 0) && |
1575 | ((merged_filters.mpf_filters[NSTAT_PROVIDER_UDP_KERNEL].mf_events & event) == 0)) { |
1576 | // There are clients for TCP and UDP, but none are interested in the event |
1577 | // This check saves taking the mutex and scanning the list |
1578 | return; |
1579 | } |
	lck_mtx_lock(&nstat_mtx);
1581 | for (state = nstat_controls; state; state = state->ncs_next) { |
1582 | if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_KERNEL].npf_events & event) == 0) && |
1583 | ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_KERNEL].npf_events & event) == 0)) { |
1584 | continue; |
1585 | } |
		lck_mtx_lock(&state->ncs_mtx);
1587 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
1588 | { |
1589 | provider_id = src->provider->nstat_provider_id; |
1590 | if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) { |
1591 | tucookie = (struct nstat_tucookie *)src->cookie; |
1592 | if (tucookie->inp == inp) { |
1593 | break; |
1594 | } |
1595 | } |
1596 | } |
1597 | |
1598 | if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) { |
1599 | result = nstat_control_send_event(state, src, event); |
1600 | } |
		lck_mtx_unlock(&state->ncs_mtx);
1602 | } |
	lck_mtx_unlock(&nstat_mtx);
	if (event == NSTAT_EVENT_SRC_ATTRIBUTION_CHANGE) {
		// As a convenience to clients, the bitmap is cleared when there is an attribution change.
		// There is no interlock preventing clients from polling and collecting a half-cleared bitmap,
		// but as the timestamp is cleared first, that should show that the bitmap is not applicable.
		// The other race, where an interested client process has exited and its replacement has not
		// yet shown up, seems inconsequential enough not to burden the early exit path with extra checks.
		inp_clear_activity_bitmap(inp);
	}
1612 | } |
1613 | |
1614 | |
1615 | __private_extern__ void |
1616 | nstat_pcb_cache(struct inpcb *inp) |
1617 | { |
1618 | nstat_control_state *state; |
1619 | nstat_src *src; |
1620 | struct nstat_tucookie *tucookie; |
1621 | |
1622 | if (inp == NULL || nstat_udp_watchers == 0 || |
1623 | inp->inp_nstat_refcnt == 0) { |
1624 | return; |
1625 | } |
1626 | VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP); |
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
1630 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
1631 | { |
1632 | tucookie = (struct nstat_tucookie *)src->cookie; |
1633 | if (tucookie->inp == inp) { |
1634 | if (inp->inp_vflag & INP_IPV6) { |
					in6_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    inp->inp_lifscope,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					in6_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    inp->inp_fifscope,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				} else if (inp->inp_vflag & INP_IPV4) {
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
1654 | } |
1655 | if (inp->inp_last_outifp) { |
1656 | tucookie->if_index = |
1657 | inp->inp_last_outifp->if_index; |
1658 | } |
1659 | |
1660 | tucookie->ifnet_properties = nstat_inpcb_to_flags(inp); |
1661 | tucookie->cached = true; |
1662 | break; |
1663 | } |
1664 | } |
		lck_mtx_unlock(&state->ncs_mtx);
1666 | } |
	lck_mtx_unlock(&nstat_mtx);
1668 | } |
1669 | |
1670 | __private_extern__ void |
1671 | nstat_pcb_invalidate_cache(struct inpcb *inp) |
1672 | { |
1673 | nstat_control_state *state; |
1674 | nstat_src *src; |
1675 | struct nstat_tucookie *tucookie; |
1676 | |
1677 | if (inp == NULL || nstat_udp_watchers == 0 || |
1678 | inp->inp_nstat_refcnt == 0) { |
1679 | return; |
1680 | } |
1681 | VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP); |
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
1685 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
1686 | { |
1687 | tucookie = (struct nstat_tucookie *)src->cookie; |
1688 | if (tucookie->inp == inp) { |
1689 | tucookie->cached = false; |
1690 | break; |
1691 | } |
1692 | } |
		lck_mtx_unlock(&state->ncs_mtx);
1694 | } |
	lck_mtx_unlock(&nstat_mtx);
1696 | } |
1697 | |
1698 | static errno_t |
1699 | nstat_tcp_copy_descriptor( |
1700 | nstat_provider_cookie_t cookie, |
1701 | void *data, |
1702 | size_t len) |
1703 | { |
1704 | if (len < sizeof(nstat_tcp_descriptor)) { |
1705 | return EINVAL; |
1706 | } |
1707 | |
1708 | if (nstat_tcp_gone(cookie)) { |
1709 | return EINVAL; |
1710 | } |
1711 | |
1712 | nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data; |
1713 | struct nstat_tucookie *tucookie = |
1714 | (struct nstat_tucookie *)cookie; |
1715 | struct inpcb *inp = tucookie->inp; |
1716 | struct tcpcb *tp = intotcpcb(inp); |
	bzero(desc, sizeof(*desc));
1718 | |
1719 | if (inp->inp_vflag & INP_IPV6) { |
		in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope,
		    &desc->local.v6, sizeof(desc->local));
		in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope,
		    &desc->remote.v6, sizeof(desc->remote));
	} else if (inp->inp_vflag & INP_IPV4) {
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
		    &desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
		    &desc->remote.v4, sizeof(desc->remote));
1729 | } |
1730 | |
1731 | desc->state = intotcpcb(inp)->t_state; |
1732 | desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 : |
1733 | inp->inp_last_outifp->if_index; |
1734 | |
1735 | // danger - not locked, values could be bogus |
1736 | desc->txunacked = tp->snd_max - tp->snd_una; |
1737 | desc->txwindow = tp->snd_wnd; |
1738 | desc->txcwindow = tp->snd_cwnd; |
1739 | desc->ifnet_properties = nstat_inpcb_to_flags(inp); |
1740 | |
1741 | if (CC_ALGO(tp)->name != NULL) { |
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
1744 | } |
1745 | |
1746 | struct socket *so = inp->inp_socket; |
1747 | if (so) { |
1748 | // TBD - take the socket lock around these to make sure |
1749 | // they're in sync? |
1750 | desc->upid = so->last_upid; |
1751 | desc->pid = so->last_pid; |
1752 | desc->traffic_class = so->so_traffic_class; |
1753 | if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) { |
1754 | desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND; |
1755 | } |
1756 | if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) { |
1757 | desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG; |
1758 | } |
1759 | if (so->so_flags1 & SOF1_INBOUND) { |
1760 | desc->ifnet_properties |= NSTAT_SOURCE_IS_INBOUND; |
1761 | } else if (desc->state == TCPS_LISTEN) { |
1762 | desc->ifnet_properties |= NSTAT_SOURCE_IS_LISTENER; |
1763 | tucookie->ifnet_properties = NSTAT_SOURCE_IS_LISTENER; |
1764 | } else if (desc->state != TCPS_CLOSED) { |
1765 | desc->ifnet_properties |= NSTAT_SOURCE_IS_OUTBOUND; |
1766 | tucookie->ifnet_properties = NSTAT_SOURCE_IS_OUTBOUND; |
1767 | } else { |
1768 | desc->ifnet_properties |= tucookie->ifnet_properties; |
1769 | } |
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
1781 | if (so->so_flags & SOF_DELEGATED) { |
1782 | desc->eupid = so->e_upid; |
1783 | desc->epid = so->e_pid; |
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else if (!uuid_is_null(so->so_ruuid)) {
			memcpy(desc->euuid, so->so_ruuid, sizeof(so->so_ruuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->persona_id = so->so_persona_id;
		desc->uid = kauth_cred_getuid(so->so_cred);
1795 | desc->sndbufsize = so->so_snd.sb_hiwat; |
1796 | desc->sndbufused = so->so_snd.sb_cc; |
1797 | desc->rcvbufsize = so->so_rcv.sb_hiwat; |
1798 | desc->rcvbufused = so->so_rcv.sb_cc; |
1799 | desc->fallback_mode = so->so_fallback_mode; |
1800 | |
1801 | if (nstat_debug) { |
1802 | uuid_string_t euuid_str = { 0 }; |
			uuid_unparse(desc->euuid, euuid_str);
			NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: TCP - pid %d uid %d euuid %s persona id %d", desc->pid, desc->uid, euuid_str, desc->persona_id);
1805 | } |
1806 | } |
1807 | |
1808 | tcp_get_connectivity_status(tp, &desc->connstatus); |
	inp_get_activity_bitmap(inp, &desc->activity_bitmap);
1810 | desc->start_timestamp = inp->inp_start_timestamp; |
1811 | desc->timestamp = mach_continuous_time(); |
1812 | return 0; |
1813 | } |
1814 | |
1815 | static bool |
1816 | nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP) |
1817 | { |
1818 | bool retval = true; |
1819 | |
1820 | if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS | NSTAT_FILTER_SPECIFIC_USER)) != 0) { |
1821 | struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie; |
1822 | struct inpcb *inp = tucookie->inp; |
1823 | |
1824 | /* Only apply interface filter if at least one is allowed. */ |
1825 | if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) { |
1826 | uint32_t interface_properties = nstat_inpcb_to_flags(inp); |
1827 | |
1828 | if ((filter->npf_flags & interface_properties) == 0) { |
1829 | // For UDP, we could have an undefined interface and yet transfers may have occurred. |
1830 | // We allow reporting if there have been transfers of the requested kind. |
1831 | // This is imperfect as we cannot account for the expensive attribute over wifi. |
1832 | // We also assume that cellular is expensive and we have no way to select for AWDL |
1833 | if (is_UDP) { |
					do {
1835 | if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_ACCEPT_EXPENSIVE)) && |
1836 | (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) { |
1837 | break; |
1838 | } |
1839 | if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) && |
1840 | (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) { |
1841 | break; |
1842 | } |
1843 | if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) && |
1844 | (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) { |
1845 | break; |
1846 | } |
1847 | return false; |
1848 | } while (0); |
1849 | } else { |
1850 | return false; |
1851 | } |
1852 | } |
1853 | } |
1854 | |
1855 | if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) { |
1856 | struct socket *so = inp->inp_socket; |
1857 | retval = false; |
1858 | |
1859 | if (so) { |
1860 | if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) && |
1861 | (filter->npf_pid == so->last_pid)) { |
1862 | retval = true; |
				} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
				    (filter->npf_pid == ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid))) {
					retval = true;
				} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
				    (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) {
					retval = true;
				} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
				    (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid,
				    sizeof(so->last_uuid)) == 0)) {
1872 | retval = true; |
1873 | } |
1874 | } |
1875 | } |
1876 | } |
1877 | return retval; |
1878 | } |
1879 | |
1880 | static bool |
1881 | nstat_tcp_reporting_allowed( |
1882 | nstat_provider_cookie_t cookie, |
1883 | nstat_provider_filter *filter, |
1884 | __unused u_int64_t suppression_flags) |
1885 | { |
1886 | return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE); |
1887 | } |
1888 | |
1889 | static size_t |
1890 | nstat_tcp_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len) |
1891 | { |
1892 | struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie; |
1893 | struct inpcb *inp = tucookie->inp; |
1894 | |
1895 | if (nstat_tcp_gone(cookie)) { |
1896 | return 0; |
1897 | } |
1898 | |
1899 | switch (extension_id) { |
1900 | case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN: |
		return nstat_inp_domain_info(inp, (nstat_domain_info *)buf, len);
1902 | |
1903 | case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV: |
1904 | default: |
1905 | break; |
1906 | } |
1907 | return 0; |
1908 | } |
1909 | |
1910 | static void |
1911 | nstat_init_tcp_provider(void) |
1912 | { |
	bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1914 | nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor); |
1915 | nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL; |
1916 | nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup; |
1917 | nstat_tcp_provider.nstat_gone = nstat_tcp_gone; |
1918 | nstat_tcp_provider.nstat_counts = nstat_tcp_counts; |
1919 | nstat_tcp_provider.nstat_release = nstat_tcp_release; |
1920 | nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher; |
1921 | nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher; |
1922 | nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor; |
1923 | nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed; |
1924 | nstat_tcp_provider.nstat_copy_extension = nstat_tcp_extensions; |
1925 | nstat_tcp_provider.next = nstat_providers; |
1926 | nstat_providers = &nstat_tcp_provider; |
1927 | } |
1928 | |
1929 | #pragma mark -- UDP Provider -- |
1930 | |
1931 | static nstat_provider nstat_udp_provider; |
1932 | |
1933 | static errno_t |
1934 | nstat_udp_lookup( |
1935 | __unused const void *data, |
1936 | __unused u_int32_t length, |
1937 | __unused nstat_provider_cookie_t *out_cookie) |
1938 | { |
1939 | // Looking up a specific connection is not supported. |
1940 | return ENOTSUP; |
1941 | } |
1942 | |
1943 | static int |
1944 | nstat_udp_gone( |
1945 | nstat_provider_cookie_t cookie) |
1946 | { |
1947 | struct nstat_tucookie *tucookie = |
1948 | (struct nstat_tucookie *)cookie; |
1949 | struct inpcb *inp; |
1950 | |
1951 | return (!(inp = tucookie->inp) || |
1952 | inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0; |
1953 | } |
1954 | |
1955 | static errno_t |
1956 | nstat_udp_counts( |
1957 | nstat_provider_cookie_t cookie, |
1958 | struct nstat_counts *out_counts, |
1959 | int *out_gone) |
1960 | { |
1961 | struct nstat_tucookie *tucookie = |
1962 | (struct nstat_tucookie *)cookie; |
1963 | |
1964 | if (out_gone) { |
1965 | *out_gone = 0; |
1966 | } |
1967 | |
1968 | // if the pcb is in the dead state, we should stop using it |
1969 | if (nstat_udp_gone(cookie)) { |
1970 | if (out_gone) { |
1971 | *out_gone = 1; |
1972 | } |
1973 | if (!tucookie->inp) { |
1974 | return EINVAL; |
1975 | } |
1976 | } |
1977 | struct inpcb *inp = tucookie->inp; |
1978 | |
1979 | out_counts->nstat_rxpackets = os_atomic_load(&inp->inp_stat->rxpackets, relaxed); |
1980 | out_counts->nstat_rxbytes = os_atomic_load(&inp->inp_stat->rxbytes, relaxed); |
1981 | out_counts->nstat_txpackets = os_atomic_load(&inp->inp_stat->txpackets, relaxed); |
1982 | out_counts->nstat_txbytes = os_atomic_load(&inp->inp_stat->txbytes, relaxed); |
1983 | out_counts->nstat_cell_rxbytes = os_atomic_load(&inp->inp_cstat->rxbytes, relaxed); |
1984 | out_counts->nstat_cell_txbytes = os_atomic_load(&inp->inp_cstat->txbytes, relaxed); |
1985 | out_counts->nstat_wifi_rxbytes = os_atomic_load(&inp->inp_wstat->rxbytes, relaxed); |
1986 | out_counts->nstat_wifi_txbytes = os_atomic_load(&inp->inp_wstat->txbytes, relaxed); |
1987 | out_counts->nstat_wired_rxbytes = os_atomic_load(&inp->inp_Wstat->rxbytes, relaxed); |
1988 | out_counts->nstat_wired_txbytes = os_atomic_load(&inp->inp_Wstat->txbytes, relaxed); |
1989 | |
1990 | return 0; |
1991 | } |
1992 | |
1993 | static void |
1994 | nstat_udp_release( |
1995 | nstat_provider_cookie_t cookie, |
1996 | int locked) |
1997 | { |
1998 | struct nstat_tucookie *tucookie = |
1999 | (struct nstat_tucookie *)cookie; |
2000 | |
	nstat_tucookie_release_internal(tucookie, locked);
2002 | } |
2003 | |
2004 | static errno_t |
2005 | nstat_udp_add_watcher( |
2006 | nstat_control_state *state, |
2007 | nstat_msg_add_all_srcs *req) |
2008 | { |
2009 | // There is a tricky issue around getting all UDP sockets added once |
2010 | // and only once. nstat_udp_new_pcb() is called prior to the new item |
2011 | // being placed on any lists where it might be found. |
	// By locking the udbinfo.ipi_lock prior to marking the state as a watcher,
2013 | // it should be impossible for a new socket to be added twice. |
2014 | // On the other hand, there is still a timing issue where a new socket |
2015 | // results in a call to nstat_udp_new_pcb() before this watcher |
2016 | // is instantiated and yet the socket doesn't make it into ipi_listhead |
2017 | // prior to the scan. <rdar://problem/30361716> |
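	//
	// One assumed interleaving of that residual race, for illustration:
	//   thread A: nstat_udp_new_pcb(inp) sees nstat_udp_watchers == 0 and returns
	//   thread B: marks itself a watcher and scans ipi_listhead under ipi_lock
	//   thread A: inserts inp into ipi_listhead, too late for B's scan
	// That socket then stays invisible to the new watcher.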
2018 | |
2019 | errno_t result; |
2020 | |
	lck_rw_lock_shared(&udbinfo.ipi_lock);
2022 | result = nstat_set_provider_filter(state, req); |
2023 | |
2024 | if (result == 0) { |
2025 | struct inpcb *inp; |
2026 | struct nstat_tucookie *cookie; |
2027 | |
2028 | OSIncrementAtomic(&nstat_udp_watchers); |
2029 | |
2030 | // Add all current UDP inpcbs. |
2031 | LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list) |
2032 | { |
2033 | cookie = nstat_tucookie_alloc_ref(inp); |
2034 | if (cookie == NULL) { |
2035 | continue; |
2036 | } |
			if (nstat_control_source_add(0, state, &nstat_udp_provider,
			    cookie) != 0) {
2039 | nstat_tucookie_release(cookie); |
2040 | break; |
2041 | } |
2042 | } |
2043 | } |
2044 | |
	lck_rw_done(&udbinfo.ipi_lock);
2046 | |
2047 | return result; |
2048 | } |
2049 | |
2050 | static void |
2051 | nstat_udp_remove_watcher( |
2052 | __unused nstat_control_state *state) |
2053 | { |
2054 | OSDecrementAtomic(&nstat_udp_watchers); |
2055 | } |
2056 | |
2057 | __private_extern__ void |
2058 | nstat_udp_new_pcb( |
2059 | struct inpcb *inp) |
2060 | { |
2061 | struct nstat_tucookie *cookie; |
2062 | |
2063 | inp->inp_start_timestamp = mach_continuous_time(); |
2064 | |
2065 | if (nstat_udp_watchers == 0) { |
2066 | return; |
2067 | } |
2068 | |
	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
2071 | nstat_control_state *state; |
2072 | for (state = nstat_controls; state; state = state->ncs_next) { |
2073 | if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) { |
			// this client is watching udp
2075 | // acquire a reference for it |
2076 | cookie = nstat_tucookie_alloc_ref_locked(inp); |
2077 | if (cookie == NULL) { |
2078 | continue; |
2079 | } |
2080 | // add the source, if that fails, release the reference |
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0) {
2083 | nstat_tucookie_release_locked(cookie); |
2084 | break; |
2085 | } |
2086 | } |
2087 | } |
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
2090 | } |
2091 | |
2092 | static errno_t |
2093 | nstat_udp_copy_descriptor( |
2094 | nstat_provider_cookie_t cookie, |
2095 | void *data, |
2096 | size_t len) |
2097 | { |
2098 | if (len < sizeof(nstat_udp_descriptor)) { |
2099 | return EINVAL; |
2100 | } |
2101 | |
2102 | if (nstat_udp_gone(cookie)) { |
2103 | return EINVAL; |
2104 | } |
2105 | |
2106 | struct nstat_tucookie *tucookie = |
2107 | (struct nstat_tucookie *)cookie; |
2108 | nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data; |
2109 | struct inpcb *inp = tucookie->inp; |
2110 | |
	bzero(desc, sizeof(*desc));
2112 | |
2113 | if (tucookie->cached == false) { |
2114 | if (inp->inp_vflag & INP_IPV6) { |
			in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope,
			    &desc->local.v6, sizeof(desc->local.v6));
			in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope,
			    &desc->remote.v6, sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			    &desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			    &desc->remote.v4, sizeof(desc->remote.v4));
2124 | } |
2125 | desc->ifnet_properties = nstat_inpcb_to_flags(inp); |
2126 | } else { |
2127 | if (inp->inp_vflag & INP_IPV6) { |
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
2137 | } |
2138 | desc->ifnet_properties = tucookie->ifnet_properties; |
2139 | } |
2140 | |
2141 | if (inp->inp_last_outifp) { |
2142 | desc->ifindex = inp->inp_last_outifp->if_index; |
2143 | } else { |
2144 | desc->ifindex = tucookie->if_index; |
2145 | } |
2146 | |
2147 | struct socket *so = inp->inp_socket; |
2148 | if (so) { |
2149 | // TBD - take the socket lock around these to make sure |
2150 | // they're in sync? |
2151 | desc->upid = so->last_upid; |
2152 | desc->pid = so->last_pid; |
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
2164 | if (so->so_flags & SOF_DELEGATED) { |
2165 | desc->eupid = so->e_upid; |
2166 | desc->epid = so->e_pid; |
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else if (!uuid_is_null(so->so_ruuid)) {
			memcpy(desc->euuid, so->so_ruuid, sizeof(so->so_ruuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->persona_id = so->so_persona_id;
		desc->uid = kauth_cred_getuid(so->so_cred);
2178 | desc->rcvbufsize = so->so_rcv.sb_hiwat; |
2179 | desc->rcvbufused = so->so_rcv.sb_cc; |
2180 | desc->traffic_class = so->so_traffic_class; |
2181 | desc->fallback_mode = so->so_fallback_mode; |
		inp_get_activity_bitmap(inp, &desc->activity_bitmap);
2183 | desc->start_timestamp = inp->inp_start_timestamp; |
2184 | desc->timestamp = mach_continuous_time(); |
2185 | |
2186 | if (nstat_debug) { |
2187 | uuid_string_t euuid_str = { 0 }; |
			uuid_unparse(desc->euuid, euuid_str);
			NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: UDP - pid %d uid %d euuid %s persona id %d", desc->pid, desc->uid, euuid_str, desc->persona_id);
2190 | } |
2191 | } |
2192 | |
2193 | return 0; |
2194 | } |
2195 | |
2196 | static bool |
2197 | nstat_udp_reporting_allowed( |
2198 | nstat_provider_cookie_t cookie, |
2199 | nstat_provider_filter *filter, |
2200 | __unused u_int64_t suppression_flags) |
2201 | { |
2202 | return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE); |
2203 | } |
2204 | |
2205 | |
2206 | static size_t |
2207 | nstat_udp_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len) |
2208 | { |
2209 | struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie; |
2210 | struct inpcb *inp = tucookie->inp; |
2211 | if (nstat_udp_gone(cookie)) { |
2212 | return 0; |
2213 | } |
2214 | |
2215 | switch (extension_id) { |
2216 | case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN: |
		return nstat_inp_domain_info(inp, (nstat_domain_info *)buf, len);
2218 | |
2219 | default: |
2220 | break; |
2221 | } |
2222 | return 0; |
2223 | } |
2224 | |
2225 | |
2226 | static void |
2227 | nstat_init_udp_provider(void) |
2228 | { |
	bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
2230 | nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL; |
2231 | nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor); |
2232 | nstat_udp_provider.nstat_lookup = nstat_udp_lookup; |
2233 | nstat_udp_provider.nstat_gone = nstat_udp_gone; |
2234 | nstat_udp_provider.nstat_counts = nstat_udp_counts; |
2235 | nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher; |
2236 | nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher; |
2237 | nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor; |
2238 | nstat_udp_provider.nstat_release = nstat_udp_release; |
2239 | nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed; |
2240 | nstat_udp_provider.nstat_copy_extension = nstat_udp_extensions; |
2241 | nstat_udp_provider.next = nstat_providers; |
2242 | nstat_providers = &nstat_udp_provider; |
2243 | } |
2244 | |
2245 | #if SKYWALK |
2246 | |
2247 | #pragma mark -- TCP/UDP/QUIC Userland |
2248 | |
// Almost all of this infrastructure is common to TCP, UDP, and QUIC
2250 | |
2251 | static u_int32_t nstat_userland_quic_watchers = 0; |
2252 | static u_int32_t nstat_userland_udp_watchers = 0; |
2253 | static u_int32_t nstat_userland_tcp_watchers = 0; |
2254 | |
2255 | static u_int32_t nstat_userland_quic_shadows = 0; |
2256 | static u_int32_t nstat_userland_udp_shadows = 0; |
2257 | static u_int32_t nstat_userland_tcp_shadows = 0; |
2258 | |
2259 | static nstat_provider nstat_userland_quic_provider; |
2260 | static nstat_provider nstat_userland_udp_provider; |
2261 | static nstat_provider nstat_userland_tcp_provider; |
2262 | |
2263 | enum nstat_rnf_override { |
2264 | nstat_rnf_override_not_set, |
2265 | nstat_rnf_override_enabled, |
2266 | nstat_rnf_override_disabled |
2267 | }; |
2268 | |
2269 | struct nstat_tu_shadow { |
2270 | tailq_entry_tu_shadow shad_link; |
2271 | userland_stats_request_vals_fn *shad_getvals_fn; |
2272 | userland_stats_request_extension_fn *shad_get_extension_fn; |
2273 | userland_stats_provider_context *shad_provider_context; |
2274 | u_int64_t shad_properties; |
2275 | u_int64_t shad_start_timestamp; |
2276 | nstat_provider_id_t shad_provider; |
2277 | struct nstat_procdetails *shad_procdetails; |
2278 | bool shad_live; // false if defunct |
2279 | enum nstat_rnf_override shad_rnf_override; |
2280 | uint32_t shad_magic; |
2281 | }; |
2282 | |
2283 | // Magic number checking should remain in place until the userland provider has been fully proven |
2284 | #define TU_SHADOW_MAGIC 0xfeedf00d |
2285 | #define TU_SHADOW_UNMAGIC 0xdeaddeed |
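// Lifecycle of the check, as used below: allocation stores TU_SHADOW_MAGIC,
// each entry point asserts it, and teardown stores TU_SHADOW_UNMAGIC so that
// a use-after-free trips the assert with a recognizable value:
//
//	assert(shad->shad_magic == TU_SHADOW_MAGIC);	// on every use
//	shad->shad_magic = TU_SHADOW_UNMAGIC;		// just before kfree_type()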
2286 | |
2287 | static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head); |
2288 | |
2289 | static errno_t |
2290 | nstat_userland_tu_lookup( |
2291 | __unused const void *data, |
2292 | __unused u_int32_t length, |
2293 | __unused nstat_provider_cookie_t *out_cookie) |
2294 | { |
2295 | // Looking up a specific connection is not supported |
2296 | return ENOTSUP; |
2297 | } |
2298 | |
2299 | static int |
2300 | nstat_userland_tu_gone( |
2301 | __unused nstat_provider_cookie_t cookie) |
2302 | { |
2303 | // Returns non-zero if the source has gone. |
2304 | // We don't keep a source hanging around, so the answer is always 0 |
2305 | return 0; |
2306 | } |
2307 | |
2308 | static errno_t |
2309 | nstat_userland_tu_counts( |
2310 | nstat_provider_cookie_t cookie, |
2311 | struct nstat_counts *out_counts, |
2312 | int *out_gone) |
2313 | { |
2314 | struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie; |
2315 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
2316 | assert(shad->shad_live); |
2317 | |
2318 | bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, out_counts, NULL); |
2319 | |
2320 | if (out_gone) { |
2321 | *out_gone = 0; |
2322 | } |
2323 | |
2324 | return (result)? 0 : EIO; |
2325 | } |
2326 | |
2327 | |
2328 | static errno_t |
2329 | nstat_userland_tu_copy_descriptor( |
2330 | nstat_provider_cookie_t cookie, |
2331 | void *data, |
2332 | __unused size_t len) |
2333 | { |
2334 | struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie; |
2335 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
2336 | assert(shad->shad_live); |
2337 | struct nstat_procdetails *procdetails = shad->shad_procdetails; |
2338 | assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC); |
2339 | |
2340 | bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, data); |
2341 | |
2342 | switch (shad->shad_provider) { |
2343 | case NSTAT_PROVIDER_TCP_USERLAND: |
2344 | { |
2345 | nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)data; |
2346 | desc->pid = procdetails->pdet_pid; |
2347 | desc->upid = procdetails->pdet_upid; |
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
2350 | if (shad->shad_rnf_override == nstat_rnf_override_enabled) { |
2351 | desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK; |
2352 | desc->fallback_mode = SO_FALLBACK_MODE_FAST; |
2353 | } else if (shad->shad_rnf_override == nstat_rnf_override_disabled) { |
2354 | desc->ifnet_properties &= ~NSTAT_IFNET_VIA_CELLFALLBACK; |
2355 | desc->fallback_mode = SO_FALLBACK_MODE_NONE; |
2356 | } |
2357 | desc->ifnet_properties |= (uint32_t)shad->shad_properties; |
2358 | desc->start_timestamp = shad->shad_start_timestamp; |
2359 | desc->timestamp = mach_continuous_time(); |
2360 | } |
2361 | break; |
2362 | case NSTAT_PROVIDER_UDP_USERLAND: |
2363 | { |
2364 | nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data; |
2365 | desc->pid = procdetails->pdet_pid; |
2366 | desc->upid = procdetails->pdet_upid; |
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
2369 | if (shad->shad_rnf_override == nstat_rnf_override_enabled) { |
2370 | desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK; |
2371 | desc->fallback_mode = SO_FALLBACK_MODE_FAST; |
2372 | } else if (shad->shad_rnf_override == nstat_rnf_override_disabled) { |
2373 | desc->ifnet_properties &= ~NSTAT_IFNET_VIA_CELLFALLBACK; |
2374 | desc->fallback_mode = SO_FALLBACK_MODE_NONE; |
2375 | } |
2376 | desc->ifnet_properties |= (uint32_t)shad->shad_properties; |
2377 | desc->start_timestamp = shad->shad_start_timestamp; |
2378 | desc->timestamp = mach_continuous_time(); |
2379 | } |
2380 | break; |
2381 | case NSTAT_PROVIDER_QUIC_USERLAND: |
2382 | { |
2383 | nstat_quic_descriptor *desc = (nstat_quic_descriptor *)data; |
2384 | desc->pid = procdetails->pdet_pid; |
2385 | desc->upid = procdetails->pdet_upid; |
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
2388 | if (shad->shad_rnf_override == nstat_rnf_override_enabled) { |
2389 | desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK; |
2390 | desc->fallback_mode = SO_FALLBACK_MODE_FAST; |
2391 | } else if (shad->shad_rnf_override == nstat_rnf_override_disabled) { |
2392 | desc->ifnet_properties &= ~NSTAT_IFNET_VIA_CELLFALLBACK; |
2393 | desc->fallback_mode = SO_FALLBACK_MODE_NONE; |
2394 | } |
2395 | desc->ifnet_properties |= (uint32_t)shad->shad_properties; |
2396 | desc->start_timestamp = shad->shad_start_timestamp; |
2397 | desc->timestamp = mach_continuous_time(); |
2398 | } |
2399 | break; |
2400 | default: |
2401 | break; |
2402 | } |
2403 | return (result)? 0 : EIO; |
2404 | } |
2405 | |
2406 | static void |
2407 | nstat_userland_tu_release( |
2408 | __unused nstat_provider_cookie_t cookie, |
2409 | __unused int locked) |
2410 | { |
2411 | // Called when a nstat_src is detached. |
2412 | // We don't reference count or ask for delayed release so nothing to do here. |
2413 | // Note that any associated nstat_tu_shadow may already have been released. |
2414 | } |
2415 | |
2416 | static bool |
2417 | check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid) |
2418 | { |
2419 | bool retval = true; |
2420 | |
2421 | if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) { |
2422 | retval = false; |
2423 | |
2424 | if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) && |
2425 | (filter->npf_pid == pid)) { |
2426 | retval = true; |
2427 | } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) && |
2428 | (filter->npf_pid == epid)) { |
2429 | retval = true; |
2430 | } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) && |
		    (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0)) {
2432 | retval = true; |
2433 | } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) && |
		    (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0)) {
2435 | retval = true; |
2436 | } |
2437 | } |
2438 | return retval; |
2439 | } |
2440 | |
2441 | static bool |
2442 | nstat_userland_tcp_reporting_allowed( |
2443 | nstat_provider_cookie_t cookie, |
2444 | nstat_provider_filter *filter, |
2445 | __unused u_int64_t suppression_flags) |
2446 | { |
2447 | bool retval = true; |
2448 | struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie; |
2449 | |
2450 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
2451 | |
2452 | if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) { |
2453 | u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE; |
2454 | |
2455 | if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) { |
2456 | if ((filter->npf_flags & ifflags) == 0) { |
2457 | return false; |
2458 | } |
2459 | } |
2460 | } |
2461 | |
2462 | if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) { |
2463 | nstat_tcp_descriptor tcp_desc; // Stack allocation - OK or pushing the limits too far? |
2464 | if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &tcp_desc)) { |
			retval = check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
			    &tcp_desc.uuid, &tcp_desc.euuid);
2467 | } else { |
2468 | retval = false; // No further information, so might as well give up now. |
2469 | } |
2470 | } |
2471 | return retval; |
2472 | } |
2473 | |
2474 | static size_t |
2475 | nstat_userland_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len) |
2476 | { |
2477 | struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie; |
2478 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
2479 | assert(shad->shad_live); |
2480 | assert(shad->shad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC); |
2481 | |
2482 | return shad->shad_get_extension_fn(shad->shad_provider_context, extension_id, buf, len); |
2483 | } |
2484 | |
2485 | |
2486 | static bool |
2487 | nstat_userland_udp_reporting_allowed( |
2488 | nstat_provider_cookie_t cookie, |
2489 | nstat_provider_filter *filter, |
2490 | __unused u_int64_t suppression_flags) |
2491 | { |
2492 | bool retval = true; |
2493 | struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie; |
2494 | |
2495 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
2496 | |
2497 | if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) { |
2498 | u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE; |
2499 | |
2500 | if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) { |
2501 | if ((filter->npf_flags & ifflags) == 0) { |
2502 | return false; |
2503 | } |
2504 | } |
2505 | } |
2506 | if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) { |
2507 | nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far? |
2508 | if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &udp_desc)) { |
			retval = check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
			    &udp_desc.uuid, &udp_desc.euuid);
2511 | } else { |
2512 | retval = false; // No further information, so might as well give up now. |
2513 | } |
2514 | } |
2515 | return retval; |
2516 | } |
2517 | |
2518 | static bool |
2519 | nstat_userland_quic_reporting_allowed( |
2520 | nstat_provider_cookie_t cookie, |
2521 | nstat_provider_filter *filter, |
2522 | __unused u_int64_t suppression_flags) |
2523 | { |
2524 | bool retval = true; |
2525 | struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie; |
2526 | |
2527 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
2528 | |
2529 | if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) { |
2530 | u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE; |
2531 | |
2532 | if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) { |
2533 | if ((filter->npf_flags & ifflags) == 0) { |
2534 | return false; |
2535 | } |
2536 | } |
2537 | } |
2538 | if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) { |
2539 | nstat_quic_descriptor quic_desc; // Stack allocation - OK or pushing the limits too far? |
2540 | if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &quic_desc)) { |
			retval = check_reporting_for_user(filter, (pid_t)quic_desc.pid, (pid_t)quic_desc.epid,
			    &quic_desc.uuid, &quic_desc.euuid);
2543 | } else { |
2544 | retval = false; // No further information, so might as well give up now. |
2545 | } |
2546 | } |
2547 | return retval; |
2548 | } |
2549 | |
2550 | static errno_t |
2551 | nstat_userland_protocol_add_watcher( |
2552 | nstat_control_state *state, |
2553 | nstat_msg_add_all_srcs *req, |
2554 | nstat_provider_type_t nstat_provider_type, |
2555 | nstat_provider *nstat_provider, |
2556 | u_int32_t *proto_watcher_cnt) |
2557 | { |
2558 | errno_t result; |
2559 | |
	lck_mtx_lock(&nstat_mtx);
2561 | result = nstat_set_provider_filter(state, req); |
2562 | |
2563 | if (result == 0) { |
2564 | struct nstat_tu_shadow *shad; |
2565 | |
2566 | OSIncrementAtomic(proto_watcher_cnt); |
2567 | |
2568 | TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) { |
2569 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
2570 | |
2571 | if ((shad->shad_provider == nstat_provider_type) && (shad->shad_live)) { |
				result = nstat_control_source_add(0, state, nstat_provider, shad);
				if (result != 0) {
					printf("%s - nstat_control_source_add returned %d for "
					    "provider type: %d\n", __func__, result, nstat_provider_type);
2576 | break; |
2577 | } |
2578 | } |
2579 | } |
2580 | } |
	lck_mtx_unlock(&nstat_mtx);
2582 | |
2583 | return result; |
2584 | } |
2585 | |
2586 | static errno_t |
2587 | nstat_userland_tcp_add_watcher( |
2588 | nstat_control_state *state, |
2589 | nstat_msg_add_all_srcs *req) |
2590 | { |
	return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_TCP_USERLAND,
	    &nstat_userland_tcp_provider, &nstat_userland_tcp_watchers);
2593 | } |
2594 | |
2595 | static errno_t |
2596 | nstat_userland_udp_add_watcher( |
2597 | nstat_control_state *state, |
2598 | nstat_msg_add_all_srcs *req) |
2599 | { |
	return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_UDP_USERLAND,
	    &nstat_userland_udp_provider, &nstat_userland_udp_watchers);
2602 | } |
2603 | |
2604 | static errno_t |
2605 | nstat_userland_quic_add_watcher( |
2606 | nstat_control_state *state, |
2607 | nstat_msg_add_all_srcs *req) |
2608 | { |
	return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_QUIC_USERLAND,
	    &nstat_userland_quic_provider, &nstat_userland_quic_watchers);
2611 | } |
2612 | |
2613 | static void |
2614 | nstat_userland_tcp_remove_watcher( |
2615 | __unused nstat_control_state *state) |
2616 | { |
2617 | OSDecrementAtomic(&nstat_userland_tcp_watchers); |
2618 | } |
2619 | |
2620 | static void |
2621 | nstat_userland_udp_remove_watcher( |
2622 | __unused nstat_control_state *state) |
2623 | { |
2624 | OSDecrementAtomic(&nstat_userland_udp_watchers); |
2625 | } |
2626 | |
2627 | static void |
2628 | nstat_userland_quic_remove_watcher( |
2629 | __unused nstat_control_state *state) |
2630 | { |
2631 | OSDecrementAtomic(&nstat_userland_quic_watchers); |
2632 | } |
2633 | |
2634 | |
2635 | static void |
2636 | nstat_init_userland_tcp_provider(void) |
2637 | { |
	bzero(&nstat_userland_tcp_provider, sizeof(nstat_userland_tcp_provider));
2639 | nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor); |
2640 | nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND; |
2641 | nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup; |
2642 | nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone; |
2643 | nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts; |
2644 | nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release; |
2645 | nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher; |
2646 | nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher; |
2647 | nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor; |
2648 | nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed; |
2649 | nstat_userland_tcp_provider.nstat_copy_extension = nstat_userland_extensions; |
2650 | nstat_userland_tcp_provider.next = nstat_providers; |
2651 | nstat_providers = &nstat_userland_tcp_provider; |
2652 | } |
2653 | |
2654 | |
2655 | static void |
2656 | nstat_init_userland_udp_provider(void) |
2657 | { |
	bzero(&nstat_userland_udp_provider, sizeof(nstat_userland_udp_provider));
2659 | nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor); |
2660 | nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND; |
2661 | nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup; |
2662 | nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone; |
2663 | nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts; |
2664 | nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release; |
2665 | nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher; |
2666 | nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher; |
2667 | nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor; |
2668 | nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed; |
2669 | nstat_userland_udp_provider.nstat_copy_extension = nstat_userland_extensions; |
2670 | nstat_userland_udp_provider.next = nstat_providers; |
2671 | nstat_providers = &nstat_userland_udp_provider; |
2672 | } |
2673 | |
2674 | static void |
2675 | nstat_init_userland_quic_provider(void) |
2676 | { |
	bzero(&nstat_userland_quic_provider, sizeof(nstat_userland_quic_provider));
2678 | nstat_userland_quic_provider.nstat_descriptor_length = sizeof(nstat_quic_descriptor); |
2679 | nstat_userland_quic_provider.nstat_provider_id = NSTAT_PROVIDER_QUIC_USERLAND; |
2680 | nstat_userland_quic_provider.nstat_lookup = nstat_userland_tu_lookup; |
2681 | nstat_userland_quic_provider.nstat_gone = nstat_userland_tu_gone; |
2682 | nstat_userland_quic_provider.nstat_counts = nstat_userland_tu_counts; |
2683 | nstat_userland_quic_provider.nstat_release = nstat_userland_tu_release; |
2684 | nstat_userland_quic_provider.nstat_watcher_add = nstat_userland_quic_add_watcher; |
2685 | nstat_userland_quic_provider.nstat_watcher_remove = nstat_userland_quic_remove_watcher; |
2686 | nstat_userland_quic_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor; |
2687 | nstat_userland_quic_provider.nstat_reporting_allowed = nstat_userland_quic_reporting_allowed; |
2688 | nstat_userland_quic_provider.nstat_copy_extension = nstat_userland_extensions; |
2689 | nstat_userland_quic_provider.next = nstat_providers; |
2690 | nstat_providers = &nstat_userland_quic_provider; |
2691 | } |
2692 | |
2693 | |
// Things get started with a call to netstats to say that there's a new connection:
2695 | __private_extern__ nstat_userland_context |
2696 | ntstat_userland_stats_open(userland_stats_provider_context *ctx, |
2697 | int provider_id, |
2698 | u_int64_t properties, |
2699 | userland_stats_request_vals_fn req_fn, |
2700 | userland_stats_request_extension_fn req_extension_fn) |
2701 | { |
2702 | struct nstat_tu_shadow *shad; |
2703 | struct nstat_procdetails *procdetails; |
2704 | nstat_provider *provider; |
2705 | |
2706 | if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) && |
2707 | (provider_id != NSTAT_PROVIDER_UDP_USERLAND) && |
2708 | (provider_id != NSTAT_PROVIDER_QUIC_USERLAND)) { |
2709 | printf("%s - incorrect provider is supplied, %d\n" , __func__, provider_id); |
2710 | return NULL; |
2711 | } |
2712 | |
2713 | shad = kalloc_type(struct nstat_tu_shadow, Z_WAITOK | Z_NOFAIL); |
2714 | |
2715 | procdetails = nstat_retain_curprocdetails(); |
2716 | |
2717 | if (procdetails == NULL) { |
2718 | kfree_type(struct nstat_tu_shadow, shad); |
2719 | return NULL; |
2720 | } |
2721 | |
2722 | shad->shad_getvals_fn = req_fn; |
2723 | shad->shad_get_extension_fn = req_extension_fn; |
2724 | shad->shad_provider_context = ctx; |
2725 | shad->shad_provider = provider_id; |
2726 | shad->shad_properties = properties; |
2727 | shad->shad_procdetails = procdetails; |
2728 | shad->shad_rnf_override = nstat_rnf_override_not_set; |
2729 | shad->shad_start_timestamp = mach_continuous_time(); |
2730 | shad->shad_live = true; |
2731 | shad->shad_magic = TU_SHADOW_MAGIC; |
2732 | |
	lck_mtx_lock(&nstat_mtx);
2734 | nstat_control_state *state; |
2735 | |
2736 | // Even if there are no watchers, we save the shadow structure |
2737 | TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link); |
2738 | |
2739 | if (provider_id == NSTAT_PROVIDER_TCP_USERLAND) { |
2740 | nstat_userland_tcp_shadows++; |
2741 | provider = &nstat_userland_tcp_provider; |
2742 | } else if (provider_id == NSTAT_PROVIDER_UDP_USERLAND) { |
2743 | nstat_userland_udp_shadows++; |
2744 | provider = &nstat_userland_udp_provider; |
2745 | } else { |
2746 | nstat_userland_quic_shadows++; |
2747 | provider = &nstat_userland_quic_provider; |
2748 | } |
2749 | |
2750 | for (state = nstat_controls; state; state = state->ncs_next) { |
2751 | if ((state->ncs_watching & (1 << provider_id)) != 0) { |
2752 | // this client is watching tcp/udp/quic userland |
2753 | // Link to it. |
			int result = nstat_control_source_add(0, state, provider, shad);
2755 | if (result != 0) { |
2756 | // There should be some kind of statistics for failures like this. |
2757 | // <rdar://problem/31377195> The kernel ntstat component should keep some |
2758 | // internal counters reflecting operational state for eventual AWD reporting |
2759 | } |
2760 | } |
2761 | } |
	lck_mtx_unlock(&nstat_mtx);
2763 | |
2764 | return (nstat_userland_context)shad; |
2765 | } |
2766 | |
2767 | |
2768 | __private_extern__ void |
2769 | ntstat_userland_stats_close(nstat_userland_context nstat_ctx) |
2770 | { |
2771 | struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx; |
2772 | tailq_head_nstat_src dead_list; |
2773 | nstat_src *src; |
2774 | |
2775 | if (shad == NULL) { |
2776 | return; |
2777 | } |
2778 | |
2779 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
2780 | TAILQ_INIT(&dead_list); |
2781 | |
	lck_mtx_lock(&nstat_mtx);
2783 | if (nstat_userland_udp_watchers != 0 || |
2784 | nstat_userland_tcp_watchers != 0 || |
2785 | nstat_userland_quic_watchers != 0) { |
2786 | nstat_control_state *state; |
2787 | errno_t result; |
2788 | |
2789 | for (state = nstat_controls; state; state = state->ncs_next) { |
			lck_mtx_lock(&state->ncs_mtx);
2791 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
2792 | { |
2793 | if (shad == (struct nstat_tu_shadow *)src->cookie) { |
2794 | nstat_provider_id_t provider_id = src->provider->nstat_provider_id; |
2795 | if (provider_id == NSTAT_PROVIDER_TCP_USERLAND || |
2796 | provider_id == NSTAT_PROVIDER_UDP_USERLAND || |
2797 | provider_id == NSTAT_PROVIDER_QUIC_USERLAND) { |
2798 | break; |
2799 | } |
2800 | } |
2801 | } |
2802 | |
2803 | if (src) { |
2804 | result = nstat_control_send_goodbye(state, src); |
2805 | |
2806 | TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); |
2807 | TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link); |
2808 | } |
			lck_mtx_unlock(&state->ncs_mtx);
2810 | } |
2811 | } |
2812 | TAILQ_REMOVE(&nstat_userprot_shad_head, shad, shad_link); |
2813 | |
2814 | if (shad->shad_live) { |
2815 | if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) { |
2816 | nstat_userland_tcp_shadows--; |
2817 | } else if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND) { |
2818 | nstat_userland_udp_shadows--; |
2819 | } else { |
2820 | nstat_userland_quic_shadows--; |
2821 | } |
2822 | } |
2823 | |
	lck_mtx_unlock(&nstat_mtx);
2825 | |
2826 | while ((src = TAILQ_FIRST(&dead_list))) { |
2827 | TAILQ_REMOVE(&dead_list, src, ns_control_link); |
2828 | nstat_control_cleanup_source(NULL, src, TRUE); |
2829 | } |
	nstat_release_procdetails(shad->shad_procdetails);
2831 | shad->shad_magic = TU_SHADOW_UNMAGIC; |
2832 | |
2833 | kfree_type(struct nstat_tu_shadow, shad); |
2834 | } |
2835 | |
2836 | static void |
2837 | ntstat_userland_stats_event_locked( |
2838 | struct nstat_tu_shadow *shad, |
2839 | uint64_t event) |
2840 | { |
2841 | nstat_control_state *state; |
2842 | nstat_src *src; |
2843 | errno_t result; |
2844 | nstat_provider_id_t provider_id; |
2845 | |
2846 | if (nstat_userland_udp_watchers != 0 || nstat_userland_tcp_watchers != 0 || nstat_userland_quic_watchers != 0) { |
2847 | for (state = nstat_controls; state; state = state->ncs_next) { |
2848 | if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_USERLAND].npf_events & event) == 0) && |
2849 | ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_USERLAND].npf_events & event) == 0) && |
2850 | ((state->ncs_provider_filters[NSTAT_PROVIDER_QUIC_USERLAND].npf_events & event) == 0)) { |
2851 | continue; |
2852 | } |
			lck_mtx_lock(&state->ncs_mtx);
2854 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) { |
2855 | provider_id = src->provider->nstat_provider_id; |
2856 | if (provider_id == NSTAT_PROVIDER_TCP_USERLAND || provider_id == NSTAT_PROVIDER_UDP_USERLAND || |
2857 | provider_id == NSTAT_PROVIDER_QUIC_USERLAND) { |
2858 | if (shad == (struct nstat_tu_shadow *)src->cookie) { |
2859 | break; |
2860 | } |
2861 | } |
2862 | } |
2863 | if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) { |
2864 | result = nstat_control_send_event(state, src, event); |
2865 | } |
			lck_mtx_unlock(&state->ncs_mtx);
2867 | } |
2868 | } |
2869 | } |
2870 | |
2871 | __private_extern__ void |
2872 | ntstat_userland_stats_event( |
2873 | nstat_userland_context nstat_ctx, |
2874 | uint64_t event) |
2875 | { |
2876 | // This will need refinement for when we do genuine stats filtering |
2877 | // See <rdar://problem/23022832> NetworkStatistics should provide opt-in notifications |
2878 | // For now it deals only with events that potentially cause any traditional netstat sources to be closed |
2879 | |
2880 | struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx; |
2881 | tailq_head_nstat_src dead_list; |
2882 | nstat_src *src; |
2883 | |
2884 | if (shad == NULL) { |
2885 | return; |
2886 | } |
2887 | |
2888 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
2889 | |
2890 | if (event & NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT) { |
2891 | TAILQ_INIT(&dead_list); |
2892 | |
		lck_mtx_lock(&nstat_mtx);
2894 | if (nstat_userland_udp_watchers != 0 || |
2895 | nstat_userland_tcp_watchers != 0 || |
2896 | nstat_userland_quic_watchers != 0) { |
2897 | nstat_control_state *state; |
2898 | errno_t result; |
2899 | |
2900 | for (state = nstat_controls; state; state = state->ncs_next) { |
				lck_mtx_lock(&state->ncs_mtx);
2902 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
2903 | { |
2904 | if (shad == (struct nstat_tu_shadow *)src->cookie) { |
2905 | break; |
2906 | } |
2907 | } |
2908 | |
2909 | if (src) { |
2910 | if (!(src->filter & NSTAT_FILTER_TCP_NO_EARLY_CLOSE)) { |
2911 | result = nstat_control_send_goodbye(state, src); |
2912 | |
2913 | TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); |
2914 | TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link); |
2915 | } |
2916 | } |
				lck_mtx_unlock(&state->ncs_mtx);
2918 | } |
2919 | } |
		lck_mtx_unlock(&nstat_mtx);
2921 | |
2922 | while ((src = TAILQ_FIRST(&dead_list))) { |
2923 | TAILQ_REMOVE(&dead_list, src, ns_control_link); |
2924 | nstat_control_cleanup_source(NULL, src, TRUE); |
2925 | } |
2926 | } |
2927 | } |
2928 | |
2929 | __private_extern__ void |
2930 | nstats_userland_stats_defunct_for_process(int pid) |
2931 | { |
2932 | // Note that this can be called multiple times for the same process |
2933 | tailq_head_nstat_src dead_list; |
2934 | nstat_src *src, *tmpsrc; |
2935 | struct nstat_tu_shadow *shad; |
2936 | |
2937 | TAILQ_INIT(&dead_list); |
2938 | |
	lck_mtx_lock(&nstat_mtx);
2940 | |
2941 | if (nstat_userland_udp_watchers != 0 || |
2942 | nstat_userland_tcp_watchers != 0 || |
2943 | nstat_userland_quic_watchers != 0) { |
2944 | nstat_control_state *state; |
2945 | errno_t result; |
2946 | |
2947 | for (state = nstat_controls; state; state = state->ncs_next) { |
			lck_mtx_lock(&state->ncs_mtx);
2949 | TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc) |
2950 | { |
2951 | nstat_provider_id_t provider_id = src->provider->nstat_provider_id; |
2952 | if (provider_id == NSTAT_PROVIDER_TCP_USERLAND || |
2953 | provider_id == NSTAT_PROVIDER_UDP_USERLAND || |
2954 | provider_id == NSTAT_PROVIDER_QUIC_USERLAND) { |
2955 | shad = (struct nstat_tu_shadow *)src->cookie; |
2956 | if (shad->shad_procdetails->pdet_pid == pid) { |
2957 | result = nstat_control_send_goodbye(state, src); |
2958 | |
2959 | TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); |
2960 | TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link); |
2961 | } |
2962 | } |
2963 | } |
			lck_mtx_unlock(&state->ncs_mtx);
2965 | } |
2966 | } |
2967 | |
2968 | TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) { |
2969 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
2970 | |
2971 | if (shad->shad_live) { |
2972 | if (shad->shad_procdetails->pdet_pid == pid) { |
2973 | shad->shad_live = false; |
2974 | if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) { |
2975 | nstat_userland_tcp_shadows--; |
2976 | } else if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND) { |
2977 | nstat_userland_udp_shadows--; |
2978 | } else { |
2979 | nstat_userland_quic_shadows--; |
2980 | } |
2981 | } |
2982 | } |
2983 | } |
2984 | |
	lck_mtx_unlock(&nstat_mtx);
2986 | |
2987 | while ((src = TAILQ_FIRST(&dead_list))) { |
2988 | TAILQ_REMOVE(&dead_list, src, ns_control_link); |
2989 | nstat_control_cleanup_source(NULL, src, TRUE); |
2990 | } |
2991 | } |
2992 | |
2993 | errno_t |
2994 | nstat_userland_mark_rnf_override(uuid_t target_fuuid, bool rnf_override) |
2995 | { |
2996 | // Note that this can be called multiple times for the same process |
2997 | struct nstat_tu_shadow *shad; |
2998 | uuid_t fuuid; |
2999 | errno_t result; |
3000 | |
	lck_mtx_lock(&nstat_mtx);
	// We set the fallback state regardless of watchers as there may be future ones that need to know
3003 | TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) { |
3004 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
3005 | assert(shad->shad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC); |
3006 | if (shad->shad_get_extension_fn(shad->shad_provider_context, NSTAT_EXTENDED_UPDATE_TYPE_FUUID, fuuid, sizeof(fuuid))) { |
		if (uuid_compare(fuuid, target_fuuid) == 0) {
3008 | break; |
3009 | } |
3010 | } |
3011 | } |
3012 | if (shad) { |
3013 | if (shad->shad_procdetails->pdet_pid != proc_selfpid()) { |
3014 | result = EPERM; |
3015 | } else { |
3016 | result = 0; |
			// It would be possible but awkward to check the previous value
			// for RNF override, and send an event only if it changed.
			// In practice it is fine to send an event regardless; it simply
			// "pushes" the last statistics gathered for the previous mode.
			shad->shad_rnf_override = rnf_override ? nstat_rnf_override_enabled
			    : nstat_rnf_override_disabled;
			ntstat_userland_stats_event_locked(shad,
			    rnf_override ? NSTAT_EVENT_SRC_ENTER_CELLFALLBACK
			    : NSTAT_EVENT_SRC_EXIT_CELLFALLBACK);
3026 | } |
3027 | } else { |
3028 | result = EEXIST; |
3029 | } |
3030 | |
	lck_mtx_unlock(&nstat_mtx);
3032 | |
3033 | return result; |
3034 | } |
3035 | |
3036 | #pragma mark -- Generic Providers -- |
3037 | |
3038 | static nstat_provider nstat_userland_conn_provider; |
3039 | static nstat_provider nstat_udp_subflow_provider; |
3040 | |
3041 | static u_int32_t nstat_generic_provider_watchers[NSTAT_PROVIDER_COUNT]; |
3042 | |
3043 | struct nstat_generic_shadow { |
3044 | tailq_entry_generic_shadow gshad_link; |
3045 | nstat_provider_context gshad_provider_context; |
3046 | nstat_provider_request_vals_fn *gshad_getvals_fn; |
3047 | nstat_provider_request_extensions_fn *gshad_getextensions_fn; |
3048 | u_int64_t gshad_properties; |
3049 | u_int64_t gshad_start_timestamp; |
3050 | struct nstat_procdetails *gshad_procdetails; |
3051 | nstat_provider_id_t gshad_provider; |
3052 | int32_t gshad_refcnt; |
3053 | uint32_t gshad_magic; |
3054 | }; |
3055 | |
3056 | // Magic number checking should remain in place until the userland provider has been fully proven |
3057 | #define NSTAT_GENERIC_SHADOW_MAGIC 0xfadef00d |
3058 | #define NSTAT_GENERIC_SHADOW_UNMAGIC 0xfadedead |
3059 | |
3060 | static tailq_head_generic_shadow nstat_gshad_head = TAILQ_HEAD_INITIALIZER(nstat_gshad_head); |
3061 | |
3062 | static inline void |
3063 | nstat_retain_gshad( |
3064 | struct nstat_generic_shadow *gshad) |
3065 | { |
3066 | assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC); |
3067 | |
3068 | OSIncrementAtomic(&gshad->gshad_refcnt); |
3069 | } |
3070 | |
3071 | static void |
3072 | nstat_release_gshad( |
3073 | struct nstat_generic_shadow *gshad) |
3074 | { |
3075 | assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC); |
3076 | |
3077 | if (OSDecrementAtomic(&gshad->gshad_refcnt) == 1) { |
		nstat_release_procdetails(gshad->gshad_procdetails);
3079 | gshad->gshad_magic = NSTAT_GENERIC_SHADOW_UNMAGIC; |
3080 | kfree_type(struct nstat_generic_shadow, gshad); |
3081 | } |
3082 | } |
3083 | |
3084 | static errno_t |
3085 | nstat_generic_provider_lookup( |
3086 | __unused const void *data, |
3087 | __unused u_int32_t length, |
3088 | __unused nstat_provider_cookie_t *out_cookie) |
3089 | { |
3090 | // Looking up a specific connection is not supported |
3091 | return ENOTSUP; |
3092 | } |
3093 | |
3094 | static int |
3095 | nstat_generic_provider_gone( |
3096 | __unused nstat_provider_cookie_t cookie) |
3097 | { |
3098 | // Returns non-zero if the source has gone. |
3099 | // We don't keep a source hanging around, so the answer is always 0 |
3100 | return 0; |
3101 | } |
3102 | |
3103 | static errno_t |
3104 | nstat_generic_provider_counts( |
3105 | nstat_provider_cookie_t cookie, |
3106 | struct nstat_counts *out_counts, |
3107 | int *out_gone) |
3108 | { |
3109 | struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie; |
3110 | assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC); |
3111 | |
	memset(out_counts, 0, sizeof(*out_counts));
3113 | |
3114 | bool result = (*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, out_counts, NULL); |
3115 | |
3116 | if (out_gone) { |
3117 | *out_gone = 0; |
3118 | } |
	return (result) ? 0 : EIO;
3120 | } |
3121 | |
3122 | |
3123 | static errno_t |
3124 | nstat_generic_provider_copy_descriptor( |
3125 | nstat_provider_cookie_t cookie, |
3126 | void *data, |
3127 | __unused size_t len) |
3128 | { |
3129 | struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie; |
3130 | assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC); |
3131 | struct nstat_procdetails *procdetails = gshad->gshad_procdetails; |
3132 | assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC); |
3133 | |
3134 | bool result = (*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, NULL, data); |
3135 | |
3136 | switch (gshad->gshad_provider) { |
3137 | case NSTAT_PROVIDER_CONN_USERLAND: |
3138 | { |
3139 | nstat_connection_descriptor *desc = (nstat_connection_descriptor *)data; |
3140 | desc->pid = procdetails->pdet_pid; |
3141 | desc->upid = procdetails->pdet_upid; |
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
3144 | desc->start_timestamp = gshad->gshad_start_timestamp; |
3145 | desc->timestamp = mach_continuous_time(); |
3146 | break; |
3147 | } |
3148 | case NSTAT_PROVIDER_UDP_SUBFLOW: |
3149 | { |
3150 | nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data; |
3151 | desc->pid = procdetails->pdet_pid; |
3152 | desc->upid = procdetails->pdet_upid; |
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
3155 | desc->start_timestamp = gshad->gshad_start_timestamp; |
3156 | desc->timestamp = mach_continuous_time(); |
3157 | break; |
3158 | } |
3159 | default: |
3160 | break; |
3161 | } |
	return (result) ? 0 : EIO;
3163 | } |
3164 | |
3165 | static void |
3166 | nstat_generic_provider_release( |
3167 | __unused nstat_provider_cookie_t cookie, |
3168 | __unused int locked) |
3169 | { |
3170 | // Called when a nstat_src is detached. |
3171 | struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie; |
3172 | |
3173 | nstat_release_gshad(gshad); |
3174 | } |
3175 | |
3176 | static bool |
3177 | nstat_generic_provider_reporting_allowed( |
3178 | nstat_provider_cookie_t cookie, |
3179 | nstat_provider_filter *filter, |
3180 | u_int64_t suppression_flags) |
3181 | { |
3182 | struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie; |
3183 | |
3184 | assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC); |
3185 | |
3186 | if ((filter->npf_flags & NSTAT_FILTER_SUPPRESS_BORING_FLAGS) != 0) { |
3187 | if ((filter->npf_flags & suppression_flags) != 0) { |
3188 | return false; |
3189 | } |
3190 | } |
3191 | |
3192 | // Filter based on interface and connection flags |
3193 | // If a provider doesn't support flags, a client shouldn't attempt to use filtering |
3194 | if ((filter->npf_flags & NSTAT_FILTER_IFNET_AND_CONN_FLAGS) != 0) { |
3195 | u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE; |
3196 | |
3197 | if ((*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, &ifflags, NULL, NULL)) { |
3198 | if ((filter->npf_flags & ifflags) == 0) { |
3199 | return false; |
3200 | } |
3201 | } |
3202 | } |
3203 | |
3204 | if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) { |
3205 | struct nstat_procdetails *procdetails = gshad->gshad_procdetails; |
3206 | assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC); |
3207 | |
3208 | // Check details that we have readily to hand before asking the provider for descriptor items |
3209 | if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) && |
3210 | (filter->npf_pid == procdetails->pdet_pid)) { |
3211 | return true; |
3212 | } |
3213 | if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) && |
	    (memcmp(filter->npf_uuid, &procdetails->pdet_uuid, sizeof(filter->npf_uuid)) == 0)) {
3215 | return true; |
3216 | } |
3217 | if ((filter->npf_flags & (NSTAT_FILTER_SPECIFIC_USER_BY_EPID | NSTAT_FILTER_SPECIFIC_USER_BY_EUUID)) != 0) { |
3218 | nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far? |
3219 | switch (gshad->gshad_provider) { |
3220 | case NSTAT_PROVIDER_CONN_USERLAND: |
3221 | // Filtering by effective uuid or effective pid is currently not supported |
3222 | filter->npf_flags &= ~((uint64_t)(NSTAT_FILTER_SPECIFIC_USER_BY_EPID | NSTAT_FILTER_SPECIFIC_USER_BY_EUUID)); |
3223 | printf("%s - attempt to filter conn provider by effective pid/uuid, not supported\n" , __func__); |
3224 | return true; |
3225 | |
3226 | case NSTAT_PROVIDER_UDP_SUBFLOW: |
3227 | if ((*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, NULL, &udp_desc)) { |
				if (check_reporting_for_user(filter, procdetails->pdet_pid, (pid_t)udp_desc.epid,
				    &procdetails->pdet_uuid, &udp_desc.euuid)) {
3230 | return true; |
3231 | } |
3232 | } |
3233 | break; |
3234 | default: |
3235 | break; |
3236 | } |
3237 | } |
3238 | return false; |
3239 | } |
3240 | return true; |
3241 | } |
3242 | |
3243 | static size_t |
3244 | nstat_generic_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len) |
3245 | { |
3246 | struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie; |
3247 | assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC); |
3248 | assert(gshad->gshad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC); |
3249 | |
3250 | if (gshad->gshad_getextensions_fn == NULL) { |
3251 | return 0; |
3252 | } |
3253 | return gshad->gshad_getextensions_fn(gshad->gshad_provider_context, extension_id, buf, len); |
3254 | } |
3255 | |
3256 | static errno_t |
3257 | nstat_generic_provider_add_watcher( |
3258 | nstat_control_state *state, |
3259 | nstat_msg_add_all_srcs *req) |
3260 | { |
3261 | errno_t result; |
3262 | nstat_provider_id_t provider_id = req->provider; |
3263 | nstat_provider *provider; |
3264 | |
3265 | switch (provider_id) { |
3266 | case NSTAT_PROVIDER_CONN_USERLAND: |
3267 | provider = &nstat_userland_conn_provider; |
3268 | break; |
3269 | case NSTAT_PROVIDER_UDP_SUBFLOW: |
3270 | provider = &nstat_udp_subflow_provider; |
3271 | break; |
3272 | default: |
3273 | return ENOTSUP; |
3274 | } |
3275 | |
	lck_mtx_lock(&nstat_mtx);
3277 | result = nstat_set_provider_filter(state, req); |
3278 | |
3279 | if (result == 0) { |
3280 | struct nstat_generic_shadow *gshad; |
3281 | nstat_provider_filter *filter = &state->ncs_provider_filters[provider_id]; |
3282 | |
3283 | OSIncrementAtomic(&nstat_generic_provider_watchers[provider_id]); |
3284 | |
3285 | TAILQ_FOREACH(gshad, &nstat_gshad_head, gshad_link) { |
3286 | assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC); |
3287 | |
3288 | if (gshad->gshad_provider == provider_id) { |
3289 | if (filter->npf_flags & NSTAT_FILTER_INITIAL_PROPERTIES) { |
3290 | u_int64_t npf_flags = filter->npf_flags & NSTAT_FILTER_IFNET_AND_CONN_FLAGS; |
3291 | if ((npf_flags != 0) && ((npf_flags & gshad->gshad_properties) == 0)) { |
3292 | // Skip this one |
3293 | // Note - no filtering by pid or UUID supported at this point, for simplicity |
3294 | continue; |
3295 | } |
3296 | } |
3297 | nstat_retain_gshad(gshad); |
				result = nstat_control_source_add(0, state, provider, gshad);
				if (result != 0) {
					printf("%s - nstat_control_source_add returned %d for "
					    "provider type: %d\n", __func__, result, provider_id);
3302 | nstat_release_gshad(gshad); |
3303 | break; |
3304 | } |
3305 | } |
3306 | } |
3307 | } |
	lck_mtx_unlock(&nstat_mtx);
3309 | |
3310 | return result; |
3311 | } |
3312 | |
3313 | static void |
3314 | nstat_userland_conn_remove_watcher( |
3315 | __unused nstat_control_state *state) |
3316 | { |
3317 | OSDecrementAtomic(&nstat_generic_provider_watchers[NSTAT_PROVIDER_CONN_USERLAND]); |
3318 | } |
3319 | |
3320 | static void |
3321 | nstat_udp_subflow_remove_watcher( |
3322 | __unused nstat_control_state *state) |
3323 | { |
3324 | OSDecrementAtomic(&nstat_generic_provider_watchers[NSTAT_PROVIDER_UDP_SUBFLOW]); |
3325 | } |
3326 | |
3327 | static void |
3328 | nstat_init_userland_conn_provider(void) |
3329 | { |
	bzero(&nstat_userland_conn_provider, sizeof(nstat_userland_conn_provider));
3331 | nstat_userland_conn_provider.nstat_descriptor_length = sizeof(nstat_connection_descriptor); |
3332 | nstat_userland_conn_provider.nstat_provider_id = NSTAT_PROVIDER_CONN_USERLAND; |
3333 | nstat_userland_conn_provider.nstat_lookup = nstat_generic_provider_lookup; |
3334 | nstat_userland_conn_provider.nstat_gone = nstat_generic_provider_gone; |
3335 | nstat_userland_conn_provider.nstat_counts = nstat_generic_provider_counts; |
3336 | nstat_userland_conn_provider.nstat_release = nstat_generic_provider_release; |
3337 | nstat_userland_conn_provider.nstat_watcher_add = nstat_generic_provider_add_watcher; |
3338 | nstat_userland_conn_provider.nstat_watcher_remove = nstat_userland_conn_remove_watcher; |
3339 | nstat_userland_conn_provider.nstat_copy_descriptor = nstat_generic_provider_copy_descriptor; |
3340 | nstat_userland_conn_provider.nstat_reporting_allowed = nstat_generic_provider_reporting_allowed; |
3341 | nstat_userland_conn_provider.nstat_copy_extension = nstat_generic_extensions; |
3342 | nstat_userland_conn_provider.next = nstat_providers; |
3343 | nstat_providers = &nstat_userland_conn_provider; |
3344 | } |
3345 | |
3346 | static void |
3347 | nstat_init_udp_subflow_provider(void) |
3348 | { |
	bzero(&nstat_udp_subflow_provider, sizeof(nstat_udp_subflow_provider));
3350 | nstat_udp_subflow_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor); |
3351 | nstat_udp_subflow_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_SUBFLOW; |
3352 | nstat_udp_subflow_provider.nstat_lookup = nstat_generic_provider_lookup; |
3353 | nstat_udp_subflow_provider.nstat_gone = nstat_generic_provider_gone; |
3354 | nstat_udp_subflow_provider.nstat_counts = nstat_generic_provider_counts; |
3355 | nstat_udp_subflow_provider.nstat_release = nstat_generic_provider_release; |
3356 | nstat_udp_subflow_provider.nstat_watcher_add = nstat_generic_provider_add_watcher; |
3357 | nstat_udp_subflow_provider.nstat_watcher_remove = nstat_udp_subflow_remove_watcher; |
3358 | nstat_udp_subflow_provider.nstat_copy_descriptor = nstat_generic_provider_copy_descriptor; |
3359 | nstat_udp_subflow_provider.nstat_reporting_allowed = nstat_generic_provider_reporting_allowed; |
3360 | nstat_udp_subflow_provider.nstat_copy_extension = nstat_generic_extensions; |
3361 | nstat_udp_subflow_provider.next = nstat_providers; |
3362 | nstat_providers = &nstat_udp_subflow_provider; |
3363 | } |
3364 | |
// Things get started with a call from the provider to netstats to say that there's a new source
3366 | __private_extern__ nstat_context |
3367 | nstat_provider_stats_open(nstat_provider_context ctx, |
3368 | int provider_id, |
3369 | u_int64_t properties, |
3370 | nstat_provider_request_vals_fn req_fn, |
3371 | nstat_provider_request_extensions_fn req_extensions_fn) |
3372 | { |
3373 | struct nstat_generic_shadow *gshad; |
3374 | struct nstat_procdetails *procdetails; |
	nstat_provider *provider = nstat_find_provider_by_id(provider_id);
3376 | |
3377 | gshad = kalloc_type(struct nstat_generic_shadow, Z_WAITOK | Z_NOFAIL); |
3378 | |
3379 | procdetails = nstat_retain_curprocdetails(); |
3380 | |
3381 | if (procdetails == NULL) { |
3382 | kfree_type(struct nstat_generic_shadow, gshad); |
3383 | return NULL; |
3384 | } |
3385 | |
3386 | gshad->gshad_getvals_fn = req_fn; |
3387 | gshad->gshad_getextensions_fn = req_extensions_fn; |
3388 | gshad->gshad_provider_context = ctx; |
3389 | gshad->gshad_properties = properties; |
3390 | gshad->gshad_procdetails = procdetails; |
3391 | gshad->gshad_provider = provider_id; |
3392 | gshad->gshad_start_timestamp = mach_continuous_time(); |
3393 | gshad->gshad_refcnt = 0; |
3394 | gshad->gshad_magic = NSTAT_GENERIC_SHADOW_MAGIC; |
3395 | nstat_retain_gshad(gshad); |
3396 | |
	lck_mtx_lock(&nstat_mtx);
3398 | nstat_control_state *state; |
3399 | |
3400 | // Even if there are no watchers, we save the shadow structure |
3401 | TAILQ_INSERT_HEAD(&nstat_gshad_head, gshad, gshad_link); |
3402 | |
3403 | for (state = nstat_controls; state; state = state->ncs_next) { |
3404 | if ((state->ncs_watching & (1 << provider_id)) != 0) { |
3405 | // Does this client want an initial filtering to be made? |
3406 | u_int64_t npf_flags = state->ncs_provider_filters[provider->nstat_provider_id].npf_flags; |
3407 | if (npf_flags & NSTAT_FILTER_INITIAL_PROPERTIES) { |
3408 | npf_flags &= NSTAT_FILTER_IFNET_AND_CONN_FLAGS; |
3409 | if ((npf_flags != 0) && ((npf_flags & properties) == 0)) { |
3410 | // Skip this one |
3411 | // Note - no filtering by pid or UUID supported at this point, for simplicity |
3412 | continue; |
3413 | } |
3414 | } |
3415 | // this client is watching, so link to it. |
3416 | nstat_retain_gshad(gshad); |
			int result = nstat_control_source_add(0, state, provider, gshad);
3418 | if (result != 0) { |
3419 | // There should be some kind of statistics for failures like this. |
3420 | // <rdar://problem/31377195> The kernel ntstat component should keep some |
3421 | // internal counters reflecting operational state for eventual AWD reporting |
3422 | nstat_release_gshad(gshad); |
3423 | } |
3424 | } |
3425 | } |
	lck_mtx_unlock(&nstat_mtx);
3427 | |
3428 | return (nstat_context) gshad; |
3429 | } |
3430 | |
3431 | |
3432 | // When the source is closed, netstats will make one last call on the request functions to retrieve final values |
3433 | __private_extern__ void |
3434 | nstat_provider_stats_close(nstat_context nstat_ctx) |
3435 | { |
3436 | tailq_head_nstat_src dead_list; |
3437 | nstat_src *src; |
3438 | struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)nstat_ctx; |
3439 | |
3440 | if (gshad == NULL) { |
3441 | printf("%s - called with null reference" , __func__); |
3442 | return; |
3443 | } |
3444 | |
3445 | assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC); |
3446 | |
3447 | if (gshad->gshad_magic != NSTAT_GENERIC_SHADOW_MAGIC) { |
3448 | printf("%s - called with incorrect shadow magic 0x%x" , __func__, gshad->gshad_magic); |
3449 | } |
3450 | |
3451 | TAILQ_INIT(&dead_list); |
3452 | |
	lck_mtx_lock(&nstat_mtx);
3454 | |
3455 | TAILQ_REMOVE(&nstat_gshad_head, gshad, gshad_link); |
3456 | |
3457 | int32_t num_srcs = gshad->gshad_refcnt - 1; |
3458 | if ((nstat_generic_provider_watchers[gshad->gshad_provider] != 0) && (num_srcs > 0)) { |
3459 | nstat_control_state *state; |
3460 | errno_t result; |
3461 | |
3462 | for (state = nstat_controls; state; state = state->ncs_next) { |
3463 | // Only scan further if this client is watching |
3464 | if ((state->ncs_watching & (1 << gshad->gshad_provider)) != 0) { |
				lck_mtx_lock(&state->ncs_mtx);
3466 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
3467 | { |
3468 | if ((gshad == (struct nstat_generic_shadow *)src->cookie) && |
3469 | (gshad->gshad_provider == src->provider->nstat_provider_id)) { |
3470 | break; |
3471 | } |
3472 | } |
3473 | if (src) { |
3474 | result = nstat_control_send_goodbye(state, src); |
3475 | // There is currently no recovery possible from failure to send, |
3476 | // so no need to check the return code. |
3477 | // rdar://28312774 (Scalability and resilience issues in ntstat.c) |
3478 | |
3479 | TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); |
3480 | TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link); |
3481 | --num_srcs; |
3482 | } |
				lck_mtx_unlock(&state->ncs_mtx);
3484 | |
				// Performance optimization: stop scanning once all sources have been found
3486 | if (num_srcs == 0) { |
3487 | break; |
3488 | } |
3489 | } |
3490 | } |
3491 | } |
	lck_mtx_unlock(&nstat_mtx);
3493 | |
3494 | while ((src = TAILQ_FIRST(&dead_list))) { |
3495 | TAILQ_REMOVE(&dead_list, src, ns_control_link); |
3496 | nstat_control_cleanup_source(NULL, src, TRUE); |
3497 | } |
3498 | nstat_release_gshad(gshad); |
3499 | } |
3500 | |
3501 | // Events that cause a significant change may be reported via a flags word |
3502 | void |
3503 | nstat_provider_stats_event(__unused nstat_context nstat_ctx, __unused uint64_t event) |
3504 | { |
3505 | nstat_src *src; |
3506 | struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)nstat_ctx; |
3507 | |
3508 | if (gshad == NULL) { |
3509 | printf("%s - called with null reference" , __func__); |
3510 | return; |
3511 | } |
3512 | |
3513 | assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC); |
3514 | |
3515 | if (gshad->gshad_magic != NSTAT_GENERIC_SHADOW_MAGIC) { |
3516 | printf("%s - called with incorrect shadow magic 0x%x" , __func__, gshad->gshad_magic); |
3517 | } |
3518 | |
	lck_mtx_lock(&nstat_mtx);
3520 | |
3521 | if (nstat_generic_provider_watchers[gshad->gshad_provider] != 0) { |
3522 | nstat_control_state *state; |
3523 | errno_t result; |
3524 | nstat_provider_id_t provider_id = gshad->gshad_provider; |
3525 | |
3526 | for (state = nstat_controls; state; state = state->ncs_next) { |
3527 | // Only scan further if this client is watching and has interest in the event |
3528 | // or the client has requested "boring" unchanged status to be ignored |
3529 | if (((state->ncs_watching & (1 << provider_id)) != 0) && |
3530 | (((state->ncs_provider_filters[provider_id].npf_events & event) != 0) || |
3531 | ((state->ncs_provider_filters[provider_id].npf_flags & NSTAT_FILTER_SUPPRESS_BORING_FLAGS) != 0))) { |
				lck_mtx_lock(&state->ncs_mtx);
3533 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
3534 | { |
3535 | if (gshad == (struct nstat_generic_shadow *)src->cookie) { |
3536 | break; |
3537 | } |
3538 | } |
3539 | |
3540 | if (src) { |
3541 | src->ns_reported = false; |
3542 | if ((state->ncs_provider_filters[provider_id].npf_events & event) != 0) { |
3543 | result = nstat_control_send_event(state, src, event); |
3544 | // There is currently no recovery possible from failure to send, |
3545 | // so no need to check the return code. |
3546 | // rdar://28312774 (Scalability and resilience issues in ntstat.c) |
3547 | } |
3548 | } |
				lck_mtx_unlock(&state->ncs_mtx);
3550 | } |
3551 | } |
3552 | } |
	lck_mtx_unlock(&nstat_mtx);
3554 | } |
3555 | |
3556 | #endif /* SKYWALK */ |
3557 | |
3558 | |
3559 | #pragma mark -- ifnet Provider -- |
3560 | |
3561 | static nstat_provider nstat_ifnet_provider; |
3562 | |
3563 | /* |
3564 | * We store a pointer to the ifnet and the original threshold |
3565 | * requested by the client. |
3566 | */ |
3567 | struct nstat_ifnet_cookie { |
3568 | struct ifnet *ifp; |
3569 | uint64_t threshold; |
3570 | }; |
3571 | |
3572 | static errno_t |
3573 | nstat_ifnet_lookup( |
3574 | const void *data, |
3575 | u_int32_t length, |
3576 | nstat_provider_cookie_t *out_cookie) |
3577 | { |
3578 | const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data; |
3579 | struct ifnet *ifp; |
3580 | boolean_t changed = FALSE; |
3581 | nstat_control_state *state; |
3582 | nstat_src *src; |
3583 | struct nstat_ifnet_cookie *cookie; |
3584 | |
3585 | if (length < sizeof(*param) || param->threshold < 1024 * 1024) { |
3586 | return EINVAL; |
3587 | } |
3588 | if (nstat_privcheck != 0) { |
		errno_t result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
3591 | if (result != 0) { |
3592 | return result; |
3593 | } |
3594 | } |
3595 | cookie = kalloc_type(struct nstat_ifnet_cookie, |
3596 | Z_WAITOK | Z_ZERO | Z_NOFAIL); |
3597 | |
3598 | ifnet_head_lock_shared(); |
3599 | TAILQ_FOREACH(ifp, &ifnet_head, if_link) |
3600 | { |
		if (!ifnet_is_attached(ifp, 1)) {
3602 | continue; |
3603 | } |
3604 | ifnet_lock_exclusive(ifp); |
3605 | if (ifp->if_index == param->ifindex) { |
3606 | cookie->ifp = ifp; |
3607 | cookie->threshold = param->threshold; |
3608 | *out_cookie = cookie; |
3609 | if (!ifp->if_data_threshold || |
3610 | ifp->if_data_threshold > param->threshold) { |
3611 | changed = TRUE; |
3612 | ifp->if_data_threshold = param->threshold; |
3613 | } |
3614 | ifnet_lock_done(ifp); |
			ifnet_reference(ifp);
3616 | ifnet_decr_iorefcnt(ifp); |
3617 | break; |
3618 | } |
3619 | ifnet_lock_done(ifp); |
3620 | ifnet_decr_iorefcnt(ifp); |
3621 | } |
3622 | ifnet_head_done(); |
3623 | |
3624 | /* |
3625 | * When we change the threshold to something smaller, we notify |
3626 | * all of our clients with a description message. |
3627 | * We won't send a message to the client we are currently serving |
3628 | * because it has no `ifnet source' yet. |
3629 | */ |
3630 | if (changed) { |
		lck_mtx_lock(&nstat_mtx);
		for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
3634 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
3635 | { |
3636 | if (src->provider != &nstat_ifnet_provider) { |
3637 | continue; |
3638 | } |
				nstat_control_send_description(state, src, 0, 0);
3640 | } |
			lck_mtx_unlock(&state->ncs_mtx);
		}
		lck_mtx_unlock(&nstat_mtx);
3644 | } |
3645 | if (cookie->ifp == NULL) { |
3646 | kfree_type(struct nstat_ifnet_cookie, cookie); |
3647 | } |
3648 | |
3649 | return ifp ? 0 : EINVAL; |
3650 | } |
3651 | |
3652 | static int |
3653 | nstat_ifnet_gone( |
3654 | nstat_provider_cookie_t cookie) |
3655 | { |
3656 | struct ifnet *ifp; |
3657 | struct nstat_ifnet_cookie *ifcookie = |
3658 | (struct nstat_ifnet_cookie *)cookie; |
3659 | |
3660 | ifnet_head_lock_shared(); |
3661 | TAILQ_FOREACH(ifp, &ifnet_head, if_link) |
3662 | { |
3663 | if (ifp == ifcookie->ifp) { |
3664 | break; |
3665 | } |
3666 | } |
3667 | ifnet_head_done(); |
3668 | |
3669 | return ifp ? 0 : 1; |
3670 | } |
3671 | |
3672 | static errno_t |
3673 | nstat_ifnet_counts( |
3674 | nstat_provider_cookie_t cookie, |
3675 | struct nstat_counts *out_counts, |
3676 | int *out_gone) |
3677 | { |
3678 | struct nstat_ifnet_cookie *ifcookie = |
3679 | (struct nstat_ifnet_cookie *)cookie; |
3680 | struct ifnet *ifp = ifcookie->ifp; |
3681 | |
3682 | if (out_gone) { |
3683 | *out_gone = 0; |
3684 | } |
3685 | |
3686 | // if the ifnet is gone, we should stop using it |
3687 | if (nstat_ifnet_gone(cookie)) { |
3688 | if (out_gone) { |
3689 | *out_gone = 1; |
3690 | } |
3691 | return EINVAL; |
3692 | } |
3693 | |
	bzero(out_counts, sizeof(*out_counts));
3695 | out_counts->nstat_rxpackets = ifp->if_ipackets; |
3696 | out_counts->nstat_rxbytes = ifp->if_ibytes; |
3697 | out_counts->nstat_txpackets = ifp->if_opackets; |
3698 | out_counts->nstat_txbytes = ifp->if_obytes; |
3699 | out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0; |
3700 | return 0; |
3701 | } |
3702 | |
3703 | static void |
3704 | nstat_ifnet_release( |
3705 | nstat_provider_cookie_t cookie, |
3706 | __unused int locked) |
3707 | { |
3708 | struct nstat_ifnet_cookie *ifcookie; |
3709 | struct ifnet *ifp; |
3710 | nstat_control_state *state; |
3711 | nstat_src *src; |
3712 | uint64_t minthreshold = UINT64_MAX; |
3713 | |
3714 | /* |
3715 | * Find all the clients that requested a threshold |
3716 | * for this ifnet and re-calculate if_data_threshold. |
3717 | */ |
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
3721 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
3722 | { |
3723 | /* Skip the provider we are about to detach. */ |
3724 | if (src->provider != &nstat_ifnet_provider || |
3725 | src->cookie == cookie) { |
3726 | continue; |
3727 | } |
3728 | ifcookie = (struct nstat_ifnet_cookie *)src->cookie; |
3729 | if (ifcookie->threshold < minthreshold) { |
3730 | minthreshold = ifcookie->threshold; |
3731 | } |
3732 | } |
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
3736 | /* |
3737 | * Reset if_data_threshold or disable it. |
3738 | */ |
3739 | ifcookie = (struct nstat_ifnet_cookie *)cookie; |
3740 | ifp = ifcookie->ifp; |
	if (ifnet_is_attached(ifp, 1)) {
3742 | ifnet_lock_exclusive(ifp); |
3743 | if (minthreshold == UINT64_MAX) { |
3744 | ifp->if_data_threshold = 0; |
3745 | } else { |
3746 | ifp->if_data_threshold = minthreshold; |
3747 | } |
3748 | ifnet_lock_done(ifp); |
3749 | ifnet_decr_iorefcnt(ifp); |
3750 | } |
	ifnet_release(ifp);
3752 | kfree_type(struct nstat_ifnet_cookie, ifcookie); |
3753 | } |
3754 | |
3755 | static void |
3756 | nstat_ifnet_copy_link_status( |
3757 | struct ifnet *ifp, |
3758 | struct nstat_ifnet_descriptor *desc) |
3759 | { |
3760 | struct if_link_status *ifsr = ifp->if_link_status; |
3761 | nstat_ifnet_desc_link_status *link_status = &desc->link_status; |
3762 | |
3763 | link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE; |
3764 | if (ifsr == NULL) { |
3765 | return; |
3766 | } |
3767 | |
	lck_rw_lock_shared(&ifp->if_link_status_lock);
3769 | |
3770 | if (ifp->if_type == IFT_CELLULAR) { |
3771 | nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular; |
3772 | struct if_cellular_status_v1 *if_cell_sr = |
3773 | &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1; |
3774 | |
3775 | if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1) { |
3776 | goto done; |
3777 | } |
3778 | |
3779 | link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR; |
3780 | |
3781 | if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) { |
3782 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID; |
3783 | cell_status->link_quality_metric = if_cell_sr->link_quality_metric; |
3784 | } |
3785 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) { |
3786 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID; |
3787 | cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth; |
3788 | } |
3789 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) { |
3790 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID; |
3791 | cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth; |
3792 | } |
3793 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) { |
3794 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID; |
3795 | cell_status->ul_min_latency = if_cell_sr->ul_min_latency; |
3796 | } |
3797 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) { |
3798 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID; |
3799 | cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency; |
3800 | } |
3801 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) { |
3802 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID; |
3803 | cell_status->ul_max_latency = if_cell_sr->ul_max_latency; |
3804 | } |
3805 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) { |
3806 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID; |
3807 | if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE) { |
3808 | cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE; |
3809 | } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW) { |
3810 | cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW; |
3811 | } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM) { |
3812 | cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM; |
3813 | } else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH) { |
3814 | cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH; |
3815 | } else { |
3816 | cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID; |
3817 | } |
3818 | } |
3819 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) { |
3820 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID; |
3821 | cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost; |
3822 | } |
3823 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) { |
3824 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID; |
3825 | cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size; |
3826 | } |
3827 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) { |
3828 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID; |
3829 | cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size; |
3830 | } |
3831 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) { |
3832 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID; |
3833 | cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size; |
3834 | } |
3835 | if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) { |
3836 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID; |
3837 | cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth; |
3838 | } |
3839 | if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) { |
3840 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID; |
3841 | cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth; |
3842 | } |
3843 | if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) { |
3844 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID; |
3845 | cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time; |
3846 | } |
3847 | if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) { |
3848 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID; |
3849 | cell_status->config_backoff_time = if_cell_sr->config_backoff_time; |
3850 | } |
3851 | if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) { |
3852 | cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID; |
3853 | cell_status->mss_recommended = if_cell_sr->mss_recommended; |
3854 | } |
3855 | } else if (IFNET_IS_WIFI(ifp)) { |
3856 | nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi; |
3857 | struct if_wifi_status_v1 *if_wifi_sr = |
3858 | &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1; |
3859 | |
3860 | if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1) { |
3861 | goto done; |
3862 | } |
3863 | |
3864 | link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI; |
3865 | |
3866 | if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) { |
3867 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID; |
3868 | wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric; |
3869 | } |
3870 | if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) { |
3871 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID; |
3872 | wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth; |
3873 | } |
3874 | if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) { |
3875 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID; |
3876 | wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth; |
3877 | } |
3878 | if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) { |
3879 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID; |
3880 | wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency; |
3881 | } |
3882 | if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) { |
3883 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID; |
3884 | wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency; |
3885 | } |
3886 | if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) { |
3887 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID; |
3888 | wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency; |
3889 | } |
3890 | if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) { |
3891 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID; |
3892 | if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE) { |
3893 | wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE; |
3894 | } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW) { |
3895 | wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW; |
3896 | } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM) { |
3897 | wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM; |
3898 | } else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH) { |
3899 | wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH; |
3900 | } else { |
3901 | wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID; |
3902 | } |
3903 | } |
3904 | if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) { |
3905 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID; |
3906 | wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost; |
3907 | } |
3908 | if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) { |
3909 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID; |
3910 | wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate; |
3911 | } |
3912 | if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) { |
3913 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID; |
3914 | wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth; |
3915 | } |
3916 | if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) { |
3917 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID; |
3918 | wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth; |
3919 | } |
3920 | if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) { |
3921 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID; |
3922 | wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency; |
3923 | } |
3924 | if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) { |
3925 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID; |
3926 | wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency; |
3927 | } |
3928 | if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) { |
3929 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID; |
3930 | wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency; |
3931 | } |
3932 | if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) { |
3933 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID; |
3934 | wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate; |
3935 | } |
3936 | if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) { |
3937 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID; |
3938 | if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ) { |
3939 | wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ; |
3940 | } else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ) { |
3941 | wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ; |
3942 | } else { |
3943 | wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID; |
3944 | } |
3945 | } |
3946 | if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) { |
3947 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID; |
3948 | wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate; |
3949 | } |
3950 | if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) { |
3951 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID; |
3952 | wifi_status->scan_count = if_wifi_sr->scan_count; |
3953 | } |
3954 | if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) { |
3955 | wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID; |
3956 | wifi_status->scan_duration = if_wifi_sr->scan_duration; |
3957 | } |
3958 | } |
3959 | |
3960 | done: |
	lck_rw_done(&ifp->if_link_status_lock);
3962 | } |
3963 | |
3964 | static u_int64_t nstat_ifnet_last_report_time = 0; |
3965 | extern int tcp_report_stats_interval; |
3966 | |
3967 | static void |
3968 | nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst) |
3969 | { |
3970 | /* Retransmit percentage */ |
3971 | if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) { |
3972 | /* shift by 10 for precision */ |
3973 | ifst->rxmit_percent = |
3974 | ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts; |
3975 | } else { |
3976 | ifst->rxmit_percent = 0; |
3977 | } |
3978 | |
3979 | /* Out-of-order percentage */ |
3980 | if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) { |
3981 | /* shift by 10 for precision */ |
3982 | ifst->oo_percent = |
3983 | ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts; |
3984 | } else { |
3985 | ifst->oo_percent = 0; |
3986 | } |
3987 | |
3988 | /* Reorder percentage */ |
3989 | if (ifst->total_reorderpkts > 0 && |
3990 | (ifst->total_txpkts + ifst->total_rxpkts) > 0) { |
3991 | /* shift by 10 for precision */ |
3992 | ifst->reorder_percent = |
3993 | ((ifst->total_reorderpkts << 10) * 100) / |
3994 | (ifst->total_txpkts + ifst->total_rxpkts); |
3995 | } else { |
3996 | ifst->reorder_percent = 0; |
3997 | } |
3998 | } |
3999 | |
4000 | static void |
4001 | nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st) |
4002 | { |
4003 | u_int64_t ecn_on_conn, ecn_off_conn; |
4004 | |
4005 | if (if_st == NULL) { |
4006 | return; |
4007 | } |
4008 | ecn_on_conn = if_st->ecn_client_success + |
4009 | if_st->ecn_server_success; |
4010 | ecn_off_conn = if_st->ecn_off_conn + |
4011 | (if_st->ecn_client_setup - if_st->ecn_client_success) + |
4012 | (if_st->ecn_server_setup - if_st->ecn_server_success); |
4013 | |
4014 | /* |
4015 | * report sack episodes, rst_drop and rxmit_drop |
4016 | * as a ratio per connection, shift by 10 for precision |
4017 | */ |
4018 | if (ecn_on_conn > 0) { |
4019 | if_st->ecn_on.sack_episodes = |
4020 | (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn; |
4021 | if_st->ecn_on.rst_drop = |
4022 | (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn; |
4023 | if_st->ecn_on.rxmit_drop = |
4024 | (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn; |
4025 | } else { |
4026 | /* set to zero, just in case */ |
4027 | if_st->ecn_on.sack_episodes = 0; |
4028 | if_st->ecn_on.rst_drop = 0; |
4029 | if_st->ecn_on.rxmit_drop = 0; |
4030 | } |
4031 | |
4032 | if (ecn_off_conn > 0) { |
4033 | if_st->ecn_off.sack_episodes = |
4034 | (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn; |
4035 | if_st->ecn_off.rst_drop = |
4036 | (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn; |
4037 | if_st->ecn_off.rxmit_drop = |
4038 | (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn; |
4039 | } else { |
4040 | if_st->ecn_off.sack_episodes = 0; |
4041 | if_st->ecn_off.rst_drop = 0; |
4042 | if_st->ecn_off.rxmit_drop = 0; |
4043 | } |
4044 | if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn; |
4045 | } |
4046 | |
4047 | static void |
4048 | nstat_ifnet_report_ecn_stats(void) |
4049 | { |
4050 | u_int64_t uptime, last_report_time; |
4051 | struct nstat_sysinfo_data data; |
4052 | struct nstat_sysinfo_ifnet_ecn_stats *st; |
4053 | struct ifnet *ifp; |
4054 | |
4055 | uptime = net_uptime(); |
4056 | |
4057 | if ((int)(uptime - nstat_ifnet_last_report_time) < |
4058 | tcp_report_stats_interval) { |
4059 | return; |
4060 | } |
4061 | |
4062 | last_report_time = nstat_ifnet_last_report_time; |
4063 | nstat_ifnet_last_report_time = uptime; |
4064 | data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS; |
4065 | st = &data.u.ifnet_ecn_stats; |
4066 | |
4067 | ifnet_head_lock_shared(); |
4068 | TAILQ_FOREACH(ifp, &ifnet_head, if_link) { |
4069 | if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL) { |
4070 | continue; |
4071 | } |
4072 | |
4073 | if (!IF_FULLY_ATTACHED(ifp)) { |
4074 | continue; |
4075 | } |
4076 | |
4077 | /* Limit reporting to Wifi, Ethernet and cellular. */ |
4078 | if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) { |
4079 | continue; |
4080 | } |
4081 | |
		bzero(st, sizeof(*st));
4083 | if (IFNET_IS_CELLULAR(ifp)) { |
4084 | st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR; |
4085 | } else if (IFNET_IS_WIFI(ifp)) { |
4086 | st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI; |
4087 | } else { |
4088 | st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET; |
4089 | } |
4090 | data.unsent_data_cnt = ifp->if_unsent_data_cnt; |
4091 | /* skip if there was no update since last report */ |
4092 | if (ifp->if_ipv4_stat->timestamp <= 0 || |
4093 | ifp->if_ipv4_stat->timestamp < last_report_time) { |
4094 | goto v6; |
4095 | } |
4096 | st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4; |
4097 | /* compute percentages using packet counts */ |
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
		bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);
		bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));
4105 | |
4106 | v6: |
4107 | /* skip if there was no update since last report */ |
4108 | if (ifp->if_ipv6_stat->timestamp <= 0 || |
4109 | ifp->if_ipv6_stat->timestamp < last_report_time) { |
4110 | continue; |
4111 | } |
4112 | st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6; |
4113 | |
4114 | /* compute percentages using packet counts */ |
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
		bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
4124 | } |
4125 | ifnet_head_done(); |
4126 | } |
4127 | |
/* Some thresholds to determine Low Internet mode */
4129 | #define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD 1000000 /* 1 Mbps */ |
4130 | #define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD 500000 /* 500 Kbps */ |
4131 | #define NSTAT_LIM_UL_MIN_RTT_THRESHOLD 1000 /* 1 second */ |
4132 | #define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */ |
4133 | #define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD (2 << 10) /* 2 percent packet loss rate */ |
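
/*
 * The two percentage thresholds above are pre-shifted into the same Q10
 * fixed point produced by nstat_lim_activity_check() below, so they compare
 * directly against the computed *_percent fields without runtime conversion.
 * A worked example (illustrative only), with 25 timeouts out of 100
 * connection attempts:
 */
#if 0 /* illustrative sketch, not compiled */
static void
example_lim_threshold_check(void)
{
	u_int32_t conn_timeouts = 25, conn_attempts = 100;

	/* ((25 << 10) / 100) * 100 == 25600, i.e. 25% in Q10 fixed point */
	u_int32_t timeout_percent = ((conn_timeouts << 10) / conn_attempts) * 100;

	/* 25600 >= (10 << 10), so this attachment would be flagged */
	if (timeout_percent >= NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) {
		printf("Low Internet: %u%% of connections timed out\n",
		    timeout_percent >> 10);
	}
}
#endif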
4134 | |
4135 | static boolean_t |
4136 | nstat_lim_activity_check(struct if_lim_perf_stat *st) |
4137 | { |
4138 | /* check that the current activity is enough to report stats */ |
4139 | if (st->lim_total_txpkts < nstat_lim_min_tx_pkts || |
4140 | st->lim_total_rxpkts < nstat_lim_min_rx_pkts || |
4141 | st->lim_conn_attempts == 0) { |
4142 | return FALSE; |
4143 | } |
4144 | |
4145 | /* |
4146 | * Compute percentages if there was enough activity. Use |
4147 | * shift-left by 10 to preserve precision. |
4148 | */ |
4149 | st->lim_packet_loss_percent = ((st->lim_total_retxpkts << 10) / |
4150 | st->lim_total_txpkts) * 100; |
4151 | |
4152 | st->lim_packet_ooo_percent = ((st->lim_total_oopkts << 10) / |
4153 | st->lim_total_rxpkts) * 100; |
4154 | |
4155 | st->lim_conn_timeout_percent = ((st->lim_conn_timeouts << 10) / |
4156 | st->lim_conn_attempts) * 100; |
4157 | |
4158 | /* |
4159 | * Is Low Internet detected? First order metrics are bandwidth |
4160 | * and RTT. If these metrics are below the minimum thresholds |
4161 | * defined then the network attachment can be classified as |
4162 | * having Low Internet capacity. |
4163 | * |
4164 | * High connection timeout rate also indicates Low Internet |
4165 | * capacity. |
4166 | */ |
4167 | if (st->lim_dl_max_bandwidth > 0 && |
4168 | st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD) { |
4169 | st->lim_dl_detected = 1; |
4170 | } |
4171 | |
4172 | if ((st->lim_ul_max_bandwidth > 0 && |
4173 | st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) || |
4174 | st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD) { |
4175 | st->lim_ul_detected = 1; |
4176 | } |
4177 | |
4178 | if (st->lim_conn_attempts > 20 && |
4179 | st->lim_conn_timeout_percent >= |
4180 | NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD) { |
4181 | st->lim_ul_detected = 1; |
4182 | } |
4183 | /* |
4184 | * Second order metrics: If there was high packet loss even after |
4185 | * using delay based algorithms then we classify it as Low Internet |
4186 | * again |
4187 | */ |
4188 | if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts && |
4189 | st->lim_packet_loss_percent >= |
4190 | NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD) { |
4191 | st->lim_ul_detected = 1; |
4192 | } |
4193 | return TRUE; |
4194 | } |
4195 | |
4196 | static u_int64_t nstat_lim_last_report_time = 0; |
4197 | static void |
4198 | nstat_ifnet_report_lim_stats(void) |
4199 | { |
4200 | u_int64_t uptime; |
4201 | struct nstat_sysinfo_data data; |
4202 | struct nstat_sysinfo_lim_stats *st; |
4203 | struct ifnet *ifp; |
4204 | int err; |
4205 | |
4206 | uptime = net_uptime(); |
4207 | |
4208 | if ((u_int32_t)(uptime - nstat_lim_last_report_time) < |
4209 | nstat_lim_interval) { |
4210 | return; |
4211 | } |
4212 | |
4213 | nstat_lim_last_report_time = uptime; |
4214 | data.flags = NSTAT_SYSINFO_LIM_STATS; |
4215 | st = &data.u.lim_stats; |
4216 | data.unsent_data_cnt = 0; |
4217 | |
4218 | ifnet_head_lock_shared(); |
4219 | TAILQ_FOREACH(ifp, &ifnet_head, if_link) { |
4220 | if (!IF_FULLY_ATTACHED(ifp)) { |
4221 | continue; |
4222 | } |
4223 | |
4224 | /* Limit reporting to Wifi, Ethernet and cellular */ |
4225 | if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp))) { |
4226 | continue; |
4227 | } |
4228 | |
		if (!nstat_lim_activity_check(&ifp->if_lim_stat)) {
4230 | continue; |
4231 | } |
4232 | |
		bzero(st, sizeof(*st));
4234 | st->ifnet_siglen = sizeof(st->ifnet_signature); |
4235 | err = ifnet_get_netsignature(ifp, AF_INET, |
4236 | (u_int8_t *)&st->ifnet_siglen, NULL, |
4237 | st->ifnet_signature); |
4238 | if (err != 0) { |
4239 | err = ifnet_get_netsignature(ifp, AF_INET6, |
4240 | (u_int8_t *)&st->ifnet_siglen, NULL, |
4241 | st->ifnet_signature); |
4242 | if (err != 0) { |
4243 | continue; |
4244 | } |
4245 | } |
4246 | ifnet_lock_shared(ifp); |
4247 | if (IFNET_IS_CELLULAR(ifp)) { |
4248 | st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR; |
4249 | } else if (IFNET_IS_WIFI(ifp)) { |
4250 | st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI; |
4251 | } else { |
4252 | st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET; |
4253 | } |
		bcopy(&ifp->if_lim_stat, &st->lim_stat,
		    sizeof(st->lim_stat));

		/* Zero the stats in ifp */
		bzero(&ifp->if_lim_stat, sizeof(ifp->if_lim_stat));
4259 | ifnet_lock_done(ifp); |
4260 | nstat_sysinfo_send_data(&data); |
4261 | } |
4262 | ifnet_head_done(); |
4263 | } |
4264 | |
4265 | static errno_t |
4266 | nstat_ifnet_copy_descriptor( |
4267 | nstat_provider_cookie_t cookie, |
4268 | void *data, |
4269 | size_t len) |
4270 | { |
4271 | nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data; |
4272 | struct nstat_ifnet_cookie *ifcookie = |
4273 | (struct nstat_ifnet_cookie *)cookie; |
4274 | struct ifnet *ifp = ifcookie->ifp; |
4275 | |
4276 | if (len < sizeof(nstat_ifnet_descriptor)) { |
4277 | return EINVAL; |
4278 | } |
4279 | |
4280 | if (nstat_ifnet_gone(cookie)) { |
4281 | return EINVAL; |
4282 | } |
4283 | |
	bzero(desc, sizeof(*desc));
	ifnet_lock_shared(ifp);
	strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
4287 | desc->ifindex = ifp->if_index; |
4288 | desc->threshold = ifp->if_data_threshold; |
4289 | desc->type = ifp->if_type; |
4290 | if (ifp->if_desc.ifd_len < sizeof(desc->description)) { |
		memcpy(desc->description, ifp->if_desc.ifd_desc,
		    sizeof(desc->description));
4293 | } |
4294 | nstat_ifnet_copy_link_status(ifp, desc); |
4295 | ifnet_lock_done(ifp); |
4296 | return 0; |
4297 | } |
4298 | |
4299 | static bool |
4300 | nstat_ifnet_cookie_equal( |
4301 | nstat_provider_cookie_t cookie1, |
4302 | nstat_provider_cookie_t cookie2) |
4303 | { |
4304 | struct nstat_ifnet_cookie *c1 = (struct nstat_ifnet_cookie *)cookie1; |
4305 | struct nstat_ifnet_cookie *c2 = (struct nstat_ifnet_cookie *)cookie2; |
4306 | |
	return c1->ifp->if_index == c2->ifp->if_index;
4308 | } |
4309 | |
4310 | static void |
4311 | nstat_init_ifnet_provider(void) |
4312 | { |
	bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
4314 | nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET; |
4315 | nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor); |
4316 | nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup; |
4317 | nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone; |
4318 | nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts; |
4319 | nstat_ifnet_provider.nstat_watcher_add = NULL; |
4320 | nstat_ifnet_provider.nstat_watcher_remove = NULL; |
4321 | nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor; |
4322 | nstat_ifnet_provider.nstat_cookie_equal = nstat_ifnet_cookie_equal; |
4323 | nstat_ifnet_provider.nstat_release = nstat_ifnet_release; |
4324 | nstat_ifnet_provider.next = nstat_providers; |
4325 | nstat_providers = &nstat_ifnet_provider; |
4326 | } |
4327 | |
4328 | __private_extern__ void |
4329 | nstat_ifnet_threshold_reached(unsigned int ifindex) |
4330 | { |
4331 | nstat_control_state *state; |
4332 | nstat_src *src; |
4333 | struct ifnet *ifp; |
4334 | struct nstat_ifnet_cookie *ifcookie; |
4335 | |
	lck_mtx_lock(&nstat_mtx);
4337 | for (state = nstat_controls; state; state = state->ncs_next) { |
		lck_mtx_lock(&state->ncs_mtx);
4339 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
4340 | { |
4341 | if (src->provider != &nstat_ifnet_provider) { |
4342 | continue; |
4343 | } |
4344 | ifcookie = (struct nstat_ifnet_cookie *)src->cookie; |
4345 | ifp = ifcookie->ifp; |
4346 | if (ifp->if_index != ifindex) { |
4347 | continue; |
4348 | } |
4349 | nstat_control_send_counts(state, src, 0, 0, NULL); |
4350 | } |
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
4354 | } |
4355 | |
4356 | #pragma mark -- Sysinfo -- |
4357 | static void |
4358 | nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val) |
4359 | { |
4360 | kv->nstat_sysinfo_key = key; |
4361 | kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR; |
4362 | kv->u.nstat_sysinfo_scalar = val; |
4363 | kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar); |
4364 | } |
4365 | |
4366 | static void |
4367 | nstat_set_keyval_u64_scalar(nstat_sysinfo_keyval *kv, int key, u_int64_t val) |
4368 | { |
4369 | kv->nstat_sysinfo_key = key; |
4370 | kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR; |
4371 | kv->u.nstat_sysinfo_scalar = val; |
4372 | kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar); |
4373 | } |
4374 | |
4375 | static void |
4376 | nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf, |
4377 | u_int32_t len) |
4378 | { |
4379 | kv->nstat_sysinfo_key = key; |
4380 | kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_STRING; |
	kv->nstat_sysinfo_valsize = min(len,
	    NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE);
	bcopy(buf, kv->u.nstat_sysinfo_string, kv->nstat_sysinfo_valsize);
4384 | } |
4385 | |
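/*
 * Build and enqueue one sysinfo counts message for a single client.
 * The message is sized in two steps: first the key-value count for the
 * requested report type determines the allocation, then the keyvals are
 * filled in and the final length is trimmed to the number actually set.
 */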
static void
nstat_sysinfo_send_data_internal(
    nstat_control_state *control,
    nstat_sysinfo_data *data)
{
    nstat_msg_sysinfo_counts *syscnt = NULL;
    size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
    nstat_sysinfo_keyval *kv;
    errno_t result = 0;
    size_t i = 0;

    allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
    countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
    finalsize = allocsize;

    /* get number of key-vals for each kind of stat */
    switch (data->flags) {
    case NSTAT_SYSINFO_TCP_STATS:
        nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT;
        break;
    case NSTAT_SYSINFO_IFNET_ECN_STATS:
        nkeyvals = (sizeof(struct if_tcp_ecn_stat) / sizeof(u_int64_t));

        /* Two more keys for ifnet type and proto */
        nkeyvals += 2;

        /* One key for unsent data. */
        nkeyvals++;
        break;
    case NSTAT_SYSINFO_LIM_STATS:
        nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT;
        break;
    case NSTAT_SYSINFO_NET_API_STATS:
        nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT;
        break;
    default:
        return;
    }
    countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
    allocsize += countsize;

    syscnt = (nstat_msg_sysinfo_counts *)kalloc_data(allocsize,
        Z_WAITOK | Z_ZERO);
    if (syscnt == NULL) {
        return;
    }

    kv = (nstat_sysinfo_keyval *)&syscnt->counts.nstat_sysinfo_keyvals;
    switch (data->flags) {
    case NSTAT_SYSINFO_TCP_STATS:
    {
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_IPV4_AVGRTT, data->u.tcp_stats.ipv4_avgrtt);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_IPV6_AVGRTT, data->u.tcp_stats.ipv6_avgrtt);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SEND_PLR, data->u.tcp_stats.send_plr);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_RECV_PLR, data->u.tcp_stats.recv_plr);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SEND_TLRTO, data->u.tcp_stats.send_tlrto_rate);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_KEY_SEND_REORDERRATE, data->u.tcp_stats.send_reorder_rate);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_CONNECTION_ATTEMPTS, data->u.tcp_stats.connection_attempts);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_CONNECTION_ACCEPTS, data->u.tcp_stats.connection_accepts);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CLIENT_ENABLED, data->u.tcp_stats.ecn_client_enabled);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SERVER_ENABLED, data->u.tcp_stats.ecn_server_enabled);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CLIENT_SETUP, data->u.tcp_stats.ecn_client_setup);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SERVER_SETUP, data->u.tcp_stats.ecn_server_setup);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CLIENT_SUCCESS, data->u.tcp_stats.ecn_client_success);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SERVER_SUCCESS, data->u.tcp_stats.ecn_server_success);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_NOT_SUPPORTED, data->u.tcp_stats.ecn_not_supported);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_LOST_SYN, data->u.tcp_stats.ecn_lost_syn);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_LOST_SYNACK, data->u.tcp_stats.ecn_lost_synack);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_RECV_CE, data->u.tcp_stats.ecn_recv_ce);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_RECV_ECE, data->u.tcp_stats.ecn_recv_ece);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_SENT_ECE, data->u.tcp_stats.ecn_sent_ece);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_RECV_CE, data->u.tcp_stats.ecn_conn_recv_ce);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_RECV_ECE, data->u.tcp_stats.ecn_conn_recv_ece);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_PLNOCE, data->u.tcp_stats.ecn_conn_plnoce);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_PL_CE, data->u.tcp_stats.ecn_conn_pl_ce);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_CONN_NOPL_CE, data->u.tcp_stats.ecn_conn_nopl_ce);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS, data->u.tcp_stats.ecn_fallback_synloss);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_FALLBACK_REORDER, data->u.tcp_stats.ecn_fallback_reorder);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_FALLBACK_CE, data->u.tcp_stats.ecn_fallback_ce);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_DATA_RCV, data->u.tcp_stats.tfo_syn_data_rcv);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV, data->u.tcp_stats.tfo_cookie_req_rcv);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_SENT, data->u.tcp_stats.tfo_cookie_sent);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_INVALID, data->u.tcp_stats.tfo_cookie_invalid);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_REQ, data->u.tcp_stats.tfo_cookie_req);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_RCV, data->u.tcp_stats.tfo_cookie_rcv);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_DATA_SENT, data->u.tcp_stats.tfo_syn_data_sent);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_DATA_ACKED, data->u.tcp_stats.tfo_syn_data_acked);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SYN_LOSS, data->u.tcp_stats.tfo_syn_loss);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_BLACKHOLE, data->u.tcp_stats.tfo_blackhole);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_COOKIE_WRONG, data->u.tcp_stats.tfo_cookie_wrong);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_NO_COOKIE_RCV, data->u.tcp_stats.tfo_no_cookie_rcv);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE, data->u.tcp_stats.tfo_heuristics_disable);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_TFO_SEND_BLACKHOLE, data->u.tcp_stats.tfo_sndblackhole);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT, data->u.tcp_stats.mptcp_handover_attempt);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT, data->u.tcp_stats.mptcp_interactive_attempt);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT, data->u.tcp_stats.mptcp_aggregate_attempt);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT, data->u.tcp_stats.mptcp_fp_handover_attempt);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT, data->u.tcp_stats.mptcp_fp_interactive_attempt);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT, data->u.tcp_stats.mptcp_fp_aggregate_attempt);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK, data->u.tcp_stats.mptcp_heuristic_fallback);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK, data->u.tcp_stats.mptcp_fp_heuristic_fallback);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI, data->u.tcp_stats.mptcp_handover_success_wifi);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL, data->u.tcp_stats.mptcp_handover_success_cell);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS, data->u.tcp_stats.mptcp_interactive_success);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS, data->u.tcp_stats.mptcp_aggregate_success);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI, data->u.tcp_stats.mptcp_fp_handover_success_wifi);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL, data->u.tcp_stats.mptcp_fp_handover_success_cell);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS, data->u.tcp_stats.mptcp_fp_interactive_success);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS, data->u.tcp_stats.mptcp_fp_aggregate_success);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI, data->u.tcp_stats.mptcp_handover_cell_from_wifi);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL, data->u.tcp_stats.mptcp_handover_wifi_from_cell);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI, data->u.tcp_stats.mptcp_interactive_cell_from_wifi);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES, data->u.tcp_stats.mptcp_handover_cell_bytes);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES, data->u.tcp_stats.mptcp_interactive_cell_bytes);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES, data->u.tcp_stats.mptcp_aggregate_cell_bytes);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES, data->u.tcp_stats.mptcp_handover_all_bytes);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES, data->u.tcp_stats.mptcp_interactive_all_bytes);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES, data->u.tcp_stats.mptcp_aggregate_all_bytes);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI, data->u.tcp_stats.mptcp_back_to_wifi);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_WIFI_PROXY, data->u.tcp_stats.mptcp_wifi_proxy);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_CELL_PROXY, data->u.tcp_stats.mptcp_cell_proxy);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL, data->u.tcp_stats.mptcp_triggered_cell);
        VERIFY(i == nkeyvals);
        break;
    }
    case NSTAT_SYSINFO_IFNET_ECN_STATS:
    {
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_TYPE, data->u.ifnet_ecn_stats.ifnet_type);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_PROTO, data->u.ifnet_ecn_stats.ifnet_proto);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP, data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP, data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS, data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS, data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT, data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SYN_LOST, data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST, data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_RECV_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_RECV_ECE, data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE, data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST, data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST, data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN, data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_IFNET_UNSENT_DATA, data->unsent_data_cnt);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST, data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst);
        break;
    }
    case NSTAT_SYSINFO_LIM_STATS:
    {
        nstat_set_keyval_string(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_SIGNATURE, data->u.lim_stats.ifnet_signature, data->u.lim_stats.ifnet_siglen);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH, data->u.lim_stats.lim_stat.lim_dl_max_bandwidth);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH, data->u.lim_stats.lim_stat.lim_ul_max_bandwidth);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT, data->u.lim_stats.lim_stat.lim_packet_loss_percent);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT, data->u.lim_stats.lim_stat.lim_packet_ooo_percent);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE, data->u.lim_stats.lim_stat.lim_rtt_variance);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_RTT_MIN, data->u.lim_stats.lim_stat.lim_rtt_min);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_RTT_AVG, data->u.lim_stats.lim_stat.lim_rtt_average);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT, data->u.lim_stats.lim_stat.lim_conn_timeout_percent);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED, data->u.lim_stats.lim_stat.lim_dl_detected);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED, data->u.lim_stats.lim_stat.lim_ul_detected);
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_LIM_IFNET_TYPE, data->u.lim_stats.ifnet_type);
        break;
    }
    case NSTAT_SYSINFO_NET_API_STATS:
    {
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IF_FLTR_ATTACH, data->u.net_api_stats.net_api_stats.nas_iflt_attach_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS, data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IP_FLTR_ADD, data->u.net_api_stats.net_api_stats.nas_ipf_add_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IP_FLTR_ADD_OS, data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH, data->u.net_api_stats.net_api_stats.nas_sfltr_register_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS, data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total);

        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL, data->u.net_api_stats.net_api_stats.nas_socket_alloc_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL, data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS, data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID, data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total);

        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL, data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE, data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_INET, data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6, data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM, data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH, data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY, data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV, data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER, data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total);

        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_STREAM, data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA, data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data);

        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_STREAM, data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA, data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data);

        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN, data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS, data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total);

        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total);

        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM, data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total);

        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IFNET_ALLOC, data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_IFNET_ALLOC_OS, data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total);

        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_PF_ADDRULE, data->u.net_api_stats.net_api_stats.nas_pf_addrule_total);
        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_PF_ADDRULE_OS, data->u.net_api_stats.net_api_stats.nas_pf_addrule_os);

        nstat_set_keyval_u64_scalar(&kv[i++], NSTAT_SYSINFO_API_VMNET_START, data->u.net_api_stats.net_api_stats.nas_vmnet_total);

#if SKYWALK
        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_API_IF_NETAGENT_ENABLED, if_is_fsw_transport_netagent_enabled());
#endif /* SKYWALK */

        nstat_set_keyval_scalar(&kv[i++], NSTAT_SYSINFO_API_REPORT_INTERVAL, data->u.net_api_stats.report_interval);

        break;
    }
    }
    if (syscnt != NULL) {
        VERIFY(i > 0 && i <= nkeyvals);
        countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals) +
            sizeof(nstat_sysinfo_keyval) * i;
        finalsize += countsize;
        syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
        assert(finalsize <= MAX_NSTAT_MSG_HDR_LENGTH);
        syscnt->hdr.length = (u_int16_t)finalsize;
        syscnt->counts.nstat_sysinfo_len = (u_int32_t)countsize;

        result = ctl_enqueuedata(control->ncs_kctl, control->ncs_unit,
            syscnt, finalsize, CTL_DATA_EOR);
        if (result != 0) {
            nstat_stats.nstat_sysinfofailures += 1;
        }
        kfree_data(syscnt, allocsize);
    }
    return;
}

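/*
 * Fan a sysinfo report out to every kernel-control client that has
 * subscribed to sysinfo messages, holding the per-state mutex while each
 * client's message is built and enqueued.
 */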
__private_extern__ void
nstat_sysinfo_send_data(
    nstat_sysinfo_data *data)
{
    nstat_control_state *control;

    lck_mtx_lock(&nstat_mtx);
    for (control = nstat_controls; control; control = control->ncs_next) {
        lck_mtx_lock(&control->ncs_mtx);
        if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
            nstat_sysinfo_send_data_internal(control, data);
        }
        lck_mtx_unlock(&control->ncs_mtx);
    }
    lck_mtx_unlock(&nstat_mtx);
}

static void
nstat_sysinfo_generate_report(void)
{
    tcp_report_stats();
    nstat_ifnet_report_ecn_stats();
    nstat_ifnet_report_lim_stats();
    nstat_net_api_report_stats();
}

#pragma mark -- net_api --

static struct net_api_stats net_api_stats_before;
static u_int64_t net_api_stats_last_report_time;

static void
nstat_net_api_report_stats(void)
{
    struct nstat_sysinfo_data data;
    struct nstat_sysinfo_net_api_stats *st = &data.u.net_api_stats;
    u_int64_t uptime;

    uptime = net_uptime();

    if ((u_int32_t)(uptime - net_api_stats_last_report_time) <
        net_api_stats_report_interval) {
        return;
    }

    st->report_interval = (u_int32_t)(uptime - net_api_stats_last_report_time);
    net_api_stats_last_report_time = uptime;

    data.flags = NSTAT_SYSINFO_NET_API_STATS;
    data.unsent_data_cnt = 0;

    /*
     * Some of the fields in the report are the current value and
     * other fields are the delta from the last report:
     * - Report the difference for the per-flow counters, as they increase
     *   over time
     * - Report the current value for the other counters, as they tend not
     *   to change much over time
     */
#define STATCOPY(f) \
    (st->net_api_stats.f = net_api_stats.f)
#define STATDIFF(f) \
    (st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)

    STATCOPY(nas_iflt_attach_count);
    STATCOPY(nas_iflt_attach_total);
    STATCOPY(nas_iflt_attach_os_total);

    STATCOPY(nas_ipf_add_count);
    STATCOPY(nas_ipf_add_total);
    STATCOPY(nas_ipf_add_os_total);

    STATCOPY(nas_sfltr_register_count);
    STATCOPY(nas_sfltr_register_total);
    STATCOPY(nas_sfltr_register_os_total);

    STATDIFF(nas_socket_alloc_total);
    STATDIFF(nas_socket_in_kernel_total);
    STATDIFF(nas_socket_in_kernel_os_total);
    STATDIFF(nas_socket_necp_clientuuid_total);

    STATDIFF(nas_socket_domain_local_total);
    STATDIFF(nas_socket_domain_route_total);
    STATDIFF(nas_socket_domain_inet_total);
    STATDIFF(nas_socket_domain_inet6_total);
    STATDIFF(nas_socket_domain_system_total);
    STATDIFF(nas_socket_domain_multipath_total);
    STATDIFF(nas_socket_domain_key_total);
    STATDIFF(nas_socket_domain_ndrv_total);
    STATDIFF(nas_socket_domain_other_total);

    STATDIFF(nas_socket_inet_stream_total);
    STATDIFF(nas_socket_inet_dgram_total);
    STATDIFF(nas_socket_inet_dgram_connected);
    STATDIFF(nas_socket_inet_dgram_dns);
    STATDIFF(nas_socket_inet_dgram_no_data);

    STATDIFF(nas_socket_inet6_stream_total);
    STATDIFF(nas_socket_inet6_dgram_total);
    STATDIFF(nas_socket_inet6_dgram_connected);
    STATDIFF(nas_socket_inet6_dgram_dns);
    STATDIFF(nas_socket_inet6_dgram_no_data);

    STATDIFF(nas_socket_mcast_join_total);
    STATDIFF(nas_socket_mcast_join_os_total);

    STATDIFF(nas_sock_inet6_stream_exthdr_in);
    STATDIFF(nas_sock_inet6_stream_exthdr_out);
    STATDIFF(nas_sock_inet6_dgram_exthdr_in);
    STATDIFF(nas_sock_inet6_dgram_exthdr_out);

    STATDIFF(nas_nx_flow_inet_stream_total);
    STATDIFF(nas_nx_flow_inet_dgram_total);

    STATDIFF(nas_nx_flow_inet6_stream_total);
    STATDIFF(nas_nx_flow_inet6_dgram_total);

    STATCOPY(nas_ifnet_alloc_count);
    STATCOPY(nas_ifnet_alloc_total);
    STATCOPY(nas_ifnet_alloc_os_count);
    STATCOPY(nas_ifnet_alloc_os_total);

    STATCOPY(nas_pf_addrule_total);
    STATCOPY(nas_pf_addrule_os);

    STATCOPY(nas_vmnet_total);

#undef STATCOPY
#undef STATDIFF

    nstat_sysinfo_send_data(&data);

    /*
     * Save a copy of the current fields so we can diff them the next time.
     */
    memcpy(&net_api_stats_before, &net_api_stats,
        sizeof(struct net_api_stats));
    _CASSERT(sizeof(net_api_stats_before) == sizeof(net_api_stats));
}


#pragma mark -- Kernel Control Socket --

static kern_ctl_ref nstat_ctlref = NULL;

static errno_t nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
static errno_t nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
static errno_t nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);

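/*
 * Enqueue a simple success reply for a client request; the enqueue is
 * flagged CTL_DATA_CRIT so the control socket treats it as critical data.
 */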
static errno_t
nstat_enqueue_success(
    uint64_t context,
    nstat_control_state *state,
    u_int16_t flags)
{
    nstat_msg_hdr success;
    errno_t result;

    bzero(&success, sizeof(success));
    success.context = context;
    success.type = NSTAT_MSG_TYPE_SUCCESS;
    success.length = sizeof(success);
    success.flags = flags;
    result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
        sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
    if (result != 0) {
        if (nstat_debug != 0) {
            printf("%s: could not enqueue success message %d\n",
                __func__, result);
        }
        nstat_stats.nstat_successmsgfailures += 1;
    }
    return result;
}

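/*
 * Deliver an event notification for a source. Events are only carried in
 * update messages, so a client that never declared support for updates
 * receives nothing and the call reports ENOTSUP.
 */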
static errno_t
nstat_control_send_event(
    nstat_control_state *state,
    nstat_src *src,
    u_int64_t event)
{
    errno_t result = ENOTSUP;

    if (nstat_control_reporting_allowed(state, src, 0)) {
        if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
            result = nstat_control_send_update(state, src, 0, event, 0, NULL);
            if (result != 0 && nstat_debug != 0) {
                printf("%s - nstat_control_send_update() %d\n", __func__, result);
            }
        } else if (nstat_debug != 0) {
            printf("%s - nstat_control_send_event() used when updates not supported\n", __func__);
        }
    }
    return result;
}

static errno_t
nstat_control_send_goodbye(
    nstat_control_state *state,
    nstat_src *src)
{
    errno_t result = 0;
    int failed = 0;
    u_int16_t hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_FILTER;

    if (nstat_control_reporting_allowed(state, src,
        (src->ns_reported) ? NSTAT_FILTER_SUPPRESS_BORING_CLOSE : 0)) {
        hdr_flags = 0;
        if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0) {
            result = nstat_control_send_update(state, src, 0, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
            if (result != 0) {
                failed = 1;
                hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
                if (nstat_debug != 0) {
                    printf("%s - nstat_control_send_update() %d\n", __func__, result);
                }
            }
        } else {
            // send one last counts notification
            result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
            if (result != 0) {
                failed = 1;
                hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
                if (nstat_debug != 0) {
                    printf("%s - nstat_control_send_counts() %d\n", __func__, result);
                }
            }

            // send a last description
            result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
            if (result != 0) {
                failed = 1;
                hdr_flags = NSTAT_MSG_HDR_FLAG_CLOSED_AFTER_DROP;
                if (nstat_debug != 0) {
                    printf("%s - nstat_control_send_description() %d\n", __func__, result);
                }
            }
        }
    }

    // send the source removed notification
    result = nstat_control_send_removed(state, src, hdr_flags);
    if (result != 0) {
        failed = 1;
        if (nstat_debug != 0) {
            printf("%s - nstat_control_send_removed() %d\n", __func__, result);
        }
    }

    if (failed != 0) {
        nstat_stats.nstat_control_send_goodbye_failures++;
    }

    return result;
}

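/*
 * Push any accumulated messages out to the client as a single mbuf chain.
 * On an enqueue failure the mbuf is freed and the failure counted; either
 * way the accumulation buffer is reset.
 */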
static errno_t
nstat_flush_accumulated_msgs(
    nstat_control_state *state)
{
    errno_t result = 0;
    if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0) {
        mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
        result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
        if (result != 0) {
            nstat_stats.nstat_flush_accumulated_msgs_failures++;
            if (nstat_debug != 0) {
                printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
            }
            mbuf_freem(state->ncs_accumulated);
        }
        state->ncs_accumulated = NULL;
    }
    return result;
}

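/*
 * Batch small messages into one accumulation mbuf to reduce the number of
 * enqueue operations. The buffer is flushed first if the new message does
 * not fit; if the copy into the mbuf fails, the message falls back to a
 * direct ctl_enqueuedata() so it is not lost.
 */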
static errno_t
nstat_accumulate_msg(
    nstat_control_state *state,
    nstat_msg_hdr *hdr,
    size_t length)
{
    assert(length <= MAX_NSTAT_MSG_HDR_LENGTH);

    if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length) {
        // Will send the current mbuf
        nstat_flush_accumulated_msgs(state);
    }

    errno_t result = 0;

    if (state->ncs_accumulated == NULL) {
        unsigned int one = 1;
        if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0) {
            if (nstat_debug != 0) {
                printf("%s - mbuf_allocpacket failed\n", __func__);
            }
            result = ENOMEM;
        } else {
            mbuf_setlen(state->ncs_accumulated, 0);
        }
    }

    if (result == 0) {
        hdr->length = (u_int16_t)length;
        result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
            length, hdr, MBUF_DONTWAIT);
    }

    if (result != 0) {
        nstat_flush_accumulated_msgs(state);
        if (nstat_debug != 0) {
            printf("%s - resorting to ctl_enqueuedata\n", __func__);
        }
        result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
    }

    if (result != 0) {
        nstat_stats.nstat_accumulate_msg_failures += 1;
    }

    return result;
}

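/*
 * Periodic housekeeping, run from a thread call roughly once a minute
 * while any client is connected: sources whose underlying objects are
 * gone get a goodbye message and are torn down outside the locks, and
 * any pending system-level reports are generated.
 */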
static void
nstat_idle_check(
    __unused thread_call_param_t p0,
    __unused thread_call_param_t p1)
{
    nstat_control_state *control;
    nstat_src *src, *tmpsrc;
    tailq_head_nstat_src dead_list;
    TAILQ_INIT(&dead_list);

    lck_mtx_lock(&nstat_mtx);

    nstat_idle_time = 0;

    for (control = nstat_controls; control; control = control->ncs_next) {
        lck_mtx_lock(&control->ncs_mtx);
        if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS)) {
            TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
            {
                if (src->provider->nstat_gone(src->cookie)) {
                    errno_t result;

                    // Pull it off the list
                    TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);

                    result = nstat_control_send_goodbye(control, src);

                    // Put this on the list to release later
                    TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
                }
            }
        }
        control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
        lck_mtx_unlock(&control->ncs_mtx);
    }

    if (nstat_controls) {
        clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
        thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
    }

    lck_mtx_unlock(&nstat_mtx);

    /* Generate any system level reports, if needed */
    nstat_sysinfo_generate_report();

    // Release the sources now that we aren't holding lots of locks
    while ((src = TAILQ_FIRST(&dead_list))) {
        TAILQ_REMOVE(&dead_list, src, ns_control_link);
        nstat_control_cleanup_source(NULL, src, FALSE);
    }

    nstat_prune_procdetails();
}

static void
nstat_control_register(void)
{
    // Register the control
    struct kern_ctl_reg nstat_control;
    bzero(&nstat_control, sizeof(nstat_control));
    strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
    nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
    nstat_control.ctl_sendsize = nstat_sendspace;
    nstat_control.ctl_recvsize = nstat_recvspace;
    nstat_control.ctl_connect = nstat_control_connect;
    nstat_control.ctl_disconnect = nstat_control_disconnect;
    nstat_control.ctl_send = nstat_control_send;

    ctl_register(&nstat_control, &nstat_ctlref);
}

static void
nstat_control_cleanup_source(
    nstat_control_state *state,
    struct nstat_src *src,
    boolean_t locked)
{
    errno_t result;

    if (state) {
        result = nstat_control_send_removed(state, src, 0);
        if (result != 0) {
            nstat_stats.nstat_control_cleanup_source_failures++;
            if (nstat_debug != 0) {
                printf("%s - nstat_control_send_removed() %d\n",
                    __func__, result);
            }
        }
    }
    // Cleanup the source if we found it.
    src->provider->nstat_release(src->cookie, locked);
    kfree_type(struct nstat_src, src);
}

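/*
 * A provider may veto reporting for a source, for example when the
 * client's per-provider filter excludes it; providers without a
 * reporting_allowed callback always report.
 */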
static bool
nstat_control_reporting_allowed(
    nstat_control_state *state,
    nstat_src *src,
    u_int64_t suppression_flags)
{
    if (src->provider->nstat_reporting_allowed == NULL) {
        return TRUE;
    }

    return src->provider->nstat_reporting_allowed(src->cookie,
        &state->ncs_provider_filters[src->provider->nstat_provider_id], suppression_flags);
}

static errno_t
nstat_control_connect(
    kern_ctl_ref kctl,
    struct sockaddr_ctl *sac,
    void **uinfo)
{
    nstat_control_state *state = kalloc_type(nstat_control_state,
        Z_WAITOK | Z_ZERO);
    if (state == NULL) {
        return ENOMEM;
    }

    lck_mtx_init(&state->ncs_mtx, &nstat_lck_grp, NULL);
    state->ncs_kctl = kctl;
    state->ncs_unit = sac->sc_unit;
    state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
    state->ncs_procdetails = nstat_retain_curprocdetails();
    *uinfo = state;

    lck_mtx_lock(&nstat_mtx);
    state->ncs_next = nstat_controls;
    nstat_controls = state;

    if (nstat_idle_time == 0) {
        clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
        thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
    }

    merge_current_event_filters();
    lck_mtx_unlock(&nstat_mtx);

    return 0;
}

static errno_t
nstat_control_disconnect(
    __unused kern_ctl_ref kctl,
    __unused u_int32_t unit,
    void *uinfo)
{
    u_int32_t watching;
    nstat_control_state *state = (nstat_control_state*)uinfo;
    tailq_head_nstat_src cleanup_list;
    nstat_src *src;

    TAILQ_INIT(&cleanup_list);

    // pull it out of the global list of states
    lck_mtx_lock(&nstat_mtx);
    nstat_control_state **statepp;
    for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next) {
        if (*statepp == state) {
            *statepp = state->ncs_next;
            break;
        }
    }
    merge_current_event_filters();
    lck_mtx_unlock(&nstat_mtx);

    lck_mtx_lock(&state->ncs_mtx);
    // Stop watching for sources
    nstat_provider *provider;
    watching = state->ncs_watching;
    state->ncs_watching = 0;
    for (provider = nstat_providers; provider && watching; provider = provider->next) {
        if ((watching & (1 << provider->nstat_provider_id)) != 0) {
            watching &= ~(1 << provider->nstat_provider_id);
            provider->nstat_watcher_remove(state);
        }
    }

    // set cleanup flags
    state->ncs_flags |= NSTAT_FLAG_CLEANUP;

    if (state->ncs_accumulated) {
        mbuf_freem(state->ncs_accumulated);
        state->ncs_accumulated = NULL;
    }

    // Copy out the list of sources
    TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
    lck_mtx_unlock(&state->ncs_mtx);

    while ((src = TAILQ_FIRST(&cleanup_list))) {
        TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
        nstat_control_cleanup_source(NULL, src, FALSE);
    }

    lck_mtx_destroy(&state->ncs_mtx, &nstat_lck_grp);
    nstat_release_procdetails(state->ncs_procdetails);
    kfree_type(struct nstat_control_state, state);

    return 0;
}

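/*
 * Source references are handed out from a simple per-state counter; they
 * only need to be unique within one control connection.
 */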
static nstat_src_ref_t
nstat_control_next_src_ref(
    nstat_control_state *state)
{
    return ++state->ncs_next_srcref;
}

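/*
 * Send a counts message for one source. Sources filtered with
 * NSTAT_FILTER_NOZEROBYTES suppress the message when no bytes have moved
 * in either direction; that suppression is reported to the caller as
 * EAGAIN.
 */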
static errno_t
nstat_control_send_counts(
    nstat_control_state *state,
    nstat_src *src,
    unsigned long long context,
    u_int16_t hdr_flags,
    int *gone)
{
    nstat_msg_src_counts counts;
    errno_t result = 0;

    /* Some providers may not have any counts to send */
    if (src->provider->nstat_counts == NULL) {
        return 0;
    }

    bzero(&counts, sizeof(counts));
    counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
    counts.hdr.length = sizeof(counts);
    counts.hdr.flags = hdr_flags;
    counts.hdr.context = context;
    counts.srcref = src->srcref;
    counts.event_flags = 0;

    if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0) {
        if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
            counts.counts.nstat_rxbytes == 0 &&
            counts.counts.nstat_txbytes == 0) {
            result = EAGAIN;
        } else {
            result = ctl_enqueuedata(state->ncs_kctl,
                state->ncs_unit, &counts, sizeof(counts),
                CTL_DATA_EOR);
            if (result != 0) {
                nstat_stats.nstat_sendcountfailures += 1;
            }
        }
    }
    return result;
}

static errno_t
nstat_control_append_counts(
    nstat_control_state *state,
    nstat_src *src,
    int *gone)
{
    /* Some providers may not have any counts to send */
    if (!src->provider->nstat_counts) {
        return 0;
    }

    nstat_msg_src_counts counts;
    bzero(&counts, sizeof(counts));
    counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
    counts.hdr.length = sizeof(counts);
    counts.srcref = src->srcref;
    counts.event_flags = 0;

    errno_t result = 0;
    result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
    if (result != 0) {
        return result;
    }

    if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
        counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0) {
        return EAGAIN;
    }

    return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
}

static int
nstat_control_send_description(
    nstat_control_state *state,
    nstat_src *src,
    u_int64_t context,
    u_int16_t hdr_flags)
{
    // Provider doesn't support getting the descriptor? Done.
    if (src->provider->nstat_descriptor_length == 0 ||
        src->provider->nstat_copy_descriptor == NULL) {
        return EOPNOTSUPP;
    }

    // Allocate storage for the descriptor message
    mbuf_t msg;
    unsigned int one = 1;
    size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
    assert(size <= MAX_NSTAT_MSG_HDR_LENGTH);

    if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
        return ENOMEM;
    }

    nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
    bzero(desc, size);
    mbuf_setlen(msg, size);
    mbuf_pkthdr_setlen(msg, mbuf_len(msg));

    // Query the provider for the provider specific bits
    errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

    if (result != 0) {
        mbuf_freem(msg);
        return result;
    }

    desc->hdr.context = context;
    desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
    desc->hdr.length = (u_int16_t)size;
    desc->hdr.flags = hdr_flags;
    desc->srcref = src->srcref;
    desc->event_flags = 0;
    desc->provider = src->provider->nstat_provider_id;

    result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
    if (result != 0) {
        nstat_stats.nstat_descriptionfailures += 1;
        mbuf_freem(msg);
    }

    return result;
}

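/*
 * Variant of the description message that appends into the accumulation
 * buffer instead of enqueueing its own mbuf. The descriptor is staged in
 * a 64-bit aligned buffer on the stack, so this path is limited to small
 * (at most 512 byte) descriptions.
 */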
static errno_t
nstat_control_append_description(
    nstat_control_state *state,
    nstat_src *src)
{
    size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
    if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
        src->provider->nstat_copy_descriptor == NULL) {
        return EOPNOTSUPP;
    }

    // Fill out a buffer on the stack, we will copy to the mbuf later
    u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
    bzero(buffer, size);

    nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
    desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
    desc->hdr.length = (u_int16_t)size;
    desc->srcref = src->srcref;
    desc->event_flags = 0;
    desc->provider = src->provider->nstat_provider_id;

    errno_t result = 0;
    // Fill in the description
    // Query the provider for the provider specific bits
    result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
        src->provider->nstat_descriptor_length);
    if (result != 0) {
        return result;
    }

    return nstat_accumulate_msg(state, &desc->hdr, size);
}

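/*
 * The per-client provider filter carries a mask of the extension types
 * the client asked for; a zero mask keeps updates in the legacy,
 * non-extended format.
 */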
5714 | static uint64_t |
5715 | nstat_extension_flags_for_source( |
5716 | nstat_control_state *state, |
5717 | nstat_src *src) |
5718 | { |
5719 | VERIFY(state != NULL & src != NULL); |
5720 | nstat_provider_id_t provider_id = src->provider->nstat_provider_id; |
5721 | |
5722 | return state->ncs_provider_filters[provider_id].npf_extensions; |
5723 | } |
5724 | |
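/*
 * nstat_control_send_update() builds a complete update message in its own
 * mbuf and enqueues it immediately; nstat_control_append_update() further
 * below stages an equivalent payload on the stack and hands it to
 * nstat_accumulate_msg() so several updates can be batched into a single
 * enqueue. The two paths must be kept in sync when the message layout
 * changes.
 */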
5725 | static int |
5726 | nstat_control_send_update( |
5727 | nstat_control_state *state, |
5728 | nstat_src *src, |
5729 | u_int64_t context, |
5730 | u_int64_t event, |
5731 | u_int16_t hdr_flags, |
5732 | int *gone) |
5733 | { |
5734 | // Provider doesn't support getting the descriptor or counts? Done. |
5735 | if ((src->provider->nstat_descriptor_length == 0 || |
5736 | src->provider->nstat_copy_descriptor == NULL) && |
5737 | src->provider->nstat_counts == NULL) { |
5738 | return EOPNOTSUPP; |
5739 | } |
5740 | |
5741 | // Allocate storage for the descriptor message |
5742 | mbuf_t msg; |
5743 | unsigned int one = 1; |
5744 | size_t size = offsetof(nstat_msg_src_update, data) + |
5745 | src->provider->nstat_descriptor_length; |
5746 | size_t total_extension_size = 0; |
5747 | u_int32_t num_extensions = 0; |
5748 | u_int64_t extension_mask = nstat_extension_flags_for_source(state, src); |
5749 | |
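	/*
	 * First pass over the requested extensions: calling
	 * nstat_copy_extension() with a NULL buffer asks the provider only for
	 * the size of an extension, and extensions reporting a zero size are
	 * dropped from the mask. Sizes are rounded up to a multiple of 8 bytes
	 * so each extension header placed after the preceding data stays
	 * aligned.
	 */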
5750 | if ((extension_mask != 0) && (src->provider->nstat_copy_extension != NULL)) { |
5751 | uint32_t extension_id = 0; |
5752 | for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) { |
5753 | if ((extension_mask & (1ull << extension_id)) != 0) { |
5754 | size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, NULL, 0); |
5755 | if (extension_size == 0) { |
5756 | extension_mask &= ~(1ull << extension_id); |
5757 | } else { |
5758 | num_extensions++; |
5759 | total_extension_size += ROUNDUP64(extension_size); |
5760 | } |
5761 | } |
5762 | } |
5763 | size += total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions); |
5764 | } |
5765 | assert(size <= MAX_NSTAT_MSG_HDR_LENGTH); |
5766 | |
5767 | /* |
5768 | * XXX Would be interesting to see how extended updates affect mbuf |
5769 | * allocations, given the max segments defined as 1, one may get |
5770 | * allocations with higher fragmentation. |
5771 | */ |
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0) {
		return ENOMEM;
	}

	nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
	bzero(desc, size);
5778 | desc->hdr.context = context; |
5779 | desc->hdr.type = (num_extensions == 0) ? NSTAT_MSG_TYPE_SRC_UPDATE : |
5780 | NSTAT_MSG_TYPE_SRC_EXTENDED_UPDATE; |
5781 | desc->hdr.length = (u_int16_t)size; |
5782 | desc->hdr.flags = hdr_flags; |
5783 | desc->srcref = src->srcref; |
5784 | desc->event_flags = event; |
5785 | desc->provider = src->provider->nstat_provider_id; |
5786 | |
5787 | /* |
5788 | * XXX The following two lines are only valid when max-segments is passed |
5789 | * as one. |
5790 | * Other computations with offset also depend on that being true. |
5791 | * Be aware of that before making any modifications that changes that |
5792 | * behavior. |
5793 | */ |
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));
5796 | |
5797 | errno_t result = 0; |
5798 | if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) { |
5799 | // Query the provider for the provider specific bits |
5800 | result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, |
5801 | src->provider->nstat_descriptor_length); |
5802 | if (result != 0) { |
			mbuf_freem(msg);
5804 | return result; |
5805 | } |
5806 | } |
5807 | |
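	/*
	 * At this point the message is laid out as:
	 *
	 *	nstat_msg_src_update_hdr
	 *	provider descriptor	(nstat_descriptor_length bytes)
	 *
	 * and, when extensions are present, each one is appended as an
	 * nstat_msg_src_extended_item_hdr followed by its data, padded to an
	 * 8-byte boundary so the next header stays aligned.
	 */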
5808 | if (num_extensions > 0) { |
		nstat_msg_src_extended_item_hdr *p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)mbuf_data(msg) +
		    sizeof(nstat_msg_src_update_hdr) + src->provider->nstat_descriptor_length);
		uint32_t extension_id = 0;

		bzero(p_extension_hdr, total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions));
5814 | |
5815 | for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) { |
5816 | if ((extension_mask & (1ull << extension_id)) != 0) { |
5817 | void *buf = (void *)(p_extension_hdr + 1); |
5818 | size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, buf, total_extension_size); |
5819 | if ((extension_size == 0) || (extension_size > total_extension_size)) { |
5820 | // Something has gone wrong. Instead of attempting to wind back the excess buffer space, mark it as unused |
5821 | p_extension_hdr->type = NSTAT_EXTENDED_UPDATE_TYPE_UNKNOWN; |
5822 | p_extension_hdr->length = total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * (num_extensions - 1)); |
5823 | break; |
5824 | } else { |
				// The extension may be of any size and alignment; the unpadded size is
				// reported in the extension header, but we pad so that whatever comes
				// next is suitably aligned
5827 | p_extension_hdr->type = extension_id; |
5828 | p_extension_hdr->length = extension_size; |
5829 | extension_size = ROUNDUP64(extension_size); |
5830 | total_extension_size -= extension_size; |
5831 | p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buf + extension_size); |
5832 | num_extensions--; |
5833 | } |
5834 | } |
5835 | } |
5836 | } |
5837 | |
5838 | if (src->provider->nstat_counts) { |
5839 | result = src->provider->nstat_counts(src->cookie, &desc->counts, gone); |
5840 | if (result == 0) { |
5841 | if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES && |
5842 | desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) { |
5843 | result = EAGAIN; |
5844 | } else { |
				result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
5846 | } |
5847 | } |
5848 | } |
5849 | |
5850 | if (result != 0) { |
5851 | nstat_stats.nstat_srcupatefailures += 1; |
		mbuf_freem(msg);
5853 | } else { |
5854 | src->ns_reported = true; |
5855 | } |
5856 | |
5857 | return result; |
5858 | } |
5859 | |
5860 | static errno_t |
5861 | nstat_control_append_update( |
5862 | nstat_control_state *state, |
5863 | nstat_src *src, |
5864 | int *gone) |
5865 | { |
5866 | if ((src->provider->nstat_descriptor_length == 0 || |
5867 | src->provider->nstat_copy_descriptor == NULL) && |
5868 | src->provider->nstat_counts == NULL) { |
5869 | return EOPNOTSUPP; |
5870 | } |
5871 | |
5872 | size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length; |
5873 | size_t total_extension_size = 0; |
5874 | u_int32_t num_extensions = 0; |
5875 | u_int64_t extension_mask = nstat_extension_flags_for_source(state, src); |
5876 | |
5877 | if ((extension_mask != 0) && (src->provider->nstat_copy_extension != NULL)) { |
5878 | uint32_t extension_id = 0; |
5879 | for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) { |
5880 | if ((extension_mask & (1ull << extension_id)) != 0) { |
5881 | size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, NULL, 0); |
5882 | if (extension_size == 0) { |
5883 | extension_mask &= ~(1ull << extension_id); |
5884 | } else { |
5885 | num_extensions++; |
5886 | total_extension_size += ROUNDUP64(extension_size); |
5887 | } |
5888 | } |
5889 | } |
5890 | size += total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions); |
5891 | } |
5892 | |
5893 | /* |
5894 | * This kind of limits extensions. |
5895 | * The optimization is around being able to deliver multiple |
5896 | * of updates bundled together. |
5897 | * Increasing the size runs the risk of too much stack usage. |
5898 | * One could potentially changed the allocation below to be on heap. |
5899 | * For now limiting it to half of NSTAT_MAX_MSG_SIZE. |
5900 | */ |
5901 | if (size > (NSTAT_MAX_MSG_SIZE >> 1)) { |
5902 | return EOPNOTSUPP; |
5903 | } |
5904 | |
	// Fill out a buffer on the stack; we will copy it to the mbuf later
	u_int64_t buffer[size / sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);
5908 | |
5909 | nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer; |
5910 | desc->hdr.type = (num_extensions == 0) ? NSTAT_MSG_TYPE_SRC_UPDATE : |
5911 | NSTAT_MSG_TYPE_SRC_EXTENDED_UPDATE; |
5912 | desc->hdr.length = (u_int16_t)size; |
5913 | desc->srcref = src->srcref; |
5914 | desc->event_flags = 0; |
5915 | desc->provider = src->provider->nstat_provider_id; |
5916 | |
5917 | errno_t result = 0; |
5918 | // Fill in the description |
5919 | if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor) { |
5920 | // Query the provider for the provider specific bits |
5921 | result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, |
5922 | src->provider->nstat_descriptor_length); |
5923 | if (result != 0) { |
5924 | nstat_stats.nstat_copy_descriptor_failures++; |
5925 | if (nstat_debug != 0) { |
5926 | printf("%s: src->provider->nstat_copy_descriptor: %d\n" , __func__, result); |
5927 | } |
5928 | return result; |
5929 | } |
5930 | } |
5931 | |
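	// Extension sizing and copying here follow the same two-pass scheme as
	// nstat_control_send_update(); see the comments there for the message
	// layout.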
5932 | if (num_extensions > 0) { |
5933 | nstat_msg_src_extended_item_hdr *p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buffer + |
5934 | sizeof(nstat_msg_src_update_hdr) + src->provider->nstat_descriptor_length); |
5935 | uint32_t extension_id = 0; |
		bzero(p_extension_hdr, total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * num_extensions));
5937 | |
5938 | for (extension_id = NSTAT_EXTENDED_UPDATE_TYPE_MIN; extension_id <= NSTAT_EXTENDED_UPDATE_TYPE_MAX; extension_id++) { |
5939 | if ((extension_mask & (1ull << extension_id)) != 0) { |
5940 | void *buf = (void *)(p_extension_hdr + 1); |
5941 | size_t extension_size = src->provider->nstat_copy_extension(src->cookie, extension_id, buf, total_extension_size); |
5942 | if ((extension_size == 0) || (extension_size > total_extension_size)) { |
5943 | // Something has gone wrong. Instead of attempting to wind back the excess buffer space, mark it as unused |
5944 | p_extension_hdr->type = NSTAT_EXTENDED_UPDATE_TYPE_UNKNOWN; |
5945 | p_extension_hdr->length = total_extension_size + (sizeof(nstat_msg_src_extended_item_hdr) * (num_extensions - 1)); |
5946 | break; |
5947 | } else { |
5948 | extension_size = ROUNDUP64(extension_size); |
5949 | p_extension_hdr->type = extension_id; |
5950 | p_extension_hdr->length = extension_size; |
5951 | total_extension_size -= extension_size; |
5952 | p_extension_hdr = (nstat_msg_src_extended_item_hdr *)(void *)((char *)buf + extension_size); |
5953 | num_extensions--; |
5954 | } |
5955 | } |
5956 | } |
5957 | } |
5958 | |
5959 | if (src->provider->nstat_counts) { |
5960 | result = src->provider->nstat_counts(src->cookie, &desc->counts, gone); |
5961 | if (result != 0) { |
5962 | nstat_stats.nstat_provider_counts_failures++; |
5963 | if (nstat_debug != 0) { |
5964 | printf("%s: src->provider->nstat_counts: %d\n" , __func__, result); |
5965 | } |
5966 | return result; |
5967 | } |
5968 | |
5969 | if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES && |
5970 | desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0) { |
5971 | return EAGAIN; |
5972 | } |
5973 | } |
5974 | |
	result = nstat_accumulate_msg(state, &desc->hdr, size);
5976 | if (result == 0) { |
5977 | src->ns_reported = true; |
5978 | } |
5979 | return result; |
5980 | } |
5981 | |
5982 | static errno_t |
5983 | nstat_control_send_removed( |
5984 | nstat_control_state *state, |
5985 | nstat_src *src, |
5986 | u_int16_t hdr_flags) |
5987 | { |
5988 | nstat_msg_src_removed removed; |
5989 | errno_t result; |
5990 | |
	bzero(&removed, sizeof(removed));
5992 | removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED; |
5993 | removed.hdr.length = sizeof(removed); |
5994 | removed.hdr.context = 0; |
5995 | removed.hdr.flags = hdr_flags; |
5996 | removed.srcref = src->srcref; |
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
	    sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
5999 | if (result != 0) { |
6000 | nstat_stats.nstat_msgremovedfailures += 1; |
6001 | } |
6002 | |
6003 | return result; |
6004 | } |
6005 | |
6006 | static errno_t |
6007 | nstat_control_handle_add_request( |
6008 | nstat_control_state *state, |
6009 | mbuf_t m) |
6010 | { |
6011 | errno_t result; |
6012 | |
6013 | // Verify the header fits in the first mbuf |
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param)) {
6015 | return EINVAL; |
6016 | } |
6017 | |
6018 | // Calculate the length of the parameter field |
	ssize_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
6020 | if (paramlength < 0 || paramlength > 2 * 1024) { |
6021 | return EINVAL; |
6022 | } |
6023 | |
6024 | nstat_provider *provider = NULL; |
6025 | nstat_provider_cookie_t cookie = NULL; |
	nstat_msg_add_src_req *req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m)) {
6028 | // parameter is too large, we need to make a contiguous copy |
6029 | void *data = (void *) kalloc_data(paramlength, Z_WAITOK); |
6030 | |
6031 | if (!data) { |
6032 | return ENOMEM; |
6033 | } |
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0) {
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		}
		kfree_data(data, paramlength);
	} else {
		result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
6041 | } |
6042 | |
6043 | if (result != 0) { |
6044 | return result; |
6045 | } |
6046 | |
6047 | // sanitize cookie |
	nstat_control_sanitize_cookie(state, provider->nstat_provider_id, cookie);
6049 | |
	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
6051 | if (result != 0) { |
6052 | provider->nstat_release(cookie, 0); |
6053 | } |
6054 | |
6055 | // Set the flag if a provider added a single source |
6056 | os_atomic_or(&state->ncs_added_src, (1 << provider->nstat_provider_id), relaxed); |
6057 | |
6058 | return result; |
6059 | } |
6060 | |
6061 | static errno_t |
6062 | nstat_set_provider_filter( |
6063 | nstat_control_state *state, |
6064 | nstat_msg_add_all_srcs *req) |
6065 | { |
6066 | nstat_provider_id_t provider_id = req->provider; |
6067 | |
6068 | u_int32_t prev_ncs_watching = os_atomic_or_orig(&state->ncs_watching, (1 << provider_id), relaxed); |
6069 | |
6070 | // Reject it if the client is already watching all the sources. |
6071 | if ((prev_ncs_watching & (1 << provider_id)) != 0) { |
6072 | return EALREADY; |
6073 | } |
6074 | |
6075 | // Reject it if any single source has already been added. |
6076 | u_int32_t ncs_added_src = os_atomic_load(&state->ncs_added_src, relaxed); |
6077 | if ((ncs_added_src & (1 << provider_id)) != 0) { |
6078 | return EALREADY; |
6079 | } |
6080 | |
6081 | state->ncs_watching |= (1 << provider_id); |
6082 | state->ncs_provider_filters[provider_id].npf_events = req->events; |
6083 | state->ncs_provider_filters[provider_id].npf_flags = req->filter; |
6084 | state->ncs_provider_filters[provider_id].npf_pid = req->target_pid; |
	uuid_copy(state->ncs_provider_filters[provider_id].npf_uuid, req->target_uuid);
6086 | |
	// The extensions should be populated by a more direct mechanism.
	// Using the top 32 bits of the filter flags reduces the namespace of both,
	// but is a convenient workaround that avoids ntstat.h changes which would require a rebuild of all clients.
	// Extensions give away additional privacy information and are subject to an unconditional privilege check,
	// unconstrained by the value of nstat_privcheck.
	if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) == 0) {
6093 | state->ncs_provider_filters[provider_id].npf_extensions = (req->filter >> NSTAT_FILTER_ALLOWED_EXTENSIONS_SHIFT) & NSTAT_EXTENDED_UPDATE_FLAG_MASK; |
6094 | } |
6095 | return 0; |
6096 | } |
6097 | |
6098 | static errno_t |
6099 | nstat_control_handle_add_all( |
6100 | nstat_control_state *state, |
6101 | mbuf_t m) |
6102 | { |
6103 | errno_t result = 0; |
6104 | |
6105 | // Verify the header fits in the first mbuf |
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs)) {
6107 | return EINVAL; |
6108 | } |
6109 | |
	nstat_msg_add_all_srcs *req = mbuf_data(m);
6111 | if (req->provider > NSTAT_PROVIDER_LAST) { |
6112 | return ENOENT; |
6113 | } |
6114 | |
	nstat_provider *provider = nstat_find_provider_by_id(req->provider);
6116 | |
6117 | if (!provider) { |
6118 | return ENOENT; |
6119 | } |
6120 | if (provider->nstat_watcher_add == NULL) { |
6121 | return ENOTSUP; |
6122 | } |
6123 | |
	// Traditionally the nstat_privcheck value allowed for easy access to ntstat on the Mac.
	// Keep backwards compatibility while being more stringent with recent providers.
6126 | if ((nstat_privcheck != 0) || (req->provider == NSTAT_PROVIDER_UDP_SUBFLOW) || (req->provider == NSTAT_PROVIDER_CONN_USERLAND)) { |
		result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
6129 | if (result != 0) { |
6130 | return result; |
6131 | } |
6132 | } |
6133 | |
	lck_mtx_lock(&state->ncs_mtx);
6135 | if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED) { |
6136 | // Suppression of source messages implicitly requires the use of update messages |
6137 | state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES; |
6138 | } |
	lck_mtx_unlock(&state->ncs_mtx);
6140 | |
6141 | // rdar://problem/30301300 Different providers require different synchronization |
6142 | // to ensure that a new entry does not get double counted due to being added prior |
6143 | // to all current provider entries being added. Hence pass the provider the details |
6144 | // in the original request for this to be applied atomically |
6145 | |
6146 | result = provider->nstat_watcher_add(state, req); |
6147 | |
6148 | if (result == 0) { |
		nstat_enqueue_success(req->hdr.context, state, 0);
6150 | } |
6151 | |
6152 | return result; |
6153 | } |
6154 | |
6155 | static errno_t |
6156 | nstat_control_source_add( |
6157 | u_int64_t context, |
6158 | nstat_control_state *state, |
6159 | nstat_provider *provider, |
6160 | nstat_provider_cookie_t cookie) |
6161 | { |
6162 | // Fill out source added message if appropriate |
6163 | mbuf_t msg = NULL; |
6164 | nstat_src_ref_t *srcrefp = NULL; |
6165 | |
6166 | u_int64_t provider_filter_flags = |
6167 | state->ncs_provider_filters[provider->nstat_provider_id].npf_flags; |
6168 | boolean_t tell_user = |
6169 | ((provider_filter_flags & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0); |
6170 | u_int32_t src_filter = |
6171 | (provider_filter_flags & NSTAT_FILTER_PROVIDER_NOZEROBYTES) |
6172 | ? NSTAT_FILTER_NOZEROBYTES : 0; |
6173 | |
6174 | if (provider_filter_flags & NSTAT_FILTER_TCP_NO_EARLY_CLOSE) { |
6175 | src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE; |
6176 | } |
6177 | |
6178 | if (tell_user) { |
6179 | unsigned int one = 1; |
6180 | |
		if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
		    &one, &msg) != 0) {
6183 | return ENOMEM; |
6184 | } |
6185 | |
		mbuf_setlen(msg, sizeof(nstat_msg_src_added));
		mbuf_pkthdr_setlen(msg, mbuf_len(msg));
		nstat_msg_src_added *add = mbuf_data(msg);
		bzero(add, sizeof(*add));
6190 | add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED; |
6191 | assert(mbuf_len(msg) <= MAX_NSTAT_MSG_HDR_LENGTH); |
		add->hdr.length = (u_int16_t)mbuf_len(msg);
6193 | add->hdr.context = context; |
6194 | add->provider = provider->nstat_provider_id; |
6195 | srcrefp = &add->srcref; |
6196 | } |
6197 | |
6198 | // Allocate storage for the source |
6199 | nstat_src *src = kalloc_type(struct nstat_src, Z_WAITOK); |
6200 | if (src == NULL) { |
6201 | if (msg) { |
			mbuf_freem(msg);
6203 | } |
6204 | return ENOMEM; |
6205 | } |
6206 | |
6207 | // Fill in the source, including picking an unused source ref |
	lck_mtx_lock(&state->ncs_mtx);
6209 | |
6210 | src->srcref = nstat_control_next_src_ref(state); |
6211 | if (srcrefp) { |
6212 | *srcrefp = src->srcref; |
6213 | } |
6214 | |
6215 | if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID) { |
		lck_mtx_unlock(&state->ncs_mtx);
		kfree_type(struct nstat_src, src);
		if (msg) {
			mbuf_freem(msg);
6220 | } |
6221 | return EINVAL; |
6222 | } |
6223 | src->provider = provider; |
6224 | src->cookie = cookie; |
6225 | src->filter = src_filter; |
6226 | src->seq = 0; |
6227 | |
6228 | if (msg) { |
6229 | // send the source added message if appropriate |
		errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
		    CTL_DATA_EOR);
6232 | if (result != 0) { |
6233 | nstat_stats.nstat_srcaddedfailures += 1; |
			lck_mtx_unlock(&state->ncs_mtx);
			kfree_type(struct nstat_src, src);
			mbuf_freem(msg);
6237 | return result; |
6238 | } |
6239 | } |
6240 | // Put the source in the list |
6241 | TAILQ_INSERT_HEAD(&state->ncs_src_queue, src, ns_control_link); |
6242 | src->ns_control = state; |
6243 | |
	lck_mtx_unlock(&state->ncs_mtx);
6245 | |
6246 | return 0; |
6247 | } |
6248 | |
6249 | static errno_t |
6250 | nstat_control_handle_remove_request( |
6251 | nstat_control_state *state, |
6252 | mbuf_t m) |
6253 | { |
6254 | nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID; |
6255 | nstat_src *src; |
6256 | |
	if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0) {
6258 | return EINVAL; |
6259 | } |
6260 | |
	lck_mtx_lock(&state->ncs_mtx);
6262 | |
6263 | // Remove this source as we look for it |
6264 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
6265 | { |
6266 | if (src->srcref == srcref) { |
6267 | break; |
6268 | } |
6269 | } |
6270 | if (src) { |
6271 | TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); |
6272 | } |
6273 | |
	lck_mtx_unlock(&state->ncs_mtx);
6275 | |
6276 | if (src) { |
6277 | nstat_control_cleanup_source(state, src, FALSE); |
6278 | } |
6279 | |
6280 | return src ? 0 : ENOENT; |
6281 | } |
6282 | |
6283 | static errno_t |
6284 | nstat_control_handle_query_request( |
6285 | nstat_control_state *state, |
6286 | mbuf_t m) |
6287 | { |
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the client's thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
6296 | tailq_head_nstat_src dead_list; |
6297 | errno_t result = ENOENT; |
6298 | nstat_msg_query_src_req req; |
6299 | |
	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
6301 | return EINVAL; |
6302 | } |
6303 | |
6304 | TAILQ_INIT(&dead_list); |
6305 | const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL); |
6306 | |
	lck_mtx_lock(&state->ncs_mtx);
6308 | |
6309 | if (all_srcs) { |
6310 | state->ncs_flags |= NSTAT_FLAG_REQCOUNTS; |
6311 | } |
6312 | nstat_src *src, *tmpsrc; |
6313 | u_int64_t src_count = 0; |
6314 | boolean_t partial = FALSE; |
6315 | |
6316 | /* |
6317 | * Error handling policy and sequence number generation is folded into |
6318 | * nstat_control_begin_query. |
6319 | */ |
	partial = nstat_control_begin_query(state, &req.hdr);
6321 | |
6322 | |
6323 | TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc) |
6324 | { |
6325 | int gone = 0; |
6326 | |
6327 | // XXX ignore IFACE types? |
6328 | if (all_srcs || src->srcref == req.srcref) { |
			if (nstat_control_reporting_allowed(state, src, 0)
6330 | && (!partial || !all_srcs || src->seq != state->ncs_seq)) { |
6331 | if (all_srcs && |
6332 | (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0) { |
					result = nstat_control_append_counts(state, src, &gone);
6334 | } else { |
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
6336 | } |
6337 | |
6338 | if (ENOMEM == result || ENOBUFS == result) { |
6339 | /* |
6340 | * If the counts message failed to |
6341 | * enqueue then we should clear our flag so |
6342 | * that a client doesn't miss anything on |
6343 | * idle cleanup. We skip the "gone" |
6344 | * processing in the hope that we may |
6345 | * catch it another time. |
6346 | */ |
6347 | state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS; |
6348 | break; |
6349 | } |
6350 | if (partial) { |
6351 | /* |
6352 | * We skip over hard errors and |
6353 | * filtered sources. |
6354 | */ |
6355 | src->seq = state->ncs_seq; |
6356 | src_count++; |
6357 | } |
6358 | } |
6359 | } |
6360 | |
6361 | if (gone) { |
6362 | // send one last descriptor message so client may see last state |
6363 | // If we can't send the notification now, it |
6364 | // will be sent in the idle cleanup. |
			result = nstat_control_send_description(state, src, 0, 0);
6366 | if (result != 0) { |
6367 | nstat_stats.nstat_control_send_description_failures++; |
6368 | if (nstat_debug != 0) { |
6369 | printf("%s - nstat_control_send_description() %d\n" , __func__, result); |
6370 | } |
6371 | state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS; |
6372 | break; |
6373 | } |
6374 | |
6375 | // pull src out of the list |
6376 | TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); |
6377 | TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link); |
6378 | } |
6379 | |
6380 | if (all_srcs) { |
6381 | if (src_count >= QUERY_CONTINUATION_SRC_COUNT) { |
6382 | break; |
6383 | } |
6384 | } else if (req.srcref == src->srcref) { |
6385 | break; |
6386 | } |
6387 | } |
6388 | |
6389 | nstat_flush_accumulated_msgs(state); |
6390 | |
6391 | u_int16_t flags = 0; |
6392 | if (req.srcref == NSTAT_SRC_REF_ALL) { |
		flags = nstat_control_end_query(state, src, partial);
6394 | } |
6395 | |
	lck_mtx_unlock(&state->ncs_mtx);
6397 | |
6398 | /* |
6399 | * If an error occurred enqueueing data, then allow the error to |
6400 | * propagate to nstat_control_send. This way, the error is sent to |
6401 | * user-level. |
6402 | */ |
6403 | if (all_srcs && ENOMEM != result && ENOBUFS != result) { |
		nstat_enqueue_success(req.hdr.context, state, flags);
6405 | result = 0; |
6406 | } |
6407 | |
6408 | while ((src = TAILQ_FIRST(&dead_list))) { |
6409 | TAILQ_REMOVE(&dead_list, src, ns_control_link); |
6410 | nstat_control_cleanup_source(state, src, FALSE); |
6411 | } |
6412 | |
6413 | return result; |
6414 | } |
6415 | |
6416 | static errno_t |
6417 | nstat_control_handle_get_src_description( |
6418 | nstat_control_state *state, |
6419 | mbuf_t m) |
6420 | { |
6421 | nstat_msg_get_src_description req; |
6422 | errno_t result = ENOENT; |
6423 | nstat_src *src; |
6424 | |
	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
6426 | return EINVAL; |
6427 | } |
6428 | |
	lck_mtx_lock(&state->ncs_mtx);
6430 | u_int64_t src_count = 0; |
6431 | boolean_t partial = FALSE; |
6432 | const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL); |
6433 | |
6434 | /* |
6435 | * Error handling policy and sequence number generation is folded into |
6436 | * nstat_control_begin_query. |
6437 | */ |
	partial = nstat_control_begin_query(state, &req.hdr);
6439 | |
6440 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
6441 | { |
6442 | if (all_srcs || src->srcref == req.srcref) { |
			if (nstat_control_reporting_allowed(state, src, 0)
6444 | && (!all_srcs || !partial || src->seq != state->ncs_seq)) { |
6445 | if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs) { |
6446 | result = nstat_control_append_description(state, src); |
6447 | } else { |
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
6449 | } |
6450 | |
6451 | if (ENOMEM == result || ENOBUFS == result) { |
6452 | /* |
6453 | * If the description message failed to |
6454 | * enqueue then we give up for now. |
6455 | */ |
6456 | break; |
6457 | } |
6458 | if (partial) { |
6459 | /* |
6460 | * Note, we skip over hard errors and |
6461 | * filtered sources. |
6462 | */ |
6463 | src->seq = state->ncs_seq; |
6464 | src_count++; |
6465 | if (src_count >= QUERY_CONTINUATION_SRC_COUNT) { |
6466 | break; |
6467 | } |
6468 | } |
6469 | } |
6470 | |
6471 | if (!all_srcs) { |
6472 | break; |
6473 | } |
6474 | } |
6475 | } |
6476 | nstat_flush_accumulated_msgs(state); |
6477 | |
6478 | u_int16_t flags = 0; |
6479 | if (req.srcref == NSTAT_SRC_REF_ALL) { |
		flags = nstat_control_end_query(state, src, partial);
6481 | } |
6482 | |
	lck_mtx_unlock(&state->ncs_mtx);
6484 | /* |
6485 | * If an error occurred enqueueing data, then allow the error to |
6486 | * propagate to nstat_control_send. This way, the error is sent to |
6487 | * user-level. |
6488 | */ |
6489 | if (all_srcs && ENOMEM != result && ENOBUFS != result) { |
		nstat_enqueue_success(req.hdr.context, state, flags);
6491 | result = 0; |
6492 | } |
6493 | |
6494 | return result; |
6495 | } |
6496 | |
6497 | static errno_t |
6498 | nstat_control_handle_set_filter( |
6499 | nstat_control_state *state, |
6500 | mbuf_t m) |
6501 | { |
6502 | nstat_msg_set_filter req; |
6503 | nstat_src *src; |
6504 | |
	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
6506 | return EINVAL; |
6507 | } |
6508 | if (req.srcref == NSTAT_SRC_REF_ALL || |
6509 | req.srcref == NSTAT_SRC_REF_INVALID) { |
6510 | return EINVAL; |
6511 | } |
6512 | |
	lck_mtx_lock(&state->ncs_mtx);
6514 | TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) |
6515 | { |
6516 | if (req.srcref == src->srcref) { |
6517 | src->filter = req.filter; |
6518 | break; |
6519 | } |
6520 | } |
	lck_mtx_unlock(&state->ncs_mtx);
6522 | if (src == NULL) { |
6523 | return ENOENT; |
6524 | } |
6525 | |
6526 | return 0; |
6527 | } |
6528 | |
6529 | static void |
6530 | nstat_send_error( |
6531 | nstat_control_state *state, |
6532 | u_int64_t context, |
6533 | u_int32_t error) |
6534 | { |
6535 | errno_t result; |
6536 | struct nstat_msg_error err; |
6537 | |
	bzero(&err, sizeof(err));
6539 | err.hdr.type = NSTAT_MSG_TYPE_ERROR; |
6540 | err.hdr.length = sizeof(err); |
6541 | err.hdr.context = context; |
6542 | err.error = error; |
6543 | |
	result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
	    sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
6546 | if (result != 0) { |
6547 | nstat_stats.nstat_msgerrorfailures++; |
6548 | } |
6549 | } |
6550 | |
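/*
 * A query for all sources may be too large to complete in one pass, so a
 * client can resend the request with NSTAT_MSG_HDR_FLAG_CONTINUATION set and
 * the same context to resume where the previous pass stopped. The control
 * state's sequence number (ncs_seq) marks sources already reported during
 * the current query, while a continuation arriving with a different context
 * abandons any query in progress, reporting EAGAIN against the old context.
 */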
6551 | static boolean_t |
6552 | nstat_control_begin_query( |
6553 | nstat_control_state *state, |
6554 | const nstat_msg_hdr *hdrp) |
6555 | { |
6556 | boolean_t partial = FALSE; |
6557 | |
6558 | if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION) { |
6559 | /* A partial query all has been requested. */ |
6560 | partial = TRUE; |
6561 | |
6562 | if (state->ncs_context != hdrp->context) { |
6563 | if (state->ncs_context != 0) { |
			nstat_send_error(state, state->ncs_context, EAGAIN);
6565 | } |
6566 | |
6567 | /* Initialize state for a partial query all. */ |
6568 | state->ncs_context = hdrp->context; |
6569 | state->ncs_seq++; |
6570 | } |
6571 | } |
6572 | |
6573 | return partial; |
6574 | } |
6575 | |
6576 | static u_int16_t |
6577 | nstat_control_end_query( |
6578 | nstat_control_state *state, |
6579 | nstat_src *last_src, |
6580 | boolean_t partial) |
6581 | { |
6582 | u_int16_t flags = 0; |
6583 | |
6584 | if (last_src == NULL || !partial) { |
6585 | /* |
6586 | * We iterated through the entire srcs list or exited early |
6587 | * from the loop when a partial update was not requested (an |
6588 | * error occurred), so clear context to indicate internally |
6589 | * that the query is finished. |
6590 | */ |
6591 | state->ncs_context = 0; |
6592 | } else { |
6593 | /* |
6594 | * Indicate to userlevel to make another partial request as |
6595 | * there are still sources left to be reported. |
6596 | */ |
6597 | flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION; |
6598 | } |
6599 | |
6600 | return flags; |
6601 | } |
6602 | |
6603 | static errno_t |
6604 | nstat_control_handle_get_update( |
6605 | nstat_control_state *state, |
6606 | mbuf_t m) |
6607 | { |
6608 | nstat_msg_query_src_req req; |
6609 | |
	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0) {
6611 | return EINVAL; |
6612 | } |
6613 | |
	lck_mtx_lock(&state->ncs_mtx);
6615 | |
6616 | state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES; |
6617 | |
6618 | errno_t result = ENOENT; |
6619 | nstat_src *src, *tmpsrc; |
6620 | tailq_head_nstat_src dead_list; |
6621 | u_int64_t src_count = 0; |
6622 | boolean_t partial = FALSE; |
6623 | const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL); |
6624 | TAILQ_INIT(&dead_list); |
6625 | |
6626 | /* |
6627 | * Error handling policy and sequence number generation is folded into |
6628 | * nstat_control_begin_query. |
6629 | */ |
	partial = nstat_control_begin_query(state, &req.hdr);
6631 | |
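	/*
	 * Once a source has been reported (ns_reported), polls for all sources
	 * pass NSTAT_FILTER_SUPPRESS_BORING_POLL so that
	 * nstat_control_reporting_allowed() can let providers suppress updates
	 * for sources with nothing interesting to report.
	 */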
6632 | TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc) { |
6633 | int gone = 0; |
6634 | if (all_srcs) { |
6635 | // Check to see if we should handle this source or if we're still skipping to find where to continue |
6636 | if ((FALSE == partial || src->seq != state->ncs_seq)) { |
6637 | u_int64_t suppression_flags = (src->ns_reported)? NSTAT_FILTER_SUPPRESS_BORING_POLL: 0; |
6638 | if (nstat_control_reporting_allowed(state, src, suppression_flags)) { |
					result = nstat_control_append_update(state, src, &gone);
6640 | if (ENOMEM == result || ENOBUFS == result) { |
6641 | /* |
6642 | * If the update message failed to |
6643 | * enqueue then give up. |
6644 | */ |
6645 | break; |
6646 | } |
6647 | if (partial) { |
6648 | /* |
6649 | * We skip over hard errors and |
6650 | * filtered sources. |
6651 | */ |
6652 | src->seq = state->ncs_seq; |
6653 | src_count++; |
6654 | } |
6655 | } |
6656 | } |
6657 | } else if (src->srcref == req.srcref) { |
			if (nstat_control_reporting_allowed(state, src, 0)) {
				result = nstat_control_send_update(state, src, req.hdr.context, 0, 0, &gone);
6660 | } |
6661 | } |
6662 | |
6663 | if (gone) { |
6664 | // pull src out of the list |
6665 | TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link); |
6666 | TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link); |
6667 | } |
6668 | |
6669 | if (!all_srcs && req.srcref == src->srcref) { |
6670 | break; |
6671 | } |
6672 | if (src_count >= QUERY_CONTINUATION_SRC_COUNT) { |
6673 | break; |
6674 | } |
6675 | } |
6676 | |
6677 | nstat_flush_accumulated_msgs(state); |
6678 | |
6679 | |
6680 | u_int16_t flags = 0; |
6681 | if (req.srcref == NSTAT_SRC_REF_ALL) { |
		flags = nstat_control_end_query(state, src, partial);
6683 | } |
6684 | |
	lck_mtx_unlock(&state->ncs_mtx);
6686 | /* |
6687 | * If an error occurred enqueueing data, then allow the error to |
6688 | * propagate to nstat_control_send. This way, the error is sent to |
6689 | * user-level. |
6690 | */ |
6691 | if (all_srcs && ENOMEM != result && ENOBUFS != result) { |
		nstat_enqueue_success(req.hdr.context, state, flags);
6693 | result = 0; |
6694 | } |
6695 | |
6696 | while ((src = TAILQ_FIRST(&dead_list))) { |
6697 | TAILQ_REMOVE(&dead_list, src, ns_control_link); |
6698 | // release src and send notification |
6699 | nstat_control_cleanup_source(state, src, FALSE); |
6700 | } |
6701 | |
6702 | return result; |
6703 | } |
6704 | |
6705 | static errno_t |
6706 | nstat_control_handle_subscribe_sysinfo( |
6707 | nstat_control_state *state) |
6708 | { |
	errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
6710 | |
6711 | if (result != 0) { |
6712 | return result; |
6713 | } |
6714 | |
	lck_mtx_lock(&state->ncs_mtx);
	state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
	lck_mtx_unlock(&state->ncs_mtx);
6718 | |
6719 | return 0; |
6720 | } |
6721 | |
6722 | static errno_t |
6723 | nstat_control_send( |
6724 | kern_ctl_ref kctl, |
6725 | u_int32_t unit, |
6726 | void *uinfo, |
6727 | mbuf_t m, |
6728 | __unused int flags) |
6729 | { |
6730 | nstat_control_state *state = (nstat_control_state*)uinfo; |
6731 | struct nstat_msg_hdr *hdr; |
6732 | struct nstat_msg_hdr storage; |
6733 | errno_t result = 0; |
6734 | |
	if (mbuf_pkthdr_len(m) < sizeof(*hdr)) {
		// Is this the right thing to do?
		mbuf_freem(m);
6738 | return EINVAL; |
6739 | } |
6740 | |
	if (mbuf_len(m) >= sizeof(*hdr)) {
		hdr = mbuf_data(m);
	} else {
		mbuf_copydata(m, 0, sizeof(storage), &storage);
6745 | hdr = &storage; |
6746 | } |
6747 | |
6748 | // Legacy clients may not set the length |
6749 | // Those clients are likely not setting the flags either |
6750 | // Fix everything up so old clients continue to work |
	if (hdr->length != mbuf_pkthdr_len(m)) {
		hdr->flags = 0;
		assert(mbuf_pkthdr_len(m) <= MAX_NSTAT_MSG_HDR_LENGTH);
		hdr->length = (u_int16_t)mbuf_pkthdr_len(m);
		if (hdr == &storage) {
			mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
6757 | } |
6758 | } |
6759 | |
6760 | switch (hdr->type) { |
6761 | case NSTAT_MSG_TYPE_ADD_SRC: |
6762 | result = nstat_control_handle_add_request(state, m); |
6763 | break; |
6764 | |
6765 | case NSTAT_MSG_TYPE_ADD_ALL_SRCS: |
6766 | result = nstat_control_handle_add_all(state, m); |
6767 | break; |
6768 | |
6769 | case NSTAT_MSG_TYPE_REM_SRC: |
6770 | result = nstat_control_handle_remove_request(state, m); |
6771 | break; |
6772 | |
6773 | case NSTAT_MSG_TYPE_QUERY_SRC: |
6774 | result = nstat_control_handle_query_request(state, m); |
6775 | break; |
6776 | |
6777 | case NSTAT_MSG_TYPE_GET_SRC_DESC: |
6778 | result = nstat_control_handle_get_src_description(state, m); |
6779 | break; |
6780 | |
6781 | case NSTAT_MSG_TYPE_SET_FILTER: |
6782 | result = nstat_control_handle_set_filter(state, m); |
6783 | break; |
6784 | |
6785 | case NSTAT_MSG_TYPE_GET_UPDATE: |
6786 | result = nstat_control_handle_get_update(state, m); |
6787 | break; |
6788 | |
6789 | case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO: |
6790 | result = nstat_control_handle_subscribe_sysinfo(state); |
6791 | break; |
6792 | |
6793 | default: |
6794 | result = EINVAL; |
6795 | break; |
6796 | } |
6797 | |
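	/*
	 * On failure, echo the original request back to the client behind an
	 * error header so the error can be correlated with the request that
	 * caused it. If the request can't be prepended, fall back to sending
	 * a bare error message.
	 */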
6798 | if (result != 0) { |
6799 | struct nstat_msg_error err; |
6800 | |
6801 | bzero(s: &err, n: sizeof(err)); |
6802 | err.hdr.type = NSTAT_MSG_TYPE_ERROR; |
		err.hdr.length = (u_int16_t)(sizeof(err) + mbuf_pkthdr_len(m));
6804 | err.hdr.context = hdr->context; |
6805 | err.error = result; |
6806 | |
		if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
		    mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0) {
			result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0) {
				mbuf_freem(m);
6812 | } |
6813 | m = NULL; |
6814 | } |
6815 | |
6816 | if (result != 0) { |
6817 | // Unable to prepend the error to the request - just send the error |
6818 | err.hdr.length = sizeof(err); |
			result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
6820 | CTL_DATA_EOR | CTL_DATA_CRIT); |
6821 | if (result != 0) { |
6822 | nstat_stats.nstat_msgerrorfailures += 1; |
6823 | } |
6824 | } |
6825 | nstat_stats.nstat_handle_msg_failures += 1; |
6826 | } |
6827 | |
6828 | if (m) { |
		mbuf_freem(m);
6830 | } |
6831 | |
6832 | return result; |
6833 | } |
6834 | |
6835 | |
6836 | /* Performs interface matching based on NSTAT_IFNET_IS… filter flags provided by an external caller */ |
6837 | static bool |
6838 | nstat_interface_matches_filter_flag(uint32_t filter_flags, struct ifnet *ifp) |
6839 | { |
6840 | bool result = false; |
6841 | |
6842 | if (ifp) { |
6843 | uint32_t flag_mask = (NSTAT_FILTER_IFNET_FLAGS & ~(NSTAT_IFNET_IS_NON_LOCAL | NSTAT_IFNET_IS_LOCAL)); |
6844 | filter_flags &= flag_mask; |
6845 | |
6846 | uint32_t flags = nstat_ifnet_to_flags(ifp); |
6847 | if (filter_flags & flags) { |
6848 | result = true; |
6849 | } |
6850 | } |
6851 | return result; |
6852 | } |
6853 | |
6854 | |
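/*
 * Accumulate TCP progress indicators (flow counts, probe failures, recent
 * flow byte counts) over every flow matching the given interface index or
 * filter flags. Kernel TCP pcbs are walked under the tcbinfo lock; with
 * SKYWALK, live userland TCP shadows are walked under nstat_mtx as well.
 */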
6855 | static int |
6856 | tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_maxduration, uint32_t filter_flags, struct xtcpprogress_indicators *indicators) |
6857 | { |
6858 | int error = 0; |
6859 | struct inpcb *inp; |
6860 | uint64_t min_recent_start_time; |
6861 | #if SKYWALK |
6862 | struct nstat_tu_shadow *shad; |
6863 | #endif /* SKYWALK */ |
6864 | |
6865 | min_recent_start_time = mach_continuous_time() - recentflow_maxduration; |
	bzero(indicators, sizeof(*indicators));
6867 | |
6868 | #if NSTAT_DEBUG |
6869 | /* interface index -1 may be passed in to only match against the filters specified in the flags */ |
6870 | if (ifindex < UINT_MAX) { |
6871 | printf("%s - for interface index %u with flags %x\n" , __func__, ifindex, filter_flags); |
6872 | } else { |
6873 | printf("%s - for matching interface with flags %x\n" , __func__, filter_flags); |
6874 | } |
6875 | #endif |
6876 | |
	lck_rw_lock_shared(&tcbinfo.ipi_lock);
6878 | /* |
6879 | * For progress indicators we don't need to special case TCP to collect time wait connections |
6880 | */ |
6881 | LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list) |
6882 | { |
6883 | struct tcpcb *tp = intotcpcb(inp); |
	/* radar://57100452
	 * The conditional logic implemented below performs an *inclusive* match based on the desired interface index in addition to any filter values.
	 * While the general expectation is that only one criterion is normally used for queries, the capability exists to satisfy any eccentric future needs.
	 */
6888 | if (tp && |
6889 | inp->inp_state != INPCB_STATE_DEAD && |
6890 | inp->inp_last_outifp && |
6891 | /* matches the given interface index, or against any provided filter flags */ |
6892 | (((inp->inp_last_outifp->if_index == ifindex) || |
		    nstat_interface_matches_filter_flag(filter_flags, inp->inp_last_outifp)) &&
		    /* perform flow state matching based on any provided filter flags */
6895 | (((filter_flags & (NSTAT_IFNET_IS_NON_LOCAL | NSTAT_IFNET_IS_LOCAL)) == 0) || |
6896 | ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && !(tp->t_flags & TF_LOCAL)) || |
6897 | ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (tp->t_flags & TF_LOCAL))))) { |
6898 | struct tcp_conn_status connstatus; |
6899 | #if NSTAT_DEBUG |
6900 | printf("%s - *matched non-Skywalk* [filter match: %d]\n" , __func__, nstat_interface_matches_filter_flag(filter_flags, inp->inp_last_outifp)); |
6901 | #endif |
6902 | indicators->xp_numflows++; |
6903 | tcp_get_connectivity_status(tp, &connstatus); |
6904 | if (connstatus.write_probe_failed) { |
6905 | indicators->xp_write_probe_fails++; |
6906 | } |
6907 | if (connstatus.read_probe_failed) { |
6908 | indicators->xp_read_probe_fails++; |
6909 | } |
6910 | if (connstatus.conn_probe_failed) { |
6911 | indicators->xp_conn_probe_fails++; |
6912 | } |
6913 | if (inp->inp_start_timestamp > min_recent_start_time) { |
6914 | uint64_t flow_count; |
6915 | |
6916 | indicators->xp_recentflows++; |
6917 | flow_count = os_atomic_load(&inp->inp_stat->rxbytes, relaxed); |
6918 | indicators->xp_recentflows_rxbytes += flow_count; |
6919 | flow_count = os_atomic_load(&inp->inp_stat->txbytes, relaxed); |
6920 | indicators->xp_recentflows_txbytes += flow_count; |
6921 | |
6922 | indicators->xp_recentflows_rxooo += tp->t_stat.rxoutoforderbytes; |
6923 | indicators->xp_recentflows_rxdup += tp->t_stat.rxduplicatebytes; |
6924 | indicators->xp_recentflows_retx += tp->t_stat.txretransmitbytes; |
6925 | if (tp->snd_max - tp->snd_una) { |
6926 | indicators->xp_recentflows_unacked++; |
6927 | } |
6928 | } |
6929 | } |
6930 | } |
	lck_rw_done(&tcbinfo.ipi_lock);
6932 | |
6933 | #if SKYWALK |
	lck_mtx_lock(&nstat_mtx);
6935 | |
6936 | TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) { |
6937 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
6938 | |
6939 | if ((shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) && (shad->shad_live)) { |
6940 | u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE; |
6941 | if (filter_flags != 0) { |
6942 | bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL); |
6943 | error = (result)? 0 : EIO; |
6944 | if (error) { |
6945 | printf("%s - nstat get ifflags %d\n" , __func__, error); |
6946 | continue; |
6947 | } |
6948 | |
6949 | if ((ifflags & filter_flags) == 0) { |
6950 | continue; |
6951 | } |
6952 | // Skywalk locality flags are not yet in place, see <rdar://problem/35607563> |
6953 | // Instead of checking flags with a simple logical and, check the inverse. |
6954 | // This allows for default action of fallthrough if the flags are not set. |
6955 | if ((filter_flags & NSTAT_IFNET_IS_NON_LOCAL) && (ifflags & NSTAT_IFNET_IS_LOCAL)) { |
6956 | continue; |
6957 | } |
6958 | if ((filter_flags & NSTAT_IFNET_IS_LOCAL) && (ifflags & NSTAT_IFNET_IS_NON_LOCAL)) { |
6959 | continue; |
6960 | } |
6961 | } |
6962 | |
6963 | nstat_progress_digest digest; |
			bzero(&digest, sizeof(digest));
6965 | bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, &digest, NULL, NULL); |
6966 | |
6967 | error = (result)? 0 : EIO; |
6968 | if (error) { |
6969 | printf("%s - nstat get progressdigest returned %d\n" , __func__, error); |
6970 | continue; |
6971 | } |
6972 | if ((digest.ifindex == (u_int32_t)ifindex) || |
6973 | (filter_flags & ifflags)) { |
6974 | #if NSTAT_DEBUG |
6975 | printf("%s - *matched Skywalk* [filter match: %x %x]\n" , __func__, filter_flags, flags); |
6976 | #endif |
6977 | indicators->xp_numflows++; |
6978 | if (digest.connstatus.write_probe_failed) { |
6979 | indicators->xp_write_probe_fails++; |
6980 | } |
6981 | if (digest.connstatus.read_probe_failed) { |
6982 | indicators->xp_read_probe_fails++; |
6983 | } |
6984 | if (digest.connstatus.conn_probe_failed) { |
6985 | indicators->xp_conn_probe_fails++; |
6986 | } |
6987 | if (shad->shad_start_timestamp > min_recent_start_time) { |
6988 | indicators->xp_recentflows++; |
6989 | indicators->xp_recentflows_rxbytes += digest.rxbytes; |
6990 | indicators->xp_recentflows_txbytes += digest.txbytes; |
6991 | indicators->xp_recentflows_rxooo += digest.rxoutoforderbytes; |
6992 | indicators->xp_recentflows_rxdup += digest.rxduplicatebytes; |
6993 | indicators->xp_recentflows_retx += digest.txretransmit; |
6994 | if (digest.txunacked) { |
6995 | indicators->xp_recentflows_unacked++; |
6996 | } |
6997 | } |
6998 | } |
6999 | } |
7000 | } |
7001 | |
	lck_mtx_unlock(&nstat_mtx);
7003 | |
7004 | #endif /* SKYWALK */ |
7005 | return error; |
7006 | } |
7007 | |
7008 | |
7009 | static int |
7010 | tcp_progress_probe_enable_for_interface(unsigned int ifindex, uint32_t filter_flags, uint32_t enable_flags) |
7011 | { |
7012 | int error = 0; |
7013 | struct ifnet *ifp; |
7014 | |
7015 | #if NSTAT_DEBUG |
7016 | printf("%s - for interface index %u with flags %d\n" , __func__, ifindex, filter_flags); |
7017 | #endif |
7018 | |
7019 | ifnet_head_lock_shared(); |
7020 | TAILQ_FOREACH(ifp, &ifnet_head, if_link) |
7021 | { |
7022 | if ((ifp->if_index == ifindex) || |
7023 | nstat_interface_matches_filter_flag(filter_flags, ifp)) { |
7024 | #if NSTAT_DEBUG |
7025 | printf("%s - *matched* interface index %d, enable: %d\n" , __func__, ifp->if_index, enable_flags); |
7026 | #endif |
			error = if_probe_connectivity(ifp, enable_flags);
7028 | if (error) { |
7029 | printf("%s (%d) - nstat set tcp probe %d for interface index %d\n" , __func__, error, enable_flags, ifp->if_index); |
7030 | } |
7031 | } |
7032 | } |
7033 | ifnet_head_done(); |
7034 | |
7035 | return error; |
7036 | } |
7037 | |
7038 | |
7039 | __private_extern__ int |
7040 | ntstat_tcp_progress_indicators(struct sysctl_req *req) |
7041 | { |
7042 | struct xtcpprogress_indicators indicators = {}; |
7043 | int error = 0; |
7044 | struct tcpprogressreq requested; |
7045 | |
	if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
7047 | return EACCES; |
7048 | } |
7049 | if (req->newptr == USER_ADDR_NULL) { |
7050 | return EINVAL; |
7051 | } |
	if (req->newlen < sizeof(requested)) {
7053 | return EINVAL; |
7054 | } |
7055 | error = SYSCTL_IN(req, &requested, sizeof(requested)); |
7056 | if (error != 0) { |
7057 | return error; |
7058 | } |
	error = tcp_progress_indicators_for_interface((unsigned int)requested.ifindex, requested.recentflow_maxduration, (uint32_t)requested.filter_flags, &indicators);
7060 | if (error != 0) { |
7061 | return error; |
7062 | } |
7063 | error = SYSCTL_OUT(req, &indicators, sizeof(indicators)); |
7064 | |
7065 | return error; |
7066 | } |
7067 | |
7068 | |
7069 | __private_extern__ int |
7070 | ntstat_tcp_progress_enable(struct sysctl_req *req) |
7071 | { |
7072 | int error = 0; |
7073 | struct tcpprobereq requested; |
7074 | |
	if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0) {
7076 | return EACCES; |
7077 | } |
7078 | if (req->newptr == USER_ADDR_NULL) { |
7079 | return EINVAL; |
7080 | } |
	if (req->newlen < sizeof(requested)) {
7082 | return EINVAL; |
7083 | } |
7084 | error = SYSCTL_IN(req, &requested, sizeof(requested)); |
7085 | if (error != 0) { |
7086 | return error; |
7087 | } |
	error = tcp_progress_probe_enable_for_interface((unsigned int)requested.ifindex, (uint32_t)requested.filter_flags, (uint32_t)requested.enable);
7089 | |
7090 | return error; |
7091 | } |
7092 | |
7093 | |
7094 | #if SKYWALK |
7095 | |
7096 | #pragma mark -- netstat support for user level providers -- |
7097 | |
7098 | typedef struct nstat_flow_data { |
7099 | nstat_counts counts; |
7100 | union { |
7101 | nstat_udp_descriptor udp_descriptor; |
7102 | nstat_tcp_descriptor tcp_descriptor; |
7103 | } flow_descriptor; |
7104 | } nstat_flow_data; |
7105 | |
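/*
 * Snapshot of a single userland flow: the counts plus a descriptor union
 * sized for the larger of the TCP and UDP descriptors. Callers of
 * nstat_gather_flow_data() are expected to hold nstat_mtx so the shadow
 * list cannot change while it is walked.
 */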
7106 | static int |
7107 | nstat_gather_flow_data(nstat_provider_id_t provider, nstat_flow_data *flow_data, int n) |
7108 | { |
7109 | struct nstat_tu_shadow *shad; |
7110 | int prepared = 0; |
7111 | errno_t err; |
7112 | |
7113 | TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) { |
7114 | assert(shad->shad_magic == TU_SHADOW_MAGIC); |
7115 | |
7116 | if ((shad->shad_provider == provider) && (shad->shad_live)) { |
7117 | if (prepared >= n) { |
7118 | break; |
7119 | } |
			err = nstat_userland_tu_copy_descriptor((nstat_provider_cookie_t) shad,
			    &flow_data->flow_descriptor, sizeof(flow_data->flow_descriptor));
7122 | |
7123 | if (err != 0) { |
7124 | printf("%s - nstat_userland_tu_copy_descriptor returned %d\n" , __func__, err); |
7125 | } |
			err = nstat_userland_tu_counts((nstat_provider_cookie_t) shad,
			    &flow_data->counts, NULL);
7128 | if (err != 0) { |
7129 | printf("%s - nstat_userland_tu_counts returned %d\n" , __func__, err); |
7130 | } |
7131 | flow_data++; |
7132 | prepared++; |
7133 | } |
7134 | } |
7135 | return prepared; |
7136 | } |
7137 | |
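/*
 * The conversion helpers below synthesize the xinpcb_n/xsocket_n/xsockbuf_n/
 * xsockstat_n (and, for TCP, xtcpcb_n) records emitted by get_pcblist_n()
 * style sysctls, populating them from userland flow descriptors so that
 * tools such as netstat can display these flows alongside kernel sockets.
 */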
7138 | static void |
7139 | nstat_userland_to_xinpcb_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xinpcb_n *xinp) |
7140 | { |
7141 | xinp->xi_len = sizeof(struct xinpcb_n); |
7142 | xinp->xi_kind = XSO_INPCB; |
7143 | |
7144 | if (provider == NSTAT_PROVIDER_TCP_USERLAND) { |
7145 | nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor; |
7146 | struct sockaddr_in *sa = &desc->local.v4; |
7147 | if (sa->sin_family == AF_INET) { |
7148 | xinp->inp_vflag = INP_IPV4; |
7149 | xinp->inp_laddr = desc->local.v4.sin_addr; |
7150 | xinp->inp_lport = desc->local.v4.sin_port; |
7151 | xinp->inp_faddr = desc->remote.v4.sin_addr; |
7152 | xinp->inp_fport = desc->remote.v4.sin_port; |
7153 | } else if (sa->sin_family == AF_INET6) { |
7154 | xinp->inp_vflag = INP_IPV6; |
7155 | xinp->in6p_laddr = desc->local.v6.sin6_addr; |
7156 | xinp->in6p_lport = desc->local.v6.sin6_port; |
7157 | xinp->in6p_faddr = desc->remote.v6.sin6_addr; |
7158 | xinp->in6p_fport = desc->remote.v6.sin6_port; |
7159 | } |
7160 | } else if (provider == NSTAT_PROVIDER_UDP_USERLAND) { |
7161 | nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor; |
7162 | struct sockaddr_in *sa = &desc->local.v4; |
7163 | if (sa->sin_family == AF_INET) { |
7164 | xinp->inp_vflag = INP_IPV4; |
7165 | xinp->inp_laddr = desc->local.v4.sin_addr; |
7166 | xinp->inp_lport = desc->local.v4.sin_port; |
7167 | xinp->inp_faddr = desc->remote.v4.sin_addr; |
7168 | xinp->inp_fport = desc->remote.v4.sin_port; |
7169 | } else if (sa->sin_family == AF_INET6) { |
7170 | xinp->inp_vflag = INP_IPV6; |
7171 | xinp->in6p_laddr = desc->local.v6.sin6_addr; |
7172 | xinp->in6p_lport = desc->local.v6.sin6_port; |
7173 | xinp->in6p_faddr = desc->remote.v6.sin6_addr; |
7174 | xinp->in6p_fport = desc->remote.v6.sin6_port; |
7175 | } |
7176 | } |
7177 | } |
7178 | |
7179 | static void |
7180 | nstat_userland_to_xsocket_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsocket_n *xso) |
7181 | { |
7182 | xso->xso_len = sizeof(struct xsocket_n); |
7183 | xso->xso_kind = XSO_SOCKET; |
7184 | |
7185 | if (provider == NSTAT_PROVIDER_TCP_USERLAND) { |
7186 | nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor; |
7187 | xso->xso_protocol = IPPROTO_TCP; |
7188 | xso->so_e_pid = desc->epid; |
7189 | xso->so_last_pid = desc->pid; |
7190 | } else { |
7191 | nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor; |
7192 | xso->xso_protocol = IPPROTO_UDP; |
7193 | xso->so_e_pid = desc->epid; |
7194 | xso->so_last_pid = desc->pid; |
7195 | } |
7196 | } |
7197 | |
7198 | static void |
7199 | nstat_userland_to_rcv_xsockbuf_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsockbuf_n *xsbrcv) |
7200 | { |
7201 | xsbrcv->xsb_len = sizeof(struct xsockbuf_n); |
7202 | xsbrcv->xsb_kind = XSO_RCVBUF; |
7203 | |
7204 | if (provider == NSTAT_PROVIDER_TCP_USERLAND) { |
7205 | nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor; |
7206 | xsbrcv->sb_hiwat = desc->rcvbufsize; |
7207 | xsbrcv->sb_cc = desc->rcvbufused; |
7208 | } else { |
7209 | nstat_udp_descriptor *desc = &flow_data->flow_descriptor.udp_descriptor; |
7210 | xsbrcv->sb_hiwat = desc->rcvbufsize; |
7211 | xsbrcv->sb_cc = desc->rcvbufused; |
7212 | } |
7213 | } |
7214 | |
7215 | static void |
7216 | nstat_userland_to_snd_xsockbuf_n(nstat_provider_id_t provider, nstat_flow_data *flow_data, struct xsockbuf_n *xsbsnd) |
7217 | { |
7218 | xsbsnd->xsb_len = sizeof(struct xsockbuf_n); |
7219 | xsbsnd->xsb_kind = XSO_SNDBUF; |
7220 | |
7221 | if (provider == NSTAT_PROVIDER_TCP_USERLAND) { |
7222 | nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor; |
7223 | xsbsnd->sb_hiwat = desc->sndbufsize; |
7224 | xsbsnd->sb_cc = desc->sndbufused; |
7225 | } else { |
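		// The UDP descriptor carries no send buffer statistics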
7226 | } |
7227 | } |
7228 | |
static void
nstat_userland_to_xsockstat_n(nstat_flow_data *flow_data, struct xsockstat_n *xst)
{
	xst->xst_len = sizeof(struct xsockstat_n);
	xst->xst_kind = XSO_STATS;

	// The kernel version supports an array of counts; here we support only
	// one and map the totals to the first entry.
	xst->xst_tc_stats[0].rxpackets = flow_data->counts.nstat_rxpackets;
	xst->xst_tc_stats[0].rxbytes = flow_data->counts.nstat_rxbytes;
	xst->xst_tc_stats[0].txpackets = flow_data->counts.nstat_txpackets;
	xst->xst_tc_stats[0].txbytes = flow_data->counts.nstat_txbytes;
}

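// Fill in the xtcpcb_n for a userland TCP flow. Only the connection state,
// send window, and congestion window are available from the descriptor; the
// remaining fields stay zero from the caller's bzero() of the record buffer.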
static void
nstat_userland_to_xtcpcb_n(nstat_flow_data *flow_data, struct xtcpcb_n *xt)
{
	nstat_tcp_descriptor *desc = &flow_data->flow_descriptor.tcp_descriptor;
	xt->xt_len = sizeof(struct xtcpcb_n);
	xt->xt_kind = XSO_TCPCB;
	xt->t_state = desc->state;
	xt->snd_wnd = desc->txwindow;
	xt->snd_cwnd = desc->txcwindow;
}

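// Return the current number of userland flow shadows for the given protocol.
// The counters are read without holding nstat_mtx, so the result is only an
// estimate, suitable for sysctl size hints.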
__private_extern__ int
ntstat_userland_count(short proto)
{
	int n = 0;
	if (proto == IPPROTO_TCP) {
		n = nstat_userland_tcp_shadows;
	} else if (proto == IPPROTO_UDP) {
		n = nstat_userland_udp_shadows;
	}
	return n;
}

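// Allocate and fill an array of nstat_flow_data for the given protocol while
// holding nstat_mtx. On success, *snapshotp and *countp describe the snapshot;
// the caller must free it with nstat_userland_release_snapshot().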
__private_extern__ int
nstat_userland_get_snapshot(short proto, void **snapshotp, int *countp)
{
	int error = 0;
	int n = 0;
	nstat_provider_id_t provider;
	nstat_flow_data *flow_data = NULL;

	lck_mtx_lock(&nstat_mtx);
	if (proto == IPPROTO_TCP) {
		n = nstat_userland_tcp_shadows;
		provider = NSTAT_PROVIDER_TCP_USERLAND;
	} else if (proto == IPPROTO_UDP) {
		n = nstat_userland_udp_shadows;
		provider = NSTAT_PROVIDER_UDP_USERLAND;
	}
	if (n == 0) {
		goto done;
	}

	flow_data = (nstat_flow_data *) kalloc_data(n * sizeof(*flow_data),
	    Z_WAITOK | Z_ZERO);
	if (flow_data) {
		n = nstat_gather_flow_data(provider, flow_data, n);
	} else {
		error = ENOMEM;
	}
done:
	lck_mtx_unlock(&nstat_mtx);
	*snapshotp = flow_data;
	*countp = n;
	return error;
}

// nstat_userland_list_snapshot() does most of the work for a sysctl that
// returns data in the same format as get_pcblist_n(), even though the vast
// majority of the fields are unused. The caller is responsible for emitting
// additional items in the sysctl output before and after the data added by
// this function.
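//
// Each record written to the output is a sequence of 64-bit-aligned
// structures carved out of a single item_size buffer:
//
//	xinpcb_n | xsocket_n | xsockbuf_n (rcv) | xsockbuf_n (snd) | xsockstat_n [| xtcpcb_n]
//
// The trailing xtcpcb_n is appended only for TCP flows.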
__private_extern__ int
nstat_userland_list_snapshot(short proto, struct sysctl_req *req, void *userlandsnapshot, int n)
{
	int error = 0;
	int i;
	nstat_provider_id_t provider;
	void *buf = NULL;
	nstat_flow_data *flow_data, *flow_data_array = NULL;
	size_t item_size = ROUNDUP64(sizeof(struct xinpcb_n)) +
	    ROUNDUP64(sizeof(struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
	    ROUNDUP64(sizeof(struct xsockstat_n));

	if ((n == 0) || (userlandsnapshot == NULL)) {
		goto done;
	}

	if (proto == IPPROTO_TCP) {
		item_size += ROUNDUP64(sizeof(struct xtcpcb_n));
		provider = NSTAT_PROVIDER_TCP_USERLAND;
	} else if (proto == IPPROTO_UDP) {
		provider = NSTAT_PROVIDER_UDP_USERLAND;
	} else {
		error = EINVAL;
		goto done;
	}

	buf = (void *) kalloc_data(item_size, Z_WAITOK);
	if (buf) {
		struct xinpcb_n *xi = (struct xinpcb_n *)buf;
		struct xsocket_n *xso = (struct xsocket_n *) ADVANCE64(xi, sizeof(*xi));
		struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *) ADVANCE64(xso, sizeof(*xso));
		struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *) ADVANCE64(xsbrcv, sizeof(*xsbrcv));
		struct xsockstat_n *xsostats = (struct xsockstat_n *) ADVANCE64(xsbsnd, sizeof(*xsbsnd));
		struct xtcpcb_n *xt = (struct xtcpcb_n *) ADVANCE64(xsostats, sizeof(*xsostats));

		flow_data_array = (nstat_flow_data *)userlandsnapshot;

		for (i = 0; i < n; i++) {
			flow_data = &flow_data_array[i];
			bzero(buf, item_size);

			nstat_userland_to_xinpcb_n(provider, flow_data, xi);
			nstat_userland_to_xsocket_n(provider, flow_data, xso);
			nstat_userland_to_rcv_xsockbuf_n(provider, flow_data, xsbrcv);
			nstat_userland_to_snd_xsockbuf_n(provider, flow_data, xsbsnd);
			nstat_userland_to_xsockstat_n(flow_data, xsostats);
			if (proto == IPPROTO_TCP) {
				nstat_userland_to_xtcpcb_n(flow_data, xt);
			}
			error = SYSCTL_OUT(req, buf, item_size);
			if (error) {
				break;
			}
		}
		kfree_data(buf, item_size);
	} else {
		error = ENOMEM;
	}
done:
	return error;
}

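// Release a snapshot obtained from nstat_userland_get_snapshot(). Safe to
// call with a NULL snapshot, as happens when there were no flows to report.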
__private_extern__ void
nstat_userland_release_snapshot(void *snapshot, int nuserland)
{
	if (snapshot != NULL) {
		kfree_data(snapshot, nuserland * sizeof(nstat_flow_data));
	}
}

#if NTSTAT_SUPPORTS_STANDALONE_SYSCTL

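// ntstat_userland_list_n() implements the standalone sysctl handler. It emits
// an xinpgen header, one record per flow (laid out as described above), and a
// trailing xinpgen, following the convention of the kernel PCB-list sysctls.
//
// A minimal sketch of a userland consumer, assuming a hypothetical OID name
// (the string below is illustrative only, not the real identifier):
//
//	size_t len = 0;
//	if (sysctlbyname("net.example.nstat_userland_tcp", NULL, &len, NULL, 0) == 0) {
//		void *buf = malloc(len);
//		if (buf != NULL &&
//		    sysctlbyname("net.example.nstat_userland_tcp", buf, &len, NULL, 0) == 0) {
//			struct xinpgen *xig = (struct xinpgen *)buf;
//			// xig->xig_count records follow the header, each a
//			// packed run of the 64-bit-aligned structures above
//		}
//		free(buf);
//	}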
__private_extern__ int
ntstat_userland_list_n(short proto, struct sysctl_req *req)
{
	int error = 0;
	int n = 0;
	struct xinpgen xig;
	void *snapshot = NULL;
	size_t item_size = ROUNDUP64(sizeof(struct xinpcb_n)) +
	    ROUNDUP64(sizeof(struct xsocket_n)) +
	    2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
	    ROUNDUP64(sizeof(struct xsockstat_n));

	if (proto == IPPROTO_TCP) {
		item_size += ROUNDUP64(sizeof(struct xtcpcb_n));
	}

	if (req->oldptr == USER_ADDR_NULL) {
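		// Size estimate only: two xinpgen structs bracket the records,
		// with one extra record plus n/8 slack to cover flows created
		// between this estimate and the actual fetch.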
		n = ntstat_userland_count(proto);
		req->oldidx = 2 * (sizeof(xig)) + (n + 1 + n / 8) * item_size;
		goto done;
	}

	if (req->newptr != USER_ADDR_NULL) {
		error = EPERM;
		goto done;
	}

	error = nstat_userland_get_snapshot(proto, &snapshot, &n);

	if (error) {
		goto done;
	}

	bzero(&xig, sizeof(xig));
	xig.xig_len = sizeof(xig);
	xig.xig_gen = 0;
	xig.xig_sogen = 0;
	xig.xig_count = n;
	error = SYSCTL_OUT(req, &xig, sizeof(xig));
	if (error) {
		goto done;
	}
	/*
	 * We are done if there are no flows
	 */
	if (n == 0) {
		goto done;
	}

	error = nstat_userland_list_snapshot(proto, req, snapshot, n);

	if (!error) {
		/*
		 * Give the user an updated idea of our state,
		 * which is unchanged
		 */
		error = SYSCTL_OUT(req, &xig, sizeof(xig));
	}
done:
	nstat_userland_release_snapshot(snapshot, n);
	return error;
}

#endif /* NTSTAT_SUPPORTS_STANDALONE_SYSCTL */
#endif /* SKYWALK */