/*
 * Copyright (c) 2010-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/param.h>
#include <sys/types.h>
#include <sys/kpi_mbuf.h>
#include <sys/socket.h>
#include <sys/kern_control.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/priv.h>
#include <sys/protosw.h>
#include <sys/persona.h>

#include <kern/clock.h>
#include <kern/debug.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/locks.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/dlil.h>

// These includes appear in ntstat.h but we include them here first so they won't trigger
// any clang diagnostic errors.
#include <netinet/in.h>
#include <netinet/in_stat.h>
#include <netinet/tcp.h>

#pragma clang diagnostic push
#pragma clang diagnostic error "-Wpadded"
#pragma clang diagnostic error "-Wpacked"
// This header defines structures shared with user space, so we need to ensure there is
// no compiler inserted padding in case the user space process isn't using the same
// architecture as the kernel (example: i386 process with x86_64 kernel).
#include <net/ntstat.h>
#pragma clang diagnostic pop

#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_cc.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>

#include <net/sockaddr_utils.h>

__private_extern__ int nstat_collect = 1;

#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_collect, 0, "Collect detailed statistics");
#endif /* (DEBUG || DEVELOPMENT) */

#if !XNU_TARGET_OS_OSX
static int nstat_privcheck = 1;
#else /* XNU_TARGET_OS_OSX */
static int nstat_privcheck = 0;
#endif /* XNU_TARGET_OS_OSX */
SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_privcheck, 0, "Entitlement check");

SYSCTL_NODE(_net, OID_AUTO, stats,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "network statistics");

static int nstat_debug = 0;
SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug, 0, "");

static int nstat_debug_pid = 0; // Only log socket level debug for specified pid
SYSCTL_INT(_net_stats, OID_AUTO, debug_pid, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_debug_pid, 0, "");

static int nstat_sendspace = 2048;
SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_sendspace, 0, "");

static int nstat_recvspace = 8192;
SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
    &nstat_recvspace, 0, "");

static struct nstat_stats nstat_stats;
SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nstat_stats, nstat_stats, "");

static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */
static u_int32_t nstat_lim_min_tx_pkts = 100;
static u_int32_t nstat_lim_min_rx_pkts = 100;
#if (DEBUG || DEVELOPMENT)
SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
    "Low internet stat report interval");

SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
    "Low Internet, min transmit packets threshold");

SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
    "Low Internet, min receive packets threshold");
#endif /* DEBUG || DEVELOPMENT */

static struct net_api_stats net_api_stats_before;
static u_int64_t net_api_stats_last_report_time;
#define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL;

#if (DEBUG || DEVELOPMENT)
SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
#endif /* DEBUG || DEVELOPMENT */

#define NSTAT_DEBUG_SOCKET_PID_MATCHED(so) \
    (so && (nstat_debug_pid == (so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid)))

#define NSTAT_DEBUG_SOCKET_ON(so) \
    ((nstat_debug && (!nstat_debug_pid || NSTAT_DEBUG_SOCKET_PID_MATCHED(so))) ? nstat_debug : 0)

#define NSTAT_DEBUG_SOCKET_LOG(so, fmt, ...) \
    if (NSTAT_DEBUG_SOCKET_ON(so)) { \
        printf("NSTAT_DEBUG_SOCKET <pid %d>: " fmt "\n", (so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid), ##__VA_ARGS__); \
    }
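
// Usage sketch (illustrative, not part of the upstream source): with the
// net.stats.debug sysctl set, and net.stats.debug_pid either 0 or matching the
// socket's effective pid, a call such as
//     NSTAT_DEBUG_SOCKET_LOG(so, "detach, refcnt %u", refcnt);
// expands to a printf tagged with the owning pid; otherwise it compiles to a
// predicate that evaluates to nothing being logged.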

enum {
    NSTAT_FLAG_CLEANUP = (1 << 0),
    NSTAT_FLAG_REQCOUNTS = (1 << 1),
    NSTAT_FLAG_SUPPORTS_UPDATES = (1 << 2),
    NSTAT_FLAG_SYSINFO_SUBSCRIBED = (1 << 3),
};

#if !XNU_TARGET_OS_OSX
#define QUERY_CONTINUATION_SRC_COUNT 50
#else /* XNU_TARGET_OS_OSX */
#define QUERY_CONTINUATION_SRC_COUNT 100
#endif /* XNU_TARGET_OS_OSX */

#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif

#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif
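
// Worked example (illustrative): ROUNDUP64 pads a length up to the next multiple
// of eight bytes, so ROUNDUP64(5) == 8 and ROUNDUP64(16) == 16. ADVANCE64(p, 5)
// therefore yields p + 8, which keeps successive variable-length records 64-bit
// aligned when they are packed into a single buffer.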

typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src;
typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src;

typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;

typedef TAILQ_HEAD(, nstat_generic_shadow) tailq_head_generic_shadow;
typedef TAILQ_ENTRY(nstat_generic_shadow) tailq_entry_generic_shadow;

typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails;
typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails;

struct nstat_procdetails {
    tailq_entry_procdetails pdet_link;
    int pdet_pid;
    u_int64_t pdet_upid;
    char pdet_procname[64];
    uuid_t pdet_uuid;
    u_int32_t pdet_refcnt;
    u_int32_t pdet_magic;
};

typedef struct nstat_provider_filter {
    u_int64_t npf_flags;
    u_int64_t npf_events;
    u_int64_t npf_extensions;
    pid_t npf_pid;
    uuid_t npf_uuid;
} nstat_provider_filter;

typedef struct nstat_control_state {
    struct nstat_control_state *ncs_next;
    /* A bitmask of the providers for which this client has done NSTAT_MSG_TYPE_ADD_ALL_SRCS */
    u_int32_t ncs_watching;
    /* A bitmask of the providers for which this client has done NSTAT_MSG_TYPE_ADD_SRC */
    u_int32_t ncs_added_src;
    decl_lck_mtx_data(, ncs_mtx);
    kern_ctl_ref ncs_kctl;
    u_int32_t ncs_unit;
    nstat_src_ref_t ncs_next_srcref;
    tailq_head_nstat_src ncs_src_queue;
    mbuf_t ncs_accumulated;
    u_int32_t ncs_flags;
    nstat_provider_filter ncs_provider_filters[NSTAT_PROVIDER_COUNT];
    /* state maintained for partial query requests */
    u_int64_t ncs_context;
    u_int64_t ncs_seq;
    /* For ease of debugging with lldb macros */
    struct nstat_procdetails *ncs_procdetails;
} nstat_control_state;

typedef struct nstat_provider {
    struct nstat_provider *next;
    nstat_provider_id_t nstat_provider_id;
    size_t nstat_descriptor_length;
    errno_t (*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
    int (*nstat_gone)(nstat_provider_cookie_t cookie);
    errno_t (*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
    errno_t (*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req);
    void (*nstat_watcher_remove)(nstat_control_state *state);
    errno_t (*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, size_t len);
    void (*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
    bool (*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, u_int64_t suppression_flags);
    bool (*nstat_cookie_equal)(nstat_provider_cookie_t cookie1, nstat_provider_cookie_t cookie2);
    size_t (*nstat_copy_extension)(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len);
} nstat_provider;

typedef struct nstat_src {
    tailq_entry_nstat_src ns_control_link; // All sources for the nstat_control_state, for iterating over.
    nstat_control_state *ns_control; // The nstat_control_state that this is a source for
    nstat_src_ref_t srcref;
    nstat_provider *provider;
    nstat_provider_cookie_t cookie;
    uint32_t filter;
    bool ns_reported; // At least one update/counts/desc message has been sent
    uint64_t seq;
} nstat_src;

// The merge structures are intended to give a global picture of what may be asked for by the current set of clients
// This is to avoid taking locks to check them all individually
typedef struct nstat_merged_provider_filter {
    u_int64_t mf_events; // So far we only merge the events portion of any filters
} nstat_merged_provider_filter;

typedef struct nstat_merged_provider_filters {
    nstat_merged_provider_filter mpf_filters[NSTAT_PROVIDER_COUNT];
} nstat_merged_provider_filters;

static errno_t nstat_control_send_counts(nstat_control_state *, nstat_src *, unsigned long long, u_int16_t, int *);
static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int64_t event, u_int16_t hdr_flags, int *gone);
static errno_t nstat_control_send_removed(nstat_control_state *state, nstat_src *src, u_int16_t hdr_flags);
static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src, u_int64_t suppression_flags);
static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
static void nstat_ifnet_report_ecn_stats(void);
static void nstat_ifnet_report_lim_stats(void);
static void nstat_net_api_report_stats(void);
static errno_t nstat_set_provider_filter(nstat_control_state *state, nstat_msg_add_all_srcs *req);
static errno_t nstat_control_send_event(nstat_control_state *state, nstat_src *src, u_int64_t event);

static u_int32_t nstat_udp_watchers = 0;
static u_int32_t nstat_tcp_watchers = 0;
static nstat_merged_provider_filters merged_filters = {};

static void nstat_control_register(void);

/*
 * The lock order is as follows:
 *
 *     socket_lock (inpcb)
 *         nstat_mtx
 *             state->ncs_mtx
 */
static nstat_control_state *nstat_controls = NULL;
static uint64_t nstat_idle_time = 0;
static LCK_GRP_DECLARE(nstat_lck_grp, "network statistics kctl");
static LCK_MTX_DECLARE(nstat_mtx, &nstat_lck_grp);
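
// Illustrative nesting (drawn from the paths below, not an exhaustive list):
// nstat_tcp_new_pcb() takes the inpcb's socket_lock and then nstat_mtx to walk
// nstat_controls, while nstat_pcb_detach() takes state->ncs_mtx innermost while
// holding nstat_mtx; always acquiring in this order, and releasing in reverse,
// is what keeps these paths deadlock-free.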


/* some extern definitions */
extern void tcp_report_stats(void);

static void
nstat_copy_sa_out(
    const struct sockaddr *src,
    struct sockaddr *dst,
    int maxlen)
{
    if (src->sa_len > maxlen) {
        return;
    }

    SOCKADDR_COPY(src, dst, src->sa_len);
    if (src->sa_family == AF_INET6 &&
        src->sa_len >= sizeof(struct sockaddr_in6)) {
        struct sockaddr_in6 *sin6 = SIN6(dst);
        if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr)) {
            sin6->sin6_scope_id = (SIN6(src))->sin6_scope_id;
            if (in6_embedded_scope) {
                in6_verify_ifscope(&sin6->sin6_addr, sin6->sin6_scope_id);
                sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
                sin6->sin6_addr.s6_addr16[1] = 0;
            }
        }
    }
}

static void
nstat_ip_to_sockaddr(
    const struct in_addr *ip,
    u_int16_t port,
    struct sockaddr_in *sin,
    u_int32_t maxlen)
{
    if (maxlen < sizeof(struct sockaddr_in)) {
        return;
    }

    sin->sin_family = AF_INET;
    sin->sin_len = sizeof(*sin);
    sin->sin_port = port;
    sin->sin_addr = *ip;
}

u_int32_t
nstat_ifnet_to_flags(
    struct ifnet *ifp)
{
    u_int32_t flags = 0;
    u_int32_t functional_type = if_functional_type(ifp, FALSE);

    /* Panic if someone adds a functional type without updating ntstat. */
    VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);

    switch (functional_type) {
    case IFRTYPE_FUNCTIONAL_UNKNOWN:
        flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
        break;
    case IFRTYPE_FUNCTIONAL_LOOPBACK:
        flags |= NSTAT_IFNET_IS_LOOPBACK;
        break;
    case IFRTYPE_FUNCTIONAL_WIRED:
    case IFRTYPE_FUNCTIONAL_INTCOPROC:
    case IFRTYPE_FUNCTIONAL_MANAGEMENT:
        flags |= NSTAT_IFNET_IS_WIRED;
        break;
    case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
        flags |= NSTAT_IFNET_IS_WIFI | NSTAT_IFNET_IS_WIFI_INFRA;
        break;
    case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
        flags |= NSTAT_IFNET_IS_WIFI | NSTAT_IFNET_IS_AWDL;
        break;
    case IFRTYPE_FUNCTIONAL_CELLULAR:
        flags |= NSTAT_IFNET_IS_CELLULAR;
        break;
    case IFRTYPE_FUNCTIONAL_COMPANIONLINK:
        flags |= NSTAT_IFNET_IS_COMPANIONLINK;
        break;
    }

    if (IFNET_IS_EXPENSIVE(ifp)) {
        flags |= NSTAT_IFNET_IS_EXPENSIVE;
    }
    if (IFNET_IS_CONSTRAINED(ifp)) {
        flags |= NSTAT_IFNET_IS_CONSTRAINED;
    }
    if (ifp->if_xflags & IFXF_LOW_LATENCY) {
        flags |= NSTAT_IFNET_IS_WIFI | NSTAT_IFNET_IS_LLW;
    }

    return flags;
}

static void
nstat_update_local_flag_from_inpcb_route(const struct inpcb *inp,
    u_int32_t *flags)
{
    if (inp != NULL &&
        ((inp->inp_route.ro_rt != NULL &&
        IS_LOCALNET_ROUTE(inp->inp_route.ro_rt)) ||
        (inp->inp_flags2 & INP2_LAST_ROUTE_LOCAL))) {
        *flags |= NSTAT_IFNET_IS_LOCAL;
    } else {
        *flags |= NSTAT_IFNET_IS_NON_LOCAL;
    }
}

static u_int32_t
nstat_inpcb_to_flags(
    const struct inpcb *inp)
{
    u_int32_t flags = 0;

    if (inp != NULL) {
        if (inp->inp_last_outifp != NULL) {
            struct ifnet *ifp = inp->inp_last_outifp;
            flags = nstat_ifnet_to_flags(ifp);

            struct tcpcb *tp = intotcpcb(inp);
            if (tp) {
                if (tp->t_flags & TF_LOCAL) {
                    flags |= NSTAT_IFNET_IS_LOCAL;
                } else {
                    flags |= NSTAT_IFNET_IS_NON_LOCAL;
                }
            } else {
                nstat_update_local_flag_from_inpcb_route(inp, &flags);
            }
        } else {
            flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
            nstat_update_local_flag_from_inpcb_route(inp, &flags);
        }
        if (inp->inp_socket != NULL &&
            (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK)) {
            flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
        }
    }
    return flags;
}

static void
merge_current_event_filters(void)
{
    // The nstat_mtx is assumed locked
    nstat_merged_provider_filters new_merge = {};
    nstat_provider_type_t provider;
    nstat_control_state *state;

    for (state = nstat_controls; state; state = state->ncs_next) {
        for (provider = NSTAT_PROVIDER_NONE; provider <= NSTAT_PROVIDER_LAST; provider++) {
            new_merge.mpf_filters[provider].mf_events |= state->ncs_provider_filters[provider].npf_events;
        }
    }
    for (provider = NSTAT_PROVIDER_NONE; provider <= NSTAT_PROVIDER_LAST; provider++) {
        // This should do atomic updates of the 64 bit words, where memcpy would be undefined
        merged_filters.mpf_filters[provider].mf_events = new_merge.mpf_filters[provider].mf_events;
    }
}


#pragma mark -- Network Statistic Providers --

static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
struct nstat_provider *nstat_providers = NULL;

static struct nstat_provider*
nstat_find_provider_by_id(
    nstat_provider_id_t id)
{
    struct nstat_provider *provider;

    for (provider = nstat_providers; provider != NULL; provider = provider->next) {
        if (provider->nstat_provider_id == id) {
            break;
        }
    }

    return provider;
}

static errno_t
nstat_lookup_entry(
    nstat_provider_id_t id,
    const void *data,
    u_int32_t length,
    nstat_provider **out_provider,
    nstat_provider_cookie_t *out_cookie)
{
    *out_provider = nstat_find_provider_by_id(id);
    if (*out_provider == NULL) {
        return ENOENT;
    }

    return (*out_provider)->nstat_lookup(data, length, out_cookie);
}

static void
nstat_control_sanitize_cookie(
    nstat_control_state *state,
    nstat_provider_id_t id,
    nstat_provider_cookie_t cookie)
{
    nstat_src *src = NULL;

    // Scan the source list to find any duplicate entry and remove it.
    lck_mtx_lock(&state->ncs_mtx);
    TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
    {
        nstat_provider *sp = src->provider;
        if (sp->nstat_provider_id == id &&
            sp->nstat_cookie_equal != NULL &&
            sp->nstat_cookie_equal(src->cookie, cookie)) {
            break;
        }
    }
    if (src) {
        nstat_control_send_goodbye(state, src);
        TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
    }
    lck_mtx_unlock(&state->ncs_mtx);

    if (src) {
        nstat_control_cleanup_source(NULL, src, TRUE);
    }
}

static void nstat_init_route_provider(void);
static void nstat_init_tcp_provider(void);
static void nstat_init_udp_provider(void);
#if SKYWALK
static void nstat_init_userland_tcp_provider(void);
static void nstat_init_userland_udp_provider(void);
static void nstat_init_userland_quic_provider(void);
#endif /* SKYWALK */
static void nstat_init_userland_conn_provider(void);
static void nstat_init_udp_subflow_provider(void);
static void nstat_init_ifnet_provider(void);

__private_extern__ void
nstat_init(void)
{
    nstat_init_route_provider();
    nstat_init_tcp_provider();
    nstat_init_udp_provider();
#if SKYWALK
    nstat_init_userland_tcp_provider();
    nstat_init_userland_udp_provider();
    nstat_init_userland_quic_provider();
#endif /* SKYWALK */
    nstat_init_userland_conn_provider();
    nstat_init_udp_subflow_provider();
    nstat_init_ifnet_provider();
    nstat_control_register();
}

#pragma mark -- Aligned Buffer Allocation --

struct align_header {
    u_int32_t offset;
    u_int32_t length;
};

static void*
nstat_malloc_aligned(
    size_t length,
    u_int8_t alignment,
    zalloc_flags_t flags)
{
    struct align_header *hdr = NULL;
    size_t size = length + sizeof(*hdr) + alignment - 1;

    // Arbitrary limit to prevent abuse
    if (length > (64 * 1024)) {
        return NULL;
    }
    u_int8_t *buffer = (u_int8_t *)kalloc_data(size, flags);
    if (buffer == NULL) {
        return NULL;
    }

    u_int8_t *aligned = buffer + sizeof(*hdr);
    aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);

    hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
    hdr->offset = aligned - buffer;
    hdr->length = size;

    return aligned;
}

static void
nstat_free_aligned(
    void *buffer)
{
    struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
    char *offset_buffer = (char *)buffer - hdr->offset;
    kfree_data(offset_buffer, hdr->length);
}
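
// Usage sketch (illustrative): nstat_route_attach() below allocates its
// per-route counters exactly this way, e.g.
//     struct nstat_counts *counts = nstat_malloc_aligned(sizeof(*counts),
//         sizeof(u_int64_t), Z_WAITOK | Z_ZERO);
// so that the 64-bit counters can be updated with OSAddAtomic64. Because an
// align_header is hidden just before the returned pointer, such a buffer must
// be released with nstat_free_aligned(), never kfree_data() directly.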

#pragma mark -- Utilities --

#define NSTAT_PROCDETAILS_MAGIC 0xfeedc001
#define NSTAT_PROCDETAILS_UNMAGIC 0xdeadc001

static tailq_head_procdetails nstat_procdetails_head = TAILQ_HEAD_INITIALIZER(nstat_procdetails_head);

static struct nstat_procdetails *
nstat_retain_curprocdetails(void)
{
    struct nstat_procdetails *procdetails = NULL;
    uint64_t upid = proc_uniqueid(current_proc());

    lck_mtx_lock(&nstat_mtx);

    TAILQ_FOREACH(procdetails, &nstat_procdetails_head, pdet_link) {
        assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);

        if (procdetails->pdet_upid == upid) {
            OSIncrementAtomic(&procdetails->pdet_refcnt);
            break;
        }
    }
    lck_mtx_unlock(&nstat_mtx);
    if (!procdetails) {
        // No need for paranoia on locking, it would be OK if there are duplicate structs on the list
        procdetails = kalloc_type(struct nstat_procdetails,
            Z_WAITOK | Z_NOFAIL);
        procdetails->pdet_pid = proc_selfpid();
        procdetails->pdet_upid = upid;
        proc_selfname(procdetails->pdet_procname, sizeof(procdetails->pdet_procname));
        proc_getexecutableuuid(current_proc(), procdetails->pdet_uuid, sizeof(uuid_t));
        procdetails->pdet_refcnt = 1;
        procdetails->pdet_magic = NSTAT_PROCDETAILS_MAGIC;
        lck_mtx_lock(&nstat_mtx);
        TAILQ_INSERT_HEAD(&nstat_procdetails_head, procdetails, pdet_link);
        lck_mtx_unlock(&nstat_mtx);
    }

    return procdetails;
}

static void
nstat_release_procdetails(struct nstat_procdetails *procdetails)
{
    assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
    // These are harvested later to amortize costs
    OSDecrementAtomic(&procdetails->pdet_refcnt);
}

static void
nstat_prune_procdetails(void)
{
    struct nstat_procdetails *procdetails;
    struct nstat_procdetails *tmpdetails;
    tailq_head_procdetails dead_list;

    TAILQ_INIT(&dead_list);
    lck_mtx_lock(&nstat_mtx);

    TAILQ_FOREACH_SAFE(procdetails, &nstat_procdetails_head, pdet_link, tmpdetails)
    {
        assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
        if (procdetails->pdet_refcnt == 0) {
            // Pull it off the list
            TAILQ_REMOVE(&nstat_procdetails_head, procdetails, pdet_link);
            TAILQ_INSERT_TAIL(&dead_list, procdetails, pdet_link);
        }
    }
    lck_mtx_unlock(&nstat_mtx);

    while ((procdetails = TAILQ_FIRST(&dead_list))) {
        TAILQ_REMOVE(&dead_list, procdetails, pdet_link);
        procdetails->pdet_magic = NSTAT_PROCDETAILS_UNMAGIC;
        kfree_type(struct nstat_procdetails, procdetails);
    }
}

#pragma mark -- Route Provider --

static nstat_provider nstat_route_provider;

static errno_t
nstat_route_lookup(
    const void *data,
    u_int32_t length,
    nstat_provider_cookie_t *out_cookie)
{
    struct sockaddr *dst = NULL;
    struct sockaddr *mask = NULL;
    const nstat_route_add_param *param = (const nstat_route_add_param*)data;
    *out_cookie = NULL;

    if (length < sizeof(*param)) {
        return EINVAL;
    }

    if (param->dst.v4.sin_family == 0 ||
        param->dst.v4.sin_family > AF_MAX ||
        (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family)) {
        return EINVAL;
    }

    if (param->dst.v4.sin_len > sizeof(param->dst) ||
        (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask))) {
        return EINVAL;
    }
    if ((param->dst.v4.sin_family == AF_INET &&
        param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
        (param->dst.v6.sin6_family == AF_INET6 &&
        param->dst.v6.sin6_len < sizeof(struct sockaddr_in6))) {
        return EINVAL;
    }

    dst = __DECONST_SA(&param->dst.v4);
    mask = param->mask.v4.sin_family ? __DECONST_SA(&param->mask.v4) : NULL;

    struct radix_node_head *rnh = rt_tables[dst->sa_family];
    if (rnh == NULL) {
        return EAFNOSUPPORT;
    }

    lck_mtx_lock(rnh_lock);
    struct rtentry *rt = rt_lookup(TRUE, dst, mask, rnh, param->ifindex);
    lck_mtx_unlock(rnh_lock);

    if (rt) {
        *out_cookie = (nstat_provider_cookie_t)rt;
    }

    return rt ? 0 : ENOENT;
}

static int
nstat_route_gone(
    nstat_provider_cookie_t cookie)
{
    struct rtentry *rt = (struct rtentry*)cookie;
    return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
}

static errno_t
nstat_route_counts(
    nstat_provider_cookie_t cookie,
    struct nstat_counts *out_counts,
    int *out_gone)
{
    struct rtentry *rt = (struct rtentry*)cookie;
    struct nstat_counts *rt_stats = rt->rt_stats;

    if (out_gone) {
        *out_gone = 0;
    }

    if (out_gone && (rt->rt_flags & RTF_UP) == 0) {
        *out_gone = 1;
    }

    if (rt_stats) {
        out_counts->nstat_rxpackets = os_atomic_load(&rt_stats->nstat_rxpackets, relaxed);
        out_counts->nstat_rxbytes = os_atomic_load(&rt_stats->nstat_rxbytes, relaxed);
        out_counts->nstat_txpackets = os_atomic_load(&rt_stats->nstat_txpackets, relaxed);
        out_counts->nstat_txbytes = os_atomic_load(&rt_stats->nstat_txbytes, relaxed);
        out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
        out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
        out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
        out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
        out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
        out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
        out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
        out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
        out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
    } else {
        bzero(out_counts, sizeof(*out_counts));
    }

    return 0;
}

static void
nstat_route_release(
    nstat_provider_cookie_t cookie,
    __unused int locked)
{
    rtfree((struct rtentry*)cookie);
}

static u_int32_t nstat_route_watchers = 0;

static int
nstat_route_walktree_add(
    struct radix_node *rn,
    void *context)
{
    errno_t result = 0;
    struct rtentry *rt = (struct rtentry *)rn;
    nstat_control_state *state = (nstat_control_state*)context;

    LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

    /* RTF_UP can't change while rnh_lock is held */
    if ((rt->rt_flags & RTF_UP) != 0) {
        /* Clear RTPRF_OURS if the route is still usable */
        RT_LOCK(rt);
        if (rt_validate(rt)) {
            RT_ADDREF_LOCKED(rt);
            RT_UNLOCK(rt);
        } else {
            RT_UNLOCK(rt);
            rt = NULL;
        }

        /* Otherwise if RTF_CONDEMNED, treat it as if it were down */
        if (rt == NULL) {
            return 0;
        }

        result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
        if (result != 0) {
            rtfree_locked(rt);
        }
    }

    return result;
}

static errno_t
nstat_route_add_watcher(
    nstat_control_state *state,
    nstat_msg_add_all_srcs *req)
{
    int i;
    errno_t result = 0;

    lck_mtx_lock(rnh_lock);

    result = nstat_set_provider_filter(state, req);
    if (result == 0) {
        OSIncrementAtomic(&nstat_route_watchers);

        for (i = 1; i < AF_MAX; i++) {
            struct radix_node_head *rnh;
            rnh = rt_tables[i];
            if (!rnh) {
                continue;
            }

            result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
            if (result != 0) {
                // This is probably resource exhaustion.
                // There currently isn't a good way to recover from this.
                // Least bad seems to be to give up on the add-all but leave
                // the watcher in place.
                break;
            }
        }
    }
    lck_mtx_unlock(rnh_lock);

    return result;
}

__private_extern__ void
nstat_route_new_entry(
    struct rtentry *rt)
{
    if (nstat_route_watchers == 0) {
        return;
    }

    lck_mtx_lock(&nstat_mtx);
    if ((rt->rt_flags & RTF_UP) != 0) {
        nstat_control_state *state;
        for (state = nstat_controls; state; state = state->ncs_next) {
            if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0) {
                // this client is watching routes
                // acquire a reference for the route
                RT_ADDREF(rt);

                // add the source, if that fails, release the reference
                if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0) {
                    RT_REMREF(rt);
                }
            }
        }
    }
    lck_mtx_unlock(&nstat_mtx);
}

static void
nstat_route_remove_watcher(
    __unused nstat_control_state *state)
{
    OSDecrementAtomic(&nstat_route_watchers);
}

static errno_t
nstat_route_copy_descriptor(
    nstat_provider_cookie_t cookie,
    void *data,
    size_t len)
{
    nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
    if (len < sizeof(*desc)) {
        return EINVAL;
    }
    bzero(desc, sizeof(*desc));

    struct rtentry *rt = (struct rtentry*)cookie;
    desc->id = (uint64_t)VM_KERNEL_ADDRHASH(rt);
    desc->parent_id = (uint64_t)VM_KERNEL_ADDRHASH(rt->rt_parent);
    desc->gateway_id = (uint64_t)VM_KERNEL_ADDRHASH(rt->rt_gwroute);


    // key/dest
    struct sockaddr *sa;
    if ((sa = rt_key(rt))) {
        nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
    }

    // mask
    if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask)) {
        memcpy(&desc->mask, sa, sa->sa_len);
    }

    // gateway
    if ((sa = rt->rt_gateway)) {
        nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
    }

    if (rt->rt_ifp) {
        desc->ifindex = rt->rt_ifp->if_index;
    }

    desc->flags = rt->rt_flags;

    return 0;
}

static bool
nstat_route_reporting_allowed(
    nstat_provider_cookie_t cookie,
    nstat_provider_filter *filter,
    __unused u_int64_t suppression_flags)
{
    bool retval = true;

    if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
        struct rtentry *rt = (struct rtentry*)cookie;
        struct ifnet *ifp = rt->rt_ifp;

        if (ifp) {
            uint32_t interface_properties = nstat_ifnet_to_flags(ifp);

            if ((filter->npf_flags & interface_properties) == 0) {
                retval = false;
            }
        }
    }
    return retval;
}

static bool
nstat_route_cookie_equal(
    nstat_provider_cookie_t cookie1,
    nstat_provider_cookie_t cookie2)
{
    struct rtentry *rt1 = (struct rtentry *)cookie1;
    struct rtentry *rt2 = (struct rtentry *)cookie2;

    return (rt1 == rt2) ? true : false;
}

static void
nstat_init_route_provider(void)
{
    bzero(&nstat_route_provider, sizeof(nstat_route_provider));
    nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
    nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
    nstat_route_provider.nstat_lookup = nstat_route_lookup;
    nstat_route_provider.nstat_gone = nstat_route_gone;
    nstat_route_provider.nstat_counts = nstat_route_counts;
    nstat_route_provider.nstat_release = nstat_route_release;
    nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
    nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
    nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
    nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
    nstat_route_provider.nstat_cookie_equal = nstat_route_cookie_equal;
    nstat_route_provider.next = nstat_providers;
    nstat_providers = &nstat_route_provider;
}

#pragma mark -- Route Collection --

__private_extern__ struct nstat_counts*
nstat_route_attach(
    struct rtentry *rte)
{
    struct nstat_counts *result = rte->rt_stats;
    if (result) {
        return result;
    }

    result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t),
        Z_WAITOK | Z_ZERO);
    if (!result) {
        return result;
    }

    if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats)) {
        nstat_free_aligned(result);
        result = rte->rt_stats;
    }

    return result;
}

__private_extern__ void
nstat_route_detach(
    struct rtentry *rte)
{
    if (rte->rt_stats) {
        nstat_free_aligned(rte->rt_stats);
        rte->rt_stats = NULL;
    }
}

__private_extern__ void
nstat_route_connect_attempt(
    struct rtentry *rte)
{
    while (rte) {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats) {
            OSIncrementAtomic(&stats->nstat_connectattempts);
        }

        rte = rte->rt_parent;
    }
}

__private_extern__ void
nstat_route_connect_success(
    struct rtentry *rte)
{
    // This route
    while (rte) {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats) {
            OSIncrementAtomic(&stats->nstat_connectsuccesses);
        }

        rte = rte->rt_parent;
    }
}

__private_extern__ void
nstat_route_tx(
    struct rtentry *rte,
    u_int32_t packets,
    u_int32_t bytes,
    u_int32_t flags)
{
    while (rte) {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats) {
            if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0) {
                OSAddAtomic(bytes, &stats->nstat_txretransmit);
            } else {
                OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
                OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
            }
        }

        rte = rte->rt_parent;
    }
}

__private_extern__ void
nstat_route_rx(
    struct rtentry *rte,
    u_int32_t packets,
    u_int32_t bytes,
    u_int32_t flags)
{
    while (rte) {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats) {
            if (flags == 0) {
                OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
                OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
            } else {
                if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER) {
                    OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
                }
                if (flags & NSTAT_RX_FLAG_DUPLICATE) {
                    OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
                }
            }
        }

        rte = rte->rt_parent;
    }
}

/* atomically average current value at _val_addr with _new_val and store */
#define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \
    volatile uint32_t _old_val; \
    volatile uint32_t _avg; \
    do { \
        _old_val = *_val_addr; \
        if (_old_val == 0) \
        { \
            _avg = _new_val; \
        } \
        else \
        { \
            _avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \
        } \
        if (_old_val == _avg) break; \
    } while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \
} while (0);
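
// Worked example (illustrative): with _decay == 3 the update computes
// avg' = avg - (avg >> 3) + (sample >> 3), i.e. an exponentially weighted
// moving average with weight 1/8 on the new sample. Starting from avg == 80
// with sample == 160: avg' = 80 - 10 + 20 = 90. The compare-and-swap loop
// retries if another CPU updated the word between the read and the store.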

/* atomically compute minimum of current value at _val_addr with _new_val and store */
#define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \
    volatile uint32_t _old_val; \
    do { \
        _old_val = *_val_addr; \
        if (_old_val != 0 && _old_val < _new_val) \
        { \
            break; \
        } \
    } while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \
} while (0);
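
// Note (descriptive): a stored value of 0 is treated as "no sample yet", so the
// first sample always lands; after that, only samples smaller than the stored
// value replace it.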

__private_extern__ void
nstat_route_rtt(
    struct rtentry *rte,
    u_int32_t rtt,
    u_int32_t rtt_var)
{
    const uint32_t decay = 3;

    while (rte) {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats) {
            NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
            NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
            NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
        }
        rte = rte->rt_parent;
    }
}

__private_extern__ void
nstat_route_update(
    struct rtentry *rte,
    uint32_t connect_attempts,
    uint32_t connect_successes,
    uint32_t rx_packets,
    uint32_t rx_bytes,
    uint32_t rx_duplicatebytes,
    uint32_t rx_outoforderbytes,
    uint32_t tx_packets,
    uint32_t tx_bytes,
    uint32_t tx_retransmit,
    uint32_t rtt,
    uint32_t rtt_var)
{
    const uint32_t decay = 3;

    while (rte) {
        struct nstat_counts* stats = nstat_route_attach(rte);
        if (stats) {
            OSAddAtomic(connect_attempts, &stats->nstat_connectattempts);
            OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses);
            OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets);
            OSAddAtomic64((SInt64)tx_bytes, (SInt64*)&stats->nstat_txbytes);
            OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit);
            OSAddAtomic64((SInt64)rx_packets, (SInt64*)&stats->nstat_rxpackets);
            OSAddAtomic64((SInt64)rx_bytes, (SInt64*)&stats->nstat_rxbytes);
            OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes);
            OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes);

            if (rtt != 0) {
                NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
                NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
                NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
            }
        }
        rte = rte->rt_parent;
    }
}

#pragma mark -- TCP Kernel Provider --

/*
 * Due to the way the kernel deallocates a process (the process structure
 * might be gone by the time we get the PCB detach notification),
 * we need to cache the process name. Without this, proc_name() would
 * return null and the process name would never be sent to userland.
 *
 * For UDP sockets, we also cache the connection tuples along with the
 * interface index. This is necessary because when UDP sockets are
 * disconnected, the connection tuples are forever lost from the inpcb, thus
 * we need to keep track of the last call to connect() in ntstat.
 */
struct nstat_tucookie {
    struct inpcb *inp;
    char pname[MAXCOMLEN + 1];
    bool cached;
    union {
        struct sockaddr_in v4;
        struct sockaddr_in6 v6;
    } local;
    union {
        struct sockaddr_in v4;
        struct sockaddr_in6 v6;
    } remote;
    unsigned int if_index;
    uint32_t ifnet_properties;
};
1225
1226static struct nstat_tucookie *
1227nstat_tucookie_alloc_internal(
1228 struct inpcb *inp,
1229 bool ref,
1230 bool locked)
1231{
1232 struct nstat_tucookie *cookie;
1233
1234 cookie = kalloc_type(struct nstat_tucookie,
1235 Z_WAITOK | Z_ZERO | Z_NOFAIL);
1236 if (!locked) {
1237 LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
1238 }
1239 if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING) {
1240 kfree_type(struct nstat_tucookie, cookie);
1241 return NULL;
1242 }
1243 cookie->inp = inp;
1244 proc_name(pid: inp->inp_socket->last_pid, buf: cookie->pname,
1245 size: sizeof(cookie->pname));
1246 /*
1247 * We only increment the reference count for UDP sockets because we
1248 * only cache UDP socket tuples.
1249 */
1250 if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP) {
1251 OSIncrementAtomic(&inp->inp_nstat_refcnt);
1252 }
1253
1254 return cookie;
1255}
1256
1257__unused static struct nstat_tucookie *
1258nstat_tucookie_alloc(
1259 struct inpcb *inp)
1260{
1261 return nstat_tucookie_alloc_internal(inp, false, false);
1262}
1263
1264static struct nstat_tucookie *
1265nstat_tucookie_alloc_ref(
1266 struct inpcb *inp)
1267{
1268 return nstat_tucookie_alloc_internal(inp, true, false);
1269}
1270
1271static struct nstat_tucookie *
1272nstat_tucookie_alloc_ref_locked(
1273 struct inpcb *inp)
1274{
1275 return nstat_tucookie_alloc_internal(inp, true, true);
1276}
1277
1278static void
1279nstat_tucookie_release_internal(
1280 struct nstat_tucookie *cookie,
1281 int inplock)
1282{
1283 if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP) {
1284 OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
1285 }
1286 in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
1287 kfree_type(struct nstat_tucookie, cookie);
1288}
1289
1290static void
1291nstat_tucookie_release(
1292 struct nstat_tucookie *cookie)
1293{
1294 nstat_tucookie_release_internal(cookie, false);
1295}
1296
1297static void
1298nstat_tucookie_release_locked(
1299 struct nstat_tucookie *cookie)
1300{
1301 nstat_tucookie_release_internal(cookie, true);
1302}
1303
1304
1305static size_t
1306nstat_inp_domain_info(struct inpcb *inp, nstat_domain_info *domain_info, size_t len)
1307{
1308 // Note, the caller has guaranteed that the buffer has been zeroed, there is no need to clear it again
1309 struct socket *so = inp->inp_socket;
1310
1311 if (so == NULL) {
1312 return 0;
1313 }
1314
1315 NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: Collecting stats");
1316
1317 if (domain_info == NULL) {
1318 return sizeof(nstat_domain_info);
1319 }
1320
1321 if (len < sizeof(nstat_domain_info)) {
1322 return 0;
1323 }
1324
1325 necp_copy_inp_domain_info(inp, so, domain_info);
1326
1327 NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: <pid %d> Collected stats - domain <%s> owner <%s> ctxt <%s> bundle id <%s> "
1328 "is_tracker %d is_non_app_initiated %d is_silent %d",
1329 so->so_flags & SOF_DELEGATED ? so->e_pid : so->last_pid,
1330 domain_info->domain_name,
1331 domain_info->domain_owner,
1332 domain_info->domain_tracker_ctxt,
1333 domain_info->domain_attributed_bundle_id,
1334 domain_info->is_tracker,
1335 domain_info->is_non_app_initiated,
1336 domain_info->is_silent);
1337
1338 return sizeof(nstat_domain_info);
1339}
1340
1341
1342static nstat_provider nstat_tcp_provider;
1343
1344static errno_t
1345nstat_tcp_lookup(
1346 __unused const void *data,
1347 __unused u_int32_t length,
1348 __unused nstat_provider_cookie_t *out_cookie)
1349{
1350 // Looking up a specific connection is not supported.
1351 return ENOTSUP;
1352}
1353
1354static int
1355nstat_tcp_gone(
1356 nstat_provider_cookie_t cookie)
1357{
1358 struct nstat_tucookie *tucookie =
1359 (struct nstat_tucookie *)cookie;
1360 struct inpcb *inp;
1361 struct tcpcb *tp;
1362
1363 return (!(inp = tucookie->inp) ||
1364 !(tp = intotcpcb(inp)) ||
1365 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1366}
1367
1368static errno_t
1369nstat_tcp_counts(
1370 nstat_provider_cookie_t cookie,
1371 struct nstat_counts *out_counts,
1372 int *out_gone)
1373{
1374 struct nstat_tucookie *tucookie =
1375 (struct nstat_tucookie *)cookie;
1376 struct inpcb *inp;
1377
1378 bzero(s: out_counts, n: sizeof(*out_counts));
1379
1380 if (out_gone) {
1381 *out_gone = 0;
1382 }
1383
1384 // if the pcb is in the dead state, we should stop using it
1385 if (nstat_tcp_gone(cookie)) {
1386 if (out_gone) {
1387 *out_gone = 1;
1388 }
1389 if (!(inp = tucookie->inp) || !intotcpcb(inp)) {
1390 return EINVAL;
1391 }
1392 }
1393 inp = tucookie->inp;
1394 struct tcpcb *tp = intotcpcb(inp);
1395
1396 out_counts->nstat_rxpackets = os_atomic_load(&inp->inp_stat->rxpackets, relaxed);
1397 out_counts->nstat_rxbytes = os_atomic_load(&inp->inp_stat->rxbytes, relaxed);
1398 out_counts->nstat_txpackets = os_atomic_load(&inp->inp_stat->txpackets, relaxed);
1399 out_counts->nstat_txbytes = os_atomic_load(&inp->inp_stat->txbytes, relaxed);
1400 out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
1401 out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
1402 out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
1403 out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
1404 out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
1405 out_counts->nstat_avg_rtt = tp->t_srtt;
1406 out_counts->nstat_min_rtt = tp->t_rttbest;
1407 out_counts->nstat_var_rtt = tp->t_rttvar;
1408 if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt) {
1409 out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
1410 }
1411 out_counts->nstat_cell_rxbytes = os_atomic_load(&inp->inp_cstat->rxbytes, relaxed);
1412 out_counts->nstat_cell_txbytes = os_atomic_load(&inp->inp_cstat->txbytes, relaxed);
1413 out_counts->nstat_wifi_rxbytes = os_atomic_load(&inp->inp_wstat->rxbytes, relaxed);
1414 out_counts->nstat_wifi_txbytes = os_atomic_load(&inp->inp_wstat->txbytes, relaxed);
1415 out_counts->nstat_wired_rxbytes = os_atomic_load(&inp->inp_Wstat->rxbytes, relaxed);
1416 out_counts->nstat_wired_txbytes = os_atomic_load(&inp->inp_Wstat->txbytes, relaxed);
1417
1418 return 0;
1419}
1420
1421static void
1422nstat_tcp_release(
1423 nstat_provider_cookie_t cookie,
1424 int locked)
1425{
1426 struct nstat_tucookie *tucookie =
1427 (struct nstat_tucookie *)cookie;
1428
1429 nstat_tucookie_release_internal(cookie: tucookie, inplock: locked);
1430}
1431
1432static errno_t
1433nstat_tcp_add_watcher(
1434 nstat_control_state *state,
1435 nstat_msg_add_all_srcs *req)
1436{
1437 // There is a tricky issue around getting all TCP sockets added once
1438 // and only once. nstat_tcp_new_pcb() is called prior to the new item
1439 // being placed on any lists where it might be found.
1440 // By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
1441 // it should be impossible for a new socket to be added twice.
1442 // On the other hand, there is still a timing issue where a new socket
1443 // results in a call to nstat_tcp_new_pcb() before this watcher
1444 // is instantiated and yet the socket doesn't make it into ipi_listhead
1445 // prior to the scan. <rdar://problem/30361716>
1446
1447 errno_t result;
1448
1449 lck_rw_lock_shared(lck: &tcbinfo.ipi_lock);
1450 result = nstat_set_provider_filter(state, req);
1451 if (result == 0) {
1452 OSIncrementAtomic(&nstat_tcp_watchers);
1453
1454 // Add all current tcp inpcbs. Ignore those in timewait
1455 struct inpcb *inp;
1456 struct nstat_tucookie *cookie;
1457 LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
1458 {
1459 cookie = nstat_tucookie_alloc_ref(inp);
1460 if (cookie == NULL) {
1461 continue;
1462 }
1463 if (nstat_control_source_add(context: 0, state, provider: &nstat_tcp_provider,
1464 cookie) != 0) {
1465 nstat_tucookie_release(cookie);
1466 break;
1467 }
1468 }
1469 }
1470
1471 lck_rw_done(lck: &tcbinfo.ipi_lock);
1472
1473 return result;
1474}
1475
1476static void
1477nstat_tcp_remove_watcher(
1478 __unused nstat_control_state *state)
1479{
1480 OSDecrementAtomic(&nstat_tcp_watchers);
1481}
1482
1483__private_extern__ void
1484nstat_tcp_new_pcb(
1485 struct inpcb *inp)
1486{
1487 struct nstat_tucookie *cookie;
1488
1489 inp->inp_start_timestamp = mach_continuous_time();
1490
1491 if (nstat_tcp_watchers == 0) {
1492 return;
1493 }
1494
1495 socket_lock(so: inp->inp_socket, refcount: 0);
1496 lck_mtx_lock(lck: &nstat_mtx);
1497 nstat_control_state *state;
1498 for (state = nstat_controls; state; state = state->ncs_next) {
1499 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0) {
1500 // this client is watching tcp
1501 // acquire a reference for it
1502 cookie = nstat_tucookie_alloc_ref_locked(inp);
1503 if (cookie == NULL) {
1504 continue;
1505 }
1506 // add the source, if that fails, release the reference
1507 if (nstat_control_source_add(context: 0, state,
1508 provider: &nstat_tcp_provider, cookie) != 0) {
1509 nstat_tucookie_release_locked(cookie);
1510 break;
1511 }
1512 }
1513 }
1514 lck_mtx_unlock(lck: &nstat_mtx);
1515 socket_unlock(so: inp->inp_socket, refcount: 0);
1516}
1517
1518__private_extern__ void
1519nstat_pcb_detach(struct inpcb *inp)
1520{
1521 nstat_control_state *state;
1522 nstat_src *src;
1523 tailq_head_nstat_src dead_list;
1524 struct nstat_tucookie *tucookie;
1525 errno_t result;
1526
1527 if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
1528 return;
1529 }
1530
1531 TAILQ_INIT(&dead_list);
1532 lck_mtx_lock(lck: &nstat_mtx);
1533 for (state = nstat_controls; state; state = state->ncs_next) {
1534 lck_mtx_lock(lck: &state->ncs_mtx);
1535 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1536 {
1537 nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
1538 if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
1539 tucookie = (struct nstat_tucookie *)src->cookie;
1540 if (tucookie->inp == inp) {
1541 break;
1542 }
1543 }
1544 }
1545
1546 if (src) {
1547 result = nstat_control_send_goodbye(state, src);
1548
1549 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
1550 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
1551 }
1552 lck_mtx_unlock(lck: &state->ncs_mtx);
1553 }
1554 lck_mtx_unlock(lck: &nstat_mtx);
1555
1556 while ((src = TAILQ_FIRST(&dead_list))) {
1557 TAILQ_REMOVE(&dead_list, src, ns_control_link);
1558 nstat_control_cleanup_source(NULL, src, TRUE);
1559 }
1560}
1561
1562__private_extern__ void
1563nstat_pcb_event(struct inpcb *inp, u_int64_t event)
1564{
1565 nstat_control_state *state;
1566 nstat_src *src;
1567 struct nstat_tucookie *tucookie;
1568 errno_t result;
1569 nstat_provider_id_t provider_id;
1570
1571 if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0)) {
1572 return;
1573 }
1574 if (((merged_filters.mpf_filters[NSTAT_PROVIDER_TCP_KERNEL].mf_events & event) == 0) &&
1575 ((merged_filters.mpf_filters[NSTAT_PROVIDER_UDP_KERNEL].mf_events & event) == 0)) {
1576 // There are clients for TCP and UDP, but none are interested in the event
1577 // This check saves taking the mutex and scanning the list
1578 return;
1579 }
1580 lck_mtx_lock(lck: &nstat_mtx);
1581 for (state = nstat_controls; state; state = state->ncs_next) {
1582 if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_KERNEL].npf_events & event) == 0) &&
1583 ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_KERNEL].npf_events & event) == 0)) {
1584 continue;
1585 }
1586 lck_mtx_lock(lck: &state->ncs_mtx);
1587 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1588 {
1589 provider_id = src->provider->nstat_provider_id;
1590 if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL) {
1591 tucookie = (struct nstat_tucookie *)src->cookie;
1592 if (tucookie->inp == inp) {
1593 break;
1594 }
1595 }
1596 }
1597
1598 if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) {
1599 result = nstat_control_send_event(state, src, event);
1600 }
1601 lck_mtx_unlock(lck: &state->ncs_mtx);
1602 }
1603 lck_mtx_unlock(lck: &nstat_mtx);
1604 if (event == NSTAT_EVENT_SRC_ATTRIBUTION_CHANGE) {
1605 // As a convenience to clients, the bitmap is cleared when there is an attribution change
1606 // There is no interlock preventing clients from polling and collecting a half-cleared bitmap
1607 // but as the timestamp should be cleared first that should show that the bitmap is not applicable
1608 // The other race condition where an interested client process has exited and the new instance
1609 // has not yet shown up seems inconsequential enough not to burden the early exit path with additional checks
1610 inp_clear_activity_bitmap(inpb: inp);
1611 }
1612}
1613
1614
1615__private_extern__ void
1616nstat_pcb_cache(struct inpcb *inp)
1617{
1618 nstat_control_state *state;
1619 nstat_src *src;
1620 struct nstat_tucookie *tucookie;
1621
1622 if (inp == NULL || nstat_udp_watchers == 0 ||
1623 inp->inp_nstat_refcnt == 0) {
1624 return;
1625 }
1626 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1627 lck_mtx_lock(lck: &nstat_mtx);
1628 for (state = nstat_controls; state; state = state->ncs_next) {
1629 lck_mtx_lock(lck: &state->ncs_mtx);
1630 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1631 {
1632 tucookie = (struct nstat_tucookie *)src->cookie;
1633 if (tucookie->inp == inp) {
1634 if (inp->inp_vflag & INP_IPV6) {
1635 in6_ip6_to_sockaddr(ip6: &inp->in6p_laddr,
1636 port: inp->inp_lport,
1637 ifscope: inp->inp_lifscope,
1638 sin6: &tucookie->local.v6,
1639 maxlen: sizeof(tucookie->local));
1640 in6_ip6_to_sockaddr(ip6: &inp->in6p_faddr,
1641 port: inp->inp_fport,
1642 ifscope: inp->inp_fifscope,
1643 sin6: &tucookie->remote.v6,
1644 maxlen: sizeof(tucookie->remote));
1645 } else if (inp->inp_vflag & INP_IPV4) {
1646 nstat_ip_to_sockaddr(ip: &inp->inp_laddr,
1647 port: inp->inp_lport,
1648 sin: &tucookie->local.v4,
1649 maxlen: sizeof(tucookie->local));
1650 nstat_ip_to_sockaddr(ip: &inp->inp_faddr,
1651 port: inp->inp_fport,
1652 sin: &tucookie->remote.v4,
1653 maxlen: sizeof(tucookie->remote));
1654 }
1655 if (inp->inp_last_outifp) {
1656 tucookie->if_index =
1657 inp->inp_last_outifp->if_index;
1658 }
1659
1660 tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
1661 tucookie->cached = true;
1662 break;
1663 }
1664 }
1665 lck_mtx_unlock(lck: &state->ncs_mtx);
1666 }
1667 lck_mtx_unlock(lck: &nstat_mtx);
1668}
1669
1670__private_extern__ void
1671nstat_pcb_invalidate_cache(struct inpcb *inp)
1672{
1673 nstat_control_state *state;
1674 nstat_src *src;
1675 struct nstat_tucookie *tucookie;
1676
1677 if (inp == NULL || nstat_udp_watchers == 0 ||
1678 inp->inp_nstat_refcnt == 0) {
1679 return;
1680 }
1681 VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
1682 lck_mtx_lock(lck: &nstat_mtx);
1683 for (state = nstat_controls; state; state = state->ncs_next) {
1684 lck_mtx_lock(lck: &state->ncs_mtx);
1685 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
1686 {
1687 tucookie = (struct nstat_tucookie *)src->cookie;
1688 if (tucookie->inp == inp) {
1689 tucookie->cached = false;
1690 break;
1691 }
1692 }
1693 lck_mtx_unlock(lck: &state->ncs_mtx);
1694 }
1695 lck_mtx_unlock(lck: &nstat_mtx);
1696}
1697
1698static errno_t
1699nstat_tcp_copy_descriptor(
1700 nstat_provider_cookie_t cookie,
1701 void *data,
1702 size_t len)
1703{
1704 if (len < sizeof(nstat_tcp_descriptor)) {
1705 return EINVAL;
1706 }
1707
1708 if (nstat_tcp_gone(cookie)) {
1709 return EINVAL;
1710 }
1711
1712 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
1713 struct nstat_tucookie *tucookie =
1714 (struct nstat_tucookie *)cookie;
1715 struct inpcb *inp = tucookie->inp;
1716 struct tcpcb *tp = intotcpcb(inp);
1717 bzero(s: desc, n: sizeof(*desc));
1718
1719 if (inp->inp_vflag & INP_IPV6) {
1720 in6_ip6_to_sockaddr(ip6: &inp->in6p_laddr, port: inp->inp_lport, ifscope: inp->inp_lifscope,
1721 sin6: &desc->local.v6, maxlen: sizeof(desc->local));
1722 in6_ip6_to_sockaddr(ip6: &inp->in6p_faddr, port: inp->inp_fport, ifscope: inp->inp_fifscope,
1723 sin6: &desc->remote.v6, maxlen: sizeof(desc->remote));
1724 } else if (inp->inp_vflag & INP_IPV4) {
1725 nstat_ip_to_sockaddr(ip: &inp->inp_laddr, port: inp->inp_lport,
1726 sin: &desc->local.v4, maxlen: sizeof(desc->local));
1727 nstat_ip_to_sockaddr(ip: &inp->inp_faddr, port: inp->inp_fport,
1728 sin: &desc->remote.v4, maxlen: sizeof(desc->remote));
1729 }
1730
1731 desc->state = intotcpcb(inp)->t_state;
1732 desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
1733 inp->inp_last_outifp->if_index;
1734
1735 // danger - not locked, values could be bogus
1736 desc->txunacked = tp->snd_max - tp->snd_una;
1737 desc->txwindow = tp->snd_wnd;
1738 desc->txcwindow = tp->snd_cwnd;
1739 desc->ifnet_properties = nstat_inpcb_to_flags(inp);
1740
1741 if (CC_ALGO(tp)->name != NULL) {
1742 strlcpy(dst: desc->cc_algo, CC_ALGO(tp)->name,
1743 n: sizeof(desc->cc_algo));
1744 }
1745
1746 struct socket *so = inp->inp_socket;
1747 if (so) {
1748 // TBD - take the socket lock around these to make sure
1749 // they're in sync?
1750 desc->upid = so->last_upid;
1751 desc->pid = so->last_pid;
1752 desc->traffic_class = so->so_traffic_class;
1753 if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND)) {
1754 desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
1755 }
1756 if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG)) {
1757 desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
1758 }
1759 if (so->so_flags1 & SOF1_INBOUND) {
1760 desc->ifnet_properties |= NSTAT_SOURCE_IS_INBOUND;
1761 } else if (desc->state == TCPS_LISTEN) {
1762 desc->ifnet_properties |= NSTAT_SOURCE_IS_LISTENER;
1763 tucookie->ifnet_properties = NSTAT_SOURCE_IS_LISTENER;
1764 } else if (desc->state != TCPS_CLOSED) {
1765 desc->ifnet_properties |= NSTAT_SOURCE_IS_OUTBOUND;
1766 tucookie->ifnet_properties = NSTAT_SOURCE_IS_OUTBOUND;
1767 } else {
1768 desc->ifnet_properties |= tucookie->ifnet_properties;
1769 }
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else if (!uuid_is_null(so->so_ruuid)) {
			memcpy(desc->euuid, so->so_ruuid, sizeof(so->so_ruuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->persona_id = so->so_persona_id;
		desc->uid = kauth_cred_getuid(so->so_cred);
1795 desc->sndbufsize = so->so_snd.sb_hiwat;
1796 desc->sndbufused = so->so_snd.sb_cc;
1797 desc->rcvbufsize = so->so_rcv.sb_hiwat;
1798 desc->rcvbufused = so->so_rcv.sb_cc;
1799 desc->fallback_mode = so->so_fallback_mode;
1800
1801 if (nstat_debug) {
1802 uuid_string_t euuid_str = { 0 };
			uuid_unparse(desc->euuid, euuid_str);
1804 NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: TCP - pid %d uid %d euuid %s persona id %d", desc->pid, desc->uid, euuid_str, desc->persona_id);
1805 }
1806 }
1807
1808 tcp_get_connectivity_status(tp, &desc->connstatus);
	inp_get_activity_bitmap(inp, &desc->activity_bitmap);
1810 desc->start_timestamp = inp->inp_start_timestamp;
1811 desc->timestamp = mach_continuous_time();
1812 return 0;
1813}
1814
1815static bool
1816nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
1817{
1818 bool retval = true;
1819
1820 if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS | NSTAT_FILTER_SPECIFIC_USER)) != 0) {
1821 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1822 struct inpcb *inp = tucookie->inp;
1823
1824 /* Only apply interface filter if at least one is allowed. */
1825 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
1826 uint32_t interface_properties = nstat_inpcb_to_flags(inp);
1827
1828 if ((filter->npf_flags & interface_properties) == 0) {
1829 // For UDP, we could have an undefined interface and yet transfers may have occurred.
1830 // We allow reporting if there have been transfers of the requested kind.
1831 // This is imperfect as we cannot account for the expensive attribute over wifi.
1832 // We also assume that cellular is expensive and we have no way to select for AWDL
1833 if (is_UDP) {
					do {
1835 if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR | NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
1836 (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes)) {
1837 break;
1838 }
1839 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
1840 (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes)) {
1841 break;
1842 }
1843 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
1844 (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes)) {
1845 break;
1846 }
1847 return false;
1848 } while (0);
1849 } else {
1850 return false;
1851 }
1852 }
1853 }
1854
1855 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval)) {
1856 struct socket *so = inp->inp_socket;
1857 retval = false;
1858
1859 if (so) {
1860 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
1861 (filter->npf_pid == so->last_pid)) {
1862 retval = true;
1863 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
				    (filter->npf_pid == ((so->so_flags & SOF_DELEGATED) ? so->e_pid : so->last_pid))) {
1865 retval = true;
1866 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
				    (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0)) {
					retval = true;
				} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
				    (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED) ? so->e_uuid : so->last_uuid,
				    sizeof(so->last_uuid)) == 0)) {
1872 retval = true;
1873 }
1874 }
1875 }
1876 }
1877 return retval;
1878}
1879
1880static bool
1881nstat_tcp_reporting_allowed(
1882 nstat_provider_cookie_t cookie,
1883 nstat_provider_filter *filter,
1884 __unused u_int64_t suppression_flags)
1885{
1886 return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
1887}
1888
1889static size_t
1890nstat_tcp_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
1891{
1892 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1893 struct inpcb *inp = tucookie->inp;
1894
1895 if (nstat_tcp_gone(cookie)) {
1896 return 0;
1897 }
1898
1899 switch (extension_id) {
1900 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
		return nstat_inp_domain_info(inp, (nstat_domain_info *)buf, len);
1902
1903 case NSTAT_EXTENDED_UPDATE_TYPE_NECP_TLV:
1904 default:
1905 break;
1906 }
1907 return 0;
1908}
1909
1910static void
1911nstat_init_tcp_provider(void)
1912{
	bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1914 nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
1915 nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
1916 nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
1917 nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
1918 nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
1919 nstat_tcp_provider.nstat_release = nstat_tcp_release;
1920 nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
1921 nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
1922 nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
1923 nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
1924 nstat_tcp_provider.nstat_copy_extension = nstat_tcp_extensions;
1925 nstat_tcp_provider.next = nstat_providers;
1926 nstat_providers = &nstat_tcp_provider;
1927}
1928
1929#pragma mark -- UDP Provider --
1930
1931static nstat_provider nstat_udp_provider;
1932
1933static errno_t
1934nstat_udp_lookup(
1935 __unused const void *data,
1936 __unused u_int32_t length,
1937 __unused nstat_provider_cookie_t *out_cookie)
1938{
1939 // Looking up a specific connection is not supported.
1940 return ENOTSUP;
1941}
1942
1943static int
1944nstat_udp_gone(
1945 nstat_provider_cookie_t cookie)
1946{
1947 struct nstat_tucookie *tucookie =
1948 (struct nstat_tucookie *)cookie;
1949 struct inpcb *inp;
1950
1951 return (!(inp = tucookie->inp) ||
1952 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1953}
1954
1955static errno_t
1956nstat_udp_counts(
1957 nstat_provider_cookie_t cookie,
1958 struct nstat_counts *out_counts,
1959 int *out_gone)
1960{
1961 struct nstat_tucookie *tucookie =
1962 (struct nstat_tucookie *)cookie;
1963
1964 if (out_gone) {
1965 *out_gone = 0;
1966 }
1967
1968 // if the pcb is in the dead state, we should stop using it
1969 if (nstat_udp_gone(cookie)) {
1970 if (out_gone) {
1971 *out_gone = 1;
1972 }
1973 if (!tucookie->inp) {
1974 return EINVAL;
1975 }
1976 }
1977 struct inpcb *inp = tucookie->inp;
1978
1979 out_counts->nstat_rxpackets = os_atomic_load(&inp->inp_stat->rxpackets, relaxed);
1980 out_counts->nstat_rxbytes = os_atomic_load(&inp->inp_stat->rxbytes, relaxed);
1981 out_counts->nstat_txpackets = os_atomic_load(&inp->inp_stat->txpackets, relaxed);
1982 out_counts->nstat_txbytes = os_atomic_load(&inp->inp_stat->txbytes, relaxed);
1983 out_counts->nstat_cell_rxbytes = os_atomic_load(&inp->inp_cstat->rxbytes, relaxed);
1984 out_counts->nstat_cell_txbytes = os_atomic_load(&inp->inp_cstat->txbytes, relaxed);
1985 out_counts->nstat_wifi_rxbytes = os_atomic_load(&inp->inp_wstat->rxbytes, relaxed);
1986 out_counts->nstat_wifi_txbytes = os_atomic_load(&inp->inp_wstat->txbytes, relaxed);
1987 out_counts->nstat_wired_rxbytes = os_atomic_load(&inp->inp_Wstat->rxbytes, relaxed);
1988 out_counts->nstat_wired_txbytes = os_atomic_load(&inp->inp_Wstat->txbytes, relaxed);
1989
1990 return 0;
1991}
1992
1993static void
1994nstat_udp_release(
1995 nstat_provider_cookie_t cookie,
1996 int locked)
1997{
1998 struct nstat_tucookie *tucookie =
1999 (struct nstat_tucookie *)cookie;
2000
	nstat_tucookie_release_internal(tucookie, locked);
2002}
2003
2004static errno_t
2005nstat_udp_add_watcher(
2006 nstat_control_state *state,
2007 nstat_msg_add_all_srcs *req)
2008{
2009 // There is a tricky issue around getting all UDP sockets added once
2010 // and only once. nstat_udp_new_pcb() is called prior to the new item
2011 // being placed on any lists where it might be found.
	// By locking the udbinfo.ipi_lock prior to marking the state as a watcher,
2013 // it should be impossible for a new socket to be added twice.
2014 // On the other hand, there is still a timing issue where a new socket
2015 // results in a call to nstat_udp_new_pcb() before this watcher
2016 // is instantiated and yet the socket doesn't make it into ipi_listhead
2017 // prior to the scan. <rdar://problem/30361716>
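	//
	// An illustrative interleaving of that residual race (timeline sketch
	// only, not code from this file):
	//	thread A: nstat_udp_new_pcb(inp)     - watcher count still 0, returns early
	//	thread B: nstat_udp_add_watcher()    - takes ipi_lock, scans ipi_listhead
	//	thread A: inp linked to ipi_listhead - missed by B's scan, never reported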
2018
2019 errno_t result;
2020
	lck_rw_lock_shared(&udbinfo.ipi_lock);
2022 result = nstat_set_provider_filter(state, req);
2023
2024 if (result == 0) {
2025 struct inpcb *inp;
2026 struct nstat_tucookie *cookie;
2027
2028 OSIncrementAtomic(&nstat_udp_watchers);
2029
2030 // Add all current UDP inpcbs.
2031 LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
2032 {
2033 cookie = nstat_tucookie_alloc_ref(inp);
2034 if (cookie == NULL) {
2035 continue;
2036 }
			if (nstat_control_source_add(0, state, &nstat_udp_provider,
			    cookie) != 0) {
2039 nstat_tucookie_release(cookie);
2040 break;
2041 }
2042 }
2043 }
2044
	lck_rw_done(&udbinfo.ipi_lock);
2046
2047 return result;
2048}
2049
2050static void
2051nstat_udp_remove_watcher(
2052 __unused nstat_control_state *state)
2053{
2054 OSDecrementAtomic(&nstat_udp_watchers);
2055}
2056
2057__private_extern__ void
2058nstat_udp_new_pcb(
2059 struct inpcb *inp)
2060{
2061 struct nstat_tucookie *cookie;
2062
2063 inp->inp_start_timestamp = mach_continuous_time();
2064
2065 if (nstat_udp_watchers == 0) {
2066 return;
2067 }
2068
	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
2071 nstat_control_state *state;
2072 for (state = nstat_controls; state; state = state->ncs_next) {
2073 if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0) {
			// this client is watching udp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL) {
				continue;
			}
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0) {
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
2090}
2091
2092static errno_t
2093nstat_udp_copy_descriptor(
2094 nstat_provider_cookie_t cookie,
2095 void *data,
2096 size_t len)
2097{
2098 if (len < sizeof(nstat_udp_descriptor)) {
2099 return EINVAL;
2100 }
2101
2102 if (nstat_udp_gone(cookie)) {
2103 return EINVAL;
2104 }
2105
2106 struct nstat_tucookie *tucookie =
2107 (struct nstat_tucookie *)cookie;
2108 nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
2109 struct inpcb *inp = tucookie->inp;
2110
	bzero(desc, sizeof(*desc));

	if (tucookie->cached == false) {
		if (inp->inp_vflag & INP_IPV6) {
			in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport, inp->inp_lifscope,
			    &desc->local.v6, sizeof(desc->local.v6));
			in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport, inp->inp_fifscope,
			    &desc->remote.v6, sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			    &desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			    &desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	} else {
		if (inp->inp_vflag & INP_IPV6) {
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		} else if (inp->inp_vflag & INP_IPV4) {
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
2137 }
2138 desc->ifnet_properties = tucookie->ifnet_properties;
2139 }
2140
2141 if (inp->inp_last_outifp) {
2142 desc->ifindex = inp->inp_last_outifp->if_index;
2143 } else {
2144 desc->ifindex = tucookie->if_index;
2145 }
2146
2147 struct socket *so = inp->inp_socket;
2148 if (so) {
2149 // TBD - take the socket lock around these to make sure
2150 // they're in sync?
2151 desc->upid = so->last_upid;
2152 desc->pid = so->last_pid;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		if (desc->pname[0] == 0) {
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		} else {
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else if (!uuid_is_null(so->so_ruuid)) {
			memcpy(desc->euuid, so->so_ruuid, sizeof(so->so_ruuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		uuid_copy(desc->fuuid, inp->necp_client_uuid);
		desc->persona_id = so->so_persona_id;
		desc->uid = kauth_cred_getuid(so->so_cred);
2178 desc->rcvbufsize = so->so_rcv.sb_hiwat;
2179 desc->rcvbufused = so->so_rcv.sb_cc;
2180 desc->traffic_class = so->so_traffic_class;
2181 desc->fallback_mode = so->so_fallback_mode;
		inp_get_activity_bitmap(inp, &desc->activity_bitmap);
2183 desc->start_timestamp = inp->inp_start_timestamp;
2184 desc->timestamp = mach_continuous_time();
2185
2186 if (nstat_debug) {
2187 uuid_string_t euuid_str = { 0 };
			uuid_unparse(desc->euuid, euuid_str);
2189 NSTAT_DEBUG_SOCKET_LOG(so, "NSTAT: UDP - pid %d uid %d euuid %s persona id %d", desc->pid, desc->uid, euuid_str, desc->persona_id);
2190 }
2191 }
2192
2193 return 0;
2194}
2195
2196static bool
2197nstat_udp_reporting_allowed(
2198 nstat_provider_cookie_t cookie,
2199 nstat_provider_filter *filter,
2200 __unused u_int64_t suppression_flags)
2201{
2202 return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
2203}
2204
2205
2206static size_t
2207nstat_udp_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
2208{
2209 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
2210 struct inpcb *inp = tucookie->inp;
2211 if (nstat_udp_gone(cookie)) {
2212 return 0;
2213 }
2214
2215 switch (extension_id) {
2216 case NSTAT_EXTENDED_UPDATE_TYPE_DOMAIN:
		return nstat_inp_domain_info(inp, (nstat_domain_info *)buf, len);
2218
2219 default:
2220 break;
2221 }
2222 return 0;
2223}
2224
2225
2226static void
2227nstat_init_udp_provider(void)
2228{
	bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
2230 nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
2231 nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
2232 nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
2233 nstat_udp_provider.nstat_gone = nstat_udp_gone;
2234 nstat_udp_provider.nstat_counts = nstat_udp_counts;
2235 nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
2236 nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
2237 nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
2238 nstat_udp_provider.nstat_release = nstat_udp_release;
2239 nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
2240 nstat_udp_provider.nstat_copy_extension = nstat_udp_extensions;
2241 nstat_udp_provider.next = nstat_providers;
2242 nstat_providers = &nstat_udp_provider;
2243}
2244
2245#if SKYWALK
2246
2247#pragma mark -- TCP/UDP/QUIC Userland
2248
// Almost all of this infrastructure is common to both TCP and UDP
2250
2251static u_int32_t nstat_userland_quic_watchers = 0;
2252static u_int32_t nstat_userland_udp_watchers = 0;
2253static u_int32_t nstat_userland_tcp_watchers = 0;
2254
2255static u_int32_t nstat_userland_quic_shadows = 0;
2256static u_int32_t nstat_userland_udp_shadows = 0;
2257static u_int32_t nstat_userland_tcp_shadows = 0;
2258
2259static nstat_provider nstat_userland_quic_provider;
2260static nstat_provider nstat_userland_udp_provider;
2261static nstat_provider nstat_userland_tcp_provider;
2262
2263enum nstat_rnf_override {
2264 nstat_rnf_override_not_set,
2265 nstat_rnf_override_enabled,
2266 nstat_rnf_override_disabled
2267};
2268
2269struct nstat_tu_shadow {
2270 tailq_entry_tu_shadow shad_link;
2271 userland_stats_request_vals_fn *shad_getvals_fn;
2272 userland_stats_request_extension_fn *shad_get_extension_fn;
2273 userland_stats_provider_context *shad_provider_context;
2274 u_int64_t shad_properties;
2275 u_int64_t shad_start_timestamp;
2276 nstat_provider_id_t shad_provider;
2277 struct nstat_procdetails *shad_procdetails;
2278 bool shad_live; // false if defunct
2279 enum nstat_rnf_override shad_rnf_override;
2280 uint32_t shad_magic;
2281};
2282
2283// Magic number checking should remain in place until the userland provider has been fully proven
2284#define TU_SHADOW_MAGIC 0xfeedf00d
2285#define TU_SHADOW_UNMAGIC 0xdeaddeed
2286
2287static tailq_head_tu_shadow nstat_userprot_shad_head = TAILQ_HEAD_INITIALIZER(nstat_userprot_shad_head);
2288
2289static errno_t
2290nstat_userland_tu_lookup(
2291 __unused const void *data,
2292 __unused u_int32_t length,
2293 __unused nstat_provider_cookie_t *out_cookie)
2294{
2295 // Looking up a specific connection is not supported
2296 return ENOTSUP;
2297}
2298
2299static int
2300nstat_userland_tu_gone(
2301 __unused nstat_provider_cookie_t cookie)
2302{
2303 // Returns non-zero if the source has gone.
2304 // We don't keep a source hanging around, so the answer is always 0
2305 return 0;
2306}
2307
2308static errno_t
2309nstat_userland_tu_counts(
2310 nstat_provider_cookie_t cookie,
2311 struct nstat_counts *out_counts,
2312 int *out_gone)
2313{
2314 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2315 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2316 assert(shad->shad_live);
2317
2318 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, out_counts, NULL);
2319
2320 if (out_gone) {
2321 *out_gone = 0;
2322 }
2323
2324 return (result)? 0 : EIO;
2325}
2326
2327
2328static errno_t
2329nstat_userland_tu_copy_descriptor(
2330 nstat_provider_cookie_t cookie,
2331 void *data,
2332 __unused size_t len)
2333{
2334 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2335 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2336 assert(shad->shad_live);
2337 struct nstat_procdetails *procdetails = shad->shad_procdetails;
2338 assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
2339
2340 bool result = (*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, data);
2341
2342 switch (shad->shad_provider) {
2343 case NSTAT_PROVIDER_TCP_USERLAND:
2344 {
2345 nstat_tcp_descriptor *desc = (nstat_tcp_descriptor *)data;
2346 desc->pid = procdetails->pdet_pid;
2347 desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
2350 if (shad->shad_rnf_override == nstat_rnf_override_enabled) {
2351 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
2352 desc->fallback_mode = SO_FALLBACK_MODE_FAST;
2353 } else if (shad->shad_rnf_override == nstat_rnf_override_disabled) {
2354 desc->ifnet_properties &= ~NSTAT_IFNET_VIA_CELLFALLBACK;
2355 desc->fallback_mode = SO_FALLBACK_MODE_NONE;
2356 }
2357 desc->ifnet_properties |= (uint32_t)shad->shad_properties;
2358 desc->start_timestamp = shad->shad_start_timestamp;
2359 desc->timestamp = mach_continuous_time();
2360 }
2361 break;
2362 case NSTAT_PROVIDER_UDP_USERLAND:
2363 {
2364 nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data;
2365 desc->pid = procdetails->pdet_pid;
2366 desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
2369 if (shad->shad_rnf_override == nstat_rnf_override_enabled) {
2370 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
2371 desc->fallback_mode = SO_FALLBACK_MODE_FAST;
2372 } else if (shad->shad_rnf_override == nstat_rnf_override_disabled) {
2373 desc->ifnet_properties &= ~NSTAT_IFNET_VIA_CELLFALLBACK;
2374 desc->fallback_mode = SO_FALLBACK_MODE_NONE;
2375 }
2376 desc->ifnet_properties |= (uint32_t)shad->shad_properties;
2377 desc->start_timestamp = shad->shad_start_timestamp;
2378 desc->timestamp = mach_continuous_time();
2379 }
2380 break;
2381 case NSTAT_PROVIDER_QUIC_USERLAND:
2382 {
2383 nstat_quic_descriptor *desc = (nstat_quic_descriptor *)data;
2384 desc->pid = procdetails->pdet_pid;
2385 desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
2388 if (shad->shad_rnf_override == nstat_rnf_override_enabled) {
2389 desc->ifnet_properties |= NSTAT_IFNET_VIA_CELLFALLBACK;
2390 desc->fallback_mode = SO_FALLBACK_MODE_FAST;
2391 } else if (shad->shad_rnf_override == nstat_rnf_override_disabled) {
2392 desc->ifnet_properties &= ~NSTAT_IFNET_VIA_CELLFALLBACK;
2393 desc->fallback_mode = SO_FALLBACK_MODE_NONE;
2394 }
2395 desc->ifnet_properties |= (uint32_t)shad->shad_properties;
2396 desc->start_timestamp = shad->shad_start_timestamp;
2397 desc->timestamp = mach_continuous_time();
2398 }
2399 break;
2400 default:
2401 break;
2402 }
2403 return (result)? 0 : EIO;
2404}
2405
2406static void
2407nstat_userland_tu_release(
2408 __unused nstat_provider_cookie_t cookie,
2409 __unused int locked)
2410{
2411 // Called when a nstat_src is detached.
2412 // We don't reference count or ask for delayed release so nothing to do here.
2413 // Note that any associated nstat_tu_shadow may already have been released.
2414}
2415
2416static bool
2417check_reporting_for_user(nstat_provider_filter *filter, pid_t pid, pid_t epid, uuid_t *uuid, uuid_t *euuid)
2418{
2419 bool retval = true;
2420
2421 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2422 retval = false;
2423
2424 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
2425 (filter->npf_pid == pid)) {
2426 retval = true;
2427 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
2428 (filter->npf_pid == epid)) {
2429 retval = true;
2430 } else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
		    (memcmp(filter->npf_uuid, uuid, sizeof(*uuid)) == 0)) {
			retval = true;
		} else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
		    (memcmp(filter->npf_uuid, euuid, sizeof(*euuid)) == 0)) {
2435 retval = true;
2436 }
2437 }
2438 return retval;
2439}
2440
2441static bool
2442nstat_userland_tcp_reporting_allowed(
2443 nstat_provider_cookie_t cookie,
2444 nstat_provider_filter *filter,
2445 __unused u_int64_t suppression_flags)
2446{
2447 bool retval = true;
2448 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2449
2450 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2451
2452 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2453 u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2454
2455 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2456 if ((filter->npf_flags & ifflags) == 0) {
2457 return false;
2458 }
2459 }
2460 }
2461
2462 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2463 nstat_tcp_descriptor tcp_desc; // Stack allocation - OK or pushing the limits too far?
2464 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &tcp_desc)) {
			retval = check_reporting_for_user(filter, (pid_t)tcp_desc.pid, (pid_t)tcp_desc.epid,
			    &tcp_desc.uuid, &tcp_desc.euuid);
2467 } else {
2468 retval = false; // No further information, so might as well give up now.
2469 }
2470 }
2471 return retval;
2472}
2473
2474static size_t
2475nstat_userland_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
2476{
2477 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2478 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2479 assert(shad->shad_live);
2480 assert(shad->shad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
2481
2482 return shad->shad_get_extension_fn(shad->shad_provider_context, extension_id, buf, len);
2483}
2484
2485
2486static bool
2487nstat_userland_udp_reporting_allowed(
2488 nstat_provider_cookie_t cookie,
2489 nstat_provider_filter *filter,
2490 __unused u_int64_t suppression_flags)
2491{
2492 bool retval = true;
2493 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2494
2495 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2496
2497 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2498 u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2499
2500 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2501 if ((filter->npf_flags & ifflags) == 0) {
2502 return false;
2503 }
2504 }
2505 }
2506 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2507 nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
2508 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &udp_desc)) {
			retval = check_reporting_for_user(filter, (pid_t)udp_desc.pid, (pid_t)udp_desc.epid,
			    &udp_desc.uuid, &udp_desc.euuid);
2511 } else {
2512 retval = false; // No further information, so might as well give up now.
2513 }
2514 }
2515 return retval;
2516}
2517
2518static bool
2519nstat_userland_quic_reporting_allowed(
2520 nstat_provider_cookie_t cookie,
2521 nstat_provider_filter *filter,
2522 __unused u_int64_t suppression_flags)
2523{
2524 bool retval = true;
2525 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)cookie;
2526
2527 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2528
2529 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0) {
2530 u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
2531
2532 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, &ifflags, NULL, NULL, NULL)) {
2533 if ((filter->npf_flags & ifflags) == 0) {
2534 return false;
2535 }
2536 }
2537 }
2538 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
2539 nstat_quic_descriptor quic_desc; // Stack allocation - OK or pushing the limits too far?
2540 if ((*shad->shad_getvals_fn)(shad->shad_provider_context, NULL, NULL, NULL, &quic_desc)) {
			retval = check_reporting_for_user(filter, (pid_t)quic_desc.pid, (pid_t)quic_desc.epid,
			    &quic_desc.uuid, &quic_desc.euuid);
2543 } else {
2544 retval = false; // No further information, so might as well give up now.
2545 }
2546 }
2547 return retval;
2548}
2549
2550static errno_t
2551nstat_userland_protocol_add_watcher(
2552 nstat_control_state *state,
2553 nstat_msg_add_all_srcs *req,
2554 nstat_provider_type_t nstat_provider_type,
2555 nstat_provider *nstat_provider,
2556 u_int32_t *proto_watcher_cnt)
2557{
2558 errno_t result;
2559
	lck_mtx_lock(&nstat_mtx);
2561 result = nstat_set_provider_filter(state, req);
2562
2563 if (result == 0) {
2564 struct nstat_tu_shadow *shad;
2565
2566 OSIncrementAtomic(proto_watcher_cnt);
2567
2568 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
2569 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2570
2571 if ((shad->shad_provider == nstat_provider_type) && (shad->shad_live)) {
				result = nstat_control_source_add(0, state, nstat_provider, shad);
2573 if (result != 0) {
2574 printf("%s - nstat_control_source_add returned %d for "
2575 "provider type: %d\n", __func__, result, nstat_provider_type);
2576 break;
2577 }
2578 }
2579 }
2580 }
	lck_mtx_unlock(&nstat_mtx);
2582
2583 return result;
2584}
2585
2586static errno_t
2587nstat_userland_tcp_add_watcher(
2588 nstat_control_state *state,
2589 nstat_msg_add_all_srcs *req)
2590{
	return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_TCP_USERLAND,
	    &nstat_userland_tcp_provider, &nstat_userland_tcp_watchers);
2593}
2594
2595static errno_t
2596nstat_userland_udp_add_watcher(
2597 nstat_control_state *state,
2598 nstat_msg_add_all_srcs *req)
2599{
	return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_UDP_USERLAND,
	    &nstat_userland_udp_provider, &nstat_userland_udp_watchers);
2602}
2603
2604static errno_t
2605nstat_userland_quic_add_watcher(
2606 nstat_control_state *state,
2607 nstat_msg_add_all_srcs *req)
2608{
	return nstat_userland_protocol_add_watcher(state, req, NSTAT_PROVIDER_QUIC_USERLAND,
	    &nstat_userland_quic_provider, &nstat_userland_quic_watchers);
2611}
2612
2613static void
2614nstat_userland_tcp_remove_watcher(
2615 __unused nstat_control_state *state)
2616{
2617 OSDecrementAtomic(&nstat_userland_tcp_watchers);
2618}
2619
2620static void
2621nstat_userland_udp_remove_watcher(
2622 __unused nstat_control_state *state)
2623{
2624 OSDecrementAtomic(&nstat_userland_udp_watchers);
2625}
2626
2627static void
2628nstat_userland_quic_remove_watcher(
2629 __unused nstat_control_state *state)
2630{
2631 OSDecrementAtomic(&nstat_userland_quic_watchers);
2632}
2633
2634
2635static void
2636nstat_init_userland_tcp_provider(void)
2637{
	bzero(&nstat_userland_tcp_provider, sizeof(nstat_userland_tcp_provider));
2639 nstat_userland_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
2640 nstat_userland_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_USERLAND;
2641 nstat_userland_tcp_provider.nstat_lookup = nstat_userland_tu_lookup;
2642 nstat_userland_tcp_provider.nstat_gone = nstat_userland_tu_gone;
2643 nstat_userland_tcp_provider.nstat_counts = nstat_userland_tu_counts;
2644 nstat_userland_tcp_provider.nstat_release = nstat_userland_tu_release;
2645 nstat_userland_tcp_provider.nstat_watcher_add = nstat_userland_tcp_add_watcher;
2646 nstat_userland_tcp_provider.nstat_watcher_remove = nstat_userland_tcp_remove_watcher;
2647 nstat_userland_tcp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2648 nstat_userland_tcp_provider.nstat_reporting_allowed = nstat_userland_tcp_reporting_allowed;
2649 nstat_userland_tcp_provider.nstat_copy_extension = nstat_userland_extensions;
2650 nstat_userland_tcp_provider.next = nstat_providers;
2651 nstat_providers = &nstat_userland_tcp_provider;
2652}
2653
2654
2655static void
2656nstat_init_userland_udp_provider(void)
2657{
	bzero(&nstat_userland_udp_provider, sizeof(nstat_userland_udp_provider));
2659 nstat_userland_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
2660 nstat_userland_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_USERLAND;
2661 nstat_userland_udp_provider.nstat_lookup = nstat_userland_tu_lookup;
2662 nstat_userland_udp_provider.nstat_gone = nstat_userland_tu_gone;
2663 nstat_userland_udp_provider.nstat_counts = nstat_userland_tu_counts;
2664 nstat_userland_udp_provider.nstat_release = nstat_userland_tu_release;
2665 nstat_userland_udp_provider.nstat_watcher_add = nstat_userland_udp_add_watcher;
2666 nstat_userland_udp_provider.nstat_watcher_remove = nstat_userland_udp_remove_watcher;
2667 nstat_userland_udp_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2668 nstat_userland_udp_provider.nstat_reporting_allowed = nstat_userland_udp_reporting_allowed;
2669 nstat_userland_udp_provider.nstat_copy_extension = nstat_userland_extensions;
2670 nstat_userland_udp_provider.next = nstat_providers;
2671 nstat_providers = &nstat_userland_udp_provider;
2672}
2673
2674static void
2675nstat_init_userland_quic_provider(void)
2676{
	bzero(&nstat_userland_quic_provider, sizeof(nstat_userland_quic_provider));
2678 nstat_userland_quic_provider.nstat_descriptor_length = sizeof(nstat_quic_descriptor);
2679 nstat_userland_quic_provider.nstat_provider_id = NSTAT_PROVIDER_QUIC_USERLAND;
2680 nstat_userland_quic_provider.nstat_lookup = nstat_userland_tu_lookup;
2681 nstat_userland_quic_provider.nstat_gone = nstat_userland_tu_gone;
2682 nstat_userland_quic_provider.nstat_counts = nstat_userland_tu_counts;
2683 nstat_userland_quic_provider.nstat_release = nstat_userland_tu_release;
2684 nstat_userland_quic_provider.nstat_watcher_add = nstat_userland_quic_add_watcher;
2685 nstat_userland_quic_provider.nstat_watcher_remove = nstat_userland_quic_remove_watcher;
2686 nstat_userland_quic_provider.nstat_copy_descriptor = nstat_userland_tu_copy_descriptor;
2687 nstat_userland_quic_provider.nstat_reporting_allowed = nstat_userland_quic_reporting_allowed;
2688 nstat_userland_quic_provider.nstat_copy_extension = nstat_userland_extensions;
2689 nstat_userland_quic_provider.next = nstat_providers;
2690 nstat_providers = &nstat_userland_quic_provider;
2691}
2692
2693
2694// Things get started with a call to netstats to say that there’s a new connection:
2695__private_extern__ nstat_userland_context
2696ntstat_userland_stats_open(userland_stats_provider_context *ctx,
2697 int provider_id,
2698 u_int64_t properties,
2699 userland_stats_request_vals_fn req_fn,
2700 userland_stats_request_extension_fn req_extension_fn)
2701{
2702 struct nstat_tu_shadow *shad;
2703 struct nstat_procdetails *procdetails;
2704 nstat_provider *provider;
2705
2706 if ((provider_id != NSTAT_PROVIDER_TCP_USERLAND) &&
2707 (provider_id != NSTAT_PROVIDER_UDP_USERLAND) &&
2708 (provider_id != NSTAT_PROVIDER_QUIC_USERLAND)) {
2709 printf("%s - incorrect provider is supplied, %d\n", __func__, provider_id);
2710 return NULL;
2711 }
2712
2713 shad = kalloc_type(struct nstat_tu_shadow, Z_WAITOK | Z_NOFAIL);
2714
2715 procdetails = nstat_retain_curprocdetails();
2716
2717 if (procdetails == NULL) {
2718 kfree_type(struct nstat_tu_shadow, shad);
2719 return NULL;
2720 }
2721
2722 shad->shad_getvals_fn = req_fn;
2723 shad->shad_get_extension_fn = req_extension_fn;
2724 shad->shad_provider_context = ctx;
2725 shad->shad_provider = provider_id;
2726 shad->shad_properties = properties;
2727 shad->shad_procdetails = procdetails;
2728 shad->shad_rnf_override = nstat_rnf_override_not_set;
2729 shad->shad_start_timestamp = mach_continuous_time();
2730 shad->shad_live = true;
2731 shad->shad_magic = TU_SHADOW_MAGIC;
2732
	lck_mtx_lock(&nstat_mtx);
2734 nstat_control_state *state;
2735
2736 // Even if there are no watchers, we save the shadow structure
2737 TAILQ_INSERT_HEAD(&nstat_userprot_shad_head, shad, shad_link);
2738
2739 if (provider_id == NSTAT_PROVIDER_TCP_USERLAND) {
2740 nstat_userland_tcp_shadows++;
2741 provider = &nstat_userland_tcp_provider;
2742 } else if (provider_id == NSTAT_PROVIDER_UDP_USERLAND) {
2743 nstat_userland_udp_shadows++;
2744 provider = &nstat_userland_udp_provider;
2745 } else {
2746 nstat_userland_quic_shadows++;
2747 provider = &nstat_userland_quic_provider;
2748 }
2749
2750 for (state = nstat_controls; state; state = state->ncs_next) {
2751 if ((state->ncs_watching & (1 << provider_id)) != 0) {
2752 // this client is watching tcp/udp/quic userland
2753 // Link to it.
			int result = nstat_control_source_add(0, state, provider, shad);
2755 if (result != 0) {
2756 // There should be some kind of statistics for failures like this.
2757 // <rdar://problem/31377195> The kernel ntstat component should keep some
2758 // internal counters reflecting operational state for eventual AWD reporting
2759 }
2760 }
2761 }
	lck_mtx_unlock(&nstat_mtx);
2763
2764 return (nstat_userland_context)shad;
2765}
2766
2767
2768__private_extern__ void
2769ntstat_userland_stats_close(nstat_userland_context nstat_ctx)
2770{
2771 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
2772 tailq_head_nstat_src dead_list;
2773 nstat_src *src;
2774
2775 if (shad == NULL) {
2776 return;
2777 }
2778
2779 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2780 TAILQ_INIT(&dead_list);
2781
	lck_mtx_lock(&nstat_mtx);
2783 if (nstat_userland_udp_watchers != 0 ||
2784 nstat_userland_tcp_watchers != 0 ||
2785 nstat_userland_quic_watchers != 0) {
2786 nstat_control_state *state;
2787 errno_t result;
2788
2789 for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
2791 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2792 {
2793 if (shad == (struct nstat_tu_shadow *)src->cookie) {
2794 nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
2795 if (provider_id == NSTAT_PROVIDER_TCP_USERLAND ||
2796 provider_id == NSTAT_PROVIDER_UDP_USERLAND ||
2797 provider_id == NSTAT_PROVIDER_QUIC_USERLAND) {
2798 break;
2799 }
2800 }
2801 }
2802
2803 if (src) {
2804 result = nstat_control_send_goodbye(state, src);
2805
2806 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
2807 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
2808 }
			lck_mtx_unlock(&state->ncs_mtx);
2810 }
2811 }
2812 TAILQ_REMOVE(&nstat_userprot_shad_head, shad, shad_link);
2813
2814 if (shad->shad_live) {
2815 if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) {
2816 nstat_userland_tcp_shadows--;
2817 } else if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND) {
2818 nstat_userland_udp_shadows--;
2819 } else {
2820 nstat_userland_quic_shadows--;
2821 }
2822 }
2823
	lck_mtx_unlock(&nstat_mtx);
2825
2826 while ((src = TAILQ_FIRST(&dead_list))) {
2827 TAILQ_REMOVE(&dead_list, src, ns_control_link);
2828 nstat_control_cleanup_source(NULL, src, TRUE);
2829 }
	nstat_release_procdetails(shad->shad_procdetails);
2831 shad->shad_magic = TU_SHADOW_UNMAGIC;
2832
2833 kfree_type(struct nstat_tu_shadow, shad);
2834}
2835
2836static void
2837ntstat_userland_stats_event_locked(
2838 struct nstat_tu_shadow *shad,
2839 uint64_t event)
2840{
2841 nstat_control_state *state;
2842 nstat_src *src;
2843 errno_t result;
2844 nstat_provider_id_t provider_id;
2845
2846 if (nstat_userland_udp_watchers != 0 || nstat_userland_tcp_watchers != 0 || nstat_userland_quic_watchers != 0) {
2847 for (state = nstat_controls; state; state = state->ncs_next) {
2848 if (((state->ncs_provider_filters[NSTAT_PROVIDER_TCP_USERLAND].npf_events & event) == 0) &&
2849 ((state->ncs_provider_filters[NSTAT_PROVIDER_UDP_USERLAND].npf_events & event) == 0) &&
2850 ((state->ncs_provider_filters[NSTAT_PROVIDER_QUIC_USERLAND].npf_events & event) == 0)) {
2851 continue;
2852 }
			lck_mtx_lock(&state->ncs_mtx);
2854 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link) {
2855 provider_id = src->provider->nstat_provider_id;
2856 if (provider_id == NSTAT_PROVIDER_TCP_USERLAND || provider_id == NSTAT_PROVIDER_UDP_USERLAND ||
2857 provider_id == NSTAT_PROVIDER_QUIC_USERLAND) {
2858 if (shad == (struct nstat_tu_shadow *)src->cookie) {
2859 break;
2860 }
2861 }
2862 }
2863 if (src && ((state->ncs_provider_filters[provider_id].npf_events & event) != 0)) {
2864 result = nstat_control_send_event(state, src, event);
2865 }
			lck_mtx_unlock(&state->ncs_mtx);
2867 }
2868 }
2869}
2870
2871__private_extern__ void
2872ntstat_userland_stats_event(
2873 nstat_userland_context nstat_ctx,
2874 uint64_t event)
2875{
2876 // This will need refinement for when we do genuine stats filtering
2877 // See <rdar://problem/23022832> NetworkStatistics should provide opt-in notifications
2878 // For now it deals only with events that potentially cause any traditional netstat sources to be closed
2879
2880 struct nstat_tu_shadow *shad = (struct nstat_tu_shadow *)nstat_ctx;
2881 tailq_head_nstat_src dead_list;
2882 nstat_src *src;
2883
2884 if (shad == NULL) {
2885 return;
2886 }
2887
2888 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2889
2890 if (event & NECP_CLIENT_STATISTICS_EVENT_TIME_WAIT) {
2891 TAILQ_INIT(&dead_list);
2892
		lck_mtx_lock(&nstat_mtx);
2894 if (nstat_userland_udp_watchers != 0 ||
2895 nstat_userland_tcp_watchers != 0 ||
2896 nstat_userland_quic_watchers != 0) {
2897 nstat_control_state *state;
2898 errno_t result;
2899
2900 for (state = nstat_controls; state; state = state->ncs_next) {
				lck_mtx_lock(&state->ncs_mtx);
2902 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
2903 {
2904 if (shad == (struct nstat_tu_shadow *)src->cookie) {
2905 break;
2906 }
2907 }
2908
2909 if (src) {
2910 if (!(src->filter & NSTAT_FILTER_TCP_NO_EARLY_CLOSE)) {
2911 result = nstat_control_send_goodbye(state, src);
2912
2913 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
2914 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
2915 }
2916 }
				lck_mtx_unlock(&state->ncs_mtx);
			}
		}
		lck_mtx_unlock(&nstat_mtx);
2921
2922 while ((src = TAILQ_FIRST(&dead_list))) {
2923 TAILQ_REMOVE(&dead_list, src, ns_control_link);
2924 nstat_control_cleanup_source(NULL, src, TRUE);
2925 }
2926 }
2927}
2928
2929__private_extern__ void
2930nstats_userland_stats_defunct_for_process(int pid)
2931{
2932 // Note that this can be called multiple times for the same process
2933 tailq_head_nstat_src dead_list;
2934 nstat_src *src, *tmpsrc;
2935 struct nstat_tu_shadow *shad;
2936
2937 TAILQ_INIT(&dead_list);
2938
	lck_mtx_lock(&nstat_mtx);
2940
2941 if (nstat_userland_udp_watchers != 0 ||
2942 nstat_userland_tcp_watchers != 0 ||
2943 nstat_userland_quic_watchers != 0) {
2944 nstat_control_state *state;
2945 errno_t result;
2946
2947 for (state = nstat_controls; state; state = state->ncs_next) {
			lck_mtx_lock(&state->ncs_mtx);
2949 TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
2950 {
2951 nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
2952 if (provider_id == NSTAT_PROVIDER_TCP_USERLAND ||
2953 provider_id == NSTAT_PROVIDER_UDP_USERLAND ||
2954 provider_id == NSTAT_PROVIDER_QUIC_USERLAND) {
2955 shad = (struct nstat_tu_shadow *)src->cookie;
2956 if (shad->shad_procdetails->pdet_pid == pid) {
2957 result = nstat_control_send_goodbye(state, src);
2958
2959 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
2960 TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
2961 }
2962 }
2963 }
			lck_mtx_unlock(&state->ncs_mtx);
2965 }
2966 }
2967
2968 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
2969 assert(shad->shad_magic == TU_SHADOW_MAGIC);
2970
2971 if (shad->shad_live) {
2972 if (shad->shad_procdetails->pdet_pid == pid) {
2973 shad->shad_live = false;
2974 if (shad->shad_provider == NSTAT_PROVIDER_TCP_USERLAND) {
2975 nstat_userland_tcp_shadows--;
2976 } else if (shad->shad_provider == NSTAT_PROVIDER_UDP_USERLAND) {
2977 nstat_userland_udp_shadows--;
2978 } else {
2979 nstat_userland_quic_shadows--;
2980 }
2981 }
2982 }
2983 }
2984
	lck_mtx_unlock(&nstat_mtx);
2986
2987 while ((src = TAILQ_FIRST(&dead_list))) {
2988 TAILQ_REMOVE(&dead_list, src, ns_control_link);
2989 nstat_control_cleanup_source(NULL, src, TRUE);
2990 }
2991}
2992
2993errno_t
2994nstat_userland_mark_rnf_override(uuid_t target_fuuid, bool rnf_override)
2995{
2996 // Note that this can be called multiple times for the same process
2997 struct nstat_tu_shadow *shad;
2998 uuid_t fuuid;
2999 errno_t result;
3000
	lck_mtx_lock(&nstat_mtx);
	// We set the fallback state regardless of watchers as there may be future ones that need to know
3003 TAILQ_FOREACH(shad, &nstat_userprot_shad_head, shad_link) {
3004 assert(shad->shad_magic == TU_SHADOW_MAGIC);
3005 assert(shad->shad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
3006 if (shad->shad_get_extension_fn(shad->shad_provider_context, NSTAT_EXTENDED_UPDATE_TYPE_FUUID, fuuid, sizeof(fuuid))) {
			if (uuid_compare(fuuid, target_fuuid) == 0) {
3008 break;
3009 }
3010 }
3011 }
3012 if (shad) {
3013 if (shad->shad_procdetails->pdet_pid != proc_selfpid()) {
3014 result = EPERM;
3015 } else {
3016 result = 0;
3017 // It would be possible but awkward to check the previous value
3018 // for RNF override, and send an event only if changed.
3019 // In practice it's fine to send an event regardless,
3020 // which "pushes" the last statistics for the previous mode
3021 shad->shad_rnf_override = rnf_override ? nstat_rnf_override_enabled
3022 : nstat_rnf_override_disabled;
			ntstat_userland_stats_event_locked(shad,
			    rnf_override ? NSTAT_EVENT_SRC_ENTER_CELLFALLBACK
			    : NSTAT_EVENT_SRC_EXIT_CELLFALLBACK);
3026 }
3027 } else {
3028 result = EEXIST;
3029 }
3030
	lck_mtx_unlock(&nstat_mtx);
3032
3033 return result;
3034}
3035
3036#pragma mark -- Generic Providers --
3037
3038static nstat_provider nstat_userland_conn_provider;
3039static nstat_provider nstat_udp_subflow_provider;
3040
3041static u_int32_t nstat_generic_provider_watchers[NSTAT_PROVIDER_COUNT];
3042
3043struct nstat_generic_shadow {
3044 tailq_entry_generic_shadow gshad_link;
3045 nstat_provider_context gshad_provider_context;
3046 nstat_provider_request_vals_fn *gshad_getvals_fn;
3047 nstat_provider_request_extensions_fn *gshad_getextensions_fn;
3048 u_int64_t gshad_properties;
3049 u_int64_t gshad_start_timestamp;
3050 struct nstat_procdetails *gshad_procdetails;
3051 nstat_provider_id_t gshad_provider;
3052 int32_t gshad_refcnt;
3053 uint32_t gshad_magic;
3054};
3055
3056// Magic number checking should remain in place until the userland provider has been fully proven
3057#define NSTAT_GENERIC_SHADOW_MAGIC 0xfadef00d
3058#define NSTAT_GENERIC_SHADOW_UNMAGIC 0xfadedead
3059
3060static tailq_head_generic_shadow nstat_gshad_head = TAILQ_HEAD_INITIALIZER(nstat_gshad_head);
3061
3062static inline void
3063nstat_retain_gshad(
3064 struct nstat_generic_shadow *gshad)
3065{
3066 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3067
3068 OSIncrementAtomic(&gshad->gshad_refcnt);
3069}
3070
3071static void
3072nstat_release_gshad(
3073 struct nstat_generic_shadow *gshad)
3074{
3075 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3076
3077 if (OSDecrementAtomic(&gshad->gshad_refcnt) == 1) {
		nstat_release_procdetails(gshad->gshad_procdetails);
3079 gshad->gshad_magic = NSTAT_GENERIC_SHADOW_UNMAGIC;
3080 kfree_type(struct nstat_generic_shadow, gshad);
3081 }
3082}
3083
3084static errno_t
3085nstat_generic_provider_lookup(
3086 __unused const void *data,
3087 __unused u_int32_t length,
3088 __unused nstat_provider_cookie_t *out_cookie)
3089{
3090 // Looking up a specific connection is not supported
3091 return ENOTSUP;
3092}
3093
3094static int
3095nstat_generic_provider_gone(
3096 __unused nstat_provider_cookie_t cookie)
3097{
3098 // Returns non-zero if the source has gone.
3099 // We don't keep a source hanging around, so the answer is always 0
3100 return 0;
3101}
3102
3103static errno_t
3104nstat_generic_provider_counts(
3105 nstat_provider_cookie_t cookie,
3106 struct nstat_counts *out_counts,
3107 int *out_gone)
3108{
3109 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3110 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3111
	memset(out_counts, 0, sizeof(*out_counts));
3113
3114 bool result = (*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, out_counts, NULL);
3115
3116 if (out_gone) {
3117 *out_gone = 0;
3118 }
3119 return (result)? 0 : EIO;
3120}
3121
3122
3123static errno_t
3124nstat_generic_provider_copy_descriptor(
3125 nstat_provider_cookie_t cookie,
3126 void *data,
3127 __unused size_t len)
3128{
3129 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3130 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3131 struct nstat_procdetails *procdetails = gshad->gshad_procdetails;
3132 assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
3133
3134 bool result = (*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, NULL, data);
3135
3136 switch (gshad->gshad_provider) {
3137 case NSTAT_PROVIDER_CONN_USERLAND:
3138 {
3139 nstat_connection_descriptor *desc = (nstat_connection_descriptor *)data;
3140 desc->pid = procdetails->pdet_pid;
3141 desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
3144 desc->start_timestamp = gshad->gshad_start_timestamp;
3145 desc->timestamp = mach_continuous_time();
3146 break;
3147 }
3148 case NSTAT_PROVIDER_UDP_SUBFLOW:
3149 {
3150 nstat_udp_descriptor *desc = (nstat_udp_descriptor *)data;
3151 desc->pid = procdetails->pdet_pid;
3152 desc->upid = procdetails->pdet_upid;
		uuid_copy(desc->uuid, procdetails->pdet_uuid);
		strlcpy(desc->pname, procdetails->pdet_procname, sizeof(desc->pname));
3155 desc->start_timestamp = gshad->gshad_start_timestamp;
3156 desc->timestamp = mach_continuous_time();
3157 break;
3158 }
3159 default:
3160 break;
3161 }
3162 return (result)? 0 : EIO;
3163}
3164
3165static void
3166nstat_generic_provider_release(
3167 __unused nstat_provider_cookie_t cookie,
3168 __unused int locked)
3169{
3170 // Called when a nstat_src is detached.
3171 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3172
3173 nstat_release_gshad(gshad);
3174}
3175
3176static bool
3177nstat_generic_provider_reporting_allowed(
3178 nstat_provider_cookie_t cookie,
3179 nstat_provider_filter *filter,
3180 u_int64_t suppression_flags)
3181{
3182 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3183
3184 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3185
3186 if ((filter->npf_flags & NSTAT_FILTER_SUPPRESS_BORING_FLAGS) != 0) {
3187 if ((filter->npf_flags & suppression_flags) != 0) {
3188 return false;
3189 }
3190 }
3191
3192 // Filter based on interface and connection flags
3193 // If a provider doesn't support flags, a client shouldn't attempt to use filtering
3194 if ((filter->npf_flags & NSTAT_FILTER_IFNET_AND_CONN_FLAGS) != 0) {
3195 u_int32_t ifflags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
3196
3197 if ((*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, &ifflags, NULL, NULL)) {
3198 if ((filter->npf_flags & ifflags) == 0) {
3199 return false;
3200 }
3201 }
3202 }
3203
3204 if ((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) {
3205 struct nstat_procdetails *procdetails = gshad->gshad_procdetails;
3206 assert(procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
3207
3208 // Check details that we have readily to hand before asking the provider for descriptor items
3209 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
3210 (filter->npf_pid == procdetails->pdet_pid)) {
3211 return true;
3212 }
3213 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
		    (memcmp(filter->npf_uuid, &procdetails->pdet_uuid, sizeof(filter->npf_uuid)) == 0)) {
3215 return true;
3216 }
3217 if ((filter->npf_flags & (NSTAT_FILTER_SPECIFIC_USER_BY_EPID | NSTAT_FILTER_SPECIFIC_USER_BY_EUUID)) != 0) {
3218 nstat_udp_descriptor udp_desc; // Stack allocation - OK or pushing the limits too far?
3219 switch (gshad->gshad_provider) {
3220 case NSTAT_PROVIDER_CONN_USERLAND:
3221 // Filtering by effective uuid or effective pid is currently not supported
3222 filter->npf_flags &= ~((uint64_t)(NSTAT_FILTER_SPECIFIC_USER_BY_EPID | NSTAT_FILTER_SPECIFIC_USER_BY_EUUID));
3223 printf("%s - attempt to filter conn provider by effective pid/uuid, not supported\n", __func__);
3224 return true;
3225
3226 case NSTAT_PROVIDER_UDP_SUBFLOW:
3227 if ((*gshad->gshad_getvals_fn)(gshad->gshad_provider_context, NULL, NULL, &udp_desc)) {
				if (check_reporting_for_user(filter, procdetails->pdet_pid, (pid_t)udp_desc.epid,
				    &procdetails->pdet_uuid, &udp_desc.euuid)) {
3230 return true;
3231 }
3232 }
3233 break;
3234 default:
3235 break;
3236 }
3237 }
3238 return false;
3239 }
3240 return true;
3241}
3242
3243static size_t
3244nstat_generic_extensions(nstat_provider_cookie_t cookie, u_int32_t extension_id, void *buf, size_t len)
3245{
3246 struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)cookie;
3247 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3248 assert(gshad->gshad_procdetails->pdet_magic == NSTAT_PROCDETAILS_MAGIC);
3249
3250 if (gshad->gshad_getextensions_fn == NULL) {
3251 return 0;
3252 }
3253 return gshad->gshad_getextensions_fn(gshad->gshad_provider_context, extension_id, buf, len);
3254}
3255
3256static errno_t
3257nstat_generic_provider_add_watcher(
3258 nstat_control_state *state,
3259 nstat_msg_add_all_srcs *req)
3260{
3261 errno_t result;
3262 nstat_provider_id_t provider_id = req->provider;
3263 nstat_provider *provider;
3264
3265 switch (provider_id) {
3266 case NSTAT_PROVIDER_CONN_USERLAND:
3267 provider = &nstat_userland_conn_provider;
3268 break;
3269 case NSTAT_PROVIDER_UDP_SUBFLOW:
3270 provider = &nstat_udp_subflow_provider;
3271 break;
3272 default:
3273 return ENOTSUP;
3274 }
3275
	lck_mtx_lock(&nstat_mtx);
3277 result = nstat_set_provider_filter(state, req);
3278
3279 if (result == 0) {
3280 struct nstat_generic_shadow *gshad;
3281 nstat_provider_filter *filter = &state->ncs_provider_filters[provider_id];
3282
3283 OSIncrementAtomic(&nstat_generic_provider_watchers[provider_id]);
3284
3285 TAILQ_FOREACH(gshad, &nstat_gshad_head, gshad_link) {
3286 assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);
3287
3288 if (gshad->gshad_provider == provider_id) {
3289 if (filter->npf_flags & NSTAT_FILTER_INITIAL_PROPERTIES) {
3290 u_int64_t npf_flags = filter->npf_flags & NSTAT_FILTER_IFNET_AND_CONN_FLAGS;
3291 if ((npf_flags != 0) && ((npf_flags & gshad->gshad_properties) == 0)) {
3292 // Skip this one
3293 // Note - no filtering by pid or UUID supported at this point, for simplicity
3294 continue;
3295 }
3296 }
3297 nstat_retain_gshad(gshad);
				result = nstat_control_source_add(0, state, provider, gshad);
3299 if (result != 0) {
3300 printf("%s - nstat_control_source_add returned %d for "
3301 "provider type: %d\n", __func__, result, provider_id);
3302 nstat_release_gshad(gshad);
3303 break;
3304 }
3305 }
3306 }
3307 }
	lck_mtx_unlock(&nstat_mtx);
3309
3310 return result;
3311}
3312
3313static void
3314nstat_userland_conn_remove_watcher(
3315 __unused nstat_control_state *state)
3316{
3317 OSDecrementAtomic(&nstat_generic_provider_watchers[NSTAT_PROVIDER_CONN_USERLAND]);
3318}
3319
3320static void
3321nstat_udp_subflow_remove_watcher(
3322 __unused nstat_control_state *state)
3323{
3324 OSDecrementAtomic(&nstat_generic_provider_watchers[NSTAT_PROVIDER_UDP_SUBFLOW]);
3325}
3326
3327static void
3328nstat_init_userland_conn_provider(void)
3329{
	bzero(&nstat_userland_conn_provider, sizeof(nstat_userland_conn_provider));
3331 nstat_userland_conn_provider.nstat_descriptor_length = sizeof(nstat_connection_descriptor);
3332 nstat_userland_conn_provider.nstat_provider_id = NSTAT_PROVIDER_CONN_USERLAND;
3333 nstat_userland_conn_provider.nstat_lookup = nstat_generic_provider_lookup;
3334 nstat_userland_conn_provider.nstat_gone = nstat_generic_provider_gone;
3335 nstat_userland_conn_provider.nstat_counts = nstat_generic_provider_counts;
3336 nstat_userland_conn_provider.nstat_release = nstat_generic_provider_release;
3337 nstat_userland_conn_provider.nstat_watcher_add = nstat_generic_provider_add_watcher;
3338 nstat_userland_conn_provider.nstat_watcher_remove = nstat_userland_conn_remove_watcher;
3339 nstat_userland_conn_provider.nstat_copy_descriptor = nstat_generic_provider_copy_descriptor;
3340 nstat_userland_conn_provider.nstat_reporting_allowed = nstat_generic_provider_reporting_allowed;
3341 nstat_userland_conn_provider.nstat_copy_extension = nstat_generic_extensions;
3342 nstat_userland_conn_provider.next = nstat_providers;
3343 nstat_providers = &nstat_userland_conn_provider;
3344}
3345
3346static void
3347nstat_init_udp_subflow_provider(void)
3348{
3349 bzero(s: &nstat_udp_subflow_provider, n: sizeof(nstat_udp_subflow_provider));
3350 nstat_udp_subflow_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
3351 nstat_udp_subflow_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_SUBFLOW;
3352 nstat_udp_subflow_provider.nstat_lookup = nstat_generic_provider_lookup;
3353 nstat_udp_subflow_provider.nstat_gone = nstat_generic_provider_gone;
3354 nstat_udp_subflow_provider.nstat_counts = nstat_generic_provider_counts;
3355 nstat_udp_subflow_provider.nstat_release = nstat_generic_provider_release;
3356 nstat_udp_subflow_provider.nstat_watcher_add = nstat_generic_provider_add_watcher;
3357 nstat_udp_subflow_provider.nstat_watcher_remove = nstat_udp_subflow_remove_watcher;
3358 nstat_udp_subflow_provider.nstat_copy_descriptor = nstat_generic_provider_copy_descriptor;
3359 nstat_udp_subflow_provider.nstat_reporting_allowed = nstat_generic_provider_reporting_allowed;
3360 nstat_udp_subflow_provider.nstat_copy_extension = nstat_generic_extensions;
3361 nstat_udp_subflow_provider.next = nstat_providers;
3362 nstat_providers = &nstat_udp_subflow_provider;
3363}
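
/*
 * Both init routines above follow the same registration pattern: zero the
 * ops table, fill in the shared nstat_generic_provider_* callbacks, then
 * push the provider onto the global singly-linked nstat_providers list
 * consulted by nstat_find_provider_by_id().  A minimal sketch of the
 * pattern for a hypothetical new provider (NSTAT_PROVIDER_FOO,
 * nstat_foo_provider, and nstat_foo_descriptor do not exist in this
 * file):
 *
 *	static void
 *	nstat_init_foo_provider(void)
 *	{
 *		bzero(&nstat_foo_provider, sizeof(nstat_foo_provider));
 *		nstat_foo_provider.nstat_descriptor_length = sizeof(nstat_foo_descriptor);
 *		nstat_foo_provider.nstat_provider_id = NSTAT_PROVIDER_FOO;
 *		// ... wire up the generic lookup/gone/counts/release callbacks ...
 *		nstat_foo_provider.next = nstat_providers;
 *		nstat_providers = &nstat_foo_provider;
 *	}
 */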

// Things get started with a call from the provider to netstats to say that there's a new source
__private_extern__ nstat_context
nstat_provider_stats_open(nstat_provider_context ctx,
    int provider_id,
    u_int64_t properties,
    nstat_provider_request_vals_fn req_fn,
    nstat_provider_request_extensions_fn req_extensions_fn)
{
	struct nstat_generic_shadow *gshad;
	struct nstat_procdetails *procdetails;
	nstat_provider *provider = nstat_find_provider_by_id(provider_id);

	gshad = kalloc_type(struct nstat_generic_shadow, Z_WAITOK | Z_NOFAIL);

	procdetails = nstat_retain_curprocdetails();

	if (procdetails == NULL) {
		kfree_type(struct nstat_generic_shadow, gshad);
		return NULL;
	}

	gshad->gshad_getvals_fn = req_fn;
	gshad->gshad_getextensions_fn = req_extensions_fn;
	gshad->gshad_provider_context = ctx;
	gshad->gshad_properties = properties;
	gshad->gshad_procdetails = procdetails;
	gshad->gshad_provider = provider_id;
	gshad->gshad_start_timestamp = mach_continuous_time();
	gshad->gshad_refcnt = 0;
	gshad->gshad_magic = NSTAT_GENERIC_SHADOW_MAGIC;
	nstat_retain_gshad(gshad);

	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;

	// Even if there are no watchers, we save the shadow structure
	TAILQ_INSERT_HEAD(&nstat_gshad_head, gshad, gshad_link);

	for (state = nstat_controls; state; state = state->ncs_next) {
		if ((state->ncs_watching & (1 << provider_id)) != 0) {
			// Does this client want initial filtering applied?
			u_int64_t npf_flags = state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
			if (npf_flags & NSTAT_FILTER_INITIAL_PROPERTIES) {
				npf_flags &= NSTAT_FILTER_IFNET_AND_CONN_FLAGS;
				if ((npf_flags != 0) && ((npf_flags & properties) == 0)) {
					// Skip this one.
					// Note: for simplicity, no filtering by pid or UUID is supported at this point.
					continue;
				}
			}
			// This client is watching, so link to it.
			nstat_retain_gshad(gshad);
			int result = nstat_control_source_add(0, state, provider, gshad);
			if (result != 0) {
				// There should be some kind of statistics for failures like this.
				// <rdar://problem/31377195> The kernel ntstat component should keep some
				// internal counters reflecting operational state for eventual AWD reporting
				nstat_release_gshad(gshad);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	return (nstat_context)gshad;
}
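
/*
 * Illustrative provider-side usage of the open/close pair (everything
 * below except the nstat_* calls and NSTAT_PROVIDER_CONN_USERLAND is
 * hypothetical): a subsystem opens a shadow when its flow is created and
 * closes it on teardown, at which point netstats makes one last call on
 * the request functions to capture final values.
 *
 *	struct my_flow {
 *		nstat_context mf_nstat_ctx;
 *		// ... flow state ...
 *	};
 *
 *	static void
 *	my_flow_created(struct my_flow *flow, u_int64_t properties)
 *	{
 *		flow->mf_nstat_ctx = nstat_provider_stats_open(
 *		    (nstat_provider_context)flow, NSTAT_PROVIDER_CONN_USERLAND,
 *		    properties, my_flow_getvals, my_flow_getextensions);
 *	}
 *
 *	static void
 *	my_flow_destroyed(struct my_flow *flow)
 *	{
 *		if (flow->mf_nstat_ctx != NULL) {
 *			nstat_provider_stats_close(flow->mf_nstat_ctx);
 *			flow->mf_nstat_ctx = NULL;
 *		}
 *	}
 */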


// When the source is closed, netstats will make one last call on the request functions to retrieve final values
__private_extern__ void
nstat_provider_stats_close(nstat_context nstat_ctx)
{
	tailq_head_nstat_src dead_list;
	nstat_src *src;
	struct nstat_generic_shadow *gshad = (struct nstat_generic_shadow *)nstat_ctx;

	if (gshad == NULL) {
		printf("%s - called with null reference\n", __func__);
		return;
	}

	assert(gshad->gshad_magic == NSTAT_GENERIC_SHADOW_MAGIC);

	if (gshad->gshad_magic != NSTAT_GENERIC_SHADOW_MAGIC) {
		printf("%s - called with incorrect shadow magic 0x%x\n", __func__,