1/*
2 * Copyright (c) 2010-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <sys/param.h>
30#include <sys/types.h>
31#include <sys/kpi_mbuf.h>
32#include <sys/socket.h>
33#include <sys/kern_control.h>
34#include <sys/mcache.h>
35#include <sys/socketvar.h>
36#include <sys/sysctl.h>
37#include <sys/queue.h>
38#include <sys/priv.h>
39#include <sys/protosw.h>
40
41#include <kern/clock.h>
42#include <kern/debug.h>
43
44#include <libkern/libkern.h>
45#include <libkern/OSMalloc.h>
46#include <libkern/OSAtomic.h>
47#include <libkern/locks.h>
48
49#include <net/if.h>
50#include <net/if_var.h>
51#include <net/if_types.h>
52#include <net/route.h>
53
54// These includes appear in ntstat.h but we include them here first so they won't trigger
55// any clang diagnostic errors.
56#include <netinet/in.h>
57#include <netinet/in_stat.h>
58#include <netinet/tcp.h>
59
60#pragma clang diagnostic push
61#pragma clang diagnostic error "-Wpadded"
62#pragma clang diagnostic error "-Wpacked"
63// This header defines structures shared with user space, so we need to ensure there is
64// no compiler inserted padding in case the user space process isn't using the same
65// architecture as the kernel (example: i386 process with x86_64 kernel).
66#include <net/ntstat.h>
67#pragma clang diagnostic pop
68
69#include <netinet/ip_var.h>
70#include <netinet/in_pcb.h>
71#include <netinet/in_var.h>
72#include <netinet/tcp_var.h>
73#include <netinet/tcp_fsm.h>
74#include <netinet/tcp_cc.h>
75#include <netinet/udp.h>
76#include <netinet/udp_var.h>
77#include <netinet6/in6_pcb.h>
78#include <netinet6/in6_var.h>
79
80__private_extern__ int nstat_collect = 1;
81
82#if (DEBUG || DEVELOPMENT)
83SYSCTL_INT(_net, OID_AUTO, statistics, CTLFLAG_RW | CTLFLAG_LOCKED,
84 &nstat_collect, 0, "Collect detailed statistics");
85#endif /* (DEBUG || DEVELOPMENT) */
86
87#if CONFIG_EMBEDDED
88static int nstat_privcheck = 1;
89#else
90static int nstat_privcheck = 0;
91#endif
92SYSCTL_INT(_net, OID_AUTO, statistics_privcheck, CTLFLAG_RW | CTLFLAG_LOCKED,
93 &nstat_privcheck, 0, "Entitlement check");
94
95SYSCTL_NODE(_net, OID_AUTO, stats,
96 CTLFLAG_RW|CTLFLAG_LOCKED, 0, "network statistics");
97
98static int nstat_debug = 0;
99SYSCTL_INT(_net_stats, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
100 &nstat_debug, 0, "");
101
102static int nstat_sendspace = 2048;
103SYSCTL_INT(_net_stats, OID_AUTO, sendspace, CTLFLAG_RW | CTLFLAG_LOCKED,
104 &nstat_sendspace, 0, "");
105
106static int nstat_recvspace = 8192;
107SYSCTL_INT(_net_stats, OID_AUTO, recvspace, CTLFLAG_RW | CTLFLAG_LOCKED,
108 &nstat_recvspace, 0, "");
109
110static struct nstat_stats nstat_stats;
111SYSCTL_STRUCT(_net_stats, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_LOCKED,
112 &nstat_stats, nstat_stats, "");
113
114static u_int32_t nstat_lim_interval = 30 * 60; /* Report interval, seconds */
115static u_int32_t nstat_lim_min_tx_pkts = 100;
116static u_int32_t nstat_lim_min_rx_pkts = 100;
117#if (DEBUG || DEVELOPMENT)
118SYSCTL_INT(_net_stats, OID_AUTO, lim_report_interval,
119 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_interval, 0,
120 "Low internet stat report interval");
121
122SYSCTL_INT(_net_stats, OID_AUTO, lim_min_tx_pkts,
123 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_tx_pkts, 0,
124 "Low Internet, min transmit packets threshold");
125
126SYSCTL_INT(_net_stats, OID_AUTO, lim_min_rx_pkts,
127 CTLFLAG_RW | CTLFLAG_LOCKED, &nstat_lim_min_rx_pkts, 0,
128 "Low Internet, min receive packets threshold");
129#endif /* DEBUG || DEVELOPMENT */
130
131static struct net_api_stats net_api_stats_before;
132static u_int64_t net_api_stats_last_report_time;
133#define NET_API_STATS_REPORT_INTERVAL (12 * 60 * 60) /* 12 hours, in seconds */
134static u_int32_t net_api_stats_report_interval = NET_API_STATS_REPORT_INTERVAL;
135
136#if (DEBUG || DEVELOPMENT)
137SYSCTL_UINT(_net_stats, OID_AUTO, api_report_interval,
138 CTLFLAG_RW | CTLFLAG_LOCKED, &net_api_stats_report_interval, 0, "");
139#endif /* DEBUG || DEVELOPMENT */
140
// Per-client session flags stored in nstat_control_state.ncs_flags.
enum
{
	NSTAT_FLAG_CLEANUP              = (1 << 0),
	NSTAT_FLAG_REQCOUNTS            = (1 << 1),
	NSTAT_FLAG_SUPPORTS_UPDATES     = (1 << 2),
	NSTAT_FLAG_SYSINFO_SUBSCRIBED   = (1 << 3),
};
148
149#if CONFIG_EMBEDDED
150#define QUERY_CONTINUATION_SRC_COUNT 50
151#else
152#define QUERY_CONTINUATION_SRC_COUNT 100
153#endif
154
155typedef TAILQ_HEAD(, nstat_src) tailq_head_nstat_src;
156typedef TAILQ_ENTRY(nstat_src) tailq_entry_nstat_src;
157
// Filter a client installs for one provider: flag bits (e.g. the
// NSTAT_FILTER_IFNET_FLAGS interface-property mask), an event mask, and an
// optional pid/uuid to match. One instance per provider lives in
// nstat_control_state.ncs_provider_filters.
typedef struct nstat_provider_filter
{
	u_int64_t	npf_flags;
	u_int64_t	npf_events;
	pid_t		npf_pid;
	uuid_t		npf_uuid;
} nstat_provider_filter;
165
166
// Per-client state for one kernel-control connection to the statistics
// subsystem. Instances are linked into the global nstat_controls list.
typedef struct nstat_control_state
{
	struct nstat_control_state	*ncs_next;      // next client in nstat_controls
	u_int32_t					ncs_watching;   // bitmask of (1 << provider id) being watched
	decl_lck_mtx_data(, ncs_mtx);
	kern_ctl_ref				ncs_kctl;       // kernel control reference for this client
	u_int32_t					ncs_unit;
	nstat_src_ref_t				ncs_next_srcref;
	tailq_head_nstat_src		ncs_src_queue;  // sources attached to this client
	mbuf_t						ncs_accumulated;
	u_int32_t					ncs_flags;      // NSTAT_FLAG_* values
	nstat_provider_filter		ncs_provider_filters[NSTAT_PROVIDER_COUNT];
	/* state maintained for partial query requests */
	u_int64_t					ncs_context;
	u_int64_t					ncs_seq;
} nstat_control_state;
183
// Callback table implemented by each statistics provider (route, TCP, UDP,
// ifnet, ...). Providers register by linking themselves onto the global
// nstat_providers list, which nstat_find_provider_by_id() walks.
typedef struct nstat_provider
{
	struct nstat_provider	*next;                       // next registered provider
	nstat_provider_id_t		nstat_provider_id;
	size_t					nstat_descriptor_length;     // size of the provider's descriptor struct
	errno_t					(*nstat_lookup)(const void *data, u_int32_t length, nstat_provider_cookie_t *out_cookie);
	int						(*nstat_gone)(nstat_provider_cookie_t cookie);
	errno_t					(*nstat_counts)(nstat_provider_cookie_t cookie, struct nstat_counts *out_counts, int *out_gone);
	errno_t					(*nstat_watcher_add)(nstat_control_state *state, nstat_msg_add_all_srcs *req);
	void					(*nstat_watcher_remove)(nstat_control_state *state);
	errno_t					(*nstat_copy_descriptor)(nstat_provider_cookie_t cookie, void *data, u_int32_t len);
	void					(*nstat_release)(nstat_provider_cookie_t cookie, boolean_t locked);
	bool					(*nstat_reporting_allowed)(nstat_provider_cookie_t cookie, nstat_provider_filter *filter);
} nstat_provider;
198
199typedef STAILQ_HEAD(, nstat_src) stailq_head_nstat_src;
200typedef STAILQ_ENTRY(nstat_src) stailq_entry_nstat_src;
201
202typedef TAILQ_HEAD(, nstat_tu_shadow) tailq_head_tu_shadow;
203typedef TAILQ_ENTRY(nstat_tu_shadow) tailq_entry_tu_shadow;
204
205typedef TAILQ_HEAD(, nstat_procdetails) tailq_head_procdetails;
206typedef TAILQ_ENTRY(nstat_procdetails) tailq_entry_procdetails;
207
// One statistics source: ties a provider cookie (e.g. an rtentry or a
// tucookie) to the client watching it.
typedef struct nstat_src
{
	tailq_entry_nstat_src	ns_control_link;	// All sources for the nstat_control_state, for iterating over.
	nstat_control_state		*ns_control;		// The nstat_control_state that this is a source for
	nstat_src_ref_t			srcref;				// client-visible handle for this source
	nstat_provider			*provider;			// provider whose callbacks service this source
	nstat_provider_cookie_t	cookie;				// provider-private object reference
	uint32_t				filter;
	uint64_t				seq;				// sequence state for partial queries
} nstat_src;
218
219static errno_t nstat_control_send_counts(nstat_control_state *,
220 nstat_src *, unsigned long long, u_int16_t, int *);
221static int nstat_control_send_description(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags);
222static int nstat_control_send_update(nstat_control_state *state, nstat_src *src, u_int64_t context, u_int16_t hdr_flags, int *gone);
223static errno_t nstat_control_send_removed(nstat_control_state *, nstat_src *);
224static errno_t nstat_control_send_goodbye(nstat_control_state *state, nstat_src *src);
225static void nstat_control_cleanup_source(nstat_control_state *state, nstat_src *src, boolean_t);
226static bool nstat_control_reporting_allowed(nstat_control_state *state, nstat_src *src);
227static boolean_t nstat_control_begin_query(nstat_control_state *state, const nstat_msg_hdr *hdrp);
228static u_int16_t nstat_control_end_query(nstat_control_state *state, nstat_src *last_src, boolean_t partial);
229static void nstat_ifnet_report_ecn_stats(void);
230static void nstat_ifnet_report_lim_stats(void);
231static void nstat_net_api_report_stats(void);
232static errno_t nstat_set_provider_filter( nstat_control_state *state, nstat_msg_add_all_srcs *req);
233
234static u_int32_t nstat_udp_watchers = 0;
235static u_int32_t nstat_tcp_watchers = 0;
236
237static void nstat_control_register(void);
238
239/*
240 * The lock order is as follows:
241 *
242 * socket_lock (inpcb)
243 * nstat_mtx
244 * state->ncs_mtx
245 */
246static volatile OSMallocTag nstat_malloc_tag = NULL;
247static nstat_control_state *nstat_controls = NULL;
248static uint64_t nstat_idle_time = 0;
249static decl_lck_mtx_data(, nstat_mtx);
250
251/* some extern definitions */
252extern void mbuf_report_peak_usage(void);
253extern void tcp_report_stats(void);
254
255static void
256nstat_copy_sa_out(
257 const struct sockaddr *src,
258 struct sockaddr *dst,
259 int maxlen)
260{
261 if (src->sa_len > maxlen) return;
262
263 bcopy(src, dst, src->sa_len);
264 if (src->sa_family == AF_INET6 &&
265 src->sa_len >= sizeof(struct sockaddr_in6))
266 {
267 struct sockaddr_in6 *sin6 = (struct sockaddr_in6*)(void *)dst;
268 if (IN6_IS_SCOPE_EMBED(&sin6->sin6_addr))
269 {
270 if (sin6->sin6_scope_id == 0)
271 sin6->sin6_scope_id = ntohs(sin6->sin6_addr.s6_addr16[1]);
272 sin6->sin6_addr.s6_addr16[1] = 0;
273 }
274 }
275}
276
277static void
278nstat_ip_to_sockaddr(
279 const struct in_addr *ip,
280 u_int16_t port,
281 struct sockaddr_in *sin,
282 u_int32_t maxlen)
283{
284 if (maxlen < sizeof(struct sockaddr_in))
285 return;
286
287 sin->sin_family = AF_INET;
288 sin->sin_len = sizeof(*sin);
289 sin->sin_port = port;
290 sin->sin_addr = *ip;
291}
292
293u_int16_t
294nstat_ifnet_to_flags(
295 struct ifnet *ifp)
296{
297 u_int16_t flags = 0;
298 u_int32_t functional_type = if_functional_type(ifp, FALSE);
299
300 /* Panic if someone adds a functional type without updating ntstat. */
301 VERIFY(0 <= functional_type && functional_type <= IFRTYPE_FUNCTIONAL_LAST);
302
303 switch (functional_type)
304 {
305 case IFRTYPE_FUNCTIONAL_UNKNOWN:
306 flags |= NSTAT_IFNET_IS_UNKNOWN_TYPE;
307 break;
308 case IFRTYPE_FUNCTIONAL_LOOPBACK:
309 flags |= NSTAT_IFNET_IS_LOOPBACK;
310 break;
311 case IFRTYPE_FUNCTIONAL_WIRED:
312 case IFRTYPE_FUNCTIONAL_INTCOPROC:
313 flags |= NSTAT_IFNET_IS_WIRED;
314 break;
315 case IFRTYPE_FUNCTIONAL_WIFI_INFRA:
316 flags |= NSTAT_IFNET_IS_WIFI;
317 break;
318 case IFRTYPE_FUNCTIONAL_WIFI_AWDL:
319 flags |= NSTAT_IFNET_IS_WIFI;
320 flags |= NSTAT_IFNET_IS_AWDL;
321 break;
322 case IFRTYPE_FUNCTIONAL_CELLULAR:
323 flags |= NSTAT_IFNET_IS_CELLULAR;
324 break;
325 }
326
327 if (IFNET_IS_EXPENSIVE(ifp))
328 {
329 flags |= NSTAT_IFNET_IS_EXPENSIVE;
330 }
331
332 return flags;
333}
334
335static u_int16_t
336nstat_inpcb_to_flags(
337 const struct inpcb *inp)
338{
339 u_int16_t flags = 0;
340
341 if ((inp != NULL ) && (inp->inp_last_outifp != NULL))
342 {
343 struct ifnet *ifp = inp->inp_last_outifp;
344 flags = nstat_ifnet_to_flags(ifp);
345
346 if (flags & NSTAT_IFNET_IS_CELLULAR)
347 {
348 if (inp->inp_socket != NULL &&
349 (inp->inp_socket->so_flags1 & SOF1_CELLFALLBACK))
350 flags |= NSTAT_IFNET_VIA_CELLFALLBACK;
351 }
352 }
353 else
354 {
355 flags = NSTAT_IFNET_IS_UNKNOWN_TYPE;
356 }
357
358 return flags;
359}
360
361#pragma mark -- Network Statistic Providers --
362
363static errno_t nstat_control_source_add(u_int64_t context, nstat_control_state *state, nstat_provider *provider, nstat_provider_cookie_t cookie);
364struct nstat_provider *nstat_providers = NULL;
365
366static struct nstat_provider*
367nstat_find_provider_by_id(
368 nstat_provider_id_t id)
369{
370 struct nstat_provider *provider;
371
372 for (provider = nstat_providers; provider != NULL; provider = provider->next)
373 {
374 if (provider->nstat_provider_id == id)
375 break;
376 }
377
378 return provider;
379}
380
381static errno_t
382nstat_lookup_entry(
383 nstat_provider_id_t id,
384 const void *data,
385 u_int32_t length,
386 nstat_provider **out_provider,
387 nstat_provider_cookie_t *out_cookie)
388{
389 *out_provider = nstat_find_provider_by_id(id);
390 if (*out_provider == NULL)
391 {
392 return ENOENT;
393 }
394
395 return (*out_provider)->nstat_lookup(data, length, out_cookie);
396}
397
398static void nstat_init_route_provider(void);
399static void nstat_init_tcp_provider(void);
400static void nstat_init_udp_provider(void);
401static void nstat_init_ifnet_provider(void);
402
/*
 * One-time initialization of the statistics subsystem. Safe to call from
 * multiple threads: the compare-and-swap on nstat_malloc_tag elects a
 * single winner that registers the providers and the kernel control.
 */
__private_extern__ void
nstat_init(void)
{
	// Fast path: already initialized.
	if (nstat_malloc_tag != NULL) return;

	// Every racer allocates a tag; only the CAS winner publishes it.
	OSMallocTag tag = OSMalloc_Tagalloc(NET_STAT_CONTROL_NAME, OSMT_DEFAULT);
	if (!OSCompareAndSwapPtr(NULL, tag, &nstat_malloc_tag))
	{
		// Lost the race: discard our tag and adopt the winner's.
		OSMalloc_Tagfree(tag);
		tag = nstat_malloc_tag;
	}
	else
	{
		// We need to initialize other things; we do it here as this code
		// path will only be hit once.
		nstat_init_route_provider();
		nstat_init_tcp_provider();
		nstat_init_udp_provider();
		nstat_init_ifnet_provider();
		nstat_control_register();
	}
}
424
425#pragma mark -- Aligned Buffer Allocation --
426
// Bookkeeping stored immediately before every aligned allocation:
// `offset` is the distance from the raw OSMalloc pointer back to the
// aligned pointer handed out; `length` is the raw allocation size.
// Consumed by nstat_malloc_aligned()/nstat_free_aligned().
struct align_header
{
	u_int32_t	offset;
	u_int32_t	length;
};
432
/*
 * Allocate `length` bytes whose returned pointer is aligned to `alignment`
 * bytes. A struct align_header is stashed directly below the returned
 * pointer so nstat_free_aligned() can recover the raw allocation.
 * NOTE(review): P2ROUNDUP assumes alignment is a power of two — confirm
 * callers only pass powers of two.
 */
static void*
nstat_malloc_aligned(
	u_int32_t	length,
	u_int8_t	alignment,
	OSMallocTag	tag)
{
	struct align_header	*hdr = NULL;
	// Worst case: header plus (alignment - 1) bytes of slack.
	u_int32_t	size = length + sizeof(*hdr) + alignment - 1;

	u_int8_t *buffer = OSMalloc(size, tag);
	if (buffer == NULL) return NULL;

	// Reserve room for the header, then round up to the alignment.
	u_int8_t *aligned = buffer + sizeof(*hdr);
	aligned = (u_int8_t*)P2ROUNDUP(aligned, alignment);

	// The header sits immediately below the aligned pointer.
	hdr = (struct align_header*)(void *)(aligned - sizeof(*hdr));
	hdr->offset = aligned - buffer;
	hdr->length = size;

	return aligned;
}
454
/*
 * Free a buffer obtained from nstat_malloc_aligned(): read the header
 * stored just below the aligned pointer and release the raw allocation.
 */
static void
nstat_free_aligned(
	void		*buffer,
	OSMallocTag	tag)
{
	struct align_header *hdr = (struct align_header*)(void *)((u_int8_t*)buffer - sizeof(*hdr));
	OSFree(((char*)buffer) - hdr->offset, hdr->length, tag);
}
463
464#pragma mark -- Route Provider --
465
466static nstat_provider nstat_route_provider;
467
468static errno_t
469nstat_route_lookup(
470 const void *data,
471 u_int32_t length,
472 nstat_provider_cookie_t *out_cookie)
473{
474 // rt_lookup doesn't take const params but it doesn't modify the parameters for
475 // the lookup. So...we use a union to eliminate the warning.
476 union
477 {
478 struct sockaddr *sa;
479 const struct sockaddr *const_sa;
480 } dst, mask;
481
482 const nstat_route_add_param *param = (const nstat_route_add_param*)data;
483 *out_cookie = NULL;
484
485 if (length < sizeof(*param))
486 {
487 return EINVAL;
488 }
489
490 if (param->dst.v4.sin_family == 0 ||
491 param->dst.v4.sin_family > AF_MAX ||
492 (param->mask.v4.sin_family != 0 && param->mask.v4.sin_family != param->dst.v4.sin_family))
493 {
494 return EINVAL;
495 }
496
497 if (param->dst.v4.sin_len > sizeof(param->dst) ||
498 (param->mask.v4.sin_family && param->mask.v4.sin_len > sizeof(param->mask.v4.sin_len)))
499 {
500 return EINVAL;
501 }
502 if ((param->dst.v4.sin_family == AF_INET &&
503 param->dst.v4.sin_len < sizeof(struct sockaddr_in)) ||
504 (param->dst.v6.sin6_family == AF_INET6 &&
505 param->dst.v6.sin6_len < sizeof(struct sockaddr_in6)))
506 {
507 return EINVAL;
508 }
509
510 dst.const_sa = (const struct sockaddr*)&param->dst;
511 mask.const_sa = param->mask.v4.sin_family ? (const struct sockaddr*)&param->mask : NULL;
512
513 struct radix_node_head *rnh = rt_tables[dst.sa->sa_family];
514 if (rnh == NULL) return EAFNOSUPPORT;
515
516 lck_mtx_lock(rnh_lock);
517 struct rtentry *rt = rt_lookup(TRUE, dst.sa, mask.sa, rnh, param->ifindex);
518 lck_mtx_unlock(rnh_lock);
519
520 if (rt) *out_cookie = (nstat_provider_cookie_t)rt;
521
522 return rt ? 0 : ENOENT;
523}
524
525static int
526nstat_route_gone(
527 nstat_provider_cookie_t cookie)
528{
529 struct rtentry *rt = (struct rtentry*)cookie;
530 return ((rt->rt_flags & RTF_UP) == 0) ? 1 : 0;
531}
532
/*
 * Copy a route's accumulated counters into out_counts. Routes that never
 * had a stats block attached report all-zero counters. When out_gone is
 * supplied it is set to 1 if the route is no longer up. Always returns 0.
 */
static errno_t
nstat_route_counts(
	nstat_provider_cookie_t	cookie,
	struct nstat_counts		*out_counts,
	int						*out_gone)
{
	struct rtentry		*rt = (struct rtentry*)cookie;
	struct nstat_counts	*rt_stats = rt->rt_stats;

	if (out_gone) *out_gone = 0;

	if (out_gone && (rt->rt_flags & RTF_UP) == 0) *out_gone = 1;

	if (rt_stats)
	{
		// 64-bit counters are fetched atomically; the remaining 32-bit
		// fields are copied directly.
		atomic_get_64(out_counts->nstat_rxpackets, &rt_stats->nstat_rxpackets);
		atomic_get_64(out_counts->nstat_rxbytes, &rt_stats->nstat_rxbytes);
		atomic_get_64(out_counts->nstat_txpackets, &rt_stats->nstat_txpackets);
		atomic_get_64(out_counts->nstat_txbytes, &rt_stats->nstat_txbytes);
		out_counts->nstat_rxduplicatebytes = rt_stats->nstat_rxduplicatebytes;
		out_counts->nstat_rxoutoforderbytes = rt_stats->nstat_rxoutoforderbytes;
		out_counts->nstat_txretransmit = rt_stats->nstat_txretransmit;
		out_counts->nstat_connectattempts = rt_stats->nstat_connectattempts;
		out_counts->nstat_connectsuccesses = rt_stats->nstat_connectsuccesses;
		out_counts->nstat_min_rtt = rt_stats->nstat_min_rtt;
		out_counts->nstat_avg_rtt = rt_stats->nstat_avg_rtt;
		out_counts->nstat_var_rtt = rt_stats->nstat_var_rtt;
		// Route stats do not track a cellular split; report zero.
		out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	}
	else
	{
		bzero(out_counts, sizeof(*out_counts));
	}

	return 0;
}
569
/*
 * Drop the rtentry reference carried by the cookie (taken at lookup or
 * watcher-add time). The `locked` hint is unused for routes.
 */
static void
nstat_route_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	rtfree((struct rtentry*)cookie);
}
577
578static u_int32_t nstat_route_watchers = 0;
579
/*
 * rnh_walktree callback used by nstat_route_add_watcher(): offer every
 * usable (up, not condemned) route in a radix tree to the watching client
 * in `context`. Must be called with rnh_lock held. Skipped routes return
 * 0 so the walk continues; a non-zero return aborts the walk.
 */
static int
nstat_route_walktree_add(
	struct radix_node	*rn,
	void				*context)
{
	errno_t	result = 0;
	struct rtentry *rt = (struct rtentry *)rn;
	nstat_control_state	*state	= (nstat_control_state*)context;

	LCK_MTX_ASSERT(rnh_lock, LCK_MTX_ASSERT_OWNED);

	/* RTF_UP can't change while rnh_lock is held */
	if ((rt->rt_flags & RTF_UP) != 0)
	{
		/* Clear RTPRF_OURS if the route is still usable */
		RT_LOCK(rt);
		if (rt_validate(rt)) {
			// Take the reference the new source will own.
			RT_ADDREF_LOCKED(rt);
			RT_UNLOCK(rt);
		} else {
			RT_UNLOCK(rt);
			rt = NULL;
		}

		/* Otherwise if RTF_CONDEMNED, treat it as if it were down */
		if (rt == NULL)
			return (0);

		result = nstat_control_source_add(0, state, &nstat_route_provider, rt);
		if (result != 0)
			// Adding the source failed; drop the reference taken above.
			rtfree_locked(rt);
	}

	return result;
}
615
/*
 * Begin watching all routes for a client: install the client's provider
 * filter, bump the global watcher count, then (still under rnh_lock, so
 * the tables cannot change underneath us) enumerate every routing table
 * and hand each existing route to the client as a source.
 */
static errno_t
nstat_route_add_watcher(
	nstat_control_state	*state,
	nstat_msg_add_all_srcs *req)
{
	int	i;
	errno_t result = 0;

	lck_mtx_lock(rnh_lock);

	result = nstat_set_provider_filter(state, req);
	if (result == 0)
	{
		OSIncrementAtomic(&nstat_route_watchers);

		// Walk each address family's table (index 0 is unused).
		for (i = 1; i < AF_MAX; i++)
		{
			struct radix_node_head *rnh;
			rnh = rt_tables[i];
			if (!rnh) continue;

			result = rnh->rnh_walktree(rnh, nstat_route_walktree_add, state);
			if (result != 0)
			{
				// This is probably resource exhaustion.
				// There currently isn't a good way to recover from this.
				// Least bad seems to be to give up on the add-all but leave
				// the watcher in place.
				break;
			}
		}
	}
	lck_mtx_unlock(rnh_lock);

	return result;
}
652
/*
 * Called when a new route comes up: offer it as a source to every client
 * currently watching the route provider.
 */
__private_extern__ void
nstat_route_new_entry(
	struct rtentry	*rt)
{
	// Unlocked fast path: skip the lock entirely when nobody is watching.
	if (nstat_route_watchers == 0)
		return;

	lck_mtx_lock(&nstat_mtx);
	if ((rt->rt_flags & RTF_UP) != 0)
	{
		nstat_control_state	*state;
		for (state = nstat_controls; state; state = state->ncs_next)
		{
			if ((state->ncs_watching & (1 << NSTAT_PROVIDER_ROUTE)) != 0)
			{
				// this client is watching routes
				// acquire a reference for the route
				RT_ADDREF(rt);

				// add the source, if that fails, release the reference
				if (nstat_control_source_add(0, state, &nstat_route_provider, rt) != 0)
					RT_REMREF(rt);
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
}
680
/*
 * Stop watching routes for a client; only the global watcher count needs
 * to be updated (per-source cleanup happens elsewhere).
 */
static void
nstat_route_remove_watcher(
	__unused nstat_control_state	*state)
{
	OSDecrementAtomic(&nstat_route_watchers);
}
687
688static errno_t
689nstat_route_copy_descriptor(
690 nstat_provider_cookie_t cookie,
691 void *data,
692 u_int32_t len)
693{
694 nstat_route_descriptor *desc = (nstat_route_descriptor*)data;
695 if (len < sizeof(*desc))
696 {
697 return EINVAL;
698 }
699 bzero(desc, sizeof(*desc));
700
701 struct rtentry *rt = (struct rtentry*)cookie;
702 desc->id = (uint64_t)VM_KERNEL_ADDRPERM(rt);
703 desc->parent_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_parent);
704 desc->gateway_id = (uint64_t)VM_KERNEL_ADDRPERM(rt->rt_gwroute);
705
706
707 // key/dest
708 struct sockaddr *sa;
709 if ((sa = rt_key(rt)))
710 nstat_copy_sa_out(sa, &desc->dst.sa, sizeof(desc->dst));
711
712 // mask
713 if ((sa = rt_mask(rt)) && sa->sa_len <= sizeof(desc->mask))
714 memcpy(&desc->mask, sa, sa->sa_len);
715
716 // gateway
717 if ((sa = rt->rt_gateway))
718 nstat_copy_sa_out(sa, &desc->gateway.sa, sizeof(desc->gateway));
719
720 if (rt->rt_ifp)
721 desc->ifindex = rt->rt_ifp->if_index;
722
723 desc->flags = rt->rt_flags;
724
725 return 0;
726}
727
728static bool
729nstat_route_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
730{
731 bool retval = true;
732
733 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
734 {
735 struct rtentry *rt = (struct rtentry*)cookie;
736 struct ifnet *ifp = rt->rt_ifp;
737
738 if (ifp)
739 {
740 uint16_t interface_properties = nstat_ifnet_to_flags(ifp);
741
742 if ((filter->npf_flags & interface_properties) == 0)
743 {
744 retval = false;
745 }
746 }
747 }
748 return retval;
749}
750
/*
 * Register the route provider: fill in its callback table and push it
 * onto the global provider list consulted by nstat_find_provider_by_id().
 * Called once from nstat_init().
 */
static void
nstat_init_route_provider(void)
{
	bzero(&nstat_route_provider, sizeof(nstat_route_provider));
	nstat_route_provider.nstat_descriptor_length = sizeof(nstat_route_descriptor);
	nstat_route_provider.nstat_provider_id = NSTAT_PROVIDER_ROUTE;
	nstat_route_provider.nstat_lookup = nstat_route_lookup;
	nstat_route_provider.nstat_gone = nstat_route_gone;
	nstat_route_provider.nstat_counts = nstat_route_counts;
	nstat_route_provider.nstat_release = nstat_route_release;
	nstat_route_provider.nstat_watcher_add = nstat_route_add_watcher;
	nstat_route_provider.nstat_watcher_remove = nstat_route_remove_watcher;
	nstat_route_provider.nstat_copy_descriptor = nstat_route_copy_descriptor;
	nstat_route_provider.nstat_reporting_allowed = nstat_route_reporting_allowed;
	nstat_route_provider.next = nstat_providers;
	nstat_providers = &nstat_route_provider;
}
768
769#pragma mark -- Route Collection --
770
/*
 * Return the route's statistics block, allocating a zeroed, 64-bit-aligned
 * one and publishing it via compare-and-swap if the route has none yet.
 * Losing the CAS race frees our block and adopts the winner's. Returns
 * NULL only on allocation failure.
 */
__private_extern__ struct nstat_counts*
nstat_route_attach(
	struct rtentry	*rte)
{
	struct nstat_counts *result = rte->rt_stats;
	if (result) return result;

	// Lazily bring up the subsystem so the malloc tag exists.
	if (nstat_malloc_tag == NULL) nstat_init();

	result = nstat_malloc_aligned(sizeof(*result), sizeof(u_int64_t), nstat_malloc_tag);
	if (!result) return result;

	bzero(result, sizeof(*result));

	if (!OSCompareAndSwapPtr(NULL, result, &rte->rt_stats))
	{
		// Another thread attached a block first; use theirs.
		nstat_free_aligned(result, nstat_malloc_tag);
		result = rte->rt_stats;
	}

	return result;
}
793
794__private_extern__ void
795nstat_route_detach(
796 struct rtentry *rte)
797{
798 if (rte->rt_stats)
799 {
800 nstat_free_aligned(rte->rt_stats, nstat_malloc_tag);
801 rte->rt_stats = NULL;
802 }
803}
804
805__private_extern__ void
806nstat_route_connect_attempt(
807 struct rtentry *rte)
808{
809 while (rte)
810 {
811 struct nstat_counts* stats = nstat_route_attach(rte);
812 if (stats)
813 {
814 OSIncrementAtomic(&stats->nstat_connectattempts);
815 }
816
817 rte = rte->rt_parent;
818 }
819}
820
821__private_extern__ void
822nstat_route_connect_success(
823 struct rtentry *rte)
824{
825 // This route
826 while (rte)
827 {
828 struct nstat_counts* stats = nstat_route_attach(rte);
829 if (stats)
830 {
831 OSIncrementAtomic(&stats->nstat_connectsuccesses);
832 }
833
834 rte = rte->rt_parent;
835 }
836}
837
838__private_extern__ void
839nstat_route_tx(
840 struct rtentry *rte,
841 u_int32_t packets,
842 u_int32_t bytes,
843 u_int32_t flags)
844{
845 while (rte)
846 {
847 struct nstat_counts* stats = nstat_route_attach(rte);
848 if (stats)
849 {
850 if ((flags & NSTAT_TX_FLAG_RETRANSMIT) != 0)
851 {
852 OSAddAtomic(bytes, &stats->nstat_txretransmit);
853 }
854 else
855 {
856 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_txpackets);
857 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_txbytes);
858 }
859 }
860
861 rte = rte->rt_parent;
862 }
863}
864
865__private_extern__ void
866nstat_route_rx(
867 struct rtentry *rte,
868 u_int32_t packets,
869 u_int32_t bytes,
870 u_int32_t flags)
871{
872 while (rte)
873 {
874 struct nstat_counts* stats = nstat_route_attach(rte);
875 if (stats)
876 {
877 if (flags == 0)
878 {
879 OSAddAtomic64((SInt64)packets, (SInt64*)&stats->nstat_rxpackets);
880 OSAddAtomic64((SInt64)bytes, (SInt64*)&stats->nstat_rxbytes);
881 }
882 else
883 {
884 if (flags & NSTAT_RX_FLAG_OUT_OF_ORDER)
885 OSAddAtomic(bytes, &stats->nstat_rxoutoforderbytes);
886 if (flags & NSTAT_RX_FLAG_DUPLICATE)
887 OSAddAtomic(bytes, &stats->nstat_rxduplicatebytes);
888 }
889 }
890
891 rte = rte->rt_parent;
892 }
893}
894
/*
 * NSTAT_EWMA_ATOMIC: atomically fold _new_val into the exponentially
 * weighted moving average stored at _val_addr. A zero current value seeds
 * the average with the sample directly; otherwise the update is
 * avg = avg - (avg >> _decay) + (new >> _decay). Lock-free via a
 * compare-and-swap retry loop; bails early when the value is unchanged.
 */
#define NSTAT_EWMA_ATOMIC(_val_addr, _new_val, _decay) do { \
	volatile uint32_t _old_val; \
	volatile uint32_t _avg; \
	do { \
		_old_val = *_val_addr; \
		if (_old_val == 0) \
		{ \
			_avg = _new_val; \
		} \
		else \
		{ \
			_avg = _old_val - (_old_val >> _decay) + (_new_val >> _decay); \
		} \
		if (_old_val == _avg) break; \
	} while (!OSCompareAndSwap(_old_val, _avg, _val_addr)); \
} while (0);

/*
 * NSTAT_MIN_ATOMIC: atomically store min(current, _new_val) at _val_addr.
 * A current value of zero is treated as "unset" and is always replaced.
 * Lock-free via a compare-and-swap retry loop.
 */
#define NSTAT_MIN_ATOMIC(_val_addr, _new_val) do { \
	volatile uint32_t _old_val; \
	do { \
		_old_val = *_val_addr; \
		if (_old_val != 0 && _old_val < _new_val) \
		{ \
			break; \
		} \
	} while (!OSCompareAndSwap(_old_val, _new_val, _val_addr)); \
} while (0);
924
925__private_extern__ void
926nstat_route_rtt(
927 struct rtentry *rte,
928 u_int32_t rtt,
929 u_int32_t rtt_var)
930{
931 const uint32_t decay = 3;
932
933 while (rte)
934 {
935 struct nstat_counts* stats = nstat_route_attach(rte);
936 if (stats)
937 {
938 NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
939 NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
940 NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
941 }
942 rte = rte->rt_parent;
943 }
944}
945
/*
 * Bulk-update a route's counters with deltas accumulated by the caller
 * (one pass instead of one atomic per event). Applies to this route and
 * every parent up the chain. An rtt of 0 means "no RTT sample" and skips
 * the RTT updates entirely.
 */
__private_extern__ void
nstat_route_update(
	struct rtentry	*rte,
	uint32_t	connect_attempts,
	uint32_t	connect_successes,
	uint32_t	rx_packets,
	uint32_t	rx_bytes,
	uint32_t	rx_duplicatebytes,
	uint32_t	rx_outoforderbytes,
	uint32_t	tx_packets,
	uint32_t	tx_bytes,
	uint32_t	tx_retransmit,
	uint32_t	rtt,
	uint32_t	rtt_var)
{
	// EWMA decay shift: each new sample carries 1/8 weight.
	const uint32_t decay = 3;

	while (rte)
	{
		struct nstat_counts*	stats = nstat_route_attach(rte);
		if (stats)
		{
			OSAddAtomic(connect_attempts, &stats->nstat_connectattempts);
			OSAddAtomic(connect_successes, &stats->nstat_connectsuccesses);
			OSAddAtomic64((SInt64)tx_packets, (SInt64*)&stats->nstat_txpackets);
			OSAddAtomic64((SInt64)tx_bytes, (SInt64*)&stats->nstat_txbytes);
			OSAddAtomic(tx_retransmit, &stats->nstat_txretransmit);
			OSAddAtomic64((SInt64)rx_packets, (SInt64*)&stats->nstat_rxpackets);
			OSAddAtomic64((SInt64)rx_bytes, (SInt64*)&stats->nstat_rxbytes);
			OSAddAtomic(rx_outoforderbytes, &stats->nstat_rxoutoforderbytes);
			OSAddAtomic(rx_duplicatebytes, &stats->nstat_rxduplicatebytes);

			if (rtt != 0) {
				NSTAT_EWMA_ATOMIC(&stats->nstat_avg_rtt, rtt, decay);
				NSTAT_MIN_ATOMIC(&stats->nstat_min_rtt, rtt);
				NSTAT_EWMA_ATOMIC(&stats->nstat_var_rtt, rtt_var, decay);
			}
		}
		rte = rte->rt_parent;
	}
}
987
988#pragma mark -- TCP Kernel Provider --
989
990/*
991 * Due to the way the kernel deallocates a process (the process structure
992 * might be gone by the time we get the PCB detach notification),
993 * we need to cache the process name. Without this, proc_name() would
994 * return null and the process name would never be sent to userland.
995 *
996 * For UDP sockets, we also store the cached the connection tuples along with
997 * the interface index. This is necessary because when UDP sockets are
998 * disconnected, the connection tuples are forever lost from the inpcb, thus
999 * we need to keep track of the last call to connect() in ntstat.
1000 */
1001struct nstat_tucookie {
1002 struct inpcb *inp;
1003 char pname[MAXCOMLEN+1];
1004 bool cached;
1005 union
1006 {
1007 struct sockaddr_in v4;
1008 struct sockaddr_in6 v6;
1009 } local;
1010 union
1011 {
1012 struct sockaddr_in v4;
1013 struct sockaddr_in6 v6;
1014 } remote;
1015 unsigned int if_index;
1016 uint16_t ifnet_properties;
1017};
1018
/*
 * Allocate a tucookie for `inp`. When `ref` is set, take a WNT_ACQUIRE
 * reference on the inpcb, failing if the pcb is being torn down; `locked`
 * is passed through to in_pcb_checkstate to indicate whether the pcb lock
 * is already held. UDP sockets additionally take an inp_nstat_refcnt
 * because the cookie caches their tuples. Returns NULL on allocation or
 * reference failure.
 */
static struct nstat_tucookie *
nstat_tucookie_alloc_internal(
	struct inpcb	*inp,
	bool		ref,
	bool		locked)
{
	struct nstat_tucookie *cookie;

	cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
	if (cookie == NULL)
		return NULL;
	// Unlocked callers must not hold nstat_mtx across the checkstate call.
	if (!locked)
		LCK_MTX_ASSERT(&nstat_mtx, LCK_MTX_ASSERT_NOTOWNED);
	if (ref && in_pcb_checkstate(inp, WNT_ACQUIRE, locked) == WNT_STOPUSING)
	{
		OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
		return NULL;
	}
	bzero(cookie, sizeof(*cookie));
	cookie->inp = inp;
	// Cache the owner's name now; the proc may be gone at detach time.
	proc_name(inp->inp_socket->last_pid, cookie->pname,
	    sizeof(cookie->pname));
	/*
	 * We only increment the reference count for UDP sockets because we
	 * only cache UDP socket tuples.
	 */
	if (SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP)
		OSIncrementAtomic(&inp->inp_nstat_refcnt);

	return cookie;
}
1050
// Allocate a tucookie without taking an inpcb reference; the caller
// guarantees the inp stays alive for the cookie's lifetime.
static struct nstat_tucookie *
nstat_tucookie_alloc(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, false, false);
}

// Allocate a tucookie and take an inpcb reference; pcb lock not held.
static struct nstat_tucookie *
nstat_tucookie_alloc_ref(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, true, false);
}

// Allocate a tucookie and take an inpcb reference; caller already holds
// the pcb lock.
static struct nstat_tucookie *
nstat_tucookie_alloc_ref_locked(
	struct inpcb	*inp)
{
	return nstat_tucookie_alloc_internal(inp, true, true);
}
1071
/*
 * Tear down a tucookie: drop the UDP tuple-cache refcount when
 * applicable, release the inpcb reference via WNT_RELEASE, and free the
 * cookie. `inplock` indicates whether the pcb lock is held.
 */
static void
nstat_tucookie_release_internal(
	struct nstat_tucookie *cookie,
	int inplock)
{
	if (SOCK_PROTO(cookie->inp->inp_socket) == IPPROTO_UDP)
		OSDecrementAtomic(&cookie->inp->inp_nstat_refcnt);
	in_pcb_checkstate(cookie->inp, WNT_RELEASE, inplock);
	OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);
}
1082
// Release a tucookie; pcb lock not held by the caller.
static void
nstat_tucookie_release(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, false);
}

// Release a tucookie; caller already holds the pcb lock.
static void
nstat_tucookie_release_locked(
	struct nstat_tucookie *cookie)
{
	nstat_tucookie_release_internal(cookie, true);
}
1096
1097
/* Provider descriptor for kernel TCP sockets; populated by nstat_init_tcp_provider(). */
static nstat_provider nstat_tcp_provider;
1099
/*
 * Common lookup for the TCP and UDP providers: validate the client-supplied
 * add parameters, find the matching inpcb in `inpinfo' (the hash lookup
 * takes a reference on the pcb), and wrap it in a tucookie stored in
 * *out_cookie.  Returns EINVAL on malformed parameters, ENOENT when no
 * pcb matches.
 */
static errno_t
nstat_tcpudp_lookup(
	struct inpcbinfo *inpinfo,
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	struct inpcb *inp = NULL;

	// parameter validation
	const nstat_tcp_add_param *param = (const nstat_tcp_add_param*)data;
	if (length < sizeof(*param))
	{
		return EINVAL;
	}

	// src and dst must match
	if (param->remote.v4.sin_family != 0 &&
	    param->remote.v4.sin_family != param->local.v4.sin_family)
	{
		return EINVAL;
	}


	switch (param->local.v4.sin_family)
	{
		case AF_INET:
		{
			if (param->local.v4.sin_len != sizeof(param->local.v4) ||
			    (param->remote.v4.sin_family != 0 &&
			     param->remote.v4.sin_len != sizeof(param->remote.v4)))
			{
				return EINVAL;
			}

			inp = in_pcblookup_hash(inpinfo, param->remote.v4.sin_addr, param->remote.v4.sin_port,
						param->local.v4.sin_addr, param->local.v4.sin_port, 1, NULL);
		}
		break;

#if INET6
		case AF_INET6:
		{
			// Const-stripping union: the parameters are const but the
			// pcb lookup takes non-const in6_addr pointers.
			union
			{
				const struct in6_addr *in6c;
				struct in6_addr *in6;
			} local, remote;

			if (param->local.v6.sin6_len != sizeof(param->local.v6) ||
			    (param->remote.v6.sin6_family != 0 &&
			     param->remote.v6.sin6_len != sizeof(param->remote.v6)))
			{
				return EINVAL;
			}

			local.in6c = &param->local.v6.sin6_addr;
			remote.in6c = &param->remote.v6.sin6_addr;

			inp = in6_pcblookup_hash(inpinfo, remote.in6, param->remote.v6.sin6_port,
						local.in6, param->local.v6.sin6_port, 1, NULL);
		}
		break;
#endif

		default:
			return EINVAL;
	}

	if (inp == NULL)
		return ENOENT;

	// At this point we have a ref to the inpcb
	// Ownership of that ref passes to the cookie; on cookie allocation
	// failure we must drop it ourselves.
	*out_cookie = nstat_tucookie_alloc(inp);
	if (*out_cookie == NULL)
		in_pcb_checkstate(inp, WNT_RELEASE, 0);

	return 0;
}
1179
1180static errno_t
1181nstat_tcp_lookup(
1182 const void *data,
1183 u_int32_t length,
1184 nstat_provider_cookie_t *out_cookie)
1185{
1186 return nstat_tcpudp_lookup(&tcbinfo, data, length, out_cookie);
1187}
1188
1189static int
1190nstat_tcp_gone(
1191 nstat_provider_cookie_t cookie)
1192{
1193 struct nstat_tucookie *tucookie =
1194 (struct nstat_tucookie *)cookie;
1195 struct inpcb *inp;
1196 struct tcpcb *tp;
1197
1198 return (!(inp = tucookie->inp) ||
1199 !(tp = intotcpcb(inp)) ||
1200 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1201}
1202
/*
 * Provider counts callback for kernel TCP sockets.  Copies the pcb's
 * traffic counters and tcpcb RTT/connect state into `out_counts'.
 * Sets *out_gone (if provided) when the pcb is dead; returns EINVAL only
 * when the inpcb or tcpcb pointers are already gone.  Note that a pcb in
 * the DEAD state but with valid pointers is still read one last time.
 */
static errno_t
nstat_tcp_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp;

	bzero(out_counts, sizeof(*out_counts));

	if (out_gone) *out_gone = 0;

	// if the pcb is in the dead state, we should stop using it
	if (nstat_tcp_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		if (!(inp = tucookie->inp) || !intotcpcb(inp))
			return EINVAL;
	}
	inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);

	atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
	atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
	atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
	atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
	out_counts->nstat_rxduplicatebytes = tp->t_stat.rxduplicatebytes;
	out_counts->nstat_rxoutoforderbytes = tp->t_stat.rxoutoforderbytes;
	out_counts->nstat_txretransmit = tp->t_stat.txretransmitbytes;
	out_counts->nstat_connectattempts = tp->t_state >= TCPS_SYN_SENT ? 1 : 0;
	out_counts->nstat_connectsuccesses = tp->t_state >= TCPS_ESTABLISHED ? 1 : 0;
	out_counts->nstat_avg_rtt = tp->t_srtt;
	out_counts->nstat_min_rtt = tp->t_rttbest;
	out_counts->nstat_var_rtt = tp->t_rttvar;
	// Clamp: t_rttbest can lag behind the smoothed RTT.
	if (out_counts->nstat_avg_rtt < out_counts->nstat_min_rtt)
		out_counts->nstat_min_rtt = out_counts->nstat_avg_rtt;
	atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
	atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
	atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
	atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
	atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);

	return 0;
}
1250
1251static void
1252nstat_tcp_release(
1253 nstat_provider_cookie_t cookie,
1254 int locked)
1255{
1256 struct nstat_tucookie *tucookie =
1257 (struct nstat_tucookie *)cookie;
1258
1259 nstat_tucookie_release_internal(tucookie, locked);
1260}
1261
/*
 * Register `state' as a watcher of all kernel TCP sockets and enumerate
 * the existing pcbs as sources.  tcbinfo.ipi_lock is held across both
 * steps so a socket cannot be added twice (see comment below).
 */
static errno_t
nstat_tcp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// There is a tricky issue around getting all TCP sockets added once
	// and only once. nstat_tcp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the tcbinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_tcp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(tcbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);
	if (result == 0) {
		OSIncrementAtomic(&nstat_tcp_watchers);

		// Add all current tcp inpcbs. Ignore those in timewait
		struct inpcb *inp;
		struct nstat_tucookie *cookie;
		LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
		{
			// Skip pcbs we cannot take a reference on.
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL)
				continue;
			if (nstat_control_source_add(0, state, &nstat_tcp_provider,
			    cookie) != 0)
			{
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(tcbinfo.ipi_lock);

	return result;
}
1305
1306static void
1307nstat_tcp_remove_watcher(
1308 __unused nstat_control_state *state)
1309{
1310 OSDecrementAtomic(&nstat_tcp_watchers);
1311}
1312
/*
 * Hook called when a new TCP pcb is created.  Stamps the pcb's start
 * timestamp, then — if any client is watching TCP — adds the pcb as a
 * source to every watching control state.  The socket lock is taken
 * before nstat_mtx (lock ordering).
 */
__private_extern__ void
nstat_tcp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	inp->inp_start_timestamp = mach_continuous_time();

	// Fast path: no watchers, nothing to do.
	if (nstat_tcp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_TCP_KERNEL)) != 0)
		{
			// this client is watching tcp
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_tcp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
1348
/*
 * Hook called when an inpcb is being detached.  Finds every TCP/UDP
 * source (at most one per control state) that references `inp', sends a
 * goodbye message, and moves the source onto a local dead list; the
 * actual cleanup runs after all locks are dropped, since
 * nstat_control_cleanup_source() may take other locks.
 */
__private_extern__ void
nstat_pcb_detach(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	tailq_head_nstat_src dead_list;
	struct nstat_tucookie *tucookie;
	errno_t result;

	// Nothing to do if nobody is watching sockets.
	if (inp == NULL || (nstat_tcp_watchers == 0 && nstat_udp_watchers == 0))
		return;

	TAILQ_INIT(&dead_list);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			nstat_provider_id_t provider_id = src->provider->nstat_provider_id;
			if (provider_id == NSTAT_PROVIDER_TCP_KERNEL || provider_id == NSTAT_PROVIDER_UDP_KERNEL)
			{
				tucookie = (struct nstat_tucookie *)src->cookie;
				if (tucookie->inp == inp)
					break;
			}
		}

		if (src)
		{
			// Best effort: the goodbye result is intentionally ignored.
			result = nstat_control_send_goodbye(state, src);

			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);

	// Clean up the collected sources outside of all nstat locks.
	while ((src = TAILQ_FIRST(&dead_list)))
	{
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, TRUE);
	}
}
1394
/*
 * Cache the UDP pcb's address tuple and interface info into every
 * tucookie that references it, so descriptors can still be produced
 * after the pcb's addressing becomes invalid.  Only runs for UDP pcbs
 * with a non-zero nstat refcount (the alloc path only takes that
 * refcount for UDP — presumably called around disconnect; confirm at
 * the call sites).
 */
__private_extern__ void
nstat_pcb_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				if (inp->inp_vflag & INP_IPV6)
				{
					in6_ip6_to_sockaddr(&inp->in6p_laddr,
					    inp->inp_lport,
					    &tucookie->local.v6,
					    sizeof(tucookie->local));
					in6_ip6_to_sockaddr(&inp->in6p_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v6,
					    sizeof(tucookie->remote));
				}
				else if (inp->inp_vflag & INP_IPV4)
				{
					nstat_ip_to_sockaddr(&inp->inp_laddr,
					    inp->inp_lport,
					    &tucookie->local.v4,
					    sizeof(tucookie->local));
					nstat_ip_to_sockaddr(&inp->inp_faddr,
					    inp->inp_fport,
					    &tucookie->remote.v4,
					    sizeof(tucookie->remote));
				}
				// Remember the last output interface too.
				if (inp->inp_last_outifp)
					tucookie->if_index =
					    inp->inp_last_outifp->if_index;

				tucookie->ifnet_properties = nstat_inpcb_to_flags(inp);
				tucookie->cached = true;
				// At most one source per control state references this pcb.
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1449
/*
 * Inverse of nstat_pcb_cache(): clear the `cached' flag on every
 * tucookie referencing this UDP pcb, so descriptors are read from the
 * live pcb again (e.g. after the pcb becomes valid once more).
 */
__private_extern__ void
nstat_pcb_invalidate_cache(struct inpcb *inp)
{
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_tucookie *tucookie;

	if (inp == NULL || nstat_udp_watchers == 0 ||
	    inp->inp_nstat_refcnt == 0)
		return;
	VERIFY(SOCK_PROTO(inp->inp_socket) == IPPROTO_UDP);
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next) {
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			tucookie = (struct nstat_tucookie *)src->cookie;
			if (tucookie->inp == inp)
			{
				tucookie->cached = false;
				break;
			}
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
1477
/*
 * Provider descriptor callback for kernel TCP sockets.  Fills `data'
 * (an nstat_tcp_descriptor of at least `len' bytes) with the pcb's
 * addresses, TCP state, window/congestion values, and owning-process
 * identity.  Returns EINVAL when the buffer is too small or the pcb is
 * gone.  Several fields are read without the socket lock (see comments).
 */
static errno_t
nstat_tcp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	if (len < sizeof(nstat_tcp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_tcp_gone(cookie))
		return EINVAL;

	nstat_tcp_descriptor *desc = (nstat_tcp_descriptor*)data;
	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	struct inpcb *inp = tucookie->inp;
	struct tcpcb *tp = intotcpcb(inp);
	bzero(desc, sizeof(*desc));

	if (inp->inp_vflag & INP_IPV6)
	{
		in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
		    &desc->local.v6, sizeof(desc->local));
		in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
		    &desc->remote.v6, sizeof(desc->remote));
	}
	else if (inp->inp_vflag & INP_IPV4)
	{
		nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
		    &desc->local.v4, sizeof(desc->local));
		nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
		    &desc->remote.v4, sizeof(desc->remote));
	}

	desc->state = intotcpcb(inp)->t_state;
	desc->ifindex = (inp->inp_last_outifp == NULL) ? 0 :
	    inp->inp_last_outifp->if_index;

	// danger - not locked, values could be bogus
	desc->txunacked = tp->snd_max - tp->snd_una;
	desc->txwindow = tp->snd_wnd;
	desc->txcwindow = tp->snd_cwnd;

	// Congestion-control algorithm name, if one is set.
	if (CC_ALGO(tp)->name != NULL) {
		strlcpy(desc->cc_algo, CC_ALGO(tp)->name,
		    sizeof(desc->cc_algo));
	}

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		desc->traffic_class = so->so_traffic_class;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND))
			desc->traffic_mgt_flags |= TRAFFIC_MGT_SO_BACKGROUND;
		if ((so->so_flags1 & SOF1_TRAFFIC_MGT_TCP_RECVBG))
			desc->traffic_mgt_flags |= TRAFFIC_MGT_TCP_RECVBG;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		// If the pid no longer resolves to a name, fall back to the
		// name snapshotted at cookie allocation; otherwise refresh
		// the snapshot.
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		// Effective (delegated) identity falls back to the real one.
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->sndbufsize = so->so_snd.sb_hiwat;
		desc->sndbufused = so->so_snd.sb_cc;
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
	}

	tcp_get_connectivity_status(tp, &desc->connstatus);
	desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	inp_get_activity_bitmap(inp, &desc->activity_bitmap);
	desc->start_timestamp = inp->inp_start_timestamp;
	desc->timestamp = mach_continuous_time();
	return 0;
}
1576
1577static bool
1578nstat_tcpudp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter, bool is_UDP)
1579{
1580 bool retval = true;
1581
1582 if ((filter->npf_flags & (NSTAT_FILTER_IFNET_FLAGS|NSTAT_FILTER_SPECIFIC_USER)) != 0)
1583 {
1584 struct nstat_tucookie *tucookie = (struct nstat_tucookie *)cookie;
1585 struct inpcb *inp = tucookie->inp;
1586
1587 /* Only apply interface filter if at least one is allowed. */
1588 if ((filter->npf_flags & NSTAT_FILTER_IFNET_FLAGS) != 0)
1589 {
1590 uint16_t interface_properties = nstat_inpcb_to_flags(inp);
1591
1592 if ((filter->npf_flags & interface_properties) == 0)
1593 {
1594 // For UDP, we could have an undefined interface and yet transfers may have occurred.
1595 // We allow reporting if there have been transfers of the requested kind.
1596 // This is imperfect as we cannot account for the expensive attribute over wifi.
1597 // We also assume that cellular is expensive and we have no way to select for AWDL
1598 if (is_UDP)
1599 {
1600 do
1601 {
1602 if ((filter->npf_flags & (NSTAT_FILTER_ACCEPT_CELLULAR|NSTAT_FILTER_ACCEPT_EXPENSIVE)) &&
1603 (inp->inp_cstat->rxbytes || inp->inp_cstat->txbytes))
1604 {
1605 break;
1606 }
1607 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIFI) &&
1608 (inp->inp_wstat->rxbytes || inp->inp_wstat->txbytes))
1609 {
1610 break;
1611 }
1612 if ((filter->npf_flags & NSTAT_FILTER_ACCEPT_WIRED) &&
1613 (inp->inp_Wstat->rxbytes || inp->inp_Wstat->txbytes))
1614 {
1615 break;
1616 }
1617 return false;
1618 } while (0);
1619 }
1620 else
1621 {
1622 return false;
1623 }
1624 }
1625 }
1626
1627 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER) != 0) && (retval))
1628 {
1629 struct socket *so = inp->inp_socket;
1630 retval = false;
1631
1632 if (so)
1633 {
1634 if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_PID) != 0) &&
1635 (filter->npf_pid == so->last_pid))
1636 {
1637 retval = true;
1638 }
1639 else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EPID) != 0) &&
1640 (filter->npf_pid == (so->so_flags & SOF_DELEGATED)? so->e_upid : so->last_pid))
1641 {
1642 retval = true;
1643 }
1644 else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_UUID) != 0) &&
1645 (memcmp(filter->npf_uuid, so->last_uuid, sizeof(so->last_uuid)) == 0))
1646 {
1647 retval = true;
1648 }
1649 else if (((filter->npf_flags & NSTAT_FILTER_SPECIFIC_USER_BY_EUUID) != 0) &&
1650 (memcmp(filter->npf_uuid, (so->so_flags & SOF_DELEGATED)? so->e_uuid : so->last_uuid,
1651 sizeof(so->last_uuid)) == 0))
1652 {
1653 retval = true;
1654 }
1655 }
1656 }
1657 }
1658 return retval;
1659}
1660
1661static bool
1662nstat_tcp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
1663{
1664 return nstat_tcpudp_reporting_allowed(cookie, filter, FALSE);
1665}
1666
1667static void
1668nstat_init_tcp_provider(void)
1669{
1670 bzero(&nstat_tcp_provider, sizeof(nstat_tcp_provider));
1671 nstat_tcp_provider.nstat_descriptor_length = sizeof(nstat_tcp_descriptor);
1672 nstat_tcp_provider.nstat_provider_id = NSTAT_PROVIDER_TCP_KERNEL;
1673 nstat_tcp_provider.nstat_lookup = nstat_tcp_lookup;
1674 nstat_tcp_provider.nstat_gone = nstat_tcp_gone;
1675 nstat_tcp_provider.nstat_counts = nstat_tcp_counts;
1676 nstat_tcp_provider.nstat_release = nstat_tcp_release;
1677 nstat_tcp_provider.nstat_watcher_add = nstat_tcp_add_watcher;
1678 nstat_tcp_provider.nstat_watcher_remove = nstat_tcp_remove_watcher;
1679 nstat_tcp_provider.nstat_copy_descriptor = nstat_tcp_copy_descriptor;
1680 nstat_tcp_provider.nstat_reporting_allowed = nstat_tcp_reporting_allowed;
1681 nstat_tcp_provider.next = nstat_providers;
1682 nstat_providers = &nstat_tcp_provider;
1683}
1684
1685#pragma mark -- UDP Provider --
1686
/* Provider descriptor for kernel UDP sockets; populated by nstat_init_udp_provider(). */
static nstat_provider nstat_udp_provider;
1688
1689static errno_t
1690nstat_udp_lookup(
1691 const void *data,
1692 u_int32_t length,
1693 nstat_provider_cookie_t *out_cookie)
1694{
1695 return nstat_tcpudp_lookup(&udbinfo, data, length, out_cookie);
1696}
1697
1698static int
1699nstat_udp_gone(
1700 nstat_provider_cookie_t cookie)
1701{
1702 struct nstat_tucookie *tucookie =
1703 (struct nstat_tucookie *)cookie;
1704 struct inpcb *inp;
1705
1706 return (!(inp = tucookie->inp) ||
1707 inp->inp_state == INPCB_STATE_DEAD) ? 1 : 0;
1708}
1709
1710static errno_t
1711nstat_udp_counts(
1712 nstat_provider_cookie_t cookie,
1713 struct nstat_counts *out_counts,
1714 int *out_gone)
1715{
1716 struct nstat_tucookie *tucookie =
1717 (struct nstat_tucookie *)cookie;
1718
1719 if (out_gone) *out_gone = 0;
1720
1721 // if the pcb is in the dead state, we should stop using it
1722 if (nstat_udp_gone(cookie))
1723 {
1724 if (out_gone) *out_gone = 1;
1725 if (!tucookie->inp)
1726 return EINVAL;
1727 }
1728 struct inpcb *inp = tucookie->inp;
1729
1730 atomic_get_64(out_counts->nstat_rxpackets, &inp->inp_stat->rxpackets);
1731 atomic_get_64(out_counts->nstat_rxbytes, &inp->inp_stat->rxbytes);
1732 atomic_get_64(out_counts->nstat_txpackets, &inp->inp_stat->txpackets);
1733 atomic_get_64(out_counts->nstat_txbytes, &inp->inp_stat->txbytes);
1734 atomic_get_64(out_counts->nstat_cell_rxbytes, &inp->inp_cstat->rxbytes);
1735 atomic_get_64(out_counts->nstat_cell_txbytes, &inp->inp_cstat->txbytes);
1736 atomic_get_64(out_counts->nstat_wifi_rxbytes, &inp->inp_wstat->rxbytes);
1737 atomic_get_64(out_counts->nstat_wifi_txbytes, &inp->inp_wstat->txbytes);
1738 atomic_get_64(out_counts->nstat_wired_rxbytes, &inp->inp_Wstat->rxbytes);
1739 atomic_get_64(out_counts->nstat_wired_txbytes, &inp->inp_Wstat->txbytes);
1740
1741 return 0;
1742}
1743
1744static void
1745nstat_udp_release(
1746 nstat_provider_cookie_t cookie,
1747 int locked)
1748{
1749 struct nstat_tucookie *tucookie =
1750 (struct nstat_tucookie *)cookie;
1751
1752 nstat_tucookie_release_internal(tucookie, locked);
1753}
1754
/*
 * Register `state' as a watcher of all kernel UDP sockets and enumerate
 * the existing pcbs as sources.  udbinfo.ipi_lock is held across both
 * steps so a socket cannot be added twice (see comment below).
 */
static errno_t
nstat_udp_add_watcher(
	nstat_control_state *state,
	nstat_msg_add_all_srcs *req)
{
	// There is a tricky issue around getting all UDP sockets added once
	// and only once. nstat_udp_new_pcb() is called prior to the new item
	// being placed on any lists where it might be found.
	// By locking the udpinfo.ipi_lock prior to marking the state as a watcher,
	// it should be impossible for a new socket to be added twice.
	// On the other hand, there is still a timing issue where a new socket
	// results in a call to nstat_udp_new_pcb() before this watcher
	// is instantiated and yet the socket doesn't make it into ipi_listhead
	// prior to the scan. <rdar://problem/30361716>

	errno_t result;

	lck_rw_lock_shared(udbinfo.ipi_lock);
	result = nstat_set_provider_filter(state, req);

	if (result == 0) {
		struct inpcb *inp;
		struct nstat_tucookie *cookie;

		OSIncrementAtomic(&nstat_udp_watchers);

		// Add all current UDP inpcbs.
		LIST_FOREACH(inp, udbinfo.ipi_listhead, inp_list)
		{
			// Skip pcbs we cannot take a reference on.
			cookie = nstat_tucookie_alloc_ref(inp);
			if (cookie == NULL)
				continue;
			if (nstat_control_source_add(0, state, &nstat_udp_provider,
			    cookie) != 0)
			{
				nstat_tucookie_release(cookie);
				break;
			}
		}
	}

	lck_rw_done(udbinfo.ipi_lock);

	return result;
}
1800
1801static void
1802nstat_udp_remove_watcher(
1803 __unused nstat_control_state *state)
1804{
1805 OSDecrementAtomic(&nstat_udp_watchers);
1806}
1807
/*
 * Hook called when a new UDP pcb is created.  Stamps the pcb's start
 * timestamp, then — if any client is watching UDP — adds the pcb as a
 * source to every watching control state.  The socket lock is taken
 * before nstat_mtx (lock ordering).
 */
__private_extern__ void
nstat_udp_new_pcb(
	struct inpcb *inp)
{
	struct nstat_tucookie *cookie;

	inp->inp_start_timestamp = mach_continuous_time();

	// Fast path: no watchers, nothing to do.
	if (nstat_udp_watchers == 0)
		return;

	socket_lock(inp->inp_socket, 0);
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state *state;
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		if ((state->ncs_watching & (1 << NSTAT_PROVIDER_UDP_KERNEL)) != 0)
		{
			// this client is watching UDP
			// acquire a reference for it
			cookie = nstat_tucookie_alloc_ref_locked(inp);
			if (cookie == NULL)
				continue;
			// add the source, if that fails, release the reference
			if (nstat_control_source_add(0, state,
			    &nstat_udp_provider, cookie) != 0)
			{
				nstat_tucookie_release_locked(cookie);
				break;
			}
		}
	}
	lck_mtx_unlock(&nstat_mtx);
	socket_unlock(inp->inp_socket, 0);
}
1843
/*
 * Provider descriptor callback for kernel UDP sockets.  Fills `data'
 * (an nstat_udp_descriptor of at least `len' bytes) with the pcb's
 * addresses — taken from the live pcb, or from the tuple cached by
 * nstat_pcb_cache() when tucookie->cached is set — plus interface and
 * owning-process identity.  Returns EINVAL when the buffer is too small
 * or the pcb is gone.
 */
static errno_t
nstat_udp_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	if (len < sizeof(nstat_udp_descriptor))
	{
		return EINVAL;
	}

	if (nstat_udp_gone(cookie))
		return EINVAL;

	struct nstat_tucookie *tucookie =
	    (struct nstat_tucookie *)cookie;
	nstat_udp_descriptor *desc = (nstat_udp_descriptor*)data;
	struct inpcb *inp = tucookie->inp;

	bzero(desc, sizeof(*desc));

	if (tucookie->cached == false) {
		// Live path: read addressing from the pcb itself.
		if (inp->inp_vflag & INP_IPV6)
		{
			in6_ip6_to_sockaddr(&inp->in6p_laddr, inp->inp_lport,
			    &desc->local.v6, sizeof(desc->local.v6));
			in6_ip6_to_sockaddr(&inp->in6p_faddr, inp->inp_fport,
			    &desc->remote.v6, sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			nstat_ip_to_sockaddr(&inp->inp_laddr, inp->inp_lport,
			    &desc->local.v4, sizeof(desc->local.v4));
			nstat_ip_to_sockaddr(&inp->inp_faddr, inp->inp_fport,
			    &desc->remote.v4, sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = nstat_inpcb_to_flags(inp);
	}
	else
	{
		// Cached path: the pcb's addressing is no longer valid; use the
		// tuple snapshotted by nstat_pcb_cache().
		if (inp->inp_vflag & INP_IPV6)
		{
			memcpy(&desc->local.v6, &tucookie->local.v6,
			    sizeof(desc->local.v6));
			memcpy(&desc->remote.v6, &tucookie->remote.v6,
			    sizeof(desc->remote.v6));
		}
		else if (inp->inp_vflag & INP_IPV4)
		{
			memcpy(&desc->local.v4, &tucookie->local.v4,
			    sizeof(desc->local.v4));
			memcpy(&desc->remote.v4, &tucookie->remote.v4,
			    sizeof(desc->remote.v4));
		}
		desc->ifnet_properties = tucookie->ifnet_properties;
	}

	if (inp->inp_last_outifp)
		desc->ifindex = inp->inp_last_outifp->if_index;
	else
		desc->ifindex = tucookie->if_index;

	struct socket *so = inp->inp_socket;
	if (so)
	{
		// TBD - take the socket lock around these to make sure
		// they're in sync?
		desc->upid = so->last_upid;
		desc->pid = so->last_pid;
		proc_name(desc->pid, desc->pname, sizeof(desc->pname));
		// If the pid no longer resolves to a name, fall back to the
		// name snapshotted at cookie allocation; otherwise refresh
		// the snapshot.
		if (desc->pname[0] == 0)
		{
			strlcpy(desc->pname, tucookie->pname,
			    sizeof(desc->pname));
		}
		else
		{
			desc->pname[sizeof(desc->pname) - 1] = 0;
			strlcpy(tucookie->pname, desc->pname,
			    sizeof(tucookie->pname));
		}
		memcpy(desc->uuid, so->last_uuid, sizeof(so->last_uuid));
		memcpy(desc->vuuid, so->so_vuuid, sizeof(so->so_vuuid));
		// Effective (delegated) identity falls back to the real one.
		if (so->so_flags & SOF_DELEGATED) {
			desc->eupid = so->e_upid;
			desc->epid = so->e_pid;
			memcpy(desc->euuid, so->e_uuid, sizeof(so->e_uuid));
		} else {
			desc->eupid = desc->upid;
			desc->epid = desc->pid;
			memcpy(desc->euuid, desc->uuid, sizeof(desc->uuid));
		}
		desc->rcvbufsize = so->so_rcv.sb_hiwat;
		desc->rcvbufused = so->so_rcv.sb_cc;
		desc->traffic_class = so->so_traffic_class;
		// NOTE(review): unlike nstat_tcp_copy_descriptor, the activity
		// bitmap and timestamps are only filled when `so' is non-NULL —
		// confirm whether that asymmetry is intentional.
		inp_get_activity_bitmap(inp, &desc->activity_bitmap);
		desc->start_timestamp = inp->inp_start_timestamp;
		desc->timestamp = mach_continuous_time();
	}

	return 0;
}
1946
1947static bool
1948nstat_udp_reporting_allowed(nstat_provider_cookie_t cookie, nstat_provider_filter *filter)
1949{
1950 return nstat_tcpudp_reporting_allowed(cookie, filter, TRUE);
1951}
1952
1953
1954static void
1955nstat_init_udp_provider(void)
1956{
1957 bzero(&nstat_udp_provider, sizeof(nstat_udp_provider));
1958 nstat_udp_provider.nstat_provider_id = NSTAT_PROVIDER_UDP_KERNEL;
1959 nstat_udp_provider.nstat_descriptor_length = sizeof(nstat_udp_descriptor);
1960 nstat_udp_provider.nstat_lookup = nstat_udp_lookup;
1961 nstat_udp_provider.nstat_gone = nstat_udp_gone;
1962 nstat_udp_provider.nstat_counts = nstat_udp_counts;
1963 nstat_udp_provider.nstat_watcher_add = nstat_udp_add_watcher;
1964 nstat_udp_provider.nstat_watcher_remove = nstat_udp_remove_watcher;
1965 nstat_udp_provider.nstat_copy_descriptor = nstat_udp_copy_descriptor;
1966 nstat_udp_provider.nstat_release = nstat_udp_release;
1967 nstat_udp_provider.nstat_reporting_allowed = nstat_udp_reporting_allowed;
1968 nstat_udp_provider.next = nstat_providers;
1969 nstat_providers = &nstat_udp_provider;
1970}
1971
1972
1973
1974#pragma mark -- ifnet Provider --
1975
/* Provider descriptor for per-interface (ifnet) statistics. */
static nstat_provider nstat_ifnet_provider;
1977
1978/*
1979 * We store a pointer to the ifnet and the original threshold
1980 * requested by the client.
1981 */
1982struct nstat_ifnet_cookie
1983{
1984 struct ifnet *ifp;
1985 uint64_t threshold;
1986};
1987
/*
 * Provider lookup callback for ifnet sources.  Validates the client's
 * nstat_ifnet_add_param (threshold must be at least 1MB), optionally
 * enforces the privileged-statistics entitlement, finds the interface by
 * index, takes an ifnet reference, and lowers the interface's
 * if_data_threshold to the client's threshold if it is smaller.  When
 * the threshold was lowered, all existing ifnet sources are notified
 * with a fresh description.
 */
static errno_t
nstat_ifnet_lookup(
	const void *data,
	u_int32_t length,
	nstat_provider_cookie_t *out_cookie)
{
	const nstat_ifnet_add_param *param = (const nstat_ifnet_add_param *)data;
	struct ifnet *ifp;
	boolean_t changed = FALSE;
	nstat_control_state *state;
	nstat_src *src;
	struct nstat_ifnet_cookie *cookie;

	if (length < sizeof(*param) || param->threshold < 1024*1024)
		return EINVAL;
	if (nstat_privcheck != 0) {
		errno_t result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0)
			return result;
	}
	cookie = OSMalloc(sizeof(*cookie), nstat_malloc_tag);
	if (cookie == NULL)
		return ENOMEM;
	bzero(cookie, sizeof(*cookie));

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link)
	{
		ifnet_lock_exclusive(ifp);
		if (ifp->if_index == param->ifindex)
		{
			cookie->ifp = ifp;
			cookie->threshold = param->threshold;
			*out_cookie = cookie;
			// Lower the interface threshold if this client wants
			// finer-grained updates than currently configured.
			if (!ifp->if_data_threshold ||
			    ifp->if_data_threshold > param->threshold)
			{
				changed = TRUE;
				ifp->if_data_threshold = param->threshold;
			}
			ifnet_lock_done(ifp);
			ifnet_reference(ifp);
			break;
		}
		ifnet_lock_done(ifp);
	}
	ifnet_head_done();

	/*
	 * When we change the threshold to something smaller, we notify
	 * all of our clients with a description message.
	 * We won't send a message to the client we are currently serving
	 * because it has no `ifnet source' yet.
	 */
	if (changed)
	{
		lck_mtx_lock(&nstat_mtx);
		for (state = nstat_controls; state; state = state->ncs_next)
		{
			lck_mtx_lock(&state->ncs_mtx);
			TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
			{
				if (src->provider != &nstat_ifnet_provider)
					continue;
				nstat_control_send_description(state, src, 0, 0);
			}
			lck_mtx_unlock(&state->ncs_mtx);
		}
		lck_mtx_unlock(&nstat_mtx);
	}
	// No matching interface: discard the unused cookie.
	if (cookie->ifp == NULL)
		OSFree(cookie, sizeof(*cookie), nstat_malloc_tag);

	return ifp ? 0 : EINVAL;
}
2064
2065static int
2066nstat_ifnet_gone(
2067 nstat_provider_cookie_t cookie)
2068{
2069 struct ifnet *ifp;
2070 struct nstat_ifnet_cookie *ifcookie =
2071 (struct nstat_ifnet_cookie *)cookie;
2072
2073 ifnet_head_lock_shared();
2074 TAILQ_FOREACH(ifp, &ifnet_head, if_link)
2075 {
2076 if (ifp == ifcookie->ifp)
2077 break;
2078 }
2079 ifnet_head_done();
2080
2081 return ifp ? 0 : 1;
2082}
2083
/*
 * Provider counts callback for ifnet sources.  Copies the interface's
 * aggregate packet/byte counters into `out_counts'.  Sets *out_gone (if
 * provided) and returns EINVAL when the interface has detached.
 */
static errno_t
nstat_ifnet_counts(
	nstat_provider_cookie_t cookie,
	struct nstat_counts *out_counts,
	int *out_gone)
{
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet *ifp = ifcookie->ifp;

	if (out_gone) *out_gone = 0;

	// if the ifnet is gone, we should stop using it
	if (nstat_ifnet_gone(cookie))
	{
		if (out_gone) *out_gone = 1;
		return EINVAL;
	}

	bzero(out_counts, sizeof(*out_counts));
	out_counts->nstat_rxpackets = ifp->if_ipackets;
	out_counts->nstat_rxbytes = ifp->if_ibytes;
	out_counts->nstat_txpackets = ifp->if_opackets;
	out_counts->nstat_txbytes = ifp->if_obytes;
	// Per-medium breakdown is not tracked at the interface level.
	out_counts->nstat_cell_rxbytes = out_counts->nstat_cell_txbytes = 0;
	return 0;
}
2111
/*
 * Provider release callback for ifnet sources.  Recomputes the
 * interface's if_data_threshold from the minimum threshold of the
 * remaining clients (0 disables it when none remain), then drops the
 * ifnet reference taken by nstat_ifnet_lookup() and frees the cookie.
 */
static void
nstat_ifnet_release(
	nstat_provider_cookie_t cookie,
	__unused int locked)
{
	struct nstat_ifnet_cookie *ifcookie;
	struct ifnet *ifp;
	nstat_control_state *state;
	nstat_src *src;
	uint64_t minthreshold = UINT64_MAX;

	/*
	 * Find all the clients that requested a threshold
	 * for this ifnet and re-calculate if_data_threshold.
	 */
	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			/* Skip the provider we are about to detach. */
			if (src->provider != &nstat_ifnet_provider ||
			    src->cookie == cookie)
				continue;
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			if (ifcookie->threshold < minthreshold)
				minthreshold = ifcookie->threshold;
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
	/*
	 * Reset if_data_threshold or disable it.
	 */
	ifcookie = (struct nstat_ifnet_cookie *)cookie;
	ifp = ifcookie->ifp;
	// Only touch the interface if it is still attached (takes an
	// I/O refcount that must be dropped below).
	if (ifnet_is_attached(ifp, 1)) {
		ifnet_lock_exclusive(ifp);
		if (minthreshold == UINT64_MAX)
			ifp->if_data_threshold = 0;
		else
			ifp->if_data_threshold = minthreshold;
		ifnet_lock_done(ifp);
		ifnet_decr_iorefcnt(ifp);
	}
	ifnet_release(ifp);
	OSFree(ifcookie, sizeof(*ifcookie), nstat_malloc_tag);
}
2161
/*
 * Copy the interface's link-status report (cellular or wifi) into the
 * nstat descriptor, translating each IF_*_VALID bit and its field into
 * the corresponding NSTAT_IFNET_DESC_* bit and field.  Only version-1
 * reports are understood; anything else leaves the link status type as
 * NONE.  Fields whose valid bit is clear are left at zero.
 */
static void
nstat_ifnet_copy_link_status(
	struct ifnet *ifp,
	struct nstat_ifnet_descriptor *desc)
{
	struct if_link_status *ifsr = ifp->if_link_status;
	nstat_ifnet_desc_link_status *link_status = &desc->link_status;

	link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_NONE;
	/* NOTE(review): ifsr is sampled before if_link_status_lock is
	 * taken; presumably the pointer itself is stable for the life of
	 * the ifnet and the lock only guards the contents — confirm. */
	if (ifsr == NULL)
		return;

	lck_rw_lock_shared(&ifp->if_link_status_lock);

	if (ifp->if_type == IFT_CELLULAR) {
		/* Cellular report: translate if_cellular_status_v1. */
		nstat_ifnet_desc_cellular_status *cell_status = &link_status->u.cellular;
		struct if_cellular_status_v1 *if_cell_sr =
			&ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;

		if (ifsr->ifsr_version != IF_CELLULAR_STATUS_REPORT_VERSION_1)
			goto done;

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;

		if (if_cell_sr->valid_bitmask & IF_CELL_LINK_QUALITY_METRIC_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_LINK_QUALITY_METRIC_VALID;
			cell_status->link_quality_metric = if_cell_sr->link_quality_metric;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->ul_effective_bandwidth = if_cell_sr->ul_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_BANDWIDTH_VALID;
			cell_status->ul_max_bandwidth = if_cell_sr->ul_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_LATENCY_VALID;
			cell_status->ul_min_latency = if_cell_sr->ul_min_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_EFFECTIVE_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_EFFECTIVE_LATENCY_VALID;
			cell_status->ul_effective_latency = if_cell_sr->ul_effective_latency;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_LATENCY_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_LATENCY_VALID;
			cell_status->ul_max_latency = if_cell_sr->ul_max_latency;
		}
		/* Retransmit level is an enum, not a raw copy: unknown values
		 * clear the valid bit again rather than forwarding garbage. */
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_RETXT_LEVEL_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
			if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_NONE)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_NONE;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_LOW)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_LOW;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_MEDIUM)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_MEDIUM;
			else if (if_cell_sr->ul_retxt_level == IF_CELL_UL_RETXT_LEVEL_HIGH)
				cell_status->ul_retxt_level = NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_HIGH;
			else
				cell_status->valid_bitmask &= ~NSTAT_IFNET_DESC_CELL_UL_RETXT_LEVEL_VALID;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_BYTES_LOST_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_BYTES_LOST_VALID;
			cell_status->ul_bytes_lost = if_cell_sr->ul_bytes_lost;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MIN_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MIN_QUEUE_SIZE_VALID;
			cell_status->ul_min_queue_size = if_cell_sr->ul_min_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_AVG_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_AVG_QUEUE_SIZE_VALID;
			cell_status->ul_avg_queue_size = if_cell_sr->ul_avg_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MAX_QUEUE_SIZE_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_UL_MAX_QUEUE_SIZE_VALID;
			cell_status->ul_max_queue_size = if_cell_sr->ul_max_queue_size;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_EFFECTIVE_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_EFFECTIVE_BANDWIDTH_VALID;
			cell_status->dl_effective_bandwidth = if_cell_sr->dl_effective_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_DL_MAX_BANDWIDTH_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_DL_MAX_BANDWIDTH_VALID;
			cell_status->dl_max_bandwidth = if_cell_sr->dl_max_bandwidth;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_INACTIVITY_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_INACTIVITY_TIME_VALID;
			cell_status->config_inactivity_time = if_cell_sr->config_inactivity_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_CONFIG_BACKOFF_TIME_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_CONFIG_BACKOFF_TIME_VALID;
			cell_status->config_backoff_time = if_cell_sr->config_backoff_time;
		}
		if (if_cell_sr->valid_bitmask & IF_CELL_UL_MSS_RECOMMENDED_VALID) {
			cell_status->valid_bitmask |= NSTAT_IFNET_DESC_CELL_MSS_RECOMMENDED_VALID;
			cell_status->mss_recommended = if_cell_sr->mss_recommended;
		}
	} else if (ifp->if_subfamily == IFNET_SUBFAMILY_WIFI) {
		/* Wifi report: translate if_wifi_status_v1. */
		nstat_ifnet_desc_wifi_status *wifi_status = &link_status->u.wifi;
		struct if_wifi_status_v1 *if_wifi_sr =
			&ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;

		if (ifsr->ifsr_version != IF_WIFI_STATUS_REPORT_VERSION_1)
			goto done;

		link_status->link_status_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;

		if (if_wifi_sr->valid_bitmask & IF_WIFI_LINK_QUALITY_METRIC_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_LINK_QUALITY_METRIC_VALID;
			wifi_status->link_quality_metric = if_wifi_sr->link_quality_metric;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->ul_effective_bandwidth = if_wifi_sr->ul_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_BANDWIDTH_VALID;
			wifi_status->ul_max_bandwidth = if_wifi_sr->ul_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MIN_LATENCY_VALID;
			wifi_status->ul_min_latency = if_wifi_sr->ul_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_EFFECTIVE_LATENCY_VALID;
			wifi_status->ul_effective_latency = if_wifi_sr->ul_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_MAX_LATENCY_VALID;
			wifi_status->ul_max_latency = if_wifi_sr->ul_max_latency;
		}
		/* Enum translation; unknown values clear the valid bit. */
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_RETXT_LEVEL_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
			if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_NONE)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_NONE;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_LOW)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_LOW;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_MEDIUM)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_MEDIUM;
			else if (if_wifi_sr->ul_retxt_level == IF_WIFI_UL_RETXT_LEVEL_HIGH)
				wifi_status->ul_retxt_level = NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_HIGH;
			else
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_UL_RETXT_LEVEL_VALID;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_BYTES_LOST_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_BYTES_LOST_VALID;
			wifi_status->ul_bytes_lost = if_wifi_sr->ul_bytes_lost;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_UL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_UL_ERROR_RATE_VALID;
			wifi_status->ul_error_rate = if_wifi_sr->ul_error_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
			wifi_status->dl_effective_bandwidth = if_wifi_sr->dl_effective_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_BANDWIDTH_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_BANDWIDTH_VALID;
			wifi_status->dl_max_bandwidth = if_wifi_sr->dl_max_bandwidth;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MIN_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MIN_LATENCY_VALID;
			wifi_status->dl_min_latency = if_wifi_sr->dl_min_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_EFFECTIVE_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_EFFECTIVE_LATENCY_VALID;
			wifi_status->dl_effective_latency = if_wifi_sr->dl_effective_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_MAX_LATENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_MAX_LATENCY_VALID;
			wifi_status->dl_max_latency = if_wifi_sr->dl_max_latency;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_DL_ERROR_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_DL_ERROR_RATE_VALID;
			wifi_status->dl_error_rate = if_wifi_sr->dl_error_rate;
		}
		/* Frequency band is also an enum translation. */
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_FREQUENCY_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
			if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_2_4_GHZ)
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_2_4_GHZ;
			else if (if_wifi_sr->config_frequency == IF_WIFI_CONFIG_FREQUENCY_5_0_GHZ)
				wifi_status->config_frequency = NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_5_0_GHZ;
			else
				wifi_status->valid_bitmask &= ~NSTAT_IFNET_DESC_WIFI_CONFIG_FREQUENCY_VALID;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_MULTICAST_RATE_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_MULTICAST_RATE_VALID;
			wifi_status->config_multicast_rate = if_wifi_sr->config_multicast_rate;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_COUNT_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_COUNT_VALID;
			wifi_status->scan_count = if_wifi_sr->scan_count;
		}
		if (if_wifi_sr->valid_bitmask & IF_WIFI_CONFIG_SCAN_DURATION_VALID) {
			wifi_status->valid_bitmask |= NSTAT_IFNET_DESC_WIFI_CONFIG_SCAN_DURATION_VALID;
			wifi_status->scan_duration = if_wifi_sr->scan_duration;
		}
	}

done:
	lck_rw_done(&ifp->if_link_status_lock);
}
2366
/* net_uptime() value (seconds) of the last ECN stats report; used to
 * rate-limit nstat_ifnet_report_ecn_stats(). */
static u_int64_t nstat_ifnet_last_report_time = 0;
/* Minimum seconds between reports; defined elsewhere in the TCP stack. */
extern int tcp_report_stats_interval;
2369
2370static void
2371nstat_ifnet_compute_percentages(struct if_tcp_ecn_perf_stat *ifst)
2372{
2373 /* Retransmit percentage */
2374 if (ifst->total_rxmitpkts > 0 && ifst->total_txpkts > 0) {
2375 /* shift by 10 for precision */
2376 ifst->rxmit_percent =
2377 ((ifst->total_rxmitpkts << 10) * 100) / ifst->total_txpkts;
2378 } else {
2379 ifst->rxmit_percent = 0;
2380 }
2381
2382 /* Out-of-order percentage */
2383 if (ifst->total_oopkts > 0 && ifst->total_rxpkts > 0) {
2384 /* shift by 10 for precision */
2385 ifst->oo_percent =
2386 ((ifst->total_oopkts << 10) * 100) / ifst->total_rxpkts;
2387 } else {
2388 ifst->oo_percent = 0;
2389 }
2390
2391 /* Reorder percentage */
2392 if (ifst->total_reorderpkts > 0 &&
2393 (ifst->total_txpkts + ifst->total_rxpkts) > 0) {
2394 /* shift by 10 for precision */
2395 ifst->reorder_percent =
2396 ((ifst->total_reorderpkts << 10) * 100) /
2397 (ifst->total_txpkts + ifst->total_rxpkts);
2398 } else {
2399 ifst->reorder_percent = 0;
2400 }
2401}
2402
2403static void
2404nstat_ifnet_normalize_counter(struct if_tcp_ecn_stat *if_st)
2405{
2406 u_int64_t ecn_on_conn, ecn_off_conn;
2407
2408 if (if_st == NULL)
2409 return;
2410 ecn_on_conn = if_st->ecn_client_success +
2411 if_st->ecn_server_success;
2412 ecn_off_conn = if_st->ecn_off_conn +
2413 (if_st->ecn_client_setup - if_st->ecn_client_success) +
2414 (if_st->ecn_server_setup - if_st->ecn_server_success);
2415
2416 /*
2417 * report sack episodes, rst_drop and rxmit_drop
2418 * as a ratio per connection, shift by 10 for precision
2419 */
2420 if (ecn_on_conn > 0) {
2421 if_st->ecn_on.sack_episodes =
2422 (if_st->ecn_on.sack_episodes << 10) / ecn_on_conn;
2423 if_st->ecn_on.rst_drop =
2424 (if_st->ecn_on.rst_drop << 10) * 100 / ecn_on_conn;
2425 if_st->ecn_on.rxmit_drop =
2426 (if_st->ecn_on.rxmit_drop << 10) * 100 / ecn_on_conn;
2427 } else {
2428 /* set to zero, just in case */
2429 if_st->ecn_on.sack_episodes = 0;
2430 if_st->ecn_on.rst_drop = 0;
2431 if_st->ecn_on.rxmit_drop = 0;
2432 }
2433
2434 if (ecn_off_conn > 0) {
2435 if_st->ecn_off.sack_episodes =
2436 (if_st->ecn_off.sack_episodes << 10) / ecn_off_conn;
2437 if_st->ecn_off.rst_drop =
2438 (if_st->ecn_off.rst_drop << 10) * 100 / ecn_off_conn;
2439 if_st->ecn_off.rxmit_drop =
2440 (if_st->ecn_off.rxmit_drop << 10) * 100 / ecn_off_conn;
2441 } else {
2442 if_st->ecn_off.sack_episodes = 0;
2443 if_st->ecn_off.rst_drop = 0;
2444 if_st->ecn_off.rxmit_drop = 0;
2445 }
2446 if_st->ecn_total_conn = ecn_off_conn + ecn_on_conn;
2447}
2448
/*
 * Periodically report per-interface TCP/ECN statistics for Ethernet
 * and cellular interfaces.  Rate-limited to tcp_report_stats_interval
 * seconds; for each qualifying ifnet the IPv4 and then IPv6 stat
 * blocks are normalized, sent via nstat_sysinfo_send_data(), and
 * zeroed for the next interval.
 */
static void
nstat_ifnet_report_ecn_stats(void)
{
	u_int64_t uptime, last_report_time;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_ifnet_ecn_stats *st;
	struct ifnet *ifp;

	uptime = net_uptime();

	/* Too soon since the last report? */
	if ((int)(uptime - nstat_ifnet_last_report_time) <
	    tcp_report_stats_interval)
		return;

	last_report_time = nstat_ifnet_last_report_time;
	nstat_ifnet_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_IFNET_ECN_STATS;
	st = &data.u.ifnet_ecn_stats;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (ifp->if_ipv4_stat == NULL || ifp->if_ipv6_stat == NULL)
			continue;

		if (!IF_FULLY_ATTACHED(ifp))
			continue;

		/* Limit reporting to Wifi, Ethernet and cellular.
		 * (Wifi interfaces presumably satisfy IFNET_IS_ETHERNET;
		 * NOTE(review): confirm, since only Ethernet/cellular are
		 * tested here.) */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp)))
			continue;

		bzero(st, sizeof(*st));
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_ECN_TYPE_ETHERNET;
		}
		data.unsent_data_cnt = ifp->if_unsent_data_cnt;
		/* skip if there was no update since last report */
		if (ifp->if_ipv4_stat->timestamp <= 0 ||
		    ifp->if_ipv4_stat->timestamp < last_report_time)
			goto v6;
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV4;
		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv4_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv4_stat);
		bcopy(ifp->if_ipv4_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);
		/* Reset the IPv4 counters for the next interval. */
		bzero(ifp->if_ipv4_stat, sizeof(*ifp->if_ipv4_stat));

v6:
		/* skip if there was no update since last report */
		if (ifp->if_ipv6_stat->timestamp <= 0 ||
		    ifp->if_ipv6_stat->timestamp < last_report_time)
			continue;
		st->ifnet_proto = NSTAT_IFNET_ECN_PROTO_IPV6;

		/* compute percentages using packet counts */
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_on);
		nstat_ifnet_compute_percentages(&ifp->if_ipv6_stat->ecn_off);
		nstat_ifnet_normalize_counter(ifp->if_ipv6_stat);
		bcopy(ifp->if_ipv6_stat, &st->ecn_stat,
		    sizeof(st->ecn_stat));
		nstat_sysinfo_send_data(&data);

		/* Zero the stats in ifp */
		bzero(ifp->if_ipv6_stat, sizeof(*ifp->if_ipv6_stat));
	}
	ifnet_head_done();

}
2524
/* Some thresholds to determine Low Internet mode.
 * Percentage thresholds are in <<10 fixed-point, matching the
 * percentages computed in nstat_lim_activity_check(). */
#define NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD	1000000 /* 1 Mbps */
#define NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD	500000 /* 500 Kbps */
#define NSTAT_LIM_UL_MIN_RTT_THRESHOLD	1000 /* 1 second */
#define NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD (10 << 10) /* 10 percent connection timeouts */
#define NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD	(2 << 10) /* 2 percent packet loss rate */
2531
2532static boolean_t
2533nstat_lim_activity_check(struct if_lim_perf_stat *st)
2534{
2535 /* check that the current activity is enough to report stats */
2536 if (st->lim_total_txpkts < nstat_lim_min_tx_pkts ||
2537 st->lim_total_rxpkts < nstat_lim_min_rx_pkts ||
2538 st->lim_conn_attempts == 0)
2539 return (FALSE);
2540
2541 /*
2542 * Compute percentages if there was enough activity. Use
2543 * shift-left by 10 to preserve precision.
2544 */
2545 st->lim_packet_loss_percent = ((st->lim_total_retxpkts << 10) /
2546 st->lim_total_txpkts) * 100;
2547
2548 st->lim_packet_ooo_percent = ((st->lim_total_oopkts << 10) /
2549 st->lim_total_rxpkts) * 100;
2550
2551 st->lim_conn_timeout_percent = ((st->lim_conn_timeouts << 10) /
2552 st->lim_conn_attempts) * 100;
2553
2554 /*
2555 * Is Low Internet detected? First order metrics are bandwidth
2556 * and RTT. If these metrics are below the minimum thresholds
2557 * defined then the network attachment can be classified as
2558 * having Low Internet capacity.
2559 *
2560 * High connection timeout rate also indicates Low Internet
2561 * capacity.
2562 */
2563 if (st->lim_dl_max_bandwidth > 0 &&
2564 st->lim_dl_max_bandwidth <= NSTAT_LIM_DL_MAX_BANDWIDTH_THRESHOLD)
2565 st->lim_dl_detected = 1;
2566
2567 if ((st->lim_ul_max_bandwidth > 0 &&
2568 st->lim_ul_max_bandwidth <= NSTAT_LIM_UL_MAX_BANDWIDTH_THRESHOLD) ||
2569 st->lim_rtt_min >= NSTAT_LIM_UL_MIN_RTT_THRESHOLD)
2570 st->lim_ul_detected = 1;
2571
2572 if (st->lim_conn_attempts > 20 &&
2573 st->lim_conn_timeout_percent >=
2574 NSTAT_LIM_CONN_TIMEOUT_PERCENT_THRESHOLD)
2575 st->lim_ul_detected = 1;
2576 /*
2577 * Second order metrics: If there was high packet loss even after
2578 * using delay based algorithms then we classify it as Low Internet
2579 * again
2580 */
2581 if (st->lim_bk_txpkts >= nstat_lim_min_tx_pkts &&
2582 st->lim_packet_loss_percent >=
2583 NSTAT_LIM_PACKET_LOSS_PERCENT_THRESHOLD)
2584 st->lim_ul_detected = 1;
2585 return (TRUE);
2586}
2587
/* net_uptime() value (seconds) of the last LIM stats report. */
static u_int64_t nstat_lim_last_report_time = 0;
/*
 * Periodically report Low Internet mode statistics for Ethernet and
 * cellular interfaces that show enough activity.  Rate-limited to
 * nstat_lim_interval seconds; each reported interface's LIM counters
 * are zeroed afterwards.
 */
static void
nstat_ifnet_report_lim_stats(void)
{
	u_int64_t uptime;
	struct nstat_sysinfo_data data;
	struct nstat_sysinfo_lim_stats *st;
	struct ifnet *ifp;
	int err;

	uptime = net_uptime();

	/* Too soon since the last report? */
	if ((u_int32_t)(uptime - nstat_lim_last_report_time) <
	    nstat_lim_interval)
		return;

	nstat_lim_last_report_time = uptime;
	data.flags = NSTAT_SYSINFO_LIM_STATS;
	st = &data.u.lim_stats;
	data.unsent_data_cnt = 0;

	ifnet_head_lock_shared();
	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
		if (!IF_FULLY_ATTACHED(ifp))
			continue;

		/* Limit reporting to Wifi, Ethernet and cellular */
		if (!(IFNET_IS_ETHERNET(ifp) || IFNET_IS_CELLULAR(ifp)))
			continue;

		if (!nstat_lim_activity_check(&ifp->if_lim_stat))
			continue;

		bzero(st, sizeof(*st));
		st->ifnet_siglen = sizeof (st->ifnet_signature);
		/* Try the IPv4 network signature first, then IPv6; skip
		 * interfaces with neither. */
		err = ifnet_get_netsignature(ifp, AF_INET,
		    (u_int8_t *)&st->ifnet_siglen, NULL,
		    st->ifnet_signature);
		if (err != 0) {
			err = ifnet_get_netsignature(ifp, AF_INET6,
			    (u_int8_t *)&st->ifnet_siglen, NULL,
			    st->ifnet_signature);
			if (err != 0)
				continue;
		}
		ifnet_lock_shared(ifp);
		if (IFNET_IS_CELLULAR(ifp)) {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_CELLULAR;
		} else if (IFNET_IS_WIFI(ifp)) {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_WIFI;
		} else {
			st->ifnet_type = NSTAT_IFNET_DESC_LINK_STATUS_TYPE_ETHERNET;
		}
		bcopy(&ifp->if_lim_stat, &st->lim_stat,
		    sizeof(st->lim_stat));

		/* Zero the stats in ifp */
		bzero(&ifp->if_lim_stat, sizeof(ifp->if_lim_stat));
		ifnet_lock_done(ifp);
		nstat_sysinfo_send_data(&data);
	}
	ifnet_head_done();
}
2651
/*
 * Fill an nstat_ifnet_descriptor for the interface identified by the
 * cookie: name, index, current data threshold, interface type,
 * description and link status.  Returns EINVAL when the buffer is too
 * small or the ifnet is gone.
 */
static errno_t
nstat_ifnet_copy_descriptor(
	nstat_provider_cookie_t cookie,
	void *data,
	u_int32_t len)
{
	nstat_ifnet_descriptor *desc = (nstat_ifnet_descriptor *)data;
	struct nstat_ifnet_cookie *ifcookie =
	    (struct nstat_ifnet_cookie *)cookie;
	struct ifnet *ifp = ifcookie->ifp;

	if (len < sizeof(nstat_ifnet_descriptor))
		return EINVAL;

	if (nstat_ifnet_gone(cookie))
		return EINVAL;

	bzero(desc, sizeof(*desc));
	ifnet_lock_shared(ifp);
	strlcpy(desc->name, ifp->if_xname, sizeof(desc->name));
	desc->ifindex = ifp->if_index;
	desc->threshold = ifp->if_data_threshold;
	desc->type = ifp->if_type;
	/* NOTE(review): copies sizeof(desc->description) bytes from
	 * ifd_desc regardless of ifd_len; assumes the ifd_desc buffer
	 * is at least that large — confirm against struct if_descreq. */
	if (ifp->if_desc.ifd_len < sizeof(desc->description))
		memcpy(desc->description, ifp->if_desc.ifd_desc,
		    sizeof(desc->description));
	nstat_ifnet_copy_link_status(ifp, desc);
	ifnet_lock_done(ifp);
	return 0;
}
2682
2683static void
2684nstat_init_ifnet_provider(void)
2685{
2686 bzero(&nstat_ifnet_provider, sizeof(nstat_ifnet_provider));
2687 nstat_ifnet_provider.nstat_provider_id = NSTAT_PROVIDER_IFNET;
2688 nstat_ifnet_provider.nstat_descriptor_length = sizeof(nstat_ifnet_descriptor);
2689 nstat_ifnet_provider.nstat_lookup = nstat_ifnet_lookup;
2690 nstat_ifnet_provider.nstat_gone = nstat_ifnet_gone;
2691 nstat_ifnet_provider.nstat_counts = nstat_ifnet_counts;
2692 nstat_ifnet_provider.nstat_watcher_add = NULL;
2693 nstat_ifnet_provider.nstat_watcher_remove = NULL;
2694 nstat_ifnet_provider.nstat_copy_descriptor = nstat_ifnet_copy_descriptor;
2695 nstat_ifnet_provider.nstat_release = nstat_ifnet_release;
2696 nstat_ifnet_provider.next = nstat_providers;
2697 nstat_providers = &nstat_ifnet_provider;
2698}
2699
/*
 * Called when an interface's byte counters crossed its data threshold:
 * push a counts update to every client watching that ifindex.
 * Lock order: nstat_mtx, then each state's ncs_mtx.
 */
__private_extern__ void
nstat_ifnet_threshold_reached(unsigned int ifindex)
{
	nstat_control_state *state;
	nstat_src *src;
	struct ifnet *ifp;
	struct nstat_ifnet_cookie *ifcookie;

	lck_mtx_lock(&nstat_mtx);
	for (state = nstat_controls; state; state = state->ncs_next)
	{
		lck_mtx_lock(&state->ncs_mtx);
		TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
		{
			/* Only ifnet sources for the matching index. */
			if (src->provider != &nstat_ifnet_provider)
				continue;
			ifcookie = (struct nstat_ifnet_cookie *)src->cookie;
			ifp = ifcookie->ifp;
			if (ifp->if_index != ifindex)
				continue;
			nstat_control_send_counts(state, src, 0, 0, NULL);
		}
		lck_mtx_unlock(&state->ncs_mtx);
	}
	lck_mtx_unlock(&nstat_mtx);
}
2726
2727#pragma mark -- Sysinfo --
2728static void
2729nstat_set_keyval_scalar(nstat_sysinfo_keyval *kv, int key, u_int32_t val)
2730{
2731 kv->nstat_sysinfo_key = key;
2732 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_SCALAR;
2733 kv->u.nstat_sysinfo_scalar = val;
2734 kv->nstat_sysinfo_valsize = sizeof(kv->u.nstat_sysinfo_scalar);
2735}
2736
2737static void
2738nstat_set_keyval_string(nstat_sysinfo_keyval *kv, int key, u_int8_t *buf,
2739 u_int32_t len)
2740{
2741 kv->nstat_sysinfo_key = key;
2742 kv->nstat_sysinfo_flags = NSTAT_SYSINFO_FLAG_STRING;
2743 kv->nstat_sysinfo_valsize = min(len,
2744 NSTAT_SYSINFO_KEYVAL_STRING_MAXSIZE);
2745 bcopy(buf, kv->u.nstat_sysinfo_string, kv->nstat_sysinfo_valsize);
2746}
2747
2748static void
2749nstat_sysinfo_send_data_internal(
2750 nstat_control_state *control,
2751 nstat_sysinfo_data *data)
2752{
2753 nstat_msg_sysinfo_counts *syscnt = NULL;
2754 size_t allocsize = 0, countsize = 0, nkeyvals = 0, finalsize = 0;
2755 nstat_sysinfo_keyval *kv;
2756 errno_t result = 0;
2757 size_t i = 0;
2758
2759 allocsize = offsetof(nstat_msg_sysinfo_counts, counts);
2760 countsize = offsetof(nstat_sysinfo_counts, nstat_sysinfo_keyvals);
2761 finalsize = allocsize;
2762
2763 /* get number of key-vals for each kind of stat */
2764 switch (data->flags)
2765 {
2766 case NSTAT_SYSINFO_MBUF_STATS:
2767 nkeyvals = sizeof(struct nstat_sysinfo_mbuf_stats) /
2768 sizeof(u_int32_t);
2769 break;
2770 case NSTAT_SYSINFO_TCP_STATS:
2771 nkeyvals = NSTAT_SYSINFO_TCP_STATS_COUNT;
2772 break;
2773 case NSTAT_SYSINFO_IFNET_ECN_STATS:
2774 nkeyvals = (sizeof(struct if_tcp_ecn_stat) /
2775 sizeof(u_int64_t));
2776
2777 /* Two more keys for ifnet type and proto */
2778 nkeyvals += 2;
2779
2780 /* One key for unsent data. */
2781 nkeyvals++;
2782 break;
2783 case NSTAT_SYSINFO_LIM_STATS:
2784 nkeyvals = NSTAT_LIM_STAT_KEYVAL_COUNT;
2785 break;
2786 case NSTAT_SYSINFO_NET_API_STATS:
2787 nkeyvals = NSTAT_NET_API_STAT_KEYVAL_COUNT;
2788 break;
2789 default:
2790 return;
2791 }
2792 countsize += sizeof(nstat_sysinfo_keyval) * nkeyvals;
2793 allocsize += countsize;
2794
2795 syscnt = OSMalloc(allocsize, nstat_malloc_tag);
2796 if (syscnt == NULL)
2797 return;
2798 bzero(syscnt, allocsize);
2799
2800 kv = (nstat_sysinfo_keyval *) &syscnt->counts.nstat_sysinfo_keyvals;
2801 switch (data->flags)
2802 {
2803 case NSTAT_SYSINFO_MBUF_STATS:
2804 {
2805 nstat_set_keyval_scalar(&kv[i++],
2806 NSTAT_SYSINFO_KEY_MBUF_256B_TOTAL,
2807 data->u.mb_stats.total_256b);
2808 nstat_set_keyval_scalar(&kv[i++],
2809 NSTAT_SYSINFO_KEY_MBUF_2KB_TOTAL,
2810 data->u.mb_stats.total_2kb);
2811 nstat_set_keyval_scalar(&kv[i++],
2812 NSTAT_SYSINFO_KEY_MBUF_4KB_TOTAL,
2813 data->u.mb_stats.total_4kb);
2814 nstat_set_keyval_scalar(&kv[i++],
2815 NSTAT_SYSINFO_MBUF_16KB_TOTAL,
2816 data->u.mb_stats.total_16kb);
2817 nstat_set_keyval_scalar(&kv[i++],
2818 NSTAT_SYSINFO_KEY_SOCK_MBCNT,
2819 data->u.mb_stats.sbmb_total);
2820 nstat_set_keyval_scalar(&kv[i++],
2821 NSTAT_SYSINFO_KEY_SOCK_ATMBLIMIT,
2822 data->u.mb_stats.sb_atmbuflimit);
2823 nstat_set_keyval_scalar(&kv[i++],
2824 NSTAT_SYSINFO_MBUF_DRAIN_CNT,
2825 data->u.mb_stats.draincnt);
2826 nstat_set_keyval_scalar(&kv[i++],
2827 NSTAT_SYSINFO_MBUF_MEM_RELEASED,
2828 data->u.mb_stats.memreleased);
2829 nstat_set_keyval_scalar(&kv[i++],
2830 NSTAT_SYSINFO_KEY_SOCK_MBFLOOR,
2831 data->u.mb_stats.sbmb_floor);
2832 VERIFY(i == nkeyvals);
2833 break;
2834 }
2835 case NSTAT_SYSINFO_TCP_STATS:
2836 {
2837 nstat_set_keyval_scalar(&kv[i++],
2838 NSTAT_SYSINFO_KEY_IPV4_AVGRTT,
2839 data->u.tcp_stats.ipv4_avgrtt);
2840 nstat_set_keyval_scalar(&kv[i++],
2841 NSTAT_SYSINFO_KEY_IPV6_AVGRTT,
2842 data->u.tcp_stats.ipv6_avgrtt);
2843 nstat_set_keyval_scalar(&kv[i++],
2844 NSTAT_SYSINFO_KEY_SEND_PLR,
2845 data->u.tcp_stats.send_plr);
2846 nstat_set_keyval_scalar(&kv[i++],
2847 NSTAT_SYSINFO_KEY_RECV_PLR,
2848 data->u.tcp_stats.recv_plr);
2849 nstat_set_keyval_scalar(&kv[i++],
2850 NSTAT_SYSINFO_KEY_SEND_TLRTO,
2851 data->u.tcp_stats.send_tlrto_rate);
2852 nstat_set_keyval_scalar(&kv[i++],
2853 NSTAT_SYSINFO_KEY_SEND_REORDERRATE,
2854 data->u.tcp_stats.send_reorder_rate);
2855 nstat_set_keyval_scalar(&kv[i++],
2856 NSTAT_SYSINFO_CONNECTION_ATTEMPTS,
2857 data->u.tcp_stats.connection_attempts);
2858 nstat_set_keyval_scalar(&kv[i++],
2859 NSTAT_SYSINFO_CONNECTION_ACCEPTS,
2860 data->u.tcp_stats.connection_accepts);
2861 nstat_set_keyval_scalar(&kv[i++],
2862 NSTAT_SYSINFO_ECN_CLIENT_ENABLED,
2863 data->u.tcp_stats.ecn_client_enabled);
2864 nstat_set_keyval_scalar(&kv[i++],
2865 NSTAT_SYSINFO_ECN_SERVER_ENABLED,
2866 data->u.tcp_stats.ecn_server_enabled);
2867 nstat_set_keyval_scalar(&kv[i++],
2868 NSTAT_SYSINFO_ECN_CLIENT_SETUP,
2869 data->u.tcp_stats.ecn_client_setup);
2870 nstat_set_keyval_scalar(&kv[i++],
2871 NSTAT_SYSINFO_ECN_SERVER_SETUP,
2872 data->u.tcp_stats.ecn_server_setup);
2873 nstat_set_keyval_scalar(&kv[i++],
2874 NSTAT_SYSINFO_ECN_CLIENT_SUCCESS,
2875 data->u.tcp_stats.ecn_client_success);
2876 nstat_set_keyval_scalar(&kv[i++],
2877 NSTAT_SYSINFO_ECN_SERVER_SUCCESS,
2878 data->u.tcp_stats.ecn_server_success);
2879 nstat_set_keyval_scalar(&kv[i++],
2880 NSTAT_SYSINFO_ECN_NOT_SUPPORTED,
2881 data->u.tcp_stats.ecn_not_supported);
2882 nstat_set_keyval_scalar(&kv[i++],
2883 NSTAT_SYSINFO_ECN_LOST_SYN,
2884 data->u.tcp_stats.ecn_lost_syn);
2885 nstat_set_keyval_scalar(&kv[i++],
2886 NSTAT_SYSINFO_ECN_LOST_SYNACK,
2887 data->u.tcp_stats.ecn_lost_synack);
2888 nstat_set_keyval_scalar(&kv[i++],
2889 NSTAT_SYSINFO_ECN_RECV_CE,
2890 data->u.tcp_stats.ecn_recv_ce);
2891 nstat_set_keyval_scalar(&kv[i++],
2892 NSTAT_SYSINFO_ECN_RECV_ECE,
2893 data->u.tcp_stats.ecn_recv_ece);
2894 nstat_set_keyval_scalar(&kv[i++],
2895 NSTAT_SYSINFO_ECN_SENT_ECE,
2896 data->u.tcp_stats.ecn_sent_ece);
2897 nstat_set_keyval_scalar(&kv[i++],
2898 NSTAT_SYSINFO_ECN_CONN_RECV_CE,
2899 data->u.tcp_stats.ecn_conn_recv_ce);
2900 nstat_set_keyval_scalar(&kv[i++],
2901 NSTAT_SYSINFO_ECN_CONN_RECV_ECE,
2902 data->u.tcp_stats.ecn_conn_recv_ece);
2903 nstat_set_keyval_scalar(&kv[i++],
2904 NSTAT_SYSINFO_ECN_CONN_PLNOCE,
2905 data->u.tcp_stats.ecn_conn_plnoce);
2906 nstat_set_keyval_scalar(&kv[i++],
2907 NSTAT_SYSINFO_ECN_CONN_PL_CE,
2908 data->u.tcp_stats.ecn_conn_pl_ce);
2909 nstat_set_keyval_scalar(&kv[i++],
2910 NSTAT_SYSINFO_ECN_CONN_NOPL_CE,
2911 data->u.tcp_stats.ecn_conn_nopl_ce);
2912 nstat_set_keyval_scalar(&kv[i++],
2913 NSTAT_SYSINFO_ECN_FALLBACK_SYNLOSS,
2914 data->u.tcp_stats.ecn_fallback_synloss);
2915 nstat_set_keyval_scalar(&kv[i++],
2916 NSTAT_SYSINFO_ECN_FALLBACK_REORDER,
2917 data->u.tcp_stats.ecn_fallback_reorder);
2918 nstat_set_keyval_scalar(&kv[i++],
2919 NSTAT_SYSINFO_ECN_FALLBACK_CE,
2920 data->u.tcp_stats.ecn_fallback_ce);
2921 nstat_set_keyval_scalar(&kv[i++],
2922 NSTAT_SYSINFO_TFO_SYN_DATA_RCV,
2923 data->u.tcp_stats.tfo_syn_data_rcv);
2924 nstat_set_keyval_scalar(&kv[i++],
2925 NSTAT_SYSINFO_TFO_COOKIE_REQ_RCV,
2926 data->u.tcp_stats.tfo_cookie_req_rcv);
2927 nstat_set_keyval_scalar(&kv[i++],
2928 NSTAT_SYSINFO_TFO_COOKIE_SENT,
2929 data->u.tcp_stats.tfo_cookie_sent);
2930 nstat_set_keyval_scalar(&kv[i++],
2931 NSTAT_SYSINFO_TFO_COOKIE_INVALID,
2932 data->u.tcp_stats.tfo_cookie_invalid);
2933 nstat_set_keyval_scalar(&kv[i++],
2934 NSTAT_SYSINFO_TFO_COOKIE_REQ,
2935 data->u.tcp_stats.tfo_cookie_req);
2936 nstat_set_keyval_scalar(&kv[i++],
2937 NSTAT_SYSINFO_TFO_COOKIE_RCV,
2938 data->u.tcp_stats.tfo_cookie_rcv);
2939 nstat_set_keyval_scalar(&kv[i++],
2940 NSTAT_SYSINFO_TFO_SYN_DATA_SENT,
2941 data->u.tcp_stats.tfo_syn_data_sent);
2942 nstat_set_keyval_scalar(&kv[i++],
2943 NSTAT_SYSINFO_TFO_SYN_DATA_ACKED,
2944 data->u.tcp_stats.tfo_syn_data_acked);
2945 nstat_set_keyval_scalar(&kv[i++],
2946 NSTAT_SYSINFO_TFO_SYN_LOSS,
2947 data->u.tcp_stats.tfo_syn_loss);
2948 nstat_set_keyval_scalar(&kv[i++],
2949 NSTAT_SYSINFO_TFO_BLACKHOLE,
2950 data->u.tcp_stats.tfo_blackhole);
2951 nstat_set_keyval_scalar(&kv[i++],
2952 NSTAT_SYSINFO_TFO_COOKIE_WRONG,
2953 data->u.tcp_stats.tfo_cookie_wrong);
2954 nstat_set_keyval_scalar(&kv[i++],
2955 NSTAT_SYSINFO_TFO_NO_COOKIE_RCV,
2956 data->u.tcp_stats.tfo_no_cookie_rcv);
2957 nstat_set_keyval_scalar(&kv[i++],
2958 NSTAT_SYSINFO_TFO_HEURISTICS_DISABLE,
2959 data->u.tcp_stats.tfo_heuristics_disable);
2960 nstat_set_keyval_scalar(&kv[i++],
2961 NSTAT_SYSINFO_TFO_SEND_BLACKHOLE,
2962 data->u.tcp_stats.tfo_sndblackhole);
2963 nstat_set_keyval_scalar(&kv[i++],
2964 NSTAT_SYSINFO_MPTCP_HANDOVER_ATTEMPT,
2965 data->u.tcp_stats.mptcp_handover_attempt);
2966 nstat_set_keyval_scalar(&kv[i++],
2967 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ATTEMPT,
2968 data->u.tcp_stats.mptcp_interactive_attempt);
2969 nstat_set_keyval_scalar(&kv[i++],
2970 NSTAT_SYSINFO_MPTCP_AGGREGATE_ATTEMPT,
2971 data->u.tcp_stats.mptcp_aggregate_attempt);
2972 nstat_set_keyval_scalar(&kv[i++],
2973 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_ATTEMPT,
2974 data->u.tcp_stats.mptcp_fp_handover_attempt);
2975 nstat_set_keyval_scalar(&kv[i++],
2976 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_ATTEMPT,
2977 data->u.tcp_stats.mptcp_fp_interactive_attempt);
2978 nstat_set_keyval_scalar(&kv[i++],
2979 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_ATTEMPT,
2980 data->u.tcp_stats.mptcp_fp_aggregate_attempt);
2981 nstat_set_keyval_scalar(&kv[i++],
2982 NSTAT_SYSINFO_MPTCP_HEURISTIC_FALLBACK,
2983 data->u.tcp_stats.mptcp_heuristic_fallback);
2984 nstat_set_keyval_scalar(&kv[i++],
2985 NSTAT_SYSINFO_MPTCP_FP_HEURISTIC_FALLBACK,
2986 data->u.tcp_stats.mptcp_fp_heuristic_fallback);
2987 nstat_set_keyval_scalar(&kv[i++],
2988 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_WIFI,
2989 data->u.tcp_stats.mptcp_handover_success_wifi);
2990 nstat_set_keyval_scalar(&kv[i++],
2991 NSTAT_SYSINFO_MPTCP_HANDOVER_SUCCESS_CELL,
2992 data->u.tcp_stats.mptcp_handover_success_cell);
2993 nstat_set_keyval_scalar(&kv[i++],
2994 NSTAT_SYSINFO_MPTCP_INTERACTIVE_SUCCESS,
2995 data->u.tcp_stats.mptcp_interactive_success);
2996 nstat_set_keyval_scalar(&kv[i++],
2997 NSTAT_SYSINFO_MPTCP_AGGREGATE_SUCCESS,
2998 data->u.tcp_stats.mptcp_aggregate_success);
2999 nstat_set_keyval_scalar(&kv[i++],
3000 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_WIFI,
3001 data->u.tcp_stats.mptcp_fp_handover_success_wifi);
3002 nstat_set_keyval_scalar(&kv[i++],
3003 NSTAT_SYSINFO_MPTCP_FP_HANDOVER_SUCCESS_CELL,
3004 data->u.tcp_stats.mptcp_fp_handover_success_cell);
3005 nstat_set_keyval_scalar(&kv[i++],
3006 NSTAT_SYSINFO_MPTCP_FP_INTERACTIVE_SUCCESS,
3007 data->u.tcp_stats.mptcp_fp_interactive_success);
3008 nstat_set_keyval_scalar(&kv[i++],
3009 NSTAT_SYSINFO_MPTCP_FP_AGGREGATE_SUCCESS,
3010 data->u.tcp_stats.mptcp_fp_aggregate_success);
3011 nstat_set_keyval_scalar(&kv[i++],
3012 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_FROM_WIFI,
3013 data->u.tcp_stats.mptcp_handover_cell_from_wifi);
3014 nstat_set_keyval_scalar(&kv[i++],
3015 NSTAT_SYSINFO_MPTCP_HANDOVER_WIFI_FROM_CELL,
3016 data->u.tcp_stats.mptcp_handover_wifi_from_cell);
3017 nstat_set_keyval_scalar(&kv[i++],
3018 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_FROM_WIFI,
3019 data->u.tcp_stats.mptcp_interactive_cell_from_wifi);
3020 nstat_set_keyval_scalar(&kv[i++],
3021 NSTAT_SYSINFO_MPTCP_HANDOVER_CELL_BYTES,
3022 data->u.tcp_stats.mptcp_handover_cell_bytes);
3023 nstat_set_keyval_scalar(&kv[i++],
3024 NSTAT_SYSINFO_MPTCP_INTERACTIVE_CELL_BYTES,
3025 data->u.tcp_stats.mptcp_interactive_cell_bytes);
3026 nstat_set_keyval_scalar(&kv[i++],
3027 NSTAT_SYSINFO_MPTCP_AGGREGATE_CELL_BYTES,
3028 data->u.tcp_stats.mptcp_aggregate_cell_bytes);
3029 nstat_set_keyval_scalar(&kv[i++],
3030 NSTAT_SYSINFO_MPTCP_HANDOVER_ALL_BYTES,
3031 data->u.tcp_stats.mptcp_handover_all_bytes);
3032 nstat_set_keyval_scalar(&kv[i++],
3033 NSTAT_SYSINFO_MPTCP_INTERACTIVE_ALL_BYTES,
3034 data->u.tcp_stats.mptcp_interactive_all_bytes);
3035 nstat_set_keyval_scalar(&kv[i++],
3036 NSTAT_SYSINFO_MPTCP_AGGREGATE_ALL_BYTES,
3037 data->u.tcp_stats.mptcp_aggregate_all_bytes);
3038 nstat_set_keyval_scalar(&kv[i++],
3039 NSTAT_SYSINFO_MPTCP_BACK_TO_WIFI,
3040 data->u.tcp_stats.mptcp_back_to_wifi);
3041 nstat_set_keyval_scalar(&kv[i++],
3042 NSTAT_SYSINFO_MPTCP_WIFI_PROXY,
3043 data->u.tcp_stats.mptcp_wifi_proxy);
3044 nstat_set_keyval_scalar(&kv[i++],
3045 NSTAT_SYSINFO_MPTCP_CELL_PROXY,
3046 data->u.tcp_stats.mptcp_cell_proxy);
3047 nstat_set_keyval_scalar(&kv[i++],
3048 NSTAT_SYSINFO_MPTCP_TRIGGERED_CELL,
3049 data->u.tcp_stats.mptcp_triggered_cell);
3050 VERIFY(i == nkeyvals);
3051 break;
3052 }
3053 case NSTAT_SYSINFO_IFNET_ECN_STATS:
3054 {
3055 nstat_set_keyval_scalar(&kv[i++],
3056 NSTAT_SYSINFO_ECN_IFNET_TYPE,
3057 data->u.ifnet_ecn_stats.ifnet_type);
3058 nstat_set_keyval_scalar(&kv[i++],
3059 NSTAT_SYSINFO_ECN_IFNET_PROTO,
3060 data->u.ifnet_ecn_stats.ifnet_proto);
3061 nstat_set_keyval_scalar(&kv[i++],
3062 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SETUP,
3063 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_setup);
3064 nstat_set_keyval_scalar(&kv[i++],
3065 NSTAT_SYSINFO_ECN_IFNET_SERVER_SETUP,
3066 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_setup);
3067 nstat_set_keyval_scalar(&kv[i++],
3068 NSTAT_SYSINFO_ECN_IFNET_CLIENT_SUCCESS,
3069 data->u.ifnet_ecn_stats.ecn_stat.ecn_client_success);
3070 nstat_set_keyval_scalar(&kv[i++],
3071 NSTAT_SYSINFO_ECN_IFNET_SERVER_SUCCESS,
3072 data->u.ifnet_ecn_stats.ecn_stat.ecn_server_success);
3073 nstat_set_keyval_scalar(&kv[i++],
3074 NSTAT_SYSINFO_ECN_IFNET_PEER_NOSUPPORT,
3075 data->u.ifnet_ecn_stats.ecn_stat.ecn_peer_nosupport);
3076 nstat_set_keyval_scalar(&kv[i++],
3077 NSTAT_SYSINFO_ECN_IFNET_SYN_LOST,
3078 data->u.ifnet_ecn_stats.ecn_stat.ecn_syn_lost);
3079 nstat_set_keyval_scalar(&kv[i++],
3080 NSTAT_SYSINFO_ECN_IFNET_SYNACK_LOST,
3081 data->u.ifnet_ecn_stats.ecn_stat.ecn_synack_lost);
3082 nstat_set_keyval_scalar(&kv[i++],
3083 NSTAT_SYSINFO_ECN_IFNET_RECV_CE,
3084 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ce);
3085 nstat_set_keyval_scalar(&kv[i++],
3086 NSTAT_SYSINFO_ECN_IFNET_RECV_ECE,
3087 data->u.ifnet_ecn_stats.ecn_stat.ecn_recv_ece);
3088 nstat_set_keyval_scalar(&kv[i++],
3089 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_CE,
3090 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ce);
3091 nstat_set_keyval_scalar(&kv[i++],
3092 NSTAT_SYSINFO_ECN_IFNET_CONN_RECV_ECE,
3093 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_recv_ece);
3094 nstat_set_keyval_scalar(&kv[i++],
3095 NSTAT_SYSINFO_ECN_IFNET_CONN_PLNOCE,
3096 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plnoce);
3097 nstat_set_keyval_scalar(&kv[i++],
3098 NSTAT_SYSINFO_ECN_IFNET_CONN_PLCE,
3099 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_plce);
3100 nstat_set_keyval_scalar(&kv[i++],
3101 NSTAT_SYSINFO_ECN_IFNET_CONN_NOPLCE,
3102 data->u.ifnet_ecn_stats.ecn_stat.ecn_conn_noplce);
3103 nstat_set_keyval_scalar(&kv[i++],
3104 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNLOSS,
3105 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synloss);
3106 nstat_set_keyval_scalar(&kv[i++],
3107 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_REORDER,
3108 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_reorder);
3109 nstat_set_keyval_scalar(&kv[i++],
3110 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_CE,
3111 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_ce);
3112 nstat_set_keyval_scalar(&kv[i++],
3113 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_AVG,
3114 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_avg);
3115 nstat_set_keyval_scalar(&kv[i++],
3116 NSTAT_SYSINFO_ECN_IFNET_ON_RTT_VAR,
3117 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rtt_var);
3118 nstat_set_keyval_scalar(&kv[i++],
3119 NSTAT_SYSINFO_ECN_IFNET_ON_OOPERCENT,
3120 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.oo_percent);
3121 nstat_set_keyval_scalar(&kv[i++],
3122 NSTAT_SYSINFO_ECN_IFNET_ON_SACK_EPISODE,
3123 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.sack_episodes);
3124 nstat_set_keyval_scalar(&kv[i++],
3125 NSTAT_SYSINFO_ECN_IFNET_ON_REORDER_PERCENT,
3126 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.reorder_percent);
3127 nstat_set_keyval_scalar(&kv[i++],
3128 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_PERCENT,
3129 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_percent);
3130 nstat_set_keyval_scalar(&kv[i++],
3131 NSTAT_SYSINFO_ECN_IFNET_ON_RXMIT_DROP,
3132 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rxmit_drop);
3133 nstat_set_keyval_scalar(&kv[i++],
3134 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_AVG,
3135 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_avg);
3136 nstat_set_keyval_scalar(&kv[i++],
3137 NSTAT_SYSINFO_ECN_IFNET_OFF_RTT_VAR,
3138 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rtt_var);
3139 nstat_set_keyval_scalar(&kv[i++],
3140 NSTAT_SYSINFO_ECN_IFNET_OFF_OOPERCENT,
3141 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.oo_percent);
3142 nstat_set_keyval_scalar(&kv[i++],
3143 NSTAT_SYSINFO_ECN_IFNET_OFF_SACK_EPISODE,
3144 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.sack_episodes);
3145 nstat_set_keyval_scalar(&kv[i++],
3146 NSTAT_SYSINFO_ECN_IFNET_OFF_REORDER_PERCENT,
3147 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.reorder_percent);
3148 nstat_set_keyval_scalar(&kv[i++],
3149 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_PERCENT,
3150 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_percent);
3151 nstat_set_keyval_scalar(&kv[i++],
3152 NSTAT_SYSINFO_ECN_IFNET_OFF_RXMIT_DROP,
3153 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rxmit_drop);
3154 nstat_set_keyval_scalar(&kv[i++],
3155 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_TXPKTS,
3156 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_txpkts);
3157 nstat_set_keyval_scalar(&kv[i++],
3158 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXMTPKTS,
3159 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxmitpkts);
3160 nstat_set_keyval_scalar(&kv[i++],
3161 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_RXPKTS,
3162 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_rxpkts);
3163 nstat_set_keyval_scalar(&kv[i++],
3164 NSTAT_SYSINFO_ECN_IFNET_ON_TOTAL_OOPKTS,
3165 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.total_oopkts);
3166 nstat_set_keyval_scalar(&kv[i++],
3167 NSTAT_SYSINFO_ECN_IFNET_ON_DROP_RST,
3168 data->u.ifnet_ecn_stats.ecn_stat.ecn_on.rst_drop);
3169 nstat_set_keyval_scalar(&kv[i++],
3170 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_TXPKTS,
3171 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_txpkts);
3172 nstat_set_keyval_scalar(&kv[i++],
3173 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXMTPKTS,
3174 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxmitpkts);
3175 nstat_set_keyval_scalar(&kv[i++],
3176 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_RXPKTS,
3177 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_rxpkts);
3178 nstat_set_keyval_scalar(&kv[i++],
3179 NSTAT_SYSINFO_ECN_IFNET_OFF_TOTAL_OOPKTS,
3180 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.total_oopkts);
3181 nstat_set_keyval_scalar(&kv[i++],
3182 NSTAT_SYSINFO_ECN_IFNET_OFF_DROP_RST,
3183 data->u.ifnet_ecn_stats.ecn_stat.ecn_off.rst_drop);
3184 nstat_set_keyval_scalar(&kv[i++],
3185 NSTAT_SYSINFO_ECN_IFNET_TOTAL_CONN,
3186 data->u.ifnet_ecn_stats.ecn_stat.ecn_total_conn);
3187 nstat_set_keyval_scalar(&kv[i++],
3188 NSTAT_SYSINFO_IFNET_UNSENT_DATA,
3189 data->unsent_data_cnt);
3190 nstat_set_keyval_scalar(&kv[i++],
3191 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRST,
3192 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprst);
3193 nstat_set_keyval_scalar(&kv[i++],
3194 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_DROPRXMT,
3195 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_droprxmt);
3196 nstat_set_keyval_scalar(&kv[i++],
3197 NSTAT_SYSINFO_ECN_IFNET_FALLBACK_SYNRST,
3198 data->u.ifnet_ecn_stats.ecn_stat.ecn_fallback_synrst);
3199 break;
3200 }
3201 case NSTAT_SYSINFO_LIM_STATS:
3202 {
3203 nstat_set_keyval_string(&kv[i++],
3204 NSTAT_SYSINFO_LIM_IFNET_SIGNATURE,
3205 data->u.lim_stats.ifnet_signature,
3206 data->u.lim_stats.ifnet_siglen);
3207 nstat_set_keyval_scalar(&kv[i++],
3208 NSTAT_SYSINFO_LIM_IFNET_DL_MAX_BANDWIDTH,
3209 data->u.lim_stats.lim_stat.lim_dl_max_bandwidth);
3210 nstat_set_keyval_scalar(&kv[i++],
3211 NSTAT_SYSINFO_LIM_IFNET_UL_MAX_BANDWIDTH,
3212 data->u.lim_stats.lim_stat.lim_ul_max_bandwidth);
3213 nstat_set_keyval_scalar(&kv[i++],
3214 NSTAT_SYSINFO_LIM_IFNET_PACKET_LOSS_PERCENT,
3215 data->u.lim_stats.lim_stat.lim_packet_loss_percent);
3216 nstat_set_keyval_scalar(&kv[i++],
3217 NSTAT_SYSINFO_LIM_IFNET_PACKET_OOO_PERCENT,
3218 data->u.lim_stats.lim_stat.lim_packet_ooo_percent);
3219 nstat_set_keyval_scalar(&kv[i++],
3220 NSTAT_SYSINFO_LIM_IFNET_RTT_VARIANCE,
3221 data->u.lim_stats.lim_stat.lim_rtt_variance);
3222 nstat_set_keyval_scalar(&kv[i++],
3223 NSTAT_SYSINFO_LIM_IFNET_RTT_MIN,
3224 data->u.lim_stats.lim_stat.lim_rtt_min);
3225 nstat_set_keyval_scalar(&kv[i++],
3226 NSTAT_SYSINFO_LIM_IFNET_RTT_AVG,
3227 data->u.lim_stats.lim_stat.lim_rtt_average);
3228 nstat_set_keyval_scalar(&kv[i++],
3229 NSTAT_SYSINFO_LIM_IFNET_CONN_TIMEOUT_PERCENT,
3230 data->u.lim_stats.lim_stat.lim_conn_timeout_percent);
3231 nstat_set_keyval_scalar(&kv[i++],
3232 NSTAT_SYSINFO_LIM_IFNET_DL_DETECTED,
3233 data->u.lim_stats.lim_stat.lim_dl_detected);
3234 nstat_set_keyval_scalar(&kv[i++],
3235 NSTAT_SYSINFO_LIM_IFNET_UL_DETECTED,
3236 data->u.lim_stats.lim_stat.lim_ul_detected);
3237 nstat_set_keyval_scalar(&kv[i++],
3238 NSTAT_SYSINFO_LIM_IFNET_TYPE,
3239 data->u.lim_stats.ifnet_type);
3240 break;
3241 }
3242 case NSTAT_SYSINFO_NET_API_STATS:
3243 {
3244 nstat_set_keyval_scalar(&kv[i++],
3245 NSTAT_SYSINFO_API_IF_FLTR_ATTACH,
3246 data->u.net_api_stats.net_api_stats.nas_iflt_attach_total);
3247 nstat_set_keyval_scalar(&kv[i++],
3248 NSTAT_SYSINFO_API_IF_FLTR_ATTACH_OS,
3249 data->u.net_api_stats.net_api_stats.nas_iflt_attach_os_total);
3250 nstat_set_keyval_scalar(&kv[i++],
3251 NSTAT_SYSINFO_API_IP_FLTR_ADD,
3252 data->u.net_api_stats.net_api_stats.nas_ipf_add_total);
3253 nstat_set_keyval_scalar(&kv[i++],
3254 NSTAT_SYSINFO_API_IP_FLTR_ADD_OS,
3255 data->u.net_api_stats.net_api_stats.nas_ipf_add_os_total);
3256 nstat_set_keyval_scalar(&kv[i++],
3257 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH,
3258 data->u.net_api_stats.net_api_stats.nas_sfltr_register_total);
3259 nstat_set_keyval_scalar(&kv[i++],
3260 NSTAT_SYSINFO_API_SOCK_FLTR_ATTACH_OS,
3261 data->u.net_api_stats.net_api_stats.nas_sfltr_register_os_total);
3262
3263
3264 nstat_set_keyval_scalar(&kv[i++],
3265 NSTAT_SYSINFO_API_SOCK_ALLOC_TOTAL,
3266 data->u.net_api_stats.net_api_stats.nas_socket_alloc_total);
3267 nstat_set_keyval_scalar(&kv[i++],
3268 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL,
3269 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_total);
3270 nstat_set_keyval_scalar(&kv[i++],
3271 NSTAT_SYSINFO_API_SOCK_ALLOC_KERNEL_OS,
3272 data->u.net_api_stats.net_api_stats.nas_socket_in_kernel_os_total);
3273 nstat_set_keyval_scalar(&kv[i++],
3274 NSTAT_SYSINFO_API_SOCK_NECP_CLIENTUUID,
3275 data->u.net_api_stats.net_api_stats.nas_socket_necp_clientuuid_total);
3276
3277 nstat_set_keyval_scalar(&kv[i++],
3278 NSTAT_SYSINFO_API_SOCK_DOMAIN_LOCAL,
3279 data->u.net_api_stats.net_api_stats.nas_socket_domain_local_total);
3280 nstat_set_keyval_scalar(&kv[i++],
3281 NSTAT_SYSINFO_API_SOCK_DOMAIN_ROUTE,
3282 data->u.net_api_stats.net_api_stats.nas_socket_domain_route_total);
3283 nstat_set_keyval_scalar(&kv[i++],
3284 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET,
3285 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet_total);
3286 nstat_set_keyval_scalar(&kv[i++],
3287 NSTAT_SYSINFO_API_SOCK_DOMAIN_INET6,
3288 data->u.net_api_stats.net_api_stats.nas_socket_domain_inet6_total);
3289 nstat_set_keyval_scalar(&kv[i++],
3290 NSTAT_SYSINFO_API_SOCK_DOMAIN_SYSTEM,
3291 data->u.net_api_stats.net_api_stats.nas_socket_domain_system_total);
3292 nstat_set_keyval_scalar(&kv[i++],
3293 NSTAT_SYSINFO_API_SOCK_DOMAIN_MULTIPATH,
3294 data->u.net_api_stats.net_api_stats.nas_socket_domain_multipath_total);
3295 nstat_set_keyval_scalar(&kv[i++],
3296 NSTAT_SYSINFO_API_SOCK_DOMAIN_KEY,
3297 data->u.net_api_stats.net_api_stats.nas_socket_domain_key_total);
3298 nstat_set_keyval_scalar(&kv[i++],
3299 NSTAT_SYSINFO_API_SOCK_DOMAIN_NDRV,
3300 data->u.net_api_stats.net_api_stats.nas_socket_domain_ndrv_total);
3301 nstat_set_keyval_scalar(&kv[i++],
3302 NSTAT_SYSINFO_API_SOCK_DOMAIN_OTHER,
3303 data->u.net_api_stats.net_api_stats.nas_socket_domain_other_total);
3304
3305 nstat_set_keyval_scalar(&kv[i++],
3306 NSTAT_SYSINFO_API_SOCK_INET_STREAM,
3307 data->u.net_api_stats.net_api_stats.nas_socket_inet_stream_total);
3308 nstat_set_keyval_scalar(&kv[i++],
3309 NSTAT_SYSINFO_API_SOCK_INET_DGRAM,
3310 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_total);
3311 nstat_set_keyval_scalar(&kv[i++],
3312 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_CONNECTED,
3313 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_connected);
3314 nstat_set_keyval_scalar(&kv[i++],
3315 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_DNS,
3316 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_dns);
3317 nstat_set_keyval_scalar(&kv[i++],
3318 NSTAT_SYSINFO_API_SOCK_INET_DGRAM_NO_DATA,
3319 data->u.net_api_stats.net_api_stats.nas_socket_inet_dgram_no_data);
3320
3321 nstat_set_keyval_scalar(&kv[i++],
3322 NSTAT_SYSINFO_API_SOCK_INET6_STREAM,
3323 data->u.net_api_stats.net_api_stats.nas_socket_inet6_stream_total);
3324 nstat_set_keyval_scalar(&kv[i++],
3325 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM,
3326 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_total);
3327 nstat_set_keyval_scalar(&kv[i++],
3328 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_CONNECTED,
3329 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_connected);
3330 nstat_set_keyval_scalar(&kv[i++],
3331 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_DNS,
3332 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_dns);
3333 nstat_set_keyval_scalar(&kv[i++],
3334 NSTAT_SYSINFO_API_SOCK_INET6_DGRAM_NO_DATA,
3335 data->u.net_api_stats.net_api_stats.nas_socket_inet6_dgram_no_data);
3336
3337 nstat_set_keyval_scalar(&kv[i++],
3338 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN,
3339 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_total);
3340 nstat_set_keyval_scalar(&kv[i++],
3341 NSTAT_SYSINFO_API_SOCK_INET_MCAST_JOIN_OS,
3342 data->u.net_api_stats.net_api_stats.nas_socket_mcast_join_os_total);
3343
3344 nstat_set_keyval_scalar(&kv[i++],
3345 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_STREAM,
3346 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_stream_total);
3347 nstat_set_keyval_scalar(&kv[i++],
3348 NSTAT_SYSINFO_API_NEXUS_FLOW_INET_DATAGRAM,
3349 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet_dgram_total);
3350
3351 nstat_set_keyval_scalar(&kv[i++],
3352 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_STREAM,
3353 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_stream_total);
3354 nstat_set_keyval_scalar(&kv[i++],
3355 NSTAT_SYSINFO_API_NEXUS_FLOW_INET6_DATAGRAM,
3356 data->u.net_api_stats.net_api_stats.nas_nx_flow_inet6_dgram_total);
3357
3358 nstat_set_keyval_scalar(&kv[i++],
3359 NSTAT_SYSINFO_API_IFNET_ALLOC,
3360 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_total);
3361 nstat_set_keyval_scalar(&kv[i++],
3362 NSTAT_SYSINFO_API_IFNET_ALLOC_OS,
3363 data->u.net_api_stats.net_api_stats.nas_ifnet_alloc_os_total);
3364
3365 nstat_set_keyval_scalar(&kv[i++],
3366 NSTAT_SYSINFO_API_PF_ADDRULE,
3367 data->u.net_api_stats.net_api_stats.nas_pf_addrule_total);
3368 nstat_set_keyval_scalar(&kv[i++],
3369 NSTAT_SYSINFO_API_PF_ADDRULE_OS,
3370 data->u.net_api_stats.net_api_stats.nas_pf_addrule_os);
3371
3372 nstat_set_keyval_scalar(&kv[i++],
3373 NSTAT_SYSINFO_API_VMNET_START,
3374 data->u.net_api_stats.net_api_stats.nas_vmnet_total);
3375
3376
3377 nstat_set_keyval_scalar(&kv[i++],
3378 NSTAT_SYSINFO_API_REPORT_INTERVAL,
3379 data->u.net_api_stats.report_interval);
3380
3381 break;
3382 }
3383 }
3384 if (syscnt != NULL)
3385 {
3386 VERIFY(i > 0 && i <= nkeyvals);
3387 countsize = offsetof(nstat_sysinfo_counts,
3388 nstat_sysinfo_keyvals) +
3389 sizeof(nstat_sysinfo_keyval) * i;
3390 finalsize += countsize;
3391 syscnt->hdr.type = NSTAT_MSG_TYPE_SYSINFO_COUNTS;
3392 syscnt->hdr.length = finalsize;
3393 syscnt->counts.nstat_sysinfo_len = countsize;
3394
3395 result = ctl_enqueuedata(control->ncs_kctl,
3396 control->ncs_unit, syscnt, finalsize, CTL_DATA_EOR);
3397 if (result != 0)
3398 {
3399 nstat_stats.nstat_sysinfofailures += 1;
3400 }
3401 OSFree(syscnt, allocsize, nstat_malloc_tag);
3402 }
3403 return;
3404}
3405
3406__private_extern__ void
3407nstat_sysinfo_send_data(
3408 nstat_sysinfo_data *data)
3409{
3410 nstat_control_state *control;
3411
3412 lck_mtx_lock(&nstat_mtx);
3413 for (control = nstat_controls; control; control = control->ncs_next) {
3414 lck_mtx_lock(&control->ncs_mtx);
3415 if ((control->ncs_flags & NSTAT_FLAG_SYSINFO_SUBSCRIBED) != 0) {
3416 nstat_sysinfo_send_data_internal(control, data);
3417 }
3418 lck_mtx_unlock(&control->ncs_mtx);
3419 }
3420 lck_mtx_unlock(&nstat_mtx);
3421}
3422
/*
 * Emit all periodic system-level statistics reports.  Called from the
 * nstat idle timer (nstat_idle_check) after nstat_mtx has been dropped.
 */
static void
nstat_sysinfo_generate_report(void)
{
	mbuf_report_peak_usage();
	tcp_report_stats();
	nstat_ifnet_report_ecn_stats();
	nstat_ifnet_report_lim_stats();
	nstat_net_api_report_stats();
}
3432
3433#pragma mark -- net_api --
3434
// Snapshot of net_api_stats taken at the end of the previous report; the
// STATDIFF() macro in nstat_net_api_report_stats() diffs against it.
static struct net_api_stats net_api_stats_before;
// net_uptime() value at the last report (same units as net_uptime()).
static u_int64_t net_api_stats_last_report_time;
3437
3438static void
3439nstat_net_api_report_stats(void)
3440{
3441 struct nstat_sysinfo_data data;
3442 struct nstat_sysinfo_net_api_stats *st = &data.u.net_api_stats;
3443 u_int64_t uptime;
3444
3445 uptime = net_uptime();
3446
3447 if ((u_int32_t)(uptime - net_api_stats_last_report_time) <
3448 net_api_stats_report_interval)
3449 return;
3450
3451 st->report_interval = uptime - net_api_stats_last_report_time;
3452 net_api_stats_last_report_time = uptime;
3453
3454 data.flags = NSTAT_SYSINFO_NET_API_STATS;
3455 data.unsent_data_cnt = 0;
3456
3457 /*
3458 * Some of the fields in the report are the current value and
3459 * other fields are the delta from the last report:
3460 * - Report difference for the per flow counters as they increase
3461 * with time
3462 * - Report current value for other counters as they tend not to change
3463 * much with time
3464 */
3465#define STATCOPY(f) \
3466 (st->net_api_stats.f = net_api_stats.f)
3467#define STATDIFF(f) \
3468 (st->net_api_stats.f = net_api_stats.f - net_api_stats_before.f)
3469
3470 STATCOPY(nas_iflt_attach_count);
3471 STATCOPY(nas_iflt_attach_total);
3472 STATCOPY(nas_iflt_attach_os_total);
3473
3474 STATCOPY(nas_ipf_add_count);
3475 STATCOPY(nas_ipf_add_total);
3476 STATCOPY(nas_ipf_add_os_total);
3477
3478 STATCOPY(nas_sfltr_register_count);
3479 STATCOPY(nas_sfltr_register_total);
3480 STATCOPY(nas_sfltr_register_os_total);
3481
3482 STATDIFF(nas_socket_alloc_total);
3483 STATDIFF(nas_socket_in_kernel_total);
3484 STATDIFF(nas_socket_in_kernel_os_total);
3485 STATDIFF(nas_socket_necp_clientuuid_total);
3486
3487 STATDIFF(nas_socket_domain_local_total);
3488 STATDIFF(nas_socket_domain_route_total);
3489 STATDIFF(nas_socket_domain_inet_total);
3490 STATDIFF(nas_socket_domain_inet6_total);
3491 STATDIFF(nas_socket_domain_system_total);
3492 STATDIFF(nas_socket_domain_multipath_total);
3493 STATDIFF(nas_socket_domain_key_total);
3494 STATDIFF(nas_socket_domain_ndrv_total);
3495 STATDIFF(nas_socket_domain_other_total);
3496
3497 STATDIFF(nas_socket_inet_stream_total);
3498 STATDIFF(nas_socket_inet_dgram_total);
3499 STATDIFF(nas_socket_inet_dgram_connected);
3500 STATDIFF(nas_socket_inet_dgram_dns);
3501 STATDIFF(nas_socket_inet_dgram_no_data);
3502
3503 STATDIFF(nas_socket_inet6_stream_total);
3504 STATDIFF(nas_socket_inet6_dgram_total);
3505 STATDIFF(nas_socket_inet6_dgram_connected);
3506 STATDIFF(nas_socket_inet6_dgram_dns);
3507 STATDIFF(nas_socket_inet6_dgram_no_data);
3508
3509 STATDIFF(nas_socket_mcast_join_total);
3510 STATDIFF(nas_socket_mcast_join_os_total);
3511
3512 STATDIFF(nas_sock_inet6_stream_exthdr_in);
3513 STATDIFF(nas_sock_inet6_stream_exthdr_out);
3514 STATDIFF(nas_sock_inet6_dgram_exthdr_in);
3515 STATDIFF(nas_sock_inet6_dgram_exthdr_out);
3516
3517 STATDIFF(nas_nx_flow_inet_stream_total);
3518 STATDIFF(nas_nx_flow_inet_dgram_total);
3519
3520 STATDIFF(nas_nx_flow_inet6_stream_total);
3521 STATDIFF(nas_nx_flow_inet6_dgram_total);
3522
3523 STATCOPY(nas_ifnet_alloc_count);
3524 STATCOPY(nas_ifnet_alloc_total);
3525 STATCOPY(nas_ifnet_alloc_os_count);
3526 STATCOPY(nas_ifnet_alloc_os_total);
3527
3528 STATCOPY(nas_pf_addrule_total);
3529 STATCOPY(nas_pf_addrule_os);
3530
3531 STATCOPY(nas_vmnet_total);
3532
3533#undef STATCOPY
3534#undef STATDIFF
3535
3536 nstat_sysinfo_send_data(&data);
3537
3538 /*
3539 * Save a copy of the current fields so we can diff them the next time
3540 */
3541 memcpy(&net_api_stats_before, &net_api_stats,
3542 sizeof(struct net_api_stats));
3543 _CASSERT(sizeof (net_api_stats_before) == sizeof (net_api_stats));
3544}
3545
3546
3547#pragma mark -- Kernel Control Socket --
3548
static kern_ctl_ref	nstat_ctlref = NULL;	// handle returned by ctl_register()
static lck_grp_t	*nstat_lck_grp = NULL;	// lock group shared by all nstat mutexes

// Kernel control socket callbacks, registered in nstat_control_register().
static errno_t	nstat_control_connect(kern_ctl_ref kctl, struct sockaddr_ctl *sac, void **uinfo);
static errno_t	nstat_control_disconnect(kern_ctl_ref kctl, u_int32_t unit, void *uinfo);
static errno_t	nstat_control_send(kern_ctl_ref kctl, u_int32_t unit, void *uinfo, mbuf_t m, int flags);
3555
3556static errno_t
3557nstat_enqueue_success(
3558 uint64_t context,
3559 nstat_control_state *state,
3560 u_int16_t flags)
3561{
3562 nstat_msg_hdr success;
3563 errno_t result;
3564
3565 bzero(&success, sizeof(success));
3566 success.context = context;
3567 success.type = NSTAT_MSG_TYPE_SUCCESS;
3568 success.length = sizeof(success);
3569 success.flags = flags;
3570 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &success,
3571 sizeof(success), CTL_DATA_EOR | CTL_DATA_CRIT);
3572 if (result != 0) {
3573 if (nstat_debug != 0)
3574 printf("%s: could not enqueue success message %d\n",
3575 __func__, result);
3576 nstat_stats.nstat_successmsgfailures += 1;
3577 }
3578 return result;
3579}
3580
3581static errno_t
3582nstat_control_send_goodbye(
3583 nstat_control_state *state,
3584 nstat_src *src)
3585{
3586 errno_t result = 0;
3587 int failed = 0;
3588
3589 if (nstat_control_reporting_allowed(state, src))
3590 {
3591 if ((state->ncs_flags & NSTAT_FLAG_SUPPORTS_UPDATES) != 0)
3592 {
3593 result = nstat_control_send_update(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3594 if (result != 0)
3595 {
3596 failed = 1;
3597 if (nstat_debug != 0)
3598 printf("%s - nstat_control_send_update() %d\n", __func__, result);
3599 }
3600 }
3601 else
3602 {
3603 // send one last counts notification
3604 result = nstat_control_send_counts(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING, NULL);
3605 if (result != 0)
3606 {
3607 failed = 1;
3608 if (nstat_debug != 0)
3609 printf("%s - nstat_control_send_counts() %d\n", __func__, result);
3610 }
3611
3612 // send a last description
3613 result = nstat_control_send_description(state, src, 0, NSTAT_MSG_HDR_FLAG_CLOSING);
3614 if (result != 0)
3615 {
3616 failed = 1;
3617 if (nstat_debug != 0)
3618 printf("%s - nstat_control_send_description() %d\n", __func__, result);
3619 }
3620 }
3621 }
3622
3623 // send the source removed notification
3624 result = nstat_control_send_removed(state, src);
3625 if (result != 0 && nstat_debug)
3626 {
3627 failed = 1;
3628 if (nstat_debug != 0)
3629 printf("%s - nstat_control_send_removed() %d\n", __func__, result);
3630 }
3631
3632 if (failed != 0)
3633 nstat_stats.nstat_control_send_goodbye_failures++;
3634
3635
3636 return result;
3637}
3638
3639static errno_t
3640nstat_flush_accumulated_msgs(
3641 nstat_control_state *state)
3642{
3643 errno_t result = 0;
3644 if (state->ncs_accumulated != NULL && mbuf_len(state->ncs_accumulated) > 0)
3645 {
3646 mbuf_pkthdr_setlen(state->ncs_accumulated, mbuf_len(state->ncs_accumulated));
3647 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, state->ncs_accumulated, CTL_DATA_EOR);
3648 if (result != 0)
3649 {
3650 nstat_stats.nstat_flush_accumulated_msgs_failures++;
3651 if (nstat_debug != 0)
3652 printf("%s - ctl_enqueuembuf failed: %d\n", __func__, result);
3653 mbuf_freem(state->ncs_accumulated);
3654 }
3655 state->ncs_accumulated = NULL;
3656 }
3657 return result;
3658}
3659
/*
 * Append one message (header + payload of `length` bytes starting at hdr)
 * to the client's accumulation mbuf, flushing the pending mbuf first when
 * it lacks room.  If the mbuf path fails, falls back to enqueueing the
 * message directly; only when both paths fail is the message lost
 * (counted in nstat_stats.nstat_accumulate_msg_failures).
 */
static errno_t
nstat_accumulate_msg(
	nstat_control_state	*state,
	nstat_msg_hdr		*hdr,
	size_t			length)
{
	// Not enough trailing space for this message: push out what we have.
	if (state->ncs_accumulated && mbuf_trailingspace(state->ncs_accumulated) < length)
	{
		// Will send the current mbuf
		nstat_flush_accumulated_msgs(state);
	}

	errno_t result = 0;

	if (state->ncs_accumulated == NULL)
	{
		// Start a fresh single-buffer packet for subsequent messages.
		unsigned int one = 1;
		if (mbuf_allocpacket(MBUF_DONTWAIT, NSTAT_MAX_MSG_SIZE, &one, &state->ncs_accumulated) != 0)
		{
			if (nstat_debug != 0)
				printf("%s - mbuf_allocpacket failed\n", __func__);
			result = ENOMEM;
		}
		else
		{
			mbuf_setlen(state->ncs_accumulated, 0);
		}
	}

	if (result == 0)
	{
		// Stamp the wire length into the header, then append the
		// whole message at the current end of the accumulation mbuf.
		hdr->length = length;
		result = mbuf_copyback(state->ncs_accumulated, mbuf_len(state->ncs_accumulated),
		    length, hdr, MBUF_DONTWAIT);
	}

	if (result != 0)
	{
		// mbuf path failed: flush anything already pending, then try
		// to enqueue this one message directly to the control socket.
		nstat_flush_accumulated_msgs(state);
		if (nstat_debug != 0)
			printf("%s - resorting to ctl_enqueuedata\n", __func__);
		result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, hdr, length, CTL_DATA_EOR);
	}

	if (result != 0)
		nstat_stats.nstat_accumulate_msg_failures++;

	return result;
}
3709
/*
 * Periodic (60 s) thread call: for every client not marked REQCOUNTS,
 * reap sources whose underlying object is gone (per the provider's
 * nstat_gone callback), sending each a goodbye; then clear the REQCOUNTS
 * flag, emit system-level reports, and finally release the reaped
 * sources after all locks have been dropped.  Re-arms itself for as long
 * as any client remains connected.
 */
static void*
nstat_idle_check(
	__unused thread_call_param_t p0,
	__unused thread_call_param_t p1)
{
	nstat_control_state *control;
	nstat_src *src, *tmpsrc;
	tailq_head_nstat_src dead_list;
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&nstat_mtx);

	// Mark the timer as no longer pending; re-armed below if needed.
	nstat_idle_time = 0;

	for (control = nstat_controls; control; control = control->ncs_next)
	{
		lck_mtx_lock(&control->ncs_mtx);
		if (!(control->ncs_flags & NSTAT_FLAG_REQCOUNTS))
		{
			TAILQ_FOREACH_SAFE(src, &control->ncs_src_queue, ns_control_link, tmpsrc)
			{
				if (src->provider->nstat_gone(src->cookie))
				{
					errno_t result;

					// Pull it off the list
					TAILQ_REMOVE(&control->ncs_src_queue, src, ns_control_link);

					// result intentionally unused: goodbye
					// failures are counted inside
					// nstat_control_send_goodbye().
					result = nstat_control_send_goodbye(control, src);

					// Put this on the list to release later
					TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
				}
			}
		}
		control->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
		lck_mtx_unlock(&control->ncs_mtx);
	}

	// Re-arm the 60-second timer while any client remains connected.
	if (nstat_controls)
	{
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	/* Generate any system level reports, if needed */
	nstat_sysinfo_generate_report();

	// Release the sources now that we aren't holding lots of locks
	while ((src = TAILQ_FIRST(&dead_list)))
	{
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}


	return NULL;
}
3770
3771static void
3772nstat_control_register(void)
3773{
3774 // Create our lock group first
3775 lck_grp_attr_t *grp_attr = lck_grp_attr_alloc_init();
3776 lck_grp_attr_setdefault(grp_attr);
3777 nstat_lck_grp = lck_grp_alloc_init("network statistics kctl", grp_attr);
3778 lck_grp_attr_free(grp_attr);
3779
3780 lck_mtx_init(&nstat_mtx, nstat_lck_grp, NULL);
3781
3782 // Register the control
3783 struct kern_ctl_reg nstat_control;
3784 bzero(&nstat_control, sizeof(nstat_control));
3785 strlcpy(nstat_control.ctl_name, NET_STAT_CONTROL_NAME, sizeof(nstat_control.ctl_name));
3786 nstat_control.ctl_flags = CTL_FLAG_REG_EXTENDED | CTL_FLAG_REG_CRIT;
3787 nstat_control.ctl_sendsize = nstat_sendspace;
3788 nstat_control.ctl_recvsize = nstat_recvspace;
3789 nstat_control.ctl_connect = nstat_control_connect;
3790 nstat_control.ctl_disconnect = nstat_control_disconnect;
3791 nstat_control.ctl_send = nstat_control_send;
3792
3793 ctl_register(&nstat_control, &nstat_ctlref);
3794}
3795
3796static void
3797nstat_control_cleanup_source(
3798 nstat_control_state *state,
3799 struct nstat_src *src,
3800 boolean_t locked)
3801{
3802 errno_t result;
3803
3804 if (state)
3805 {
3806 result = nstat_control_send_removed(state, src);
3807 if (result != 0)
3808 {
3809 nstat_stats.nstat_control_cleanup_source_failures++;
3810 if (nstat_debug != 0)
3811 printf("%s - nstat_control_send_removed() %d\n",
3812 __func__, result);
3813 }
3814 }
3815 // Cleanup the source if we found it.
3816 src->provider->nstat_release(src->cookie, locked);
3817 OSFree(src, sizeof(*src), nstat_malloc_tag);
3818}
3819
3820
3821static bool
3822nstat_control_reporting_allowed(
3823 nstat_control_state *state,
3824 nstat_src *src)
3825{
3826 if (src->provider->nstat_reporting_allowed == NULL)
3827 return TRUE;
3828
3829 return (
3830 src->provider->nstat_reporting_allowed(src->cookie,
3831 &state->ncs_provider_filters[src->provider->nstat_provider_id])
3832 );
3833}
3834
3835
/*
 * Connect handler for the ntstat kernel control socket.
 * Allocates and zeroes per-client state, records the kctl ref and unit
 * for later enqueues, links the state into the global nstat_controls
 * list, and arms the periodic idle check if it is not already running.
 */
static errno_t
nstat_control_connect(
	kern_ctl_ref kctl,
	struct sockaddr_ctl *sac,
	void **uinfo)
{
	nstat_control_state *state = OSMalloc(sizeof(*state), nstat_malloc_tag);
	if (state == NULL) return ENOMEM;

	bzero(state, sizeof(*state));
	lck_mtx_init(&state->ncs_mtx, nstat_lck_grp, NULL);
	state->ncs_kctl = kctl;
	state->ncs_unit = sac->sc_unit;
	state->ncs_flags = NSTAT_FLAG_REQCOUNTS;
	*uinfo = state;

	lck_mtx_lock(&nstat_mtx);
	// Push onto the head of the global singly-linked client list.
	state->ncs_next = nstat_controls;
	nstat_controls = state;

	if (nstat_idle_time == 0)
	{
		// First active client: schedule the idle check 60 seconds out.
		clock_interval_to_deadline(60, NSEC_PER_SEC, &nstat_idle_time);
		thread_call_func_delayed((thread_call_func_t)nstat_idle_check, NULL, nstat_idle_time);
	}

	lck_mtx_unlock(&nstat_mtx);

	return 0;
}
3866
/*
 * Disconnect handler for the ntstat kernel control socket.
 * Unlinks the client state from the global list, removes any
 * per-provider watchers, marks the state for cleanup, and releases
 * every source owned by the client before destroying the state.
 */
static errno_t
nstat_control_disconnect(
	__unused kern_ctl_ref kctl,
	__unused u_int32_t unit,
	void *uinfo)
{
	u_int32_t watching;
	nstat_control_state *state = (nstat_control_state*)uinfo;
	tailq_head_nstat_src cleanup_list;
	nstat_src *src;

	TAILQ_INIT(&cleanup_list);

	// pull it out of the global list of states
	lck_mtx_lock(&nstat_mtx);
	nstat_control_state **statepp;
	for (statepp = &nstat_controls; *statepp; statepp = &(*statepp)->ncs_next)
	{
		if (*statepp == state)
		{
			*statepp = state->ncs_next;
			break;
		}
	}
	lck_mtx_unlock(&nstat_mtx);

	lck_mtx_lock(&state->ncs_mtx);
	// Stop watching for sources
	nstat_provider *provider;
	watching = state->ncs_watching;
	state->ncs_watching = 0;
	for (provider = nstat_providers; provider && watching; provider = provider->next)
	{
		// ncs_watching is a bitmask keyed by provider id; clear each
		// bit as the corresponding watcher is removed so the loop can
		// stop early once all watched providers are handled.
		if ((watching & (1 << provider->nstat_provider_id)) != 0)
		{
			watching &= ~(1 << provider->nstat_provider_id);
			provider->nstat_watcher_remove(state);
		}
	}

	// set cleanup flags
	state->ncs_flags |= NSTAT_FLAG_CLEANUP;

	if (state->ncs_accumulated)
	{
		// Drop any batched messages that were never flushed.
		mbuf_freem(state->ncs_accumulated);
		state->ncs_accumulated = NULL;
	}

	// Copy out the list of sources
	TAILQ_CONCAT(&cleanup_list, &state->ncs_src_queue, ns_control_link);
	lck_mtx_unlock(&state->ncs_mtx);

	// Release each source outside the lock. Passing NULL for state
	// suppresses SRC_REMOVED notifications since the client is gone.
	while ((src = TAILQ_FIRST(&cleanup_list)))
	{
		TAILQ_REMOVE(&cleanup_list, src, ns_control_link);
		nstat_control_cleanup_source(NULL, src, FALSE);
	}

	lck_mtx_destroy(&state->ncs_mtx, nstat_lck_grp);
	OSFree(state, sizeof(*state), nstat_malloc_tag);

	return 0;
}
3931
3932static nstat_src_ref_t
3933nstat_control_next_src_ref(
3934 nstat_control_state *state)
3935{
3936 return ++state->ncs_next_srcref;
3937}
3938
3939static errno_t
3940nstat_control_send_counts(
3941 nstat_control_state *state,
3942 nstat_src *src,
3943 unsigned long long context,
3944 u_int16_t hdr_flags,
3945 int *gone)
3946{
3947 nstat_msg_src_counts counts;
3948 errno_t result = 0;
3949
3950 /* Some providers may not have any counts to send */
3951 if (src->provider->nstat_counts == NULL)
3952 return (0);
3953
3954 bzero(&counts, sizeof(counts));
3955 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3956 counts.hdr.length = sizeof(counts);
3957 counts.hdr.flags = hdr_flags;
3958 counts.hdr.context = context;
3959 counts.srcref = src->srcref;
3960 counts.event_flags = 0;
3961
3962 if (src->provider->nstat_counts(src->cookie, &counts.counts, gone) == 0)
3963 {
3964 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) &&
3965 counts.counts.nstat_rxbytes == 0 &&
3966 counts.counts.nstat_txbytes == 0)
3967 {
3968 result = EAGAIN;
3969 }
3970 else
3971 {
3972 result = ctl_enqueuedata(state->ncs_kctl,
3973 state->ncs_unit, &counts, sizeof(counts),
3974 CTL_DATA_EOR);
3975 if (result != 0)
3976 nstat_stats.nstat_sendcountfailures += 1;
3977 }
3978 }
3979 return result;
3980}
3981
3982static errno_t
3983nstat_control_append_counts(
3984 nstat_control_state *state,
3985 nstat_src *src,
3986 int *gone)
3987{
3988 /* Some providers may not have any counts to send */
3989 if (!src->provider->nstat_counts) return 0;
3990
3991 nstat_msg_src_counts counts;
3992 bzero(&counts, sizeof(counts));
3993 counts.hdr.type = NSTAT_MSG_TYPE_SRC_COUNTS;
3994 counts.hdr.length = sizeof(counts);
3995 counts.srcref = src->srcref;
3996 counts.event_flags = 0;
3997
3998 errno_t result = 0;
3999 result = src->provider->nstat_counts(src->cookie, &counts.counts, gone);
4000 if (result != 0)
4001 {
4002 return result;
4003 }
4004
4005 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4006 counts.counts.nstat_rxbytes == 0 && counts.counts.nstat_txbytes == 0)
4007 {
4008 return EAGAIN;
4009 }
4010
4011 return nstat_accumulate_msg(state, &counts.hdr, counts.hdr.length);
4012}
4013
/*
 * Build and enqueue a SRC_DESC message for a single source.
 * Allocates an mbuf sized for the provider's descriptor, has the
 * provider fill in the payload, then enqueues on the client's control
 * socket. The mbuf is freed on every failure path; on success
 * ownership passes to the kernel control subsystem.
 */
static int
nstat_control_send_description(
	nstat_control_state *state,
	nstat_src *src,
	u_int64_t context,
	u_int16_t hdr_flags)
{
	// Provider doesn't support getting the descriptor? Done.
	if (src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL)
	{
		return EOPNOTSUPP;
	}

	// Allocate storage for the descriptor message
	mbuf_t msg;
	unsigned int one = 1;
	u_int32_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
	if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
	{
		return ENOMEM;
	}

	nstat_msg_src_description *desc = (nstat_msg_src_description*)mbuf_data(msg);
	bzero(desc, size);
	mbuf_setlen(msg, size);
	mbuf_pkthdr_setlen(msg, mbuf_len(msg));

	// Query the provider for the provider specific bits
	errno_t result = src->provider->nstat_copy_descriptor(src->cookie, desc->data, src->provider->nstat_descriptor_length);

	if (result != 0)
	{
		mbuf_freem(msg);
		return result;
	}

	desc->hdr.context = context;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
	desc->hdr.length = size;
	desc->hdr.flags = hdr_flags;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
	if (result != 0)
	{
		// Enqueue failed: the mbuf is still ours to free.
		nstat_stats.nstat_descriptionfailures += 1;
		mbuf_freem(msg);
	}

	return result;
}
4068
4069static errno_t
4070nstat_control_append_description(
4071 nstat_control_state *state,
4072 nstat_src *src)
4073{
4074 size_t size = offsetof(nstat_msg_src_description, data) + src->provider->nstat_descriptor_length;
4075 if (size > 512 || src->provider->nstat_descriptor_length == 0 ||
4076 src->provider->nstat_copy_descriptor == NULL)
4077 {
4078 return EOPNOTSUPP;
4079 }
4080
4081 // Fill out a buffer on the stack, we will copy to the mbuf later
4082 u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
4083 bzero(buffer, size);
4084
4085 nstat_msg_src_description *desc = (nstat_msg_src_description*)buffer;
4086 desc->hdr.type = NSTAT_MSG_TYPE_SRC_DESC;
4087 desc->hdr.length = size;
4088 desc->srcref = src->srcref;
4089 desc->event_flags = 0;
4090 desc->provider = src->provider->nstat_provider_id;
4091
4092 errno_t result = 0;
4093 // Fill in the description
4094 // Query the provider for the provider specific bits
4095 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4096 src->provider->nstat_descriptor_length);
4097 if (result != 0)
4098 {
4099 return result;
4100 }
4101
4102 return nstat_accumulate_msg(state, &desc->hdr, size);
4103}
4104
4105static int
4106nstat_control_send_update(
4107 nstat_control_state *state,
4108 nstat_src *src,
4109 u_int64_t context,
4110 u_int16_t hdr_flags,
4111 int *gone)
4112{
4113 // Provider doesn't support getting the descriptor or counts? Done.
4114 if ((src->provider->nstat_descriptor_length == 0 ||
4115 src->provider->nstat_copy_descriptor == NULL) &&
4116 src->provider->nstat_counts == NULL)
4117 {
4118 return EOPNOTSUPP;
4119 }
4120
4121 // Allocate storage for the descriptor message
4122 mbuf_t msg;
4123 unsigned int one = 1;
4124 u_int32_t size = offsetof(nstat_msg_src_update, data) +
4125 src->provider->nstat_descriptor_length;
4126 if (mbuf_allocpacket(MBUF_DONTWAIT, size, &one, &msg) != 0)
4127 {
4128 return ENOMEM;
4129 }
4130
4131 nstat_msg_src_update *desc = (nstat_msg_src_update*)mbuf_data(msg);
4132 bzero(desc, size);
4133 desc->hdr.context = context;
4134 desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
4135 desc->hdr.length = size;
4136 desc->hdr.flags = hdr_flags;
4137 desc->srcref = src->srcref;
4138 desc->event_flags = 0;
4139 desc->provider = src->provider->nstat_provider_id;
4140
4141 mbuf_setlen(msg, size);
4142 mbuf_pkthdr_setlen(msg, mbuf_len(msg));
4143
4144 errno_t result = 0;
4145 if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
4146 {
4147 // Query the provider for the provider specific bits
4148 result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
4149 src->provider->nstat_descriptor_length);
4150 if (result != 0)
4151 {
4152 mbuf_freem(msg);
4153 return result;
4154 }
4155 }
4156
4157 if (src->provider->nstat_counts)
4158 {
4159 result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
4160 if (result == 0)
4161 {
4162 if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
4163 desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
4164 {
4165 result = EAGAIN;
4166 }
4167 else
4168 {
4169 result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg, CTL_DATA_EOR);
4170 }
4171 }
4172 }
4173
4174 if (result != 0)
4175 {
4176 nstat_stats.nstat_srcupatefailures += 1;
4177 mbuf_freem(msg);
4178 }
4179
4180 return result;
4181}
4182
/*
 * Append a SRC_UPDATE message (descriptor plus counts) for one source
 * to the client's accumulation buffer. Requires that the whole message
 * fit in a 512-byte stack buffer and that the provider supports a
 * descriptor, counts, or both. Returns EAGAIN when an all-zero report
 * is suppressed by the NOZEROBYTES filter.
 */
static errno_t
nstat_control_append_update(
	nstat_control_state *state,
	nstat_src *src,
	int *gone)
{
	size_t size = offsetof(nstat_msg_src_update, data) + src->provider->nstat_descriptor_length;
	if (size > 512 || ((src->provider->nstat_descriptor_length == 0 ||
	    src->provider->nstat_copy_descriptor == NULL) &&
	    src->provider->nstat_counts == NULL))
	{
		return EOPNOTSUPP;
	}

	// Fill out a buffer on the stack, we will copy to the mbuf later
	u_int64_t buffer[size/sizeof(u_int64_t) + 1]; // u_int64_t to ensure alignment
	bzero(buffer, size);

	nstat_msg_src_update *desc = (nstat_msg_src_update*)buffer;
	desc->hdr.type = NSTAT_MSG_TYPE_SRC_UPDATE;
	desc->hdr.length = size;
	desc->srcref = src->srcref;
	desc->event_flags = 0;
	desc->provider = src->provider->nstat_provider_id;

	errno_t result = 0;
	// Fill in the description
	if (src->provider->nstat_descriptor_length != 0 && src->provider->nstat_copy_descriptor)
	{
		// Query the provider for the provider specific bits
		result = src->provider->nstat_copy_descriptor(src->cookie, desc->data,
		    src->provider->nstat_descriptor_length);
		if (result != 0)
		{
			nstat_stats.nstat_copy_descriptor_failures++;
			if (nstat_debug != 0)
				printf("%s: src->provider->nstat_copy_descriptor: %d\n", __func__, result);
			return result;
		}
	}

	if (src->provider->nstat_counts)
	{
		result = src->provider->nstat_counts(src->cookie, &desc->counts, gone);
		if (result != 0)
		{
			nstat_stats.nstat_provider_counts_failures++;
			if (nstat_debug != 0)
				printf("%s: src->provider->nstat_counts: %d\n", __func__, result);
			return result;
		}

		// Suppress all-zero reports when the client requested it.
		if ((src->filter & NSTAT_FILTER_NOZEROBYTES) == NSTAT_FILTER_NOZEROBYTES &&
		    desc->counts.nstat_rxbytes == 0 && desc->counts.nstat_txbytes == 0)
		{
			return EAGAIN;
		}
	}

	return nstat_accumulate_msg(state, &desc->hdr, size);
}
4244
4245static errno_t
4246nstat_control_send_removed(
4247 nstat_control_state *state,
4248 nstat_src *src)
4249{
4250 nstat_msg_src_removed removed;
4251 errno_t result;
4252
4253 bzero(&removed, sizeof(removed));
4254 removed.hdr.type = NSTAT_MSG_TYPE_SRC_REMOVED;
4255 removed.hdr.length = sizeof(removed);
4256 removed.hdr.context = 0;
4257 removed.srcref = src->srcref;
4258 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &removed,
4259 sizeof(removed), CTL_DATA_EOR | CTL_DATA_CRIT);
4260 if (result != 0)
4261 nstat_stats.nstat_msgremovedfailures += 1;
4262
4263 return result;
4264}
4265
/*
 * Handle an ADD_SRC request: look up a single provider entry from the
 * client-supplied parameter and register it as a source for this
 * client. If the parameter spans multiple mbufs in the chain it is
 * copied into a contiguous temporary buffer before the lookup.
 */
static errno_t
nstat_control_handle_add_request(
	nstat_control_state *state,
	mbuf_t m)
{
	errno_t result;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < offsetof(nstat_msg_add_src_req, param))
	{
		return EINVAL;
	}

	// Calculate the length of the parameter field
	int32_t paramlength = mbuf_pkthdr_len(m) - offsetof(nstat_msg_add_src_req, param);
	if (paramlength < 0 || paramlength > 2 * 1024)
	{
		return EINVAL;
	}

	nstat_provider *provider = NULL;
	nstat_provider_cookie_t cookie = NULL;
	nstat_msg_add_src_req *req = mbuf_data(m);
	if (mbuf_pkthdr_len(m) > mbuf_len(m))
	{
		// parameter is too large, we need to make a contiguous copy
		void *data = OSMalloc(paramlength, nstat_malloc_tag);

		if (!data) return ENOMEM;
		result = mbuf_copydata(m, offsetof(nstat_msg_add_src_req, param), paramlength, data);
		if (result == 0)
			result = nstat_lookup_entry(req->provider, data, paramlength, &provider, &cookie);
		OSFree(data, paramlength, nstat_malloc_tag);
	}
	else
	{
		// Parameter is contiguous in the first mbuf; use it in place.
		result = nstat_lookup_entry(req->provider, (void*)&req->param, paramlength, &provider, &cookie);
	}

	if (result != 0)
	{
		return result;
	}

	result = nstat_control_source_add(req->hdr.context, state, provider, cookie);
	if (result != 0)
		// Source registration failed: drop the lookup's cookie ref.
		provider->nstat_release(cookie, 0);

	return result;
}
4316
4317static errno_t
4318nstat_set_provider_filter(
4319 nstat_control_state *state,
4320 nstat_msg_add_all_srcs *req)
4321{
4322 nstat_provider_id_t provider_id = req->provider;
4323
4324 u_int32_t prev_ncs_watching = atomic_or_32_ov(&state->ncs_watching, (1 << provider_id));
4325
4326 if ((prev_ncs_watching & (1 << provider_id)) != 0)
4327 return EALREADY;
4328
4329 state->ncs_watching |= (1 << provider_id);
4330 state->ncs_provider_filters[provider_id].npf_flags = req->filter;
4331 state->ncs_provider_filters[provider_id].npf_events = req->events;
4332 state->ncs_provider_filters[provider_id].npf_pid = req->target_pid;
4333 uuid_copy(state->ncs_provider_filters[provider_id].npf_uuid, req->target_uuid);
4334 return 0;
4335}
4336
/*
 * Handle an ADD_ALL_SRCS request: after a privilege check (when
 * enabled), register this client as a watcher with the requested
 * provider so it is told about every existing and future source.
 */
static errno_t
nstat_control_handle_add_all(
	nstat_control_state *state,
	mbuf_t m)
{
	errno_t result = 0;

	// Verify the header fits in the first mbuf
	if (mbuf_len(m) < sizeof(nstat_msg_add_all_srcs))
	{
		return EINVAL;
	}

	nstat_msg_add_all_srcs *req = mbuf_data(m);
	if (req->provider > NSTAT_PROVIDER_LAST) return ENOENT;

	nstat_provider *provider = nstat_find_provider_by_id(req->provider);

	if (!provider) return ENOENT;
	if (provider->nstat_watcher_add == NULL) return ENOTSUP;

	// Optionally require PRIV_NET_PRIVILEGED_NETWORK_STATISTICS.
	if (nstat_privcheck != 0) {
		result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
		if (result != 0)
			return result;
	}

	lck_mtx_lock(&state->ncs_mtx);
	if (req->filter & NSTAT_FILTER_SUPPRESS_SRC_ADDED)
	{
		// Suppression of source messages implicitly requires the use of update messages
		state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;
	}
	lck_mtx_unlock(&state->ncs_mtx);

	// rdar://problem/30301300 Different providers require different synchronization
	// to ensure that a new entry does not get double counted due to being added prior
	// to all current provider entries being added. Hence pass the provider the details
	// in the original request for this to be applied atomically

	result = provider->nstat_watcher_add(state, req);

	if (result == 0)
		nstat_enqueue_success(req->hdr.context, state, 0);

	return result;
}
4385
/*
 * Register a provider cookie as a new source for a client.
 * Optionally announces the source to the client with a SRC_ADDED
 * message (unless suppressed by the provider filter), assigns a fresh
 * source ref, and links the source onto the client's queue. On any
 * failure the caller retains ownership of the provider cookie.
 */
static errno_t
nstat_control_source_add(
	u_int64_t context,
	nstat_control_state *state,
	nstat_provider *provider,
	nstat_provider_cookie_t cookie)
{
	// Fill out source added message if appropriate
	mbuf_t msg = NULL;
	nstat_src_ref_t *srcrefp = NULL;

	u_int64_t provider_filter_flagss =
	    state->ncs_provider_filters[provider->nstat_provider_id].npf_flags;
	boolean_t tell_user =
	    ((provider_filter_flagss & NSTAT_FILTER_SUPPRESS_SRC_ADDED) == 0);
	// Translate per-provider filter flags into the per-source filter.
	u_int32_t src_filter =
	    (provider_filter_flagss & NSTAT_FILTER_PROVIDER_NOZEROBYTES)
		? NSTAT_FILTER_NOZEROBYTES : 0;

	if (provider_filter_flagss & NSTAT_FILTER_TCP_NO_EARLY_CLOSE)
	{
		src_filter |= NSTAT_FILTER_TCP_NO_EARLY_CLOSE;
	}

	if (tell_user)
	{
		// Pre-build the SRC_ADDED message; the srcref field is filled
		// in below once the source ref has been assigned.
		unsigned int one = 1;

		if (mbuf_allocpacket(MBUF_DONTWAIT, sizeof(nstat_msg_src_added),
		    &one, &msg) != 0)
			return ENOMEM;

		mbuf_setlen(msg, sizeof(nstat_msg_src_added));
		mbuf_pkthdr_setlen(msg, mbuf_len(msg));
		nstat_msg_src_added *add = mbuf_data(msg);
		bzero(add, sizeof(*add));
		add->hdr.type = NSTAT_MSG_TYPE_SRC_ADDED;
		add->hdr.length = mbuf_len(msg);
		add->hdr.context = context;
		add->provider = provider->nstat_provider_id;
		srcrefp = &add->srcref;
	}

	// Allocate storage for the source
	nstat_src *src = OSMalloc(sizeof(*src), nstat_malloc_tag);
	if (src == NULL)
	{
		if (msg) mbuf_freem(msg);
		return ENOMEM;
	}

	// Fill in the source, including picking an unused source ref
	lck_mtx_lock(&state->ncs_mtx);

	src->srcref = nstat_control_next_src_ref(state);
	if (srcrefp)
		*srcrefp = src->srcref;

	// Refuse new sources on a client being torn down, or if the ref
	// counter produced the invalid sentinel value.
	if (state->ncs_flags & NSTAT_FLAG_CLEANUP || src->srcref == NSTAT_SRC_REF_INVALID)
	{
		lck_mtx_unlock(&state->ncs_mtx);
		OSFree(src, sizeof(*src), nstat_malloc_tag);
		if (msg) mbuf_freem(msg);
		return EINVAL;
	}
	src->provider = provider;
	src->cookie = cookie;
	src->filter = src_filter;
	src->seq = 0;

	if (msg)
	{
		// send the source added message if appropriate
		errno_t result = ctl_enqueuembuf(state->ncs_kctl, state->ncs_unit, msg,
		    CTL_DATA_EOR);
		if (result != 0)
		{
			nstat_stats.nstat_srcaddedfailures += 1;
			lck_mtx_unlock(&state->ncs_mtx);
			OSFree(src, sizeof(*src), nstat_malloc_tag);
			mbuf_freem(msg);
			return result;
		}
	}
	// Put the source in the list
	TAILQ_INSERT_HEAD(&state->ncs_src_queue, src, ns_control_link);
	src->ns_control = state;

	lck_mtx_unlock(&state->ncs_mtx);

	return 0;
}
4478
4479static errno_t
4480nstat_control_handle_remove_request(
4481 nstat_control_state *state,
4482 mbuf_t m)
4483{
4484 nstat_src_ref_t srcref = NSTAT_SRC_REF_INVALID;
4485 nstat_src *src;
4486
4487 if (mbuf_copydata(m, offsetof(nstat_msg_rem_src_req, srcref), sizeof(srcref), &srcref) != 0)
4488 {
4489 return EINVAL;
4490 }
4491
4492 lck_mtx_lock(&state->ncs_mtx);
4493
4494 // Remove this source as we look for it
4495 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4496 {
4497 if (src->srcref == srcref)
4498 {
4499 break;
4500 }
4501 }
4502 if (src)
4503 {
4504 TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
4505 }
4506
4507 lck_mtx_unlock(&state->ncs_mtx);
4508
4509 if (src) nstat_control_cleanup_source(state, src, FALSE);
4510
4511 return src ? 0 : ENOENT;
4512}
4513
/*
 * Handle a QUERY_SRC request: send counts for one source or, with
 * NSTAT_SRC_REF_ALL, for every source, supporting chunked "partial"
 * delivery across multiple requests via the continuation machinery in
 * nstat_control_begin_query / nstat_control_end_query. Sources the
 * provider reports as gone get a final description message and are
 * then released.
 */
static errno_t
nstat_control_handle_query_request(
	nstat_control_state *state,
	mbuf_t m)
{
	// TBD: handle this from another thread so we can enqueue a lot of data
	// As written, if a client requests query all, this function will be
	// called from their send of the request message. We will attempt to write
	// responses and succeed until the buffer fills up. Since the clients thread
	// is blocked on send, it won't be reading unless the client has two threads
	// using this socket, one for read and one for write. Two threads probably
	// won't work with this code anyhow since we don't have proper locking in
	// place yet.
	tailq_head_nstat_src dead_list;
	errno_t result = ENOENT;
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);
	TAILQ_INIT(&dead_list);

	lck_mtx_lock(&state->ncs_mtx);

	if (all_srcs)
	{
		state->ncs_flags |= NSTAT_FLAG_REQCOUNTS;
	}
	nstat_src *src, *tmpsrc;
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);


	// Safe iteration: sources may be moved to dead_list mid-loop.
	TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
	{
		int gone = 0;

		// XXX ignore IFACE types?
		if (all_srcs || src->srcref == req.srcref)
		{
			// Skip sources already reported in this partial
			// sequence (seq matches) or filtered by the provider.
			if (nstat_control_reporting_allowed(state, src)
			    && (!partial || !all_srcs || src->seq != state->ncs_seq))
			{
				if (all_srcs &&
				    (req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0)
				{
					result = nstat_control_append_counts(state, src, &gone);
				}
				else
				{
					result = nstat_control_send_counts(state, src, req.hdr.context, 0, &gone);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the counts message failed to
					 * enqueue then we should clear our flag so
					 * that a client doesn't miss anything on
					 * idle cleanup. We skip the "gone"
					 * processing in the hope that we may
					 * catch it another time.
					 */
					state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
		}

		if (gone)
		{
			// send one last descriptor message so client may see last state
			// If we can't send the notification now, it
			// will be sent in the idle cleanup.
			result = nstat_control_send_description(state, src, 0, 0);
			if (result != 0)
			{
				nstat_stats.nstat_control_send_description_failures++;
				if (nstat_debug != 0)
					printf("%s - nstat_control_send_description() %d\n", __func__, result);
				state->ncs_flags &= ~NSTAT_FLAG_REQCOUNTS;
				break;
			}

			// pull src out of the list
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}

		if (all_srcs)
		{
			// Cap the work done per request; the continuation flag
			// tells the client to ask for the rest.
			if (src_count >= QUERY_CONTINUATION_SRC_COUNT)
			{
				break;
			}
		}
		else if (req.srcref == src->srcref)
		{
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, src, partial);

	lck_mtx_unlock(&state->ncs_mtx);

	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	// Release gone sources outside the lock, with removal notification.
	while ((src = TAILQ_FIRST(&dead_list)))
	{
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
4661
/*
 * Handle a GET_SRC_DESC request: send the descriptor for one source
 * or, with NSTAT_SRC_REF_ALL, for every source, honoring the same
 * chunked "partial" delivery protocol as the query-all path.
 */
static errno_t
nstat_control_handle_get_src_description(
	nstat_control_state *state,
	mbuf_t m)
{
	nstat_msg_get_src_description req;
	errno_t result = ENOENT;
	nstat_src *src;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	const boolean_t all_srcs = (req.srcref == NSTAT_SRC_REF_ALL);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
	{
		if (all_srcs || src->srcref == req.srcref)
		{
			// Skip sources already reported in this partial
			// sequence (seq matches) or filtered by the provider.
			if (nstat_control_reporting_allowed(state, src)
			    && (!all_srcs || !partial || src->seq != state->ncs_seq))
			{
				if ((req.hdr.flags & NSTAT_MSG_HDR_FLAG_SUPPORTS_AGGREGATE) != 0 && all_srcs)
				{
					result = nstat_control_append_description(state, src);
				}
				else
				{
					result = nstat_control_send_description(state, src, req.hdr.context, 0);
				}

				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the description message failed to
					 * enqueue then we give up for now.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * Note, we skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
					// Cap the work per request; the client
					// continues via the continuation flag.
					if (src_count >= QUERY_CONTINUATION_SRC_COUNT)
					{
						break;
					}
				}
			}

			if (!all_srcs)
			{
				break;
			}
		}
	}
	nstat_flush_accumulated_msgs(state);

	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, src, partial);

	lck_mtx_unlock(&state->ncs_mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (all_srcs && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	return result;
}
4752
4753static errno_t
4754nstat_control_handle_set_filter(
4755 nstat_control_state *state,
4756 mbuf_t m)
4757{
4758 nstat_msg_set_filter req;
4759 nstat_src *src;
4760
4761 if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
4762 return EINVAL;
4763 if (req.srcref == NSTAT_SRC_REF_ALL ||
4764 req.srcref == NSTAT_SRC_REF_INVALID)
4765 return EINVAL;
4766
4767 lck_mtx_lock(&state->ncs_mtx);
4768 TAILQ_FOREACH(src, &state->ncs_src_queue, ns_control_link)
4769 {
4770 if (req.srcref == src->srcref)
4771 {
4772 src->filter = req.filter;
4773 break;
4774 }
4775 }
4776 lck_mtx_unlock(&state->ncs_mtx);
4777 if (src == NULL)
4778 return ENOENT;
4779
4780 return 0;
4781}
4782
4783static void
4784nstat_send_error(
4785 nstat_control_state *state,
4786 u_int64_t context,
4787 u_int32_t error)
4788{
4789 errno_t result;
4790 struct nstat_msg_error err;
4791
4792 bzero(&err, sizeof(err));
4793 err.hdr.type = NSTAT_MSG_TYPE_ERROR;
4794 err.hdr.length = sizeof(err);
4795 err.hdr.context = context;
4796 err.error = error;
4797
4798 result = ctl_enqueuedata(state->ncs_kctl, state->ncs_unit, &err,
4799 sizeof(err), CTL_DATA_EOR | CTL_DATA_CRIT);
4800 if (result != 0)
4801 nstat_stats.nstat_msgerrorfailures++;
4802}
4803
4804static boolean_t
4805nstat_control_begin_query(
4806 nstat_control_state *state,
4807 const nstat_msg_hdr *hdrp)
4808{
4809 boolean_t partial = FALSE;
4810
4811 if (hdrp->flags & NSTAT_MSG_HDR_FLAG_CONTINUATION)
4812 {
4813 /* A partial query all has been requested. */
4814 partial = TRUE;
4815
4816 if (state->ncs_context != hdrp->context)
4817 {
4818 if (state->ncs_context != 0)
4819 nstat_send_error(state, state->ncs_context, EAGAIN);
4820
4821 /* Initialize state for a partial query all. */
4822 state->ncs_context = hdrp->context;
4823 state->ncs_seq++;
4824 }
4825 }
4826
4827 return partial;
4828}
4829
4830static u_int16_t
4831nstat_control_end_query(
4832 nstat_control_state *state,
4833 nstat_src *last_src,
4834 boolean_t partial)
4835{
4836 u_int16_t flags = 0;
4837
4838 if (last_src == NULL || !partial)
4839 {
4840 /*
4841 * We iterated through the entire srcs list or exited early
4842 * from the loop when a partial update was not requested (an
4843 * error occurred), so clear context to indicate internally
4844 * that the query is finished.
4845 */
4846 state->ncs_context = 0;
4847 }
4848 else
4849 {
4850 /*
4851 * Indicate to userlevel to make another partial request as
4852 * there are still sources left to be reported.
4853 */
4854 flags |= NSTAT_MSG_HDR_FLAG_CONTINUATION;
4855 }
4856
4857 return flags;
4858}
4859
/*
 * Handle a GET_UPDATE request: send combined descriptor+counts updates
 * for one source or for all sources (chunked via the partial-query
 * protocol). Also latches NSTAT_FLAG_SUPPORTS_UPDATES so future
 * reporting can use update messages. Sources reported gone by their
 * provider are unlinked and released after the scan.
 */
static errno_t
nstat_control_handle_get_update(
	nstat_control_state *state,
	mbuf_t m)
{
	nstat_msg_query_src_req req;

	if (mbuf_copydata(m, 0, sizeof(req), &req) != 0)
	{
		return EINVAL;
	}

	lck_mtx_lock(&state->ncs_mtx);

	// Remember that this client understands update messages.
	state->ncs_flags |= NSTAT_FLAG_SUPPORTS_UPDATES;

	errno_t result = ENOENT;
	nstat_src *src, *tmpsrc;
	tailq_head_nstat_src dead_list;
	u_int64_t src_count = 0;
	boolean_t partial = FALSE;
	TAILQ_INIT(&dead_list);

	/*
	 * Error handling policy and sequence number generation is folded into
	 * nstat_control_begin_query.
	 */
	partial = nstat_control_begin_query(state, &req.hdr);

	// Safe iteration: sources may be moved to dead_list mid-loop.
	TAILQ_FOREACH_SAFE(src, &state->ncs_src_queue, ns_control_link, tmpsrc)
	{
		int gone;

		gone = 0;
		if (nstat_control_reporting_allowed(state, src))
		{
			/* skip this source if it has the current state
			 * sequence number as it's already been reported in
			 * this query-all partial sequence. */
			if (req.srcref == NSTAT_SRC_REF_ALL
			    && (FALSE == partial || src->seq != state->ncs_seq))
			{
				result = nstat_control_append_update(state, src, &gone);
				if (ENOMEM == result || ENOBUFS == result)
				{
					/*
					 * If the update message failed to
					 * enqueue then give up.
					 */
					break;
				}
				if (partial)
				{
					/*
					 * We skip over hard errors and
					 * filtered sources.
					 */
					src->seq = state->ncs_seq;
					src_count++;
				}
			}
			else if (src->srcref == req.srcref)
			{
				// Single-source request: send one update now.
				result = nstat_control_send_update(state, src, req.hdr.context, 0, &gone);
			}
		}

		if (gone)
		{
			// pull src out of the list
			TAILQ_REMOVE(&state->ncs_src_queue, src, ns_control_link);
			TAILQ_INSERT_TAIL(&dead_list, src, ns_control_link);
		}

		if (req.srcref != NSTAT_SRC_REF_ALL && req.srcref == src->srcref)
		{
			break;
		}
		// Cap the work done per request; the continuation flag tells
		// the client to ask for the rest.
		if (src_count >= QUERY_CONTINUATION_SRC_COUNT)
		{
			break;
		}
	}

	nstat_flush_accumulated_msgs(state);


	u_int16_t flags = 0;
	if (req.srcref == NSTAT_SRC_REF_ALL)
		flags = nstat_control_end_query(state, src, partial);

	lck_mtx_unlock(&state->ncs_mtx);
	/*
	 * If an error occurred enqueueing data, then allow the error to
	 * propagate to nstat_control_send. This way, the error is sent to
	 * user-level.
	 */
	if (req.srcref == NSTAT_SRC_REF_ALL && ENOMEM != result && ENOBUFS != result)
	{
		nstat_enqueue_success(req.hdr.context, state, flags);
		result = 0;
	}

	while ((src = TAILQ_FIRST(&dead_list)))
	{
		TAILQ_REMOVE(&dead_list, src, ns_control_link);
		// release src and send notification
		nstat_control_cleanup_source(state, src, FALSE);
	}

	return result;
}
4972
4973static errno_t
4974nstat_control_handle_subscribe_sysinfo(
4975 nstat_control_state *state)
4976{
4977 errno_t result = priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0);
4978
4979 if (result != 0)
4980 {
4981 return result;
4982 }
4983
4984 lck_mtx_lock(&state->ncs_mtx);
4985 state->ncs_flags |= NSTAT_FLAG_SYSINFO_SUBSCRIBED;
4986 lck_mtx_unlock(&state->ncs_mtx);
4987
4988 return 0;
4989}
4990
/*
 * Kernel-control send handler: entry point for every request message a
 * user-level client writes to the ntstat control socket.  Parses the
 * nstat_msg_hdr at the front of the mbuf chain, dispatches on hdr->type,
 * and on failure echoes an NSTAT_MSG_TYPE_ERROR reply (with the original
 * request appended when it can be prepended in place).
 *
 * Ownership: this function always consumes the mbuf chain `m`, either by
 * handing it to a handler / ctl_enqueuembuf or by freeing it here.
 */
static errno_t
nstat_control_send(
	kern_ctl_ref kctl,
	u_int32_t unit,
	void *uinfo,
	mbuf_t m,
	__unused int flags)
{
	nstat_control_state	*state = (nstat_control_state*)uinfo;
	struct nstat_msg_hdr	*hdr;
	struct nstat_msg_hdr	storage;	// local copy when the header is not contiguous in the first mbuf
	errno_t 				result = 0;

	// Reject anything too short to even contain a message header.
	if (mbuf_pkthdr_len(m) < sizeof(*hdr))
	{
		// Is this the right thing to do?
		mbuf_freem(m);
		return EINVAL;
	}

	// If the header is contiguous in the leading mbuf, point at it
	// directly; otherwise gather it into `storage`.
	if (mbuf_len(m) >= sizeof(*hdr))
	{
		hdr = mbuf_data(m);
	}
	else
	{
		mbuf_copydata(m, 0, sizeof(storage), &storage);
		hdr = &storage;
	}

	// Legacy clients may not set the length
	// Those clients are likely not setting the flags either
	// Fix everything up so old clients continue to work
	if (hdr->length != mbuf_pkthdr_len(m))
	{
		hdr->flags = 0;
		hdr->length = mbuf_pkthdr_len(m);
		if (hdr == &storage)
		{
			// Header was gathered into the local copy; write the
			// corrected fields back into the mbuf chain.
			mbuf_copyback(m, 0, sizeof(*hdr), hdr, MBUF_DONTWAIT);
		}
	}

	// Dispatch to the per-message-type handler.  Handlers take ownership
	// of `m` only where they parse it; `m` is freed below regardless.
	switch (hdr->type)
	{
		case NSTAT_MSG_TYPE_ADD_SRC:
			result = nstat_control_handle_add_request(state, m);
			break;

		case NSTAT_MSG_TYPE_ADD_ALL_SRCS:
			result = nstat_control_handle_add_all(state, m);
			break;

		case NSTAT_MSG_TYPE_REM_SRC:
			result = nstat_control_handle_remove_request(state, m);
			break;

		case NSTAT_MSG_TYPE_QUERY_SRC:
			result = nstat_control_handle_query_request(state, m);
			break;

		case NSTAT_MSG_TYPE_GET_SRC_DESC:
			result = nstat_control_handle_get_src_description(state, m);
			break;

		case NSTAT_MSG_TYPE_SET_FILTER:
			result = nstat_control_handle_set_filter(state, m);
			break;

		case NSTAT_MSG_TYPE_GET_UPDATE:
			result = nstat_control_handle_get_update(state, m);
			break;

		case NSTAT_MSG_TYPE_SUBSCRIBE_SYSINFO:
			result = nstat_control_handle_subscribe_sysinfo(state);
			break;

		default:
			result = EINVAL;
			break;
	}

	if (result != 0)
	{
		// Build an error reply.  Preferred form: the error header
		// followed by the client's original request (aids debugging).
		struct nstat_msg_error	err;

		bzero(&err, sizeof(err));
		err.hdr.type = NSTAT_MSG_TYPE_ERROR;
		err.hdr.length = sizeof(err) + mbuf_pkthdr_len(m);
		err.hdr.context = hdr->context;
		err.error = result;

		if (mbuf_prepend(&m, sizeof(err), MBUF_DONTWAIT) == 0 &&
			mbuf_copyback(m, 0, sizeof(err), &err, MBUF_DONTWAIT) == 0)
		{
			result = ctl_enqueuembuf(kctl, unit, m, CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0)
			{
				mbuf_freem(m);
			}
			// Either enqueued or freed above — drop our reference so the
			// common free at the bottom doesn't double-free.
			m = NULL;
		}

		if (result != 0)
		{
			// Unable to prepend the error to the request - just send the error
			err.hdr.length = sizeof(err);
			result = ctl_enqueuedata(kctl, unit, &err, sizeof(err),
						CTL_DATA_EOR | CTL_DATA_CRIT);
			if (result != 0)
				nstat_stats.nstat_msgerrorfailures += 1;
		}
		nstat_stats.nstat_handle_msg_failures += 1;
	}

	if (m) mbuf_freem(m);

	return result;
}
5110
5111
/*
 * Aggregate TCP "progress indicator" statistics for all non-local TCP
 * connections whose last output interface matches `ifindex`.
 *
 * Flows whose inp_start_timestamp falls within the trailing
 * `recentflow_maxduration` window (measured against mach_continuous_time())
 * are additionally counted into the xp_recentflows_* fields.
 *
 * Walks the global tcbinfo list under the shared (read) ipi_lock; the
 * output structure is zeroed here before accumulation.
 *
 * Returns 0 (the error variable exists for interface symmetry; no failure
 * path currently sets it).
 */
static int
tcp_progress_indicators_for_interface(unsigned int ifindex, uint64_t recentflow_maxduration, struct xtcpprogress_indicators *indicators)
{
	int error = 0;
	struct inpcb *inp;
	uint64_t min_recent_start_time;

	// Flows that started at or after this timestamp count as "recent".
	min_recent_start_time = mach_continuous_time() - recentflow_maxduration;
	bzero(indicators, sizeof(*indicators));

	lck_rw_lock_shared(tcbinfo.ipi_lock);
	/*
	 * For progress indicators we don't need to special case TCP to collect time wait connections
	 */
	LIST_FOREACH(inp, tcbinfo.ipi_listhead, inp_list)
	{
		struct tcpcb  *tp = intotcpcb(inp);
		// Only live flows on the requested interface; loopback-style
		// local flows (TF_LOCAL) are excluded from the indicators.
		if (tp && inp->inp_last_outifp &&
			inp->inp_last_outifp->if_index == ifindex &&
			inp->inp_state != INPCB_STATE_DEAD &&
			!(tp->t_flags & TF_LOCAL))
		{
			struct tcp_conn_status connstatus;
			indicators->xp_numflows++;
			tcp_get_connectivity_status(tp, &connstatus);
			if (connstatus.write_probe_failed)
				indicators->xp_write_probe_fails++;
			if (connstatus.read_probe_failed)
				indicators->xp_read_probe_fails++;
			if (connstatus.conn_probe_failed)
				indicators->xp_conn_probe_fails++;
			if (inp->inp_start_timestamp > min_recent_start_time)
			{
				uint64_t flow_count;

				// Byte counters are read with atomic_get_64 because they
				// are updated concurrently by the data path.
				indicators->xp_recentflows++;
				atomic_get_64(flow_count, &inp->inp_stat->rxbytes);
				indicators->xp_recentflows_rxbytes += flow_count;
				atomic_get_64(flow_count, &inp->inp_stat->txbytes);
				indicators->xp_recentflows_txbytes += flow_count;

				indicators->xp_recentflows_rxooo += tp->t_stat.rxoutoforderbytes;
				indicators->xp_recentflows_rxdup += tp->t_stat.rxduplicatebytes;
				indicators->xp_recentflows_retx += tp->t_stat.txretransmitbytes;
				// Any unacknowledged data in flight marks the flow as unacked.
				if (tp->snd_max - tp->snd_una)
				{
					indicators->xp_recentflows_unacked++;
				}
			}
		}
	}
	lck_rw_done(tcbinfo.ipi_lock);

	return (error);
}
5167
5168
5169__private_extern__ int
5170ntstat_tcp_progress_indicators(struct sysctl_req *req)
5171{
5172 struct xtcpprogress_indicators indicators = {};
5173 int error = 0;
5174 struct tcpprogressreq requested;
5175
5176 if (priv_check_cred(kauth_cred_get(), PRIV_NET_PRIVILEGED_NETWORK_STATISTICS, 0) != 0)
5177 {
5178 return EACCES;
5179 }
5180 if (req->newptr == USER_ADDR_NULL)
5181 {
5182 return EINVAL;
5183 }
5184 if (req->newlen < sizeof(req))
5185 {
5186 return EINVAL;
5187 }
5188 error = SYSCTL_IN(req, &requested, sizeof(requested));
5189 if (error != 0)
5190 {
5191 return error;
5192 }
5193 error = tcp_progress_indicators_for_interface(requested.ifindex, requested.recentflow_maxduration, &indicators);
5194 if (error != 0)
5195 {
5196 return error;
5197 }
5198 error = SYSCTL_OUT(req, &indicators, sizeof(indicators));
5199
5200 return (error);
5201}
5202
5203
5204
5205
5206