1/*
2 * Copyright (c) 2004-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include "kpi_interface.h"
30
31#include <sys/queue.h>
32#include <sys/param.h> /* for definition of NULL */
33#include <kern/debug.h> /* for panic */
34#include <sys/errno.h>
35#include <sys/socket.h>
36#include <sys/kern_event.h>
37#include <sys/kernel.h>
38#include <sys/malloc.h>
39#include <sys/kpi_mbuf.h>
40#include <sys/mcache.h>
41#include <sys/protosw.h>
42#include <sys/syslog.h>
43#include <net/if_var.h>
44#include <net/if_dl.h>
45#include <net/dlil.h>
46#include <net/if_types.h>
47#include <net/if_dl.h>
48#include <net/if_arp.h>
49#include <net/if_llreach.h>
50#include <net/if_ether.h>
51#include <net/net_api_stats.h>
52#include <net/route.h>
53#include <net/if_ports_used.h>
54#include <libkern/libkern.h>
55#include <libkern/OSAtomic.h>
56#include <kern/locks.h>
57#include <kern/clock.h>
58#include <sys/sockio.h>
59#include <sys/proc.h>
60#include <sys/sysctl.h>
61#include <sys/mbuf.h>
62#include <netinet/ip_var.h>
63#include <netinet/udp.h>
64#include <netinet/udp_var.h>
65#include <netinet/tcp.h>
66#include <netinet/tcp_var.h>
67#include <netinet/in_pcb.h>
68#ifdef INET
69#include <netinet/igmp_var.h>
70#endif
71#include <netinet6/mld6_var.h>
72#include <netkey/key.h>
73#include <stdbool.h>
74
75#include "net/net_str_id.h"
76
77#if CONFIG_MACF
78#include <sys/kauth.h>
79#include <security/mac_framework.h>
80#endif
81
82#if SKYWALK
83#include <skywalk/os_skywalk_private.h>
84#include <skywalk/nexus/netif/nx_netif.h>
85#endif /* SKYWALK */
86
87extern uint64_t if_creation_generation_count;
88
89#undef ifnet_allocate
90errno_t ifnet_allocate(const struct ifnet_init_params *init,
91 ifnet_t *ifp);
92
93static errno_t ifnet_allocate_common(const struct ifnet_init_params *init,
94 ifnet_t *ifp, bool is_internal);
95
96
97#define TOUCHLASTCHANGE(__if_lastchange) { \
98 (__if_lastchange)->tv_sec = (time_t)net_uptime(); \
99 (__if_lastchange)->tv_usec = 0; \
100}
101
102static errno_t ifnet_defrouter_llreachinfo(ifnet_t, sa_family_t,
103 struct ifnet_llreach_info *);
104static void ifnet_kpi_free(ifnet_t);
105static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t **,
106 u_int32_t *);
107static errno_t ifnet_set_lladdr_internal(ifnet_t, const void *, size_t,
108 u_char, int);
109static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);
110
111
112/*
113 * Temporary work around until we have real reference counting
114 *
115 * We keep the bits about calling dlil_if_release (which should be
116 * called recycle) transparent by calling it from our if_free function
117 * pointer. We have to keep the client's original detach function
118 * somewhere so we can call it.
119 */
120static void
121ifnet_kpi_free(ifnet_t ifp)
122{
123 if ((ifp->if_refflags & IFRF_EMBRYONIC) == 0) {
124 ifnet_detached_func detach_func;
125
126 detach_func = ifp->if_detach;
127 if (detach_func != NULL) {
128 (*detach_func)(ifp);
129 }
130 }
131
132 ifnet_dispose(interface: ifp);
133}
134
135errno_t
136ifnet_allocate_common(const struct ifnet_init_params *init,
137 ifnet_t *ifp, bool is_internal)
138{
139 struct ifnet_init_eparams einit;
140
141 bzero(s: &einit, n: sizeof(einit));
142
143 einit.ver = IFNET_INIT_CURRENT_VERSION;
144 einit.len = sizeof(einit);
145 einit.flags = IFNET_INIT_LEGACY | IFNET_INIT_NX_NOAUTO;
146 if (!is_internal) {
147 einit.flags |= IFNET_INIT_ALLOC_KPI;
148 }
149 einit.uniqueid = init->uniqueid;
150 einit.uniqueid_len = init->uniqueid_len;
151 einit.name = init->name;
152 einit.unit = init->unit;
153 einit.family = init->family;
154 einit.type = init->type;
155 einit.output = init->output;
156 einit.demux = init->demux;
157 einit.add_proto = init->add_proto;
158 einit.del_proto = init->del_proto;
159 einit.check_multi = init->check_multi;
160 einit.framer = init->framer;
161 einit.softc = init->softc;
162 einit.ioctl = init->ioctl;
163 einit.set_bpf_tap = init->set_bpf_tap;
164 einit.detach = init->detach;
165 einit.event = init->event;
166 einit.broadcast_addr = init->broadcast_addr;
167 einit.broadcast_len = init->broadcast_len;
168
169 return ifnet_allocate_extended(init: &einit, interface: ifp);
170}
171
172errno_t
173ifnet_allocate_internal(const struct ifnet_init_params *init, ifnet_t *ifp)
174{
175 return ifnet_allocate_common(init, ifp, true);
176}
177
178errno_t
179ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *ifp)
180{
181 return ifnet_allocate_common(init, ifp, false);
182}
183
184static void
185ifnet_set_broadcast_addr(ifnet_t ifp, const void * broadcast_addr,
186 u_int32_t broadcast_len)
187{
188 if (broadcast_len == 0 || broadcast_addr == NULL) {
189 /* no broadcast address */
190 bzero(s: &ifp->if_broadcast, n: sizeof(ifp->if_broadcast));
191 } else if (broadcast_len > sizeof(ifp->if_broadcast.u.buffer)) {
192 ifp->if_broadcast.u.ptr
193 = (u_char *)kalloc_data(broadcast_len,
194 Z_WAITOK | Z_NOFAIL);
195 bcopy(src: broadcast_addr,
196 dst: ifp->if_broadcast.u.ptr,
197 n: broadcast_len);
198 } else {
199 bcopy(src: broadcast_addr,
200 dst: ifp->if_broadcast.u.buffer,
201 n: broadcast_len);
202 }
203 ifp->if_broadcast.length = broadcast_len;
204}
205
206errno_t
207ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
208 ifnet_t *interface)
209{
210#if SKYWALK
211 ifnet_start_func ostart = NULL;
212#endif /* SKYWALK */
213 struct ifnet_init_eparams einit;
214 struct ifnet *ifp = NULL;
215 char if_xname[IFXNAMSIZ] = {0};
216 int error;
217
218 einit = *einit0;
219
220 if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
221 einit.len < sizeof(einit)) {
222 return EINVAL;
223 }
224
225 if (einit.family == 0 || einit.name == NULL ||
226 strlen(s: einit.name) >= IFNAMSIZ ||
227 (einit.type & 0xFFFFFF00) != 0 || einit.type == 0) {
228 return EINVAL;
229 }
230
231#if SKYWALK
232 /* headroom must be a multiple of 8 bytes */
233 if ((einit.tx_headroom & 0x7) != 0) {
234 return EINVAL;
235 }
236 if ((einit.flags & IFNET_INIT_SKYWALK_NATIVE) == 0) {
237 /*
238 * Currently Interface advisory reporting is supported only
239 * for skywalk interface.
240 */
241 if ((einit.flags & IFNET_INIT_IF_ADV) != 0) {
242 return EINVAL;
243 }
244 }
245#endif /* SKYWALK */
246
247 if (einit.flags & IFNET_INIT_LEGACY) {
248#if SKYWALK
249 if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
250 return EINVAL;
251 }
252#endif /* SKYWALK */
253 if (einit.output == NULL ||
254 (einit.flags & IFNET_INIT_INPUT_POLL)) {
255 return EINVAL;
256 }
257 einit.pre_enqueue = NULL;
258 einit.start = NULL;
259 einit.output_ctl = NULL;
260 einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
261 einit.input_poll = NULL;
262 einit.input_ctl = NULL;
263 } else {
264#if SKYWALK
265 /*
266 * For native Skywalk drivers, steer all start requests
267 * to ifp_if_start() until the netif device adapter is
268 * fully activated, at which point we will point it to
269 * nx_netif_doorbell().
270 */
271 if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
272 if (einit.start != NULL) {
273 return EINVAL;
274 }
275 /* override output start callback */
276 ostart = einit.start = ifp_if_start;
277 } else {
278 ostart = einit.start;
279 }
280#endif /* SKYWALK */
281 if (einit.start == NULL) {
282 return EINVAL;
283 }
284
285 einit.output = NULL;
286 if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX) {
287 return EINVAL;
288 }
289
290 if (einit.flags & IFNET_INIT_INPUT_POLL) {
291 if (einit.input_poll == NULL || einit.input_ctl == NULL) {
292 return EINVAL;
293 }
294 } else {
295 einit.input_poll = NULL;
296 einit.input_ctl = NULL;
297 }
298 }
299
300 if (einit.type > UCHAR_MAX) {
301 return EINVAL;
302 }
303
304 if (einit.unit > SHRT_MAX) {
305 return EINVAL;
306 }
307
308 /* Initialize external name (name + unit) */
309 (void) snprintf(if_xname, count: sizeof(if_xname), "%s%d",
310 einit.name, einit.unit);
311
312 if (einit.uniqueid == NULL) {
313 einit.uniqueid = if_xname;
314 einit.uniqueid_len = (uint32_t)strlen(s: if_xname);
315 }
316
317 error = dlil_if_acquire(einit.family, einit.uniqueid,
318 einit.uniqueid_len, if_xname, &ifp);
319
320 if (error == 0) {
321 uint64_t br;
322
323 /*
324 * Cast ifp->if_name as non const. dlil_if_acquire sets it up
325 * to point to storage of at least IFNAMSIZ bytes. It is safe
326 * to write to this.
327 */
328 strlcpy(__DECONST(char *, ifp->if_name), src: einit.name, IFNAMSIZ);
329 ifp->if_type = (u_char)einit.type;
330 ifp->if_family = einit.family;
331 ifp->if_subfamily = einit.subfamily;
332 ifp->if_unit = (short)einit.unit;
333 ifp->if_output = einit.output;
334 ifp->if_pre_enqueue = einit.pre_enqueue;
335 ifp->if_start = einit.start;
336 ifp->if_output_ctl = einit.output_ctl;
337 ifp->if_output_sched_model = einit.output_sched_model;
338 ifp->if_output_bw.eff_bw = einit.output_bw;
339 ifp->if_output_bw.max_bw = einit.output_bw_max;
340 ifp->if_output_lt.eff_lt = einit.output_lt;
341 ifp->if_output_lt.max_lt = einit.output_lt_max;
342 ifp->if_input_poll = einit.input_poll;
343 ifp->if_input_ctl = einit.input_ctl;
344 ifp->if_input_bw.eff_bw = einit.input_bw;
345 ifp->if_input_bw.max_bw = einit.input_bw_max;
346 ifp->if_input_lt.eff_lt = einit.input_lt;
347 ifp->if_input_lt.max_lt = einit.input_lt_max;
348 ifp->if_demux = einit.demux;
349 ifp->if_add_proto = einit.add_proto;
350 ifp->if_del_proto = einit.del_proto;
351 ifp->if_check_multi = einit.check_multi;
352 ifp->if_framer_legacy = einit.framer;
353 ifp->if_framer = einit.framer_extended;
354 ifp->if_softc = einit.softc;
355 ifp->if_ioctl = einit.ioctl;
356 ifp->if_set_bpf_tap = einit.set_bpf_tap;
357 ifp->if_free = (einit.free != NULL) ? einit.free : ifnet_kpi_free;
358 ifp->if_event = einit.event;
359 ifp->if_detach = einit.detach;
360
361 /* Initialize Network ID */
362 ifp->network_id_len = 0;
363 bzero(s: &ifp->network_id, n: sizeof(ifp->network_id));
364
365 /* Initialize external name (name + unit) */
366 snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ,
367 "%s", if_xname);
368
369 /*
370 * On embedded, framer() is already in the extended form;
371 * we simply use it as is, unless the caller specifies
372 * framer_extended() which will then override it.
373 *
374 * On non-embedded, framer() has long been exposed as part
375 * of the public KPI, and therefore its signature must
376 * remain the same (without the pre- and postpend length
377 * parameters.) We special case ether_frameout, such that
378 * it gets mapped to its extended variant. All other cases
379 * utilize the stub routine which will simply return zeroes
380 * for those new parameters.
381 *
382 * Internally, DLIL will only use the extended callback
383 * variant which is represented by if_framer.
384 */
385#if !XNU_TARGET_OS_OSX
386 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
387 ifp->if_framer = ifp->if_framer_legacy;
388 }
389#else /* XNU_TARGET_OS_OSX */
390 if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
391 if (ifp->if_framer_legacy == ether_frameout) {
392 ifp->if_framer = ether_frameout_extended;
393 } else {
394 ifp->if_framer = ifnet_framer_stub;
395 }
396 }
397#endif /* XNU_TARGET_OS_OSX */
398
399 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
400 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
401 } else if (ifp->if_output_bw.eff_bw == 0) {
402 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
403 }
404
405 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
406 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
407 } else if (ifp->if_input_bw.eff_bw == 0) {
408 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
409 }
410
411 if (ifp->if_output_bw.max_bw == 0) {
412 ifp->if_output_bw = ifp->if_input_bw;
413 } else if (ifp->if_input_bw.max_bw == 0) {
414 ifp->if_input_bw = ifp->if_output_bw;
415 }
416
417 /* Pin if_baudrate to 32 bits */
418 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
419 if (br != 0) {
420 ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
421 }
422
423 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
424 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
425 } else if (ifp->if_output_lt.eff_lt == 0) {
426 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
427 }
428
429 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
430 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
431 } else if (ifp->if_input_lt.eff_lt == 0) {
432 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
433 }
434
435 if (ifp->if_output_lt.max_lt == 0) {
436 ifp->if_output_lt = ifp->if_input_lt;
437 } else if (ifp->if_input_lt.max_lt == 0) {
438 ifp->if_input_lt = ifp->if_output_lt;
439 }
440
441 if (ifp->if_ioctl == NULL) {
442 ifp->if_ioctl = ifp_if_ioctl;
443 }
444
445 if_clear_eflags(ifp, -1);
446 if (ifp->if_start != NULL) {
447 if_set_eflags(ifp, IFEF_TXSTART);
448 if (ifp->if_pre_enqueue == NULL) {
449 ifp->if_pre_enqueue = ifnet_enqueue;
450 }
451 ifp->if_output = ifp->if_pre_enqueue;
452 }
453
454 if (ifp->if_input_poll != NULL) {
455 if_set_eflags(ifp, IFEF_RXPOLL);
456 }
457
458 ifp->if_output_dlil = dlil_output_handler;
459 ifp->if_input_dlil = dlil_input_handler;
460
461 VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
462 (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
463 ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
464 ifp->if_input_ctl == NULL));
465 VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
466 (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));
467
468 ifnet_set_broadcast_addr(ifp, broadcast_addr: einit.broadcast_addr,
469 broadcast_len: einit.broadcast_len);
470
471 if_clear_xflags(ifp, -1);
472#if SKYWALK
473 ifp->if_tx_headroom = 0;
474 ifp->if_tx_trailer = 0;
475 ifp->if_rx_mit_ival = 0;
476 ifp->if_save_start = ostart;
477 if (einit.flags & IFNET_INIT_SKYWALK_NATIVE) {
478 VERIFY(ifp->if_eflags & IFEF_TXSTART);
479 VERIFY(!(einit.flags & IFNET_INIT_LEGACY));
480 if_set_eflags(ifp, IFEF_SKYWALK_NATIVE);
481 ifp->if_tx_headroom = einit.tx_headroom;
482 ifp->if_tx_trailer = einit.tx_trailer;
483 ifp->if_rx_mit_ival = einit.rx_mit_ival;
484 /*
485 * For native Skywalk drivers, make sure packets
486 * emitted by the BSD stack get dropped until the
487 * interface is in service. When the netif host
488 * adapter is fully activated, we'll point it to
489 * nx_netif_output().
490 */
491 ifp->if_output = ifp_if_output;
492 /*
493 * Override driver-supplied parameters
494 * and force IFEF_ENQUEUE_MULTI?
495 */
496 if (sk_netif_native_txmodel ==
497 NETIF_NATIVE_TXMODEL_ENQUEUE_MULTI) {
498 einit.start_delay_qlen = sk_tx_delay_qlen;
499 einit.start_delay_timeout = sk_tx_delay_timeout;
500 }
501 /* netif comes with native interfaces */
502 VERIFY((ifp->if_xflags & IFXF_LEGACY) == 0);
503 } else if (!ifnet_needs_compat(ifp)) {
504 /*
505 * If we're told not to plumb in netif compat
506 * for this interface, set IFXF_NX_NOAUTO to
507 * prevent DLIL from auto-attaching the nexus.
508 */
509 einit.flags |= IFNET_INIT_NX_NOAUTO;
510 /* legacy (non-netif) interface */
511 if_set_xflags(ifp, IFXF_LEGACY);
512 }
513
514 ifp->if_save_output = ifp->if_output;
515 if ((einit.flags & IFNET_INIT_NX_NOAUTO) != 0) {
516 if_set_xflags(ifp, IFXF_NX_NOAUTO);
517 }
518 if ((einit.flags & IFNET_INIT_IF_ADV) != 0) {
519 if_set_eflags(ifp, IFEF_ADV_REPORT);
520 }
521#else /* !SKYWALK */
522 /* legacy interface */
523 if_set_xflags(ifp, IFXF_LEGACY);
524#endif /* !SKYWALK */
525
526 if ((ifp->if_snd = ifclassq_alloc()) == NULL) {
527 panic_plain("%s: ifp=%p couldn't allocate class queues",
528 __func__, ifp);
529 /* NOTREACHED */
530 }
531
532 /*
533 * output target queue delay is specified in millisecond
534 * convert it to nanoseconds
535 */
536 IFCQ_TARGET_QDELAY(ifp->if_snd) =
537 einit.output_target_qdelay * 1000 * 1000;
538 IFCQ_MAXLEN(ifp->if_snd) = einit.sndq_maxlen;
539
540 ifnet_enqueue_multi_setup(ifp, einit.start_delay_qlen,
541 einit.start_delay_timeout);
542
543 IFCQ_PKT_DROP_LIMIT(ifp->if_snd) = IFCQ_DEFAULT_PKT_DROP_LIMIT;
544
545 /*
546 * Set embryonic flag; this will be cleared
547 * later when it is fully attached.
548 */
549 ifp->if_refflags = IFRF_EMBRYONIC;
550
551 /*
552 * Count the newly allocated ifnet
553 */
554 OSIncrementAtomic64(address: &net_api_stats.nas_ifnet_alloc_count);
555 INC_ATOMIC_INT64_LIM(net_api_stats.nas_ifnet_alloc_total);
556 if ((einit.flags & IFNET_INIT_ALLOC_KPI) != 0) {
557 if_set_xflags(ifp, IFXF_ALLOC_KPI);
558 } else {
559 OSIncrementAtomic64(
560 address: &net_api_stats.nas_ifnet_alloc_os_count);
561 INC_ATOMIC_INT64_LIM(
562 net_api_stats.nas_ifnet_alloc_os_total);
563 }
564
565 if (ifp->if_subfamily == IFNET_SUBFAMILY_MANAGEMENT) {
566 if_set_xflags(ifp, IFXF_MANAGEMENT);
567 if_management_interface_check_needed = true;
568 }
569
570 /*
571 * Increment the generation count on interface creation
572 */
573 ifp->if_creation_generation_id = os_atomic_inc(&if_creation_generation_count, relaxed);
574
575 *interface = ifp;
576 }
577 return error;
578}
579
580errno_t
581ifnet_reference(ifnet_t ifp)
582{
583 return dlil_if_ref(ifp);
584}
585
586void
587ifnet_dispose(ifnet_t ifp)
588{
589 dlil_if_release(ifp);
590}
591
592errno_t
593ifnet_release(ifnet_t ifp)
594{
595 return dlil_if_free(ifp);
596}
597
598errno_t
599ifnet_interface_family_find(const char *module_string,
600 ifnet_family_t *family_id)
601{
602 if (module_string == NULL || family_id == NULL) {
603 return EINVAL;
604 }
605
606 return net_str_id_find_internal(module_string, family_id,
607 NSI_IF_FAM_ID, 1);
608}
609
610void *
611ifnet_softc(ifnet_t interface)
612{
613 return (interface == NULL) ? NULL : interface->if_softc;
614}
615
616const char *
617ifnet_name(ifnet_t interface)
618{
619 return (interface == NULL) ? NULL : interface->if_name;
620}
621
622ifnet_family_t
623ifnet_family(ifnet_t interface)
624{
625 return (interface == NULL) ? 0 : interface->if_family;
626}
627
628ifnet_subfamily_t
629ifnet_subfamily(ifnet_t interface)
630{
631 return (interface == NULL) ? 0 : interface->if_subfamily;
632}
633
634u_int32_t
635ifnet_unit(ifnet_t interface)
636{
637 return (interface == NULL) ? (u_int32_t)0xffffffff :
638 (u_int32_t)interface->if_unit;
639}
640
641u_int32_t
642ifnet_index(ifnet_t interface)
643{
644 return (interface == NULL) ? (u_int32_t)0xffffffff :
645 interface->if_index;
646}
647
648errno_t
649ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
650{
651 bool set_IFF_UP;
652 bool change_IFF_UP;
653 uint16_t old_flags;
654
655 if (interface == NULL) {
656 return EINVAL;
657 }
658 set_IFF_UP = (new_flags & IFF_UP) != 0;
659 change_IFF_UP = (mask & IFF_UP) != 0;
660#if SKYWALK
661 if (set_IFF_UP && change_IFF_UP) {
662 /*
663 * When a native skywalk interface is marked IFF_UP, ensure
664 * the flowswitch is attached.
665 */
666 ifnet_attach_native_flowswitch(ifp: interface);
667 }
668#endif /* SKYWALK */
669
670 ifnet_lock_exclusive(ifp: interface);
671
672 /* If we are modifying the up/down state, call if_updown */
673 if (change_IFF_UP) {
674 if_updown(ifp: interface, up: set_IFF_UP);
675 }
676
677 old_flags = interface->if_flags;
678 interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
679 /* If we are modifying the multicast flag, set/unset the silent flag */
680 if ((old_flags & IFF_MULTICAST) !=
681 (interface->if_flags & IFF_MULTICAST)) {
682#if INET
683 if (IGMP_IFINFO(interface) != NULL) {
684 igmp_initsilent(interface, IGMP_IFINFO(interface));
685 }
686#endif /* INET */
687 if (MLD_IFINFO(interface) != NULL) {
688 mld6_initsilent(interface, MLD_IFINFO(interface));
689 }
690 }
691
692 ifnet_lock_done(ifp: interface);
693
694 return 0;
695}
696
697u_int16_t
698ifnet_flags(ifnet_t interface)
699{
700 return (interface == NULL) ? 0 : interface->if_flags;
701}
702
703/*
704 * This routine ensures the following:
705 *
706 * If IFEF_AWDL is set by the caller, also set the rest of flags as
707 * defined in IFEF_AWDL_MASK.
708 *
709 * If IFEF_AWDL has been set on the interface and the caller attempts
710 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
711 * return failure.
712 *
713 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
714 * on the interface.
715 *
716 * All other flags not associated with AWDL are not affected.
717 *
718 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
719 */
720static errno_t
721ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
722{
723 u_int32_t eflags;
724
725 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
726
727 eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));
728
729 if (ifp->if_eflags & IFEF_AWDL) {
730 if (eflags & IFEF_AWDL) {
731 if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK) {
732 return EINVAL;
733 }
734 } else {
735 *new_eflags &= ~IFEF_AWDL_MASK;
736 *mask |= IFEF_AWDL_MASK;
737 }
738 } else if (eflags & IFEF_AWDL) {
739 *new_eflags |= IFEF_AWDL_MASK;
740 *mask |= IFEF_AWDL_MASK;
741 } else if (eflags & IFEF_AWDL_RESTRICTED &&
742 !(ifp->if_eflags & IFEF_AWDL)) {
743 return EINVAL;
744 }
745
746 return 0;
747}
748
749errno_t
750ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
751{
752 uint32_t oeflags;
753 struct kev_msg ev_msg;
754 struct net_event_data ev_data;
755
756 if (interface == NULL) {
757 return EINVAL;
758 }
759
760 bzero(s: &ev_msg, n: sizeof(ev_msg));
761 ifnet_lock_exclusive(ifp: interface);
762 /*
763 * Sanity checks for IFEF_AWDL and its related flags.
764 */
765 if (ifnet_awdl_check_eflags(ifp: interface, new_eflags: &new_flags, mask: &mask) != 0) {
766 ifnet_lock_done(ifp: interface);
767 return EINVAL;
768 }
769 /*
770 * Currently Interface advisory reporting is supported only for
771 * skywalk interface.
772 */
773 if ((((new_flags & mask) & IFEF_ADV_REPORT) != 0) &&
774 ((interface->if_eflags & IFEF_SKYWALK_NATIVE) == 0)) {
775 ifnet_lock_done(ifp: interface);
776 return EINVAL;
777 }
778 oeflags = interface->if_eflags;
779 if_clear_eflags(interface, mask);
780 if (new_flags != 0) {
781 if_set_eflags(interface, (new_flags & mask));
782 }
783 ifnet_lock_done(ifp: interface);
784 if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
785 !(oeflags & IFEF_AWDL_RESTRICTED)) {
786 ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
787 /*
788 * The interface is now restricted to applications that have
789 * the entitlement.
790 * The check for the entitlement will be done in the data
791 * path, so we don't have to do anything here.
792 */
793 } else if (oeflags & IFEF_AWDL_RESTRICTED &&
794 !(interface->if_eflags & IFEF_AWDL_RESTRICTED)) {
795 ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
796 }
797 /*
798 * Notify configd so that it has a chance to perform better
799 * reachability detection.
800 */
801 if (ev_msg.event_code) {
802 bzero(s: &ev_data, n: sizeof(ev_data));
803 ev_msg.vendor_code = KEV_VENDOR_APPLE;
804 ev_msg.kev_class = KEV_NETWORK_CLASS;
805 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
806 strlcpy(dst: ev_data.if_name, src: interface->if_name, IFNAMSIZ);
807 ev_data.if_family = interface->if_family;
808 ev_data.if_unit = interface->if_unit;
809 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
810 ev_msg.dv[0].data_ptr = &ev_data;
811 ev_msg.dv[1].data_length = 0;
812 dlil_post_complete_msg(interface, &ev_msg);
813 }
814
815 return 0;
816}
817
818u_int32_t
819ifnet_eflags(ifnet_t interface)
820{
821 return (interface == NULL) ? 0 : interface->if_eflags;
822}
823
824errno_t
825ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
826{
827 if (ifp == NULL) {
828 return EINVAL;
829 }
830 ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
831
832 /*
833 * If this is called prior to ifnet attach, the actual work will
834 * be done at attach time. Otherwise, if it is called after
835 * ifnet detach, then it is a no-op.
836 */
837 if (!ifnet_is_attached(ifp, refio: 0)) {
838 ifp->if_idle_new_flags = new_flags;
839 ifp->if_idle_new_flags_mask = mask;
840 return 0;
841 } else {
842 ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;
843 }
844
845 ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
846 return 0;
847}
848
849errno_t
850ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
851{
852 errno_t err;
853
854 ifnet_lock_exclusive(ifp);
855 err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
856 ifnet_lock_done(ifp);
857
858 return err;
859}
860
861u_int32_t
862ifnet_idle_flags(ifnet_t ifp)
863{
864 return (ifp == NULL) ? 0 : ifp->if_idle_flags;
865}
866
867errno_t
868ifnet_set_link_quality(ifnet_t ifp, int quality)
869{
870 errno_t err = 0;
871
872 if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
873 err = EINVAL;
874 goto done;
875 }
876
877 if (!ifnet_is_attached(ifp, refio: 0)) {
878 err = ENXIO;
879 goto done;
880 }
881
882 if_lqm_update(ifp, quality, 0);
883
884done:
885 return err;
886}
887
888int
889ifnet_link_quality(ifnet_t ifp)
890{
891 int lqm;
892
893 if (ifp == NULL) {
894 return IFNET_LQM_THRESH_OFF;
895 }
896
897 ifnet_lock_shared(ifp);
898 lqm = ifp->if_interface_state.lqm_state;
899 ifnet_lock_done(ifp);
900
901 return lqm;
902}
903
904errno_t
905ifnet_set_interface_state(ifnet_t ifp,
906 struct if_interface_state *if_interface_state)
907{
908 errno_t err = 0;
909
910 if (ifp == NULL || if_interface_state == NULL) {
911 err = EINVAL;
912 goto done;
913 }
914
915 if (!ifnet_is_attached(ifp, refio: 0)) {
916 err = ENXIO;
917 goto done;
918 }
919
920 if_state_update(ifp, if_interface_state);
921
922done:
923 return err;
924}
925
926errno_t
927ifnet_get_interface_state(ifnet_t ifp,
928 struct if_interface_state *if_interface_state)
929{
930 errno_t err = 0;
931
932 if (ifp == NULL || if_interface_state == NULL) {
933 err = EINVAL;
934 goto done;
935 }
936
937 if (!ifnet_is_attached(ifp, refio: 0)) {
938 err = ENXIO;
939 goto done;
940 }
941
942 if_get_state(ifp, if_interface_state);
943
944done:
945 return err;
946}
947
948
949static errno_t
950ifnet_defrouter_llreachinfo(ifnet_t ifp, sa_family_t af,
951 struct ifnet_llreach_info *iflri)
952{
953 if (ifp == NULL || iflri == NULL) {
954 return EINVAL;
955 }
956
957 VERIFY(af == AF_INET || af == AF_INET6);
958
959 return ifnet_llreach_get_defrouter(ifp, af, iflri);
960}
961
962errno_t
963ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
964{
965 return ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri);
966}
967
968errno_t
969ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
970{
971 return ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri);
972}
973
974errno_t
975ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
976 u_int32_t mask)
977{
978 errno_t error = 0;
979 int tmp;
980
981 if (ifp == NULL) {
982 return EINVAL;
983 }
984
985 ifnet_lock_exclusive(ifp);
986 tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
987 if ((tmp & ~IFCAP_VALID)) {
988 error = EINVAL;
989 } else {
990 ifp->if_capabilities = tmp;
991 }
992 ifnet_lock_done(ifp);
993
994 return error;
995}
996
997u_int32_t
998ifnet_capabilities_supported(ifnet_t ifp)
999{
1000 return (ifp == NULL) ? 0 : ifp->if_capabilities;
1001}
1002
1003
1004errno_t
1005ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
1006 u_int32_t mask)
1007{
1008 errno_t error = 0;
1009 int tmp;
1010 struct kev_msg ev_msg;
1011 struct net_event_data ev_data;
1012
1013 if (ifp == NULL) {
1014 return EINVAL;
1015 }
1016
1017 ifnet_lock_exclusive(ifp);
1018 tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
1019 if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities)) {
1020 error = EINVAL;
1021 } else {
1022 ifp->if_capenable = tmp;
1023 }
1024 ifnet_lock_done(ifp);
1025
1026 /* Notify application of the change */
1027 bzero(s: &ev_data, n: sizeof(struct net_event_data));
1028 bzero(s: &ev_msg, n: sizeof(struct kev_msg));
1029 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1030 ev_msg.kev_class = KEV_NETWORK_CLASS;
1031 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1032
1033 ev_msg.event_code = KEV_DL_IFCAP_CHANGED;
1034 strlcpy(dst: &ev_data.if_name[0], src: ifp->if_name, IFNAMSIZ);
1035 ev_data.if_family = ifp->if_family;
1036 ev_data.if_unit = (u_int32_t)ifp->if_unit;
1037 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1038 ev_msg.dv[0].data_ptr = &ev_data;
1039 ev_msg.dv[1].data_length = 0;
1040 dlil_post_complete_msg(ifp, &ev_msg);
1041
1042 return error;
1043}
1044
1045u_int32_t
1046ifnet_capabilities_enabled(ifnet_t ifp)
1047{
1048 return (ifp == NULL) ? 0 : ifp->if_capenable;
1049}
1050
1051static const ifnet_offload_t offload_mask =
1052 (IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
1053 IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
1054 IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_CSUM_ZERO_INVERT |
1055 IFNET_VLAN_TAGGING | IFNET_VLAN_MTU | IFNET_MULTIPAGES |
1056 IFNET_TSO_IPV4 | IFNET_TSO_IPV6 | IFNET_TX_STATUS | IFNET_HW_TIMESTAMP |
1057 IFNET_SW_TIMESTAMP);
1058
1059static const ifnet_offload_t any_offload_csum = IFNET_CHECKSUMF;
1060
1061static errno_t
1062ifnet_set_offload_common(ifnet_t interface, ifnet_offload_t offload, boolean_t set_both)
1063{
1064 u_int32_t ifcaps = 0;
1065
1066 if (interface == NULL) {
1067 return EINVAL;
1068 }
1069
1070 ifnet_lock_exclusive(ifp: interface);
1071 interface->if_hwassist = (offload & offload_mask);
1072
1073#if SKYWALK
1074 /* preserve skywalk capability */
1075 if ((interface->if_capabilities & IFCAP_SKYWALK) != 0) {
1076 ifcaps |= IFCAP_SKYWALK;
1077 }
1078#endif /* SKYWALK */
1079 if (dlil_verbose) {
1080 log(LOG_DEBUG, "%s: set offload flags=0x%x\n",
1081 if_name(interface),
1082 interface->if_hwassist);
1083 }
1084 ifnet_lock_done(ifp: interface);
1085
1086 if ((offload & any_offload_csum)) {
1087 ifcaps |= IFCAP_HWCSUM;
1088 }
1089 if ((offload & IFNET_TSO_IPV4)) {
1090 ifcaps |= IFCAP_TSO4;
1091 }
1092 if ((offload & IFNET_TSO_IPV6)) {
1093 ifcaps |= IFCAP_TSO6;
1094 }
1095 if ((offload & IFNET_LRO)) {
1096 ifcaps |= IFCAP_LRO;
1097 }
1098 if ((offload & IFNET_VLAN_MTU)) {
1099 ifcaps |= IFCAP_VLAN_MTU;
1100 }
1101 if ((offload & IFNET_VLAN_TAGGING)) {
1102 ifcaps |= IFCAP_VLAN_HWTAGGING;
1103 }
1104 if ((offload & IFNET_TX_STATUS)) {
1105 ifcaps |= IFCAP_TXSTATUS;
1106 }
1107 if ((offload & IFNET_HW_TIMESTAMP)) {
1108 ifcaps |= IFCAP_HW_TIMESTAMP;
1109 }
1110 if ((offload & IFNET_SW_TIMESTAMP)) {
1111 ifcaps |= IFCAP_SW_TIMESTAMP;
1112 }
1113 if ((offload & IFNET_CSUM_PARTIAL)) {
1114 ifcaps |= IFCAP_CSUM_PARTIAL;
1115 }
1116 if ((offload & IFNET_CSUM_ZERO_INVERT)) {
1117 ifcaps |= IFCAP_CSUM_ZERO_INVERT;
1118 }
1119 if (ifcaps != 0) {
1120 if (set_both) {
1121 (void) ifnet_set_capabilities_supported(ifp: interface,
1122 new_caps: ifcaps, IFCAP_VALID);
1123 }
1124 (void) ifnet_set_capabilities_enabled(ifp: interface, new_caps: ifcaps,
1125 IFCAP_VALID);
1126 }
1127
1128 return 0;
1129}
1130
1131errno_t
1132ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
1133{
1134 return ifnet_set_offload_common(interface, offload, TRUE);
1135}
1136
1137errno_t
1138ifnet_set_offload_enabled(ifnet_t interface, ifnet_offload_t offload)
1139{
1140 return ifnet_set_offload_common(interface, offload, FALSE);
1141}
1142
1143ifnet_offload_t
1144ifnet_offload(ifnet_t interface)
1145{
1146 return (interface == NULL) ?
1147 0 : (interface->if_hwassist & offload_mask);
1148}
1149
1150errno_t
1151ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
1152{
1153 errno_t error = 0;
1154
1155 if (interface == NULL || mtuLen < interface->if_mtu) {
1156 return EINVAL;
1157 }
1158 if (mtuLen > IP_MAXPACKET) {
1159 return EINVAL;
1160 }
1161
1162 switch (family) {
1163 case AF_INET:
1164 if (interface->if_hwassist & IFNET_TSO_IPV4) {
1165 interface->if_tso_v4_mtu = mtuLen;
1166 } else {
1167 error = EINVAL;
1168 }
1169 break;
1170
1171 case AF_INET6:
1172 if (interface->if_hwassist & IFNET_TSO_IPV6) {
1173 interface->if_tso_v6_mtu = mtuLen;
1174 } else {
1175 error = EINVAL;
1176 }
1177 break;
1178
1179 default:
1180 error = EPROTONOSUPPORT;
1181 break;
1182 }
1183
1184 if (error == 0) {
1185 struct ifclassq *ifq = interface->if_snd;
1186 ASSERT(ifq != NULL);
1187 /* Inform all transmit queues about the new TSO MTU */
1188 IFCQ_LOCK(ifq);
1189 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_MTU);
1190 IFCQ_UNLOCK(ifq);
1191 }
1192
1193 return error;
1194}
1195
1196errno_t
1197ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
1198{
1199 errno_t error = 0;
1200
1201 if (interface == NULL || mtuLen == NULL) {
1202 return EINVAL;
1203 }
1204
1205 switch (family) {
1206 case AF_INET:
1207 if (interface->if_hwassist & IFNET_TSO_IPV4) {
1208 *mtuLen = interface->if_tso_v4_mtu;
1209 } else {
1210 error = EINVAL;
1211 }
1212 break;
1213
1214 case AF_INET6:
1215 if (interface->if_hwassist & IFNET_TSO_IPV6) {
1216 *mtuLen = interface->if_tso_v6_mtu;
1217 } else {
1218 error = EINVAL;
1219 }
1220 break;
1221
1222 default:
1223 error = EPROTONOSUPPORT;
1224 break;
1225 }
1226
1227 return error;
1228}
1229
1230errno_t
1231ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
1232{
1233 struct kev_msg ev_msg;
1234 struct net_event_data ev_data;
1235
1236 bzero(s: &ev_data, n: sizeof(struct net_event_data));
1237 bzero(s: &ev_msg, n: sizeof(struct kev_msg));
1238
1239 if (interface == NULL) {
1240 return EINVAL;
1241 }
1242
1243 /* Do not accept wacky values */
1244 if ((properties & mask) & ~IF_WAKE_VALID_FLAGS) {
1245 return EINVAL;
1246 }
1247
1248 if ((mask & IF_WAKE_ON_MAGIC_PACKET) != 0) {
1249 if ((properties & IF_WAKE_ON_MAGIC_PACKET) != 0) {
1250 if_set_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
1251 } else {
1252 if_clear_xflags(interface, IFXF_WAKE_ON_MAGIC_PACKET);
1253 }
1254 }
1255
1256 (void) ifnet_touch_lastchange(interface);
1257
1258 /* Notify application of the change */
1259 ev_msg.vendor_code = KEV_VENDOR_APPLE;
1260 ev_msg.kev_class = KEV_NETWORK_CLASS;
1261 ev_msg.kev_subclass = KEV_DL_SUBCLASS;
1262
1263 ev_msg.event_code = KEV_DL_WAKEFLAGS_CHANGED;
1264 strlcpy(dst: &ev_data.if_name[0], src: interface->if_name, IFNAMSIZ);
1265 ev_data.if_family = interface->if_family;
1266 ev_data.if_unit = (u_int32_t)interface->if_unit;
1267 ev_msg.dv[0].data_length = sizeof(struct net_event_data);
1268 ev_msg.dv[0].data_ptr = &ev_data;
1269 ev_msg.dv[1].data_length = 0;
1270 dlil_post_complete_msg(interface, &ev_msg);
1271
1272 return 0;
1273}
1274
1275u_int32_t
1276ifnet_get_wake_flags(ifnet_t interface)
1277{
1278 u_int32_t flags = 0;
1279
1280 if (interface == NULL) {
1281 return 0;
1282 }
1283
1284 if ((interface->if_xflags & IFXF_WAKE_ON_MAGIC_PACKET) != 0) {
1285 flags |= IF_WAKE_ON_MAGIC_PACKET;
1286 }
1287
1288 return flags;
1289}
1290
1291/*
1292 * Should MIB data store a copy?
1293 */
1294errno_t
1295ifnet_set_link_mib_data(ifnet_t interface, void *mibData, uint32_t mibLen)
1296{
1297 if (interface == NULL) {
1298 return EINVAL;
1299 }
1300
1301 ifnet_lock_exclusive(ifp: interface);
1302 interface->if_linkmib = (void*)mibData;
1303 interface->if_linkmiblen = mibLen;
1304 ifnet_lock_done(ifp: interface);
1305 return 0;
1306}
1307
1308errno_t
1309ifnet_get_link_mib_data(ifnet_t interface, void *mibData, uint32_t *mibLen)
1310{
1311 errno_t result = 0;
1312
1313 if (interface == NULL) {
1314 return EINVAL;
1315 }
1316
1317 ifnet_lock_shared(ifp: interface);
1318 if (*mibLen < interface->if_linkmiblen) {
1319 result = EMSGSIZE;
1320 }
1321 if (result == 0 && interface->if_linkmib == NULL) {
1322 result = ENOTSUP;
1323 }
1324
1325 if (result == 0) {
1326 *mibLen = interface->if_linkmiblen;
1327 bcopy(src: interface->if_linkmib, dst: mibData, n: *mibLen);
1328 }
1329 ifnet_lock_done(ifp: interface);
1330
1331 return result;
1332}
1333
1334uint32_t
1335ifnet_get_link_mib_data_length(ifnet_t interface)
1336{
1337 return (interface == NULL) ? 0 : interface->if_linkmiblen;
1338}
1339
1340errno_t
1341ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
1342 mbuf_t m, void *route, const struct sockaddr *dest)
1343{
1344 if (interface == NULL || protocol_family == 0 || m == NULL) {
1345 if (m != NULL) {
1346 mbuf_freem_list(mbuf: m);
1347 }
1348 return EINVAL;
1349 }
1350 return dlil_output(interface, protocol_family, m, route, dest, 0, NULL);
1351}
1352
1353errno_t
1354ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
1355{
1356 if (interface == NULL || m == NULL) {
1357 if (m != NULL) {
1358 mbuf_freem_list(mbuf: m);
1359 }
1360 return EINVAL;
1361 }
1362 return dlil_output(interface, protocol_family, m, NULL, NULL, 1, NULL);
1363}
1364
1365errno_t
1366ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
1367{
1368 if (interface == NULL) {
1369 return EINVAL;
1370 }
1371
1372 interface->if_mtu = mtu;
1373 return 0;
1374}
1375
1376u_int32_t
1377ifnet_mtu(ifnet_t interface)
1378{
1379 return (interface == NULL) ? 0 : interface->if_mtu;
1380}
1381
1382u_char
1383ifnet_type(ifnet_t interface)
1384{
1385 return (interface == NULL) ? 0 : interface->if_data.ifi_type;
1386}
1387
1388errno_t
1389ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
1390{
1391 if (interface == NULL) {
1392 return EINVAL;
1393 }
1394
1395 interface->if_data.ifi_addrlen = addrlen;
1396 return 0;
1397}
1398
1399u_char
1400ifnet_addrlen(ifnet_t interface)
1401{
1402 return (interface == NULL) ? 0 : interface->if_data.ifi_addrlen;
1403}
1404
1405errno_t
1406ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
1407{
1408 if (interface == NULL) {
1409 return EINVAL;
1410 }
1411
1412 interface->if_data.ifi_hdrlen = hdrlen;
1413 return 0;
1414}
1415
1416u_char
1417ifnet_hdrlen(ifnet_t interface)
1418{
1419 return (interface == NULL) ? 0 : interface->if_data.ifi_hdrlen;
1420}
1421
1422errno_t
1423ifnet_set_metric(ifnet_t interface, u_int32_t metric)
1424{
1425 if (interface == NULL) {
1426 return EINVAL;
1427 }
1428
1429 interface->if_data.ifi_metric = metric;
1430 return 0;
1431}
1432
1433u_int32_t
1434ifnet_metric(ifnet_t interface)
1435{
1436 return (interface == NULL) ? 0 : interface->if_data.ifi_metric;
1437}
1438
1439errno_t
1440ifnet_set_baudrate(struct ifnet *ifp, uint64_t baudrate)
1441{
1442 if (ifp == NULL) {
1443 return EINVAL;
1444 }
1445
1446 ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
1447 ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;
1448
1449 /* Pin if_baudrate to 32 bits until we can change the storage size */
1450 ifp->if_baudrate = (baudrate > UINT32_MAX) ? UINT32_MAX : (uint32_t)baudrate;
1451
1452 return 0;
1453}
1454
1455u_int64_t
1456ifnet_baudrate(struct ifnet *ifp)
1457{
1458 return (ifp == NULL) ? 0 : ifp->if_baudrate;
1459}
1460
1461errno_t
1462ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1463 struct if_bandwidths *input_bw)
1464{
1465 if (ifp == NULL) {
1466 return EINVAL;
1467 }
1468
1469 /* set input values first (if any), as output values depend on them */
1470 if (input_bw != NULL) {
1471 (void) ifnet_set_input_bandwidths(ifp, input_bw);
1472 }
1473
1474 if (output_bw != NULL) {
1475 (void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE);
1476 }
1477
1478 return 0;
1479}
1480
1481static void
1482ifnet_set_link_status_outbw(struct ifnet *ifp)
1483{
1484 struct if_wifi_status_v1 *sr;
1485 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1486 if (ifp->if_output_bw.eff_bw != 0) {
1487 sr->valid_bitmask |=
1488 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
1489 sr->ul_effective_bandwidth =
1490 ifp->if_output_bw.eff_bw > UINT32_MAX ?
1491 UINT32_MAX :
1492 (uint32_t)ifp->if_output_bw.eff_bw;
1493 }
1494 if (ifp->if_output_bw.max_bw != 0) {
1495 sr->valid_bitmask |=
1496 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
1497 sr->ul_max_bandwidth =
1498 ifp->if_output_bw.max_bw > UINT32_MAX ?
1499 UINT32_MAX :
1500 (uint32_t)ifp->if_output_bw.max_bw;
1501 }
1502}
1503
1504errno_t
1505ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw,
1506 boolean_t locked)
1507{
1508 struct if_bandwidths old_bw;
1509 struct ifclassq *ifq;
1510 u_int64_t br;
1511
1512 VERIFY(ifp != NULL && bw != NULL);
1513
1514 ifq = ifp->if_snd;
1515 if (!locked) {
1516 IFCQ_LOCK(ifq);
1517 }
1518 IFCQ_LOCK_ASSERT_HELD(ifq);
1519
1520 old_bw = ifp->if_output_bw;
1521 if (bw->eff_bw != 0) {
1522 ifp->if_output_bw.eff_bw = bw->eff_bw;
1523 }
1524 if (bw->max_bw != 0) {
1525 ifp->if_output_bw.max_bw = bw->max_bw;
1526 }
1527 if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw) {
1528 ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
1529 } else if (ifp->if_output_bw.eff_bw == 0) {
1530 ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
1531 }
1532
1533 /* Pin if_baudrate to 32 bits */
1534 br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
1535 if (br != 0) {
1536 ifp->if_baudrate = (br > UINT32_MAX) ? UINT32_MAX : (uint32_t)br;
1537 }
1538
1539 /* Adjust queue parameters if needed */
1540 if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
1541 old_bw.max_bw != ifp->if_output_bw.max_bw) {
1542 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH);
1543 }
1544
1545 if (!locked) {
1546 IFCQ_UNLOCK(ifq);
1547 }
1548
1549 /*
1550 * If this is a Wifi interface, update the values in
1551 * if_link_status structure also.
1552 */
1553 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1554 lck_rw_lock_exclusive(lck: &ifp->if_link_status_lock);
1555 ifnet_set_link_status_outbw(ifp);
1556 lck_rw_done(lck: &ifp->if_link_status_lock);
1557 }
1558
1559 return 0;
1560}
1561
1562static void
1563ifnet_set_link_status_inbw(struct ifnet *ifp)
1564{
1565 struct if_wifi_status_v1 *sr;
1566
1567 sr = &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
1568 if (ifp->if_input_bw.eff_bw != 0) {
1569 sr->valid_bitmask |=
1570 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
1571 sr->dl_effective_bandwidth =
1572 ifp->if_input_bw.eff_bw > UINT32_MAX ?
1573 UINT32_MAX :
1574 (uint32_t)ifp->if_input_bw.eff_bw;
1575 }
1576 if (ifp->if_input_bw.max_bw != 0) {
1577 sr->valid_bitmask |=
1578 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
1579 sr->dl_max_bandwidth = ifp->if_input_bw.max_bw > UINT32_MAX ?
1580 UINT32_MAX :
1581 (uint32_t)ifp->if_input_bw.max_bw;
1582 }
1583}
1584
1585errno_t
1586ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
1587{
1588 struct if_bandwidths old_bw;
1589
1590 VERIFY(ifp != NULL && bw != NULL);
1591
1592 old_bw = ifp->if_input_bw;
1593 if (bw->eff_bw != 0) {
1594 ifp->if_input_bw.eff_bw = bw->eff_bw;
1595 }
1596 if (bw->max_bw != 0) {
1597 ifp->if_input_bw.max_bw = bw->max_bw;
1598 }
1599 if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw) {
1600 ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
1601 } else if (ifp->if_input_bw.eff_bw == 0) {
1602 ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
1603 }
1604
1605 if (IFNET_IS_WIFI(ifp) && ifp->if_link_status != NULL) {
1606 lck_rw_lock_exclusive(lck: &ifp->if_link_status_lock);
1607 ifnet_set_link_status_inbw(ifp);
1608 lck_rw_done(lck: &ifp->if_link_status_lock);
1609 }
1610
1611 if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
1612 old_bw.max_bw != ifp->if_input_bw.max_bw) {
1613 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
1614 }
1615
1616 return 0;
1617}
1618
1619u_int64_t
1620ifnet_output_linkrate(struct ifnet *ifp)
1621{
1622 struct ifclassq *ifq = ifp->if_snd;
1623 u_int64_t rate;
1624
1625 IFCQ_LOCK_ASSERT_HELD(ifq);
1626
1627 rate = ifp->if_output_bw.eff_bw;
1628 if (IFCQ_TBR_IS_ENABLED(ifq)) {
1629 u_int64_t tbr_rate = ifq->ifcq_tbr.tbr_rate_raw;
1630 VERIFY(tbr_rate > 0);
1631 rate = MIN(rate, ifq->ifcq_tbr.tbr_rate_raw);
1632 }
1633
1634 return rate;
1635}
1636
1637u_int64_t
1638ifnet_input_linkrate(struct ifnet *ifp)
1639{
1640 return ifp->if_input_bw.eff_bw;
1641}
1642
1643errno_t
1644ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1645 struct if_bandwidths *input_bw)
1646{
1647 if (ifp == NULL) {
1648 return EINVAL;
1649 }
1650
1651 if (output_bw != NULL) {
1652 *output_bw = ifp->if_output_bw;
1653 }
1654 if (input_bw != NULL) {
1655 *input_bw = ifp->if_input_bw;
1656 }
1657
1658 return 0;
1659}
1660
1661errno_t
1662ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1663 struct if_latencies *input_lt)
1664{
1665 if (ifp == NULL) {
1666 return EINVAL;
1667 }
1668
1669 if (output_lt != NULL) {
1670 (void) ifnet_set_output_latencies(ifp, output_lt, FALSE);
1671 }
1672
1673 if (input_lt != NULL) {
1674 (void) ifnet_set_input_latencies(ifp, input_lt);
1675 }
1676
1677 return 0;
1678}
1679
1680errno_t
1681ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt,
1682 boolean_t locked)
1683{
1684 struct if_latencies old_lt;
1685 struct ifclassq *ifq;
1686
1687 VERIFY(ifp != NULL && lt != NULL);
1688
1689 ifq = ifp->if_snd;
1690 if (!locked) {
1691 IFCQ_LOCK(ifq);
1692 }
1693 IFCQ_LOCK_ASSERT_HELD(ifq);
1694
1695 old_lt = ifp->if_output_lt;
1696 if (lt->eff_lt != 0) {
1697 ifp->if_output_lt.eff_lt = lt->eff_lt;
1698 }
1699 if (lt->max_lt != 0) {
1700 ifp->if_output_lt.max_lt = lt->max_lt;
1701 }
1702 if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt) {
1703 ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
1704 } else if (ifp->if_output_lt.eff_lt == 0) {
1705 ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
1706 }
1707
1708 /* Adjust queue parameters if needed */
1709 if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
1710 old_lt.max_lt != ifp->if_output_lt.max_lt) {
1711 ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY);
1712 }
1713
1714 if (!locked) {
1715 IFCQ_UNLOCK(ifq);
1716 }
1717
1718 return 0;
1719}
1720
1721errno_t
1722ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
1723{
1724 struct if_latencies old_lt;
1725
1726 VERIFY(ifp != NULL && lt != NULL);
1727
1728 old_lt = ifp->if_input_lt;
1729 if (lt->eff_lt != 0) {
1730 ifp->if_input_lt.eff_lt = lt->eff_lt;
1731 }
1732 if (lt->max_lt != 0) {
1733 ifp->if_input_lt.max_lt = lt->max_lt;
1734 }
1735 if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt) {
1736 ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
1737 } else if (ifp->if_input_lt.eff_lt == 0) {
1738 ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
1739 }
1740
1741 if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
1742 old_lt.max_lt != ifp->if_input_lt.max_lt) {
1743 ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
1744 }
1745
1746 return 0;
1747}
1748
1749errno_t
1750ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1751 struct if_latencies *input_lt)
1752{
1753 if (ifp == NULL) {
1754 return EINVAL;
1755 }
1756
1757 if (output_lt != NULL) {
1758 *output_lt = ifp->if_output_lt;
1759 }
1760 if (input_lt != NULL) {
1761 *input_lt = ifp->if_input_lt;
1762 }
1763
1764 return 0;
1765}
1766
1767errno_t
1768ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1769{
1770 errno_t err;
1771
1772 if (ifp == NULL) {
1773 return EINVAL;
1774 } else if (!ifnet_is_attached(ifp, refio: 1)) {
1775 return ENXIO;
1776 }
1777
1778#if SKYWALK
1779 if (SKYWALK_CAPABLE(ifp)) {
1780 err = netif_rxpoll_set_params(ifp, p, FALSE);
1781 ifnet_decr_iorefcnt(ifp);
1782 return err;
1783 }
1784#endif /* SKYWALK */
1785 err = dlil_rxpoll_set_params(ifp, p, FALSE);
1786
1787 /* Release the io ref count */
1788 ifnet_decr_iorefcnt(ifp);
1789
1790 return err;
1791}
1792
1793errno_t
1794ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1795{
1796 errno_t err;
1797
1798 if (ifp == NULL || p == NULL) {
1799 return EINVAL;
1800 } else if (!ifnet_is_attached(ifp, refio: 1)) {
1801 return ENXIO;
1802 }
1803
1804 err = dlil_rxpoll_get_params(ifp, p);
1805
1806 /* Release the io ref count */
1807 ifnet_decr_iorefcnt(ifp);
1808
1809 return err;
1810}
1811
1812errno_t
1813ifnet_stat_increment(struct ifnet *ifp,
1814 const struct ifnet_stat_increment_param *s)
1815{
1816 if (ifp == NULL) {
1817 return EINVAL;
1818 }
1819
1820 if (s->packets_in != 0) {
1821 os_atomic_add(&ifp->if_data.ifi_ipackets, s->packets_in, relaxed);
1822 }
1823 if (s->bytes_in != 0) {
1824 os_atomic_add(&ifp->if_data.ifi_ibytes, s->bytes_in, relaxed);
1825 }
1826 if (s->errors_in != 0) {
1827 os_atomic_add(&ifp->if_data.ifi_ierrors, s->errors_in, relaxed);
1828 }
1829
1830 if (s->packets_out != 0) {
1831 os_atomic_add(&ifp->if_data.ifi_opackets, s->packets_out, relaxed);
1832 }
1833 if (s->bytes_out != 0) {
1834 os_atomic_add(&ifp->if_data.ifi_obytes, s->bytes_out, relaxed);
1835 }
1836 if (s->errors_out != 0) {
1837 os_atomic_add(&ifp->if_data.ifi_oerrors, s->errors_out, relaxed);
1838 }
1839
1840 if (s->collisions != 0) {
1841 os_atomic_add(&ifp->if_data.ifi_collisions, s->collisions, relaxed);
1842 }
1843 if (s->dropped != 0) {
1844 os_atomic_add(&ifp->if_data.ifi_iqdrops, s->dropped, relaxed);
1845 }
1846
1847 /* Touch the last change time. */
1848 TOUCHLASTCHANGE(&ifp->if_lastchange);
1849
1850 if (ifp->if_data_threshold != 0) {
1851 ifnet_notify_data_threshold(ifp);
1852 }
1853
1854 return 0;
1855}
1856
1857errno_t
1858ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
1859 u_int32_t bytes_in, u_int32_t errors_in)
1860{
1861 if (ifp == NULL) {
1862 return EINVAL;
1863 }
1864
1865 if (packets_in != 0) {
1866 os_atomic_add(&ifp->if_data.ifi_ipackets, packets_in, relaxed);
1867 }
1868 if (bytes_in != 0) {
1869 os_atomic_add(&ifp->if_data.ifi_ibytes, bytes_in, relaxed);
1870 }
1871 if (errors_in != 0) {
1872 os_atomic_add(&ifp->if_data.ifi_ierrors, errors_in, relaxed);
1873 }
1874
1875 TOUCHLASTCHANGE(&ifp->if_lastchange);
1876
1877 if (ifp->if_data_threshold != 0) {
1878 ifnet_notify_data_threshold(ifp);
1879 }
1880
1881 return 0;
1882}
1883
1884errno_t
1885ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
1886 u_int32_t bytes_out, u_int32_t errors_out)
1887{
1888 if (ifp == NULL) {
1889 return EINVAL;
1890 }
1891
1892 if (packets_out != 0) {
1893 os_atomic_add(&ifp->if_data.ifi_opackets, packets_out, relaxed);
1894 }
1895 if (bytes_out != 0) {
1896 os_atomic_add(&ifp->if_data.ifi_obytes, bytes_out, relaxed);
1897 }
1898 if (errors_out != 0) {
1899 os_atomic_add(&ifp->if_data.ifi_oerrors, errors_out, relaxed);
1900 }
1901
1902 TOUCHLASTCHANGE(&ifp->if_lastchange);
1903
1904 if (ifp->if_data_threshold != 0) {
1905 ifnet_notify_data_threshold(ifp);
1906 }
1907
1908 return 0;
1909}
1910
1911errno_t
1912ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
1913{
1914 if (ifp == NULL) {
1915 return EINVAL;
1916 }
1917
1918 os_atomic_store(&ifp->if_data.ifi_ipackets, s->packets_in, release);
1919 os_atomic_store(&ifp->if_data.ifi_ibytes, s->bytes_in, release);
1920 os_atomic_store(&ifp->if_data.ifi_imcasts, s->multicasts_in, release);
1921 os_atomic_store(&ifp->if_data.ifi_ierrors, s->errors_in, release);
1922
1923 os_atomic_store(&ifp->if_data.ifi_opackets, s->packets_out, release);
1924 os_atomic_store(&ifp->if_data.ifi_obytes, s->bytes_out, release);
1925 os_atomic_store(&ifp->if_data.ifi_omcasts, s->multicasts_out, release);
1926 os_atomic_store(&ifp->if_data.ifi_oerrors, s->errors_out, release);
1927
1928 os_atomic_store(&ifp->if_data.ifi_collisions, s->collisions, release);
1929 os_atomic_store(&ifp->if_data.ifi_iqdrops, s->dropped, release);
1930 os_atomic_store(&ifp->if_data.ifi_noproto, s->no_protocol, release);
1931
1932 /* Touch the last change time. */
1933 TOUCHLASTCHANGE(&ifp->if_lastchange);
1934
1935 if (ifp->if_data_threshold != 0) {
1936 ifnet_notify_data_threshold(ifp);
1937 }
1938
1939 return 0;
1940}
1941
1942errno_t
1943ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
1944{
1945 if (ifp == NULL) {
1946 return EINVAL;
1947 }
1948
1949 s->packets_in = os_atomic_load(&ifp->if_data.ifi_ipackets, relaxed);
1950 s->bytes_in = os_atomic_load(&ifp->if_data.ifi_ibytes, relaxed);
1951 s->multicasts_in = os_atomic_load(&ifp->if_data.ifi_imcasts, relaxed);
1952 s->errors_in = os_atomic_load(&ifp->if_data.ifi_ierrors, relaxed);
1953
1954 s->packets_out = os_atomic_load(&ifp->if_data.ifi_opackets, relaxed);
1955 s->bytes_out = os_atomic_load(&ifp->if_data.ifi_obytes, relaxed);
1956 s->multicasts_out = os_atomic_load(&ifp->if_data.ifi_omcasts, relaxed);
1957 s->errors_out = os_atomic_load(&ifp->if_data.ifi_oerrors, relaxed);
1958
1959 s->collisions = os_atomic_load(&ifp->if_data.ifi_collisions, relaxed);
1960 s->dropped = os_atomic_load(&ifp->if_data.ifi_iqdrops, relaxed);
1961 s->no_protocol = os_atomic_load(&ifp->if_data.ifi_noproto, relaxed);
1962
1963 if (ifp->if_data_threshold != 0) {
1964 ifnet_notify_data_threshold(ifp);
1965 }
1966
1967 return 0;
1968}
1969
1970errno_t
1971ifnet_touch_lastchange(ifnet_t interface)
1972{
1973 if (interface == NULL) {
1974 return EINVAL;
1975 }
1976
1977 TOUCHLASTCHANGE(&interface->if_lastchange);
1978
1979 return 0;
1980}
1981
1982errno_t
1983ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
1984{
1985 if (interface == NULL) {
1986 return EINVAL;
1987 }
1988
1989 *last_change = interface->if_data.ifi_lastchange;
1990 /* Crude conversion from uptime to calendar time */
1991 last_change->tv_sec += boottime_sec();
1992
1993 return 0;
1994}
1995
1996errno_t
1997ifnet_touch_lastupdown(ifnet_t interface)
1998{
1999 if (interface == NULL) {
2000 return EINVAL;
2001 }
2002
2003 TOUCHLASTCHANGE(&interface->if_lastupdown);
2004
2005 return 0;
2006}
2007
2008errno_t
2009ifnet_updown_delta(ifnet_t interface, struct timeval *updown_delta)
2010{
2011 if (interface == NULL) {
2012 return EINVAL;
2013 }
2014
2015 /* Calculate the delta */
2016 updown_delta->tv_sec = (time_t)net_uptime();
2017 if (updown_delta->tv_sec > interface->if_data.ifi_lastupdown.tv_sec) {
2018 updown_delta->tv_sec -= interface->if_data.ifi_lastupdown.tv_sec;
2019 } else {
2020 updown_delta->tv_sec = 0;
2021 }
2022 updown_delta->tv_usec = 0;
2023
2024 return 0;
2025}
2026
2027errno_t
2028ifnet_get_address_list(ifnet_t interface, ifaddr_t **addresses)
2029{
2030 return addresses == NULL ? EINVAL :
2031 ifnet_get_address_list_family(interface, addresses, family: 0);
2032}
2033
2034struct ifnet_addr_list {
2035 SLIST_ENTRY(ifnet_addr_list) ifal_le;
2036 struct ifaddr *ifal_ifa;
2037};
2038
2039errno_t
2040ifnet_get_address_list_family(ifnet_t interface, ifaddr_t **addresses,
2041 sa_family_t family)
2042{
2043 return ifnet_get_address_list_family_internal(interface, addresses,
2044 family, 0, Z_WAITOK, 0);
2045}
2046
2047errno_t
2048ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t **addresses)
2049{
2050 return addresses == NULL ? EINVAL :
2051 ifnet_get_address_list_family_internal(interface, addresses,
2052 0, 0, Z_WAITOK, 1);
2053}
2054
2055extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);
2056
2057extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);
2058
2059__private_extern__ errno_t
2060ifnet_get_address_list_family_internal(ifnet_t interface, ifaddr_t **addresses,
2061 sa_family_t family, int detached, int how, int return_inuse_addrs)
2062{
2063 SLIST_HEAD(, ifnet_addr_list) ifal_head;
2064 struct ifnet_addr_list *ifal, *ifal_tmp;
2065 struct ifnet *ifp;
2066 int count = 0;
2067 errno_t err = 0;
2068 int usecount = 0;
2069 int index = 0;
2070
2071 SLIST_INIT(&ifal_head);
2072
2073 if (addresses == NULL) {
2074 err = EINVAL;
2075 goto done;
2076 }
2077 *addresses = NULL;
2078
2079 if (detached) {
2080 /*
2081 * Interface has been detached, so skip the lookup
2082 * at ifnet_head and go directly to inner loop.
2083 */
2084 ifp = interface;
2085 if (ifp == NULL) {
2086 err = EINVAL;
2087 goto done;
2088 }
2089 goto one;
2090 }
2091
2092 ifnet_head_lock_shared();
2093 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2094 if (interface != NULL && ifp != interface) {
2095 continue;
2096 }
2097one:
2098 ifnet_lock_shared(ifp);
2099 if (interface == NULL || interface == ifp) {
2100 struct ifaddr *ifa;
2101 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
2102 IFA_LOCK(ifa);
2103 if (family != 0 &&
2104 ifa->ifa_addr->sa_family != family) {
2105 IFA_UNLOCK(ifa);
2106 continue;
2107 }
2108 ifal = kalloc_type(struct ifnet_addr_list, how);
2109 if (ifal == NULL) {
2110 IFA_UNLOCK(ifa);
2111 ifnet_lock_done(ifp);
2112 if (!detached) {
2113 ifnet_head_done();
2114 }
2115 err = ENOMEM;
2116 goto done;
2117 }
2118 ifal->ifal_ifa = ifa;
2119 ifa_addref(ifa);
2120 SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
2121 ++count;
2122 IFA_UNLOCK(ifa);
2123 }
2124 }
2125 ifnet_lock_done(ifp);
2126 if (detached) {
2127 break;
2128 }
2129 }
2130 if (!detached) {
2131 ifnet_head_done();
2132 }
2133
2134 if (count == 0) {
2135 err = ENXIO;
2136 goto done;
2137 }
2138
2139 *addresses = kalloc_type(ifaddr_t, count + 1, how | Z_ZERO);
2140 if (*addresses == NULL) {
2141 err = ENOMEM;
2142 goto done;
2143 }
2144
2145done:
2146 SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
2147 SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
2148 if (err == 0) {
2149 if (return_inuse_addrs) {
2150 usecount = tcp_find_anypcb_byaddr(ifa: ifal->ifal_ifa);
2151 usecount += udp_find_anypcb_byaddr(ifa: ifal->ifal_ifa);
2152 if (usecount) {
2153 (*addresses)[index] = ifal->ifal_ifa;
2154 index++;
2155 } else {
2156 ifa_remref(ifa: ifal->ifal_ifa);
2157 }
2158 } else {
2159 (*addresses)[--count] = ifal->ifal_ifa;
2160 }
2161 } else {
2162 ifa_remref(ifa: ifal->ifal_ifa);
2163 }
2164 kfree_type(struct ifnet_addr_list, ifal);
2165 }
2166
2167 VERIFY(err == 0 || *addresses == NULL);
2168 if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
2169 VERIFY(return_inuse_addrs == 1);
2170 kfree_type(ifaddr_t, count + 1, *addresses);
2171 err = ENXIO;
2172 }
2173 return err;
2174}
2175
2176void
2177ifnet_free_address_list(ifaddr_t *addresses)
2178{
2179 int i;
2180
2181 if (addresses == NULL) {
2182 return;
2183 }
2184
2185 for (i = 0; addresses[i] != NULL; i++) {
2186 ifa_remref(ifa: addresses[i]);
2187 }
2188
2189 kfree_type(ifaddr_t, i + 1, addresses);
2190}
2191
2192void *
2193ifnet_lladdr(ifnet_t interface)
2194{
2195 struct ifaddr *ifa;
2196 void *lladdr;
2197
2198 if (interface == NULL) {
2199 return NULL;
2200 }
2201
2202 /*
2203 * if_lladdr points to the permanent link address of
2204 * the interface and it never gets deallocated; internal
2205 * code should simply use IF_LLADDR() for performance.
2206 */
2207 ifa = interface->if_lladdr;
2208 IFA_LOCK_SPIN(ifa);
2209 lladdr = LLADDR(SDL((void *)ifa->ifa_addr));
2210 IFA_UNLOCK(ifa);
2211
2212 return lladdr;
2213}
2214
2215errno_t
2216ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t buffer_len,
2217 size_t *out_len)
2218{
2219 if (interface == NULL || addr == NULL || out_len == NULL) {
2220 return EINVAL;
2221 }
2222
2223 *out_len = interface->if_broadcast.length;
2224
2225 if (buffer_len < interface->if_broadcast.length) {
2226 return EMSGSIZE;
2227 }
2228
2229 if (interface->if_broadcast.length == 0) {
2230 return ENXIO;
2231 }
2232
2233 if (interface->if_broadcast.length <=
2234 sizeof(interface->if_broadcast.u.buffer)) {
2235 bcopy(src: interface->if_broadcast.u.buffer, dst: addr,
2236 n: interface->if_broadcast.length);
2237 } else {
2238 bcopy(src: interface->if_broadcast.u.ptr, dst: addr,
2239 n: interface->if_broadcast.length);
2240 }
2241
2242 return 0;
2243}
2244
2245static errno_t
2246ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *lladdr,
2247 size_t lladdr_len, kauth_cred_t *credp)
2248{
2249 const u_int8_t *bytes;
2250 size_t bytes_len;
2251 struct ifaddr *ifa;
2252 uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
2253 errno_t error = 0;
2254
2255 /*
2256 * Make sure to accomodate the largest possible
2257 * size of SA(if_lladdr)->sa_len.
2258 */
2259 _CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));
2260
2261 if (interface == NULL || lladdr == NULL) {
2262 return EINVAL;
2263 }
2264
2265 ifa = interface->if_lladdr;
2266 IFA_LOCK_SPIN(ifa);
2267 bcopy(src: ifa->ifa_addr, dst: &sdlbuf, SDL(ifa->ifa_addr)->sdl_len);
2268 IFA_UNLOCK(ifa);
2269
2270 bytes = dlil_ifaddr_bytes(SDL(&sdlbuf), &bytes_len, credp);
2271 if (bytes_len != lladdr_len) {
2272 bzero(s: lladdr, n: lladdr_len);
2273 error = EMSGSIZE;
2274 } else {
2275 bcopy(src: bytes, dst: lladdr, n: bytes_len);
2276 }
2277
2278 return error;
2279}
2280
2281errno_t
2282ifnet_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
2283{
2284 return ifnet_lladdr_copy_bytes_internal(interface, lladdr, lladdr_len: length,
2285 NULL);
2286}
2287
2288errno_t
2289ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
2290{
2291#if CONFIG_MACF
2292 kauth_cred_t cred;
2293 net_thread_marks_t marks;
2294#endif
2295 kauth_cred_t *credp;
2296 errno_t error;
2297
2298#if CONFIG_MACF
2299 marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
2300 cred = current_cached_proc_cred(PROC_NULL);
2301 credp = &cred;
2302#else
2303 credp = NULL;
2304#endif
2305
2306 error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, lladdr_len: length,
2307 credp);
2308
2309#if CONFIG_MACF
2310 net_thread_marks_pop(marks);
2311#endif
2312
2313 return error;
2314}
2315
2316static errno_t
2317ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr,
2318 size_t lladdr_len, u_char new_type, int apply_type)
2319{
2320 struct ifaddr *ifa;
2321 errno_t error = 0;
2322
2323 if (interface == NULL) {
2324 return EINVAL;
2325 }
2326
2327 ifnet_head_lock_shared();
2328 ifnet_lock_exclusive(ifp: interface);
2329 if (lladdr_len != 0 &&
2330 (lladdr_len != interface->if_addrlen || lladdr == 0)) {
2331 ifnet_lock_done(ifp: interface);
2332 ifnet_head_done();
2333 return EINVAL;
2334 }
2335 ifa = ifnet_addrs[interface->if_index - 1];
2336 if (ifa != NULL) {
2337 struct sockaddr_dl *sdl;
2338
2339 IFA_LOCK_SPIN(ifa);
2340 sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
2341 if (lladdr_len != 0) {
2342 bcopy(src: lladdr, LLADDR(sdl), n: lladdr_len);
2343 } else {
2344 bzero(LLADDR(sdl), n: interface->if_addrlen);
2345 }
2346 /* lladdr_len-check with if_addrlen makes sure it fits in u_char */
2347 sdl->sdl_alen = (u_char)lladdr_len;
2348
2349 if (apply_type) {
2350 sdl->sdl_type = new_type;
2351 }
2352 IFA_UNLOCK(ifa);
2353 } else {
2354 error = ENXIO;
2355 }
2356 ifnet_lock_done(ifp: interface);
2357 ifnet_head_done();
2358
2359 /* Generate a kernel event */
2360 if (error == 0) {
2361 intf_event_enqueue_nwk_wq_entry(ifp: interface, NULL,
2362 intf_event_code: INTF_EVENT_CODE_LLADDR_UPDATE);
2363 dlil_post_msg(interface, KEV_DL_SUBCLASS,
2364 KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0, FALSE);
2365 }
2366
2367 return error;
2368}
2369
2370errno_t
2371ifnet_set_lladdr(ifnet_t interface, const void* lladdr, size_t lladdr_len)
2372{
2373 return ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, new_type: 0, apply_type: 0);
2374}
2375
2376errno_t
2377ifnet_set_lladdr_and_type(ifnet_t interface, const void* lladdr,
2378 size_t lladdr_len, u_char type)
2379{
2380 return ifnet_set_lladdr_internal(interface, lladdr,
2381 lladdr_len, new_type: type, apply_type: 1);
2382}
2383
2384errno_t
2385ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
2386 ifmultiaddr_t *ifmap)
2387{
2388 if (interface == NULL || maddr == NULL) {
2389 return EINVAL;
2390 }
2391
2392 /* Don't let users screw up protocols' entries. */
2393 switch (maddr->sa_family) {
2394 case AF_LINK: {
2395 const struct sockaddr_dl *sdl =
2396 (const struct sockaddr_dl *)(uintptr_t)maddr;
2397 if (sdl->sdl_len < sizeof(struct sockaddr_dl) ||
2398 (sdl->sdl_nlen + sdl->sdl_alen + sdl->sdl_slen +
2399 offsetof(struct sockaddr_dl, sdl_data) > sdl->sdl_len)) {
2400 return EINVAL;
2401 }
2402 break;
2403 }
2404 case AF_UNSPEC:
2405 if (maddr->sa_len < ETHER_ADDR_LEN +
2406 offsetof(struct sockaddr, sa_data)) {
2407 return EINVAL;
2408 }
2409 break;
2410 default:
2411 return EINVAL;
2412 }
2413
2414 return if_addmulti_anon(interface, maddr, ifmap);
2415}
2416
2417errno_t
2418ifnet_remove_multicast(ifmultiaddr_t ifma)
2419{
2420 struct sockaddr *maddr;
2421
2422 if (ifma == NULL) {
2423 return EINVAL;
2424 }
2425
2426 maddr = ifma->ifma_addr;
2427 /* Don't let users screw up protocols' entries. */
2428 if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK) {
2429 return EINVAL;
2430 }
2431
2432 return if_delmulti_anon(ifma->ifma_ifp, maddr);
2433}
2434
2435errno_t
2436ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses)
2437{
2438 int count = 0;
2439 int cmax = 0;
2440 struct ifmultiaddr *addr;
2441
2442 if (ifp == NULL || addresses == NULL) {
2443 return EINVAL;
2444 }
2445
2446 ifnet_lock_shared(ifp);
2447 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2448 cmax++;
2449 }
2450
2451 *addresses = kalloc_type(ifmultiaddr_t, cmax + 1, Z_WAITOK);
2452 if (*addresses == NULL) {
2453 ifnet_lock_done(ifp);
2454 return ENOMEM;
2455 }
2456
2457 LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
2458 if (count + 1 > cmax) {
2459 break;
2460 }
2461 (*addresses)[count] = (ifmultiaddr_t)addr;
2462 ifmaddr_reference(ifmaddr: (*addresses)[count]);
2463 count++;
2464 }
2465 (*addresses)[cmax] = NULL;
2466 ifnet_lock_done(ifp);
2467
2468 return 0;
2469}
2470
2471void
2472ifnet_free_multicast_list(ifmultiaddr_t *addresses)
2473{
2474 int i;
2475
2476 if (addresses == NULL) {
2477 return;
2478 }
2479
2480 for (i = 0; addresses[i] != NULL; i++) {
2481 ifmaddr_release(ifmaddr: addresses[i]);
2482 }
2483
2484 kfree_type(ifmultiaddr_t, i + 1, addresses);
2485}
2486
2487errno_t
2488ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
2489{
2490 struct ifnet *ifp;
2491 size_t namelen;
2492
2493 if (ifname == NULL) {
2494 return EINVAL;
2495 }
2496
2497 namelen = strlen(s: ifname);
2498
2499 *ifpp = NULL;
2500
2501 ifnet_head_lock_shared();
2502 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2503 struct ifaddr *ifa;
2504 struct sockaddr_dl *ll_addr;
2505
2506 ifa = ifnet_addrs[ifp->if_index - 1];
2507 if (ifa == NULL) {
2508 continue;
2509 }
2510
2511 IFA_LOCK(ifa);
2512 ll_addr = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
2513
2514 if (namelen == ll_addr->sdl_nlen && strncmp(s1: ll_addr->sdl_data,
2515 s2: ifname, n: ll_addr->sdl_nlen) == 0) {
2516 IFA_UNLOCK(ifa);
2517 *ifpp = ifp;
2518 ifnet_reference(ifp: *ifpp);
2519 break;
2520 }
2521 IFA_UNLOCK(ifa);
2522 }
2523 ifnet_head_done();
2524
2525 return (ifp == NULL) ? ENXIO : 0;
2526}
2527
2528errno_t
2529ifnet_list_get(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
2530{
2531 return ifnet_list_get_common(family, FALSE, list, count);
2532}
2533
2534__private_extern__ errno_t
2535ifnet_list_get_all(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
2536{
2537 return ifnet_list_get_common(family, TRUE, list, count);
2538}
2539
2540struct ifnet_list {
2541 SLIST_ENTRY(ifnet_list) ifl_le;
2542 struct ifnet *ifl_ifp;
2543};
2544
2545static errno_t
2546ifnet_list_get_common(ifnet_family_t family, boolean_t get_all, ifnet_t **list,
2547 u_int32_t *count)
2548{
2549#pragma unused(get_all)
2550 SLIST_HEAD(, ifnet_list) ifl_head;
2551 struct ifnet_list *ifl, *ifl_tmp;
2552 struct ifnet *ifp;
2553 int cnt = 0;
2554 errno_t err = 0;
2555
2556 SLIST_INIT(&ifl_head);
2557
2558 if (list == NULL || count == NULL) {
2559 err = EINVAL;
2560 goto done;
2561 }
2562 *count = 0;
2563 *list = NULL;
2564
2565 ifnet_head_lock_shared();
2566 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2567 if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
2568 ifl = kalloc_type(struct ifnet_list, Z_NOWAIT);
2569 if (ifl == NULL) {
2570 ifnet_head_done();
2571 err = ENOMEM;
2572 goto done;
2573 }
2574 ifl->ifl_ifp = ifp;
2575 ifnet_reference(ifp);
2576 SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
2577 ++cnt;
2578 }
2579 }
2580 ifnet_head_done();
2581
2582 if (cnt == 0) {
2583 err = ENXIO;
2584 goto done;
2585 }
2586
2587 *list = kalloc_type(ifnet_t, cnt + 1, Z_WAITOK | Z_ZERO);
2588 if (*list == NULL) {
2589 err = ENOMEM;
2590 goto done;
2591 }
2592 *count = cnt;
2593
2594done:
2595 SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
2596 SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
2597 if (err == 0) {
2598 (*list)[--cnt] = ifl->ifl_ifp;
2599 } else {
2600 ifnet_release(ifp: ifl->ifl_ifp);
2601 }
2602 kfree_type(struct ifnet_list, ifl);
2603 }
2604
2605 return err;
2606}
2607
2608void
2609ifnet_list_free(ifnet_t *interfaces)
2610{
2611 int i;
2612
2613 if (interfaces == NULL) {
2614 return;
2615 }
2616
2617 for (i = 0; interfaces[i]; i++) {
2618 ifnet_release(ifp: interfaces[i]);
2619 }
2620
2621 kfree_type(ifnet_t, i + 1, interfaces);
2622}
2623
2624/*************************************************************************/
2625/* ifaddr_t accessors */
2626/*************************************************************************/
2627
2628errno_t
2629ifaddr_reference(ifaddr_t ifa)
2630{
2631 if (ifa == NULL) {
2632 return EINVAL;
2633 }
2634
2635 ifa_addref(ifa);
2636 return 0;
2637}
2638
2639errno_t
2640ifaddr_release(ifaddr_t ifa)
2641{
2642 if (ifa == NULL) {
2643 return EINVAL;
2644 }
2645
2646 ifa_remref(ifa);
2647 return 0;
2648}
2649
2650sa_family_t
2651ifaddr_address_family(ifaddr_t ifa)
2652{
2653 sa_family_t family = 0;
2654
2655 if (ifa != NULL) {
2656 IFA_LOCK_SPIN(ifa);
2657 if (ifa->ifa_addr != NULL) {
2658 family = ifa->ifa_addr->sa_family;
2659 }
2660 IFA_UNLOCK(ifa);
2661 }
2662 return family;
2663}
2664
2665errno_t
2666ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2667{
2668 u_int32_t copylen;
2669
2670 if (ifa == NULL || out_addr == NULL) {
2671 return EINVAL;
2672 }
2673
2674 IFA_LOCK_SPIN(ifa);
2675 if (ifa->ifa_addr == NULL) {
2676 IFA_UNLOCK(ifa);
2677 return ENOTSUP;
2678 }
2679
2680 copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
2681 ifa->ifa_addr->sa_len : addr_size;
2682 bcopy(src: ifa->ifa_addr, dst: out_addr, n: copylen);
2683
2684 if (ifa->ifa_addr->sa_len > addr_size) {
2685 IFA_UNLOCK(ifa);
2686 return EMSGSIZE;
2687 }
2688
2689 IFA_UNLOCK(ifa);
2690 return 0;
2691}
2692
2693errno_t
2694ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2695{
2696 u_int32_t copylen;
2697
2698 if (ifa == NULL || out_addr == NULL) {
2699 return EINVAL;
2700 }
2701
2702 IFA_LOCK_SPIN(ifa);
2703 if (ifa->ifa_dstaddr == NULL) {
2704 IFA_UNLOCK(ifa);
2705 return ENOTSUP;
2706 }
2707
2708 copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
2709 ifa->ifa_dstaddr->sa_len : addr_size;
2710 bcopy(src: ifa->ifa_dstaddr, dst: out_addr, n: copylen);
2711
2712 if (ifa->ifa_dstaddr->sa_len > addr_size) {
2713 IFA_UNLOCK(ifa);
2714 return EMSGSIZE;
2715 }
2716
2717 IFA_UNLOCK(ifa);
2718 return 0;
2719}
2720
2721errno_t
2722ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
2723{
2724 u_int32_t copylen;
2725
2726 if (ifa == NULL || out_addr == NULL) {
2727 return EINVAL;
2728 }
2729
2730 IFA_LOCK_SPIN(ifa);
2731 if (ifa->ifa_netmask == NULL) {
2732 IFA_UNLOCK(ifa);
2733 return ENOTSUP;
2734 }
2735
2736 copylen = addr_size >= ifa->ifa_netmask->sa_len ?
2737 ifa->ifa_netmask->sa_len : addr_size;
2738 bcopy(src: ifa->ifa_netmask, dst: out_addr, n: copylen);
2739
2740 if (ifa->ifa_netmask->sa_len > addr_size) {
2741 IFA_UNLOCK(ifa);
2742 return EMSGSIZE;
2743 }
2744
2745 IFA_UNLOCK(ifa);
2746 return 0;
2747}
2748
2749ifnet_t
2750ifaddr_ifnet(ifaddr_t ifa)
2751{
2752 struct ifnet *ifp;
2753
2754 if (ifa == NULL) {
2755 return NULL;
2756 }
2757
2758 /* ifa_ifp is set once at creation time; it is never changed */
2759 ifp = ifa->ifa_ifp;
2760
2761 return ifp;
2762}
2763
2764ifaddr_t
2765ifaddr_withaddr(const struct sockaddr *address)
2766{
2767 if (address == NULL) {
2768 return NULL;
2769 }
2770
2771 return ifa_ifwithaddr(address);
2772}
2773
2774ifaddr_t
2775ifaddr_withdstaddr(const struct sockaddr *address)
2776{
2777 if (address == NULL) {
2778 return NULL;
2779 }
2780
2781 return ifa_ifwithdstaddr(address);
2782}
2783
2784ifaddr_t
2785ifaddr_withnet(const struct sockaddr *net)
2786{
2787 if (net == NULL) {
2788 return NULL;
2789 }
2790
2791 return ifa_ifwithnet(net);
2792}
2793
2794ifaddr_t
2795ifaddr_withroute(int flags, const struct sockaddr *destination,
2796 const struct sockaddr *gateway)
2797{
2798 if (destination == NULL || gateway == NULL) {
2799 return NULL;
2800 }
2801
2802 return ifa_ifwithroute(flags, destination, gateway);
2803}
2804
2805ifaddr_t
2806ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
2807{
2808 if (addr == NULL || interface == NULL) {
2809 return NULL;
2810 }
2811
2812 return ifaof_ifpforaddr_select(addr, interface);
2813}
2814
2815errno_t
2816ifmaddr_reference(ifmultiaddr_t ifmaddr)
2817{
2818 if (ifmaddr == NULL) {
2819 return EINVAL;
2820 }
2821
2822 IFMA_ADDREF(ifmaddr);
2823 return 0;
2824}
2825
2826errno_t
2827ifmaddr_release(ifmultiaddr_t ifmaddr)
2828{
2829 if (ifmaddr == NULL) {
2830 return EINVAL;
2831 }
2832
2833 IFMA_REMREF(ifmaddr);
2834 return 0;
2835}
2836
2837errno_t
2838ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2839 u_int32_t addr_size)
2840{
2841 u_int32_t copylen;
2842
2843 if (ifma == NULL || out_addr == NULL) {
2844 return EINVAL;
2845 }
2846
2847 IFMA_LOCK(ifma);
2848 if (ifma->ifma_addr == NULL) {
2849 IFMA_UNLOCK(ifma);
2850 return ENOTSUP;
2851 }
2852
2853 copylen = (addr_size >= ifma->ifma_addr->sa_len ?
2854 ifma->ifma_addr->sa_len : addr_size);
2855 bcopy(src: ifma->ifma_addr, dst: out_addr, n: copylen);
2856
2857 if (ifma->ifma_addr->sa_len > addr_size) {
2858 IFMA_UNLOCK(ifma);
2859 return EMSGSIZE;
2860 }
2861 IFMA_UNLOCK(ifma);
2862 return 0;
2863}
2864
2865errno_t
2866ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
2867 u_int32_t addr_size)
2868{
2869 struct ifmultiaddr *ifma_ll;
2870
2871 if (ifma == NULL || out_addr == NULL) {
2872 return EINVAL;
2873 }
2874 if ((ifma_ll = ifma->ifma_ll) == NULL) {
2875 return ENOTSUP;
2876 }
2877
2878 return ifmaddr_address(ifma: ifma_ll, out_addr, addr_size);
2879}
2880
2881ifnet_t
2882ifmaddr_ifnet(ifmultiaddr_t ifma)
2883{
2884 return (ifma == NULL) ? NULL : ifma->ifma_ifp;
2885}
2886
2887/**************************************************************************/
2888/* interface cloner */
2889/**************************************************************************/
2890
2891errno_t
2892ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
2893 if_clone_t *ifcloner)
2894{
2895 errno_t error = 0;
2896 struct if_clone *ifc = NULL;
2897 size_t namelen;
2898
2899 if (cloner_params == NULL || ifcloner == NULL ||
2900 cloner_params->ifc_name == NULL ||
2901 cloner_params->ifc_create == NULL ||
2902 cloner_params->ifc_destroy == NULL ||
2903 (namelen = strlen(s: cloner_params->ifc_name)) >= IFNAMSIZ) {
2904 error = EINVAL;
2905 goto fail;
2906 }
2907
2908 if (if_clone_lookup(cloner_params->ifc_name, NULL) != NULL) {
2909 printf("%s: already a cloner for %s\n", __func__,
2910 cloner_params->ifc_name);
2911 error = EEXIST;
2912 goto fail;
2913 }
2914
2915 ifc = kalloc_type(struct if_clone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2916 strlcpy(dst: ifc->ifc_name, src: cloner_params->ifc_name, IFNAMSIZ + 1);
2917 ifc->ifc_namelen = (uint8_t)namelen;
2918 ifc->ifc_maxunit = IF_MAXUNIT;
2919 ifc->ifc_create = cloner_params->ifc_create;
2920 ifc->ifc_destroy = cloner_params->ifc_destroy;
2921
2922 error = if_clone_attach(ifc);
2923 if (error != 0) {
2924 printf("%s: if_clone_attach failed %d\n", __func__, error);
2925 goto fail;
2926 }
2927 *ifcloner = ifc;
2928
2929 return 0;
2930fail:
2931 if (ifc != NULL) {
2932 kfree_type(struct if_clone, ifc);
2933 }
2934 return error;
2935}
2936
2937errno_t
2938ifnet_clone_detach(if_clone_t ifcloner)
2939{
2940 errno_t error = 0;
2941 struct if_clone *ifc = ifcloner;
2942
2943 if (ifc == NULL) {
2944 return EINVAL;
2945 }
2946
2947 if ((if_clone_lookup(ifc->ifc_name, NULL)) == NULL) {
2948 printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
2949 error = EINVAL;
2950 goto fail;
2951 }
2952
2953 if_clone_detach(ifc);
2954
2955 kfree_type(struct if_clone, ifc);
2956
2957fail:
2958 return error;
2959}
2960
2961/**************************************************************************/
2962/* misc */
2963/**************************************************************************/
2964
2965static errno_t
2966ifnet_get_local_ports_extended_inner(ifnet_t ifp, protocol_family_t protocol,
2967 u_int32_t flags, u_int8_t *bitfield)
2968{
2969 u_int32_t ifindex;
2970
2971 /* no point in continuing if no address is assigned */
2972 if (ifp != NULL && TAILQ_EMPTY(&ifp->if_addrhead)) {
2973 return 0;
2974 }
2975
2976 if_ports_used_update_wakeuuid(ifp);
2977
2978#if SKYWALK
2979 if (netns_is_enabled()) {
2980 netns_get_local_ports(ifp, protocol, flags, bitfield);
2981 }
2982#endif /* SKYWALK */
2983
2984 ifindex = (ifp != NULL) ? ifp->if_index : 0;
2985
2986 if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY)) {
2987 udp_get_ports_used(ifp, protocol, flags,
2988 bitfield);
2989 }
2990
2991 if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY)) {
2992 tcp_get_ports_used(ifp, protocol, flags,
2993 bitfield);
2994 }
2995
2996 return 0;
2997}
2998
2999errno_t
3000ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
3001 u_int32_t flags, u_int8_t *bitfield)
3002{
3003 ifnet_t parent_ifp = NULL;
3004
3005 if (bitfield == NULL) {
3006 return EINVAL;
3007 }
3008
3009 switch (protocol) {
3010 case PF_UNSPEC:
3011 case PF_INET:
3012 case PF_INET6:
3013 break;
3014 default:
3015 return EINVAL;
3016 }
3017
3018 /* bit string is long enough to hold 16-bit port values */
3019 bzero(s: bitfield, bitstr_size(IP_PORTRANGE_SIZE));
3020
3021 ifnet_get_local_ports_extended_inner(ifp, protocol, flags, bitfield);
3022
3023 /* get local ports for parent interface */
3024 if (ifp != NULL && ifnet_get_delegate_parent(difp: ifp, parent: &parent_ifp) == 0) {
3025 ifnet_get_local_ports_extended_inner(ifp: parent_ifp, protocol,
3026 flags, bitfield);
3027 ifnet_release_delegate_parent(difp: ifp);
3028 }
3029
3030 return 0;
3031}
3032
3033errno_t
3034ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield)
3035{
3036 u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;
3037 return ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
3038 bitfield);
3039}
3040
3041errno_t
3042ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
3043 int lqm, int npm, u_int8_t srvinfo[48])
3044{
3045 if (ifp == NULL || sa == NULL || srvinfo == NULL) {
3046 return EINVAL;
3047 }
3048 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3049 return EINVAL;
3050 }
3051 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
3052 return EINVAL;
3053 }
3054
3055 return dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
3056}
3057
3058errno_t
3059ifnet_notice_node_presence_v2(ifnet_t ifp, struct sockaddr *sa, struct sockaddr_dl *sdl,
3060 int32_t rssi, int lqm, int npm, u_int8_t srvinfo[48])
3061{
3062 /* Support older version if sdl is NULL */
3063 if (sdl == NULL) {
3064 return ifnet_notice_node_presence(ifp, sa, rssi, lqm, npm, srvinfo);
3065 }
3066
3067 if (ifp == NULL || sa == NULL || srvinfo == NULL) {
3068 return EINVAL;
3069 }
3070 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3071 return EINVAL;
3072 }
3073
3074 if (sa->sa_family != AF_INET6) {
3075 return EINVAL;
3076 }
3077
3078 if (sdl->sdl_family != AF_LINK) {
3079 return EINVAL;
3080 }
3081
3082 return dlil_node_present_v2(ifp, sa, sdl, rssi, lqm, npm, srvinfo);
3083}
3084
3085errno_t
3086ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
3087{
3088 if (ifp == NULL || sa == NULL) {
3089 return EINVAL;
3090 }
3091 if (sa->sa_len > sizeof(struct sockaddr_storage)) {
3092 return EINVAL;
3093 }
3094 if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6) {
3095 return EINVAL;
3096 }
3097
3098 dlil_node_absent(ifp, sa);
3099 return 0;
3100}
3101
3102errno_t
3103ifnet_notice_primary_elected(ifnet_t ifp)
3104{
3105 if (ifp == NULL) {
3106 return EINVAL;
3107 }
3108
3109 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_PRIMARY_ELECTED, NULL, 0, FALSE);
3110 return 0;
3111}
3112
3113errno_t
3114ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
3115{
3116#pragma unused(val)
3117
3118 m_do_tx_compl_callback(m, ifp);
3119
3120 return 0;
3121}
3122
3123errno_t
3124ifnet_tx_compl(ifnet_t ifp, mbuf_t m)
3125{
3126 m_do_tx_compl_callback(m, ifp);
3127
3128 return 0;
3129}
3130
3131errno_t
3132ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
3133 u_int8_t info[IFNET_MODARGLEN])
3134{
3135 if (ifp == NULL || modid == NULL) {
3136 return EINVAL;
3137 }
3138
3139 dlil_report_issues(ifp, modid, info);
3140 return 0;
3141}
3142
3143errno_t
3144ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
3145{
3146 ifnet_t odifp = NULL;
3147
3148 if (ifp == NULL) {
3149 return EINVAL;
3150 } else if (!ifnet_is_attached(ifp, refio: 1)) {
3151 return ENXIO;
3152 }
3153
3154 ifnet_lock_exclusive(ifp);
3155 odifp = ifp->if_delegated.ifp;
3156 if (odifp != NULL && odifp == delegated_ifp) {
3157 /* delegate info is unchanged; nothing more to do */
3158 ifnet_lock_done(ifp);
3159 goto done;
3160 }
3161 // Test if this delegate interface would cause a loop
3162 ifnet_t delegate_check_ifp = delegated_ifp;
3163 while (delegate_check_ifp != NULL) {
3164 if (delegate_check_ifp == ifp) {
3165 printf("%s: delegating to %s would cause a loop\n",
3166 ifp->if_xname, delegated_ifp->if_xname);
3167 ifnet_lock_done(ifp);
3168 goto done;
3169 }
3170 delegate_check_ifp = delegate_check_ifp->if_delegated.ifp;
3171 }
3172 bzero(s: &ifp->if_delegated, n: sizeof(ifp->if_delegated));
3173 if (delegated_ifp != NULL && ifp != delegated_ifp) {
3174 uint32_t set_eflags;
3175
3176 ifp->if_delegated.ifp = delegated_ifp;
3177 ifnet_reference(ifp: delegated_ifp);
3178 ifp->if_delegated.type = delegated_ifp->if_type;
3179 ifp->if_delegated.family = delegated_ifp->if_family;
3180 ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
3181 ifp->if_delegated.expensive =
3182 delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
3183 ifp->if_delegated.constrained =
3184 delegated_ifp->if_xflags & IFXF_CONSTRAINED ? 1 : 0;
3185
3186 /*
3187 * Propogate flags related to ECN from delegated interface
3188 */
3189 if_clear_eflags(ifp, IFEF_ECN_ENABLE | IFEF_ECN_DISABLE);
3190 set_eflags = (delegated_ifp->if_eflags &
3191 (IFEF_ECN_ENABLE | IFEF_ECN_DISABLE));
3192 if_set_eflags(ifp, set_eflags);
3193 printf("%s: is now delegating %s (type 0x%x, family %u, "
3194 "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
3195 delegated_ifp->if_type, delegated_ifp->if_family,
3196 delegated_ifp->if_subfamily);
3197 }
3198
3199 ifnet_lock_done(ifp);
3200
3201 if (odifp != NULL) {
3202 if (odifp != delegated_ifp) {
3203 printf("%s: is no longer delegating %s\n",
3204 ifp->if_xname, odifp->if_xname);
3205 }
3206 ifnet_release(ifp: odifp);
3207 }
3208
3209 /* Generate a kernel event */
3210 dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0, FALSE);
3211
3212done:
3213 /* Release the io ref count */
3214 ifnet_decr_iorefcnt(ifp);
3215
3216 return 0;
3217}
3218
3219errno_t
3220ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
3221{
3222 if (ifp == NULL || pdelegated_ifp == NULL) {
3223 return EINVAL;
3224 } else if (!ifnet_is_attached(ifp, refio: 1)) {
3225 return ENXIO;
3226 }
3227
3228 ifnet_lock_shared(ifp);
3229 if (ifp->if_delegated.ifp != NULL) {
3230 ifnet_reference(ifp: ifp->if_delegated.ifp);
3231 }
3232 *pdelegated_ifp = ifp->if_delegated.ifp;
3233 ifnet_lock_done(ifp);
3234
3235 /* Release the io ref count */
3236 ifnet_decr_iorefcnt(ifp);
3237
3238 return 0;
3239}
3240
3241errno_t
3242ifnet_get_keepalive_offload_frames(ifnet_t ifp,
3243 struct ifnet_keepalive_offload_frame *frames_array,
3244 u_int32_t frames_array_count, size_t frame_data_offset,
3245 u_int32_t *used_frames_count)
3246{
3247 u_int32_t i;
3248
3249 if (frames_array == NULL || used_frames_count == NULL ||
3250 frame_data_offset >= IFNET_KEEPALIVE_OFFLOAD_FRAME_DATA_SIZE) {
3251 return EINVAL;
3252 }
3253
3254 /* frame_data_offset should be 32-bit aligned */
3255 if (P2ROUNDUP(frame_data_offset, sizeof(u_int32_t)) !=
3256 frame_data_offset) {
3257 return EINVAL;
3258 }
3259
3260 *used_frames_count = 0;
3261 if (frames_array_count == 0) {
3262 return 0;
3263 }
3264
3265 /* Keep-alive offload not required for CLAT interface */
3266 if (IS_INTF_CLAT46(ifp)) {
3267 return 0;
3268 }
3269
3270 for (i = 0; i < frames_array_count; i++) {
3271 struct ifnet_keepalive_offload_frame *frame = frames_array + i;
3272
3273 bzero(s: frame, n: sizeof(struct ifnet_keepalive_offload_frame));
3274 }
3275
3276 /* First collect IPsec related keep-alive frames */
3277 *used_frames_count = key_fill_offload_frames_for_savs(ifp,
3278 frames_array, frames_array_count, frame_data_offset);
3279
3280 /* If there is more room, collect other UDP keep-alive frames */
3281 if (*used_frames_count < frames_array_count) {
3282 udp_fill_keepalive_offload_frames(ifp, frames_array,
3283 frames_array_count, frame_data_offset,
3284 used_frames_count);
3285 }
3286
3287 /* If there is more room, collect other TCP keep-alive frames */
3288 if (*used_frames_count < frames_array_count) {
3289 tcp_fill_keepalive_offload_frames(ifp, frames_array,
3290 frames_array_count, frame_data_offset,
3291 used_frames_count);
3292 }
3293
3294 VERIFY(*used_frames_count <= frames_array_count);
3295
3296 return 0;
3297}
3298
3299errno_t
3300ifnet_notify_tcp_keepalive_offload_timeout(ifnet_t ifp,
3301 struct ifnet_keepalive_offload_frame *frame)
3302{
3303 errno_t error = 0;
3304
3305 if (ifp == NULL || frame == NULL) {
3306 return EINVAL;
3307 }
3308
3309 if (frame->type != IFNET_KEEPALIVE_OFFLOAD_FRAME_TCP) {
3310 return EINVAL;
3311 }
3312 if (frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV4 &&
3313 frame->ether_type != IFNET_KEEPALIVE_OFFLOAD_FRAME_ETHERTYPE_IPV6) {
3314 return EINVAL;
3315 }
3316 if (frame->local_port == 0 || frame->remote_port == 0) {
3317 return EINVAL;
3318 }
3319
3320 error = tcp_notify_kao_timeout(ifp, frame);
3321
3322 return error;
3323}
3324
3325errno_t
3326ifnet_link_status_report(ifnet_t ifp, const void *buffer,
3327 size_t buffer_len)
3328{
3329 struct if_link_status *ifsr;
3330 errno_t err = 0;
3331
3332 if (ifp == NULL || buffer == NULL || buffer_len == 0) {
3333 return EINVAL;
3334 }
3335
3336 ifnet_lock_shared(ifp);
3337
3338 /*
3339 * Make sure that the interface is attached but there is no need
3340 * to take a reference because this call is coming from the driver.
3341 */
3342 if (!ifnet_is_attached(ifp, refio: 0)) {
3343 ifnet_lock_done(ifp);
3344 return ENXIO;
3345 }
3346
3347 lck_rw_lock_exclusive(lck: &ifp->if_link_status_lock);
3348
3349 /*
3350 * If this is the first status report then allocate memory
3351 * to store it.
3352 */
3353 if (ifp->if_link_status == NULL) {
3354 ifp->if_link_status = kalloc_type(struct if_link_status, Z_ZERO);
3355 if (ifp->if_link_status == NULL) {
3356 err = ENOMEM;
3357 goto done;
3358 }
3359 }
3360
3361 ifsr = __DECONST(struct if_link_status *, buffer);
3362
3363 if (ifp->if_type == IFT_CELLULAR) {
3364 struct if_cellular_status_v1 *if_cell_sr, *new_cell_sr;
3365 /*
3366 * Currently we have a single version -- if it does
3367 * not match, just return.
3368 */
3369 if (ifsr->ifsr_version !=
3370 IF_CELLULAR_STATUS_REPORT_CURRENT_VERSION) {
3371 err = ENOTSUP;
3372 goto done;
3373 }
3374
3375 if (ifsr->ifsr_len != sizeof(*if_cell_sr)) {
3376 err = EINVAL;
3377 goto done;
3378 }
3379
3380 if_cell_sr =
3381 &ifp->if_link_status->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
3382 new_cell_sr = &ifsr->ifsr_u.ifsr_cell.if_cell_u.if_status_v1;
3383 /* Check if we need to act on any new notifications */
3384 if ((new_cell_sr->valid_bitmask &
3385 IF_CELL_UL_MSS_RECOMMENDED_VALID) &&
3386 new_cell_sr->mss_recommended !=
3387 if_cell_sr->mss_recommended) {
3388 os_atomic_or(&tcbinfo.ipi_flags, INPCBINFO_UPDATE_MSS, relaxed);
3389 inpcb_timer_sched(&tcbinfo, type: INPCB_TIMER_FAST);
3390#if NECP
3391 necp_update_all_clients();
3392#endif
3393 }
3394
3395 /* Finally copy the new information */
3396 ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
3397 ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
3398 if_cell_sr->valid_bitmask = 0;
3399 bcopy(src: new_cell_sr, dst: if_cell_sr, n: sizeof(*if_cell_sr));
3400 } else if (IFNET_IS_WIFI(ifp)) {
3401 struct if_wifi_status_v1 *if_wifi_sr, *new_wifi_sr;
3402
3403 /* Check version */
3404 if (ifsr->ifsr_version !=
3405 IF_WIFI_STATUS_REPORT_CURRENT_VERSION) {
3406 err = ENOTSUP;
3407 goto done;
3408 }
3409
3410 if (ifsr->ifsr_len != sizeof(*if_wifi_sr)) {
3411 err = EINVAL;
3412 goto done;
3413 }
3414
3415 if_wifi_sr =
3416 &ifp->if_link_status->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
3417 new_wifi_sr =
3418 &ifsr->ifsr_u.ifsr_wifi.if_wifi_u.if_status_v1;
3419 ifp->if_link_status->ifsr_version = ifsr->ifsr_version;
3420 ifp->if_link_status->ifsr_len = ifsr->ifsr_len;
3421 if_wifi_sr->valid_bitmask = 0;
3422 bcopy(src: new_wifi_sr, dst: if_wifi_sr, n: sizeof(*if_wifi_sr));
3423
3424 /*
3425 * Update the bandwidth values if we got recent values
3426 * reported through the other KPI.
3427 */
3428 if (!(new_wifi_sr->valid_bitmask &
3429 IF_WIFI_UL_MAX_BANDWIDTH_VALID) &&
3430 ifp->if_output_bw.max_bw > 0) {
3431 if_wifi_sr->valid_bitmask |=
3432 IF_WIFI_UL_MAX_BANDWIDTH_VALID;
3433 if_wifi_sr->ul_max_bandwidth =
3434 ifp->if_output_bw.max_bw > UINT32_MAX ?
3435 UINT32_MAX :
3436 (uint32_t)ifp->if_output_bw.max_bw;
3437 }
3438 if (!(new_wifi_sr->valid_bitmask &
3439 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID) &&
3440 ifp->if_output_bw.eff_bw > 0) {
3441 if_wifi_sr->valid_bitmask |=
3442 IF_WIFI_UL_EFFECTIVE_BANDWIDTH_VALID;
3443 if_wifi_sr->ul_effective_bandwidth =
3444 ifp->if_output_bw.eff_bw > UINT32_MAX ?
3445 UINT32_MAX :
3446 (uint32_t)ifp->if_output_bw.eff_bw;
3447 }
3448 if (!(new_wifi_sr->valid_bitmask &
3449 IF_WIFI_DL_MAX_BANDWIDTH_VALID) &&
3450 ifp->if_input_bw.max_bw > 0) {
3451 if_wifi_sr->valid_bitmask |=
3452 IF_WIFI_DL_MAX_BANDWIDTH_VALID;
3453 if_wifi_sr->dl_max_bandwidth =
3454 ifp->if_input_bw.max_bw > UINT32_MAX ?
3455 UINT32_MAX :
3456 (uint32_t)ifp->if_input_bw.max_bw;
3457 }
3458 if (!(new_wifi_sr->valid_bitmask &
3459 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID) &&
3460 ifp->if_input_bw.eff_bw > 0) {
3461 if_wifi_sr->valid_bitmask |=
3462 IF_WIFI_DL_EFFECTIVE_BANDWIDTH_VALID;
3463 if_wifi_sr->dl_effective_bandwidth =
3464 ifp->if_input_bw.eff_bw > UINT32_MAX ?
3465 UINT32_MAX :
3466 (uint32_t)ifp->if_input_bw.eff_bw;
3467 }
3468 }
3469
3470done:
3471 lck_rw_done(lck: &ifp->if_link_status_lock);
3472 ifnet_lock_done(ifp);
3473 return err;
3474}
3475
3476/*************************************************************************/
3477/* Fastlane QoS Ca */
3478/*************************************************************************/
3479
3480errno_t
3481ifnet_set_fastlane_capable(ifnet_t interface, boolean_t capable)
3482{
3483 if (interface == NULL) {
3484 return EINVAL;
3485 }
3486
3487 if_set_qosmarking_mode(interface,
3488 capable ? IFRTYPE_QOSMARKING_FASTLANE : IFRTYPE_QOSMARKING_MODE_NONE);
3489
3490 return 0;
3491}
3492
3493errno_t
3494ifnet_get_fastlane_capable(ifnet_t interface, boolean_t *capable)
3495{
3496 if (interface == NULL || capable == NULL) {
3497 return EINVAL;
3498 }
3499 if (interface->if_qosmarking_mode == IFRTYPE_QOSMARKING_FASTLANE) {
3500 *capable = true;
3501 } else {
3502 *capable = false;
3503 }
3504 return 0;
3505}
3506
3507errno_t
3508ifnet_get_unsent_bytes(ifnet_t interface, int64_t *unsent_bytes)
3509{
3510 int64_t bytes;
3511
3512 if (interface == NULL || unsent_bytes == NULL) {
3513 return EINVAL;
3514 }
3515
3516 bytes = *unsent_bytes = 0;
3517
3518 if (!IF_FULLY_ATTACHED(interface)) {
3519 return ENXIO;
3520 }
3521
3522 bytes = interface->if_sndbyte_unsent;
3523
3524 if (interface->if_eflags & IFEF_TXSTART) {
3525 bytes += IFCQ_BYTES(interface->if_snd);
3526 }
3527 *unsent_bytes = bytes;
3528
3529 return 0;
3530}
3531
3532errno_t
3533ifnet_get_buffer_status(const ifnet_t ifp, ifnet_buffer_status_t *buf_status)
3534{
3535 if (ifp == NULL || buf_status == NULL) {
3536 return EINVAL;
3537 }
3538
3539 bzero(s: buf_status, n: sizeof(*buf_status));
3540
3541 if (!IF_FULLY_ATTACHED(ifp)) {
3542 return ENXIO;
3543 }
3544
3545 if (ifp->if_eflags & IFEF_TXSTART) {
3546 buf_status->buf_interface = IFCQ_BYTES(ifp->if_snd);
3547 }
3548
3549 buf_status->buf_sndbuf = ((buf_status->buf_interface != 0) ||
3550 (ifp->if_sndbyte_unsent != 0)) ? 1 : 0;
3551
3552 return 0;
3553}
3554
3555void
3556ifnet_normalise_unsent_data(void)
3557{
3558 struct ifnet *ifp;
3559
3560 ifnet_head_lock_shared();
3561 TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
3562 ifnet_lock_exclusive(ifp);
3563 if (!IF_FULLY_ATTACHED(ifp)) {
3564 ifnet_lock_done(ifp);
3565 continue;
3566 }
3567 if (!(ifp->if_eflags & IFEF_TXSTART)) {
3568 ifnet_lock_done(ifp);
3569 continue;
3570 }
3571
3572 if (ifp->if_sndbyte_total > 0 ||
3573 IFCQ_BYTES(ifp->if_snd) > 0) {
3574 ifp->if_unsent_data_cnt++;
3575 }
3576
3577 ifnet_lock_done(ifp);
3578 }
3579 ifnet_head_done();
3580}
3581
3582errno_t
3583ifnet_set_low_power_mode(ifnet_t ifp, boolean_t on)
3584{
3585 errno_t error;
3586
3587 error = if_set_low_power(ifp, on);
3588
3589 return error;
3590}
3591
3592errno_t
3593ifnet_get_low_power_mode(ifnet_t ifp, boolean_t *on)
3594{
3595 if (ifp == NULL || on == NULL) {
3596 return EINVAL;
3597 }
3598
3599 *on = ((ifp->if_xflags & IFXF_LOW_POWER) != 0);
3600 return 0;
3601}
3602