1/*
2 * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
#ifndef _SKYWALK_NEXUS_NETIF_H_
#define _SKYWALK_NEXUS_NETIF_H_

#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/nexus_pktq.h>

#if CONFIG_NEXUS_NETIF

/* reverse-DNS name of the netif nexus provider */
#define NEXUS_PROVIDER_NET_IF "com.apple.nexus.netif"

#define NX_NETIF_MAXPORTS 128 /* max nexus ports on a netif instance */
#define NX_NETIF_EVENT_RING_NUM 1 /* number of event rings */
#define NX_NETIF_EVENT_RING_SIZE 32 /* default event ring size */
42
/*
 * An interface filter attached to a netif instance.  Filters are kept
 * on the per-netif nif_filter_list (see struct nx_netif) and deliver
 * packets through nf_cb_func, with nf_cb_arg passed as first argument.
 */
struct netif_filter {
	STAILQ_ENTRY(netif_filter) nf_link;     /* linkage on nif_filter_list */
	nexus_port_t nf_port;                   /* nexus port of this filter */
	uint32_t nf_refcnt;                     /* reference count */
	void *nf_cb_arg;                        /* opaque argument to nf_cb_func */
	errno_t (*nf_cb_func)(void *,
	    struct __kern_packet *, uint32_t);  /* packet delivery callback */
};
STAILQ_HEAD(netif_filter_head, netif_filter);
52
/*
 * Flow description used to classify traffic to a netif virtual port.
 * fd_laddr/fd_raddr are presumably the local/remote IPv6 addresses
 * (used by the FT_TYPE_IPV6_ULA table type); fd_ethertype matches on
 * the Ethernet type field (FT_TYPE_ETHERTYPE).
 */
struct netif_flow_desc {
	uint16_t fd_ethertype;
	struct in6_addr fd_laddr;
	struct in6_addr fd_raddr;
};

/*
 * Per-port information: the generic nexus port info header followed
 * by the flow description bound to that port.
 */
struct netif_port_info {
	struct nx_port_info_header npi_hdr;
	struct netif_flow_desc npi_fd;
};
63
/*
 * A flow bound to a netif virtual port.  Each flow is linked both on
 * the per-netif flow list (nf_link; see nif_flow_list) and on a flow
 * table (nf_table_link).  Demuxed packets are handed to nf_cb_func
 * with nf_cb_arg as first argument.
 */
struct netif_flow {
	SLIST_ENTRY(netif_flow) nf_link;        /* linkage on nif_flow_list */
	SLIST_ENTRY(netif_flow) nf_table_link;  /* linkage on the flow table */
	nexus_port_t nf_port;                   /* nexus port of this flow */
	uint32_t nf_refcnt;                     /* reference count */
	struct netif_flow_desc nf_desc;         /* classification rule */
	void *nf_cb_arg;                        /* opaque argument to nf_cb_func */
	errno_t (*nf_cb_func)(void *,
	    void *, uint32_t);                  /* delivery callback */
};

/* type of classification performed by a netif flow table */
typedef enum {
	FT_TYPE_ETHERTYPE,      /* match on Ethernet type */
	FT_TYPE_IPV6_ULA        /* match on IPv6 ULA addresses */
} netif_flowtable_type_t;
79
/*
 * A flow table instance; the concrete implementation is selected via
 * ft_ops, with its private state kept behind the opaque ft_internal.
 */
struct netif_flowtable {
	struct netif_flowtable_ops *ft_ops;
	void *ft_internal;
};

/* look up the flow matching a packet; fills the netif_flow out-param */
typedef int netif_flow_lookup_t(struct netif_flowtable *,
    struct __kern_packet *, uint32_t, struct netif_flow **);
/* compare two flow descriptions for equality/match */
typedef boolean_t netif_flow_match_t(struct netif_flow_desc *,
    struct netif_flow_desc *);
/* extract a flow description from a packet */
typedef int netif_flow_info_t(struct __kern_packet *,
    struct netif_flow_desc *, uint32_t);
/* insert a flow into the table */
typedef int netif_flow_insert_t(struct netif_flowtable *,
    struct netif_flow *);
/* remove a flow from the table */
typedef void netif_flow_remove_t(struct netif_flowtable *,
    struct netif_flow *);
/* allocate a table instance for the given ops vector */
typedef struct netif_flowtable *netif_flow_table_alloc_t(
    struct netif_flowtable_ops *);
/* free a table instance */
typedef void netif_flow_table_free_t(struct netif_flowtable *);

/* operations vector implemented by each flow table type */
struct netif_flowtable_ops {
	netif_flow_lookup_t *nfo_lookup;
	netif_flow_match_t *nfo_match;
	netif_flow_info_t *nfo_info;
	netif_flow_insert_t *nfo_insert;
	netif_flow_remove_t *nfo_remove;
	netif_flow_table_alloc_t *nfo_table_alloc;
	netif_flow_table_free_t *nfo_table_free;
};
108
SLIST_HEAD(netif_flow_head, netif_flow);

/*
 * netif-specific nexus adapter.
 */
struct nexus_netif_adapter {
	/*
	 * This is an overlay structure on nexus_adapter;
	 * make sure it contains 'up' as the first member.
	 */
	struct nexus_adapter nifna_up;
	struct nx_netif *nifna_netif;   /* backpointer to netif instance */

	/* TX/RX "interrupt" mitigation state (see struct nx_netif_mit) */
	struct nx_netif_mit *nifna_tx_mit;
	struct nx_netif_mit *nifna_rx_mit;

	/*
	 * XXX For filter or vpna only
	 */
	union {
		struct netif_filter *nifna_filter;
		struct netif_flow *nifna_flow;
	};
	uint16_t nifna_gencnt;          /* generation count */
};
130
/*
 * A single driver queue within a queue set (see struct netif_qset).
 */
struct netif_queue {
	decl_lck_mtx_data(, nq_lock);
	struct netif_qset *nq_qset; /* backpointer to parent netif qset */
	struct pktq nq_pktq;        /* staged packets */
	struct netif_qstats nq_stats;
	uint64_t nq_accumulated_bytes;
	uint64_t nq_accumulated_pkts;
	uint64_t nq_accumulate_start; /* in seconds */
	void *nq_ctx;               /* context provided by driver */
	kern_packet_svc_class_t nq_svc; /* service class of TX queue */
	uint16_t nq_flags;          /* NETIF_QUEUE_* (see below) */
}__attribute__((aligned(sizeof(uint64_t))));

/* values for nq_flags */
#define NETIF_QUEUE_EXT_INITED 0x0001 /* nxnpi_queue_init() succeeded */
#define NETIF_QUEUE_IS_RX 0x0002 /* RX queue, else TX */

/*
 * Accessors for the variable-length nqs_driver_queues[] array of a
 * queue set; RX queues come first, followed by the TX queues.
 */
#define _NETIF_QSET_QUEUE(_p, _n) \
	(struct netif_queue *)(void *)((uint8_t *)((_p)->nqs_driver_queues) + \
	((_n) * sizeof(struct netif_queue)))
#define NETIF_QSET_RX_QUEUE(_p, _n) _NETIF_QSET_QUEUE(_p, _n)
#define NETIF_QSET_TX_QUEUE(_p, _n) \
	_NETIF_QSET_QUEUE(_p, (_p)->nqs_num_rx_queues + (_n))
154
/*
 * Queue set identifier: internal logical link id in bits 16..31,
 * queue set index in bits 0..15.  The top 32 bits are unused for now.
 */
#define NETIF_QSET_ID_ENCODE(llink_id_internal, qset_idx) \
	((((llink_id_internal) << 16) | (qset_idx)) & 0xffffffff)

/*
 * A set of driver queues belonging to a logical link.
 */
struct netif_qset {
	struct netif_llink *nqs_llink; /* backpointer to parent logical link */
	struct ifclassq *nqs_ifcq;
	SLIST_ENTRY(netif_qset) nqs_list; /* linkage on nll_qset_list */
	void *nqs_ctx; /* context provided by driver */
	uint64_t nqs_id; /* queue set identifier */
	uint8_t nqs_idx; /* queue set index */
	uint16_t nqs_flags; /* NETIF_QSET_FLAG_* (see below) */
	uint8_t nqs_num_rx_queues;
	uint8_t nqs_num_tx_queues;
	/*
	 * nqs_driver_queues is organized as:
	 * [0..nqs_num_rx_queues-1] holds the RX queues.
	 * [nqs_num_rx_queues..nqs_num_rx_queues+nqs_num_tx_queues-1]
	 * holds the TX queues (see NETIF_QSET_TX_QUEUE()).
	 */
	struct netif_queue nqs_driver_queues[0]
	__attribute__((aligned(sizeof(uint64_t))));
};

/* values for nqs_flags */
#define NETIF_QSET_FLAG_DEFAULT 0x0001 /* default queue set of the logical link */
#define NETIF_QSET_FLAG_AQM 0x0002 /* provides AQM */
#define NETIF_QSET_FLAG_LOW_LATENCY 0x0004 /* provides low latency service */
#define NETIF_QSET_FLAG_EXT_INITED 0x0008 /* nxnpi_qset_init() succeeded */

#define NETIF_DEFAULT_QSET(_qs) ((_qs)->nqs_flags & NETIF_QSET_FLAG_DEFAULT)
185
/*
 * A logical link; owns a list of queue sets, one of which is the
 * default (nll_default_qset).
 */
struct netif_llink {
	struct nx_netif *nll_nif; /* backpointer to parent netif instance */
	STAILQ_ENTRY(netif_llink) nll_link; /* linkage on nif_llink_list */
	SLIST_HEAD(, netif_qset) nll_qset_list; /* queue sets on this link */
	struct netif_qset *nll_default_qset;
	struct ifclassq *nll_ifcq;
	struct os_refcnt nll_refcnt;
#define NETIF_LLINK_ID_DEFAULT 0
	kern_nexus_netif_llink_id_t nll_link_id;
	uint16_t nll_link_id_internal; /* see NETIF_QSET_ID_ENCODE() */
	uint16_t nll_qset_cnt; /* number of queue sets on this link */
	uint8_t nll_state; /* NETIF_LLINK_STATE_* (see below) */
	uint8_t nll_flags; /* NETIF_LLINK_FLAG_* (see below) */
	void *nll_ctx; /* context provided by driver */
};
STAILQ_HEAD(netif_llink_head, netif_llink);

/* values for nll_flags */
#define NETIF_LLINK_FLAG_DEFAULT 0x1 /* default logical link */

/* values for nll_state */
#define NETIF_LLINK_STATE_INIT 0x1 /* Initialized and ready for use */
#define NETIF_LLINK_STATE_DESTROYED 0x2 /* not available for use */

#define NETIF_DEFAULT_LLINK(_ll) ((_ll)->nll_flags & NETIF_LLINK_FLAG_DEFAULT)
211
SLIST_HEAD(netif_agent_flow_head, netif_agent_flow);
/*
 * A flow added through the netif netagent (see
 * nx_netif_netagent_flow_add/del); kept on nif_agent_flow_list.
 */
struct netif_agent_flow {
	SLIST_ENTRY(netif_agent_flow) naf_link; /* linkage on nif_agent_flow_list */
	uuid_t naf_flow_uuid;
	uuid_t naf_bind_key;
	nexus_port_t naf_nx_port;
	uint16_t naf_flags;
	pid_t naf_pid;                          /* owning process */
	union sockaddr_in_4_6 naf_daddr;        /* destination address */
	union sockaddr_in_4_6 naf_saddr;        /* source address */
};

/* cast a generic nexus_adapter to its netif overlay */
#define NIFNA(_na) ((struct nexus_netif_adapter *)(_na))
225
/* nif_flags */
/*
 * This is named differently from the flow classification rule
 * (IPV6 ULA) because this gives us the flexibility of using
 * different types of classification in the future.
 */
#define NETIF_FLAG_LOW_LATENCY 0x00000001
#define NETIF_FLAG_COMPAT 0x00000002
#define NETIF_FLAG_LLINK_INITIALIZED 0x00000004
#define NETIF_IS_LOW_LATENCY(n) \
	(((n)->nif_flags & NETIF_FLAG_LOW_LATENCY) != 0)
#define NETIF_IS_COMPAT(n) \
	(((n)->nif_flags & NETIF_FLAG_COMPAT) != 0)
#define NETIF_LLINK_ENABLED(n) \
	(((n)->nif_flags & NETIF_FLAG_LLINK_INITIALIZED) != 0)
/*
 * Default-drop applies only when the global tunable is set AND the
 * filter subsystem of this netif has been initialized.
 */
#define NETIF_DEFAULT_DROP_ENABLED(n) \
	(nx_netif_filter_default_drop != 0 && \
	(((n)->nif_filter_flags & NETIF_FILTER_FLAG_INITIALIZED) != 0))

/* nif_agent_flags */
#define NETIF_AGENT_FLAG_REGISTERED 0x00000001
#define NETIF_AGENT_FLAG_ADDED 0x00000002

/* nif_filter_flags */
#define NETIF_FILTER_FLAG_INITIALIZED 0x00000001
#define NETIF_FILTER_FLAG_ENABLED 0x00000002

/* nif_flow_flags */
#define NETIF_FLOW_FLAG_INITIALIZED 0x00000001
#define NETIF_FLOW_FLAG_ENABLED 0x00000002

/* nif_llink_flags */
#define NETIF_LLINK_FLAG_INITIALIZED 0x00000001
259
/* Used by netif_hwna_set_mode() */
typedef enum {
	NETIF_MODE_NONE,
	NETIF_MODE_FSW,         /* attached to a flowswitch */
	NETIF_MODE_LLW          /* low-latency WLAN mode */
} netif_mode_t;

/* nif capabilities (nif_extended_capabilities) */
#define NETIF_CAPAB_INTERFACE_ADVISORY 0x00000001
#define NETIF_CAPAB_QSET_EXTENSIONS 0x00000002

/*
 * Queue-set extension callbacks supplied by the nexus provider
 * (valid when NETIF_CAPAB_QSET_EXTENSIONS is set).
 */
struct netif_qset_extensions {
	kern_nexus_capab_qsext_notify_steering_info_fn_t qe_notify_steering_info;
	void *qe_prov_ctx;      /* provider context passed to the callback */
};
275
276/*
277 * nx_netif is a descriptor for a netif nexus instance.
278 */
279struct nx_netif {
280 decl_lck_rw_data(, nif_lock);
281 struct kern_nexus *nif_nx;
282
283 struct nxbind *nif_dev_nxb;
284 struct nxbind *nif_host_nxb;
285 uuid_t nif_uuid; /* attachment UUID */
286 struct netif_stats nif_stats;
287 uint32_t nif_flags;
288 struct os_refcnt nif_refcnt;
289
290 decl_lck_mtx_data(, nif_agent_lock);
291 struct netif_agent_flow_head nif_agent_flow_list;
292 uint32_t nif_agent_flow_cnt;
293 uint32_t nif_agent_flags;
294 netagent_session_t nif_agent_session;
295 uuid_t nif_agent_uuid;
296
297 uint32_t nif_hwassist;
298 uint32_t nif_capabilities;
299 uint32_t nif_capenable;
300 uint64_t nif_input_rate; /* device input rate limit */
301
302 struct ifnet *nif_ifp;
303 struct nx_flowswitch *nif_fsw; /* attached flowswitch nexus */
304 struct sk_nexusadv *nif_fsw_nxadv; /* flowswitch nexus advisory */
305 struct netif_nexus_advisory *nif_netif_nxadv; /* netif nexus advisory */
306
307 /* packet-mbuf copy routines */
308 pkt_copy_from_mbuf_t *nif_pkt_copy_from_mbuf;
309 pkt_copy_to_mbuf_t *nif_pkt_copy_to_mbuf;
310 pkt_copy_from_pkt_t *nif_pkt_copy_from_pkt;
311
312 /* packet filtering */
313 decl_lck_mtx_data(, nif_filter_lock);
314 uint32_t nif_filter_flags;
315 uint32_t nif_filter_vp_cnt;
316 uint32_t nif_filter_cnt;
317 struct kern_pbufpool *nif_filter_pp;
318 struct netif_filter_head nif_filter_list;
319 union {
320 struct nx_mbq nif_tx_processed_mbq[MBUF_TC_MAX];
321 struct nx_pktq nif_tx_processed_pktq[KPKT_TC_MAX];
322 };
323
324 /* virtual port */
325 decl_lck_mtx_data(, nif_flow_lock);
326 uint32_t nif_vp_cnt;
327 uint32_t nif_flow_flags;
328 uint32_t nif_flow_cnt;
329 struct netif_flow_head nif_flow_list;
330 struct netif_flowtable *nif_flow_table;
331 struct kern_channel *nif_hw_ch;
332 uint32_t nif_hw_ch_refcnt;
333
334 /* logical link */
335 decl_lck_rw_data(, nif_llink_lock);
336 struct kern_nexus_netif_llink_init *nif_default_llink_params;
337 struct netif_llink *nif_default_llink;
338 STAILQ_HEAD(, netif_llink) nif_llink_list;
339 uint16_t nif_llink_cnt;
340
341 /* capability configuration callback function and context */
342 uint32_t nif_extended_capabilities;
343 kern_nexus_capab_interface_advisory_config_fn_t nif_intf_adv_config;
344 void *nif_intf_adv_prov_ctx;
345
346 struct netif_qset_extensions nif_qset_extensions;
347#if (DEVELOPMENT || DEBUG)
348 struct skoid nif_skoid;
349#endif /* !DEVELOPMENT && !DEBUG */
350};
351
/* retrieve the nx_netif instance hanging off a kern_nexus */
#define NX_NETIF_PRIVATE(_nx) ((struct nx_netif *)(_nx)->nx_arg)

/* reader/writer lock wrappers around nif_lock */
#define NETIF_RWINIT(_nif) \
	lck_rw_init(&(_nif)->nif_lock, &nexus_lock_group, &nexus_lock_attr)
#define NETIF_WLOCK(_nif) \
	lck_rw_lock_exclusive(&(_nif)->nif_lock)
#define NETIF_WUNLOCK(_nif) \
	lck_rw_unlock_exclusive(&(_nif)->nif_lock)
#define NETIF_WLOCKTORLOCK(_nif) \
	lck_rw_lock_exclusive_to_shared(&(_nif)->nif_lock)
#define NETIF_RLOCK(_nif) \
	lck_rw_lock_shared(&(_nif)->nif_lock)
#define NETIF_RLOCKTOWLOCK(_nif) \
	lck_rw_lock_shared_to_exclusive(&(_nif)->nif_lock)
#define NETIF_RTRYLOCK(_nif) \
	lck_rw_try_lock(&(_nif)->nif_lock, LCK_RW_TYPE_SHARED)
#define NETIF_RUNLOCK(_nif) \
	lck_rw_unlock_shared(&(_nif)->nif_lock)
#define NETIF_UNLOCK(_nif) \
	lck_rw_done(&(_nif)->nif_lock)
#define NETIF_RWDESTROY(_nif) \
	lck_rw_destroy(&(_nif)->nif_lock, &nexus_lock_group)
#define NETIF_WLOCK_ASSERT_HELD(_nif) \
	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_EXCLUSIVE)
#define NETIF_RLOCK_ASSERT_HELD(_nif) \
	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_SHARED)
#define NETIF_LOCK_ASSERT_HELD(_nif) \
	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_HELD)
380
SYSCTL_DECL(_kern_skywalk_netif);

/*
 * Macros to determine if an interface is skywalk capable or skywalk enabled.
 * See the magic field in struct nexus_adapter.
 */
#define SKYWALK_CAPABLE(ifp) \
	(NA(ifp) != NULL && (ifnet_capabilities_supported(ifp) & IFCAP_SKYWALK))

/* mark/unmark the interface as skywalk capable (takes the ifnet lock) */
#define SKYWALK_SET_CAPABLE(ifp) do { \
	ifnet_lock_exclusive(ifp); \
	(ifp)->if_capabilities |= IFCAP_SKYWALK; \
	(ifp)->if_capenable |= IFCAP_SKYWALK; \
	ifnet_lock_done(ifp); \
} while (0)

#define SKYWALK_CLEAR_CAPABLE(ifp) do { \
	ifnet_lock_exclusive(ifp); \
	(ifp)->if_capabilities &= ~IFCAP_SKYWALK; \
	(ifp)->if_capenable &= ~IFCAP_SKYWALK; \
	ifnet_lock_done(ifp); \
} while (0)

/* true if the driver is a native skywalk driver */
#define SKYWALK_NATIVE(ifp) \
	((ifp)->if_eflags & IFEF_SKYWALK_NATIVE)
406
/* mitigation operating mode (see struct nx_netif_mit) */
typedef enum {
	MIT_MODE_SIMPLE,            /* no stats, no delay */
	MIT_MODE_ADVANCED_STATIC,   /* fixed configuration */
	MIT_MODE_ADVANCED_DYNAMIC,  /* adapts based on traffic stats */
} mit_mode_t;

/*
 * Mitigation support.
 */
struct mit_cfg_tbl {
	uint32_t cfg_plowat; /* packets low watermark */
	uint32_t cfg_phiwat; /* packets high watermark */
	uint32_t cfg_blowat; /* bytes low watermark */
	uint32_t cfg_bhiwat; /* bytes high watermark */
	uint32_t cfg_ival; /* delay interval (in microsecond) */
};

/* maximum number of entries in the per-interface mitigation table */
#define NETIF_MIT_CFG_TBL_MAX_CFG 5
425
/*
 * Per-ring "interrupt" mitigation state; see the mitigation API
 * declarations below (nx_netif_mit_init() et al.)
 */
struct nx_netif_mit {
	decl_lck_spin_data(, mit_lock); /* see MIT_SPIN_LOCK() below */
	volatile struct __kern_channel_ring *mit_ckr; /* kring backpointer */
	uint32_t mit_flags;             /* NETIF_MITF_* (see below) */
	uint32_t mit_requests;          /* pending mitigation requests */
	uint32_t mit_interval;          /* current delay interval */

	/*
	 * Adaptive mitigation.
	 */
	uint32_t mit_cfg_idx_max; /* highest config selector */
	uint32_t mit_cfg_idx; /* current config selector */
	const struct mit_cfg_tbl *mit_cfg; /* current config mapping */
	mit_mode_t mit_mode; /* current mode */
	uint32_t mit_packets_avg; /* average # of packets */
	uint32_t mit_packets_min; /* smallest # of packets */
	uint32_t mit_packets_max; /* largest # of packets */
	uint32_t mit_bytes_avg; /* average # of bytes */
	uint32_t mit_bytes_min; /* smallest # of bytes */
	uint32_t mit_bytes_max; /* largest # of bytes */

	struct pktcntr mit_sstats; /* pkts & bytes per sampling */
	struct timespec mit_mode_holdtime; /* mode holdtime in nsec */
	struct timespec mit_mode_lasttime; /* last mode change time nsec */
	struct timespec mit_sample_time; /* sampling holdtime in nsec */
	struct timespec mit_sample_lasttime; /* last sampling time in nsec */
	struct timespec mit_start_time; /* time of start work in nsec */

	struct thread *mit_thread;      /* mitigation worker thread */
	char mit_name[MAXTHREADNAMESIZE];

	const struct ifnet *mit_netif_ifp;
	/* interface-specific mitigation table */
	struct mit_cfg_tbl mit_tbl[NETIF_MIT_CFG_TBL_MAX_CFG];

#if (DEVELOPMENT || DEBUG)
	struct skoid mit_skoid;
#endif /* !DEVELOPMENT && !DEBUG */
};

/* values for mit_flags */
#define NETIF_MITF_INITIALIZED 0x00000001 /* has been initialized */
#define NETIF_MITF_SAMPLING 0x00000002 /* busy sampling stats */
#define NETIF_MITF_SIMPLE 0x00000004 /* no stats, no delay */
#define NETIF_MITF_READY 0x10000000 /* thread is ready */
#define NETIF_MITF_RUNNING 0x20000000 /* thread is running */
#define NETIF_MITF_TERMINATING 0x40000000 /* thread is terminating */
#define NETIF_MITF_TERMINATED 0x80000000 /* thread is terminated */

/* spin lock wrappers around mit_lock */
#define MIT_SPIN_LOCK(_mit) \
	lck_spin_lock(&(_mit)->mit_lock)
#define MIT_SPIN_LOCK_ASSERT_HELD(_mit) \
	LCK_SPIN_ASSERT(&(_mit)->mit_lock, LCK_ASSERT_OWNED)
#define MIT_SPIN_LOCK_ASSERT_NOTHELD(_mit) \
	LCK_SPIN_ASSERT(&(_mit)->mit_lock, LCK_ASSERT_NOTOWNED)
#define MIT_SPIN_UNLOCK(_mit) \
	lck_spin_unlock(&(_mit)->mit_lock)
482
/* allocation tags for the netif data structures declared above */
extern kern_allocation_name_t skmem_tag_netif_filter;
extern kern_allocation_name_t skmem_tag_netif_flow;
extern kern_allocation_name_t skmem_tag_netif_agent_flow;
extern kern_allocation_name_t skmem_tag_netif_llink;
extern kern_allocation_name_t skmem_tag_netif_qset;
488
__BEGIN_DECLS
/* netif nexus domain and domain provider singletons */
extern struct nxdom nx_netif_dom_s;
extern struct kern_nexus_domain_provider nx_netif_prov_s;

/* allocation and reference counting of nx_netif instances */
extern struct nx_netif *nx_netif_alloc(zalloc_flags_t);
extern void nx_netif_free(struct nx_netif *);
extern void nx_netif_retain(struct nx_netif *);
extern void nx_netif_release(struct nx_netif *);

/* device kring creation/teardown and adapter lookup */
extern int nx_netif_dev_krings_create(struct nexus_adapter *,
    struct kern_channel *);
extern void nx_netif_dev_krings_delete(struct nexus_adapter *,
    struct kern_channel *, boolean_t);
extern int nx_netif_na_find(struct kern_nexus *, struct kern_channel *,
    struct chreq *, struct nxbind *, struct proc *, struct nexus_adapter **,
    boolean_t create);
extern int nx_netif_na_special(struct nexus_adapter *,
    struct kern_channel *, struct chreq *, nxspec_cmd_t);
extern int nx_netif_na_special_common(struct nexus_adapter *,
    struct kern_channel *, struct chreq *, nxspec_cmd_t);
extern int nx_netif_common_intr(struct __kern_channel_ring *, struct proc *,
    uint32_t, uint32_t *);

/* domain provider callbacks (init/params/memory/config/ctor/dtor/mib) */
extern int nx_netif_prov_init(struct kern_nexus_domain_provider *);
extern int nx_netif_prov_params(struct kern_nexus_domain_provider *,
    const uint32_t, const struct nxprov_params *, struct nxprov_params *,
    struct skmem_region_params[SKMEM_REGIONS], uint32_t);
extern int nx_netif_prov_mem_new(struct kern_nexus_domain_provider *,
    struct kern_nexus *, struct nexus_adapter *);
extern void nx_netif_prov_fini(struct kern_nexus_domain_provider *);
extern int nx_netif_prov_config(struct kern_nexus_domain_provider *,
    struct kern_nexus *, struct nx_cfg_req *, int, struct proc *,
    kauth_cred_t);
extern int nx_netif_prov_nx_ctor(struct kern_nexus *);
extern void nx_netif_prov_nx_dtor(struct kern_nexus *);
extern int nx_netif_prov_nx_mem_info(struct kern_nexus *,
    struct kern_pbufpool **, struct kern_pbufpool **);
extern size_t nx_netif_prov_nx_mib_get(struct kern_nexus *nx,
    struct nexus_mib_filter *, void *, size_t, struct proc *);
extern int nx_netif_prov_nx_stop(struct kern_nexus *);

extern void nx_netif_reap(struct nexus_netif_adapter *, struct ifnet *,
    uint32_t, boolean_t);

/* adapter lifecycle and statistics */
extern void nx_netif_copy_stats(struct nexus_netif_adapter *,
    struct if_netif_stats *);
extern struct nexus_netif_adapter * na_netif_alloc(zalloc_flags_t);
extern void na_netif_free(struct nexus_adapter *);
extern void na_netif_finalize(struct nexus_netif_adapter *, struct ifnet *);
extern void nx_netif_llw_detach_notify(void *);
extern void nx_netif_config_interface_advisory(struct kern_nexus *, bool);

/*
 * netif netagent API
 */
extern void nx_netif_agent_init(struct nx_netif *);
extern void nx_netif_agent_fini(struct nx_netif *);
extern int nx_netif_netagent_flow_add(struct nx_netif *, struct nx_flow_req *);
extern int nx_netif_netagent_flow_del(struct nx_netif *, struct nx_flow_req *);

/*
 * "Interrupt" mitigation API. This is used by the netif adapter to reduce
 * the number of "interrupt" requests/wakeup to clients on incoming packets.
 */
extern void nx_netif_mit_init(struct nx_netif *, const struct ifnet *,
    struct nx_netif_mit *, struct __kern_channel_ring *, boolean_t);
extern void nx_netif_mit_cleanup(struct nx_netif_mit *);
extern int nx_netif_mit_tx_intr(struct __kern_channel_ring *, struct proc *,
    uint32_t, uint32_t *);
extern int nx_netif_mit_rx_intr(struct __kern_channel_ring *, struct proc *,
    uint32_t, uint32_t *);
560
561/*
562 * Interface filter API
563 */
564#define NETIF_FILTER_RX 0x0001
565#define NETIF_FILTER_TX 0x0002
566#define NETIF_FILTER_SOURCE 0x0004
567#define NETIF_FILTER_INJECT 0x0008
568extern errno_t nx_netif_filter_inject(struct nexus_netif_adapter *,
569 struct netif_filter *, struct __kern_packet *, uint32_t);
570extern errno_t nx_netif_filter_add(struct nx_netif *, nexus_port_t, void *,
571 errno_t (*)(void *, struct __kern_packet *, uint32_t),
572 struct netif_filter **);
573extern errno_t nx_netif_filter_remove(struct nx_netif *, struct netif_filter *);
574extern void nx_netif_filter_init(struct nx_netif *);
575extern void nx_netif_filter_fini(struct nx_netif *);
576extern void nx_netif_filter_enable(struct nx_netif *);
577extern void nx_netif_filter_disable(struct nx_netif *);
578
579/*
580 * These callbacks are invoked when a packet chain has traversed the full
581 * filter chain.
582 */
583extern errno_t nx_netif_filter_rx_cb(struct nexus_netif_adapter *,
584 struct __kern_packet *, uint32_t);
585extern errno_t nx_netif_filter_tx_cb(struct nexus_netif_adapter *,
586 struct __kern_packet *, uint32_t);
587
588/*
589 * These are called by nx_netif_filter_tx_cb() to feed filtered packets
590 * back to driver.
591 */
592extern errno_t
593 nx_netif_filter_tx_processed_mbuf_enqueue(struct nexus_netif_adapter *,
594 mbuf_svc_class_t, struct mbuf *);
595extern errno_t
596 nx_netif_filter_tx_processed_pkt_enqueue(struct nexus_netif_adapter *,
597 kern_packet_svc_class_t, struct __kern_packet *);
598
599/*
600 * Called by nx_netif_na_find() to create a filter nexus adapter.
601 */
602extern int netif_filter_na_create(struct kern_nexus *, struct chreq *,
603 struct nexus_adapter **);
604
605/*
606 * Callbacks from ifnet
607 */
608extern errno_t nx_netif_native_tx_dequeue(struct nexus_netif_adapter *,
609 uint32_t, uint32_t, uint32_t, classq_pkt_t *, classq_pkt_t *,
610 uint32_t *, uint32_t *, boolean_t, errno_t);
611extern errno_t nx_netif_native_tx_get_len(struct nexus_netif_adapter *,
612 uint32_t, uint32_t *, uint32_t *, errno_t);
613extern errno_t nx_netif_compat_tx_dequeue(struct nexus_netif_adapter *,
614 uint32_t, uint32_t, uint32_t, classq_pkt_t *, classq_pkt_t *,
615 uint32_t *, uint32_t *, boolean_t, errno_t);
616extern errno_t nx_netif_compat_tx_get_len(struct nexus_netif_adapter *,
617 uint32_t, uint32_t *, uint32_t *, errno_t);
618
619/*
620 * doorbell dequeue tunable
621 */
622extern uint32_t nx_netif_doorbell_max_dequeue;
623
624/*
625 * Default drop tunable
626 */
627extern uint32_t nx_netif_filter_default_drop;
628
629/*
630 * Flow API
631 */
632#define NETIF_FLOW_SOURCE 0x0001
633#define NETIF_FLOW_INJECT 0x0002
634#define NETIF_FLOW_OUTBOUND 0x0004 /* Assumes inbound if flag is missing */
635
636extern errno_t nx_netif_demux(struct nexus_netif_adapter *,
637 struct __kern_packet *, struct __kern_packet **, uint32_t);
638extern errno_t nx_netif_flow_add(struct nx_netif *, nexus_port_t,
639 struct netif_flow_desc *, void *, errno_t (*)(void *, void *, uint32_t),
640 struct netif_flow **);
641extern errno_t nx_netif_flow_remove(struct nx_netif *, struct netif_flow *);
642extern void nx_netif_flow_init(struct nx_netif *);
643extern void nx_netif_flow_fini(struct nx_netif *);
644extern void nx_netif_flow_enable(struct nx_netif *);
645extern void nx_netif_flow_disable(struct nx_netif *);
646extern void nx_netif_snoop(struct nx_netif *, struct __kern_packet *,
647 boolean_t);
648extern boolean_t nx_netif_validate_macaddr(struct nx_netif *,
649 struct __kern_packet *, uint32_t);
650extern boolean_t nx_netif_flow_match(struct nx_netif *, struct __kern_packet *,
651 struct netif_flow *, uint32_t);
652extern struct netif_flow * nx_netif_flow_classify(struct nx_netif *,
653 struct __kern_packet *, uint32_t);
654extern void nx_netif_flow_release(struct nx_netif *, struct netif_flow *);
655extern int netif_vp_na_create(struct kern_nexus *, struct chreq *,
656 struct nexus_adapter **);
657extern errno_t netif_vp_na_channel_event(struct nx_netif *, uint32_t,
658 struct __kern_channel_event *, uint16_t);
659
660/*
661 * Disable all checks on inbound/outbound packets on VP adapters
662 */
663extern uint32_t nx_netif_vp_accept_all;
664
665/*
666 * Utility functions
667 */
668extern struct __kern_packet *nx_netif_alloc_packet(struct kern_pbufpool *,
669 uint32_t, kern_packet_t *);
670extern void nx_netif_free_packet(struct __kern_packet *);
671extern void nx_netif_free_packet_chain(struct __kern_packet *, int *);
672extern void netif_ifp_inc_traffic_class_out_pkt(struct ifnet *, uint32_t,
673 uint32_t, uint32_t);
674
675#define NETIF_CONVERT_RX 0x0001
676#define NETIF_CONVERT_TX 0x0002
677
678extern struct __kern_packet *
679 nx_netif_mbuf_to_filter_pkt_chain(struct nexus_netif_adapter *,
680 struct mbuf *, uint32_t);
681extern struct mbuf *
682 nx_netif_filter_pkt_to_mbuf_chain(struct nexus_netif_adapter *,
683 struct __kern_packet *, uint32_t);
684
685extern struct __kern_packet *
686 nx_netif_pkt_to_filter_pkt(struct nexus_netif_adapter *,
687 struct __kern_packet *, uint32_t);
688extern struct __kern_packet *
689 nx_netif_pkt_to_filter_pkt_chain(struct nexus_netif_adapter *,
690 struct __kern_packet *, uint32_t);
691extern struct __kern_packet *
692 nx_netif_filter_pkt_to_pkt_chain(struct nexus_netif_adapter *,
693 struct __kern_packet *, uint32_t);
694
695extern struct mbuf *
696 nx_netif_pkt_to_mbuf(struct nexus_netif_adapter *,
697 struct __kern_packet *, uint32_t);
698extern struct __kern_packet *
699 nx_netif_pkt_to_pkt(struct nexus_netif_adapter *,
700 struct __kern_packet *, uint32_t);
701
702extern void nx_netif_mbuf_chain_info(struct mbuf *,
703 struct mbuf **, uint32_t *, uint32_t *);
704extern void nx_netif_pkt_chain_info(struct __kern_packet *,
705 struct __kern_packet **, uint32_t *, uint32_t *);
706extern int nx_netif_get_max_mtu(ifnet_t, uint32_t *);
707
708extern void nx_netif_mit_config(struct nexus_netif_adapter *,
709 boolean_t *, boolean_t *, boolean_t *, boolean_t *);
710
711extern void nx_netif_vp_region_params_adjust(struct nexus_adapter *,
712 struct skmem_region_params *);
713
714extern void nx_netif_pktap_output(ifnet_t, int, struct __kern_packet *);
715
716extern int netif_rx_notify_default(struct __kern_channel_ring *,
717 struct proc *p, uint32_t);
718extern int netif_rx_notify_fast(struct __kern_channel_ring *,
719 struct proc *p, uint32_t);
720extern int netif_llw_rx_notify_default(struct __kern_channel_ring *,
721 struct proc *p, uint32_t);
722extern int netif_llw_rx_notify_fast(struct __kern_channel_ring *,
723 struct proc *p, uint32_t);
724extern void netif_receive(struct nexus_netif_adapter *,
725 struct __kern_packet *, struct nexus_pkt_stats *);
726
727#define NETIF_XMIT_FLAG_CHANNEL 0x0001
728#define NETIF_XMIT_FLAG_HOST 0x0002
729#define NETIF_XMIT_FLAG_REDIRECT 0x0004
730#define NETIF_XMIT_FLAG_PACING 0x0008
731extern void netif_transmit(struct ifnet *, uint32_t);
732extern int netif_ring_tx_refill(const kern_channel_ring_t,
733 uint32_t, uint32_t, boolean_t, boolean_t *, boolean_t);
734extern void netif_hwna_set_mode(struct nexus_adapter *, netif_mode_t,
735 void (*)(struct nexus_adapter *, struct __kern_packet *,
736 struct nexus_pkt_stats *));
737extern void netif_hwna_clear_mode(struct nexus_adapter *);
738
739/*
740 * rxpoll functions
741 */
742extern errno_t netif_rxpoll_set_params(struct ifnet *,
743 struct ifnet_poll_params *, boolean_t locked);
744extern void netif_rxpoll_compat_thread_func(void *, wait_result_t);
745
746/*
747 * GSO functions
748 */
749extern int netif_gso_dispatch(struct ifnet *ifp, struct mbuf *m);
750extern void netif_gso_init(void);
751extern void netif_gso_fini(void);
752
753/*
754 * Logical link functions
755 */
756extern void nx_netif_llink_retain(struct netif_llink *);
757extern void nx_netif_llink_release(struct netif_llink **);
758extern void nx_netif_qset_retain(struct netif_qset *);
759extern void nx_netif_qset_release(struct netif_qset **);
760extern void nx_netif_llink_init(struct nx_netif *);
761extern void nx_netif_llink_fini(struct nx_netif *);
762extern struct netif_qset * nx_netif_find_qset(struct nx_netif *, uint64_t);
763extern struct netif_qset * nx_netif_get_default_qset_noref(struct nx_netif *);
764extern int netif_qset_enqueue(struct netif_qset *, struct __kern_packet *,
765 struct __kern_packet *, uint32_t, uint32_t, uint32_t *, uint32_t *);
766extern int nx_netif_default_llink_config(struct nx_netif *,
767 struct kern_nexus_netif_llink_init *);
768extern void nx_netif_llink_config_free(struct nx_netif *);
769extern int nx_netif_llink_ext_init_default_queues(struct kern_nexus *);
770extern void nx_netif_llink_ext_fini_default_queues(struct kern_nexus *);
771extern int nx_netif_validate_llink_config(struct kern_nexus_netif_llink_init *,
772 bool);
773extern int nx_netif_llink_add(struct nx_netif *,
774 struct kern_nexus_netif_llink_init *, struct netif_llink **);
775extern int nx_netif_llink_remove(struct nx_netif *,
776 kern_nexus_netif_llink_id_t);
777extern int nx_netif_notify_steering_info(struct nx_netif *,
778 struct netif_qset *, struct ifnet_traffic_descriptor_common *, bool);
779__END_DECLS
780#endif /* CONFIG_NEXUS_NETIF */
781#include <skywalk/nexus/netif/nx_netif_compat.h>
782#include <skywalk/nexus/netif/nx_netif_host.h>
783#endif /* _SKYWALK_NEXUS_NETIF_H_ */
784