| 1 | /* |
| 2 | * Copyright (c) 2015-2023 Apple Inc. All rights reserved. |
| 3 | * |
| 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
| 5 | * |
| 6 | * This file contains Original Code and/or Modifications of Original Code |
| 7 | * as defined in and that are subject to the Apple Public Source License |
| 8 | * Version 2.0 (the 'License'). You may not use this file except in |
| 9 | * compliance with the License. The rights granted to you under the License |
| 10 | * may not be used to create, or enable the creation or redistribution of, |
| 11 | * unlawful or unlicensed copies of an Apple operating system, or to |
| 12 | * circumvent, violate, or enable the circumvention or violation of, any |
| 13 | * terms of an Apple operating system software license agreement. |
| 14 | * |
| 15 | * Please obtain a copy of the License at |
| 16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
| 17 | * |
| 18 | * The Original Code and all software distributed under the License are |
| 19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
| 20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
| 21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
| 22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
| 23 | * Please see the License for the specific language governing rights and |
| 24 | * limitations under the License. |
| 25 | * |
| 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
| 27 | */ |
| 28 | |
| 29 | /* |
| 30 | * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. |
| 31 | * All rights reserved. |
| 32 | * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved. |
| 33 | * |
| 34 | * Redistribution and use in source and binary forms, with or without |
| 35 | * modification, are permitted provided that the following conditions |
| 36 | * are met: |
| 37 | * 1. Redistributions of source code must retain the above copyright |
| 38 | * notice, this list of conditions and the following disclaimer. |
| 39 | * 2. Redistributions in binary form must reproduce the above copyright |
| 40 | * notice, this list of conditions and the following disclaimer in the |
| 41 | * documentation and/or other materials provided with the distribution. |
| 42 | * |
| 43 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
| 44 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 45 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 46 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
| 47 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 48 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
| 49 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| 50 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 51 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 52 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
| 53 | * SUCH DAMAGE. |
| 54 | */ |
| 55 | |
| 56 | #ifndef _SKYWALK_NEXUS_NEXUSVAR_H_ |
| 57 | #define _SKYWALK_NEXUS_NEXUSVAR_H_ |
| 58 | |
| 59 | #ifdef BSD_KERNEL_PRIVATE |
| 60 | #include <skywalk/core/skywalk_var.h> |
| 61 | #include <skywalk/os_nexus_private.h> |
| 62 | |
| 63 | struct chreq; |
| 64 | struct nxdom; |
| 65 | struct kern_channel; |
| 66 | struct kern_nexus_domain_provider; |
| 67 | |
| 68 | /* |
| 69 | * Nexus controller instance. |
| 70 | */ |
| 71 | struct nxctl { |
| 72 | decl_lck_mtx_data(, nxctl_lock); |
| 73 | uint32_t nxctl_refcnt; |
| 74 | uint32_t nxctl_flags; |
| 75 | uuid_t nxctl_uuid; |
| 76 | uuid_t nxctl_proc_uuid; |
| 77 | uint64_t nxctl_proc_uniqueid; |
| 78 | STAILQ_ENTRY(nxctl) nxctl_link; |
| 79 | struct fileproc *nxctl_fp; |
| 80 | kauth_cred_t nxctl_cred; |
| 81 | void *nxctl_traffic_rule_storage; |
| 82 | }; |
| 83 | |
| 84 | #define NEXUSCTLF_ATTACHED 0x1 |
| 85 | #define NEXUSCTLF_NOFDREF 0x2 |
| 86 | #define NEXUSCTLF_KERNEL 0x4 |
| 87 | |
| 88 | #define NEXUSCTLF_BITS \ |
| 89 | "\020\01ATTACHED\02NOFDREF\03KERNEL" |
| 90 | |
| 91 | /* |
| 92 | * Nexus port binding structure. |
| 93 | */ |
| 94 | struct nxbind { |
| 95 | uint32_t nxb_flags; |
| 96 | pid_t nxb_pid; |
| 97 | uint64_t nxb_uniqueid; |
| 98 | uuid_t nxb_exec_uuid; |
| 99 | uint32_t nxb_key_len; |
| 100 | void *nxb_key; |
| 101 | }; |
| 102 | |
| 103 | #define NXBF_MATCH_UNIQUEID 0x1 /* match against process's unique ID */ |
| 104 | #define NXBF_MATCH_EXEC_UUID 0x2 /* match against executable's UUID */ |
| 105 | #define NXBF_MATCH_KEY 0x4 /* match against key blob */ |
| 106 | |
| 107 | #define NXBF_BITS \ |
| 108 | "\020\01UNIQUEID\02EXEC_UUID\03KEY" |
| 109 | |
| 110 | /* |
| 111 | * Nexus port info structure. |
| 112 | */ |
| 113 | struct nx_port_info { |
| 114 | /* |
| 115 | * We need to store some states on the nexus port info, |
| 116 | * e.g. defunct. The states are encoded in the tagged |
| 117 | * pointer handle npi_nah. |
| 118 | */ |
| 119 | uintptr_t npi_nah; |
| 120 | struct nxbind *npi_nxb; |
| 121 | void *npi_info; |
| 122 | }; |
| 123 | |
| 124 | /* |
| 125 | * Used for indicating what type is attached to npi_info |
| 126 | * The type enum is defined here. One namespace for all nexus types. |
| 127 | * The actual structure is defined in nexus specific headers. |
| 128 | */ |
| 129 | typedef enum { |
| 130 | NX_PORT_INFO_TYPE_NETIF = 0x10000001 |
| 131 | } nx_port_info_type_t; |
| 132 | |
| 133 | /* |
| 134 | * Header of nexus specific structure npi_info |
| 135 | */ |
| 136 | struct { |
| 137 | nx_port_info_type_t ; |
| 138 | size_t ; |
| 139 | }; |
| 140 | |
#define NX_PORT_CHUNK   64      /* ports are allocated in chunks of 64 */
#define NX_PORT_CHUNK_FREE 0xffffffffffffffff /* entire chunk is free */

/*
 * Nexus port state type.
 *
 * Be mindful that due to the use of tagged pointer for nexus adapter in the
 * nexus port info structure, this type gets encoded with the requirement
 * that the object addresses are aligned on 4-bytes boundary at the minimum.
 * That leaves 2 bits for the states, therefore limiting the maximum enum
 * value to 3.
 */
typedef enum {
	NEXUS_PORT_STATE_WORKING = 0,   /* fully operational */
	NEXUS_PORT_STATE_DEFUNCT,       /* no longer in service */
	NEXUS_PORT_STATE_RESERVED_1,    /* for future use */
	NEXUS_PORT_STATE_RESERVED_2,    /* for future use */
	NEXUS_PORT_STATE_MAX = NEXUS_PORT_STATE_RESERVED_2
} nexus_port_state_t;

/* low 2 bits of npi_nah carry the port state; the rest is the pointer */
#define NPI_NA_STATE_MASK       ((uintptr_t)0x3)        /* 11 */
#define NPI_NA_TAG_MASK         ((uintptr_t)0x3)        /* 11 */

#define NPI_NA_TAG(_p)          ((uintptr_t)(_p) & NPI_NA_TAG_MASK)
#define NPI_NA_ADDR_MASK        (~NPI_NA_TAG_MASK)

/* extract/encode the state bits */
#define NPI_NA_STATE(_p)        ((uintptr_t)(_p) & NPI_NA_STATE_MASK)
#define NPI_NA_STATE_ENC(_s)    ((uintptr_t)(_s) & NPI_NA_STATE_MASK)

/* extract/encode the adapter address bits */
#define NPI_NA_ADDR(_p)         ((uintptr_t)(_p) & NPI_NA_ADDR_MASK)
#define NPI_NA_ADDR_ENC(_p)     ((uintptr_t)(_p) & NPI_NA_ADDR_MASK)

/* build a tagged handle from an adapter pointer and a state */
#define NPI_NA_ENCODE(_p, _s)   (NPI_NA_ADDR_ENC(_p) | NPI_NA_STATE_ENC(_s))

/* recover the nexus_adapter pointer from a port info entry */
#define NPI_NA(_npi)            \
	((struct nexus_adapter *)NPI_NA_ADDR((_npi)->npi_nah))
/* true if the port has been marked defunct */
#define NPI_IS_DEFUNCT(_npi)    \
	(NPI_NA_STATE((_npi)->npi_nah) == NEXUS_PORT_STATE_DEFUNCT)
| 179 | |
| 180 | /* |
| 181 | * Nexus-wide advisory region and object. |
| 182 | */ |
| 183 | struct kern_nexus_advisory { |
| 184 | struct skmem_region *nxv_reg; |
| 185 | void *nxv_adv; |
| 186 | nexus_advisory_type_t nxv_adv_type; |
| 187 | union { |
| 188 | struct sk_nexusadv *flowswitch_nxv_adv; |
| 189 | struct netif_nexus_advisory *netif_nxv_adv; |
| 190 | }; |
| 191 | }; |
| 192 | |
| 193 | /* |
| 194 | * Nexus instance. |
| 195 | * |
| 196 | * At present most fields are protected by sk_lock. The exception is |
| 197 | * the nx_ch_if_adv_head list which uses nx_ch_if_adv_lock instead. |
| 198 | * |
| 199 | * In cases where sk_lock, nx_ch_if_adv_lock and ch_lock must be held, |
| 200 | * the following ordering needs to be followed: |
| 201 | * |
| 202 | * sk_lock -> nx_ch_if_adv_lock -> ch_lock |
| 203 | */ |
| 204 | struct kern_nexus { |
| 205 | uint32_t nx_refcnt; |
| 206 | volatile uint32_t nx_flags; |
| 207 | void *nx_ctx; |
| 208 | nexus_ctx_release_fn_t nx_ctx_release; |
| 209 | struct kern_nexus_provider *nx_prov; |
| 210 | uint64_t nx_id; |
| 211 | uuid_t nx_uuid; |
| 212 | STAILQ_ENTRY(kern_nexus) nx_prov_link; |
| 213 | RB_ENTRY(kern_nexus) nx_link; |
| 214 | STAILQ_HEAD(, kern_channel) nx_ch_head; |
| 215 | uint32_t nx_ch_count; |
| 216 | STAILQ_HEAD(, kern_channel) nx_ch_nonxref_head; |
| 217 | decl_lck_rw_data(, nx_ch_if_adv_lock); |
| 218 | STAILQ_HEAD(, kern_channel) nx_ch_if_adv_head; |
| 219 | void *nx_arg; |
| 220 | struct kern_pbufpool *nx_rx_pp; |
| 221 | struct kern_pbufpool *nx_tx_pp; |
| 222 | struct kern_nexus_advisory nx_adv; |
| 223 | |
| 224 | /* nexus port */ |
| 225 | struct nx_port_info *nx_ports; |
| 226 | bitmap_t *nx_ports_bmap; |
| 227 | nexus_port_size_t nx_active_ports; |
| 228 | nexus_port_size_t nx_num_ports; |
| 229 | }; |
| 230 | |
| 231 | #define NXF_ATTACHED 0x1 |
| 232 | #define NXF_CLOSED 0x2 /* attached but closed */ |
| 233 | #define NXF_REJECT (1U << 31) /* not accepting channel activities */ |
| 234 | |
| 235 | #define NXF_BITS \ |
| 236 | "\020\01ATTACHED\02CLOSED\040REJECT" |
| 237 | |
| 238 | #define NX_PROV(_nx) ((_nx)->nx_prov) |
| 239 | #define NX_PROV_PARAMS(_nx) (NX_PROV(_nx)->nxprov_params) |
| 240 | #define NX_DOM_PROV(_nx) (NX_PROV(_nx)->nxprov_dom_prov) |
| 241 | #define NX_DOM(_nx) (NX_DOM_PROV(_nx)->nxdom_prov_dom) |
| 242 | |
| 243 | #define NX_REJECT_ACT(_nx) (((_nx)->nx_flags & NXF_REJECT) != 0) |
| 244 | |
| 245 | /* |
| 246 | * Nexus provider. |
| 247 | */ |
| 248 | struct kern_nexus_provider { |
| 249 | uint32_t nxprov_refcnt; |
| 250 | uint32_t nxprov_flags; |
| 251 | STAILQ_ENTRY(kern_nexus_provider) nxprov_link; |
| 252 | STAILQ_HEAD(, kern_nexus) nxprov_nx_head; |
| 253 | uint32_t nxprov_nx_count; |
| 254 | struct nxctl *nxprov_ctl; |
| 255 | uuid_t nxprov_uuid; |
| 256 | struct kern_nexus_domain_provider *nxprov_dom_prov; |
| 257 | union { |
| 258 | struct kern_nexus_provider_init nxprov_ext; |
| 259 | struct kern_nexus_netif_provider_init nxprov_netif_ext; |
| 260 | }; |
| 261 | struct nxprov_params *nxprov_params; |
| 262 | struct skmem_region_params nxprov_region_params[SKMEM_REGIONS]; |
| 263 | }; |
| 264 | |
| 265 | /* valid flags for nxprov_flags */ |
| 266 | #define NXPROVF_ATTACHED 0x1 /* attached to global list */ |
| 267 | #define NXPROVF_CLOSED 0x2 /* attached but closed */ |
| 268 | #define NXPROVF_EXTERNAL 0x4 /* external nexus provider */ |
| 269 | #define NXPROVF_VIRTUAL_DEVICE 0x8 /* device is virtual (no DMA) */ |
| 270 | |
| 271 | #define NXPROV_LLINK(_nxp) \ |
| 272 | ((_nxp)->nxprov_params->nxp_flags & NXPF_NETIF_LLINK) |
| 273 | |
| 274 | #define NXPROVF_BITS \ |
| 275 | "\020\01ATTACHED\02CLOSED\03EXTERNAL\04VIRTUALDEV" |
| 276 | |
| 277 | #define NX_ANONYMOUS_PROV(_nx) \ |
| 278 | (NX_PROV(_nx)->nxprov_params->nxp_flags & NXPF_ANONYMOUS) |
| 279 | #define NX_USER_CHANNEL_PROV(_nx) \ |
| 280 | (NX_PROV(_nx)->nxprov_params->nxp_flags & NXPF_USER_CHANNEL) |
| 281 | #define NX_LLINK_PROV(_nx) NXPROV_LLINK(NX_PROV(_nx)) |
| 282 | |
| 283 | /* |
| 284 | * Nexus domain provider. |
| 285 | */ |
| 286 | struct kern_nexus_domain_provider { |
| 287 | STAILQ_ENTRY(kern_nexus_domain_provider) nxdom_prov_link; |
| 288 | STAILQ_ENTRY(kern_nexus_domain_provider) nxdom_prov_detaching_link; |
| 289 | char nxdom_prov_name[64]; |
| 290 | uuid_t nxdom_prov_uuid; |
| 291 | uint64_t nxdom_prov_gencnt; |
| 292 | uint32_t nxdom_prov_refcnt; |
| 293 | uint32_t nxdom_prov_flags; |
| 294 | struct nxdom *nxdom_prov_dom; |
| 295 | struct kern_nexus_domain_provider_init nxdom_prov_ext; |
| 296 | /* |
| 297 | * The callbacks are grouped together to simplify the |
| 298 | * initialization of external domain providers; see |
| 299 | * kern_nexus_register_domain_provider() for details. |
| 300 | */ |
| 301 | struct nxdom_prov_cb { |
| 302 | int (*dp_cb_init)(struct kern_nexus_domain_provider *); |
| 303 | void (*dp_cb_fini)(struct kern_nexus_domain_provider *); |
| 304 | int (*dp_cb_params)(struct kern_nexus_domain_provider *, |
| 305 | const uint32_t, const struct nxprov_params *, |
| 306 | struct nxprov_params *, |
| 307 | struct skmem_region_params[SKMEM_REGIONS], uint32_t); |
| 308 | int (*dp_cb_mem_new)(struct kern_nexus_domain_provider *, |
| 309 | struct kern_nexus *, struct nexus_adapter *); |
| 310 | int (*dp_cb_config)(struct kern_nexus_domain_provider *, |
| 311 | struct kern_nexus *, struct nx_cfg_req *, int, |
| 312 | struct proc *, kauth_cred_t); |
| 313 | int (*dp_cb_nx_ctor)(struct kern_nexus *); |
| 314 | void (*dp_cb_nx_dtor)(struct kern_nexus *); |
| 315 | int (*dp_cb_nx_mem_info)(struct kern_nexus *, |
| 316 | struct kern_pbufpool **, struct kern_pbufpool **); |
| 317 | size_t (*dp_cb_nx_mib_get)(struct kern_nexus *, |
| 318 | struct nexus_mib_filter *, void *, size_t, struct proc *); |
| 319 | int (*dp_cb_nx_stop)(struct kern_nexus *); |
| 320 | } nxdom_prov_cb; |
| 321 | #define nxdom_prov_init nxdom_prov_cb.dp_cb_init |
| 322 | #define nxdom_prov_fini nxdom_prov_cb.dp_cb_fini |
| 323 | #define nxdom_prov_params nxdom_prov_cb.dp_cb_params |
| 324 | #define nxdom_prov_mem_new nxdom_prov_cb.dp_cb_mem_new |
| 325 | #define nxdom_prov_config nxdom_prov_cb.dp_cb_config |
| 326 | #define nxdom_prov_nx_ctor nxdom_prov_cb.dp_cb_nx_ctor |
| 327 | #define nxdom_prov_nx_dtor nxdom_prov_cb.dp_cb_nx_dtor |
| 328 | #define nxdom_prov_nx_mem_info nxdom_prov_cb.dp_cb_nx_mem_info |
| 329 | #define nxdom_prov_nx_mib_get nxdom_prov_cb.dp_cb_nx_mib_get |
| 330 | #define nxdom_prov_nx_stop nxdom_prov_cb.dp_cb_nx_stop |
| 331 | }; |
| 332 | |
| 333 | #define NXDOMPROVF_INITIALIZED 0x1 /* provider has been initialized */ |
| 334 | #define NXDOMPROVF_ATTACHED 0x2 /* provider is attached to a domain */ |
| 335 | #define NXDOMPROVF_DETACHING 0x4 /* provider is being detached */ |
| 336 | #define NXDOMPROVF_EXT 0x8 /* external provider */ |
| 337 | #define NXDOMPROVF_EXT_INITED 0x10 /* nxpi_init() succeeded */ |
| 338 | #define NXDOMPROVF_DEFAULT 0x20 /* default provider for domain */ |
| 339 | |
| 340 | struct nxp_bounds { |
| 341 | uint32_t nb_def; |
| 342 | uint32_t nb_min; |
| 343 | uint32_t nb_max; |
| 344 | }; |
| 345 | |
| 346 | /* |
| 347 | * Nexus domain. |
| 348 | * |
| 349 | * Each Nexus type is represented by a Nexus domain; there can |
| 350 | * be more than one providers for a given domain. |
| 351 | */ |
| 352 | struct nxdom { |
| 353 | STAILQ_ENTRY(nxdom) nxdom_link; |
| 354 | STAILQ_HEAD(, kern_nexus_domain_provider) nxdom_prov_head; |
| 355 | nexus_type_t nxdom_type; |
| 356 | nexus_meta_type_t nxdom_md_type; |
| 357 | nexus_meta_subtype_t nxdom_md_subtype; |
| 358 | uint32_t nxdom_flags; |
| 359 | struct nxp_bounds nxdom_ports; |
| 360 | struct nxp_bounds nxdom_tx_rings; |
| 361 | struct nxp_bounds nxdom_rx_rings; |
| 362 | struct nxp_bounds nxdom_tx_slots; |
| 363 | struct nxp_bounds nxdom_rx_slots; |
| 364 | struct nxp_bounds nxdom_buf_size; |
| 365 | struct nxp_bounds nxdom_large_buf_size; |
| 366 | struct nxp_bounds nxdom_meta_size; |
| 367 | struct nxp_bounds nxdom_stats_size; |
| 368 | struct nxp_bounds nxdom_pipes; |
| 369 | struct nxp_bounds nxdom_extensions; |
| 370 | struct nxp_bounds nxdom_mhints; |
| 371 | struct nxp_bounds nxdom_flowadv_max; |
| 372 | struct nxp_bounds nxdom_nexusadv_size; |
| 373 | struct nxp_bounds nxdom_capabilities; |
| 374 | struct nxp_bounds nxdom_qmap; |
| 375 | struct nxp_bounds nxdom_max_frags; |
| 376 | struct skmem_region_params nxdom_region_params[SKMEM_REGIONS]; |
| 377 | const char *nxdom_name; |
| 378 | |
| 379 | /* |
| 380 | * Nexus domain callbacks. |
| 381 | */ |
| 382 | void (*nxdom_init)(struct nxdom *); /* optional */ |
| 383 | void (*nxdom_terminate)(struct nxdom *); /* optional */ |
| 384 | void (*nxdom_fini)(struct nxdom *); /* optional */ |
| 385 | int (*nxdom_find_port) /* optional */ |
| 386 | (struct kern_nexus *, boolean_t, nexus_port_t *); |
| 387 | boolean_t (*nxdom_port_is_reserved) /* optional */ |
| 388 | (struct kern_nexus *, nexus_port_t); |
| 389 | int (*nxdom_bind_port) /* required */ |
| 390 | (struct kern_nexus *, nexus_port_t *, struct nxbind *, void *); |
| 391 | int (*nxdom_unbind_port) /* required */ |
| 392 | (struct kern_nexus *, nexus_port_t); |
| 393 | int (*nxdom_connect) /* required */ |
| 394 | (struct kern_nexus_domain_provider *, struct kern_nexus *, |
| 395 | struct kern_channel *, struct chreq *, struct kern_channel *, |
| 396 | struct nxbind *, struct proc *); |
| 397 | void (*nxdom_disconnect) /* required */ |
| 398 | (struct kern_nexus_domain_provider *, struct kern_nexus *, |
| 399 | struct kern_channel *); |
| 400 | void (*nxdom_defunct) /* required */ |
| 401 | (struct kern_nexus_domain_provider *, struct kern_nexus *, |
| 402 | struct kern_channel *, struct proc *); |
| 403 | void (*nxdom_defunct_finalize) /* required */ |
| 404 | (struct kern_nexus_domain_provider *, struct kern_nexus *, |
| 405 | struct kern_channel *, boolean_t); |
| 406 | }; |
| 407 | |
| 408 | #define NEXUSDOMF_INITIALIZED 0x1 /* domain has been initialized */ |
| 409 | #define NEXUSDOMF_ATTACHED 0x2 /* domain is globally attached */ |
| 410 | #define NEXUSDOMF_TERMINATED 0x4 /* domain has been terminated */ |
| 411 | |
| 412 | #define NXDOM_DEF(_dom, var) ((_dom)->nxdom_##var.nb_def) |
| 413 | #define NXDOM_MIN(_dom, var) ((_dom)->nxdom_##var.nb_min) |
| 414 | #define NXDOM_MAX(_dom, var) ((_dom)->nxdom_##var.nb_max) |
| 415 | |
/* shared kernel and user nexus controller singletons */
extern struct nexus_controller kernnxctl;
extern struct nexus_controller usernxctl;
/* lock groups/attributes shared by the nexus subsystem */
extern lck_grp_t nexus_lock_group;
extern lck_grp_t nexus_mbq_lock_group;
extern lck_grp_t nexus_pktq_lock_group;
extern lck_attr_t nexus_lock_attr;
/* allocation tags for nxbind keys and per-port info */
extern kern_allocation_name_t skmem_tag_nx_key;
extern kern_allocation_name_t skmem_tag_nx_port_info;

/* default domain provider for each nexus type */
extern struct kern_nexus_domain_provider *nxdom_prov_default[NEXUS_TYPE_MAX];

/* true for the shared kernel controller instance */
#define NX_SHARED_NXCTL_INSTANCE(_nxctl)                \
	((_nxctl) == kernnxctl.ncd_nxctl)

/*
 * Lock a controller.  The shared kernel instance is deliberately not
 * locked here (it is asserted unowned instead); only per-client
 * controllers take their mutex.
 */
#define NXCTL_LOCK(_nxctl) do {                                 \
	if (!NX_SHARED_NXCTL_INSTANCE((_nxctl))) {              \
	        lck_mtx_lock(&((_nxctl)->nxctl_lock));          \
	} else {                                                \
	        LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock),         \
	            LCK_MTX_ASSERT_NOTOWNED);                   \
	}                                                       \
} while (0)

/* counterpart of NXCTL_LOCK; no-op (plus assert) for the shared instance */
#define NXCTL_UNLOCK(_nxctl) do {                               \
	if (!NX_SHARED_NXCTL_INSTANCE((_nxctl))) {              \
	        lck_mtx_unlock(&((_nxctl)->nxctl_lock));\
	}                                                       \
	LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock),                 \
	    LCK_MTX_ASSERT_NOTOWNED);                           \
} while (0)

/* assert the expected ownership state for the given controller */
#define NXCTL_LOCK_ASSERT_HELD(_nxctl) do {                     \
	if (!NX_SHARED_NXCTL_INSTANCE((_nxctl))) {              \
	        LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock),         \
	            LCK_MTX_ASSERT_OWNED);                      \
	} else {                                                \
	        LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock),         \
	            LCK_MTX_ASSERT_NOTOWNED);                   \
	}                                                       \
} while (0)
| 456 | |
__BEGIN_DECLS
/* subsystem init/teardown */
extern int nexus_init(void);
extern void nexus_fini(void);

/* nexus instance lifecycle */
extern struct kern_nexus *nx_create(struct nxctl *, const uuid_t,
    const nexus_type_t, const void *, nexus_ctx_release_fn_t,
    struct kern_pbufpool *, struct kern_pbufpool *, int *);
extern void nx_retain(struct kern_nexus *);
extern void nx_retain_locked(struct kern_nexus *);
extern int nx_release(struct kern_nexus *);
extern int nx_release_locked(struct kern_nexus *);
extern void nx_detach(struct kern_nexus *);
extern void nx_stop(struct kern_nexus *nx);
extern int nx_close(struct kern_nexus *, boolean_t);
extern int nx_destroy(struct nxctl *, const uuid_t);
extern struct kern_nexus *nx_find(const uuid_t, boolean_t);
/* nexus-wide advisory region */
extern int nx_advisory_alloc(struct kern_nexus *, const char *,
    struct skmem_region_params *, nexus_advisory_type_t);
extern void nx_advisory_free(struct kern_nexus *);
/* nexus port management */
extern int nx_port_find(struct kern_nexus *, nexus_port_t,
    nexus_port_t, nexus_port_t *);
extern int nx_port_alloc(struct kern_nexus *, nexus_port_t,
    struct nxbind *, struct nexus_adapter **, struct proc *);
extern int nx_port_bind(struct kern_nexus *, nexus_port_t,
    struct nxbind *);
extern int nx_port_bind_info(struct kern_nexus *, nexus_port_t,
    struct nxbind *, void *);
extern int nx_port_unbind(struct kern_nexus *, nexus_port_t);
extern struct nexus_adapter *nx_port_get_na(struct kern_nexus *,
    nexus_port_t);
extern int nx_port_get_info(struct kern_nexus *, nexus_port_t,
    nx_port_info_type_t, void *, uint32_t);
extern void nx_port_defunct(struct kern_nexus *, nexus_port_t);
extern void nx_port_free(struct kern_nexus *, nexus_port_t);
extern void nx_port_free_all(struct kern_nexus *);
extern bool nx_port_is_valid(struct kern_nexus *, nexus_port_t);
extern bool nx_port_is_defunct(struct kern_nexus *, nexus_port_t);
extern void nx_port_foreach(struct kern_nexus *, void (^)(nexus_port_t));
extern void nx_interface_advisory_notify(struct kern_nexus *);

/* nexus controller lifecycle and options */
extern struct nxctl *nxctl_create(struct proc *, struct fileproc *,
    const uuid_t, int *);
extern void nxctl_close(struct nxctl *);
extern void nxctl_traffic_rule_clean(struct nxctl *);
extern void nxctl_traffic_rule_init(void);
extern void nxctl_traffic_rule_fini(void);
extern int nxctl_inet_traffic_rule_find_qset_id_with_pkt(const char *,
    struct __kern_packet *, uint64_t *);
extern int nxctl_inet_traffic_rule_find_qset_id(const char *,
    struct ifnet_traffic_descriptor_inet *, uint64_t *);
extern int nxctl_inet_traffic_rule_get_count(const char *, uint32_t *);
extern int nxctl_get_opt(struct nxctl *, struct sockopt *);
extern int nxctl_set_opt(struct nxctl *, struct sockopt *);
extern void nxctl_retain(struct nxctl *);
extern int nxctl_release(struct nxctl *);
extern void nxctl_dtor(void *);

/* nexus provider lifecycle */
extern int nxprov_advise_connect(struct kern_nexus *, struct kern_channel *,
    struct proc *p);
extern void nxprov_advise_disconnect(struct kern_nexus *,
    struct kern_channel *);
extern struct kern_nexus_provider *nxprov_create(struct proc *,
    struct nxctl *, struct nxprov_reg *, int *);
extern struct kern_nexus_provider *nxprov_create_kern(struct nxctl *,
    struct kern_nexus_domain_provider *, struct nxprov_reg *,
    const struct kern_nexus_provider_init *init, int *err);
extern int nxprov_close(struct kern_nexus_provider *, boolean_t);
extern int nxprov_destroy(struct nxctl *, const uuid_t);
extern void nxprov_retain(struct kern_nexus_provider *);
extern int nxprov_release(struct kern_nexus_provider *);
extern struct nxprov_params *nxprov_params_alloc(zalloc_flags_t);
extern void nxprov_params_free(struct nxprov_params *);

/*
 * Out-parameters handed to a domain provider's adjust callback so it
 * can tweak individual provider parameters in place.
 */
struct nxprov_adjusted_params {
	nexus_meta_subtype_t *adj_md_subtype;
	uint32_t *adj_stats_size;
	uint32_t *adj_flowadv_max;
	uint32_t *adj_nexusadv_size;
	uint32_t *adj_caps;
	uint32_t *adj_tx_rings;
	uint32_t *adj_rx_rings;
	uint32_t *adj_tx_slots;
	uint32_t *adj_rx_slots;
	uint32_t *adj_alloc_rings;
	uint32_t *adj_free_rings;
	uint32_t *adj_alloc_slots;
	uint32_t *adj_free_slots;
	uint32_t *adj_buf_size;
	uint32_t *adj_buf_region_segment_size;
	uint32_t *adj_pp_region_config_flags;
	uint32_t *adj_max_frags;
	uint32_t *adj_event_rings;
	uint32_t *adj_event_slots;
	uint32_t *adj_max_buffers;
	uint32_t *adj_large_buf_size;
};

extern int nxprov_params_adjust(struct kern_nexus_domain_provider *,
    const uint32_t, const struct nxprov_params *, struct nxprov_params *,
    struct skmem_region_params[SKMEM_REGIONS], const struct nxdom *,
    const struct nxdom *, const struct nxdom *, uint32_t,
    int (*adjust_fn)(const struct kern_nexus_domain_provider *,
    const struct nxprov_params *, struct nxprov_adjusted_params *));

/* nexus domain registry */
extern void nxdom_attach_all(void);
extern void nxdom_detach_all(void);
extern struct nxdom *nxdom_find(nexus_type_t);

/* domain provider registry and refcounting */
extern struct kern_nexus_domain_provider *nxdom_prov_find(
	const struct nxdom *, const char *);
extern struct kern_nexus_domain_provider *nxdom_prov_find_uuid(const uuid_t);
extern int nxdom_prov_add(struct nxdom *, struct kern_nexus_domain_provider *);
extern void nxdom_prov_del(struct kern_nexus_domain_provider *);
extern void nxdom_prov_retain_locked(struct kern_nexus_domain_provider *);
extern void nxdom_prov_retain(struct kern_nexus_domain_provider *);
extern boolean_t nxdom_prov_release_locked(struct kern_nexus_domain_provider *);
extern boolean_t nxdom_prov_release(struct kern_nexus_domain_provider *);
extern int nxdom_prov_validate_params(struct kern_nexus_domain_provider *,
    const struct nxprov_reg *, struct nxprov_params *,
    struct skmem_region_params[SKMEM_REGIONS], const uint32_t, uint32_t);

/* nxbind allocation and comparison */
extern struct nxbind *nxb_alloc(zalloc_flags_t);
extern void nxb_free(struct nxbind *);
extern boolean_t nxb_is_equal(struct nxbind *, struct nxbind *);
extern void nxb_move(struct nxbind *, struct nxbind *);

/* iterate all nexus instances in the global tree */
typedef void kern_nexus_walktree_f_t(struct kern_nexus *, void *);
extern void kern_nexus_walktree(kern_nexus_walktree_f_t *, void *, boolean_t);

extern int kern_nexus_get_pbufpool_info(const uuid_t nx_uuid,
    struct kern_pbufpool_memory_info *rx_pool,
    struct kern_pbufpool_memory_info *tx_pool);
__END_DECLS
| 590 | |
| 591 | #include <skywalk/nexus/nexus_adapter.h> |
| 592 | |
| 593 | __attribute__((always_inline)) |
| 594 | static inline int |
| 595 | nx_sync_tx(struct __kern_channel_ring *kring, boolean_t commit) |
| 596 | { |
| 597 | struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx); |
| 598 | |
| 599 | ASSERT(kring->ckr_tx == NR_TX); |
| 600 | if (nxprov->nxprov_ext.nxpi_sync_tx != NULL) { |
| 601 | return nxprov->nxprov_ext.nxpi_sync_tx(nxprov, |
| 602 | KRNA(kring)->na_nx, kring, |
| 603 | (commit ? KERN_NEXUS_SYNCF_COMMIT : 0)); |
| 604 | } else { |
| 605 | return 0; |
| 606 | } |
| 607 | } |
| 608 | |
| 609 | __attribute__((always_inline)) |
| 610 | static inline int |
| 611 | nx_sync_rx(struct __kern_channel_ring *kring, boolean_t commit) |
| 612 | { |
| 613 | struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx); |
| 614 | |
| 615 | ASSERT(kring->ckr_tx == NR_RX); |
| 616 | if (nxprov->nxprov_ext.nxpi_sync_rx != NULL) { |
| 617 | return nxprov->nxprov_ext.nxpi_sync_rx(nxprov, |
| 618 | KRNA(kring)->na_nx, kring, |
| 619 | (commit ? KERN_NEXUS_SYNCF_COMMIT : 0)); |
| 620 | } else { |
| 621 | return 0; |
| 622 | } |
| 623 | } |
| 624 | |
| 625 | __attribute__((always_inline)) |
| 626 | static __inline__ void |
| 627 | nx_tx_doorbell(struct __kern_channel_ring *kring, boolean_t async) |
| 628 | { |
| 629 | struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx); |
| 630 | |
| 631 | ASSERT(kring->ckr_tx == NR_TX); |
| 632 | ASSERT(nxprov->nxprov_ext.nxpi_tx_doorbell != NULL); |
| 633 | nxprov->nxprov_ext.nxpi_tx_doorbell(nxprov, KRNA(kring)->na_nx, |
| 634 | kring, (async ? KERN_NEXUS_TXDOORBELLF_ASYNC_REFILL: 0)); |
| 635 | } |
| 636 | |
| 637 | __attribute__((always_inline)) |
| 638 | static inline int |
| 639 | nx_rx_sync_packets(struct __kern_channel_ring *kring, |
| 640 | uint64_t *__counted_by(*count)packets, uint32_t *count) |
| 641 | { |
| 642 | struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx); |
| 643 | |
| 644 | ASSERT(kring->ckr_tx == NR_RX); |
| 645 | if (nxprov->nxprov_ext.nxpi_rx_sync_packets != NULL) { |
| 646 | return nxprov->nxprov_ext.nxpi_rx_sync_packets(nxprov, |
| 647 | KRNA(kring)->na_nx, kring, packets, count, 0); |
| 648 | } else { |
| 649 | return 0; |
| 650 | } |
| 651 | } |
| 652 | |
| 653 | __attribute__((always_inline)) |
| 654 | static inline boolean_t |
| 655 | nx_has_rx_sync_packets(struct __kern_channel_ring *kring) |
| 656 | { |
| 657 | struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx); |
| 658 | |
| 659 | ASSERT(kring->ckr_tx == NR_RX); |
| 660 | return nxprov->nxprov_ext.nxpi_rx_sync_packets != NULL; |
| 661 | } |
| 662 | |
| 663 | __attribute__((always_inline)) |
| 664 | static __inline__ errno_t |
| 665 | nx_tx_qset_notify(struct kern_nexus *nx, void *qset_ctx) |
| 666 | { |
| 667 | struct kern_nexus_provider *nxprov = NX_PROV(nx); |
| 668 | sk_protect_t protect; |
| 669 | errno_t err; |
| 670 | |
| 671 | ASSERT(nxprov->nxprov_netif_ext.nxnpi_tx_qset_notify != NULL); |
| 672 | protect = sk_tx_notify_protect(); |
| 673 | err = nxprov->nxprov_netif_ext.nxnpi_tx_qset_notify(nxprov, nx, |
| 674 | qset_ctx, 0); |
| 675 | sk_tx_notify_unprotect(protect); |
| 676 | return err; |
| 677 | } |
| 678 | #endif /* BSD_KERNEL_PRIVATE */ |
| 679 | #endif /* _SKYWALK_NEXUS_NEXUSVAR_H_ */ |
| 680 | |