/*
 * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _SKYWALK_NEXUS_ADAPTER_H_
#define _SKYWALK_NEXUS_ADAPTER_H_

#ifdef BSD_KERNEL_PRIVATE
#include <skywalk/os_skywalk_private.h>
#include <skywalk/os_packet_private.h>

#define NEXUS_ADAPTER_NAMELEN   64

struct chreq;
struct kern_nexus;
struct __kern_channel_ring;
struct nexus_vp_adapter;
struct nexus_upipe_adapter;

typedef enum {
    NA_INVALID = 0,         /* uninitialized */
    NA_PSEUDO,              /* struct nexus_adapter */
#if CONFIG_NEXUS_USER_PIPE
    NA_USER_PIPE,           /* struct nexus_upipe_adapter */
#endif /* CONFIG_NEXUS_USER_PIPE */
#if CONFIG_NEXUS_KERNEL_PIPE
    NA_KERNEL_PIPE,         /* struct nexus_kpipe_adapter */
#endif /* CONFIG_NEXUS_KERNEL_PIPE */
#if CONFIG_NEXUS_MONITOR
    NA_MONITOR,             /* struct nexus_monitor_adapter */
#endif /* CONFIG_NEXUS_MONITOR */
#if CONFIG_NEXUS_NETIF
    NA_NETIF_DEV,           /* struct nexus_netif_adapter (dev) */
    NA_NETIF_HOST,          /* struct nexus_netif_adapter (host) */
    NA_NETIF_COMPAT_DEV,    /* struct nexus_netif_compat_adapter (dev) */
    NA_NETIF_COMPAT_HOST,   /* struct nexus_netif_compat_adapter (host) */
    NA_NETIF_FILTER,        /* struct nexus_netif_adapter (vp) */
    NA_NETIF_VP,            /* struct nexus_netif_adapter (vp) */
#endif /* CONFIG_NEXUS_NETIF */
#if CONFIG_NEXUS_FLOWSWITCH
    NA_FLOWSWITCH_VP,       /* struct nexus_vp_adapter */
#endif /* CONFIG_NEXUS_FLOWSWITCH */
} nexus_adapter_type_t;

typedef enum {
    NXSPEC_CMD_CONNECT = 0,
    NXSPEC_CMD_DISCONNECT = 1,
    NXSPEC_CMD_START = 2,
    NXSPEC_CMD_STOP = 3,
} nxspec_cmd_t;

typedef enum {
    NA_ACTIVATE_MODE_ON = 0,    /* activate adapter */
    NA_ACTIVATE_MODE_DEFUNCT,   /* defunct an active adapter */
    NA_ACTIVATE_MODE_OFF,       /* deactivate adapter */
} na_activate_mode_t;

struct nexus_pkt_stats {
    uint64_t nps_pkts;
    uint64_t nps_bytes;
};

/*
 * The "struct nexus_adapter" contains all base fields needed to support
 * Nexus adapter operations.  There are different types of Nexus adapters
 * (upipe, kpipe, fsw, monitor, vp, ...) so a nexus_adapter is
 * always the first field in the derived type.
 */
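/*
 * Illustrative sketch (not part of this header): because the base struct
 * is the first member, converting between the base and a derived adapter
 * is a simple pointer cast.  The type and field names below are
 * hypothetical placeholders for whatever a derived type actually defines.
 *
 *    struct nexus_foo_adapter {
 *        struct nexus_adapter fna_up;    -- must be the first field
 *        int fna_private_state;          -- derived-type fields follow
 *    };
 *
 *    static inline struct nexus_foo_adapter *
 *    FOO_NA(struct nexus_adapter *na)
 *    {
 *        return (struct nexus_foo_adapter *)(void *)na;   -- downcast
 *    }
 */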
struct nexus_adapter {
    volatile uint32_t na_flags;                 /* NAF_* flags */
    nexus_adapter_type_t na_type;               /* nexus type */
    const nexus_meta_type_t na_md_type;         /* metadata type */
    const nexus_meta_subtype_t na_md_subtype;   /* metadata subtype */

    nexus_port_t na_nx_port;

    /*
     * Number of user-space descriptors using this interface,
     * which is equal to the number of channel schema objects
     * in the mapped region.
     */
    uint32_t na_channels;

    /* number of adapter transmit and receive rings */
    uint32_t na_num_rx_rings;
    uint32_t na_num_tx_rings;

    /* number of ring pairs used by packet allocator */
    uint32_t na_num_allocator_ring_pairs;

    /* number of event rings */
    uint32_t na_num_event_rings;

    /* number of large buffer alloc rings */
    uint32_t na_num_large_buf_alloc_rings;

    uint64_t na_work_ts;        /* when we last worked on it */

    /*
     * na_{tx,rx,alloc,free,event}_rings are private but allocated
     * as a contiguous chunk of memory.
     */
    struct __kern_channel_ring *na_tx_rings;    /* array of TX rings. */
    struct __kern_channel_ring *na_rx_rings;    /* array of RX rings. */

    /*
     * na_nx refers to the nexus instance associated with this
     * nexus adapter; in cases such as the virtual port adapter
     * of a flow switch nexus used for user pipe, this will
     * indicate the latter.  The na_nxdom_prov will point to
     * the actual nexus domain associated with the adapter.
     */
    struct kern_nexus *na_nx;

    /*
     * Standard refcount to control the lifetime of the adapter
     * (it should be equal to the lifetime of the corresponding ifp)
     */
    volatile uint32_t na_refcount;

    int na_si_users[NR_ALL];            /* # of users per global wait queue */
    struct ch_selinfo na_si[NR_ALL];    /* global wait queues */

    /*
     * Memory arena.
     */
    struct skmem_arena *na_arena;

    /*
     * Number of descriptors in each queue.
     */
    uint32_t na_num_tx_slots;
    uint32_t na_num_rx_slots;
    uint32_t na_num_allocator_slots;
    uint32_t na_num_event_slots;
    uint32_t na_num_large_buf_alloc_slots;

    /*
     * Combined slot count of all rings.
     * Used for allocating slot_ctx and scratch memory.
     */
    uint32_t na_total_slots;

    /*
     * Flow advisory (if applicable).
     */
    const uint32_t na_flowadv_max;      /* max # of flow advisory entries */

    /*
     * Shareable statistics (if applicable).
     */
    const nexus_stats_type_t na_stats_type;     /* stats type */

    /*
     * Array of packet allocator and event rings
     */
    struct __kern_channel_ring *na_alloc_rings;
    struct __kern_channel_ring *na_free_rings;
    struct __kern_channel_ring *na_event_rings;
    struct __kern_channel_ring *na_large_buf_alloc_rings;

    uint64_t na_ch_mit_ival;    /* mitigation interval */

    /*
     * The actual nexus domain associated with the adapter.
     */
    struct kern_nexus_domain_provider *na_nxdom_prov;

    /*
     * Array of slot contexts.  This covers enough space to hold
     * slot contexts of slot_ctx size for all of the TX and RX rings.
     * It is optional and is requested at na_krings_create() time.
     */
    struct slot_ctx *na_slot_ctxs;

    /*
     * Array of packet handles, enough for all slots in the
     * TX and RX rings of this adapter.  It is automatically
     * created at na_krings_create() time.
     */
    kern_packet_t *na_scratch;

    struct __kern_channel_ring *na_tail;    /* pointer past the last ring */

#if CONFIG_NEXUS_FLOWSWITCH || CONFIG_NEXUS_NETIF
    /*
     * Additional information attached to this adapter by other
     * Skywalk subsystems; currently used by flow switch and netif.
     */
    void *na_private;

    /*
     * References to the ifnet and device routines, used by the netif
     * nexus adapter functions.  A non-NULL na_ifp indicates an io ref
     * count to the ifnet that needs to be released at adapter detach
     * time (at which point it will be nullified).
     */
    struct ifnet *na_ifp;
    /*
     * Lookup table to retrieve the ring corresponding to a service
     * class; we store the ring index into the na_{tx,rx}_rings array.
     */
    uint8_t na_kring_svc_lut[KPKT_SC_MAX_CLASSES];
#endif /* CONFIG_NEXUS_FLOWSWITCH || CONFIG_NEXUS_NETIF */

#if CONFIG_NEXUS_USER_PIPE
    uint32_t na_next_pipe;      /* next free slot in the array */
    uint32_t na_max_pipes;      /* size of the array */
    /* array of pipes that have this adapter as a parent */
    struct nexus_upipe_adapter **na_pipes;
#endif /* CONFIG_NEXUS_USER_PIPE */

    char na_name[NEXUS_ADAPTER_NAMELEN];    /* diagnostics */
    uuid_t na_uuid;

    /*
     * na_activate() is called to activate, defunct or deactivate a nexus
     * adapter.  This is invoked by na_bind_channel(), the first time a
     * channel is opened to the adapter; by na_defunct() when an open
     * channel gets defunct; as well as by na_unbind_channel() when the
     * last channel instance opened to the adapter is closed.
     */
    int (*na_activate)(struct nexus_adapter *, na_activate_mode_t);
    /*
     * na_special() is an optional callback implemented by nexus types
     * that support kernel channel (special mode).  This allows the nexus
     * to override the logic surrounding na_{bind,unbind}_channel() calls.
     */
    int (*na_special)(struct nexus_adapter *, struct kern_channel *,
        struct chreq *, nxspec_cmd_t);
    /*
     * na_txsync() pushes packets to the underlying device;
     * na_rxsync() collects packets from the underlying device.
     */
    int (*na_txsync)(struct __kern_channel_ring *kring, struct proc *,
        uint32_t flags);
    int (*na_rxsync)(struct __kern_channel_ring *kring, struct proc *,
        uint32_t flags);
#define NA_SYNCF_MONITOR         0x1
#define NA_SYNCF_FORCE_READ      0x2
#define NA_SYNCF_FORCE_RECLAIM   0x4
#define NA_SYNCF_NETIF           0x8    /* netif normal sync */
#define NA_SYNCF_NETIF_ASYNC     0x10   /* asynchronous doorbell */
#define NA_SYNCF_NETIF_DOORBELL  0x20   /* doorbell request */
#define NA_SYNCF_NETIF_IFSTART   0x40   /* in if_start context */
#define NA_SYNCF_FORCE_UPP_SYNC  0x80   /* force upp sync alloc/free */
#define NA_SYNCF_UPP_PURGE       0x100  /* purge upp alloc pool */
#define NA_SYNCF_SYNC_ONLY       0x200  /* sync only, no doorbell */
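
    /*
     * Illustrative sketch (not from this header): a nexus provider
     * typically points these callbacks at its own handlers when the
     * adapter is set up; the handler names below are hypothetical
     * placeholders.
     *
     *    na->na_txsync = foo_na_txsync;
     *    na->na_rxsync = foo_na_rxsync;
     *
     * A caller forcing a TX completion reclaim without transmitting
     * might then invoke:
     *
     *    (void) na->na_txsync(kring, p, NA_SYNCF_FORCE_RECLAIM);
     */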

    /*
     * na_notify() is used to act after data have become available,
     * or after the state of the ring has changed.  Depending on the nexus
     * type, this may involve triggering an event and/or performing
     * additional work such as calling na_txsync().
     */
    int (*na_notify)(struct __kern_channel_ring *kring, struct proc *,
        uint32_t flags);
#define NA_NOTEF_MONITOR         0x1
#define NA_NOTEF_IN_KEVENT       0x2
#define NA_NOTEF_CAN_SLEEP       0x4    /* OK to block in kr_enter() */
#define NA_NOTEF_NETIF           0x8    /* same as NA_SYNCF_NETIF */
#define NA_NOTEF_PUSH            0x100  /* need immediate attention */
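
    /*
     * Illustrative sketch (not from this header): after placing newly
     * received packets on an RX ring, a delivery path could kick that
     * ring's notify callback so waiting channels are woken up, e.g.:
     *
     *    struct __kern_channel_ring *kring = &na->na_rx_rings[0];
     *    (void) na->na_notify(kring, NULL, NA_NOTEF_PUSH);
     *
     * Whether a NULL proc pointer is acceptable depends on the nexus
     * type's notify implementation; this is only a usage sketch.
     */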

    /*
     * na_channel_event_notify() is used to send events on the user channel.
     */
    int (*na_channel_event_notify)(struct nexus_adapter *,
        struct __kern_channel_event *, uint16_t);
    /*
     * na_config() is an optional callback for returning nexus-specific
     * configuration information.  This is implemented by nexus types
     * that handle dynamically changing configs.
     */
    int (*na_config)(struct nexus_adapter *,
        uint32_t *txr, uint32_t *txd, uint32_t *rxr, uint32_t *rxd);
    /*
     * na_krings_create() creates and initializes the __kern_channel_ring
     * arrays, as well as initializing the callback routines within;
     * na_krings_delete() cleans up and destroys the kernel rings.
     */
    int (*na_krings_create)(struct nexus_adapter *, struct kern_channel *);
    void (*na_krings_delete)(struct nexus_adapter *, struct kern_channel *,
        boolean_t);
    /*
     * na_dtor() is the destructor callback that is invoked when the
     * last reference to the nexus adapter has been released.
     */
    void (*na_dtor)(struct nexus_adapter *);
    /*
     * na_free() is the free callback that gets invoked after the
     * adapter has been destroyed.
     */
    void (*na_free)(struct nexus_adapter *);

    /*
     * Packet-chain-based callbacks for passing packets up the stack.
     * The inject variant is used by filters for injecting packets
     * into the RX path from user space.
     */
    void (*na_rx)(struct nexus_adapter *,
        struct __kern_packet *, struct nexus_pkt_stats *);
};

/* valid values for na_flags */
#define NAF_ACTIVE              0x1      /* skywalk is active */
#define NAF_HOST_ONLY           0x2      /* host adapter (no device rings) */
#define NAF_SPEC_INIT           0x4      /* na_special() initialized */
#define NAF_NATIVE              0x8      /* skywalk native netif adapter */
#define NAF_MEM_NO_INIT         0x10     /* na_kr_setup() skipped */
#define NAF_SLOT_CONTEXT        0x20     /* na_slot_ctxs is valid */
#define NAF_USER_PKT_POOL       0x40     /* na supports user packet pool */
#define NAF_TX_MITIGATION       0x80     /* na supports TX event mitigation */
#define NAF_RX_MITIGATION       0x100    /* na supports RX event mitigation */
#define NAF_DEFUNCT             0x200    /* no longer in service */
#define NAF_MEM_LOANED          0x400    /* arena owned by another adapter */
#define NAF_REJECT              0x800    /* not accepting channel activities */
#define NAF_EVENT_RING          0x1000   /* NA is providing event ring */
#define NAF_CHANNEL_EVENT_ATTACHED 0x2000 /* kevent registered for ch events */
#define NAF_VIRTUAL_DEVICE      0x8000   /* netif adapter for virtual device */
#define NAF_MODE_FSW            0x10000  /* NA is owned by fsw */
#define NAF_MODE_LLW            0x20000  /* NA is owned by llw */
#define NAF_LOW_LATENCY         0x40000  /* Low latency NA */
#define NAF_DRAINING            0x80000  /* NA is being drained */
/*
 * defunct allowed flag.
 * Currently used only by the parent nexus adapter of user-pipe nexus
 * to indicate that defuncting is allowed on the channels.
 */
#define NAF_DEFUNCT_OK          0x100000
#define NAF_KERNEL_ONLY         (1U << 31) /* used internally, not usable by userland */

#define NAF_BITS                                                        \
    "\020\01ACTIVE\02HOST_ONLY\03SPEC_INIT\04NATIVE"                    \
    "\05MEM_NO_INIT\06SLOT_CONTEXT\07USER_PKT_POOL"                     \
    "\010TX_MITIGATION\011RX_MITIGATION\012DEFUNCT\013MEM_LOANED"       \
    "\014REJECT\015EVENT_RING\016EVENT_ATTACH"                          \
    "\020VIRTUAL\021MODE_FSW\022MODE_LLW\023LOW_LATENCY\024DRAINING"    \
    "\025DEFUNCT_OK\040KERNEL_ONLY"
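
/*
 * Illustrative sketch (not from this header): NAF_BITS is a "%b"-style
 * bit-name string, so a decoded view of na_flags can be logged with the
 * kernel printf %b extension, e.g.:
 *
 *    printf("na_flags %b\n", na->na_flags, NAF_BITS);
 */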

#define NA_FREE(na) do {        \
    (na)->na_free(na);          \
} while (0)

/*
 * NA returns a pointer to the struct nexus_adapter from the ifp's netif nexus.
 */
#define NA(_ifp)        ((_ifp)->if_na)

__attribute__((always_inline))
static inline uint32_t
na_get_nslots(const struct nexus_adapter *na, enum txrx t)
{
    switch (t) {
    case NR_TX:
        return na->na_num_tx_slots;
    case NR_RX:
        return na->na_num_rx_slots;
    case NR_A:
    case NR_F:
        return na->na_num_allocator_slots;
    case NR_EV:
        return na->na_num_event_slots;
    case NR_LBA:
        return na->na_num_large_buf_alloc_slots;
    default:
        VERIFY(0);
        /* NOTREACHED */
        __builtin_unreachable();
    }
}

__attribute__((always_inline))
static inline void
na_set_nslots(struct nexus_adapter *na, enum txrx t, uint32_t v)
{
    switch (t) {
    case NR_TX:
        na->na_num_tx_slots = v;
        break;
    case NR_RX:
        na->na_num_rx_slots = v;
        break;
    case NR_A:
    case NR_F:
        na->na_num_allocator_slots = v;
        break;
    case NR_EV:
        na->na_num_event_slots = v;
        break;
    case NR_LBA:
        na->na_num_large_buf_alloc_slots = v;
        break;
    default:
        VERIFY(0);
        /* NOTREACHED */
        __builtin_unreachable();
    }
}

__attribute__((always_inline))
static inline uint32_t
na_get_nrings(const struct nexus_adapter *na, enum txrx t)
{
    switch (t) {
    case NR_TX:
        return na->na_num_tx_rings;
    case NR_RX:
        return na->na_num_rx_rings;
    case NR_A:
    case NR_F:
        return na->na_num_allocator_ring_pairs;
    case NR_EV:
        return na->na_num_event_rings;
    case NR_LBA:
        return na->na_num_large_buf_alloc_rings;
    default:
        VERIFY(0);
        /* NOTREACHED */
        __builtin_unreachable();
    }
}

__attribute__((always_inline))
static inline void
na_set_nrings(struct nexus_adapter *na, enum txrx t, uint32_t v)
{
    switch (t) {
    case NR_TX:
        na->na_num_tx_rings = v;
        break;
    case NR_RX:
        na->na_num_rx_rings = v;
        break;
    case NR_A:
    case NR_F:
        na->na_num_allocator_ring_pairs = v;
        break;
    case NR_EV:
        na->na_num_event_rings = v;
        break;
    case NR_LBA:
        /* we only support one ring for now */
        ASSERT(v <= 1);
        na->na_num_large_buf_alloc_rings = v;
        break;
    default:
        VERIFY(0);
        /* NOTREACHED */
        __builtin_unreachable();
    }
}

__attribute__((always_inline))
static inline struct __kern_channel_ring *
NAKR(struct nexus_adapter *na, enum txrx t)
{
    switch (t) {
    case NR_TX:
        return na->na_tx_rings;
    case NR_RX:
        return na->na_rx_rings;
    case NR_A:
        return na->na_alloc_rings;
    case NR_F:
        return na->na_free_rings;
    case NR_EV:
        return na->na_event_rings;
    case NR_LBA:
        return na->na_large_buf_alloc_rings;
    default:
        VERIFY(0);
        /* NOTREACHED */
        __builtin_unreachable();
    }
}
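
/*
 * Illustrative sketch (not from this header): NAKR() together with
 * na_get_nrings() yields the base of the kring array for a given ring
 * type, so walking every RX kring of an adapter looks like:
 *
 *    for (uint32_t i = 0; i < na_get_nrings(na, NR_RX); i++) {
 *        struct __kern_channel_ring *kring = &NAKR(na, NR_RX)[i];
 *        ...
 *    }
 */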

/*
 * If the adapter is owned by the kernel, neither another flow switch nor
 * user can use it; if the adapter is owned by a user, only users can
 * share it.  Evaluation must be done under SK_LOCK().
 */
#define NA_KERNEL_ONLY(_na)     (((_na)->na_flags & NAF_KERNEL_ONLY) != 0)
#define NA_OWNED_BY_ANY(_na)    \
    (NA_KERNEL_ONLY(_na) || ((_na)->na_channels > 0))
#define NA_OWNED_BY_FSW(_na)    \
    (((_na)->na_flags & NAF_MODE_FSW) != 0)
#define NA_OWNED_BY_LLW(_na)    \
    (((_na)->na_flags & NAF_MODE_LLW) != 0)

/*
 * Whether the adapter has been activated via na_activate() call.
 */
#define NA_IS_ACTIVE(_na)       (((_na)->na_flags & NAF_ACTIVE) != 0)
#define NA_IS_DEFUNCT(_na)      (((_na)->na_flags & NAF_DEFUNCT) != 0)
#define NA_CHANNEL_EVENT_ATTACHED(_na)  \
    (((_na)->na_flags & NAF_CHANNEL_EVENT_ATTACHED) != 0)
/*
 * Whether channel activities are rejected by the adapter.  This takes the
 * nexus adapter argument separately, as ch->ch_na may not be set yet.
 */
__attribute__((always_inline))
static inline boolean_t
na_reject_channel(struct kern_channel *ch, struct nexus_adapter *na)
{
    boolean_t reject;

    ASSERT(ch->ch_na == NULL || ch->ch_na == na);

    if ((na->na_flags & NAF_REJECT) || NX_REJECT_ACT(na->na_nx)) {
        /* set trapdoor NAF_REJECT flag */
        if (!(na->na_flags & NAF_REJECT)) {
            SK_ERR("%s(%d) marked as non-permissive",
                ch->ch_name, ch->ch_pid);
            os_atomic_or(&na->na_flags, NAF_REJECT, relaxed);
            ch_deactivate(ch);
        }
        reject = TRUE;
    } else {
        reject = FALSE;
    }

    return reject;
}

#if SK_LOG
__attribute__((always_inline))
static inline const char *
na_activate_mode2str(na_activate_mode_t m)
{
    switch (m) {
    case NA_ACTIVATE_MODE_ON:
        return "on";
    case NA_ACTIVATE_MODE_DEFUNCT:
        return "defunct";
    case NA_ACTIVATE_MODE_OFF:
        return "off";
    default:
        VERIFY(0);
        /* NOTREACHED */
        __builtin_unreachable();
    }
}
#endif /* SK_LOG */

__BEGIN_DECLS
extern void na_init(void);
extern void na_fini(void);

extern int na_bind_channel(struct nexus_adapter *na, struct kern_channel *ch,
    struct chreq *);
extern void na_unbind_channel(struct kern_channel *ch);

/*
 * Common routine for all functions that create a nexus adapter.  It performs
 * two main tasks:
 * - if the na points to an ifp, mark the ifp as Skywalk capable
 *   using na as its native adapter;
 * - provide defaults for the setup callbacks and the memory allocator.
 */
extern void na_attach_common(struct nexus_adapter *,
    struct kern_nexus *, struct kern_nexus_domain_provider *);
/*
 * Update the ring parameters (number and size of TX and RX rings).
 * It calls the na_config callback, if available.
 */
extern int na_update_config(struct nexus_adapter *na);

extern int na_rings_mem_setup(struct nexus_adapter *, boolean_t,
    struct kern_channel *);
extern void na_rings_mem_teardown(struct nexus_adapter *,
    struct kern_channel *, boolean_t);
extern void na_ch_rings_defunct(struct kern_channel *, struct proc *);

/* convenience wrappers for na_set_all_rings, used in drivers */
extern void na_disable_all_rings(struct nexus_adapter *);
extern void na_enable_all_rings(struct nexus_adapter *);
extern void na_lock_all_rings(struct nexus_adapter *);
extern void na_unlock_all_rings(struct nexus_adapter *);
extern int na_interp_ringid(struct nexus_adapter *, ring_id_t, ring_set_t,
    uint32_t[NR_TXRX], uint32_t[NR_TXRX]);
extern struct kern_pbufpool *na_kr_get_pp(struct nexus_adapter *, enum txrx);

extern int na_find(struct kern_channel *, struct kern_nexus *,
    struct chreq *, struct kern_channel *, struct nxbind *,
    struct proc *, struct nexus_adapter **, boolean_t);
extern void na_retain_locked(struct nexus_adapter *na);
extern int na_release_locked(struct nexus_adapter *na);
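
/*
 * Illustrative sketch (not from this header): na_retain_locked() and
 * na_release_locked() adjust na_refcount; assuming the "_locked" suffix
 * means the caller holds the global Skywalk lock, typical usage would be:
 *
 *    SK_LOCK();
 *    na_retain_locked(na);
 *    ...
 *    (void) na_release_locked(na);
 *    SK_UNLOCK();
 */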

extern int na_connect(struct kern_nexus *, struct kern_channel *,
    struct chreq *, struct kern_channel *, struct nxbind *, struct proc *);
extern void na_disconnect(struct kern_nexus *, struct kern_channel *);
extern void na_defunct(struct kern_nexus *, struct kern_channel *,
    struct nexus_adapter *, boolean_t);
extern int na_connect_spec(struct kern_nexus *, struct kern_channel *,
    struct chreq *, struct proc *);
extern void na_disconnect_spec(struct kern_nexus *, struct kern_channel *);
extern void na_start_spec(struct kern_nexus *, struct kern_channel *);
extern void na_stop_spec(struct kern_nexus *, struct kern_channel *);

extern int na_pseudo_create(struct kern_nexus *, struct chreq *,
    struct nexus_adapter **);
extern void na_kr_drop(struct nexus_adapter *, boolean_t);
extern void na_flowadv_entry_alloc(const struct nexus_adapter *, uuid_t,
    const flowadv_idx_t, const uint32_t);
extern void na_flowadv_entry_free(const struct nexus_adapter *, uuid_t,
    const flowadv_idx_t, const uint32_t);
extern bool na_flowadv_set(const struct nexus_adapter *,
    const flowadv_idx_t, const flowadv_token_t);
extern boolean_t na_flowadv_clear(const struct kern_channel *,
    const flowadv_idx_t, const flowadv_token_t);
extern int na_flowadv_report_ce_event(const struct kern_channel *ch,
    const flowadv_idx_t fe_idx, const flowadv_token_t flow_token,
    uint32_t ce_cnt, uint32_t total_pkt_cnt);
extern void na_flowadv_event(struct __kern_channel_ring *);
extern void na_post_event(struct __kern_channel_ring *, boolean_t, boolean_t,
    boolean_t, uint32_t);

extern void na_drain(struct nexus_adapter *, boolean_t);

__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* _SKYWALK_NEXUS_ADAPTER_H_ */