1 | /* |
2 | * Copyright (c) 2015-2023 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #ifndef _SKYWALK_OS_CHANNEL_H_ |
30 | #define _SKYWALK_OS_CHANNEL_H_ |
31 | |
32 | #ifdef PRIVATE |
33 | |
34 | #include <stdint.h> |
35 | #include <sys/types.h> |
36 | #include <sys/cdefs.h> |
37 | #include <uuid/uuid.h> |
38 | #include <mach/vm_types.h> |
39 | #include <skywalk/os_nexus.h> |
40 | #include <skywalk/os_packet.h> |
41 | #ifndef KERNEL |
42 | #include <skywalk/os_channel_event.h> |
43 | #include <net/if_var.h> |
44 | #endif /* !KERNEL */ |
45 | |
46 | /* |
47 | * Compiler guards used by Libnetcore. |
48 | */ |
49 | #define OS_CHANNEL_HAS_NUM_BUFFERS_ATTR 1 /* CHANNEL_ATTR_NUM_BUFFERS */ |
50 | #define OS_CHANNEL_HAS_LARGE_PACKET 1 /* CHANNEL_ATTR_LARGE_BUF_SIZE and */ |
51 | /* os_channel_large_packet_alloc() */ |
52 | |
53 | /* Flow advisory table index */ |
54 | typedef uint32_t flowadv_idx_t; |
55 | #define FLOWADV_IDX_NONE ((flowadv_idx_t)-1) |
56 | |
57 | /* |
58 | * Channel ring direction. |
59 | */ |
60 | typedef enum { |
61 | CHANNEL_DIR_TX_RX, /* default: TX and RX ring(s) */ |
62 | CHANNEL_DIR_TX, /* (monitor) only TX ring(s) */ |
63 | CHANNEL_DIR_RX /* (monitor) only RX ring(s) */ |
64 | } ring_dir_t; |
65 | |
66 | /* |
67 | * Channel ring ID. |
68 | */ |
69 | typedef uint32_t ring_id_t; |
70 | #define CHANNEL_RING_ID_ANY ((ring_id_t)-1) |
71 | |
72 | typedef enum { |
73 | CHANNEL_FIRST_TX_RING, |
74 | CHANNEL_LAST_TX_RING, |
75 | CHANNEL_FIRST_RX_RING, |
76 | CHANNEL_LAST_RX_RING |
77 | } ring_id_type_t; |
78 | |
79 | /* Sync mode values */ |
80 | typedef enum { |
81 | CHANNEL_SYNC_TX, /* synchronize TX ring(s) */ |
82 | CHANNEL_SYNC_RX, /* synchronize RX ring(s) */ |
83 | #if defined(LIBSYSCALL_INTERFACE) || defined(BSD_KERNEL_PRIVATE) |
84 | CHANNEL_SYNC_UPP /* synchronize packet pool rings only */ |
85 | #endif /* LIBSYSCALL_INTERFACE || BSD_KERNEL_PRIVATE */ |
86 | } sync_mode_t; |
87 | |
88 | /* Sync flags */ |
89 | typedef uint32_t sync_flags_t; |
90 | #if defined(LIBSYSCALL_INTERFACE) || defined(BSD_KERNEL_PRIVATE) |
91 | #define CHANNEL_SYNCF_ALLOC 0x1 /* synchronize alloc ring */ |
92 | #define CHANNEL_SYNCF_FREE 0x2 /* synchronize free ring */ |
93 | #define CHANNEL_SYNCF_PURGE 0x4 /* purge user packet pool */ |
94 | #define CHANNEL_SYNCF_ALLOC_BUF 0x8 /* synchronize buflet alloc ring */ |
95 | #define CHANNEL_SYNCF_LARGE_ALLOC 0x10 /* synchronize large alloc ring */ |
96 | #endif /* LIBSYSCALL_INTERFACE || BSD_KERNEL_PRIVATE */ |
97 | |
98 | /* |
99 | * Opaque handles. |
100 | */ |
101 | struct channel; |
102 | struct channel_ring_desc; |
103 | struct __slot_desc; |
104 | struct channel_attr; |
105 | |
106 | typedef struct channel *channel_t; |
107 | typedef struct channel_ring_desc *channel_ring_t; |
108 | typedef struct __slot_desc *channel_slot_t; |
109 | typedef struct channel_attr *channel_attr_t; |
110 | |
111 | /* |
112 | * Channel monitor types. |
113 | */ |
114 | typedef enum { |
115 | CHANNEL_MONITOR_OFF, /* default */ |
116 | CHANNEL_MONITOR_NO_COPY, /* zero-copy (delayed) mode */ |
117 | CHANNEL_MONITOR_COPY /* copy (immediate) mode */ |
118 | } channel_monitor_type_t; |
119 | |
120 | /* |
121 | * Channel threshold unit types. |
122 | */ |
123 | typedef enum { |
124 | CHANNEL_THRESHOLD_UNIT_SLOTS, /* unit in slots (default) */ |
125 | CHANNEL_THRESHOLD_UNIT_BYTES, /* unit in bytes */ |
126 | } channel_threshold_unit_t; |
127 | |
128 | /* |
129 | * Channel attribute types gettable/settable via os_channel_attr_{get,set}. |
130 | * |
131 | * g: retrievable at any time |
132 | * s: settable at any time |
133 | * S: settable once, only at creation time |
134 | */ |
135 | typedef enum { |
136 | CHANNEL_ATTR_TX_RINGS, /* (g) # of transmit rings */ |
137 | CHANNEL_ATTR_RX_RINGS, /* (g) # of receive rings */ |
138 | CHANNEL_ATTR_TX_SLOTS, /* (g) # of slots per transmit ring */ |
139 | CHANNEL_ATTR_RX_SLOTS, /* (g) # of slots per receive ring */ |
140 | CHANNEL_ATTR_SLOT_BUF_SIZE, /* (g) buffer per slot (bytes) */ |
141 | CHANNEL_ATTR_SLOT_META_SIZE, /* (g) metadata per slot (bytes) */ |
142 | CHANNEL_ATTR_EXCLUSIVE, /* (g/s) bool: exclusive open */ |
143 | CHANNEL_ATTR_NO_AUTO_SYNC, /* (g/s) bool: will do explicit sync */ |
144 | CHANNEL_ATTR_MONITOR, /* (g/s) see channel_monitor_type_t */ |
145 | CHANNEL_ATTR_TX_LOWAT_UNIT, /* (g/s) see channel_threshold_unit_t */ |
146 | CHANNEL_ATTR_TX_LOWAT_VALUE, /* (g/s) transmit low-watermark */ |
147 | CHANNEL_ATTR_RX_LOWAT_UNIT, /* (g/s) see channel_threshold_unit_t */ |
148 | CHANNEL_ATTR_RX_LOWAT_VALUE, /* (g/s) receive low-watermark */ |
149 | CHANNEL_ATTR_NEXUS_TYPE, /* (g) nexus type */ |
150 | CHANNEL_ATTR_NEXUS_EXTENSIONS, /* (g) nexus extension(s) */ |
151 | CHANNEL_ATTR_NEXUS_MHINTS, /* (g) nexus memory usage hints */ |
152 | CHANNEL_ATTR_TX_HOST_RINGS, /* (g) # of transmit host rings */ |
153 | CHANNEL_ATTR_RX_HOST_RINGS, /* (g) # of receive host rings */ |
154 | CHANNEL_ATTR_NEXUS_IFINDEX, /* (g) nexus network interface index */ |
155 | CHANNEL_ATTR_NEXUS_STATS_SIZE, /* (g) nexus statistics region size */ |
156 | CHANNEL_ATTR_NEXUS_FLOWADV_MAX, /* (g) # of flow advisory entries */ |
157 | CHANNEL_ATTR_NEXUS_META_TYPE, /* (g) nexus metadata type */ |
158 | CHANNEL_ATTR_NEXUS_META_SUBTYPE, /* (g) nexus metadata subtype */ |
159 | CHANNEL_ATTR_NEXUS_CHECKSUM_OFFLOAD, /* (g) nexus checksum offload */ |
160 | CHANNEL_ATTR_USER_PACKET_POOL, /* (g/s) bool: use user packet pool */ |
161 | CHANNEL_ATTR_NEXUS_ADV_SIZE, /* (g) nexus advisory region size */ |
162 | CHANNEL_ATTR_NEXUS_DEFUNCT_OK, /* (g/s) bool: allow defunct */ |
163 | CHANNEL_ATTR_FILTER, /* (g/s) bool: filter mode */ |
164 | CHANNEL_ATTR_EVENT_RING, /* (g/s) bool: enable event ring */ |
165 | CHANNEL_ATTR_MAX_FRAGS, /* (g) max length of buflet chain */ |
166 | CHANNEL_ATTR_NUM_BUFFERS, /* (g) # of buffers in user pool */ |
167 | CHANNEL_ATTR_LOW_LATENCY, /* (g/s) bool: low latency channel */ |
168 | CHANNEL_ATTR_LARGE_BUF_SIZE, /* (g) large buffer size (bytes) */ |
169 | } channel_attr_type_t; |
170 | |
171 | /* |
172 | * Channel nexus metadata type. |
173 | */ |
174 | typedef enum { |
175 | CHANNEL_NEXUS_META_TYPE_INVALID = 0, |
176 | CHANNEL_NEXUS_META_TYPE_QUANTUM, /* OK for os_packet quantum APIs */ |
177 | CHANNEL_NEXUS_META_TYPE_PACKET, /* OK for all os_packet APIs */ |
178 | } channel_nexus_meta_type_t; |
179 | |
180 | /* |
181 | * Channel nexus metadata subtype. |
182 | */ |
183 | typedef enum { |
184 | CHANNEL_NEXUS_META_SUBTYPE_INVALID = 0, |
185 | CHANNEL_NEXUS_META_SUBTYPE_PAYLOAD, |
186 | CHANNEL_NEXUS_META_SUBTYPE_RAW, |
187 | } channel_nexus_meta_subtype_t; |
188 | |
189 | /* |
190 | * Valid values for CHANNEL_ATTR_NEXUS_CHECKSUM_OFFLOAD |
191 | */ |
192 | #define CHANNEL_NEXUS_CHECKSUM_PARTIAL 0x1 /* partial checksum */ |
193 | |
194 | /* |
195 | * Channel statistics ID. |
196 | */ |
197 | typedef enum { |
198 | CHANNEL_STATS_ID_IP = 0, /* struct ip_stats */ |
199 | CHANNEL_STATS_ID_IP6, /* struct ip6_stats */ |
200 | CHANNEL_STATS_ID_TCP, /* struct tcp_stats */ |
201 | CHANNEL_STATS_ID_UDP, /* struct udp_stats */ |
202 | CHANNEL_STATS_ID_QUIC, /* struct quic_stats */ |
203 | } channel_stats_id_t; |
204 | |
205 | /* |
206 | * Slot properties. Structure is aligned to allow for efficient copy. |
207 | * |
 * Fields other than sp_{flags,len} are immutable (I).  The system
 * verifies the immutable fields for correctness during os_channel_put()
 * and will abort the process if it detects inconsistencies.  This is
 * meant to help with debugging, since such inconsistencies indicate
 * bugs and/or memory corruption.
213 | */ |
214 | typedef struct slot_prop { |
215 | uint16_t sp_flags; /* private flags */ |
216 | uint16_t sp_len; /* length for this slot */ |
217 | uint32_t sp_idx; /* (I) slot index */ |
218 | mach_vm_address_t sp_ext_ptr; /* (I) pointer for indirect buffer */ |
219 | mach_vm_address_t sp_buf_ptr; /* (I) pointer for native buffer */ |
220 | mach_vm_address_t sp_mdata_ptr; /* (I) pointer for metadata */ |
221 | uint32_t _sp_pad[8]; /* reserved */ |
222 | } slot_prop_t __attribute__((aligned(sizeof(uint64_t)))); |
223 | |
224 | #ifndef KERNEL |
225 | /* |
226 | * User APIs. |
227 | */ |
228 | #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE) |
229 | __BEGIN_DECLS |
230 | /* |
231 | * Creates a Channel attribute object. |
232 | * |
 * This must be paired with an os_channel_attr_destroy() on the handle.
234 | */ |
235 | extern channel_attr_t os_channel_attr_create(void); |
236 | |
237 | /* |
 * Clones a Channel attribute object.  If the source attribute is NULL,
 * this behaves just like os_channel_attr_create().
 *
 * This must be paired with an os_channel_attr_destroy() on the handle.
242 | */ |
243 | extern channel_attr_t os_channel_attr_clone(const channel_attr_t); |
244 | |
245 | /* |
246 | * Sets a value for a given attribute type on a Channel attribute object. |
247 | */ |
248 | extern int os_channel_attr_set(const channel_attr_t attr, |
249 | const channel_attr_type_t type, const uint64_t value); |
250 | |
251 | /* |
 * Sets a key blob on a Channel attribute object.  Any existing key blob
 * in the attribute object will be removed and replaced with the new one.
 * Specifying 0 for key_len will clear the key stored in the attribute
 * object.  The maximum key length is specified by NEXUS_MAX_KEY_LEN.
257 | */ |
258 | extern int os_channel_attr_set_key(const channel_attr_t attr, |
259 | const void *key, const uint32_t key_len); |
260 | |
261 | /* |
262 | * Gets a value for a given attribute type on a Channel attribute object. |
263 | */ |
264 | extern int os_channel_attr_get(const channel_attr_t attr, |
265 | const channel_attr_type_t type, uint64_t *value); |
266 | |
267 | /* |
 * Gets the key blob from a Channel attribute object.  If key is NULL,
 * the length of the key blob is returned in key_len, so the caller
 * knows how much space to allocate for the key blob.
271 | */ |
272 | extern int os_channel_attr_get_key(const channel_attr_t attr, |
273 | void *key, uint32_t *key_len); |
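
/*
 * For illustration only (not part of this header): the usual two-call
 * pattern for retrieving a key blob, querying the length first with a
 * NULL key and then fetching the blob itself.  Error handling is
 * abbreviated.
 *
 *     uint32_t key_len = 0;
 *     void *key = NULL;
 *
 *     if (os_channel_attr_get_key(attr, NULL, &key_len) == 0 &&
 *         key_len != 0) {
 *             key = malloc(key_len);
 *             if (key != NULL &&
 *                 os_channel_attr_get_key(attr, key, &key_len) != 0) {
 *                     free(key);      // fetch failed; discard the buffer
 *                     key = NULL;
 *             }
 *     }
 */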
274 | |
275 | /* |
276 | * Destroys a Channel attribute object, along with all resources |
277 | * associated with it (e.g. key blob). |
278 | */ |
279 | extern void os_channel_attr_destroy(const channel_attr_t attr); |
280 | |
281 | /* |
 * Opens a Channel to a Nexus provider instance.  Upon success, this maps
 * the memory region and allocates resources.
 *
 * This must be paired with an os_channel_destroy() on the handle, in order
 * to unmap the memory region and free resources.
287 | */ |
288 | extern channel_t os_channel_create(const uuid_t uuid, const nexus_port_t port); |
289 | |
290 | /* |
291 | * Extended version of os_channel_create(). |
292 | */ |
293 | extern channel_t os_channel_create_extended(const uuid_t uuid, |
294 | const nexus_port_t port, const ring_dir_t dir, const ring_id_t rid, |
295 | const channel_attr_t attr); |
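
/*
 * For illustration only (not part of this header): a minimal sketch of
 * opening a Channel with a caller-supplied attribute object.  The nexus
 * instance UUID (nx_uuid) and port (nx_port) are assumed to have been
 * obtained elsewhere, e.g. from the nexus provider.  Error handling is
 * abbreviated.
 *
 *     channel_attr_t attr = os_channel_attr_create();
 *     channel_t ch = NULL;
 *
 *     if (attr != NULL) {
 *             // request a user packet pool on this channel
 *             (void) os_channel_attr_set(attr,
 *                 CHANNEL_ATTR_USER_PACKET_POOL, 1);
 *             ch = os_channel_create_extended(nx_uuid, nx_port,
 *                 CHANNEL_DIR_TX_RX, CHANNEL_RING_ID_ANY, attr);
 *             os_channel_attr_destroy(attr);
 *     }
 *     // ... use the channel ...
 *     if (ch != NULL)
 *             os_channel_destroy(ch);
 */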
296 | |
297 | /* |
298 | * Retrieves the file descriptor associated with the Channel. |
299 | */ |
300 | extern int os_channel_get_fd(const channel_t channel); |
301 | |
302 | /* |
303 | * Retrieves current channel attributes into the channel_attr_t handle. |
304 | */ |
305 | extern int os_channel_read_attr(const channel_t channel, channel_attr_t attr); |
306 | |
307 | /* |
308 | * Updates channel attributes based on those referred to by the channel_attr_t |
309 | * handle. See comments above on channel_attr_type_t; this routine will only |
310 | * update attributes that are marked with 's' but not 'S'. |
311 | */ |
312 | extern int os_channel_write_attr(const channel_t channel, channel_attr_t attr); |
313 | |
314 | /* |
315 | * Retrieves channel's associated nexus type into *nexus_type, and the |
316 | * provider-specific extension attribute into *ext. |
317 | */ |
318 | extern int os_channel_read_nexus_extension_info(const channel_t channel, |
319 | nexus_type_t *nexus_type, uint64_t *ext); |
320 | |
321 | /* |
322 | * Non-blocking synchronization. Channel handle may also be used |
323 | * with kqueue(2), select(2) or poll(2) through the file descriptor. |
324 | */ |
325 | extern int os_channel_sync(const channel_t channel, const sync_mode_t mode); |
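
/*
 * For illustration only (not part of this header): waiting for inbound
 * traffic by polling the Channel's file descriptor (requires <poll.h>)
 * and then performing an explicit RX sync.  The exact readiness semantics
 * are defined by the nexus; this merely shows how the descriptor and
 * os_channel_sync() fit together.
 *
 *     struct pollfd pfd = {
 *             .fd = os_channel_get_fd(ch),
 *             .events = POLLIN,
 *     };
 *
 *     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *             // bring the RX ring(s) up to date, then consume slots
 *             (void) os_channel_sync(ch, CHANNEL_SYNC_RX);
 *     }
 */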
326 | |
327 | /* |
328 | * Destroys a Channel. |
329 | */ |
330 | extern void os_channel_destroy(const channel_t channel); |
331 | |
332 | /* |
333 | * Checks if a channel is defunct. Returns non-zero if defunct. |
334 | */ |
335 | extern int os_channel_is_defunct(const channel_t channel); |
336 | |
337 | /* |
338 | * Data Movement APIs. |
339 | * |
 * Obtain a channel_ring_t handle via os_channel_{tx,rx}_ring().  You will
 * need to specify the ring_id_t which identifies the ring; this is true
 * even for the single TX/RX ring case.  The Nexus provider can tell the
 * client, through a contract between the two, which TX and RX ring IDs
 * should be used to communicate with it.  For instance, it can tell the
 * client through some side channel to use the first TX ring and the first
 * RX ring.  The client should not assume 0 or any other number as the ID,
 * however, as the in-kernel Nexus object is the authoritative source of
 * truth.  This is where the os_channel_ring_id() call comes into the
 * picture, as it returns the first and last usable TX and RX ring IDs for
 * the Channel opened to that Nexus object.
351 | * |
352 | * Once the TX or RX ring handle is obtained above, the client can ask for |
353 | * the first usable slot in the ring through os_channel_get_next_slot() |
354 | * passing NULL for the 'slot' parameter. This returns a channel_slot_t |
355 | * handle that represents the slot, along with the properties of that slot |
356 | * described by the slot_prop_t structure. If no slots are available, this |
 * call returns a NULL handle.  It's important to note that this
 * call does NOT advance the ring's current slot pointer; calling this
359 | * multiple times in succession will yield the same result. |
360 | * |
361 | * The client proceeds to use the slot by examining the returned |
362 | * slot_prop_t fields including the pointer to the internal buffer |
363 | * associated with that slot. Once the client is finished, it updates |
364 | * the relevant slot_prop_t fields (e.g. length) and calls |
365 | * os_channel_set_slot_properties() to apply them to the slot. |
366 | * |
367 | * To get the next slot, the client provides the non-NULL slot value obtained |
368 | * from the previous call to os_channel_get_next_slot() as the 'slot' parameter |
369 | * in its next invocation of that function. |
370 | * |
 * To advance the ring's current pointer, the client invokes
372 | * os_channel_advance_slot() specifying the slot to advance past. If the slot |
373 | * is invalid, this function returns a non-zero value. |
374 | * |
 * Once the client is ready to commit, it calls os_channel_sync() in
 * either or both directions.  An illustrative sketch of this sequence
 * follows the declarations below.
377 | */ |
378 | extern ring_id_t os_channel_ring_id(const channel_t channel, |
379 | const ring_id_type_t type); |
380 | extern channel_ring_t os_channel_tx_ring(const channel_t channel, |
381 | const ring_id_t rid); |
382 | extern channel_ring_t os_channel_rx_ring(const channel_t channel, |
383 | const ring_id_t rid); |
384 | extern int os_channel_pending(const channel_ring_t ring); |
385 | |
386 | /* |
 * This returns a nexus-specific timestamp in nanoseconds taken at the
 * last time os_channel_sync() or its equivalent implicit kevent sync
 * was called.
390 | */ |
391 | extern uint64_t os_channel_ring_sync_time(const channel_ring_t ring); |
392 | |
393 | /* |
394 | * This returns a nexus-specific timestamp in nanoseconds to indicate |
395 | * the time of last activity on the opposite end of the ring. |
 * This is only updated when a sync or its kevent equivalent is called.
397 | */ |
398 | extern uint64_t os_channel_ring_notify_time(const channel_ring_t ring); |
399 | |
400 | /* |
 * For a TX ring, os_channel_available_slot_count() returns the minimum
 * number of slots available for TX; it is possible that
 * os_channel_get_next_slot() will yield more slots than what was
 * returned by an earlier call to os_channel_available_slot_count().
405 | */ |
406 | extern uint32_t os_channel_available_slot_count(const channel_ring_t ring); |
407 | extern channel_slot_t os_channel_get_next_slot(const channel_ring_t ring, |
408 | const channel_slot_t slot, slot_prop_t *prop); |
409 | extern int os_channel_advance_slot(channel_ring_t ring, |
410 | const channel_slot_t slot); |
411 | extern void os_channel_set_slot_properties(const channel_ring_t ring, |
412 | const channel_slot_t slot, const slot_prop_t *prop); |
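
/*
 * For illustration only (not part of this header): a minimal TX path
 * following the sequence described above, assuming the payload fits in a
 * single slot buffer.  Depending on the nexus metadata type, additional
 * per-packet setup via the os_packet APIs may be needed; that is omitted,
 * and error handling is abbreviated.
 *
 *     static int
 *     tx_one(channel_t ch, const void *data, uint16_t len)
 *     {
 *             ring_id_t rid = os_channel_ring_id(ch, CHANNEL_FIRST_TX_RING);
 *             channel_ring_t txr = os_channel_tx_ring(ch, rid);
 *             slot_prop_t prop;
 *             channel_slot_t slot;
 *
 *             if (txr == NULL)
 *                     return ENXIO;
 *             slot = os_channel_get_next_slot(txr, NULL, &prop);
 *             if (slot == NULL)
 *                     return ENOBUFS;         // no usable slot right now
 *
 *             // copy the payload into the slot's native buffer
 *             memcpy((void *)(uintptr_t)prop.sp_buf_ptr, data, len);
 *             prop.sp_len = len;
 *             os_channel_set_slot_properties(txr, slot, &prop);
 *
 *             // move the ring's current pointer past this slot
 *             if (os_channel_advance_slot(txr, slot) != 0)
 *                     return EINVAL;
 *
 *             // commit the TX ring to the kernel
 *             return os_channel_sync(ch, CHANNEL_SYNC_TX);
 *     }
 */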
413 | |
414 | /* |
415 | * Return the packet handle associated with a given slot of a ring. |
416 | */ |
417 | extern packet_t os_channel_slot_get_packet(const channel_ring_t ring, |
418 | const channel_slot_t slot); |
419 | |
420 | /* |
421 | * Each nexus that the channel is connected to determines whether or |
422 | * not there is a shareable statistics region identified by one of |
423 | * the channel_stats_id_t values. This routine returns a pointer to |
424 | * such a region upon success, or NULL if not supported by the nexus. |
425 | */ |
426 | extern void *os_channel_get_stats_region(const channel_t channel, |
427 | const channel_stats_id_t id); |
428 | |
429 | /* |
430 | * Each nexus that the channel is connected to determines whether or |
431 | * not there is a nexus-wide advisory region. This routine returns |
432 | * a pointer to such a region upon success, or NULL if not supported |
433 | * by the nexus. |
434 | */ |
435 | extern void *os_channel_get_advisory_region(const channel_t channel); |
436 | |
437 | /* |
438 | * Each nexus that supports flow admission control may be queried to |
439 | * advise whether or not the channel is willing to admit more packets |
440 | * for a given flow. A return value of 0 indicates that the packets |
441 | * for the flow are admissible. If ENOBUFS is returned, the flow is |
442 | * currently suspended, and further attempts to send more packets on |
443 | * the ring may result in drops. Any other error values indicate |
444 | * that either the nexus doesn't support admission control, or the |
445 | * arguments aren't valid. |
446 | */ |
447 | extern int os_channel_flow_admissible(const channel_ring_t ring, |
448 | uuid_t flow_id, const flowadv_idx_t flow_index); |
449 | |
450 | extern int os_channel_flow_adv_get_ce_count(const channel_ring_t chrd, |
451 | uuid_t flow_id, const flowadv_idx_t flow_index, uint32_t *ce_cnt, |
452 | uint32_t *pkt_cnt); |
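
/*
 * For illustration only (not part of this header): consulting flow
 * admission control before transmitting on a ring.  The flow UUID and
 * flow advisory index are assumed to have been assigned when the flow
 * was set up.
 *
 *     int err = os_channel_flow_admissible(txr, flow_id, flow_index);
 *
 *     if (err == 0) {
 *             // packets for this flow are admissible; go ahead and send
 *     } else if (err == ENOBUFS) {
 *             // flow is suspended; sending now may result in drops,
 *             // so back off until the flow advisory clears
 *     } else {
 *             // admission control unsupported, or invalid arguments
 *     }
 */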
453 | /* |
454 | * Allocate a packet from the channel's packet pool. |
 * Returns 0 on success with the packet handle in the packet argument.
 * Note: os_channel_packet_alloc() & os_channel_packet_free() should be
 * serialized and should not be called concurrently from different threads.
458 | */ |
459 | extern int |
460 | os_channel_packet_alloc(const channel_t chd, packet_t *packet); |
461 | |
462 | /* |
463 | * Allocate a large packet from the channel's packet pool. |
 * Returns 0 on success with the packet handle in the packet argument.
 * Note: os_channel_large_packet_alloc() & os_channel_packet_free() should be
 * serialized and should not be called concurrently from different threads.
467 | */ |
468 | extern int |
469 | os_channel_large_packet_alloc(const channel_t chd, packet_t *packet); |
470 | |
471 | /* |
472 | * Free a packet allocated from the channel's packet pool. |
 * Returns 0 on success.
 * Note: os_channel_packet_alloc() & os_channel_packet_free() should be
 * serialized and should not be called concurrently from different threads.
476 | */ |
477 | extern int |
478 | os_channel_packet_free(const channel_t chd, packet_t packet); |
479 | |
480 | /* |
 * Attach the given packet to a channel slot.
482 | */ |
483 | extern int |
484 | os_channel_slot_attach_packet(const channel_ring_t chrd, |
485 | const channel_slot_t slot, packet_t packet); |
486 | |
487 | /* |
 * Detach a given packet from a channel slot.
489 | */ |
490 | extern int |
491 | os_channel_slot_detach_packet(const channel_ring_t chrd, |
492 | const channel_slot_t slot, packet_t packet); |
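
/*
 * For illustration only (not part of this header): attaching a packet
 * from the user packet pool to a TX slot, on a channel opened with
 * CHANNEL_ATTR_USER_PACKET_POOL enabled.  Populating the packet's
 * buflets and metadata via the os_packet APIs is omitted, and error
 * handling is abbreviated.
 *
 *     packet_t pkt;
 *     slot_prop_t prop;
 *     channel_slot_t slot;
 *
 *     if (os_channel_packet_alloc(ch, &pkt) == 0) {
 *             // ... fill in the packet's buflets and finalize it ...
 *             slot = os_channel_get_next_slot(txr, NULL, &prop);
 *             if (slot != NULL &&
 *                 os_channel_slot_attach_packet(txr, slot, pkt) == 0) {
 *                     (void) os_channel_advance_slot(txr, slot);
 *                     (void) os_channel_sync(ch, CHANNEL_SYNC_TX);
 *             } else {
 *                     // could not attach; return the packet to the pool
 *                     (void) os_channel_packet_free(ch, pkt);
 *             }
 *     }
 */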
493 | |
494 | /* |
 * Purge packets from the channel's packet pool.
 * This API should be called at regular intervals by the application to
 * purge unused packets from the channel's packet pool.  The recommended
 * interval is 11 seconds.
 * Returns 0 on success.
 * Note: This call should be serialized with os_channel_packet_alloc() &
 * os_channel_packet_free() and should not be called concurrently from
 * different threads.
503 | */ |
504 | extern int |
505 | os_channel_packet_pool_purge(const channel_t chd); |
506 | |
507 | /* |
 * Retrieve a handle to the next available event(s) on the channel.
 * os_event_get_next_event() can then be called on the event handle to
 * retrieve the individual events from the handle.
511 | * Returns 0 on success, ENXIO if the channel is defunct. |
512 | */ |
513 | extern int |
514 | os_channel_get_next_event_handle(const channel_t chd, |
515 | os_channel_event_handle_t *ehandle, os_channel_event_type_t *etype, |
516 | uint32_t *nevents); |
517 | |
518 | /* |
519 | * Free an event retrieved from the channel. |
520 | * Returns 0 on success, ENXIO if the channel is defunct. |
521 | */ |
522 | extern int |
523 | os_channel_event_free(const channel_t chd, os_channel_event_handle_t ehandle); |
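
/*
 * For illustration only (not part of this header): draining the events
 * available on a channel created with CHANNEL_ATTR_EVENT_RING enabled,
 * assuming a non-zero return once no further events are pending.
 * Iterating the individual events within each handle (via the routine
 * named above) is elided.
 *
 *     os_channel_event_handle_t ehandle;
 *     os_channel_event_type_t etype;
 *     uint32_t nevents;
 *
 *     while (os_channel_get_next_event_handle(ch, &ehandle, &etype,
 *         &nevents) == 0) {
 *             // ... process the nevents events of type etype ...
 *             (void) os_channel_event_free(ch, ehandle);
 *     }
 */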
524 | |
525 | /* |
526 | * API to retrieve the latest interface advisory report on the channel. |
 * Returns 0 on success.  If the return value is EAGAIN, the caller can
 * attempt to retrieve the information again.
529 | */ |
530 | extern int |
531 | os_channel_get_interface_advisory(const channel_t chd, |
532 | struct ifnet_interface_advisory *advisory); |
533 | |
534 | /* |
535 | * API to configure interface advisory report on the channel. |
 * Returns 0 on success.
537 | */ |
538 | extern int |
539 | os_channel_configure_interface_advisory(const channel_t chd, boolean_t enable); |
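
/*
 * For illustration only (not part of this header): enabling interface
 * advisory reports on a channel and fetching the latest report, with a
 * single retry on EAGAIN as suggested above.
 *
 *     struct ifnet_interface_advisory adv;
 *     int err;
 *
 *     if (os_channel_configure_interface_advisory(ch, TRUE) == 0) {
 *             err = os_channel_get_interface_advisory(ch, &adv);
 *             if (err == EAGAIN)
 *                     err = os_channel_get_interface_advisory(ch, &adv);
 *             // on success, 'adv' holds the latest advisory report
 *     }
 */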
540 | |
541 | extern int |
542 | os_channel_buflet_alloc(const channel_t chd, buflet_t *bft); |
543 | |
544 | extern int |
545 | os_channel_buflet_free(const channel_t chd, buflet_t ubft); |
546 | __END_DECLS |
547 | #endif /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */ |
548 | #else /* KERNEL */ |
549 | /* |
550 | * Kernel APIs. |
551 | */ |
552 | |
553 | /* |
554 | * Opaque handles. |
555 | */ |
556 | struct kern_channel; |
557 | struct __kern_channel_ring; |
558 | |
559 | typedef struct kern_channel *kern_channel_t; |
560 | typedef struct __kern_channel_ring *kern_channel_ring_t; |
561 | typedef struct __slot_desc *kern_channel_slot_t; |
562 | |
563 | /* |
564 | * Slot properties (deprecated). |
565 | */ |
566 | struct kern_slot_prop { |
567 | uint32_t _sp_pad[16]; /* reserved */ |
568 | } __attribute__((aligned(sizeof(uint64_t)))); |
569 | |
570 | /* |
571 | * @struct kern_channel_ring_stat_increment |
 * @abstract Structure used to increment the per-ring statistics counters.
 * @field kcrsi_slots_transferred number of slots transferred
 * @field kcrsi_bytes_transferred number of bytes transferred
575 | */ |
576 | struct kern_channel_ring_stat_increment { |
577 | uint32_t kcrsi_slots_transferred; |
578 | uint32_t kcrsi_bytes_transferred; |
579 | }; |
580 | |
581 | /* |
582 | * Data Movement APIs. |
583 | * |
 * See the block comment above for the userland data movement APIs for
 * general concepts.  The main differences here are the kern_channel_notify()
 * and kern_channel_reclaim() calls that aren't available for userland.
 * These calls are typically invoked within the TX and RX sync callbacks
 * implemented by the nexus provider.
 *
 * For TX sync, kern_channel_reclaim() is normally called after the
 * provider has finished reclaiming slots that have been "transmitted".
 * In this case, this call is simply a way to indicate to the system
 * that such a condition has happened.
 *
 * For RX sync, kern_channel_reclaim() must be called at the beginning
 * of the callback in order to reclaim user-released slots, and to
 * ensure that subsequent calls to kern_channel_available_slot_count()
 * or kern_channel_get_next_slot() operate on the most recent state.
 *
 * kern_channel_notify() is used to post notifications to indicate
 * slot availability; this may result in the kernel event subsystem
 * posting readable and writable events.  A sketch of an RX sync helper
 * built on these calls follows the declarations below.
603 | */ |
604 | __BEGIN_DECLS |
605 | extern uint32_t kern_channel_notify(const kern_channel_ring_t, uint32_t flags); |
606 | extern uint32_t kern_channel_available_slot_count( |
607 | const kern_channel_ring_t ring); |
608 | /* |
609 | * NOTE: kern_channel_set_slot_properties(), kern_channel_get_next_slot(), |
610 | * kern_channel_reclaim() and kern_channel_advance_slot() require that the |
611 | * caller invokes them from within the sync callback context; they will |
612 | * assert otherwise. |
613 | */ |
614 | extern void kern_channel_set_slot_properties(const kern_channel_ring_t, |
615 | const kern_channel_slot_t slot, const struct kern_slot_prop *prop); |
616 | extern kern_channel_slot_t kern_channel_get_next_slot( |
617 | const kern_channel_ring_t kring, const kern_channel_slot_t slot, |
618 | struct kern_slot_prop *slot_prop); |
619 | extern uint32_t kern_channel_reclaim(const kern_channel_ring_t); |
620 | extern void kern_channel_advance_slot(const kern_channel_ring_t kring, |
621 | kern_channel_slot_t slot); |
622 | |
623 | /* |
624 | * Packet. |
625 | */ |
626 | extern kern_packet_t kern_channel_slot_get_packet( |
627 | const kern_channel_ring_t ring, const kern_channel_slot_t slot); |
628 | |
629 | /* |
630 | * NOTE: kern_channel_slot_attach_packet(), kern_channel_slot_detach_packet() |
631 | * and kern_channel_ring_get_container() require that the caller invokes them |
632 | * from within the sync callback context; they will assert otherwise. |
633 | */ |
634 | extern errno_t kern_channel_slot_attach_packet(const kern_channel_ring_t ring, |
635 | const kern_channel_slot_t slot, kern_packet_t packet); |
636 | extern errno_t kern_channel_slot_detach_packet(const kern_channel_ring_t ring, |
637 | const kern_channel_slot_t slot, kern_packet_t packet); |
638 | extern errno_t kern_channel_ring_get_container(const kern_channel_ring_t ring, |
639 | kern_packet_t **array, uint32_t *count); |
640 | extern errno_t kern_channel_tx_refill(const kern_channel_ring_t ring, |
641 | uint32_t pkt_limit, uint32_t byte_limit, boolean_t tx_doorbell_ctxt, |
642 | boolean_t *pkts_pending); |
643 | extern errno_t kern_channel_get_service_class(const kern_channel_ring_t ring, |
644 | kern_packet_svc_class_t *svc); |
645 | extern errno_t kern_netif_queue_get_service_class(kern_netif_queue_t, |
646 | kern_packet_svc_class_t *); |
647 | |
648 | /* |
649 | * Misc. |
650 | */ |
651 | extern void *kern_channel_get_context(const kern_channel_t channel); |
652 | extern void *kern_channel_ring_get_context(const kern_channel_ring_t ring); |
653 | extern void *kern_channel_slot_get_context(const kern_channel_ring_t ring, |
654 | const kern_channel_slot_t slot); |
655 | |
656 | /* |
657 | * NOTE: kern_channel_increment_ring_{net}_stats() requires |
658 | * that the caller invokes it from within the sync callback context; |
659 | * it will assert otherwise. |
660 | */ |
661 | extern void kern_channel_increment_ring_stats(kern_channel_ring_t ring, |
662 | struct kern_channel_ring_stat_increment *stats); |
663 | extern void kern_channel_increment_ring_net_stats(kern_channel_ring_t ring, |
664 | ifnet_t, struct kern_channel_ring_stat_increment *stats); |
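
/*
 * For illustration only (not part of this header): a helper meant to be
 * invoked from within a nexus provider's RX sync callback, per the notes
 * above.  rx_dequeue_completed() is a hypothetical driver routine that
 * hands back kern_packet_t handles for received frames (0 when empty).
 *
 *     static void
 *     rx_sync_fill(const kern_channel_ring_t ring, ifnet_t ifp)
 *     {
 *             struct kern_channel_ring_stat_increment stats = { 0 };
 *             kern_channel_slot_t slot, last = NULL;
 *             kern_packet_t ph;
 *
 *             // reclaim user-released slots before touching the ring
 *             (void) kern_channel_reclaim(ring);
 *
 *             slot = kern_channel_get_next_slot(ring, NULL, NULL);
 *             while (slot != NULL && (ph = rx_dequeue_completed()) != 0) {
 *                     if (kern_channel_slot_attach_packet(ring, slot, ph) != 0)
 *                             break;
 *                     stats.kcrsi_slots_transferred++;
 *                     // stats.kcrsi_bytes_transferred += <frame length>
 *                     last = slot;
 *                     slot = kern_channel_get_next_slot(ring, slot, NULL);
 *             }
 *             if (last != NULL) {
 *                     kern_channel_advance_slot(ring, last);
 *                     kern_channel_increment_ring_net_stats(ring, ifp, &stats);
 *             }
 *     }
 */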
665 | |
666 | #ifdef BSD_KERNEL_PRIVATE |
667 | /* forward declare */ |
668 | struct flowadv_fcentry; |
669 | |
670 | /* Flow advisory token */ |
671 | typedef uint32_t flowadv_token_t; |
672 | |
673 | /* |
674 | * Private, unexported KPIs. |
675 | */ |
676 | __private_extern__ errno_t kern_channel_slot_attach_packet_byidx( |
677 | const kern_channel_ring_t kring, const uint32_t sidx, kern_packet_t ph); |
678 | __private_extern__ errno_t kern_channel_slot_detach_packet_byidx( |
679 | const kern_channel_ring_t kring, const uint32_t sidx, kern_packet_t ph); |
680 | __private_extern__ void kern_channel_flowadv_clear(struct flowadv_fcentry *); |
681 | __private_extern__ void kern_channel_flowadv_report_ce_event( |
682 | struct flowadv_fcentry *, uint32_t, uint32_t); |
683 | __private_extern__ void kern_channel_memstatus(struct proc *, uint32_t, |
684 | struct kern_channel *); |
685 | __private_extern__ void kern_channel_defunct(struct proc *, |
686 | struct kern_channel *); |
687 | __private_extern__ errno_t kern_channel_tx_refill_canblock( |
688 | const kern_channel_ring_t, uint32_t, uint32_t, boolean_t, boolean_t *); |
689 | #endif /* BSD_KERNEL_PRIVATE */ |
690 | __END_DECLS |
691 | #endif /* KERNEL */ |
692 | #endif /* PRIVATE */ |
693 | #endif /* !_SKYWALK_OS_CHANNEL_H_ */ |
694 | |