1 | /* |
2 | * Copyright (c) 2016-2022 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #ifndef _SKYWALK_PACKET_COMMON_H_ |
30 | #define _SKYWALK_PACKET_COMMON_H_ |
31 | |
32 | #if defined(PRIVATE) || defined(BSD_KERNEL_PRIVATE) |
33 | /* |
34 | * Routines common to kernel and userland. This file is intended to |
35 | * be included by code implementing the packet APIs, in particular, |
36 | * the Skywalk kernel and libsyscall code. |
37 | */ |
38 | |
39 | #include <skywalk/os_packet_private.h> |
40 | #include <net/if_vlan_var.h> |
41 | #include <sys/errno.h> |
42 | #include <sys/kdebug.h> |
43 | |
44 | #ifndef KERNEL |
45 | /* |
46 | * User. |
47 | */ |
48 | #if !defined(LIBSYSCALL_INTERFACE) |
49 | #error "LIBSYSCALL_INTERFACE not defined" |
50 | #endif /* !LIBSYSCALL_INTERFACE */ |
51 | #define QUM_ADDR(_ph) SK_PTR_ADDR_UQUM(_ph) |
52 | #define PKT_ADDR(_ph) SK_PTR_ADDR_UPKT(_ph) |
53 | #define BLT_ADDR(_bp) ((struct __user_buflet *)(uintptr_t)_bp) |
54 | #else /* KERNEL */ |
55 | /* |
56 | * Kernel. |
57 | */ |
58 | #include <skywalk/packet/packet_var.h> |
59 | #include <skywalk/packet/pbufpool_var.h> |
60 | #define QUM_ADDR(_ph) SK_PTR_ADDR_KQUM(_ph) |
61 | #define PKT_ADDR(_ph) SK_PTR_ADDR_KPKT(_ph) |
62 | #define BLT_ADDR(_bp) ((struct __kern_buflet *)(uintptr_t)_bp) |
63 | #define PKT_HAS_ATTACHED_MBUF(_ph) \ |
64 | ((PKT_ADDR(_ph)->pkt_pflags & PKT_F_MBUF_DATA) != 0) |
65 | #endif /* KERNEL */ |
66 | |
67 | /* |
68 | * Common. |
69 | */ |
70 | #if (DEBUG || DEVELOPMENT) |
71 | #define PKT_SUBTYPE_ASSERT(_ph, _type, _subtype) do { \ |
72 | if (__improbable(SK_PTR_TYPE(_ph) != (uint64_t)(_type) || \ |
73 | SK_PTR_SUBTYPE(_ph) != (uint64_t)(_subtype))) { \ |
74 | pkt_subtype_assert_fail(_ph, _type, _subtype); \ |
75 | /* NOTREACHED */ \ |
76 | __builtin_unreachable(); \ |
77 | } \ |
78 | } while (0) |
79 | |
80 | #define PKT_TYPE_ASSERT(_ph, _type) do { \ |
81 | if (__improbable(SK_PTR_TYPE(_ph) != (uint64_t)(_type))) { \ |
82 | pkt_type_assert_fail(_ph, _type); \ |
83 | /* NOTREACHED */ \ |
84 | __builtin_unreachable(); \ |
85 | } \ |
86 | } while (0) |
87 | #else /* !DEBUG && !DEVELOPMENT */ |
88 | #define PKT_SUBTYPE_ASSERT(_ph, _type, _subtype) ((void)0) |
89 | #define PKT_TYPE_ASSERT(_ph, _type) ((void)0) |
90 | #endif /* !DEBUG && !DEVELOPMENT */ |
91 | |
92 | #define QUM_GET_NEXT_BUFLET(_qum, _pbuf, _buf) do { \ |
93 | ASSERT((_pbuf) == NULL || (_pbuf) == (_qum)->qum_buf); \ |
94 | (_buf) = (((_pbuf) == NULL) ? (_qum)->qum_buf : NULL); \ |
95 | } while (0) |
96 | |
97 | #define PKT_GET_FIRST_BUFLET(_pkt, _bcnt, _buf) do { \ |
98 | if (__improbable((_bcnt) == 0)) { \ |
99 | (_buf) = NULL; \ |
100 | break; \ |
101 | } \ |
102 | if (__probable((_pkt)->pkt_qum_buf.buf_addr != 0)) { \ |
103 | (_buf) = &(_pkt)->pkt_qum_buf; \ |
104 | } else { \ |
105 | (_buf) = __unsafe_forge_single(struct __kern_buflet *, \ |
106 | __DECONST(void *, (_pkt)->pkt_qum_buf.buf_nbft_addr));\ |
107 | } \ |
108 | } while (0) |
109 | |
110 | #define _PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do { \ |
111 | if ((_pbuf) == NULL) { \ |
112 | PKT_GET_FIRST_BUFLET(_pkt, _bcnt, _buf); \ |
113 | } else { \ |
114 | (_buf) = __unsafe_forge_single(struct __kern_buflet *, \ |
115 | __DECONST(void *, (_pbuf)->buf_nbft_addr)); \ |
116 | } \ |
117 | } while (0) |
118 | |
119 | #ifndef KERNEL |
120 | #define PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do { \ |
121 | _PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf); \ |
122 | } while (0) |
123 | #else /* KERNEL */ |
124 | #define PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf) do { \ |
125 | ASSERT(((_bcnt) >= 1) || ((_pbuf) == NULL)); \ |
126 | _PKT_GET_NEXT_BUFLET(_pkt, _bcnt, _pbuf, _buf); \ |
127 | } while (0) |
128 | #endif /* KERNEL */ |
129 | |
130 | #ifdef KERNEL |
131 | #define PKT_COMPOSE_NX_PORT_ID(_nx_port, _gencnt) \ |
132 | ((uint32_t)((_gencnt & 0xffff) << 16) | (_nx_port & 0xffff)) |
133 | |
134 | #define PKT_DECOMPOSE_NX_PORT_ID(_nx_port_id, _nx_port, _gencnt) do { \ |
135 | _nx_port = _nx_port_id & 0xffff; \ |
136 | _gencnt = (_nx_port_id >> 16) & 0xffff; \ |
137 | } while (0) |
138 | #endif /* KERNEL */ |
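
/*
 * For illustration, a port/generation pair round-trips through the
 * 16/16-bit packing above (values hypothetical):
 *
 *	uint16_t port, gencnt;
 *	uint32_t id = PKT_COMPOSE_NX_PORT_ID(0x0005, 0x0007);
 *	// id == 0x00070005: gencnt in the upper 16 bits, port in the lower
 *	PKT_DECOMPOSE_NX_PORT_ID(id, port, gencnt);
 *	// port == 0x0005, gencnt == 0x0007
 */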
139 | |
140 | __attribute__((always_inline)) |
141 | static inline int |
142 | __packet_set_headroom(const uint64_t ph, const uint8_t headroom) |
143 | { |
144 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
145 | if (__probable(headroom < PKT_ADDR(ph)->pkt_qum_buf.buf_dlim)) { |
146 | PKT_ADDR(ph)->pkt_headroom = headroom; |
147 | return 0; |
148 | } |
149 | return ERANGE; |
150 | } |
151 | |
152 | __attribute__((always_inline)) |
153 | static inline uint8_t |
154 | __packet_get_headroom(const uint64_t ph) |
155 | { |
156 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
157 | return PKT_ADDR(ph)->pkt_headroom; |
158 | } |
159 | |
160 | __attribute__((always_inline)) |
161 | static inline int |
__packet_set_link_header_length(const uint64_t ph, const uint8_t len)
163 | { |
164 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
165 | if (__probable(len <= PKT_ADDR(ph)->pkt_qum_buf.buf_dlim)) { |
166 | PKT_ADDR(ph)->pkt_l2_len = len; |
167 | return 0; |
168 | } |
169 | return ERANGE; |
170 | } |
171 | |
172 | __attribute__((always_inline)) |
173 | static inline uint8_t |
__packet_get_link_header_length(const uint64_t ph)
175 | { |
176 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
177 | return PKT_ADDR(ph)->pkt_l2_len; |
178 | } |
179 | |
180 | __attribute__((always_inline)) |
181 | static inline int |
182 | __packet_set_link_broadcast(const uint64_t ph) |
183 | { |
184 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
185 | PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_BCAST; |
186 | return 0; |
187 | } |
188 | |
189 | __attribute__((always_inline)) |
190 | static inline boolean_t |
191 | __packet_get_link_broadcast(const uint64_t ph) |
192 | { |
193 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
194 | return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_BCAST) != 0; |
195 | } |
196 | |
197 | __attribute__((always_inline)) |
198 | static inline int |
199 | __packet_set_link_multicast(const uint64_t ph) |
200 | { |
201 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
202 | PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_MCAST; |
203 | return 0; |
204 | } |
205 | |
206 | __attribute__((always_inline)) |
207 | static inline boolean_t |
208 | __packet_get_link_multicast(const uint64_t ph) |
209 | { |
210 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
211 | return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_MCAST) != 0; |
212 | } |
213 | |
214 | __attribute__((always_inline)) |
215 | static inline int |
216 | __packet_set_link_ethfcs(const uint64_t ph) |
217 | { |
218 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
219 | PKT_ADDR(ph)->pkt_link_flags |= PKT_LINKF_ETHFCS; |
220 | return 0; |
221 | } |
222 | |
223 | __attribute__((always_inline)) |
224 | static inline boolean_t |
225 | __packet_get_link_ethfcs(const uint64_t ph) |
226 | { |
227 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
228 | return (PKT_ADDR(ph)->pkt_link_flags & PKT_LINKF_ETHFCS) != 0; |
229 | } |
230 | |
231 | __attribute__((always_inline)) |
232 | static inline int |
233 | __packet_set_transport_traffic_background(const uint64_t ph) |
234 | { |
235 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
236 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_BACKGROUND; |
237 | return 0; |
238 | } |
239 | |
240 | __attribute__((always_inline)) |
241 | static inline boolean_t |
242 | __packet_get_transport_traffic_background(const uint64_t ph) |
243 | { |
244 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
245 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_BACKGROUND) != 0; |
246 | } |
247 | |
248 | __attribute__((always_inline)) |
249 | static inline int |
250 | __packet_set_transport_traffic_realtime(const uint64_t ph) |
251 | { |
252 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
253 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_REALTIME; |
254 | return 0; |
255 | } |
256 | |
257 | __attribute__((always_inline)) |
258 | static inline boolean_t |
259 | __packet_get_transport_traffic_realtime(const uint64_t ph) |
260 | { |
261 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
262 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_REALTIME) != 0; |
263 | } |
264 | |
265 | __attribute__((always_inline)) |
266 | static inline int |
267 | __packet_set_transport_retransmit(const uint64_t ph) |
268 | { |
269 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
270 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_REXMT; |
271 | return 0; |
272 | } |
273 | |
274 | __attribute__((always_inline)) |
275 | static inline boolean_t |
276 | __packet_get_transport_retransmit(const uint64_t ph) |
277 | { |
278 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
279 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_REXMT) != 0; |
280 | } |
281 | |
282 | __attribute__((always_inline)) |
283 | static inline int |
284 | __packet_set_transport_last_packet(const uint64_t ph) |
285 | { |
286 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
287 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_LAST_PKT; |
288 | return 0; |
289 | } |
290 | |
291 | __attribute__((always_inline)) |
292 | static inline int |
293 | __packet_set_group_start(const uint64_t ph) |
294 | { |
295 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
296 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_GROUP_START; |
297 | return 0; |
298 | } |
299 | |
300 | __attribute__((always_inline)) |
301 | static inline boolean_t |
302 | __packet_get_group_start(const uint64_t ph) |
303 | { |
304 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
305 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_GROUP_START) != 0; |
306 | } |
307 | |
308 | __attribute__((always_inline)) |
309 | static inline int |
310 | __packet_set_group_end(const uint64_t ph) |
311 | { |
312 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
313 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_GROUP_END; |
314 | return 0; |
315 | } |
316 | |
317 | __attribute__((always_inline)) |
318 | static inline boolean_t |
319 | __packet_get_group_end(const uint64_t ph) |
320 | { |
321 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
322 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_GROUP_END) != 0; |
323 | } |
324 | |
325 | __attribute__((always_inline)) |
326 | static inline errno_t |
327 | __packet_get_expire_time(const uint64_t ph, uint64_t *ts) |
328 | { |
329 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
330 | #ifdef KERNEL |
331 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
332 | #else /* !KERNEL */ |
333 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
334 | #endif /* !KERNEL */ |
335 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXPIRE_TS) == 0) { |
336 | return ENOENT; |
337 | } |
338 | if (ts == NULL) { |
339 | return EINVAL; |
340 | } |
341 | *ts = po->__po_expire_ts; |
342 | return 0; |
343 | } |
344 | |
345 | __attribute__((always_inline)) |
346 | static inline errno_t |
347 | __packet_set_expire_time(const uint64_t ph, const uint64_t ts) |
348 | { |
349 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
350 | #ifdef KERNEL |
351 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
352 | #else /* !KERNEL */ |
353 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
354 | #endif /* !KERNEL */ |
355 | if (ts != 0) { |
356 | po->__po_expire_ts = ts; |
357 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_EXPIRE_TS; |
358 | } else { |
359 | po->__po_expire_ts = 0; |
360 | PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_OPT_EXPIRE_TS; |
361 | } |
362 | return 0; |
363 | } |
364 | |
365 | __attribute__((always_inline)) |
366 | static inline errno_t |
367 | __packet_get_expiry_action(const uint64_t ph, packet_expiry_action_t *pea) |
368 | { |
369 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
370 | #ifdef KERNEL |
371 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
372 | #else /* !KERNEL */ |
373 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
374 | #endif /* !KERNEL */ |
375 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXP_ACTION) == 0) { |
376 | return ENOENT; |
377 | } |
378 | if (pea == NULL) { |
379 | return EINVAL; |
380 | } |
381 | *pea = po->__po_expiry_action; |
382 | return 0; |
383 | } |
384 | |
385 | __attribute__((always_inline)) |
386 | static inline errno_t |
387 | __packet_set_expiry_action(const uint64_t ph, packet_expiry_action_t pea) |
388 | { |
389 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
390 | #ifdef KERNEL |
391 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
392 | #else /* !KERNEL */ |
393 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
394 | #endif /* !KERNEL */ |
395 | if (pea != PACKET_EXPIRY_ACTION_NONE) { |
396 | po->__po_expiry_action = (uint8_t)pea; |
397 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_EXP_ACTION; |
398 | } else { |
399 | po->__po_expiry_action = 0; |
400 | PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_OPT_EXP_ACTION; |
401 | } |
402 | return 0; |
403 | } |
404 | |
405 | __attribute__((always_inline)) |
406 | static inline errno_t |
407 | __packet_opt_get_token(const struct __packet_opt *po, |
408 | void *__sized_by(PKT_OPT_MAX_TOKEN_SIZE)token, |
409 | uint16_t *len, uint8_t *type) |
410 | { |
411 | uint16_t tlen = po->__po_token_len; |
412 | uint8_t ttype; |
413 | |
414 | if (token == NULL || len == NULL || type == NULL || tlen > *len) { |
415 | return EINVAL; |
416 | } |
417 | ttype = (uint8_t)po->__po_token_type; |
418 | |
419 | ASSERT(tlen <= PKT_OPT_MAX_TOKEN_SIZE); |
420 | _CASSERT((__builtin_offsetof(struct __packet_opt, __po_token) % 8) == 0); |
	bcopy(po->__po_token, token, tlen);
422 | *len = tlen; |
423 | *type = ttype; |
424 | return 0; |
425 | } |
426 | |
427 | __attribute__((always_inline)) |
428 | static inline errno_t |
429 | __packet_get_token(const uint64_t ph, |
430 | void *__sized_by(PKT_OPT_MAX_TOKEN_SIZE)token, uint16_t *len) |
431 | { |
432 | #ifdef KERNEL |
433 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
434 | #else /* !KERNEL */ |
435 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
436 | #endif /* !KERNEL */ |
437 | uint8_t type; |
438 | errno_t err; |
439 | |
440 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
441 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) == 0) { |
442 | return ENOENT; |
443 | } |
	err = __packet_opt_get_token(po, token, len, &type);
445 | if ((err == 0) && (type != PKT_OPT_TOKEN_TYPE_OPAQUE)) { |
446 | err = ENOENT; |
447 | } |
448 | return err; |
449 | } |
450 | |
451 | __attribute__((always_inline)) |
452 | static inline errno_t |
453 | __packet_opt_set_token(struct __packet_opt *po, |
454 | const void *__sized_by(PKT_OPT_MAX_TOKEN_SIZE)token, |
455 | const uint16_t len, const uint8_t type, volatile uint64_t *pflags) |
456 | { |
457 | _CASSERT((__builtin_offsetof(struct __packet_opt, __po_token) % 8) == 0); |
458 | if (len != 0) { |
459 | if (token == NULL || len > PKT_OPT_MAX_TOKEN_SIZE || |
460 | type == 0) { |
461 | return EINVAL; |
462 | } |
463 | if (__probable(IS_P2ALIGNED(token, 8))) { |
464 | uint64_t *token64 = __DECONST(void *, token); |
465 | po->__po_token_data[0] = *token64; |
466 | po->__po_token_data[1] = *(token64 + 1); |
467 | } else { |
			bcopy(token, po->__po_token, len);
469 | } |
470 | po->__po_token_len = len; |
471 | po->__po_token_type = type; |
472 | *pflags |= PKT_F_OPT_TOKEN; |
473 | } else { |
474 | _CASSERT(sizeof(po->__po_token_data[0]) == 8); |
475 | _CASSERT(sizeof(po->__po_token_data[1]) == 8); |
476 | _CASSERT(sizeof(po->__po_token) == 16); |
477 | po->__po_token_data[0] = 0; |
478 | po->__po_token_data[1] = 0; |
479 | po->__po_token_len = 0; |
480 | po->__po_token_type = 0; |
481 | *pflags &= ~PKT_F_OPT_TOKEN; |
482 | } |
483 | return 0; |
484 | } |
485 | |
486 | #ifndef KERNEL |
487 | __attribute__((always_inline)) |
488 | static inline void |
489 | __packet_set_tx_timestamp(const uint64_t ph, const uint64_t ts) |
490 | { |
491 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
492 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
493 | |
494 | po->__po_pkt_tx_time = ts; |
495 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_TX_TIMESTAMP; |
496 | } |
497 | #endif /* !KERNEL */ |
498 | |
499 | __attribute__((always_inline)) |
500 | static inline errno_t |
501 | __packet_set_token(const uint64_t ph, |
502 | const void *__sized_by(PKT_OPT_MAX_TOKEN_SIZE)token, const uint16_t len) |
503 | { |
504 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
505 | #ifdef KERNEL |
506 | return __packet_opt_set_token(PKT_ADDR(ph)->pkt_com_opt, token, len, |
	    PKT_OPT_TOKEN_TYPE_OPAQUE, &PKT_ADDR(ph)->pkt_pflags);
508 | #else /* !KERNEL */ |
509 | return __packet_opt_set_token(&PKT_ADDR(ph)->pkt_com_opt, token, len, |
510 | PKT_OPT_TOKEN_TYPE_OPAQUE, &PKT_ADDR(ph)->pkt_pflags); |
511 | #endif /* !KERNEL */ |
512 | } |
513 | |
514 | __attribute__((always_inline)) |
515 | static inline errno_t |
516 | __packet_get_packetid(const uint64_t ph, packet_id_t *pktid) |
517 | { |
518 | #ifdef KERNEL |
519 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
520 | #else /* !KERNEL */ |
521 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
522 | #endif /* !KERNEL */ |
523 | uint16_t len = sizeof(packet_id_t); |
524 | uint8_t type; |
525 | errno_t err; |
526 | |
527 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
528 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) == 0) { |
529 | return ENOENT; |
530 | } |
	err = __packet_opt_get_token(po, pktid, &len, &type);
532 | if ((err == 0) && ((type != PKT_OPT_TOKEN_TYPE_PACKET_ID) || |
533 | (len != sizeof(packet_id_t)))) { |
534 | err = ENOENT; |
535 | } |
536 | return err; |
537 | } |
538 | |
539 | __attribute__((always_inline)) |
540 | static inline errno_t |
541 | __packet_set_packetid(const uint64_t ph, const packet_id_t *pktid) |
542 | { |
543 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
544 | #ifdef KERNEL |
	return __packet_opt_set_token(PKT_ADDR(ph)->pkt_com_opt, pktid,
	    sizeof(packet_id_t), PKT_OPT_TOKEN_TYPE_PACKET_ID,
	    &PKT_ADDR(ph)->pkt_pflags);
548 | #else /* !KERNEL */ |
549 | return __packet_opt_set_token(&PKT_ADDR(ph)->pkt_com_opt, pktid, |
550 | sizeof(packet_id_t), PKT_OPT_TOKEN_TYPE_PACKET_ID, |
551 | &PKT_ADDR(ph)->pkt_pflags); |
552 | #endif /* !KERNEL */ |
553 | } |
554 | |
555 | __attribute__((always_inline)) |
556 | static inline errno_t |
557 | __packet_get_vlan_tag(const uint64_t ph, uint16_t *vlan_tag, |
558 | boolean_t *tag_in_pkt) |
559 | { |
560 | #ifdef KERNEL |
561 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
562 | #else /* !KERNEL */ |
563 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
564 | #endif /* !KERNEL */ |
565 | uint64_t pflags; |
566 | |
567 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
568 | pflags = PKT_ADDR(ph)->pkt_pflags; |
569 | if ((pflags & PKT_F_OPT_VLTAG) == 0) { |
570 | return ENOENT; |
571 | } |
572 | if (vlan_tag != NULL) { |
573 | *vlan_tag = po->__po_vlan_tag; |
574 | } |
575 | if (tag_in_pkt != NULL) { |
576 | *tag_in_pkt = ((pflags & PKT_F_OPT_VLTAG_IN_PKT) != 0); |
577 | } |
578 | return 0; |
579 | } |
580 | |
581 | __attribute__((always_inline)) |
582 | static inline errno_t |
583 | __packet_set_vlan_tag(const uint64_t ph, const uint16_t vlan_tag, |
584 | const boolean_t tag_in_pkt) |
585 | { |
586 | #ifdef KERNEL |
587 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
588 | #else /* !KERNEL */ |
589 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
590 | #endif /* !KERNEL */ |
591 | |
592 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
593 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_VLTAG; |
594 | po->__po_vlan_tag = vlan_tag; |
595 | |
596 | if (tag_in_pkt) { |
597 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_VLTAG_IN_PKT; |
598 | } |
599 | return 0; |
600 | } |
601 | |
602 | __attribute__((always_inline)) |
603 | static inline uint16_t |
604 | __packet_get_vlan_id(const uint16_t vlan_tag) |
605 | { |
606 | return EVL_VLANOFTAG(vlan_tag); |
607 | } |
608 | |
609 | __attribute__((always_inline)) |
610 | static inline uint8_t |
611 | __packet_get_vlan_priority(const uint16_t vlan_tag) |
612 | { |
613 | return EVL_PRIOFTAG(vlan_tag); |
614 | } |
615 | |
616 | __attribute__((always_inline)) |
617 | static inline errno_t |
618 | __packet_get_app_metadata(const uint64_t ph, |
619 | packet_app_metadata_type_t *app_type, uint8_t *app_metadata) |
620 | { |
621 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
622 | if (app_type == NULL || app_metadata == NULL) { |
623 | return EINVAL; |
624 | } |
625 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_APP_METADATA) == 0) { |
626 | return ENOENT; |
627 | } |
628 | #ifdef KERNEL |
629 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
630 | #else /* !KERNEL */ |
631 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
632 | #endif /* !KERNEL */ |
633 | if (po->__po_app_type == PACKET_APP_METADATA_TYPE_UNSPECIFIED) { |
634 | return ENOENT; |
635 | } |
636 | *app_type = po->__po_app_type; |
637 | *app_metadata = po->__po_app_metadata; |
638 | return 0; |
639 | } |
640 | |
641 | __attribute__((always_inline)) |
642 | static inline errno_t |
643 | __packet_set_app_metadata(const uint64_t ph, |
644 | const packet_app_metadata_type_t app_type, const uint8_t app_metadata) |
645 | { |
646 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
647 | #ifdef KERNEL |
648 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
649 | #else /* !KERNEL */ |
650 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
651 | #endif /* !KERNEL */ |
652 | if (app_type < PACKET_APP_METADATA_TYPE_MIN || |
653 | app_type > PACKET_APP_METADATA_TYPE_MAX) { |
654 | po->__po_app_type = PACKET_APP_METADATA_TYPE_UNSPECIFIED; |
655 | PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_OPT_APP_METADATA; |
656 | return EINVAL; |
657 | } |
658 | po->__po_app_type = app_type; |
659 | po->__po_app_metadata = app_metadata; |
660 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_OPT_APP_METADATA; |
661 | return 0; |
662 | } |
663 | |
664 | #ifdef KERNEL |
665 | __attribute__((always_inline)) |
666 | static inline void |
667 | __packet_set_wake_flag(const uint64_t ph) |
668 | { |
669 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
670 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_WAKE_PKT; |
671 | } |
672 | #endif |
673 | |
674 | __attribute__((always_inline)) |
675 | static inline boolean_t |
676 | __packet_get_wake_flag(const uint64_t ph) |
677 | { |
678 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_WAKE_PKT) != 0; |
679 | } |
680 | |
681 | __attribute__((always_inline)) |
682 | static inline void |
683 | __packet_set_keep_alive(const uint64_t ph, const boolean_t is_keep_alive) |
684 | { |
685 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
686 | if (is_keep_alive) { |
687 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_KEEPALIVE; |
688 | } else { |
689 | PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_KEEPALIVE; |
690 | } |
691 | } |
692 | |
693 | __attribute__((always_inline)) |
694 | static inline boolean_t |
695 | __packet_get_keep_alive(const uint64_t ph) |
696 | { |
697 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_KEEPALIVE) != 0; |
698 | } |
699 | |
700 | __attribute__((always_inline)) |
701 | static inline boolean_t |
702 | __packet_get_truncated(const uint64_t ph) |
703 | { |
704 | PKT_SUBTYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET, NEXUS_META_SUBTYPE_RAW); |
705 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_TRUNCATED) != 0; |
706 | } |
707 | |
708 | #ifdef KERNEL |
709 | __attribute__((always_inline)) |
710 | static inline boolean_t |
711 | __packet_get_transport_new_flow(const uint64_t ph) |
712 | { |
713 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
714 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_NEW_FLOW) != 0; |
715 | } |
716 | |
717 | __attribute__((always_inline)) |
718 | static inline boolean_t |
719 | __packet_get_transport_last_packet(const uint64_t ph) |
720 | { |
721 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
722 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_LAST_PKT) != 0; |
723 | } |
724 | |
725 | __attribute__((always_inline)) |
726 | static inline boolean_t |
727 | __packet_get_l4s_flag(const uint64_t ph) |
728 | { |
729 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
730 | return (PKT_ADDR(ph)->pkt_pflags & PKT_F_L4S) != 0; |
731 | } |
732 | #endif /* KERNEL */ |
733 | |
734 | __attribute__((always_inline)) |
735 | static inline void |
736 | __packet_set_l4s_flag(const uint64_t ph) |
737 | { |
738 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
739 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_L4S; |
740 | } |
741 | |
742 | __attribute__((always_inline)) |
743 | static inline int |
744 | __packet_set_service_class(const uint64_t ph, const uint32_t sc) |
745 | { |
746 | int err = 0; |
747 | |
	_CASSERT(sizeof(QUM_ADDR(ph)->qum_svc_class) == sizeof(uint32_t));
749 | |
750 | switch (sc) { |
751 | case PKT_SC_BE: |
752 | case PKT_SC_BK_SYS: |
753 | case PKT_SC_BK: |
754 | case PKT_SC_RD: |
755 | case PKT_SC_OAM: |
756 | case PKT_SC_AV: |
757 | case PKT_SC_RV: |
758 | case PKT_SC_VI: |
759 | case PKT_SC_SIG: |
760 | case PKT_SC_VO: |
761 | case PKT_SC_CTL: |
762 | QUM_ADDR(ph)->qum_svc_class = sc; |
763 | break; |
764 | |
765 | default: |
766 | err = EINVAL; |
767 | break; |
768 | } |
769 | |
770 | return err; |
771 | } |
772 | |
773 | __attribute__((always_inline)) |
774 | static inline uint32_t |
775 | __packet_get_service_class(const uint64_t ph) |
776 | { |
777 | uint32_t sc; |
778 | |
	_CASSERT(sizeof(QUM_ADDR(ph)->qum_svc_class) == sizeof(uint32_t));
780 | |
781 | switch (QUM_ADDR(ph)->qum_svc_class) { |
782 | case PKT_SC_BE: /* most likely best effort */ |
783 | case PKT_SC_BK_SYS: |
784 | case PKT_SC_BK: |
785 | case PKT_SC_RD: |
786 | case PKT_SC_OAM: |
787 | case PKT_SC_AV: |
788 | case PKT_SC_RV: |
789 | case PKT_SC_VI: |
790 | case PKT_SC_SIG: |
791 | case PKT_SC_VO: |
792 | case PKT_SC_CTL: |
793 | sc = QUM_ADDR(ph)->qum_svc_class; |
794 | break; |
795 | |
796 | default: |
797 | sc = PKT_SC_BE; |
798 | break; |
799 | } |
800 | |
801 | return sc; |
802 | } |
803 | |
804 | __attribute__((always_inline)) |
805 | static inline errno_t |
806 | __packet_set_comp_gencnt(const uint64_t ph, const uint32_t gencnt) |
807 | { |
	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_comp_gencnt) == sizeof(uint32_t));
809 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
810 | |
811 | PKT_ADDR(ph)->pkt_comp_gencnt = gencnt; |
812 | |
813 | return 0; |
814 | } |
815 | |
816 | __attribute__((always_inline)) |
817 | static inline errno_t |
818 | __packet_get_comp_gencnt(const uint64_t ph, uint32_t *pgencnt) |
819 | { |
	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_comp_gencnt) == sizeof(uint32_t));
821 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
822 | |
823 | if (pgencnt == NULL) { |
824 | return EINVAL; |
825 | } |
826 | |
827 | if (PKT_ADDR(ph)->pkt_comp_gencnt == 0) { |
828 | return ENOENT; |
829 | } |
830 | |
831 | *pgencnt = PKT_ADDR(ph)->pkt_comp_gencnt; |
832 | return 0; |
833 | } |
834 | |
835 | __attribute__((always_inline)) |
836 | static inline int |
837 | __packet_set_traffic_class(const uint64_t ph, const uint32_t tc) |
838 | { |
839 | uint32_t val = PKT_TC2SCVAL(tc); /* just the val portion */ |
840 | uint32_t sc; |
841 | |
842 | switch (val) { |
843 | case PKT_SCVAL_BK_SYS: |
844 | sc = PKT_SC_BK_SYS; |
845 | break; |
846 | case PKT_SCVAL_BK: |
847 | sc = PKT_SC_BK; |
848 | break; |
849 | case PKT_SCVAL_BE: |
850 | sc = PKT_SC_BE; |
851 | break; |
852 | case PKT_SCVAL_RD: |
853 | sc = PKT_SC_RD; |
854 | break; |
855 | case PKT_SCVAL_OAM: |
856 | sc = PKT_SC_OAM; |
857 | break; |
858 | case PKT_SCVAL_AV: |
859 | sc = PKT_SC_AV; |
860 | break; |
861 | case PKT_SCVAL_RV: |
862 | sc = PKT_SC_RV; |
863 | break; |
864 | case PKT_SCVAL_VI: |
865 | sc = PKT_SC_VI; |
866 | break; |
867 | case PKT_SCVAL_SIG: |
868 | sc = PKT_SC_SIG; |
869 | break; |
870 | case PKT_SCVAL_VO: |
871 | sc = PKT_SC_VO; |
872 | break; |
873 | case PKT_SCVAL_CTL: |
874 | sc = PKT_SC_CTL; |
875 | break; |
876 | default: |
877 | sc = PKT_SC_BE; |
878 | break; |
879 | } |
880 | |
881 | return __packet_set_service_class(ph, sc); |
882 | } |
883 | |
884 | __attribute__((always_inline)) |
885 | static inline uint32_t |
886 | __packet_get_traffic_class(const uint64_t ph) |
887 | { |
888 | return PKT_SC2TC(__packet_get_service_class(ph)); |
889 | } |
890 | |
891 | __attribute__((always_inline)) |
892 | static inline int |
893 | __packet_set_inet_checksum(const uint64_t ph, const packet_csum_flags_t flags, |
894 | const uint16_t start, const uint16_t stuff_val, boolean_t tx) |
895 | { |
896 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
897 | |
898 | PKT_ADDR(ph)->pkt_csum_flags = flags & PACKET_CSUM_FLAGS; |
899 | |
900 | if (tx) { |
901 | PKT_ADDR(ph)->pkt_csum_tx_start_off = start; |
902 | PKT_ADDR(ph)->pkt_csum_tx_stuff_off = stuff_val; |
903 | } else { |
904 | PKT_ADDR(ph)->pkt_csum_rx_start_off = start; |
905 | PKT_ADDR(ph)->pkt_csum_rx_value = stuff_val; |
906 | } |
907 | return 0; |
908 | } |
909 | |
910 | __attribute__((always_inline)) |
911 | static inline void |
912 | __packet_add_inet_csum_flags(const uint64_t ph, const packet_csum_flags_t flags) |
913 | { |
914 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
915 | |
916 | PKT_ADDR(ph)->pkt_csum_flags |= flags & PACKET_CSUM_FLAGS; |
917 | } |
918 | |
919 | __attribute__((always_inline)) |
920 | static inline packet_csum_flags_t |
921 | __packet_get_inet_checksum(const uint64_t ph, uint16_t *start, |
922 | uint16_t *stuff_val, boolean_t tx) |
923 | { |
924 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
925 | |
926 | if (tx) { |
927 | if (__probable(start != NULL)) { |
928 | *start = PKT_ADDR(ph)->pkt_csum_tx_start_off; |
929 | } |
930 | if (__probable(stuff_val != NULL)) { |
931 | *stuff_val = PKT_ADDR(ph)->pkt_csum_tx_stuff_off; |
932 | } |
933 | } else { |
934 | if (__probable(start != NULL)) { |
935 | *start = PKT_ADDR(ph)->pkt_csum_rx_start_off; |
936 | } |
937 | if (__probable(stuff_val != NULL)) { |
938 | *stuff_val = PKT_ADDR(ph)->pkt_csum_rx_value; |
939 | } |
940 | } |
941 | return PKT_ADDR(ph)->pkt_csum_flags & PACKET_CSUM_FLAGS; |
942 | } |
943 | |
944 | __attribute__((always_inline)) |
945 | static inline void |
946 | __packet_set_flow_uuid(const uint64_t ph, const uuid_t flow_uuid) |
947 | { |
948 | struct __quantum *q = &QUM_ADDR(ph)->qum_com; |
949 | |
950 | /* |
	 * Anticipate a nicely (8-byte) aligned UUID from the caller;
952 | * the one in qum_flow_id is always 8-byte aligned. |
953 | */ |
954 | if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint64_t)))) { |
955 | const uint64_t *id_64 = (const uint64_t *)(const void *)flow_uuid; |
956 | q->__q_flow_id_val64[0] = id_64[0]; |
957 | q->__q_flow_id_val64[1] = id_64[1]; |
958 | } else if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint32_t)))) { |
959 | const uint32_t *id_32 = (const uint32_t *)(const void *)flow_uuid; |
960 | q->__q_flow_id_val32[0] = id_32[0]; |
961 | q->__q_flow_id_val32[1] = id_32[1]; |
962 | q->__q_flow_id_val32[2] = id_32[2]; |
963 | q->__q_flow_id_val32[3] = id_32[3]; |
964 | } else { |
		bcopy(flow_uuid, q->__q_flow_id, sizeof(uuid_t));
966 | } |
967 | } |
968 | |
969 | __attribute__((always_inline)) |
970 | static inline void |
971 | __packet_get_flow_uuid(const uint64_t ph, uuid_t flow_uuid) |
972 | { |
973 | struct __quantum *q = &QUM_ADDR(ph)->qum_com; |
974 | |
975 | /* |
	 * Anticipate a nicely (8-byte) aligned UUID from the caller;
977 | * the one in qum_flow_id is always 8-byte aligned. |
978 | */ |
979 | if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint64_t)))) { |
980 | uint64_t *id_64 = (uint64_t *)(void *)flow_uuid; |
981 | id_64[0] = q->__q_flow_id_val64[0]; |
982 | id_64[1] = q->__q_flow_id_val64[1]; |
983 | } else if (__probable(IS_P2ALIGNED(flow_uuid, sizeof(uint32_t)))) { |
984 | uint32_t *id_32 = (uint32_t *)(void *)flow_uuid; |
985 | id_32[0] = q->__q_flow_id_val32[0]; |
986 | id_32[1] = q->__q_flow_id_val32[1]; |
987 | id_32[2] = q->__q_flow_id_val32[2]; |
988 | id_32[3] = q->__q_flow_id_val32[3]; |
989 | } else { |
		bcopy(q->__q_flow_id, flow_uuid, sizeof(uuid_t));
991 | } |
992 | } |
993 | |
994 | __attribute__((always_inline)) |
995 | static inline void |
996 | __packet_clear_flow_uuid(const uint64_t ph) |
997 | { |
998 | struct __quantum *q = &QUM_ADDR(ph)->qum_com; |
999 | q->__q_flow_id_val64[0] = 0; |
1000 | q->__q_flow_id_val64[1] = 0; |
1001 | } |
1002 | |
1003 | __attribute__((always_inline)) |
1004 | static inline uint8_t |
1005 | __packet_get_aggregation_type(const uint64_t ph) |
1006 | { |
	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_aggr_type) == sizeof(uint8_t));
1008 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1009 | |
1010 | return PKT_ADDR(ph)->pkt_aggr_type; |
1011 | } |
1012 | |
1013 | __attribute__((always_inline)) |
1014 | static inline uint32_t |
1015 | __packet_get_data_length(const uint64_t ph) |
1016 | { |
1017 | return QUM_ADDR(ph)->qum_len; |
1018 | } |
1019 | |
1020 | __attribute__((always_inline)) |
1021 | static inline uint16_t |
1022 | __packet_get_buflet_count(const uint64_t ph) |
1023 | { |
1024 | uint16_t bcnt = 0; |
1025 | |
1026 | switch (SK_PTR_TYPE(ph)) { |
1027 | case NEXUS_META_TYPE_PACKET: |
1028 | bcnt = PKT_ADDR(ph)->pkt_bufs_cnt; |
1029 | #ifdef KERNEL |
1030 | VERIFY(bcnt != 0 || |
1031 | PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp)); |
1032 | #else /* !KERNEL */ |
1033 | /* |
1034 | * Handle the case where the metadata region gets |
1035 | * redirected to anonymous zero-filled pages at |
1036 | * defunct time. There's always 1 buflet in the |
1037 | * packet metadata, so pretend that's the count. |
1038 | */ |
1039 | if (__improbable(bcnt == 0)) { |
1040 | bcnt = 1; |
1041 | } |
1042 | #endif /* !KERNEL */ |
1043 | break; |
1044 | case NEXUS_META_TYPE_QUANTUM: |
1045 | bcnt = 1; |
1046 | break; |
1047 | default: |
1048 | #ifdef KERNEL |
1049 | VERIFY(0); |
1050 | /* NOTREACHED */ |
1051 | __builtin_unreachable(); |
1052 | #endif /* KERNEL */ |
1053 | break; |
1054 | } |
1055 | return bcnt; |
1056 | } |
1057 | |
1058 | __attribute__((always_inline)) |
1059 | static inline int |
1060 | __packet_add_buflet(const uint64_t ph, const void *bprev0, const void *bnew0) |
1061 | { |
1062 | uint16_t bcnt; |
1063 | |
1064 | #ifdef KERNEL |
1065 | kern_buflet_t bprev = __DECONST(kern_buflet_t, bprev0); |
1066 | kern_buflet_t bnew = __DECONST(kern_buflet_t, bnew0); |
1067 | |
1068 | VERIFY(PKT_ADDR(ph) && bnew && (bnew != bprev)); |
1069 | VERIFY(PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp)); |
1070 | #else /* !KERNEL */ |
1071 | buflet_t bprev = __DECONST(buflet_t, bprev0); |
1072 | buflet_t bnew = __DECONST(buflet_t, bnew0); |
1073 | |
1074 | if (__improbable(!PKT_ADDR(ph) || !bnew || (bnew == bprev))) { |
1075 | return EINVAL; |
1076 | } |
1077 | #endif /* !KERNEL */ |
1078 | |
1079 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1080 | bcnt = PKT_ADDR(ph)->pkt_bufs_cnt; |
1081 | |
1082 | #ifdef KERNEL |
1083 | VERIFY((bprev != NULL || bcnt == 0) && |
1084 | (bcnt < PKT_ADDR(ph)->pkt_bufs_max)); |
1085 | #else /* !KERNEL */ |
	if (__improbable((bcnt >= PKT_ADDR(ph)->pkt_bufs_max) ||
	    (bprev == NULL && bcnt != 0))) {
1088 | return EINVAL; |
1089 | } |
1090 | #endif /* !KERNEL */ |
1091 | |
1092 | #ifdef KERNEL |
1093 | #if DEVELOPMENT || DEBUG |
1094 | /* check if bprev is the last buflet in the chain */ |
1095 | struct __kern_buflet *__single pbft, *__single kbft; |
1096 | int n = bcnt; |
1097 | |
1098 | PKT_GET_FIRST_BUFLET(PKT_ADDR(ph), bcnt, pbft); |
1099 | kbft = pbft; |
1100 | |
1101 | while ((kbft != NULL) && n--) { |
1102 | pbft = kbft; |
1103 | kbft = __unsafe_forge_single(struct __kern_buflet *, |
1104 | __DECONST(struct __kern_buflet *, kbft->buf_nbft_addr)); |
1105 | } |
1106 | ASSERT(n == 0); |
1107 | ASSERT(bprev == pbft); |
1108 | #endif /* DEVELOPMENT || DEBUG */ |
1109 | #endif /* KERNEL */ |
1110 | |
1111 | if (bprev == NULL) { |
1112 | bprev = &PKT_ADDR(ph)->pkt_qum_buf; |
1113 | } |
1114 | #ifdef KERNEL |
1115 | KBUF_LINK(bprev, bnew); |
1116 | #else /* !KERNEL */ |
1117 | UBUF_LINK(bprev, bnew); |
1118 | #endif /* !KERNEL */ |
1119 | |
1120 | *(uint16_t *)(uintptr_t)&PKT_ADDR(ph)->pkt_bufs_cnt = ++bcnt; |
1121 | return 0; |
1122 | } |
1123 | |
1124 | __attribute__((always_inline)) |
1125 | static inline void * |
1126 | __packet_get_next_buflet(const uint64_t ph, const void *bprev0) |
1127 | { |
1128 | #ifdef KERNEL |
1129 | kern_buflet_t bprev = __DECONST(kern_buflet_t, bprev0); |
1130 | struct __kern_buflet *__single bcur = NULL; |
1131 | #else /* !KERNEL */ |
1132 | buflet_t bprev = __DECONST(buflet_t, bprev0); |
1133 | void *bcur = NULL; |
1134 | #endif /* !KERNEL */ |
1135 | |
1136 | switch (SK_PTR_TYPE(ph)) { |
1137 | case NEXUS_META_TYPE_PACKET: { |
1138 | uint32_t bcnt = PKT_ADDR(ph)->pkt_bufs_cnt; |
1139 | #ifdef KERNEL |
1140 | ASSERT(bcnt != 0 || |
1141 | PP_HAS_BUFFER_ON_DEMAND(PKT_ADDR(ph)->pkt_qum.qum_pp)); |
1142 | #else /* !KERNEL */ |
1143 | /* |
1144 | * Handle the case where the metadata region gets |
1145 | * redirected to anonymous zero-filled pages at |
1146 | * defunct time. There's always 1 buflet in the |
1147 | * packet metadata, so pretend that's the count. |
1148 | */ |
1149 | if (__improbable(bcnt == 0)) { |
1150 | bcnt = 1; |
1151 | bprev = NULL; |
1152 | } |
1153 | #endif /* !KERNEL */ |
1154 | PKT_GET_NEXT_BUFLET(PKT_ADDR(ph), bcnt, BLT_ADDR(bprev), bcur); |
1155 | break; |
1156 | } |
1157 | case NEXUS_META_TYPE_QUANTUM: |
1158 | QUM_GET_NEXT_BUFLET(QUM_ADDR(ph), BLT_ADDR(bprev), bcur); |
1159 | break; |
1160 | default: |
1161 | #ifdef KERNEL |
1162 | VERIFY(0); |
1163 | /* NOTREACHED */ |
1164 | __builtin_unreachable(); |
1165 | #endif /* KERNEL */ |
1166 | break; |
1167 | } |
1168 | return bcur; |
1169 | } |
1170 | |
1171 | __attribute__((always_inline)) |
1172 | static inline uint8_t |
1173 | __packet_get_segment_count(const uint64_t ph) |
1174 | { |
	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_seg_cnt) == sizeof(uint8_t));
1176 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1177 | |
1178 | return PKT_ADDR(ph)->pkt_seg_cnt; |
1179 | } |
1180 | |
1181 | __attribute__((always_inline)) |
1182 | static inline void |
1183 | __packet_set_segment_count(const uint64_t ph, uint8_t segcount) |
1184 | { |
	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_seg_cnt) == sizeof(uint8_t));
1186 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1187 | |
1188 | PKT_ADDR(ph)->pkt_seg_cnt = segcount; |
1189 | } |
1190 | |
1191 | __attribute__((always_inline)) |
1192 | static inline uint16_t |
1193 | __packet_get_protocol_segment_size(const uint64_t ph) |
1194 | { |
1195 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1196 | return PKT_ADDR(ph)->pkt_proto_seg_sz; |
1197 | } |
1198 | |
1199 | __attribute__((always_inline)) |
1200 | static inline errno_t |
1201 | __packet_set_protocol_segment_size(const uint64_t ph, uint16_t proto_seg_sz) |
1202 | { |
1203 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1204 | PKT_ADDR(ph)->pkt_proto_seg_sz = proto_seg_sz; |
1205 | return 0; |
1206 | } |
1207 | |
1208 | __attribute__((always_inline)) |
1209 | static inline void |
1210 | __packet_get_tso_flags(const uint64_t ph, packet_tso_flags_t *flags) |
1211 | { |
	_CASSERT(sizeof(PKT_ADDR(ph)->pkt_proto_seg_sz) == sizeof(uint16_t));
1213 | |
1214 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1215 | *flags = PKT_ADDR(ph)->pkt_csum_flags & (PACKET_CSUM_TSO_FLAGS); |
1216 | } |
1217 | |
1218 | __attribute__((always_inline)) |
1219 | static inline void |
1220 | __packet_set_tso_flags(const uint64_t ph, packet_tso_flags_t flags) |
1221 | { |
1222 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1223 | |
1224 | PKT_ADDR(ph)->pkt_csum_flags |= flags & (PACKET_CSUM_TSO_FLAGS); |
1225 | } |
1226 | |
1227 | __attribute__((always_inline)) |
1228 | static inline uint32_t |
1229 | __buflet_get_data_limit(const void *buf) |
1230 | { |
1231 | return BLT_ADDR(buf)->buf_dlim; |
1232 | } |
1233 | |
1234 | #ifdef KERNEL |
1235 | __attribute__((always_inline)) |
1236 | static inline errno_t |
1237 | __buflet_set_data_limit(const void *buf, const uint32_t dlim) |
1238 | { |
1239 | /* buffer region is always marked as shareable */ |
1240 | ASSERT(BLT_ADDR(buf)->buf_ctl->bc_flags & SKMEM_BUFCTL_SHAREOK); |
1241 | |
1242 | /* full bounds checking will be performed during finalize */ |
1243 | if (__probable((uint32_t)dlim <= BLT_ADDR(buf)->buf_objlim)) { |
1244 | _CASSERT(sizeof(BLT_ADDR(buf)->buf_dlim) == sizeof(uint32_t)); |
1245 | /* deconst */ |
1246 | *(uint32_t *)(uintptr_t)&BLT_ADDR(buf)->buf_dlim = dlim; |
1247 | return 0; |
1248 | } |
1249 | return ERANGE; |
1250 | } |
1251 | #endif /* KERNEL */ |
1252 | |
1253 | __attribute__((always_inline)) |
1254 | static inline uint32_t |
1255 | __buflet_get_data_offset(const void *buf) |
1256 | { |
1257 | return BLT_ADDR(buf)->buf_doff; |
1258 | } |
1259 | |
1260 | /* |
1261 | * ****************************************************************** |
1262 | * Checks in __packet_finalize for packet finalized from userland |
1263 | * ****************************************************************** |
1264 | * +-------+---------------------------+---------------------------+ |
1265 | * | NEXUS_META_SUBTYPE_RAW | NEXUS_META_SUBTYPE_PAYLOAD| |
1266 | * |-------+---------------------------+---------------------------+ |
1267 | * |buflet | (bdoff + len) <= dlim | (bdoff + len) <= dlim | |
1268 | * |l2_off | l2 == bdoff && l2 < bdlim | l2 = l3 = 0 && doff == 0 | |
1269 | * |l3_off | l3 = l2 | l3 == 0 | |
1270 | * |l4_off | l4 = l3 = l2 | l4 = l3 = 0 | |
1271 | * +-------+---------------------------+---------------------------+ |
1272 | * |
1273 | * ****************************************************************** |
1274 | * Checks in __packet_finalize for packet finalized from kernel |
1275 | * ****************************************************************** |
1276 | * +-------+---------------------------+---------------------------+ |
1277 | * | NEXUS_META_SUBTYPE_RAW | NEXUS_META_SUBTYPE_PAYLOAD| |
1278 | * |-------+---------------------------+---------------------------+ |
1279 | * |buflet | (bdoff + len) <= dlim | (bdoff + len) <= dlim | |
1280 | * |l2_off | l2 == bdoff && l2 < bdlim | l2 = l3 = 0 && doff == 0 | |
1281 | * |l3_off | l3 >= l2 && l3 <bdlim | l3 == 0 | |
1282 | * |l4_off | l4 = l3 | l4 = l3 = 0 | |
1283 | * +-------+---------------------------+---------------------------+ |
1284 | * |
1285 | */ |
1286 | __attribute__((always_inline)) |
1287 | static inline int |
1288 | __packet_finalize(const uint64_t ph) |
1289 | { |
1290 | void *__single bcur = NULL, *__single bprev = NULL; |
	uint32_t len, bcnt, bdoff0 = 0, bdlim0 = 0;
1292 | int err = 0; |
1293 | |
1294 | #ifdef KERNEL |
1295 | ASSERT(QUM_ADDR(ph)->qum_qflags & QUM_F_INTERNALIZED); |
1296 | #endif /* KERNEL */ |
1297 | QUM_ADDR(ph)->qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED); |
1298 | |
1299 | bcnt = __packet_get_buflet_count(ph); |
1300 | len = QUM_ADDR(ph)->qum_len = 0; |
1301 | |
1302 | while (bcnt--) { |
		bcur = __packet_get_next_buflet(ph, bprev);
1304 | |
1305 | #ifdef KERNEL |
1306 | ASSERT(bcur != NULL); |
1307 | ASSERT(BLT_ADDR(bcur)->buf_addr != 0); |
1308 | #else /* !KERNEL */ |
1309 | if (__improbable(bcur == NULL)) { |
1310 | err = ERANGE; |
1311 | break; |
1312 | } |
#endif /* !KERNEL */
1314 | |
1315 | /* save data offset from the first buflet */ |
1316 | if (bprev == NULL) { |
			bdoff0 = __buflet_get_data_offset(bcur);
			bdlim0 = __buflet_get_data_limit(bcur);
1319 | } |
1320 | |
1321 | #ifndef KERNEL |
1322 | if (__improbable(!BUF_IN_RANGE(BLT_ADDR(bcur)))) { |
1323 | err = ERANGE; |
1324 | break; |
1325 | } |
#else /* KERNEL */
1327 | if (__improbable(!BUF_IN_RANGE(BLT_ADDR(bcur)) && |
1328 | !PKT_HAS_ATTACHED_MBUF(ph))) { |
1329 | err = ERANGE; |
1330 | break; |
1331 | } |
1332 | #endif /* KERNEL */ |
1333 | len += BLT_ADDR(bcur)->buf_dlen; |
1334 | bprev = bcur; |
1335 | } |
1336 | |
1337 | if (__improbable(err != 0)) { |
1338 | goto done; |
1339 | } |
1340 | |
1341 | switch (SK_PTR_TYPE(ph)) { |
1342 | case NEXUS_META_TYPE_PACKET: |
1343 | if (__improbable(bdoff0 > UINT8_MAX)) { |
1344 | err = ERANGE; |
1345 | goto done; |
1346 | } |
1347 | /* internalize headroom value from offset */ |
1348 | PKT_ADDR(ph)->pkt_headroom = (uint8_t)bdoff0; |
1349 | /* validate header offsets in packet */ |
1350 | switch (SK_PTR_SUBTYPE(ph)) { |
1351 | case NEXUS_META_SUBTYPE_RAW: |
1352 | #ifndef KERNEL |
1353 | /* Overwrite L2 len for raw packets from user space */ |
1354 | PKT_ADDR(ph)->pkt_l2_len = 0; |
#else /* KERNEL */
1356 | /* ensure that L3 >= L2 && L3 < bdlim */ |
1357 | if (__improbable((PKT_ADDR(ph)->pkt_headroom + |
1358 | PKT_ADDR(ph)->pkt_l2_len) >= bdlim0)) { |
1359 | err = ERANGE; |
1360 | goto done; |
1361 | } |
1362 | #endif /* KERNEL */ |
1363 | break; |
1364 | case NEXUS_META_SUBTYPE_PAYLOAD: |
1365 | /* |
1366 | * For payload packet there is no concept of headroom |
1367 | * and L3 offset should always be 0 |
1368 | */ |
1369 | if (__improbable((PKT_ADDR(ph)->pkt_headroom != 0) || |
1370 | (PKT_ADDR(ph)->pkt_l2_len != 0))) { |
1371 | err = ERANGE; |
1372 | goto done; |
1373 | } |
1374 | break; |
1375 | default: |
1376 | #ifdef KERNEL |
1377 | VERIFY(0); |
1378 | /* NOTREACHED */ |
1379 | __builtin_unreachable(); |
1380 | #endif /* KERNEL */ |
1381 | break; |
1382 | } |
1383 | |
1384 | if (__improbable(PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_DATA)) { |
1385 | #ifdef KERNEL |
1386 | struct __packet_opt *po = PKT_ADDR(ph)->pkt_com_opt; |
1387 | #else /* !KERNEL */ |
1388 | struct __packet_opt *po = &PKT_ADDR(ph)->pkt_com_opt; |
1389 | #endif /* !KERNEL */ |
1390 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_EXPIRE_TS) && |
1391 | po->__po_expire_ts == 0) { |
1392 | err = EINVAL; |
1393 | goto done; |
1394 | } |
1395 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_OPT_TOKEN) && |
1396 | po->__po_token_len == 0) { |
1397 | err = EINVAL; |
1398 | goto done; |
1399 | } |
1400 | ASSERT(err == 0); |
1401 | } |
1402 | |
1403 | /* |
1404 | * NOTE: we don't need the validation for total packet length |
1405 | * as checking if each buflet is in range and that |
1406 | * (pkt_headroom == bdoff0), should cover this check. |
1407 | */ |
1408 | break; |
1409 | |
1410 | default: |
1411 | /* nothing to do currently for quantum */ |
1412 | break; |
1413 | } |
1414 | |
1415 | done: |
1416 | if (__probable(err == 0)) { |
1417 | QUM_ADDR(ph)->qum_len = len; |
1418 | QUM_ADDR(ph)->qum_qflags |= QUM_F_FINALIZED; |
1419 | } else { |
1420 | QUM_ADDR(ph)->qum_len = 0; |
1421 | QUM_ADDR(ph)->qum_qflags |= QUM_F_DROPPED; |
1422 | } |
1423 | |
1424 | return err; |
1425 | } |
1426 | |
1427 | __attribute__((always_inline)) |
1428 | static inline boolean_t |
1429 | __packet_is_finalized(const uint64_t ph) |
1430 | { |
1431 | return QUM_ADDR(ph)->qum_qflags & QUM_F_FINALIZED; |
1432 | } |
1433 | |
1434 | #ifdef KERNEL |
1435 | /* |
1436 | * function to initialize a packet with mbuf chain. |
1437 | * Apart from the attached mbuf, the packet can also be used to convey |
1438 | * additional metadata like the headroom and L2 header length. |
1439 | * For a packet with attached mbuf, the pkt_length conveys the length of |
1440 | * the attached mbuf. If the data copied is partial then PKT_F_TRUNCATED is |
1441 | * also set. |
1442 | */ |
1443 | __attribute__((always_inline)) |
1444 | static inline int |
1445 | __packet_initialize_with_mbufchain(struct __kern_packet *pkt, struct mbuf *mbuf, |
1446 | uint8_t headroom, uint8_t l2len) |
1447 | { |
1448 | VERIFY(METADATA_TYPE(pkt) == NEXUS_META_TYPE_PACKET); |
1449 | VERIFY(pkt->pkt_qum.qum_qflags & QUM_F_INTERNALIZED); |
1450 | VERIFY((pkt->pkt_pflags & PKT_F_MBUF_MASK) == 0); |
1451 | VERIFY((pkt->pkt_pflags & PKT_F_PKT_DATA) == 0); |
1452 | VERIFY(pkt->pkt_mbuf == NULL); |
1453 | |
1454 | pkt->pkt_qum.qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED); |
1455 | pkt->pkt_mbuf = mbuf; |
1456 | pkt->pkt_pflags |= (PKT_F_MBUF_DATA | PKT_F_TRUNCATED); |
1457 | pkt->pkt_headroom = headroom; |
1458 | pkt->pkt_l2_len = l2len; |
	pkt->pkt_length = (uint32_t)m_pktlen(mbuf);
1460 | pkt->pkt_qum_buf.buf_dlen = 0; |
1461 | pkt->pkt_qum_buf.buf_doff = 0; |
1462 | pkt->pkt_qum.qum_qflags |= QUM_F_FINALIZED; |
1463 | return 0; |
1464 | } |
1465 | |
1466 | __attribute__((always_inline)) |
1467 | static inline int |
1468 | __packet_initialize_with_mbuf(struct __kern_packet *pkt, struct mbuf *mbuf, |
1469 | uint8_t headroom, uint8_t l2len) |
1470 | { |
	VERIFY(mbuf->m_nextpkt == NULL);
	return __packet_initialize_with_mbufchain(pkt, mbuf, headroom, l2len);
1474 | } |
1475 | |
1476 | /* |
1477 | * function to finalize a packet with attached mbuf. |
1478 | */ |
1479 | __attribute__((always_inline)) |
1480 | static inline int |
1481 | __packet_finalize_with_mbuf(struct __kern_packet *pkt) |
1482 | { |
1483 | uint32_t bdlen, bdoff, bdlim; |
1484 | struct __kern_buflet *buf; |
1485 | int err = 0; |
1486 | |
1487 | VERIFY(METADATA_TYPE(pkt) == NEXUS_META_TYPE_PACKET); |
1488 | VERIFY((pkt->pkt_pflags & (PKT_F_MBUF_DATA | PKT_F_PKT_DATA)) == |
1489 | PKT_F_MBUF_DATA); |
1490 | VERIFY(pkt->pkt_mbuf != NULL); |
1491 | ASSERT(pkt->pkt_qum.qum_qflags & QUM_F_INTERNALIZED); |
1492 | VERIFY(pkt->pkt_bufs_cnt == 1); |
1493 | PKT_GET_FIRST_BUFLET(pkt, pkt->pkt_bufs_cnt, buf); |
1494 | ASSERT(buf->buf_addr != 0); |
1495 | |
1496 | pkt->pkt_qum.qum_qflags &= ~(QUM_F_DROPPED | QUM_F_FINALIZED); |
1497 | pkt->pkt_pflags &= ~PKT_F_TRUNCATED; |
1498 | bdlen = buf->buf_dlen; |
1499 | bdlim = buf->buf_dlim; |
1500 | bdoff = buf->buf_doff; |
1501 | if (__improbable(!BUF_IN_RANGE(buf))) { |
1502 | err = ERANGE; |
1503 | goto done; |
1504 | } |
1505 | |
1506 | /* validate header offsets in packet */ |
1507 | switch (METADATA_SUBTYPE(pkt)) { |
1508 | case NEXUS_META_SUBTYPE_RAW: |
1509 | if (__improbable((pkt->pkt_headroom != bdoff) || |
1510 | (pkt->pkt_headroom >= bdlim))) { |
1511 | err = ERANGE; |
1512 | goto done; |
1513 | } |
1514 | if (__improbable((pkt->pkt_headroom + |
1515 | pkt->pkt_l2_len) >= bdlim)) { |
1516 | err = ERANGE; |
1517 | goto done; |
1518 | } |
1519 | break; |
1520 | |
1521 | case NEXUS_META_SUBTYPE_PAYLOAD: |
1522 | /* |
	 * For a payload packet there is no concept of headroom.
1524 | */ |
1525 | if (__improbable((pkt->pkt_headroom != 0) || (bdoff != 0) || |
1526 | (pkt->pkt_l2_len != 0))) { |
1527 | err = ERANGE; |
1528 | goto done; |
1529 | } |
1530 | break; |
1531 | |
1532 | default: |
1533 | VERIFY(0); |
1534 | /* NOTREACHED */ |
1535 | __builtin_unreachable(); |
1536 | break; |
1537 | } |
1538 | |
1539 | |
1540 | if (__improbable(pkt->pkt_pflags & PKT_F_OPT_DATA)) { |
1541 | struct __packet_opt *po = pkt->pkt_com_opt; |
1542 | |
1543 | if ((pkt->pkt_pflags & PKT_F_OPT_EXPIRE_TS) && |
1544 | po->__po_expire_ts == 0) { |
1545 | err = EINVAL; |
1546 | goto done; |
1547 | } |
1548 | if ((pkt->pkt_pflags & PKT_F_OPT_TOKEN) && |
1549 | po->__po_token_len == 0) { |
1550 | err = EINVAL; |
1551 | goto done; |
1552 | } |
1553 | } |
1554 | ASSERT(err == 0); |
1555 | |
1556 | done: |
1557 | if (__probable(err == 0)) { |
1558 | pkt->pkt_length = (uint32_t)m_pktlen(pkt->pkt_mbuf); |
1559 | if (bdlen < pkt->pkt_length) { |
1560 | pkt->pkt_pflags |= PKT_F_TRUNCATED; |
1561 | } |
1562 | pkt->pkt_qum.qum_qflags |= QUM_F_FINALIZED; |
1563 | } else { |
1564 | pkt->pkt_length = 0; |
1565 | pkt->pkt_qum.qum_qflags |= QUM_F_DROPPED; |
1566 | } |
1567 | |
1568 | return err; |
1569 | } |
1570 | |
1571 | __attribute__((always_inline)) |
1572 | static inline uint32_t |
1573 | __packet_get_object_index(const uint64_t ph) |
1574 | { |
1575 | return METADATA_IDX(QUM_ADDR(ph)); |
1576 | } |
1577 | |
1578 | __attribute__((always_inline)) |
1579 | static inline errno_t |
1580 | __packet_get_timestamp(const uint64_t ph, uint64_t *ts, boolean_t *valid) |
1581 | { |
1582 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1583 | |
1584 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TS_VALID) != 0) { |
1585 | if (valid != NULL) { |
1586 | *valid = TRUE; |
1587 | } |
1588 | *ts = PKT_ADDR(ph)->pkt_timestamp; |
1589 | } else { |
1590 | if (valid != NULL) { |
1591 | *valid = FALSE; |
1592 | } |
1593 | *ts = 0; |
1594 | } |
1595 | |
1596 | return 0; |
1597 | } |
1598 | |
1599 | __attribute__((always_inline)) |
1600 | static inline errno_t |
1601 | __packet_set_timestamp(const uint64_t ph, uint64_t ts, boolean_t valid) |
1602 | { |
1603 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1604 | |
1605 | if (valid) { |
1606 | PKT_ADDR(ph)->pkt_timestamp = ts; |
1607 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_TS_VALID; |
1608 | } else { |
1609 | PKT_ADDR(ph)->pkt_pflags &= ~PKT_F_TS_VALID; |
1610 | PKT_ADDR(ph)->pkt_timestamp = 0; |
1611 | } |
1612 | |
1613 | return 0; |
1614 | } |
1615 | |
1616 | __attribute__((always_inline)) |
1617 | static inline errno_t |
1618 | __packet_get_tx_completion_data(const uint64_t ph, uintptr_t *cb_arg, |
1619 | uintptr_t *cb_data) |
1620 | { |
1621 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1622 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_DATA) != 0) { |
1623 | ASSERT((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_ALLOC)); |
1624 | *cb_arg = PKT_ADDR(ph)->pkt_tx_compl_cb_arg; |
1625 | *cb_data = PKT_ADDR(ph)->pkt_tx_compl_cb_data; |
1626 | } else { |
1627 | *cb_arg = 0; |
1628 | *cb_data = 0; |
1629 | } |
1630 | return 0; |
1631 | } |
1632 | |
1633 | __attribute__((always_inline)) |
1634 | static inline errno_t |
1635 | __packet_set_tx_completion_data(const uint64_t ph, uintptr_t cb_arg, |
1636 | uintptr_t cb_data) |
1637 | { |
1638 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1639 | _KPKT_INIT_TX_COMPL_DATA(PKT_ADDR(ph)); |
1640 | PKT_ADDR(ph)->pkt_tx_compl_cb_arg = cb_arg; |
1641 | PKT_ADDR(ph)->pkt_tx_compl_cb_data = cb_data; |
1642 | return 0; |
1643 | } |
1644 | |
1645 | __attribute__((always_inline)) |
1646 | static inline errno_t |
1647 | __packet_get_timestamp_requested(const uint64_t ph, boolean_t *requested) |
1648 | { |
1649 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1650 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0) { |
1651 | *requested = TRUE; |
1652 | } else { |
1653 | *requested = FALSE; |
1654 | } |
1655 | return 0; |
1656 | } |
1657 | |
1658 | __attribute__((always_inline)) |
1659 | static inline errno_t |
1660 | __packet_get_tx_completion_status(const uint64_t ph, kern_return_t *status) |
1661 | { |
1662 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1663 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_DATA) != 0) { |
1664 | ASSERT((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_COMPL_ALLOC)); |
1665 | *status = (kern_return_t)PKT_ADDR(ph)->pkt_tx_compl_status; |
1666 | } else { |
1667 | *status = 0; |
1668 | } |
1669 | return 0; |
1670 | } |
1671 | |
1672 | __attribute__((always_inline)) |
1673 | static inline errno_t |
1674 | __packet_set_tx_completion_status(const uint64_t ph, kern_return_t status) |
1675 | { |
1676 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1677 | _KPKT_INIT_TX_COMPL_DATA(PKT_ADDR(ph)); |
1678 | PKT_ADDR(ph)->pkt_tx_compl_status = (uint32_t)status; |
1679 | return 0; |
1680 | } |
1681 | |
1682 | __attribute__((always_inline)) |
1683 | static inline errno_t |
1684 | __packet_set_tx_nx_port(const uint64_t ph, nexus_port_t nx_port, |
1685 | uint16_t vpna_gencnt) |
1686 | { |
1687 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1688 | PKT_ADDR(ph)->pkt_nx_port = nx_port; |
1689 | PKT_ADDR(ph)->pkt_vpna_gencnt = vpna_gencnt; |
1690 | PKT_ADDR(ph)->pkt_pflags |= PKT_F_TX_PORT_DATA; |
1691 | return 0; |
1692 | } |
1693 | |
1694 | __attribute__((always_inline)) |
1695 | static inline errno_t |
1696 | __packet_get_tx_nx_port(const uint64_t ph, nexus_port_t *nx_port, |
1697 | uint16_t *vpna_gencnt) |
1698 | { |
1699 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1700 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_TX_PORT_DATA) == 0) { |
1701 | return ENOTSUP; |
1702 | } |
1703 | |
1704 | *nx_port = PKT_ADDR(ph)->pkt_nx_port; |
1705 | *vpna_gencnt = PKT_ADDR(ph)->pkt_vpna_gencnt; |
1706 | return 0; |
1707 | } |
1708 | |
1709 | __attribute__((always_inline)) |
1710 | static inline errno_t |
1711 | __packet_get_tx_nx_port_id(const uint64_t ph, uint32_t *nx_port_id) |
1712 | { |
1713 | errno_t err; |
1714 | nexus_port_t nx_port; |
1715 | uint16_t vpna_gencnt; |
1716 | |
1717 | _CASSERT(sizeof(nx_port) == sizeof(uint16_t)); |
1718 | |
	err = __packet_get_tx_nx_port(ph, &nx_port, &vpna_gencnt);
1720 | if (err == 0) { |
1721 | *nx_port_id = PKT_COMPOSE_NX_PORT_ID(nx_port, vpna_gencnt); |
1722 | } |
1723 | return err; |
1724 | } |
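
/*
 * Caller sketch: the composed identifier packs the nexus port (16-bit,
 * per the _CASSERT above) together with the vPNA generation count into
 * a single 32-bit value; the precise bit layout is owned by
 * PKT_COMPOSE_NX_PORT_ID.
 *
 *	uint32_t port_id;
 *	if (__packet_get_tx_nx_port_id(ph, &port_id) == 0) {
 *		// packet carried PKT_F_TX_PORT_DATA; port_id is valid
 *	}
 */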
1727 | __attribute__((always_inline)) |
1728 | static inline errno_t |
1729 | __packet_get_flowid(const uint64_t ph, packet_flowid_t *pflowid) |
1730 | { |
1731 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1732 | if ((PKT_ADDR(ph)->pkt_pflags & PKT_F_FLOW_ID) == 0) { |
1733 | return ENOENT; |
1734 | } |
1735 | *pflowid = PKT_ADDR(ph)->pkt_flow_token; |
1736 | return 0; |
1737 | } |
1738 | #endif /* KERNEL */ |
1739 | |
1740 | extern uint32_t os_cpu_in_cksum(const void *, uint32_t, uint32_t); |
1741 | |
1742 | __attribute__((always_inline)) |
1743 | static inline uint16_t |
1744 | __packet_fold_sum(uint32_t sum) |
1745 | { |
1746 | sum = (sum >> 16) + (sum & 0xffff); /* 17-bit */ |
1747 | sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */ |
1748 | sum = (sum >> 16) + (sum & 0xffff); /* final carry */ |
1749 | return sum & 0xffff; |
1750 | } |
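
/*
 * Worked example of the fold above: a running 32-bit sum of 0x1FFFF
 * reduces as (0x1 + 0xFFFF) = 0x10000, which still carries, so the
 * second pass yields (0x1 + 0x0000) = 0x1; the third pass is a no-op
 * and the 16-bit result is 0x0001.  __packet_fold_sum_final() below
 * performs the same reduction but returns the one's complement, i.e.
 * the value to be stored in a header checksum field.
 */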
1751 | |
1752 | __attribute__((always_inline)) |
1753 | static inline uint16_t |
1754 | __packet_fold_sum_final(uint32_t sum) |
1755 | { |
1756 | sum = (sum >> 16) + (sum & 0xffff); /* 17-bit */ |
1757 | sum = (sum >> 16) + (sum & 0xffff); /* 16-bit + carry */ |
1758 | sum = (sum >> 16) + (sum & 0xffff); /* final carry */ |
1759 | return ~sum & 0xffff; |
1760 | } |
1761 | |
1762 | __attribute__((always_inline)) |
1763 | static inline uint32_t |
1764 | __packet_cksum(const void *data, uint32_t len, uint32_t sum0) |
1765 | { |
1766 | return os_cpu_in_cksum(data, len, sum0); |
1767 | } |
1768 | |
1769 | extern uint32_t os_cpu_copy_in_cksum(const void *, void *, uint32_t, uint32_t); |
1770 | |
1771 | __attribute__((always_inline)) |
1772 | static inline uint32_t |
1773 | __packet_copy_and_sum(const void *src, void *dst, uint32_t len, uint32_t sum0) |
1774 | { |
1775 | return os_cpu_copy_in_cksum(src, dst, len, sum0); |
1776 | } |
1777 | |
1778 | __attribute__((always_inline)) |
1779 | static inline uint16_t |
1780 | __packet_fix_sum(uint16_t csum, uint16_t old, uint16_t new) |
1781 | { |
1782 | uint32_t c = csum + old - new; |
1783 | c = (c >> 16) + (c & 0xffff); /* Only add carry once */ |
1784 | |
1785 | return c & 0xffff; |
1786 | } |
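
/*
 * The above is the classic incremental checksum update (in the spirit
 * of RFC 1624): given the current checksum and a 16-bit field changing
 * from 'old' to 'new', it adjusts the checksum without re-summing the
 * packet.  For instance, a forwarding path decrementing the IPv4 TTL
 * could patch the header checksum roughly as follows (old_w, new_w,
 * ttl and proto are illustrative locals; this assumes the TTL sits in
 * the high byte of the 16-bit word it shares with the protocol):
 *
 *	old_w = (uint16_t)((ttl << 8) | proto);
 *	new_w = (uint16_t)(((ttl - 1) << 8) | proto);
 *	ip_sum = __packet_fix_sum(ip_sum, old_w, new_w);
 */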
1787 | |
1788 | /* MUST be used for uint32_t fields */ |
1789 | __attribute__((always_inline)) |
1790 | static inline void |
__packet_fix_hdr_sum(uint8_t *__sized_by(4) field, uint16_t *csum, uint32_t new)
1792 | { |
1793 | uint32_t old; |
	memcpy(&old, field, sizeof(old));
	memcpy(field, &new, sizeof(uint32_t));
	*csum = __packet_fix_sum(__packet_fix_sum(*csum, (uint16_t)(old >> 16),
	    (uint16_t)(new >> 16)), (uint16_t)(old & 0xffff),
	    (uint16_t)(new & 0xffff));
1799 | } |
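
/*
 * A sketch of the 32-bit variant in use (illustrative only): rewriting
 * an IPv4 source address in place while keeping the header checksum
 * consistent.  A NAT-style path would fix the transport pseudo-header
 * checksum the same way.  'ip' and 'new_src' are hypothetical locals;
 * the field must really be 4 bytes wide, per __sized_by(4).
 *
 *	__packet_fix_hdr_sum((uint8_t *)&ip->ip_src, &ip->ip_sum,
 *	    new_src);
 */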
1800 | |
1801 | __attribute__((always_inline)) |
1802 | static inline void * |
1803 | __buflet_get_data_address(const void *buf) |
1804 | { |
1805 | return __unsafe_forge_single(void *, (void *)(BLT_ADDR(buf)->buf_addr)); |
1806 | } |
1807 | |
1808 | #ifdef KERNEL |
1809 | __attribute__((always_inline)) |
1810 | static inline errno_t |
1811 | __buflet_set_data_address(const void *buf, const void *addr) |
1812 | { |
1813 | /* buffer region is always marked as shareable */ |
1814 | ASSERT(BLT_ADDR(buf)->buf_ctl->bc_flags & SKMEM_BUFCTL_SHAREOK); |
1815 | |
1816 | /* full bounds checking will be performed during finalize */ |
1817 | if (__probable((uintptr_t)addr >= |
1818 | (uintptr_t)BLT_ADDR(buf)->buf_objaddr)) { |
1819 | _CASSERT(sizeof(BLT_ADDR(buf)->buf_addr) == |
1820 | sizeof(mach_vm_address_t)); |
1821 | /* deconst */ |
1822 | *(mach_vm_address_t *)(uintptr_t)&BLT_ADDR(buf)->buf_addr = |
1823 | (mach_vm_address_t)addr; |
1824 | return 0; |
1825 | } |
1826 | return ERANGE; |
1827 | } |
1828 | |
/*
 * Equivalent to __buflet_set_data_address(), but based on an offset
 * into the buffer object.  Packets/buflets set up this way should not
 * be passed directly to user space, since shared buffers are not yet
 * supported by the user-facing pool.
 */
1834 | __attribute__((always_inline)) |
1835 | static inline int |
1836 | __buflet_set_buffer_offset(const void *buf, const uint32_t off) |
1837 | { |
1838 | ASSERT(BLT_ADDR(buf)->buf_objlim != 0); |
1839 | |
1840 | if (__probable(off <= BLT_ADDR(buf)->buf_objlim)) { |
1841 | *(mach_vm_address_t *)(uintptr_t)&BLT_ADDR(buf)->buf_addr = |
1842 | (mach_vm_address_t)BLT_ADDR(buf)->buf_objaddr + off; |
1843 | return 0; |
1844 | } |
1845 | return ERANGE; |
1846 | } |
1847 | #endif /* KERNEL */ |
1848 | |
1849 | __attribute__((always_inline)) |
1850 | static inline int |
1851 | __buflet_set_data_offset(const void *buf, const uint32_t doff) |
1852 | { |
1853 | #ifdef KERNEL |
1854 | /* |
1855 | * Kernel-specific assertion. For user space, the metadata |
1856 | * region gets redirected to anonymous zero-filled pages at |
1857 | * defunct time, so ignore it there. |
1858 | */ |
1859 | ASSERT(BLT_ADDR(buf)->buf_dlim != 0); |
1860 | |
1861 | if (__probable((uint32_t)doff <= BLT_ADDR(buf)->buf_objlim)) { |
1862 | BLT_ADDR(buf)->buf_doff = doff; |
1863 | return 0; |
1864 | } |
1865 | return ERANGE; |
1866 | #else /* !KERNEL */ |
1867 | BLT_ADDR(buf)->buf_doff = doff; |
1868 | return 0; |
1869 | #endif /* KERNEL */ |
1870 | } |
1871 | |
1872 | __attribute__((always_inline)) |
1873 | static inline int |
1874 | __buflet_set_data_length(const void *buf, const uint32_t dlen) |
1875 | { |
1876 | #ifdef KERNEL |
1877 | /* |
1878 | * Kernel-specific assertion. For user space, the metadata |
1879 | * region gets redirected to anonymous zero-filled pages at |
1880 | * defunct time, so ignore it there. |
1881 | */ |
1882 | ASSERT(BLT_ADDR(buf)->buf_dlim != 0); |
1883 | |
1884 | if (__probable((uint32_t)dlen <= BLT_ADDR(buf)->buf_objlim)) { |
1885 | BLT_ADDR(buf)->buf_dlen = dlen; |
1886 | return 0; |
1887 | } |
1888 | return ERANGE; |
1889 | #else /* !KERNEL */ |
1890 | BLT_ADDR(buf)->buf_dlen = dlen; |
1891 | return 0; |
1892 | #endif /* KERNEL */ |
1893 | } |
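
/*
 * A typical fill sequence (a sketch; the surrounding steps depend on
 * the caller): write the payload at the buflet's data address, then
 * record where the data starts and how long it is.  Full bounds
 * checking against the buffer limits happens later, at finalize time.
 *
 *	uint8_t *base = __buflet_get_data_address(buf);
 *	memcpy(base + doff, payload, plen);
 *	(void) __buflet_set_data_offset(buf, doff);
 *	(void) __buflet_set_data_length(buf, plen);
 */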
1894 | |
1895 | __attribute__((always_inline)) |
1896 | static inline uint32_t |
1897 | __buflet_get_data_length(const void *buf) |
1898 | { |
1899 | return BLT_ADDR(buf)->buf_dlen; |
1900 | } |
1901 | |
1902 | #ifdef KERNEL |
1903 | __attribute__((always_inline)) |
1904 | static inline struct sksegment * |
1905 | __buflet_get_object_segment(const void *buf, kern_obj_idx_seg_t *idx) |
1906 | { |
1907 | _CASSERT(sizeof(obj_idx_t) == sizeof(kern_obj_idx_seg_t)); |
1908 | |
1909 | if (idx != NULL) { |
1910 | *idx = BLT_ADDR(buf)->buf_ctl->bc_idx; |
1911 | } |
1912 | |
1913 | return BLT_ADDR(buf)->buf_ctl->bc_slab->sl_seg; |
1914 | } |
1915 | #endif /* KERNEL */ |
1916 | |
1917 | __attribute__((always_inline)) |
1918 | static inline void * |
1919 | __buflet_get_object_address(const void *buf) |
1920 | { |
1921 | #ifdef KERNEL |
1922 | return (void *)(BLT_ADDR(buf)->buf_objaddr); |
1923 | #else /* !KERNEL */ |
	/*
	 * For user space, shared buffers are not available; hence the
	 * data address is immutable and always the same as the
	 * underlying buffer object address itself.
	 */
1929 | return __buflet_get_data_address(buf); |
1930 | #endif /* !KERNEL */ |
1931 | } |
1932 | |
1933 | __attribute__((always_inline)) |
1934 | static inline uint32_t |
1935 | __buflet_get_object_limit(const void *buf) |
1936 | { |
1937 | #ifdef KERNEL |
1938 | return BLT_ADDR(buf)->buf_objlim; |
1939 | #else /* !KERNEL */ |
	/*
	 * For user space, shared buffers are not available; hence the
	 * data limit is immutable and always the same as the underlying
	 * buffer object limit itself.
	 */
1945 | return (uint32_t)__buflet_get_data_limit(buf); |
1946 | #endif /* !KERNEL */ |
1947 | } |
1948 | |
1949 | __attribute__((always_inline)) |
1950 | static inline packet_trace_id_t |
1951 | __packet_get_trace_id(const uint64_t ph) |
1952 | { |
1953 | switch (SK_PTR_TYPE(ph)) { |
1954 | case NEXUS_META_TYPE_PACKET: |
		return PKT_ADDR(ph)->pkt_trace_id;
1957 | default: |
1958 | return 0; |
1959 | } |
1960 | } |
1961 | |
1962 | __attribute__((always_inline)) |
1963 | static inline void |
1964 | __packet_set_trace_id(const uint64_t ph, packet_trace_id_t id) |
1965 | { |
1966 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1967 | PKT_ADDR(ph)->pkt_trace_id = id; |
1968 | } |
1969 | |
1970 | __attribute__((always_inline)) |
1971 | static inline void |
1972 | __packet_trace_event(const uint64_t ph, uint32_t event) |
1973 | { |
1974 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1975 | #ifdef KERNEL |
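	/*
	 * KDBG() can compile away to nothing in some kernel
	 * configurations, which would leave 'event' and 'ph' unused;
	 * hence the pragma below.
	 */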
1976 | #pragma unused(event, ph) |
1977 | KDBG(event, PKT_ADDR(ph)->pkt_trace_id); |
1978 | #else /* !KERNEL */ |
1979 | kdebug_trace(event, PKT_ADDR(ph)->pkt_trace_id, 0, 0, 0); |
1980 | #endif /* !KERNEL */ |
1981 | } |
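
/*
 * Illustrative pairing of the trace calls (the event code below is a
 * hypothetical placeholder, not a real kdebug code): a producer tags
 * the packet once, then emits events at interesting points so the
 * packet can be followed through the stack.
 *
 *	__packet_set_trace_id(ph, trace_id);
 *	__packet_trace_event(ph, DBG_EXAMPLE_TX_ENQUEUE);
 */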
1982 | |
1983 | #ifdef KERNEL |
1984 | __attribute__((always_inline)) |
1985 | static inline packet_trace_tag_t |
1986 | __packet_get_trace_tag(const uint64_t ph) |
1987 | { |
1988 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1989 | return PKT_ADDR(ph)->pkt_trace_tag; |
1990 | } |
1991 | |
1992 | __attribute__((always_inline)) |
1993 | static inline void |
1994 | __packet_set_trace_tag(const uint64_t ph, packet_trace_tag_t tag) |
1995 | { |
1996 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
1997 | PKT_ADDR(ph)->pkt_trace_tag = tag; |
1998 | } |
1999 | |
2000 | static inline void |
2001 | __packet_perform_tx_completion_callbacks(const kern_packet_t ph, ifnet_t ifp) |
2002 | { |
2003 | /* |
2004 | * NOTE: this function can be called with ifp as NULL. |
2005 | */ |
2006 | uint64_t ts; |
2007 | kern_return_t tx_status; |
2008 | uintptr_t cb_arg, cb_data; |
2009 | struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(ph); |
2010 | |
2011 | ASSERT((kpkt->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0); |
	(void) __packet_get_tx_completion_status(ph, &tx_status);
	__packet_get_tx_completion_data(ph, &cb_arg, &cb_data);
	__packet_get_timestamp(ph, &ts, NULL);
2015 | while (kpkt->pkt_tx_compl_callbacks != 0) { |
2016 | mbuf_tx_compl_func cb; |
2017 | uint32_t i; |
2018 | |
2019 | i = ffs(kpkt->pkt_tx_compl_callbacks) - 1; |
		kpkt->pkt_tx_compl_callbacks &= ~(1U << i);
2021 | cb = m_get_tx_compl_callback(i); |
2022 | if (__probable(cb != NULL)) { |
2023 | cb(kpkt->pkt_tx_compl_context, ifp, ts, cb_arg, cb_data, |
2024 | tx_status); |
2025 | } |
2026 | } |
2027 | kpkt->pkt_pflags &= ~PKT_F_TX_COMPL_TS_REQ; |
2028 | } |
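
/*
 * Putting the TX completion pieces together (a sketch; actual call
 * sites live in the driver/nexus code): the sender attaches callback
 * data and requests a completion timestamp; at completion time the
 * driver records the status, and the stack then runs the registered
 * callbacks via the routine above.
 *
 *	// sender side
 *	(void) __packet_set_tx_completion_data(ph, cb_arg, cb_data);
 *
 *	// completion side
 *	(void) __packet_set_tx_completion_status(ph, KERN_SUCCESS);
 *	__packet_perform_tx_completion_callbacks(ph, ifp);
 */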
2029 | |
2030 | static inline void * |
2031 | __packet_get_priv(const kern_packet_t ph) |
2032 | { |
2033 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
2034 | return PKT_ADDR(ph)->pkt_priv; |
2035 | } |
2036 | |
2037 | static inline void |
2038 | __packet_set_priv(const uint64_t ph, void *priv) |
2039 | { |
2040 | PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET); |
2041 | PKT_ADDR(ph)->pkt_priv = priv; |
2042 | } |
2043 | #endif /* KERNEL */ |
2044 | |
2045 | #endif /* PRIVATE || BSD_KERNEL_PRIVATE */ |
2046 | #endif /* !_SKYWALK_PACKET_COMMON_H_ */ |
2047 | |