/*
 * Copyright (c) 2019-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <net/pktap.h>
#include <sys/sdt.h>

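/*
 * Thin wrappers around the packet pool allocation/free routines used by
 * the conversion code below.  nx_netif_alloc_packet() does a non-blocking
 * allocation of a single packet of at least `sz' bytes; it returns NULL
 * (and fires the alloc__fail probe) if the pool is exhausted, otherwise
 * it returns the kernel packet and optionally stores its handle in *php.
 */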
SK_NO_INLINE_ATTRIBUTE
struct __kern_packet *
nx_netif_alloc_packet(struct kern_pbufpool *pp, uint32_t sz, kern_packet_t *php)
{
	kern_packet_t ph;
	ph = pp_alloc_packet_by_size(pp, sz, SKMEM_NOSLEEP);
	if (__improbable(ph == 0)) {
		DTRACE_SKYWALK2(alloc__fail, struct kern_pbufpool *,
		    pp, size_t, sz);
		return NULL;
	}
	if (php != NULL) {
		*php = ph;
	}
	return SK_PTR_ADDR_KPKT(ph);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_free_packet(struct __kern_packet *pkt)
{
	pp_free_packet_single(pkt);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_free_packet_chain(struct __kern_packet *pkt_chain, int *cnt)
{
	pp_free_packet_chain(pkt_chain, cnt);
}

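/*
 * Callers of the conversion routines must pass exactly one of
 * NETIF_CONVERT_TX or NETIF_CONVERT_RX; anything else is a bug.
 */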
static void
__check_convert_flags(uint32_t flags)
{
	VERIFY((flags & (NETIF_CONVERT_TX | NETIF_CONVERT_RX)) != 0);
	VERIFY((flags & (NETIF_CONVERT_TX | NETIF_CONVERT_RX)) !=
	    (NETIF_CONVERT_TX | NETIF_CONVERT_RX));
}

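/*
 * Record the 802.1Q tag of a filter packet.  The tag is read from the
 * in-packet VLAN header when present; otherwise it is looked up in the
 * metadata of the attached mbuf or packet.  If no tag can be found, the
 * filter packet's VLAN metadata is left untouched.
 */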
SK_NO_INLINE_ATTRIBUTE
static void
fill_vlan_info(struct __kern_packet *fpkt)
{
	uint8_t *buf;
	struct ether_vlan_header *evl;
	uint16_t tag;
	boolean_t tag_in_pkt = FALSE;

	if (fpkt->pkt_length < sizeof(*evl)) {
		DTRACE_SKYWALK2(bad__len, struct __kern_packet *, fpkt,
		    uint32_t, fpkt->pkt_length);
		return;
	}
	MD_BUFLET_ADDR_ABS(fpkt, buf);
	buf += fpkt->pkt_headroom;
	evl = (struct ether_vlan_header *)(void *)buf;
	if (ntohs(evl->evl_encap_proto) == ETHERTYPE_VLAN) {
		tag = ntohs(evl->evl_tag);
		tag_in_pkt = TRUE;
		DTRACE_SKYWALK1(tag__in__pkt, uint16_t, tag);
	} else {
		struct mbuf *m;
		struct __kern_packet *pkt;

		/*
		 * A filter packet must always have an mbuf or a packet
		 * attached.
		 */
		VERIFY((fpkt->pkt_pflags & PKT_F_MBUF_DATA) != 0 ||
		    (fpkt->pkt_pflags & PKT_F_PKT_DATA) != 0);

		if ((fpkt->pkt_pflags & PKT_F_MBUF_DATA) != 0) {
			m = fpkt->pkt_mbuf;
			VERIFY(m != NULL);
			if (mbuf_get_vlan_tag(m, &tag) != 0) {
				return;
			}
			DTRACE_SKYWALK1(tag__from__mbuf, uint16_t, tag);
		} else if ((fpkt->pkt_pflags & PKT_F_PKT_DATA) != 0) {
			pkt = fpkt->pkt_pkt;
			VERIFY(pkt != NULL);

			/*
			 * The attached packet could have an mbuf attached
			 * if it came from the compat path.
			 */
			if ((pkt->pkt_pflags & PKT_F_MBUF_DATA) != 0) {
				m = pkt->pkt_mbuf;
				VERIFY(m != NULL);
				if (mbuf_get_vlan_tag(m, &tag) != 0) {
					return;
				}
				DTRACE_SKYWALK1(tag__from__inner__mbuf,
				    uint16_t, tag);
			} else {
				/*
				 * XXX
				 * No native driver today fills in the vlan tag
				 * metadata. This code will work when the driver
				 * adds support for this.
				 */
				VERIFY((pkt->pkt_pflags & PKT_F_PKT_DATA) == 0);
				if (__packet_get_vlan_tag(SK_PKT2PH(pkt), &tag,
				    NULL) != 0) {
					return;
				}
				DTRACE_SKYWALK1(tag__from__pkt, uint16_t, tag);
			}
		} else {
			panic("filter packet has no mbuf or packet attached: "
			    "pkt_pflags 0x%llx\n", fpkt->pkt_pflags);
			/* NOTREACHED */
			__builtin_unreachable();
		}
	}
	kern_packet_set_vlan_tag(SK_PKT2PH(fpkt), tag, tag_in_pkt);
}

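/*
 * Convert an mbuf into a filter packet allocated from the filter pool.
 * The contents are copied (and truncated to the pool's buffer size if
 * necessary), the trailing FCS is stripped on the RX side, and the source
 * mbuf remains attached to the filter packet.  On failure the mbuf is
 * freed and NULL is returned.
 */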
static struct __kern_packet *
nx_netif_mbuf_to_filter_pkt(struct nexus_netif_adapter *nifna,
    struct mbuf *m, uint32_t flags)
{
	struct __kern_packet *fpkt = NULL;
	struct nx_netif *nif = nifna->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;
	struct kern_pbufpool *pp = nif->nif_filter_pp;
	ifnet_t ifp = nif->nif_ifp;
	boolean_t is_l3, truncated = FALSE;
	enum txrx type;
	uint8_t off, hlen;
	kern_packet_t fph;
	int err, mlen;

	__check_convert_flags(flags);
	is_l3 = (ifp->if_family == IFNET_FAMILY_UTUN ||
	    ifp->if_family == IFNET_FAMILY_IPSEC);

	off = ((flags & NETIF_CONVERT_TX) != 0) ?
	    (uint8_t)ifp->if_tx_headroom : 0;
	hlen = is_l3 ? 0 : ifnet_hdrlen(ifp);
	mlen = m_pktlen(m);

	ASSERT(pp != NULL);
	if (__improbable((off + mlen) > PP_BUF_SIZE_DEF(pp))) {
		VERIFY(off < PP_BUF_SIZE_DEF(pp));
		mlen = PP_BUF_SIZE_DEF(pp) - off;
		truncated = TRUE;

		DTRACE_SKYWALK5(mbuf__truncated,
		    struct nexus_netif_adapter *, nifna,
		    struct mbuf *, m, uint8_t, off, int, mlen,
		    uint32_t, PP_BUF_SIZE_DEF(pp));
		STATS_INC(nifs, NETIF_STATS_FILTER_PKT_TRUNCATED);
	}
	fpkt = nx_netif_alloc_packet(pp, off + mlen, &fph);
	if (__improbable(fpkt == NULL)) {
		DTRACE_SKYWALK2(alloc__fail, struct nexus_netif_adapter *,
		    nifna, struct mbuf *, m);
		STATS_INC(nifs, NETIF_STATS_FILTER_DROP_PKT_ALLOC_FAIL);
		goto drop;
	}
	type = ((flags & NETIF_CONVERT_TX) != 0) ? NR_TX : NR_RX;

	if (__improbable((m->m_flags & M_HASFCS) != 0)) {
		if (type != NR_RX) {
			/*
			 * There shouldn't be an FCS for TX packets
			 */
			DTRACE_SKYWALK2(bad__flags,
			    struct nexus_netif_adapter *,
			    nifna, struct mbuf *, m);
			goto drop;
		}
		if (mlen > ETHER_CRC_LEN) {
			mlen -= ETHER_CRC_LEN;
		} else {
			DTRACE_SKYWALK3(bad__pkt__size,
			    struct nexus_netif_adapter *,
			    nifna, struct mbuf *, m, int, mlen);
			goto drop;
		}
	}
	/*
	 * XXX
	 * If the source packet has any checksum flags, the filter packet will
	 * not have valid checksums. To fill in the checksums, we need to do
	 * something similar to bridge_finalize_cksum() for packets.
	 */
	err = __packet_initialize_with_mbuf(fpkt, m, off, hlen);
	VERIFY(err == 0);
	nif->nif_pkt_copy_from_mbuf(type, fph, off, m, 0,
	    mlen, FALSE, 0);

	err = __packet_finalize_with_mbuf(fpkt);
	VERIFY(err == 0);

	/*
	 * XXX
	 * __packet_finalize_with_mbuf() sets pkt_length to the non-truncated
	 * length. We need to change it back to the truncated length.
	 */
	fpkt->pkt_length = mlen;
	if (!is_l3) {
		fill_vlan_info(fpkt);
	}

	/*
	 * Verify that __packet_finalize_with_mbuf() is setting the truncated
	 * flag correctly.
	 */
	if (truncated) {
		VERIFY((fpkt->pkt_pflags & PKT_F_TRUNCATED) != 0);
	} else {
		VERIFY((fpkt->pkt_pflags & PKT_F_TRUNCATED) == 0);
	}
	return fpkt;
drop:
	if (fpkt != NULL) {
		/* ensure mbuf hasn't been attached */
		ASSERT(fpkt->pkt_mbuf == NULL &&
		    (fpkt->pkt_pflags & PKT_F_MBUF_DATA) == 0);
		nx_netif_free_packet(fpkt);
	}
	STATS_INC(nifs, NETIF_STATS_DROP);
	m_freem(m);
	return NULL;
}

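/*
 * Convert an mbuf chain into a filter packet chain.  mbufs that fail
 * conversion are dropped by nx_netif_mbuf_to_filter_pkt().
 */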
struct __kern_packet *
nx_netif_mbuf_to_filter_pkt_chain(struct nexus_netif_adapter *nifna,
    struct mbuf *m_chain, uint32_t flags)
{
	struct mbuf *m = m_chain, *next;
	struct __kern_packet *pkt_head = NULL, *pkt;
	struct __kern_packet **pkt_tailp = &pkt_head;
	int c = 0;

	while (m != NULL) {
		next = m->m_nextpkt;
		m->m_nextpkt = NULL;

		pkt = nx_netif_mbuf_to_filter_pkt(nifna, m, flags);
		if (pkt != NULL) {
			c++;
			*pkt_tailp = pkt;
			pkt_tailp = &pkt->pkt_nextpkt;
		}
		m = next;
	}
	DTRACE_SKYWALK2(pkt__chain, struct __kern_packet *, pkt_head,
	    int, c);
	return pkt_head;
}

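/*
 * Recover the mbuf attached to a filter packet and free the filter
 * packet itself.
 */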
static struct mbuf *
nx_netif_filter_pkt_to_mbuf(struct nexus_netif_adapter *nifna,
    struct __kern_packet *pkt, uint32_t flags)
{
#pragma unused (nifna)
	struct mbuf *m;

	__check_convert_flags(flags);
	ASSERT((pkt->pkt_pflags & PKT_F_MBUF_DATA) != 0);

	m = pkt->pkt_mbuf;
	ASSERT(m != NULL);
	KPKT_CLEAR_MBUF_DATA(pkt);
	nx_netif_free_packet(pkt);
	return m;
}

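/*
 * Convert a filter packet chain back into the original mbuf chain.
 */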
struct mbuf *
nx_netif_filter_pkt_to_mbuf_chain(struct nexus_netif_adapter *nifna,
    struct __kern_packet *pkt_chain, uint32_t flags)
{
	struct __kern_packet *pkt = pkt_chain, *next;
	struct mbuf *m_head = NULL, *m;
	struct mbuf **m_tailp = &m_head;
	int c = 0;

	while (pkt != NULL) {
		next = pkt->pkt_nextpkt;
		pkt->pkt_nextpkt = NULL;

		m = nx_netif_filter_pkt_to_mbuf(nifna, pkt, flags);
		if (m != NULL) {
			c++;
			*m_tailp = m;
			m_tailp = &m->m_nextpkt;
		}
		pkt = next;
	}
	DTRACE_SKYWALK2(mbuf__chain, struct mbuf *, m_head, int, c);
	return m_head;
}

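/*
 * Convert a native or compat packet into a filter packet allocated from
 * the filter pool.  The contents are copied from the attached mbuf when
 * one is present (compat path) or from the packet's own buffer, truncated
 * to the pool's buffer size if necessary, with the trailing FCS stripped
 * on the RX side.  The source packet remains attached to the filter
 * packet.  On failure both packets are freed and NULL is returned.
 */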
struct __kern_packet *
nx_netif_pkt_to_filter_pkt(struct nexus_netif_adapter *nifna,
    struct __kern_packet *pkt, uint32_t flags)
{
	struct __kern_packet *fpkt = NULL;
	struct nx_netif *nif = nifna->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;
	struct kern_pbufpool *pp = nif->nif_filter_pp;
	ifnet_t ifp = nif->nif_ifp;
	boolean_t is_l3, truncated = FALSE;
	enum txrx type;
	uint8_t off, hlen;
	struct mbuf *m = NULL;
	kern_packet_t fph, ph;
	int err, plen;

	__check_convert_flags(flags);
	ph = SK_PKT2PH(pkt);
	is_l3 = (ifp->if_family == IFNET_FAMILY_UTUN ||
	    ifp->if_family == IFNET_FAMILY_IPSEC);

	off = ((flags & NETIF_CONVERT_TX) != 0) ?
	    (uint8_t)ifp->if_tx_headroom : 0;
	hlen = is_l3 ? 0 : ifnet_hdrlen(ifp);

	/*
	 * The packet coming from the compat path could be empty or have
	 * truncated contents. We have to copy the contents from the
	 * attached mbuf. We also don't support attaching a filter
	 * packet (one that already has a packet attached) to another
	 * filter packet.
	 */
	ASSERT((pkt->pkt_pflags & PKT_F_PKT_DATA) == 0);
	if ((pkt->pkt_pflags & PKT_F_MBUF_DATA) != 0) {
		m = pkt->pkt_mbuf;
		plen = m_pktlen(m);
	} else {
		plen = pkt->pkt_length;
	}
	ASSERT(pp != NULL);
	if (__improbable((off + plen) > PP_BUF_SIZE_DEF(pp))) {
		VERIFY(off < PP_BUF_SIZE_DEF(pp));
		plen = PP_BUF_SIZE_DEF(pp) - off;
		truncated = TRUE;

		DTRACE_SKYWALK5(pkt__truncated,
		    struct nexus_netif_adapter *, nifna,
		    struct __kern_packet *, pkt, uint8_t, off,
		    int, plen, uint32_t, PP_BUF_SIZE_DEF(pp));
		STATS_INC(nifs, NETIF_STATS_FILTER_PKT_TRUNCATED);
	}
	fpkt = nx_netif_alloc_packet(pp, off + plen, &fph);
	if (__improbable(fpkt == NULL)) {
		DTRACE_SKYWALK2(alloc__fail, struct nexus_netif_adapter *,
		    nifna, struct __kern_packet *, pkt);
		STATS_INC(nifs, NETIF_STATS_FILTER_DROP_PKT_ALLOC_FAIL);
		goto drop;
	}
	fpkt->pkt_link_flags = 0;
	fpkt->pkt_headroom = off;
	fpkt->pkt_l2_len = hlen;
	type = ((flags & NETIF_CONVERT_TX) != 0) ? NR_TX : NR_RX;

	if (__improbable((pkt->pkt_link_flags & PKT_LINKF_ETHFCS) != 0 ||
	    (m != NULL && (m->m_flags & M_HASFCS) != 0))) {
		if (type != NR_RX) {
			/*
			 * There shouldn't be an FCS for TX packets
			 */
			DTRACE_SKYWALK2(bad__flags,
			    struct nexus_netif_adapter *, nifna,
			    struct __kern_packet *, pkt);
			goto drop;
		}
		if (plen > ETHER_CRC_LEN) {
			plen -= ETHER_CRC_LEN;
		} else {
			DTRACE_SKYWALK3(bad__pkt__size,
			    struct nexus_netif_adapter *, nifna,
			    struct __kern_packet *, pkt, int, plen);
			goto drop;
		}
	}
	/*
	 * XXX
	 * If the source packet has any checksum flags, the filter packet will
	 * not have valid checksums. To fill in the checksums, we need to do
	 * something similar to bridge_finalize_cksum() for packets.
	 */
	if (m != NULL) {
		nif->nif_pkt_copy_from_mbuf(type, fph, off, m, 0,
		    plen, FALSE, 0);
	} else {
		nif->nif_pkt_copy_from_pkt(type, fph, off, ph,
		    pkt->pkt_headroom, plen, FALSE, 0, 0, FALSE);
	}
	ASSERT((fpkt->pkt_pflags & PKT_F_PKT_DATA) == 0);
	ASSERT((fpkt->pkt_pflags & PKT_F_MBUF_DATA) == 0);
	ASSERT(fpkt->pkt_pkt == NULL);
	ASSERT(pkt->pkt_nextpkt == NULL);
	fpkt->pkt_pkt = pkt;
	fpkt->pkt_pflags |= PKT_F_PKT_DATA;
	if (truncated) {
		fpkt->pkt_pflags |= PKT_F_TRUNCATED;
	}
	/*
	 * XXX
	 * Unlike the mbuf case, __packet_finalize below correctly sets
	 * pkt_length to the buflet length (possibly truncated). We set
	 * pkt_length here so that fill_vlan_info can use it.
	 */
	fpkt->pkt_length = plen;
	if (!is_l3) {
		fill_vlan_info(fpkt);
	}
	err = __packet_finalize(fph);
	VERIFY(err == 0);
	return fpkt;
drop:
	if (fpkt != NULL) {
		/* ensure pkt hasn't been attached */
		ASSERT(fpkt->pkt_pkt == NULL &&
		    (fpkt->pkt_pflags & PKT_F_PKT_DATA) == 0);
		nx_netif_free_packet(fpkt);
	}
	STATS_INC(nifs, NETIF_STATS_DROP);
	nx_netif_free_packet(pkt);
	return NULL;
}

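/*
 * Convert a packet chain into a filter packet chain.  Packets that fail
 * conversion are dropped by nx_netif_pkt_to_filter_pkt().
 */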
struct __kern_packet *
nx_netif_pkt_to_filter_pkt_chain(struct nexus_netif_adapter *nifna,
    struct __kern_packet *pkt_chain, uint32_t flags)
{
	struct __kern_packet *pkt = pkt_chain, *next;
	struct __kern_packet *p_head = NULL, *p;
	struct __kern_packet **p_tailp = &p_head;
	int c = 0;

	while (pkt != NULL) {
		next = pkt->pkt_nextpkt;
		pkt->pkt_nextpkt = NULL;

		p = nx_netif_pkt_to_filter_pkt(nifna, pkt, flags);
		if (p != NULL) {
			c++;
			*p_tailp = p;
			p_tailp = &p->pkt_nextpkt;
		}
		pkt = next;
	}
	DTRACE_SKYWALK2(pkt__chain, struct __kern_packet *, p_head, int, c);
	return p_head;
}

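/*
 * Recover the packet attached to a filter packet and free the filter
 * packet itself.
 */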
static struct __kern_packet *
nx_netif_filter_pkt_to_pkt(struct nexus_netif_adapter *nifna,
    struct __kern_packet *fpkt, uint32_t flags)
{
#pragma unused (nifna)
	struct __kern_packet *pkt;

	__check_convert_flags(flags);
	ASSERT((fpkt->pkt_pflags & PKT_F_PKT_DATA) != 0);
	ASSERT((fpkt->pkt_pflags & PKT_F_MBUF_DATA) == 0);

	pkt = fpkt->pkt_pkt;
	ASSERT(pkt != NULL);
	KPKT_CLEAR_PKT_DATA(fpkt);
	nx_netif_free_packet(fpkt);
	return pkt;
}

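/*
 * Convert a filter packet chain back into the original packet chain.
 */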
struct __kern_packet *
nx_netif_filter_pkt_to_pkt_chain(struct nexus_netif_adapter *nifna,
    struct __kern_packet *pkt_chain, uint32_t flags)
{
	struct __kern_packet *pkt = pkt_chain, *next;
	struct __kern_packet *p_head = NULL, *p;
	struct __kern_packet **p_tailp = &p_head;
	int c = 0;

	while (pkt != NULL) {
		next = pkt->pkt_nextpkt;
		pkt->pkt_nextpkt = NULL;

		p = nx_netif_filter_pkt_to_pkt(nifna, pkt, flags);
		if (p != NULL) {
			c++;
			*p_tailp = p;
			p_tailp = &p->pkt_nextpkt;
		}
		pkt = next;
	}
	DTRACE_SKYWALK2(pkt__chain, struct __kern_packet *, p_head, int, c);
	return p_head;
}

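/*
 * Copy a native packet into a newly allocated mbuf, including the L2
 * header.  This is only used on the custom ether TX path; the source
 * packet is freed once the copy is done.
 */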
struct mbuf *
nx_netif_pkt_to_mbuf(struct nexus_netif_adapter *nifna,
    struct __kern_packet *pkt, uint32_t flags)
{
	struct nx_netif *nif = nifna->nifna_netif;
	ifnet_t ifp = nif->nif_ifp;
	struct mbuf *m;
	unsigned int one = 1;
	size_t len;
	uint16_t pad, hlen;
	kern_packet_t ph;
	enum txrx type;
	int err;

	__check_convert_flags(flags);
	/* Compat packets or filter packets should never land here */
	ASSERT((pkt->pkt_pflags & PKT_F_MBUF_DATA) == 0);
	ASSERT((pkt->pkt_pflags & PKT_F_PKT_DATA) == 0);

	/* Outbound packets should not have this */
	ASSERT((pkt->pkt_link_flags & PKT_LINKF_ETHFCS) == 0);

	/* This function is only meant to be used in the custom ether TX path */
	ASSERT((flags & NETIF_CONVERT_TX) != 0);
	type = NR_TX;

	/* Packet must include L2 header */
	hlen = ifnet_hdrlen(ifp);
	pad = (uint16_t)P2ROUNDUP(hlen, sizeof(uint32_t)) - hlen;
	len = pkt->pkt_length;

	err = mbuf_allocpacket(MBUF_WAITOK, pad + len, &one, &m);
	VERIFY(err == 0);
	m->m_data += pad;
	m->m_pkthdr.pkt_hdr = mtod(m, uint8_t *);
	ph = SK_PTR_ENCODE(pkt, METADATA_TYPE(pkt), METADATA_SUBTYPE(pkt));

	nif->nif_pkt_copy_to_mbuf(type, ph, pkt->pkt_headroom,
	    m, 0, (uint32_t)len, FALSE, 0);
	nx_netif_free_packet(pkt);
	return m;
}

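/*
 * Copy a packet into a new packet allocated from the destination
 * adapter's TX or RX pool, adding the driver's TX headroom on the
 * transmit side.  Contents come from the attached mbuf when present
 * (compat path) or from the packet itself; selected metadata is carried
 * over.  The source packet is freed on both success and failure.
 */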
struct __kern_packet *
nx_netif_pkt_to_pkt(struct nexus_netif_adapter *nifna,
    struct __kern_packet *pkt, uint32_t flags)
{
	struct nx_netif *nif = nifna->nifna_netif;
	struct nexus_adapter *na = &nifna->nifna_up;
	struct netif_stats *nifs = &nif->nif_stats;
	ifnet_t ifp = nif->nif_ifp;
	struct kern_pbufpool *pp;
	struct __kern_packet *dpkt = NULL;
	struct mbuf *m = NULL;
	uint8_t off, hlen;
	int len;
	kern_packet_t ph, dph;
	enum txrx type;
	int err;

	__check_convert_flags(flags);
	/* Filter packets should never land here */
	ASSERT((pkt->pkt_pflags & PKT_F_PKT_DATA) == 0);

	/* Only support these target NAs for now */
	type = ((flags & NETIF_CONVERT_TX) != 0) ? NR_TX : NR_RX;
	if (type == NR_TX) {
		ASSERT(na->na_type == NA_NETIF_DEV ||
		    na->na_type == NA_NETIF_COMPAT_DEV);
		pp = skmem_arena_nexus(na->na_arena)->arn_tx_pp;
		off = (uint8_t)ifp->if_tx_headroom;
	} else {
		ASSERT(na->na_type == NA_NETIF_VP ||
		    na->na_type == NA_NETIF_DEV);
		pp = skmem_arena_nexus(na->na_arena)->arn_rx_pp;
		off = 0;
	}
	/* Packet must include L2 header */
	hlen = ifnet_hdrlen(ifp);

	/*
	 * Source packet has no data. Need to copy from the attached mbuf.
	 */
	if ((pkt->pkt_pflags & PKT_F_MBUF_DATA) != 0) {
		/* An outbound packet shouldn't have an mbuf attached */
		ASSERT(na->na_type == NA_NETIF_VP ||
		    na->na_type == NA_NETIF_DEV);
		m = pkt->pkt_mbuf;
		len = m_pktlen(m);
	} else {
		len = pkt->pkt_length;
	}
	ASSERT(pp != NULL);

	ph = SK_PKT2PH(pkt);
	dpkt = nx_netif_alloc_packet(pp, off + len, &dph);
	if (__improbable(dpkt == NULL)) {
		if (type == NR_TX) {
			STATS_INC(nifs, NETIF_STATS_VP_DROP_TX_ALLOC_FAIL);
		} else {
			STATS_INC(nifs, NETIF_STATS_VP_DROP_RX_ALLOC_FAIL);
		}
		DTRACE_SKYWALK2(alloc__fail, struct nexus_netif_adapter *,
		    nifna, struct __kern_packet *, pkt);
		goto drop;
	}
	if (__improbable((off + len) > PP_BUF_SIZE_DEF(pp))) {
		STATS_INC(nifs, NETIF_STATS_VP_DROP_PKT_TOO_BIG);
		DTRACE_SKYWALK5(pkt__too__large,
		    struct nexus_netif_adapter *, nifna,
		    struct __kern_packet *, pkt, uint8_t, off, int, len,
		    uint32_t, PP_BUF_SIZE_DEF(pp));
		goto drop;
	}
	if (__improbable((pkt->pkt_link_flags & PKT_LINKF_ETHFCS) != 0 ||
	    (m != NULL && (m->m_flags & M_HASFCS) != 0))) {
		if (type != NR_RX) {
			/*
			 * There shouldn't be an FCS for TX packets
			 */
			DTRACE_SKYWALK2(bad__flags,
			    struct nexus_netif_adapter *, nifna,
			    struct __kern_packet *, pkt);
			goto drop;
		}
		if (len > ETHER_CRC_LEN) {
			len -= ETHER_CRC_LEN;
		} else {
			DTRACE_SKYWALK3(bad__pkt__size,
			    struct nexus_netif_adapter *, nifna,
			    struct __kern_packet *, pkt, int, len);
			goto drop;
		}
	}
	dpkt->pkt_link_flags = 0;
	dpkt->pkt_headroom = off;
	dpkt->pkt_l2_len = hlen;

	/* Copy optional metadata */
	dpkt->pkt_pflags = (pkt->pkt_pflags & PKT_F_COPY_MASK);
	_PKT_COPY_OPT_DATA(pkt, dpkt);

	/* Copy Transmit completion metadata */
	_PKT_COPY_TX_PORT_DATA(pkt, dpkt);

	/* Copy packet contents */
	if (m != NULL) {
		nif->nif_pkt_copy_from_mbuf(type, dph, off, m, 0,
		    len, FALSE, 0);
	} else {
		nif->nif_pkt_copy_from_pkt(type, dph, off, ph,
		    pkt->pkt_headroom, len, FALSE, 0, 0, FALSE);
	}
	err = __packet_finalize(dph);
	VERIFY(err == 0);
	nx_netif_free_packet(pkt);
	return dpkt;

drop:
	if (dpkt != NULL) {
		nx_netif_free_packet(dpkt);
	}
	STATS_INC(nifs, NETIF_STATS_DROP);
	nx_netif_free_packet(pkt);
	return NULL;
}

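/*
 * Walk an mbuf chain and optionally report its tail, packet count and
 * total byte count.  nx_netif_pkt_chain_info() below does the same for
 * a packet chain.
 */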
void
nx_netif_mbuf_chain_info(struct mbuf *m_head, struct mbuf **m_tail,
    uint32_t *cnt, uint32_t *bytes)
{
	struct mbuf *m = m_head, *tail = NULL;
	uint32_t c = 0, b = 0;

	while (m != NULL) {
		c++;
		b += m_pktlen(m);
		tail = m;
		m = m->m_nextpkt;
	}
	if (m_tail != NULL) {
		*m_tail = tail;
	}
	if (cnt != NULL) {
		*cnt = c;
	}
	if (bytes != NULL) {
		*bytes = b;
	}
}

void
nx_netif_pkt_chain_info(struct __kern_packet *p_head,
    struct __kern_packet **p_tail, uint32_t *cnt, uint32_t *bytes)
{
	struct __kern_packet *p = p_head, *tail = NULL;
	uint32_t c = 0, b = 0;

	while (p != NULL) {
		c++;
		b += p->pkt_length;
		tail = p;
		p = p->pkt_nextpkt;
	}
	if (p_tail != NULL) {
		*p_tail = tail;
	}
	if (cnt != NULL) {
		*cnt = c;
	}
	if (bytes != NULL) {
		*bytes = b;
	}
}

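/*
 * Query the driver for its device MTU via SIOCGIFDEVMTU and report the
 * larger of the maximum and current values.
 */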
int
nx_netif_get_max_mtu(ifnet_t ifp, uint32_t *max_mtu)
{
	struct ifreq ifr;
	int err;

	bzero(&ifr, sizeof(ifr));
	err = ifnet_ioctl(ifp, 0, SIOCGIFDEVMTU, &ifr);
	if (err != 0) {
		SK_ERR("SIOCGIFDEVMTU failed for %s\n", if_name(ifp));
		return err;
	}
	*max_mtu = MAX(ifr.ifr_devmtu.ifdm_max, ifr.ifr_devmtu.ifdm_current);
	return 0;
}

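/*
 * Mirror an outbound packet to pktap, choosing the DLT based on the
 * interface family.  Families other than the ones listed below are not
 * tapped.
 */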
void
nx_netif_pktap_output(ifnet_t ifp, int af, struct __kern_packet *pkt)
{
	uint32_t dlt;
	uint32_t flags = PTH_FLAG_SOCKET;

	switch (ifp->if_family) {
	case IFNET_FAMILY_ETHERNET:
		dlt = DLT_EN10MB;
		break;
	case IFNET_FAMILY_CELLULAR:
	case IFNET_FAMILY_UTUN:
	case IFNET_FAMILY_IPSEC:
		dlt = DLT_RAW;
		break;
	default:
		DTRACE_SKYWALK1(invalid__family, ifnet_t, ifp);
		return;
	}
	if ((pkt->pkt_pflags & PKT_F_KEEPALIVE) != 0) {
		flags |= PTH_FLAG_KEEP_ALIVE;
	}
	if ((pkt->pkt_pflags & PKT_F_REXMT) != 0) {
		flags |= PTH_FLAG_REXMIT;
	}
	pktap_output_packet(ifp, af, dlt, -1, NULL, -1, NULL, SK_PKT2PH(pkt),
	    NULL, 0, pkt->pkt_flow_ip_proto, pkt->pkt_flow_token, flags);
}

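/*
 * Bump the interface's per-traffic-class outbound packet and byte
 * counters for the given service class.
 */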
__attribute__((always_inline))
inline void
netif_ifp_inc_traffic_class_out_pkt(struct ifnet *ifp, uint32_t svc,
    uint32_t cnt, uint32_t len)
{
	switch (svc) {
	case PKT_TC_BE:
		ifp->if_tc.ifi_obepackets += cnt;
		ifp->if_tc.ifi_obebytes += len;
		break;
	case PKT_TC_BK:
		ifp->if_tc.ifi_obkpackets += cnt;
		ifp->if_tc.ifi_obkbytes += len;
		break;
	case PKT_TC_VI:
		ifp->if_tc.ifi_ovipackets += cnt;
		ifp->if_tc.ifi_ovibytes += len;
		break;
	case PKT_TC_VO:
		ifp->if_tc.ifi_ovopackets += cnt;
		ifp->if_tc.ifi_ovobytes += len;
		break;
	default:
		break;
	}
}