/*
 * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <netinet/tcp_var.h>

static int kern_packet_clone_internal(const kern_packet_t, kern_packet_t *,
    uint32_t, kern_packet_copy_mode_t);
#if (DEBUG || DEVELOPMENT)
__attribute__((noreturn))
void
pkt_subtype_assert_fail(const kern_packet_t ph, uint64_t type, uint64_t subtype)
{
	panic("invalid packet handle 0x%llx (type %llu != %llu || "
	    "subtype %llu != %llu)", ph, SK_PTR_TYPE(ph), type,
	    SK_PTR_SUBTYPE(ph), subtype);
	/* NOTREACHED */
	__builtin_unreachable();
}

__attribute__((noreturn))
void
pkt_type_assert_fail(const kern_packet_t ph, uint64_t type)
{
	panic("invalid packet handle 0x%llx (type %llu != %llu)",
	    ph, SK_PTR_TYPE(ph), type);
	/* NOTREACHED */
	__builtin_unreachable();
}
#endif /* DEBUG || DEVELOPMENT */

errno_t
kern_packet_set_headroom(const kern_packet_t ph, const uint8_t headroom)
{
	return __packet_set_headroom(ph, headroom);
}

uint8_t
kern_packet_get_headroom(const kern_packet_t ph)
{
	return __packet_get_headroom(ph);
}

errno_t
kern_packet_set_link_header_offset(const kern_packet_t ph, const uint8_t off)
{
	return __packet_set_headroom(ph, off);
}

uint16_t
kern_packet_get_link_header_offset(const kern_packet_t ph)
{
	return __packet_get_headroom(ph);
}

errno_t
kern_packet_set_link_header_length(const kern_packet_t ph, const uint8_t off)
{
	return __packet_set_link_header_length(ph, off);
}

uint8_t
kern_packet_get_link_header_length(const kern_packet_t ph)
{
	return __packet_get_link_header_length(ph);
}

errno_t
kern_packet_set_link_broadcast(const kern_packet_t ph)
{
	return __packet_set_link_broadcast(ph);
}

boolean_t
kern_packet_get_link_broadcast(const kern_packet_t ph)
{
	return __packet_get_link_broadcast(ph);
}

errno_t
kern_packet_set_link_multicast(const kern_packet_t ph)
{
	return __packet_set_link_multicast(ph);
}

errno_t
kern_packet_set_link_ethfcs(const kern_packet_t ph)
{
	return __packet_set_link_ethfcs(ph);
}

boolean_t
kern_packet_get_link_multicast(const kern_packet_t ph)
{
	return __packet_get_link_multicast(ph);
}

boolean_t
kern_packet_get_link_ethfcs(const kern_packet_t ph)
{
	return __packet_get_link_ethfcs(ph);
}

/* deprecated -- no effect, use set_link_header_length instead */
errno_t
kern_packet_set_network_header_offset(const kern_packet_t ph,
    const uint16_t off)
{
#pragma unused(ph, off)
	return 0;
}

/* deprecated -- use get_link_header_length instead */
uint16_t
kern_packet_get_network_header_offset(const kern_packet_t ph)
{
	return (uint16_t)__packet_get_headroom(ph) +
	    (uint16_t)__packet_get_link_header_length(ph);
}

/* deprecated */
errno_t
kern_packet_set_transport_header_offset(const kern_packet_t ph,
    const uint16_t off)
{
#pragma unused(ph, off)
	return 0;
}

/* deprecated */
uint16_t
kern_packet_get_transport_header_offset(const kern_packet_t ph)
{
#pragma unused(ph)
	return 0;
}
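
/*
 * Migration sketch (illustrative only, not part of the original notes):
 * callers of the deprecated offset accessors above can derive the L3
 * offset from the headroom and link header length accessors, e.g.:
 *
 *	uint16_t l3_off = kern_packet_get_headroom(ph) +
 *	    kern_packet_get_link_header_length(ph);
 */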

boolean_t
kern_packet_get_transport_traffic_background(const kern_packet_t ph)
{
	return __packet_get_transport_traffic_background(ph);
}

boolean_t
kern_packet_get_transport_traffic_realtime(const kern_packet_t ph)
{
	return __packet_get_transport_traffic_realtime(ph);
}

boolean_t
kern_packet_get_transport_retransmit(const kern_packet_t ph)
{
	return __packet_get_transport_retransmit(ph);
}

boolean_t
kern_packet_get_transport_new_flow(const kern_packet_t ph)
{
	return __packet_get_transport_new_flow(ph);
}

boolean_t
kern_packet_get_transport_last_packet(const kern_packet_t ph)
{
	return __packet_get_transport_last_packet(ph);
}

int
kern_packet_set_service_class(const kern_packet_t ph,
    const kern_packet_svc_class_t sc)
{
	return __packet_set_service_class(ph, sc);
}

kern_packet_svc_class_t
kern_packet_get_service_class(const kern_packet_t ph)
{
	return __packet_get_service_class(ph);
}

errno_t
kern_packet_set_compression_generation_count(const kern_packet_t ph,
    uint32_t gencnt)
{
	return __packet_set_comp_gencnt(ph, gencnt);
}

errno_t
kern_packet_get_compression_generation_count(const kern_packet_t ph, uint32_t *pgencnt)
{
	return __packet_get_comp_gencnt(ph, pgencnt);
}

errno_t
kern_packet_get_service_class_index(const kern_packet_svc_class_t svc,
    uint32_t *index)
{
	if (index == NULL || !KPKT_VALID_SVC(svc)) {
		return EINVAL;
	}

	*index = KPKT_SVCIDX(svc);
	return 0;
}

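/*
 * Note: a packet is treated as high priority when it belongs to one of
 * the latency-sensitive service classes (interactive video, signaling,
 * voice, control) and its compression generation count indicates it is
 * either untracked (0) or a TCP ACK compression placeholder.
 */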
boolean_t
kern_packet_is_high_priority(const kern_packet_t ph)
{
	uint32_t sc;
	boolean_t is_hi_priority;

	sc = __packet_get_service_class(ph);

	switch (sc) {
	case PKT_SC_VI:
	case PKT_SC_SIG:
	case PKT_SC_VO:
	case PKT_SC_CTL:
		is_hi_priority = (PKT_ADDR(ph)->pkt_comp_gencnt == 0 ||
		    PKT_ADDR(ph)->pkt_comp_gencnt == TCP_ACK_COMPRESSION_DUMMY);
		break;

	case PKT_SC_BK_SYS:
	case PKT_SC_BK:
	case PKT_SC_BE:
	case PKT_SC_RD:
	case PKT_SC_OAM:
	case PKT_SC_AV:
	case PKT_SC_RV:
	default:
		is_hi_priority = false;
		break;
	}
	return is_hi_priority;
}

errno_t
kern_packet_set_traffic_class(const kern_packet_t ph,
    kern_packet_traffic_class_t tc)
{
	return __packet_set_traffic_class(ph, tc);
}

kern_packet_traffic_class_t
kern_packet_get_traffic_class(const kern_packet_t ph)
{
	return __packet_get_traffic_class(ph);
}

errno_t
kern_packet_set_inet_checksum(const kern_packet_t ph,
    const packet_csum_flags_t flags, const uint16_t start,
    const uint16_t stuff, const boolean_t tx)
{
	return __packet_set_inet_checksum(ph, flags, start, stuff, tx);
}

packet_csum_flags_t
kern_packet_get_inet_checksum(const kern_packet_t ph, uint16_t *start,
    uint16_t *val, const boolean_t tx)
{
	return __packet_get_inet_checksum(ph, start, val, tx);
}

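/*
 * Fast-path descriptor (FPD) metadata layout as encoded by the setters
 * below (inferred from the masks and shifts used here; not a published
 * format): bits 0-5 carry the context ID, bits 6-8 the command, and
 * bit 15 flags the FPD metadata as valid.
 */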
errno_t
kern_packet_set_fpd_command(const kern_packet_t ph,
    uint8_t cmd)
{
	if (cmd > 7) {
		return EINVAL;
	}
	PKT_ADDR(ph)->pkt_fpd_metadata |= ((cmd & 7) << 6) | 0x8000;
	return 0;
}

errno_t
kern_packet_set_fpd_sequence_number(const kern_packet_t ph,
    uint32_t seq_num)
{
	PKT_ADDR(ph)->pkt_fpd_seqnum = seq_num;
	PKT_ADDR(ph)->pkt_fpd_metadata |= 0x8000;
	return 0;
}

errno_t
kern_packet_set_fpd_context_id(const kern_packet_t ph,
    uint16_t ctx_id)
{
	PKT_ADDR(ph)->pkt_fpd_metadata |= (ctx_id & 0x3F) | 0x8000;
	return 0;
}

void
kern_packet_set_flow_uuid(const kern_packet_t ph, const uuid_t flow_uuid)
{
	__packet_set_flow_uuid(ph, flow_uuid);
}

void
kern_packet_get_flow_uuid(const kern_packet_t ph, uuid_t *flow_uuid)
{
	__packet_get_flow_uuid(ph, *flow_uuid);
}

void
kern_packet_clear_flow_uuid(const kern_packet_t ph)
{
	__packet_clear_flow_uuid(ph);
}

void
kern_packet_get_euuid(const kern_packet_t ph, uuid_t euuid)
{
	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
		uuid_copy(euuid, PKT_ADDR(ph)->pkt_policy_euuid);
	} else {
		uuid_clear(euuid);
	}
}

void
kern_packet_set_policy_id(const kern_packet_t ph, uint32_t policy_id)
{
	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
		PKT_ADDR(ph)->pkt_policy_id = policy_id;
	}
}

uint32_t
kern_packet_get_policy_id(const kern_packet_t ph)
{
	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
		return PKT_ADDR(ph)->pkt_policy_id;
	} else {
		return 0;
	}
}

void
kern_packet_set_skip_policy_id(const kern_packet_t ph, uint32_t skip_policy_id)
{
	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
		PKT_ADDR(ph)->pkt_skip_policy_id = skip_policy_id;
	}
}

uint32_t
kern_packet_get_skip_policy_id(const kern_packet_t ph)
{
	if (__probable(SK_PTR_TYPE(ph) == NEXUS_META_TYPE_PACKET)) {
		return PKT_ADDR(ph)->pkt_skip_policy_id;
	} else {
		return 0;
	}
}

uint32_t
kern_packet_get_data_length(const kern_packet_t ph)
{
	return __packet_get_data_length(ph);
}

uint32_t
kern_packet_get_buflet_count(const kern_packet_t ph)
{
	return __packet_get_buflet_count(ph);
}

kern_buflet_t
kern_packet_get_next_buflet(const kern_packet_t ph, const kern_buflet_t bprev)
{
	return __packet_get_next_buflet(ph, bprev);
}

errno_t
kern_packet_finalize(const kern_packet_t ph)
{
	return __packet_finalize(ph);
}

kern_packet_idx_t
kern_packet_get_object_index(const kern_packet_t ph)
{
	return __packet_get_object_index(ph);
}

errno_t
kern_packet_get_timestamp(const kern_packet_t ph, uint64_t *ts,
    boolean_t *valid)
{
	return __packet_get_timestamp(ph, ts, valid);
}

errno_t
kern_packet_set_timestamp(const kern_packet_t ph, uint64_t ts, boolean_t valid)
{
	return __packet_set_timestamp(ph, ts, valid);
}

struct mbuf *
kern_packet_get_mbuf(const kern_packet_t pkt)
{
	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(pkt);

	if ((kpkt->pkt_pflags & PKT_F_MBUF_DATA) != 0) {
		return kpkt->pkt_mbuf;
	}
	return NULL;
}

errno_t
kern_packet_get_timestamp_requested(const kern_packet_t ph,
    boolean_t *requested)
{
	return __packet_get_timestamp_requested(ph, requested);
}

void
kern_packet_tx_completion(const kern_packet_t ph, ifnet_t ifp)
{
	struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(ph);

	PKT_TYPE_ASSERT(ph, NEXUS_META_TYPE_PACKET);
	/*
	 * handling of transmit completion events.
	 */
	(void) kern_channel_event_transmit_status_with_packet(ph, ifp);

	/*
	 * handling of transmit completion timestamp request callbacks.
	 */
	if ((kpkt->pkt_pflags & PKT_F_TX_COMPL_TS_REQ) != 0) {
		__packet_perform_tx_completion_callbacks(ph, ifp);
	}
}

errno_t
kern_packet_get_tx_completion_status(const kern_packet_t ph,
    kern_return_t *status)
{
	return __packet_get_tx_completion_status(ph, status);
}

errno_t
kern_packet_set_tx_completion_status(const kern_packet_t ph,
    kern_return_t status)
{
	return __packet_set_tx_completion_status(ph, status);
}

void
kern_packet_set_group_start(const kern_packet_t ph)
{
	(void) __packet_set_group_start(ph);
}

boolean_t
kern_packet_get_group_start(const kern_packet_t ph)
{
	return __packet_get_group_start(ph);
}

void
kern_packet_set_group_end(const kern_packet_t ph)
{
	(void) __packet_set_group_end(ph);
}

boolean_t
kern_packet_get_group_end(const kern_packet_t ph)
{
	return __packet_get_group_end(ph);
}

errno_t
kern_packet_get_expire_time(const kern_packet_t ph, uint64_t *ts)
{
	return __packet_get_expire_time(ph, ts);
}

errno_t
kern_packet_set_expire_time(const kern_packet_t ph, const uint64_t ts)
{
	return __packet_set_expire_time(ph, ts);
}

errno_t
kern_packet_get_expiry_action(const kern_packet_t ph, packet_expiry_action_t *pea)
{
	return __packet_get_expiry_action(ph, pea);
}

errno_t
kern_packet_set_expiry_action(const kern_packet_t ph, packet_expiry_action_t pea)
{
	return __packet_set_expiry_action(ph, pea);
}

errno_t
kern_packet_get_token(const kern_packet_t ph, void *token, uint16_t *len)
{
	return __packet_get_token(ph, token, len);
}

errno_t
kern_packet_set_token(const kern_packet_t ph, const void *token,
    const uint16_t len)
{
	return __packet_set_token(ph, token, len);
}

errno_t
kern_packet_get_packetid(const kern_packet_t ph, packet_id_t *pktid)
{
	return __packet_get_packetid(ph, pktid);
}

errno_t
kern_packet_set_vlan_tag(const kern_packet_t ph, const uint16_t tag,
    const boolean_t tag_in_pkt)
{
	return __packet_set_vlan_tag(ph, tag, tag_in_pkt);
}

errno_t
kern_packet_get_vlan_tag(const kern_packet_t ph, uint16_t *tag,
    boolean_t *tag_in_pkt)
{
	return __packet_get_vlan_tag(ph, tag, tag_in_pkt);
}

uint16_t
kern_packet_get_vlan_id(const uint16_t tag)
{
	return __packet_get_vlan_id(tag);
}

uint8_t
kern_packet_get_vlan_priority(const uint16_t tag)
{
	return __packet_get_vlan_priority(tag);
}

errno_t
kern_packet_get_app_metadata(const kern_packet_t ph,
    packet_app_metadata_type_t *app_type, uint8_t *app_metadata)
{
	return __packet_get_app_metadata(ph, app_type, app_metadata);
}

void
kern_packet_set_wake_flag(const kern_packet_t ph)
{
	return __packet_set_wake_flag(ph);
}

boolean_t
kern_packet_get_wake_flag(const kern_packet_t ph)
{
	return __packet_get_wake_flag(ph);
}

uint32_t
kern_inet_checksum(const void *data, uint32_t len, uint32_t sum0)
{
	return __packet_cksum(data, len, sum0);
}

uint32_t
kern_copy_and_inet_checksum(const void *src, void *dst, uint32_t len,
    uint32_t sum0)
{
	uint32_t sum = __packet_copy_and_sum(src, dst, len, sum0);
	return __packet_fold_sum_final(sum);
}
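
/*
 * Usage sketch (illustrative): kern_inet_checksum() returns a 32-bit
 * partial sum, so it can be chained across segments by feeding the
 * running sum back in as sum0 (assuming even-length leading segments),
 * while kern_copy_and_inet_checksum() folds to a final 16-bit value
 * as part of the copy:
 *
 *	uint32_t sum = kern_inet_checksum(l4hdr, hlen, 0);
 *	sum = kern_inet_checksum(payload, plen, sum);
 */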

/*
 * Source packet must be finalized (not dropped); the cloned packet does
 * not inherit the finalized flag, or the classified flag, so the caller
 * is responsible for finalizing it and classifying it (as needed).
 */
static int
kern_packet_clone_internal(const kern_packet_t ph1, kern_packet_t *ph2,
    uint32_t skmflag, kern_packet_copy_mode_t mode)
{
	struct kern_pbufpool *pool;
	struct __kern_packet *p1 = SK_PTR_ADDR_KPKT(ph1);
	struct __kern_packet *p2 = NULL;
	struct __kern_buflet *p1_buf, *p2_buf;
	uint16_t bufs_cnt_alloc;
	int m_how;
	int err;

	/* TODO: Add quantum support */
	VERIFY(SK_PTR_TYPE(ph1) == NEXUS_META_TYPE_PACKET);

	/* Source needs to be finalized (not dropped) and with 1 buflet */
	if ((p1->pkt_qum.qum_qflags & QUM_F_DROPPED) != 0 ||
	    p1->pkt_bufs_cnt == 0) {
		return EINVAL;
	}

	/* TODO: Add multi-buflet support */
	VERIFY(p1->pkt_bufs_cnt == 1);

	switch (mode) {
	case KPKT_COPY_HEAVY:
		/*
		 * Allocate a packet with the same number of buffers as
		 * the source packet's; this cannot be 0 per the check
		 * above.
		 */
		bufs_cnt_alloc = p1->pkt_bufs_cnt;
		break;

	case KPKT_COPY_LIGHT:
		/*
		 * Allocate an "empty" packet with no buffers attached; this
		 * will work only on pools marked with "on-demand", which is
		 * the case today for device drivers needing shared buffers
		 * support.
		 *
		 * TODO: We could make this generic and applicable to regular
		 * pools, but it would involve detaching the buffer that comes
		 * attached to the constructed packet; that wouldn't be very
		 * lightweight in nature.  In such a case the number of
		 * buffers requested during allocation is the same as that of
		 * the source packet.  For now, let it fail naturally on
		 * regular pools, as part of allocation below.
		 *
		 * XXX: This would also fail on quantums as we currently
		 * restrict quantums to have exactly one buffer.
		 */
		bufs_cnt_alloc = 0;
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	*ph2 = 0;
	pool = __DECONST(struct kern_pbufpool *, SK_PTR_ADDR_KQUM(ph1)->qum_pp);
	if (skmflag & SKMEM_NOSLEEP) {
		err = kern_pbufpool_alloc_nosleep(pool, bufs_cnt_alloc, ph2);
		m_how = M_NOWAIT;
	} else {
		err = kern_pbufpool_alloc(pool, bufs_cnt_alloc, ph2);
		ASSERT(err != ENOMEM);
		m_how = M_WAIT;
	}
	if (__improbable(err != 0)) {
		/* See comments above related to KPKT_COPY_{HEAVY,LIGHT} */
		goto error;
	}
	p2 = SK_PTR_ADDR_KPKT(*ph2);

	/* Copy packet metadata */
	_QUM_COPY(&(p1)->pkt_qum, &(p2)->pkt_qum);
	_PKT_COPY(p1, p2);
	ASSERT(p2->pkt_mbuf == NULL);
	ASSERT(p2->pkt_bufs_max == p1->pkt_bufs_max);

	/* clear trace id */
	p2->pkt_trace_id = 0;
	/* clear finalized and classified bits from clone */
	p2->pkt_qum.qum_qflags &= ~(QUM_F_FINALIZED | QUM_F_FLOW_CLASSIFIED);

	switch (mode) {
	case KPKT_COPY_HEAVY:
		/*
		 * Heavy: Copy buffer contents and extra metadata.
		 */
		ASSERT(p2->pkt_bufs_cnt == p1->pkt_bufs_cnt);
		if (__probable(p1->pkt_bufs_cnt != 0)) {
			uint8_t *saddr, *daddr;
			uint32_t copy_len;
			/*
			 * TODO -- wshen0123@apple.com
			 * Packets from compat driver could have dlen > dlim
			 * for flowswitch flow compatibility; clean up when
			 * we make them consistent.
			 */
			PKT_GET_FIRST_BUFLET(p1, p1->pkt_bufs_cnt, p1_buf);
			PKT_GET_FIRST_BUFLET(p2, p2->pkt_bufs_cnt, p2_buf);
			saddr = (void *)p1_buf->buf_addr;
			daddr = (void *)p2_buf->buf_addr;
			copy_len = MIN(p1_buf->buf_dlen, p1_buf->buf_dlim);
			if (copy_len != 0) {
				bcopy(saddr, daddr, copy_len);
			}
			*__DECONST(uint32_t *, &p2_buf->buf_dlim) =
			    p1_buf->buf_dlim;
			p2_buf->buf_dlen = p1_buf->buf_dlen;
			p2_buf->buf_doff = p1_buf->buf_doff;
		}

		/* Copy AQM metadata */
		p2->pkt_flowsrc_type = p1->pkt_flowsrc_type;
		p2->pkt_flowsrc_fidx = p1->pkt_flowsrc_fidx;
		_CASSERT((offsetof(struct __flow, flow_src_id) % 8) == 0);
		_UUID_COPY(p2->pkt_flowsrc_id, p1->pkt_flowsrc_id);
		_UUID_COPY(p2->pkt_policy_euuid, p1->pkt_policy_euuid);
		p2->pkt_policy_id = p1->pkt_policy_id;
		p2->pkt_skip_policy_id = p1->pkt_skip_policy_id;

		p2->pkt_pflags = p1->pkt_pflags;
		if (p1->pkt_pflags & PKT_F_MBUF_DATA) {
			ASSERT(p1->pkt_mbuf != NULL);
			p2->pkt_mbuf = m_dup(p1->pkt_mbuf, m_how);
			if (p2->pkt_mbuf == NULL) {
				KPKT_CLEAR_MBUF_DATA(p2);
				err = ENOBUFS;
				goto error;
			}
		}
		break;

	case KPKT_COPY_LIGHT:
		/*
		 * Lightweight: Duplicate buflet(s) and add refs.
		 */
		ASSERT(p1->pkt_mbuf == NULL);
		ASSERT(p2->pkt_bufs_cnt == 0);
		if (__probable(p1->pkt_bufs_cnt != 0)) {
			PKT_GET_FIRST_BUFLET(p1, p1->pkt_bufs_cnt, p1_buf);
			p2_buf = &p2->pkt_qum_buf;
			*__DECONST(uint16_t *, &p2->pkt_bufs_cnt) =
			    p1->pkt_bufs_cnt;
			_KBUF_COPY(p1_buf, p2_buf);
			ASSERT(p2_buf->buf_nbft_addr == 0);
			ASSERT(p2_buf->buf_nbft_idx == OBJ_IDX_NONE);
		}
		ASSERT(p2->pkt_bufs_cnt == p1->pkt_bufs_cnt);
		ASSERT(p2->pkt_bufs_max == p1->pkt_bufs_max);
		ASSERT(err == 0);
		break;
	}

error:
	if (err != 0 && p2 != NULL) {
		uint32_t usecnt = 0;

		ASSERT(p2->pkt_mbuf == NULL);
		if (__probable(mode == KPKT_COPY_LIGHT)) {
			/*
			 * This is undoing what _KBUF_COPY() did earlier, in
			 * case this routine is modified to handle a regular
			 * pool (not on-demand), which also decrements the
			 * shared buffer's usecnt.  For a regular pool, calling
			 * kern_pbufpool_free() will not yield a call to
			 * destroy the metadata.
			 */
			PKT_GET_FIRST_BUFLET(p2, p2->pkt_bufs_cnt, p2_buf);
			KBUF_DTOR(p2_buf, usecnt);
		}
		kern_pbufpool_free(pool, *ph2);
		*ph2 = 0;
	}

	return err;
}

errno_t
kern_packet_clone(const kern_packet_t ph1, kern_packet_t *ph2,
    kern_packet_copy_mode_t mode)
{
	return kern_packet_clone_internal(ph1, ph2, 0, mode);
}

errno_t
kern_packet_clone_nosleep(const kern_packet_t ph1, kern_packet_t *ph2,
    kern_packet_copy_mode_t mode)
{
	return kern_packet_clone_internal(ph1, ph2, SKMEM_NOSLEEP, mode);
}
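
/*
 * Usage sketch (illustrative, with a hypothetical finalized source
 * packet "ph"): since the clone does not inherit the finalized or
 * classified flags, a caller typically re-finalizes it:
 *
 *	kern_packet_t clone;
 *	errno_t err = kern_packet_clone_nosleep(ph, &clone, KPKT_COPY_LIGHT);
 *	if (err == 0) {
 *		err = kern_packet_finalize(clone);
 *	}
 */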

errno_t
kern_packet_add_buflet(const kern_packet_t ph, const kern_buflet_t bprev,
    const kern_buflet_t bnew)
{
	return __packet_add_buflet(ph, bprev, bnew);
}

void
kern_packet_append(const kern_packet_t ph1, const kern_packet_t ph2)
{
	/*
	 * TODO:
	 * Add assert for non-zero ph2 here after changing IOSkywalkFamily
	 * to use kern_packet_set_next() for clearing the next pointer.
	 */
	kern_packet_set_next(ph1, ph2);
}

kern_packet_t
kern_packet_get_next(const kern_packet_t ph)
{
	struct __kern_packet *p, *next;

	p = SK_PTR_ADDR_KPKT(ph);
	next = p->pkt_nextpkt;
	return next == NULL ? 0 : SK_PKT2PH(next);
}

void
kern_packet_set_next(const kern_packet_t ph1, const kern_packet_t ph2)
{
	struct __kern_packet *p1, *p2;

	ASSERT(ph1 != 0);
	p1 = SK_PTR_ADDR_KPKT(ph1);
	p2 = (ph2 == 0 ? NULL : SK_PTR_ADDR_KPKT(ph2));
	p1->pkt_nextpkt = p2;
}
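
/*
 * Traversal sketch (illustrative): a chain built via kern_packet_append()
 * or kern_packet_set_next() ends when kern_packet_get_next() returns 0:
 *
 *	for (kern_packet_t p = head; p != 0; p = kern_packet_get_next(p)) {
 *		...
 *	}
 */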

void
kern_packet_set_chain_counts(const kern_packet_t ph, uint32_t count,
    uint32_t bytes)
{
	struct __kern_packet *p;

	p = SK_PTR_ADDR_KPKT(ph);
	p->pkt_chain_count = count;
	p->pkt_chain_bytes = bytes;
}

void
kern_packet_get_chain_counts(const kern_packet_t ph, uint32_t *count,
    uint32_t *bytes)
{
	struct __kern_packet *p;

	p = SK_PTR_ADDR_KPKT(ph);
	*count = p->pkt_chain_count;
	*bytes = p->pkt_chain_bytes;
}

errno_t
kern_buflet_set_data_offset(const kern_buflet_t buf, const uint32_t doff)
{
	return __buflet_set_data_offset(buf, doff);
}

uint32_t
kern_buflet_get_data_offset(const kern_buflet_t buf)
{
	return __buflet_get_data_offset(buf);
}

errno_t
kern_buflet_set_data_length(const kern_buflet_t buf, const uint32_t dlen)
{
	return __buflet_set_data_length(buf, dlen);
}

uint32_t
kern_buflet_get_data_length(const kern_buflet_t buf)
{
	return __buflet_get_data_length(buf);
}

void *
kern_buflet_get_object_address(const kern_buflet_t buf)
{
	return __buflet_get_object_address(buf);
}

uint32_t
kern_buflet_get_object_limit(const kern_buflet_t buf)
{
	return __buflet_get_object_limit(buf);
}

void *
kern_buflet_get_data_address(const kern_buflet_t buf)
{
	return __buflet_get_data_address(buf);
}

errno_t
kern_buflet_set_data_address(const kern_buflet_t buf, const void *daddr)
{
	return __buflet_set_data_address(buf, daddr);
}

errno_t
kern_buflet_set_buffer_offset(const kern_buflet_t buf, const uint32_t off)
{
	return __buflet_set_buffer_offset(buf, off);
}

kern_segment_t
kern_buflet_get_object_segment(const kern_buflet_t buf,
    kern_obj_idx_seg_t *idx)
{
	return __buflet_get_object_segment(buf, idx);
}

uint32_t
kern_buflet_get_data_limit(const kern_buflet_t buf)
{
	return __buflet_get_data_limit(buf);
}

errno_t
kern_buflet_set_data_limit(const kern_buflet_t buf, const uint32_t dlim)
{
	return __buflet_set_data_limit(buf, dlim);
}

packet_trace_id_t
kern_packet_get_trace_id(const kern_packet_t ph)
{
	return __packet_get_trace_id(ph);
}

void
kern_packet_set_trace_id(const kern_packet_t ph, packet_trace_id_t trace_id)
{
	return __packet_set_trace_id(ph, trace_id);
}

void
kern_packet_trace_event(const kern_packet_t ph, uint32_t event)
{
	return __packet_trace_event(ph, event);
}

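/*
 * Copy up to "len" bytes at offset "off" of the packet's data into
 * "out_data".  Note that only the first buflet is consulted, and the
 * copy is silently truncated to the bytes available there, so this is
 * intended for single-buflet packets.
 */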
errno_t
kern_packet_copy_bytes(kern_packet_t pkt, size_t off, size_t len, void *out_data)
{
	kern_buflet_t buflet = NULL;
	size_t count;
	uint8_t *addr;
	uint32_t buflet_len;

	buflet = __packet_get_next_buflet(pkt, buflet);
	if (buflet == NULL) {
		return EINVAL;
	}
	buflet_len = __buflet_get_data_length(buflet);
	if (len > buflet_len) {
		return EINVAL;
	}
	if (off > buflet_len) {
		return EINVAL;
	}
	addr = __buflet_get_data_address(buflet);
	if (addr == NULL) {
		return EINVAL;
	}
	addr += __buflet_get_data_offset(buflet);
	addr += off;
	count = MIN(len, buflet_len - off);
	bcopy((void *)addr, out_data, count);

	return 0;
}

errno_t
kern_packet_get_flowid(const kern_packet_t ph, packet_flowid_t *pflowid)
{
	return __packet_get_flowid(ph, pflowid);
}

void
kern_packet_set_trace_tag(const kern_packet_t ph, packet_trace_tag_t tag)
{
	__packet_set_trace_tag(ph, tag);
}

packet_trace_tag_t
kern_packet_get_trace_tag(const kern_packet_t ph)
{
	return __packet_get_trace_tag(ph);
}

errno_t
kern_packet_get_tx_nexus_port_id(const kern_packet_t ph, uint32_t *nx_port_id)
{
	return __packet_get_tx_nx_port_id(ph, nx_port_id);
}

uint16_t
kern_packet_get_protocol_segment_size(const kern_packet_t ph)
{
	return __packet_get_protocol_segment_size(ph);
}

void
kern_packet_set_segment_count(const kern_packet_t ph, uint8_t segcount)
{
	__packet_set_segment_count(ph, segcount);
}

void *
kern_packet_get_priv(const kern_packet_t ph)
{
	return __packet_get_priv(ph);
}

void
kern_packet_set_priv(const kern_packet_t ph, void *priv)
{
	return __packet_set_priv(ph, priv);
}

void
kern_packet_get_tso_flags(const kern_packet_t ph, packet_tso_flags_t *flags)
{
	return __packet_get_tso_flags(ph, flags);
}

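/*
 * Check whether the packet carries an expiration deadline and a
 * non-trivial expiry action; if the deadline has passed, deliver a
 * "transmit expired" channel event to the packet's TX nexus port.
 * Returns 0 when no notification is warranted (deadline or action
 * absent, or deadline not yet reached).
 */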
errno_t
kern_packet_check_for_expiry_and_notify(
    const kern_packet_t ph, ifnet_t ifp, uint16_t origin, uint16_t status)
{
	errno_t err = 0;
	uint32_t nx_port_id = 0;
	packet_expiry_action_t exp_action = PACKET_EXPIRY_ACTION_NONE;
	os_channel_event_packet_transmit_expired_t exp_notif = {0};

	if (__improbable(!ifp)) {
		return EINVAL;
	}

	err = __packet_get_expire_time(ph, &exp_notif.packet_tx_expiration_deadline);
	if (__probable(err)) {
		if (err == ENOENT) {
			/* Expiration time is not set; cannot continue; not an error. */
			return 0;
		}
		return err;
	}

	err = __packet_get_expiry_action(ph, &exp_action);
	if (__probable(err)) {
		if (err == ENOENT) {
			/* Expiry action is not set; cannot continue; not an error. */
			return 0;
		}
		return err;
	}

	if (exp_action == PACKET_EXPIRY_ACTION_NONE) {
		/* Expiry action is a no-op; cannot continue; not an error. */
		return 0;
	}

	exp_notif.packet_tx_expiration_timestamp = mach_absolute_time();

	/* Check whether the packet has expired */
	if (exp_notif.packet_tx_expiration_timestamp < exp_notif.packet_tx_expiration_deadline) {
		/* The packet hasn't expired yet; cannot continue; not an error */
		return 0;
	}

	/* The packet has expired and notification is requested */
	err = __packet_get_packetid(ph, &exp_notif.packet_id);
	if (__improbable(err)) {
		return err;
	}

	err = __packet_get_tx_nx_port_id(ph, &nx_port_id);
	if (__improbable(err)) {
		return err;
	}

	exp_notif.packet_tx_expiration_status = status;
	exp_notif.packet_tx_expiration_origin = origin;

	/* Send the notification status */
	err = kern_channel_event_transmit_expired(
	    ifp, &exp_notif, nx_port_id);

	return err;
}