/*
 * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/packet/pbufpool_var.h>

static errno_t kern_pbufpool_alloc_common(const kern_pbufpool_t,
    const uint32_t, kern_packet_t *, uint32_t);
static errno_t kern_pbufpool_alloc_batch_common(const kern_pbufpool_t,
    const uint32_t, kern_packet_t *, uint32_t *, alloc_cb_func_t,
    const void *, uint32_t);

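/*
 * A constructor/destructor callback pair in the init structure is
 * invalid if only one of the two callbacks is supplied; they must be
 * provided together or not at all.
 */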
#define KBI_INVALID_CB_PAIRS(cb1, cb2) \
	(!(init->kbi_##cb1 == NULL && init->kbi_##cb2 == NULL) && \
	((init->kbi_##cb1 == NULL) ^ (init->kbi_##cb2 == NULL)))

errno_t
kern_pbufpool_create(const struct kern_pbufpool_init *init,
    kern_pbufpool_t *ppp, struct kern_pbufpool_memory_info *pp_info)
{
	/* XXX: woodford_s - find a way to get 'srp' off the kernel stack */
	struct skmem_region_params srp[SKMEM_REGIONS];
	struct kern_pbufpool *pp = NULL;
	nexus_meta_type_t md_type;
	nexus_meta_subtype_t md_subtype;
	uint32_t buf_cnt;
	uint16_t max_frags;
	uint32_t ppcreatef = PPCREATEF_EXTERNAL;
	uint32_t pkt_cnt;
	uint32_t pp_region_flags = 0;
	int err = 0;
	bool kernel_only;
	bool tx_pool = true;

	if (ppp == NULL || init == NULL ||
	    init->kbi_version != KERN_PBUFPOOL_CURRENT_VERSION ||
	    init->kbi_packets == 0 || (init->kbi_buflets != 0 &&
	    init->kbi_buflets < init->kbi_packets &&
	    !(init->kbi_flags & KBIF_BUFFER_ON_DEMAND)) ||
	    init->kbi_bufsize == 0 || init->kbi_max_frags == 0 ||
	    ((init->kbi_flags & KBIF_QUANTUM) &&
	    (init->kbi_flags & KBIF_BUFFER_ON_DEMAND)) ||
	    KBI_INVALID_CB_PAIRS(buf_seg_ctor, buf_seg_dtor)) {
		err = EINVAL;
		goto done;
	}

	*ppp = NULL;

	md_type = ((init->kbi_flags & KBIF_QUANTUM) ?
	    NEXUS_META_TYPE_QUANTUM : NEXUS_META_TYPE_PACKET);

	/*
	 * If packet, we assume this is for a driver handling raw frames.
	 * This also implies that at present, we do not create mirrored
	 * regions for user space to conserve memory (since those regions
	 * aren't going to be used anyway.)
	 *
	 * XXX: adi@apple.com - to allow for "direct" channels from
	 * user process to driver, we will need to revisit this.
	 */
	md_subtype = ((md_type == NEXUS_META_TYPE_QUANTUM) ?
	    NEXUS_META_SUBTYPE_PAYLOAD : NEXUS_META_SUBTYPE_RAW);
	kernel_only = (md_type == NEXUS_META_TYPE_PACKET) &&
#if (DEVELOPMENT || DEBUG)
	    !skywalk_netif_direct_enabled() &&
#endif /* (DEVELOPMENT || DEBUG) */
	    ((init->kbi_flags & KBIF_USER_ACCESS) == 0);

	VERIFY((init->kbi_max_frags != 0) &&
	    (init->kbi_max_frags <= UINT16_MAX));
	max_frags = (uint16_t)init->kbi_max_frags;
	if (md_type == NEXUS_META_TYPE_QUANTUM && max_frags > 1) {
		err = EINVAL;
		goto done;
	}
	if ((max_frags > 1) && !(init->kbi_flags & KBIF_BUFFER_ON_DEMAND)) {
		err = EINVAL;
		goto done;
	}

	bzero(&srp, sizeof(srp));
	for (int i = 0; i < SKMEM_REGIONS; i++) {
		srp[i] = *skmem_get_default(i);
	}

	switch (init->kbi_flags & (KBIF_IODIR_IN | KBIF_IODIR_OUT)) {
	case KBIF_IODIR_IN:
		pp_region_flags |= PP_REGION_CONFIG_BUF_IODIR_IN;
		tx_pool = false;
		break;
	case KBIF_IODIR_OUT:
		pp_region_flags |= PP_REGION_CONFIG_BUF_IODIR_OUT;
		break;
	case (KBIF_IODIR_IN | KBIF_IODIR_OUT):
	default:
		pp_region_flags |= PP_REGION_CONFIG_BUF_IODIR_BIDIR;
		break;
	}

	if (init->kbi_flags & KBIF_BUFFER_ON_DEMAND) {
		pp_region_flags |= PP_REGION_CONFIG_BUFLET;
	}
	if (kernel_only) {
		pp_region_flags |= PP_REGION_CONFIG_KERNEL_ONLY;
	}
	if (init->kbi_flags & KBIF_KERNEL_READONLY) {
		pp_region_flags |= PP_REGION_CONFIG_BUF_KREADONLY;
	}
	if (init->kbi_flags & KBIF_THREADSAFE) {
		pp_region_flags |= PP_REGION_CONFIG_BUF_THREADSAFE;
	}
	/*
	 * Enable magazine layer for metadata.
	 */
	if (!(init->kbi_flags & KBIF_NO_MAGAZINES)) {
		pp_region_flags |= PP_REGION_CONFIG_MD_MAGAZINE_ENABLE;
	}
	pp_region_flags |= PP_REGION_CONFIG_MD_PERSISTENT;

	pkt_cnt = init->kbi_packets;
	/*
	 * For TCP to be able to send a 4MB window worth of data, the packet
	 * pool must hold at least 4MB/MTU packets; with a 1500-byte MTU that
	 * is roughly 2800 packets.  On devices which are not memory
	 * constrained, we therefore grow the pool to at least 4K packets.
	 */
	if (tx_pool && !SKMEM_MEM_CONSTRAINED_DEVICE() &&
#if (DEVELOPMENT || DEBUG)
	    !skmem_test_enabled() &&
#endif /* (DEVELOPMENT || DEBUG) */
	    !(init->kbi_flags & KBIF_MONOLITHIC) &&
	    !(init->kbi_flags & KBIF_VIRTUAL_DEVICE) &&
	    !(init->kbi_flags & KBIF_PHYS_CONTIGUOUS) &&
	    !(init->kbi_flags & KBIF_KERNEL_READONLY) &&
	    !(init->kbi_flags & KBIF_QUANTUM)) {
		pkt_cnt = MAX((4 * 1024), pkt_cnt);
	}
#if (DEVELOPMENT || DEBUG)
	if (sk_min_pool_size != 0) {
		pkt_cnt = MAX(pkt_cnt, sk_min_pool_size);
	}
#endif /* (DEVELOPMENT || DEBUG) */
	/* make sure # of buffers is >= # of packets */
	buf_cnt = MAX(pkt_cnt, init->kbi_buflets);

	/*
	 * Apply same logic as in nxprov_create_common().
	 */
	if (init->kbi_flags &
	    (KBIF_PERSISTENT | KBIF_MONOLITHIC | KBIF_INHIBIT_CACHE |
	    KBIF_PHYS_CONTIGUOUS)) {
		if (init->kbi_flags & KBIF_PERSISTENT) {
			pp_region_flags |= PP_REGION_CONFIG_BUF_PERSISTENT;
		}
		if (init->kbi_flags & KBIF_MONOLITHIC) {
			pp_region_flags |= PP_REGION_CONFIG_BUF_MONOLITHIC;
		}
		if (init->kbi_flags & KBIF_INHIBIT_CACHE) {
			pp_region_flags |= PP_REGION_CONFIG_BUF_NOCACHE;
		}
		if (init->kbi_flags & KBIF_PHYS_CONTIGUOUS) {
			pp_region_flags |= PP_REGION_CONFIG_BUF_SEGPHYSCONTIG;
		}
	}

	/* adjust region params */
	pp_regions_params_adjust(srp, md_type, md_subtype, pkt_cnt, max_frags,
	    init->kbi_bufsize, 0, buf_cnt, init->kbi_buf_seg_size,
	    pp_region_flags);

	/*
	 * Create packet pool.
	 */
	ASSERT(ppcreatef & PPCREATEF_EXTERNAL);
	if (kernel_only) {
		ppcreatef |= PPCREATEF_KERNEL_ONLY;
	}
	if (init->kbi_flags & KBIF_BUFFER_ON_DEMAND) {
		ppcreatef |= PPCREATEF_ONDEMAND_BUF;
	}
	/*
	 * Enable CPU-layer magazine resizing if this is a long-lived
	 * pbufpool, e.g. one that's allocated by a device driver.
	 */
	if (!(init->kbi_flags & KBIF_VIRTUAL_DEVICE)) {
		ppcreatef |= PPCREATEF_DYNAMIC;
	}
	if ((pp = pp_create((const char *)init->kbi_name, srp,
	    init->kbi_buf_seg_ctor, init->kbi_buf_seg_dtor,
	    init->kbi_ctx, init->kbi_ctx_retain, init->kbi_ctx_release,
	    ppcreatef)) == NULL) {
		err = ENOMEM;
		goto done;
	}

	*ppp = pp;

	if (pp_info != NULL) {
		err = kern_pbufpool_get_memory_info(pp, pp_info);
		VERIFY(err == 0);
	}

done:
	if (err != 0 && pp != NULL) {
		/* callee drops reference */
		pp_close(pp);
		pp = NULL;
	}

	return err;
}
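
/*
 * A minimal usage sketch for kern_pbufpool_create(); the packet count,
 * buffer size and pool name below are illustrative values picked for
 * this example, not recommendations.  A real driver would size the
 * pool for its ring depth and MTU, and may also pass a
 * kern_pbufpool_memory_info structure to learn how the pool was
 * actually provisioned.
 *
 *	static kern_pbufpool_t
 *	example_create_pool(void)
 *	{
 *		struct kern_pbufpool_init init;
 *		kern_pbufpool_t pp = NULL;
 *		errno_t err;
 *
 *		bzero(&init, sizeof(init));
 *		init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
 *		init.kbi_packets = 2048;	// illustrative count
 *		init.kbi_max_frags = 1;		// one buffer per packet
 *		init.kbi_bufsize = 2048;	// illustrative buffer size
 *		init.kbi_flags = KBIF_IODIR_OUT;
 *		// kbi_name is assumed here to be an in-structure array,
 *		// as the cast in kern_pbufpool_create() suggests.
 *		(void) snprintf((char *)init.kbi_name,
 *		    sizeof(init.kbi_name), "example_pool");
 *
 *		err = kern_pbufpool_create(&init, &pp, NULL);
 *		return (err == 0) ? pp : NULL;
 *	}
 *
 * The pool is later torn down with kern_pbufpool_destroy(), once every
 * packet and buffer allocated from it has been returned.
 */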
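/*
 * Return the client-supplied context of an externally created pool
 * (NULL for pools that were not created via kern_pbufpool_create());
 * the context is retained through the client's retain callback before
 * being returned.
 */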
void *
kern_pbufpool_get_context(const kern_pbufpool_t pp)
{
	void *ctx = (pp->pp_flags & PPF_EXTERNAL) ? pp->pp_ctx : NULL;
	if (ctx != NULL) {
		pp->pp_ctx_retain(ctx);
	}
	return ctx;
}

errno_t
kern_pbufpool_get_memory_info(const kern_pbufpool_t pp,
    struct kern_pbufpool_memory_info *pp_info)
{
	if (pp_info == NULL) {
		return EINVAL;
	}

	bzero(pp_info, sizeof(*pp_info));
	if (pp->pp_flags & PPF_EXTERNAL) {
		pp_info->kpm_flags |= KPMF_EXTERNAL;
	}
	pp_info->kpm_packets = pp->pp_kmd_region->skr_c_obj_cnt;
	pp_info->kpm_max_frags = pp->pp_max_frags;
	pp_info->kpm_buflets = PP_BUF_REGION_DEF(pp)->skr_c_obj_cnt;
	pp_info->kpm_bufsize = PP_BUF_SIZE_DEF(pp);
	pp_info->kpm_buf_obj_size = PP_BUF_OBJ_SIZE_DEF(pp);
	pp_info->kpm_bufsegs = PP_BUF_REGION_DEF(pp)->skr_seg_max_cnt;
	pp_info->kpm_buf_seg_size = PP_BUF_REGION_DEF(pp)->skr_seg_size;

	return 0;
}

kern_segment_idx_t
kern_segment_get_index(const kern_segment_t seg)
{
	return seg->sg_index;
}

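/*
 * Common single-packet allocation path: a packet may not span more
 * buflets than the pool's maximum fragment count, and unless the pool
 * manages buffers on demand it must be allocated with exactly that
 * count.  On success the allocated metadata is encoded into an opaque
 * packet handle for the caller.
 */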
static errno_t
kern_pbufpool_alloc_common(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *pph, uint32_t skmflag)
{
	struct __kern_quantum *kqum;

	*pph = 0;

	if (__improbable(bufcnt > pp->pp_max_frags)) {
		return EINVAL;
	}

	if (__improbable((bufcnt != pp->pp_max_frags) &&
	    !PP_HAS_BUFFER_ON_DEMAND(pp))) {
		return EINVAL;
	}

	kqum = SK_PTR_ADDR_KQUM(pp_alloc_packet(pp, (uint16_t)bufcnt, skmflag));
	if (__probable(kqum != NULL)) {
		*pph = SK_PTR_ENCODE(kqum, METADATA_TYPE(kqum),
		    METADATA_SUBTYPE(kqum));
	}

	return (kqum != NULL) ? 0 : ENOMEM;
}

errno_t
kern_pbufpool_alloc(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *pph)
{
	return kern_pbufpool_alloc_common(pp, bufcnt, pph, SKMEM_SLEEP);
}

errno_t
kern_pbufpool_alloc_nosleep(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *pph)
{
	return kern_pbufpool_alloc_common(pp, bufcnt, pph, SKMEM_NOSLEEP);
}

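/*
 * Common batch allocation path: the caller supplies an array and a
 * count of packets to allocate, and may optionally pass a per-packet
 * callback; a callback context without a callback is rejected.  The
 * same buflet-count rules as the single-packet path apply.
 */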
static errno_t
kern_pbufpool_alloc_batch_common(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *array, uint32_t *size,
    alloc_cb_func_t cb, const void *ctx, uint32_t skmflag)
{
	if (__improbable(array == NULL || size == NULL || *size == 0 ||
	    bufcnt > pp->pp_max_frags || (cb == NULL && ctx != NULL))) {
		return EINVAL;
	}

	if (__improbable((bufcnt != pp->pp_max_frags) &&
	    !PP_HAS_BUFFER_ON_DEMAND(pp))) {
		return EINVAL;
	}

	return pp_alloc_packet_batch(pp, (uint16_t)bufcnt, array, size, TRUE,
	    cb, ctx, skmflag);
}

errno_t
kern_pbufpool_alloc_batch(const kern_pbufpool_t pp, const uint32_t bufcnt,
    kern_packet_t *array, uint32_t *size)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, NULL, NULL, SKMEM_SLEEP);
}

errno_t
kern_pbufpool_alloc_batch_callback(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *array, uint32_t *size,
    alloc_cb_func_t cb, const void *ctx)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, cb, ctx, SKMEM_SLEEP);
}

errno_t
kern_pbufpool_alloc_batch_nosleep(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *array, uint32_t *size)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, NULL, NULL, SKMEM_NOSLEEP);
}

errno_t
kern_pbufpool_alloc_batch_nosleep_callback(const kern_pbufpool_t pp,
    const uint32_t bufcnt, kern_packet_t *array, uint32_t *size,
    alloc_cb_func_t cb, const void *ctx)
{
	return kern_pbufpool_alloc_batch_common(pp, bufcnt, array,
	    size, cb, ctx, SKMEM_NOSLEEP);
}

void
kern_pbufpool_free(const kern_pbufpool_t pp, kern_packet_t ph)
{
	pp_free_packet(pp, SK_PTR_ADDR(ph));
}

void
kern_pbufpool_free_batch(const kern_pbufpool_t pp, kern_packet_t *array,
    uint32_t size)
{
	if (__improbable(array == NULL || size == 0)) {
		return;
	}

	pp_free_packet_batch(pp, array, size);
}
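
/*
 * A minimal sketch of a batch allocate/free cycle, assuming a pool
 * created with kbi_max_frags == 1 (hence a bufcnt of 1) and assuming
 * the count pointed to by 'size' is in/out: the number of packets
 * wanted on input and the number actually allocated on output.  The
 * batch size is illustrative.
 *
 *	static void
 *	example_batch_cycle(kern_pbufpool_t pp)
 *	{
 *		kern_packet_t pkts[32];
 *		uint32_t count = 32;
 *		errno_t err;
 *
 *		err = kern_pbufpool_alloc_batch_nosleep(pp, 1, pkts, &count);
 *		if (err != 0 && count == 0) {
 *			return;		// nothing could be allocated
 *		}
 *		// ... hand the 'count' packets to the data path ...
 *		kern_pbufpool_free_batch(pp, pkts, count);
 *	}
 */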
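/*
 * Free an entire chain of packets in one call; the chain must have
 * been allocated from the given pool.
 */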
void
kern_pbufpool_free_chain(const kern_pbufpool_t pp, kern_packet_t chain)
{
	struct __kern_packet *pkt_chain = SK_PTR_ADDR_KPKT(chain);

	VERIFY(pp == pkt_chain->pkt_qum.qum_pp);
	pp_free_packet_chain(pkt_chain, NULL);
}

errno_t
kern_pbufpool_alloc_buffer(const kern_pbufpool_t pp, mach_vm_address_t *buf,
    kern_segment_t *sg, kern_obj_idx_seg_t *sg_idx)
{
	return pp_alloc_buffer(pp, buf, sg, sg_idx, 0);
}

errno_t
kern_pbufpool_alloc_buffer_nosleep(const kern_pbufpool_t pp,
    mach_vm_address_t *buf, kern_segment_t *sg, kern_obj_idx_seg_t *sg_idx)
{
	return pp_alloc_buffer(pp, buf, sg, sg_idx, SKMEM_NOSLEEP);
}

void
kern_pbufpool_free_buffer(const kern_pbufpool_t pp, mach_vm_address_t baddr)
{
	pp_free_buffer(pp, baddr);
}
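
/*
 * A minimal sketch of allocating and freeing a bare buffer.  The
 * segment handle and object index report where the buffer lives within
 * its backing memory segment; they are shown here only to illustrate
 * the signature.
 *
 *	static void
 *	example_buffer_cycle(kern_pbufpool_t pp)
 *	{
 *		mach_vm_address_t baddr = 0;
 *		kern_segment_t seg = NULL;
 *		kern_obj_idx_seg_t seg_idx;
 *		errno_t err;
 *
 *		err = kern_pbufpool_alloc_buffer_nosleep(pp, &baddr, &seg,
 *		    &seg_idx);
 *		if (err != 0) {
 *			return;
 *		}
 *		// ... use the buffer at 'baddr' ...
 *		kern_pbufpool_free_buffer(pp, baddr);
 *	}
 */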

void
kern_pbufpool_destroy(kern_pbufpool_t pp)
{
	VERIFY(pp->pp_flags & PPF_EXTERNAL);
	pp_close(pp);
}

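/*
 * Buflet allocation; typically used with pools created with
 * KBIF_BUFFER_ON_DEMAND, where packets can carry a variable number of
 * buffers (see kern_pbufpool_create() above).
 */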
errno_t
kern_pbufpool_alloc_buflet(const kern_pbufpool_t pp, kern_buflet_t *pbuf)
{
	return pp_alloc_buflet(pp, pbuf, SKMEM_SLEEP, false);
}

errno_t
kern_pbufpool_alloc_buflet_nosleep(const kern_pbufpool_t pp,
    kern_buflet_t *pbuf)
{
	return pp_alloc_buflet(pp, pbuf, SKMEM_NOSLEEP, false);
}