/*
 * Copyright (c) 2018-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if (DEVELOPMENT || DEBUG) /* XXX make this whole file a config option? */

#include <skywalk/os_skywalk_private.h>

/*
 * Ignore -Wxnu-typed-allocators for this file, because
 * this is test-only code.
 */
__typed_allocators_ignore_push

#define SKMEM_TEST_BUFSIZE 2048

#if XNU_TARGET_OS_OSX && defined(__arm64__)
#define TEST_OPTION_INHIBIT_CACHE 0
#else /* !(XNU_TARGET_OS_OSX && defined(__arm64__)) */
#define TEST_OPTION_INHIBIT_CACHE KBIF_INHIBIT_CACHE
#endif /* XNU_TARGET_OS_OSX && defined(__arm64__) */
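
/*
 * TEST_OPTION_INHIBIT_CACHE folds KBIF_INHIBIT_CACHE into the test
 * matrix, except on macOS/arm64 where it is defined away to 0; see
 * also the XXX note in skmem_packet_tests() about the checksum code
 * assuming normal rather than device memory.
 */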

static void skmem_test_start(void *, wait_result_t);
static void skmem_test_stop(void *, wait_result_t);
static void skmem_test_func(void *v, wait_result_t w);
static void skmem_test_mbfreecb(caddr_t cl, uint32_t size, caddr_t arg);
static void skmem_test_alloccb(kern_packet_t, uint32_t, const void *);

extern unsigned int ml_wait_max_cpus(void);
extern kern_return_t thread_terminate(thread_t);

static int skmt_enabled;
static int skmt_busy;
static int skmt_mbcnt;

decl_lck_mtx_data(static, skmt_lock);

struct skmt_alloc_ctx {
	uint32_t stc_req;       /* # of objects requested */
	uint32_t stc_idx;       /* expected index */
};

static struct skmt_alloc_ctx skmt_alloccb_ctx;

struct skmt_thread_info {
	kern_packet_t sti_mph;  /* master packet */
	kern_packet_t sti_mpc;  /* cloned packet */
	thread_t sti_thread;    /* thread instance */
	boolean_t sti_nosleep;  /* non-sleeping allocation */
} __attribute__((aligned(CHANNEL_CACHE_ALIGN_MAX)));
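/*
 * Aligned to CHANNEL_CACHE_ALIGN_MAX, presumably so that worker
 * threads updating adjacent entries do not false-share cache lines.
 */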

static struct skmt_thread_info *skmth_info;
static uint32_t skmth_info_size;
static int32_t skmth_cnt;
static boolean_t skmth_run;
static kern_pbufpool_t skmth_pp;

void
skmem_test_init(void)
{
	lck_mtx_init(&skmt_lock, &sk_lock_group, &sk_lock_attr);
}

void
skmem_test_fini(void)
{
	lck_mtx_destroy(&skmt_lock, &sk_lock_group);
}

bool
skmem_test_enabled(void)
{
	bool enabled;
	lck_mtx_lock(&skmt_lock);
	enabled = (skmt_busy != 0);
	lck_mtx_unlock(&skmt_lock);
	return enabled;
}

typedef union {
	char c[2];
	uint16_t s;
} short_union_t;

typedef union {
	uint16_t s[2];
	long l;
} long_union_t;
static void
_reduce(int *sum)
{
	long_union_t l_util;

	l_util.l = *sum;
	*sum = l_util.s[0] + l_util.s[1];
	if (*sum > 65535) {
		*sum -= 65535;
	}
}

static uint16_t
skmem_reference_sum(void *buffer, int len, int sum0)
{
	uint16_t *w;
	int sum = sum0;

	w = (uint16_t *)buffer;
	while ((len -= 32) >= 0) {
		sum += w[0]; sum += w[1];
		sum += w[2]; sum += w[3];
		sum += w[4]; sum += w[5];
		sum += w[6]; sum += w[7];
		sum += w[8]; sum += w[9];
		sum += w[10]; sum += w[11];
		sum += w[12]; sum += w[13];
		sum += w[14]; sum += w[15];
		w += 16;
	}
	len += 32;
	while ((len -= 8) >= 0) {
		sum += w[0]; sum += w[1];
		sum += w[2]; sum += w[3];
		w += 4;
	}
	len += 8;
	if (len) {
		_reduce(&sum);
		while ((len -= 2) >= 0) {
			sum += *w++;
		}
	}
	if (len == -1) { /* odd-length packet */
		short_union_t s_util;

		s_util.s = 0;
		s_util.c[0] = *((char *)w);
		s_util.c[1] = 0;
		sum += s_util.s;
	}
	_reduce(&sum);
	return sum & 0xffff;
}
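
/*
 * skmem_reference_sum() is a deliberately plain, unrolled 16-bit
 * ones'-complement sum (32-byte, then 8-byte strides, then the tail),
 * used below as an oracle for the optimized checksum KPIs, e.g.:
 *
 *	VERIFY(skmem_reference_sum(buf, len, 0) ==
 *	    __packet_cksum(buf, len, 0));
 */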

/*
 * At present, the number of objects created in the pool will be
 * higher than the requested amount if the pool is allowed to use
 * the magazines layer. Round up a bit to accommodate any rounding
 * up done by the pool allocator.
 */
#define MAX_PH_ARY P2ROUNDUP(skmem_cache_magazine_max(1) + 129, 256)

struct skmem_pp_ctx_s {
	os_refcnt_t skmem_pp_ctx_refcnt;
};

static struct skmem_pp_ctx_s skmem_pp_ctx;

static uint32_t
skmem_pp_ctx_refcnt(void *ctx)
{
	struct skmem_pp_ctx_s *pp_ctx = ctx;
	VERIFY(pp_ctx == &skmem_pp_ctx);
	return os_ref_get_count(&pp_ctx->skmem_pp_ctx_refcnt);
}

static void
skmem_pp_ctx_retain(void *ctx)
{
	struct skmem_pp_ctx_s *pp_ctx = ctx;
	VERIFY(pp_ctx == &skmem_pp_ctx);
	os_ref_retain(&pp_ctx->skmem_pp_ctx_refcnt);
}

static void
skmem_pp_ctx_release(void *ctx)
{
	struct skmem_pp_ctx_s *pp_ctx = ctx;
	VERIFY(pp_ctx == &skmem_pp_ctx);
	(void)os_ref_release(&pp_ctx->skmem_pp_ctx_refcnt);
}
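
/*
 * Expected pool-context refcount lifecycle, as asserted below:
 * os_ref_init() starts the count at 1; kern_pbufpool_create()
 * retains (-> 2); kern_pbufpool_get_context() retains on behalf
 * of the caller (-> 3), who must release (-> 2); and
 * kern_pbufpool_destroy() drops the pool's reference (-> 1).
 */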

#define BUFLEN 2048

static void
skmem_buflet_tests(uint32_t flags)
{
	struct kern_pbufpool_init pp_init;
	struct kern_pbufpool_memory_info pp_mem_info;
	kern_pbufpool_t pp = NULL;
	struct kern_pbufpool_init pp_init_mb;
	kern_pbufpool_t pp_mb = NULL;
	mach_vm_address_t baddr = 0;
	kern_obj_idx_seg_t sg_idx;
	kern_segment_t sg;
	kern_packet_t *phary = NULL;
	kern_packet_t *phary2 = NULL;
	kern_packet_t *pharyc = NULL;
	struct mbuf **mbary = NULL;
	uint32_t mbcnt = 0;
	uint32_t phcnt = 0, maxphcnt = 0;
	uint32_t phcloned = 0;
	size_t mblen = BUFLEN;
	kern_packet_t ph = 0, ph_mb = 0;
	uint32_t i;
	errno_t err;

	/* packets only */
	VERIFY(!(flags & KBIF_QUANTUM));

	SK_ERR("flags 0x%x", flags);

	phary = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
	    Z_WAITOK | Z_ZERO);
	phary2 = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
	    Z_WAITOK | Z_ZERO);
	pharyc = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
	    Z_WAITOK | Z_ZERO);
	mbary = kalloc_type(struct mbuf *, MAX_PH_ARY, Z_WAITOK | Z_ZERO);

	os_ref_init(&skmem_pp_ctx.skmem_pp_ctx_refcnt, NULL);
	bzero(&pp_init, sizeof(pp_init));
	pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
	pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
	(void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
	    "%s", "skmem_buflet_tests");
	pp_init.kbi_flags = flags;
	pp_init.kbi_ctx = &skmem_pp_ctx;
	pp_init.kbi_ctx_retain = skmem_pp_ctx_retain;
	pp_init.kbi_ctx_release = skmem_pp_ctx_release;

	/* must fail if packets is 0 */
	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
	pp_init.kbi_packets = 64;
	/* must fail if bufsize is 0 */
	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
	pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
	/* must fail if max_frags is 0 */
	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);

	pp_init.kbi_max_frags = 1;
	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
	VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 2);
	void *ctx = kern_pbufpool_get_context(pp);
	VERIFY(ctx == &skmem_pp_ctx);
	VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 3);
	skmem_pp_ctx_release(ctx);
	VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 2);
	bzero(&pp_mem_info, sizeof(pp_mem_info));
	VERIFY(kern_pbufpool_get_memory_info(pp, NULL) == EINVAL);
	VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
	VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
	VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
	VERIFY(pp_mem_info.kpm_packets >= 64);
	VERIFY(pp_mem_info.kpm_packets <= MAX_PH_ARY);
	VERIFY(pp_mem_info.kpm_max_frags == 1);
	VERIFY(pp_mem_info.kpm_buflets >= 64);
	VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
	VERIFY(kern_pbufpool_alloc(pp, 0, &ph) == EINVAL ||
	    (flags & KBIF_BUFFER_ON_DEMAND));
	if (ph != 0) {
		kern_packet_t phc = 0;
		kern_buflet_t buflet;

		VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
		VERIFY((buflet = kern_packet_get_next_buflet(ph, NULL)) == NULL);
		VERIFY(kern_packet_clone(ph, &phc, KPKT_COPY_LIGHT) == EINVAL);
		VERIFY(kern_packet_clone(ph, &phc, KPKT_COPY_HEAVY) == EINVAL);
		kern_pbufpool_free(pp, ph);
		ph = 0;
	}
	maxphcnt = 32;
	VERIFY(kern_pbufpool_alloc(pp, 5, &ph) == EINVAL);
	if (flags & KBIF_BUFFER_ON_DEMAND) {
		/* allocate and free one at a time (no buflet) */
		for (i = 0, phcnt = 0; i < maxphcnt; i++) {
			boolean_t stop = FALSE;
			/*
			 * This may fail if skmem_region_mtbf is set, or if
			 * the system is short on memory. Perform retries at
			 * this layer to get at least 32 packets.
			 */
			while ((err = kern_pbufpool_alloc_nosleep(pp, 0, &ph)) != 0) {
				VERIFY(err == ENOMEM);
				if (phcnt < 32) {
					SK_ERR("[a] retrying alloc for packet %u",
					    phcnt);
					delay(250 * NSEC_PER_USEC); /* 1/4 sec */
					continue;
				}
				stop = TRUE;
				break;
			}
			if (stop) {
				break;
			}
			VERIFY(ph != 0);
			VERIFY(kern_packet_get_data_length(ph) == 0);
			VERIFY(kern_packet_get_buflet_count(ph) == 0);
			phary[phcnt++] = ph;
		}
		VERIFY(phcnt >= 32);
		for (i = 0; i < phcnt; i++) {
			kern_pbufpool_free(pp, phary[i]);
			phary[i] = 0;
		}
	}
	/* allocate and free one at a time (1 buflet) */
	for (i = 0, phcnt = 0; i < maxphcnt; i++) {
		boolean_t stop = FALSE;
		/*
		 * This may fail if skmem_region_mtbf is set, or if
		 * the system is short on memory. Perform retries at
		 * this layer to get at least 32 packets.
		 */
		while ((err = kern_pbufpool_alloc_nosleep(pp, 1, &ph)) != 0) {
			VERIFY(err == ENOMEM);
			if (phcnt < 32) {
				SK_ERR("[b] retrying alloc for packet %u",
				    phcnt);
				delay(250 * NSEC_PER_USEC); /* 1/4 sec */
				continue;
			}
			stop = TRUE;
			break;
		}
		if (stop) {
			break;
		}
		VERIFY(ph != 0);
		VERIFY(kern_packet_get_data_length(ph) == 0);
		VERIFY(kern_packet_get_buflet_count(ph) == 1);
		phary[phcnt++] = ph;
	}
	VERIFY(phcnt >= 32);
	for (i = 0; i < phcnt; i++) {
		kern_pbufpool_free(pp, phary[i]);
		phary[i] = 0;
	}
	/* allocate and free in batch */
	phcnt = maxphcnt;
	for (;;) {
		err = kern_pbufpool_alloc_batch_nosleep(pp, 1, phary, &phcnt);
		VERIFY(err != EINVAL);
		if (err == ENOMEM) {
			phcnt = maxphcnt;
			SK_ERR("retrying batch alloc for %u packets", phcnt);
			delay(250 * NSEC_PER_USEC); /* 1/4 sec */
		} else if (err == EAGAIN) {
			SK_ERR("batch alloc for %u packets only returned %u",
			    maxphcnt, phcnt);
			break;
		} else {
			VERIFY(err == 0);
			break;
		}
	}
	VERIFY(phcnt > 0);
	for (i = 0; i < phcnt; i++) {
		VERIFY(phary[i] != 0);
		VERIFY(kern_packet_get_data_length(phary[i]) == 0);
		VERIFY(kern_packet_get_buflet_count(phary[i]) == 1);
	}
	kern_pbufpool_free_batch(pp, phary, phcnt);
	/* allocate and free one at a time (blocking) */
	for (i = 0, phcnt = 0; i < maxphcnt; i++) {
		VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
		VERIFY(ph != 0);
		VERIFY(kern_packet_get_data_length(ph) == 0);
		VERIFY(kern_packet_get_buflet_count(ph) == 1);
		phary[phcnt++] = ph;
	}
	VERIFY(phcnt >= 32);
	for (i = 0; i < phcnt; i++) {
		kern_pbufpool_free(pp, phary[i]);
		phary[i] = 0;
	}
	/* allocate with callback */
	bzero(&skmt_alloccb_ctx, sizeof(skmt_alloccb_ctx));
	skmt_alloccb_ctx.stc_req = phcnt;
	VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
	    NULL, &skmt_alloccb_ctx) == EINVAL);
	VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
	    skmem_test_alloccb, &skmt_alloccb_ctx) == 0);
	VERIFY(skmt_alloccb_ctx.stc_idx == phcnt);
	kern_pbufpool_free_batch(pp, phary, phcnt);

	/*
	 * Allocate and free test
	 * Case 1: Packet has an mbuf attached
	 */
	mbcnt = phcnt;
	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
	/* clone packets (lightweight, without mbufs) */
	for (i = 0; i < phcnt; i++) {
		kern_buflet_t buflet, buflet2;
		kern_obj_idx_seg_t buf_idx_seg, buf2_idx_seg;

		VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
		    NULL)) != NULL);
		VERIFY(kern_buflet_set_data_length(buflet, BUFLEN) == 0);
		VERIFY(__packet_finalize(phary[i]) == 0);
		VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
		(void) memset(kern_buflet_get_data_address(buflet), i, BUFLEN);
		kern_packet_set_trace_id(phary[i], i);
		VERIFY(kern_packet_get_trace_id(phary[i]) == i);
		VERIFY(kern_packet_clone(phary[i], &pharyc[i],
		    KPKT_COPY_LIGHT) == 0 || !(flags & KBIF_BUFFER_ON_DEMAND));
		if (pharyc[i] != 0) {
			struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(pharyc[i]);
			/*
			 * Source packet was allocated with 1 buffer, so
			 * validate that the clone packet points to that
			 * same buffer, and that the buffer's usecnt is 2.
			 */
			VERIFY(!(QUM_ADDR(pharyc[i])->qum_qflags & QUM_F_FINALIZED));
			VERIFY(kpkt2->pkt_mbuf == NULL);
			VERIFY(!(kpkt2->pkt_pflags & PKT_F_MBUF_MASK));
			VERIFY((buflet2 = kern_packet_get_next_buflet(pharyc[i],
			    NULL)) != NULL);
			VERIFY(kern_buflet_get_object_address(buflet) ==
			    kern_buflet_get_object_address(buflet2));
			VERIFY(kern_buflet_get_data_address(buflet) ==
			    kern_buflet_get_data_address(buflet2));
			VERIFY(kern_buflet_get_data_limit(buflet) ==
			    kern_buflet_get_data_limit(buflet2));
			VERIFY(kern_buflet_get_data_offset(buflet) ==
			    kern_buflet_get_data_offset(buflet2));
			VERIFY(kern_buflet_get_data_length(buflet) ==
			    kern_buflet_get_data_length(buflet2));
			VERIFY(kern_buflet_set_data_limit(buflet2,
			    (uint16_t)kern_buflet_get_object_limit(buflet2) + 1)
			    == ERANGE);
			VERIFY(kern_buflet_set_data_limit(buflet2,
			    (uint16_t)kern_buflet_get_object_limit(buflet2) - 16)
			    == 0);
			VERIFY(kern_buflet_set_data_address(buflet2,
			    (const void *)((uintptr_t)kern_buflet_get_object_address(buflet2) - 1))
			    == ERANGE);
			VERIFY(kern_buflet_set_data_address(buflet2,
			    (const void *)((uintptr_t)kern_buflet_get_object_address(buflet2) + 16))
			    == 0);
			VERIFY(kern_buflet_set_data_length(buflet2,
			    kern_buflet_get_data_length(buflet2) - 32) == 0);
			VERIFY(kern_buflet_get_object_segment(buflet,
			    &buf_idx_seg) ==
			    kern_buflet_get_object_segment(buflet2,
			    &buf2_idx_seg));
			VERIFY(buf_idx_seg == buf2_idx_seg);
			VERIFY(buflet->buf_ctl == buflet2->buf_ctl);
			VERIFY(buflet->buf_ctl->bc_usecnt == 2);
			++phcloned;
			VERIFY(__packet_finalize(pharyc[i]) == 0);
			/* verify trace id isn't reused */
			VERIFY(kern_packet_get_trace_id(pharyc[i]) == 0);
			kern_packet_set_trace_id(pharyc[i], phcnt - i);
			VERIFY(kern_packet_get_trace_id(pharyc[i]) == (phcnt - i));
			VERIFY(kern_packet_get_trace_id(phary[i]) == i);
		}
	}
	VERIFY(phcloned == phcnt || phcloned == 0);
	if (phcloned != 0) {
		kern_pbufpool_free_batch(pp, pharyc, phcloned);
		phcloned = 0;
	}
	kern_pbufpool_free_batch(pp, phary, phcnt);
	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
	VERIFY(phcnt == mbcnt);
	VERIFY(skmt_mbcnt == 0);
	for (i = 0; i < mbcnt; i++) {
		struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
		kern_buflet_t buflet;

		VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
		    NULL)) != NULL);
		VERIFY(kern_buflet_set_data_length(buflet, BUFLEN) == 0);
		(void) memset(kern_buflet_get_data_address(buflet), i, BUFLEN);
		/* attach mbuf to packets and initialize packets */
		mblen = BUFLEN;
		VERIFY(mbuf_ring_cluster_alloc(MBUF_WAITOK, MBUF_TYPE_HEADER,
		    &mbary[i], skmem_test_mbfreecb, &mblen) == 0);
		VERIFY(mblen == BUFLEN);
		VERIFY(mbary[i] != NULL);
		VERIFY(mbary[i]->m_nextpkt == NULL);
		mbuf_setlen(mbary[i], mblen);
		mbuf_pkthdr_setlen(mbary[i], mblen);
		VERIFY((size_t)m_pktlen(mbary[i]) == mblen);
		(void) memset(mbuf_data(mbary[i]), i, mblen);
		kpkt->pkt_mbuf = mbary[i];
		kpkt->pkt_pflags |= PKT_F_MBUF_DATA;
		VERIFY(__packet_finalize_with_mbuf(kpkt) == 0);
		VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
		VERIFY(mbuf_ring_cluster_activate(kpkt->pkt_mbuf) == 0);
	}
	/* clone packets (heavyweight) */
	for (i = 0; i < phcnt; i++) {
		VERIFY(kern_packet_clone(phary[i], &pharyc[i],
		    KPKT_COPY_HEAVY) == 0);
		struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
		struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(pharyc[i]);
		kern_buflet_t buflet, buflet2;
		/*
		 * Source packet was allocated with 1 buffer, so
		 * validate that the clone packet points to a different
		 * buffer, and that the clone's attached mbuf is also
		 * different from the source's.
		 */
		VERIFY(!(QUM_ADDR(pharyc[i])->qum_qflags & QUM_F_FINALIZED));
		VERIFY((buflet = kern_packet_get_next_buflet(phary[i],
		    NULL)) != NULL);
		VERIFY((buflet2 = kern_packet_get_next_buflet(pharyc[i],
		    NULL)) != NULL);
		VERIFY(kern_buflet_get_object_address(buflet) !=
		    kern_buflet_get_object_address(buflet2));
		VERIFY(kern_buflet_get_data_address(buflet) !=
		    kern_buflet_get_data_address(buflet2));
		VERIFY(kern_buflet_get_data_limit(buflet) ==
		    kern_buflet_get_data_limit(buflet2));
		VERIFY(kern_buflet_get_data_offset(buflet) ==
		    kern_buflet_get_data_offset(buflet2));
		VERIFY(kern_buflet_get_data_length(buflet) == BUFLEN);
		VERIFY(kern_buflet_get_data_length(buflet) ==
		    kern_buflet_get_data_length(buflet2));
		VERIFY(kpkt->pkt_pflags & PKT_F_MBUF_DATA);
		VERIFY(kpkt2->pkt_pflags & PKT_F_MBUF_DATA);
		VERIFY(m_pktlen(kpkt2->pkt_mbuf) == m_pktlen(kpkt->pkt_mbuf));
		VERIFY(kern_packet_get_data_length(phary[i]) == BUFLEN);
		VERIFY(kern_packet_get_data_length(phary[i]) ==
		    kern_packet_get_data_length(pharyc[i]));
		VERIFY(buflet->buf_ctl != buflet2->buf_ctl);
		VERIFY(buflet->buf_ctl->bc_usecnt == 1);
		VERIFY(buflet2->buf_ctl->bc_usecnt == 1);
		VERIFY(memcmp(kern_buflet_get_data_address(buflet),
		    kern_buflet_get_data_address(buflet2),
		    kern_buflet_get_data_length(buflet)) == 0);
		VERIFY(kpkt->pkt_mbuf != NULL);
		VERIFY(kpkt2->pkt_mbuf != NULL);
		VERIFY(mbuf_data(kpkt->pkt_mbuf) != mbuf_data(kpkt2->pkt_mbuf));
		VERIFY(mbuf_len(kpkt->pkt_mbuf) == mbuf_len(kpkt2->pkt_mbuf));
		/* mbuf contents must have been copied */
		VERIFY(memcmp(mbuf_data(kpkt->pkt_mbuf),
		    mbuf_data(kpkt2->pkt_mbuf), mbuf_len(kpkt->pkt_mbuf)) == 0);
		VERIFY(__packet_finalize(pharyc[i]) == 0);
		++phcloned;
	}
	VERIFY(phcloned == phcnt);
	kern_pbufpool_free_batch(pp, pharyc, phcloned);
	phcloned = 0;
	skmt_mbcnt = mbcnt;
	kern_pbufpool_free_batch(pp, phary, phcnt);
	/* skmem_test_mbfreecb() should have been called for all mbufs by now */
	VERIFY(skmt_mbcnt == 0);
	for (i = 0; i < mbcnt; i++) {
		VERIFY(mbary[i] != NULL);
		m_freem(mbary[i]);
		mbary[i] = NULL;
	}
	mbcnt = 0;

	/*
	 * Allocate and free test
	 * Case 2: Packet has a packet attached
	 */
	VERIFY(pp_mem_info.kpm_packets >= 64);
	phcnt = 32;
	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
	VERIFY(phcnt == 32);
	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary2, &phcnt) == 0);
	VERIFY(phcnt == 32);
	/* attach each packet to a packet */
	for (i = 0; i < phcnt; i++) {
		struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
		struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(phary2[i]);

		kpkt->pkt_pkt = kpkt2;
		kpkt->pkt_pflags |= PKT_F_PKT_DATA;
	}
	/* free the batch of packets (also free the attached packets) */
	kern_pbufpool_free_batch(pp, phary, phcnt);

	/*
	 * Allocate and free test
	 * Case 3: Packet has a packet attached. The attached packet itself has
	 * an mbuf attached.
	 */
	VERIFY(pp_mem_info.kpm_packets >= 64);
	phcnt = 32;
	mbcnt = 32;
	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
	VERIFY(phcnt == 32);
	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary2, &phcnt) == 0);
	VERIFY(phcnt == 32);
	VERIFY(skmt_mbcnt == 0);
	for (i = 0; i < mbcnt; i++) {
		mblen = BUFLEN;
		VERIFY(mbuf_ring_cluster_alloc(MBUF_WAITOK, MBUF_TYPE_HEADER,
		    &mbary[i], skmem_test_mbfreecb, &mblen) == 0);
		VERIFY(mbary[i] != NULL);
		VERIFY(mbary[i]->m_nextpkt == NULL);
	}
	/* attach each packet to a packet */
	for (i = 0; i < phcnt; i++) {
		struct __kern_packet *kpkt = SK_PTR_ADDR_KPKT(phary[i]);
		struct __kern_packet *kpkt2 = SK_PTR_ADDR_KPKT(phary2[i]);

		VERIFY(mbary[i] != NULL);
		VERIFY(__packet_initialize_with_mbuf(kpkt2,
		    mbary[i], 0, 0) == 0);
		VERIFY(mbuf_ring_cluster_activate(kpkt2->pkt_mbuf) == 0);
		kpkt->pkt_pkt = kpkt2;
		kpkt->pkt_pflags |= PKT_F_PKT_DATA;
	}
	skmt_mbcnt = mbcnt;
	/* free the batch of packets (also free the attached packets) */
	kern_pbufpool_free_batch(pp, phary, phcnt);
	/* skmem_test_mbfreecb() should have been called for all mbufs by now */
	VERIFY(skmt_mbcnt == 0);
	for (i = 0; i < mbcnt; i++) {
		VERIFY(mbary[i] != NULL);
		m_freem(mbary[i]);
		mbary[i] = NULL;
	}
	mbcnt = 0;

	kern_pbufpool_destroy(pp);
	pp = NULL;
	/* check that ctx_release has been called */
	VERIFY(skmem_pp_ctx_refcnt(&skmem_pp_ctx) == 1);

	pp_init.kbi_ctx = NULL;
	pp_init.kbi_ctx_retain = NULL;
	pp_init.kbi_ctx_release = NULL;
	pp_init.kbi_buflets = 1;
	/* must fail if buflets is non-zero and less than packets */
	if (!(flags & KBIF_BUFFER_ON_DEMAND)) {
		VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
	} else {
		VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
		kern_pbufpool_destroy(pp);
		pp = NULL;
	}
	pp_init.kbi_buflets = (64 * 2);
	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
	bzero(&pp_mem_info, sizeof(pp_mem_info));
	VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
	VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
	VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
	VERIFY(pp_mem_info.kpm_packets >= 64);
	VERIFY(pp_mem_info.kpm_max_frags == 1);
	VERIFY(pp_mem_info.kpm_buflets >= (64 * 2));
	VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
	VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
	VERIFY(kern_packet_get_buflet_count(ph) == 1);
	kern_pbufpool_free(pp, ph);
	ph = 0;
	phcnt = 4;
	VERIFY(kern_pbufpool_alloc_batch(pp, 4, phary, &phcnt) == EINVAL);
	VERIFY(kern_pbufpool_alloc_batch(pp, 1, phary, &phcnt) == 0);
	VERIFY(kern_packet_get_buflet_count(phary[0]) == 1);
	VERIFY(kern_packet_get_buflet_count(phary[1]) == 1);
	VERIFY(kern_packet_get_buflet_count(phary[2]) == 1);
	VERIFY(kern_packet_get_buflet_count(phary[3]) == 1);
	kern_pbufpool_free_batch(pp, phary, phcnt);
	kern_pbufpool_destroy(pp);
	pp = NULL;

	/* check multi-buflet KPIs */
	bzero(&pp_init_mb, sizeof(pp_init_mb));
	pp_init_mb.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
	pp_init_mb.kbi_buf_seg_size = skmem_usr_buf_seg_size;
	(void) snprintf((char *)pp_init_mb.kbi_name,
	    sizeof(pp_init_mb.kbi_name), "%s", "skmem_buflet_tests_mb");
	pp_init_mb.kbi_flags = flags;
	pp_init_mb.kbi_max_frags = 4;
	pp_init_mb.kbi_packets = 64;
	pp_init_mb.kbi_bufsize = 512;
	pp_init_mb.kbi_buflets =
	    pp_init_mb.kbi_packets * pp_init_mb.kbi_max_frags;

	VERIFY((kern_pbufpool_create(&pp_init_mb, &pp_mb, NULL) == EINVAL) ||
	    (flags & KBIF_BUFFER_ON_DEMAND));

	if (pp_mb != NULL) {
		bzero(&pp_mem_info, sizeof(pp_mem_info));
		VERIFY(kern_pbufpool_get_memory_info(pp_mb, &pp_mem_info) == 0);
		VERIFY(kern_pbufpool_alloc(pp_mb, 0, &ph_mb) == 0 ||
		    !(flags & KBIF_BUFFER_ON_DEMAND));
		if (ph_mb != 0) {
			VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
			kern_pbufpool_free(pp_mb, ph_mb);
			ph_mb = 0;
		}
		VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, &sg,
		    &sg_idx) == 0 || !(flags & KBIF_BUFFER_ON_DEMAND));
		if (baddr != 0) {
			VERIFY(flags & KBIF_BUFFER_ON_DEMAND);
			kern_pbufpool_free_buffer(pp_mb, baddr);
			baddr = 0;
		}
		kern_pbufpool_destroy(pp_mb);
		pp_mb = NULL;
	}

	kfree_type(struct mbuf *, MAX_PH_ARY, mbary);
	mbary = NULL;

	kfree_data(phary, sizeof(kern_packet_t) * MAX_PH_ARY);
	phary = NULL;

	kfree_data(phary2, sizeof(kern_packet_t) * MAX_PH_ARY);
	phary2 = NULL;

	kfree_data(pharyc, sizeof(kern_packet_t) * MAX_PH_ARY);
	pharyc = NULL;
}

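/*
 * Invoked by the mbuf layer when a ring cluster attached above is
 * finally freed; each invocation decrements skmt_mbcnt, which the
 * tests preload and then assert has drained back to zero.
 */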
static void
skmem_test_mbfreecb(caddr_t cl, uint32_t size, caddr_t arg)
{
#pragma unused(cl, size)
	struct mbuf *m = (void *)arg;

	VERIFY(!mbuf_ring_cluster_is_active(m));
	VERIFY(skmt_mbcnt > 0);
	os_atomic_dec(&skmt_mbcnt, relaxed);
}

static void
skmem_test_alloccb(kern_packet_t ph, uint32_t idx, const void *ctx)
{
	VERIFY(ph != 0);
	VERIFY(ctx == &skmt_alloccb_ctx);
	VERIFY(idx < skmt_alloccb_ctx.stc_req);
	VERIFY(idx == os_atomic_inc_orig(&skmt_alloccb_ctx.stc_idx, relaxed));
}

static void
skmem_packet_tests(uint32_t flags)
{
	struct kern_pbufpool_memory_info pp_mb_mem_info;
	struct kern_pbufpool_memory_info pp_mem_info;
	struct kern_pbufpool_init pp_init;
	kern_pbufpool_t pp = NULL;
	struct kern_pbufpool_init pp_init_mb;
	kern_pbufpool_t pp_mb = NULL;
	mach_vm_address_t baddr = 0;
	uint8_t *buffer, *ref_buffer;
	kern_obj_idx_seg_t sg_idx;
	kern_buflet_t buflet;
	kern_segment_t sg;
	kern_packet_t ph = 0, ph_mb = 0;
	struct mbuf *m = NULL;
	uint16_t len;
	uint32_t i;
	uint32_t csum_eee_ref, csum_eeo_ref, csum_eoe_ref, csum_eoo_ref;
	uint32_t csum_oee_ref, csum_oeo_ref, csum_ooe_ref, csum_ooo_ref, csum;
	boolean_t test_unaligned;
	kern_buflet_t bft0, bft1;
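
	/*
	 * The csum_<s><d><l>_ref names appear to encode the parity
	 * (e = even, o = odd) of the source offset, destination offset,
	 * and copy length of each copy-and-checksum case below. The
	 * checksum value depends only on the source bytes summed, which
	 * is why e.g. csum_eoo_ref and csum_eeo_ref share one value.
	 */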

	SK_ERR("flags 0x%x", flags);

	/*
	 * XXX: Skip packet tests involving unaligned addresses when
	 * KBIF_INHIBIT_CACHE is set, as the copy-and-checksum routine
	 * currently assumes normal memory, rather than device memory.
	 */
	test_unaligned = !(flags & KBIF_INHIBIT_CACHE);

	/* allocate separately in case pool is set up for device memory */
	ref_buffer = (uint8_t *) kalloc_data(SKMEM_TEST_BUFSIZE,
	    Z_WAITOK | Z_ZERO);

	bzero(&pp_init_mb, sizeof(pp_init_mb));
	pp_init_mb.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
	pp_init_mb.kbi_buf_seg_size = skmem_usr_buf_seg_size;
	(void) snprintf((char *)pp_init_mb.kbi_name,
	    sizeof(pp_init_mb.kbi_name), "%s", "skmem_packet_tests_mb");
	pp_init_mb.kbi_flags = flags | KBIF_BUFFER_ON_DEMAND;
	pp_init_mb.kbi_max_frags = 4;
	pp_init_mb.kbi_packets = 64;
	pp_init_mb.kbi_bufsize = 512;
	pp_init_mb.kbi_buflets =
	    pp_init_mb.kbi_packets * pp_init_mb.kbi_max_frags;
	pp_init_mb.kbi_ctx = NULL;
	pp_init_mb.kbi_ctx_retain = NULL;
	pp_init_mb.kbi_ctx_release = NULL;

	VERIFY(kern_pbufpool_create(&pp_init_mb, &pp_mb, &pp_mb_mem_info) == 0);
	VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, NULL, NULL) == 0);
	kern_pbufpool_free_buffer(pp_mb, baddr);
	VERIFY(kern_pbufpool_alloc_buffer(pp_mb, &baddr, &sg, &sg_idx) == 0);
	VERIFY(sg != NULL);
	VERIFY(sg->sg_region != NULL);
	VERIFY(sg->sg_md != NULL);
	VERIFY(sg->sg_start != 0);
	VERIFY(sg->sg_end != 0);
	VERIFY(sg->sg_type == SKSEG_TYPE_ALLOC);
	kern_pbufpool_free_buffer(pp_mb, baddr);
	baddr = 0;

	/* add buflet to a packet with buf count 1 */
	VERIFY(kern_pbufpool_alloc(pp_mb, 1, &ph_mb) == 0);
	VERIFY(kern_pbufpool_alloc_buflet(pp_mb, &bft1) == 0);
	VERIFY(bft1 != NULL);
	VERIFY(kern_buflet_get_data_address(bft1) != NULL);
	VERIFY(kern_buflet_get_object_address(bft1) != NULL);
	VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) != NULL);
	VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
	VERIFY(kern_packet_get_buflet_count(ph_mb) == 2);
	VERIFY(kern_packet_get_next_buflet(ph_mb, NULL) == bft0);
	VERIFY(kern_packet_get_next_buflet(ph_mb, bft0) == bft1);
	VERIFY(kern_packet_get_next_buflet(ph_mb, bft1) == NULL);
	VERIFY(kern_packet_finalize(ph_mb) == 0);
	kern_pbufpool_free(pp_mb, ph_mb);
	ph_mb = 0;

	/* add buflet to a packet with buf count 0 */
	VERIFY(kern_pbufpool_alloc(pp_mb, 0, &ph_mb) == 0);
	VERIFY(kern_packet_get_buflet_count(ph_mb) == 0);
	VERIFY((bft0 = kern_packet_get_next_buflet(ph_mb, NULL)) == NULL);
	VERIFY(kern_pbufpool_alloc_buflet(pp_mb, &bft1) == 0);
	VERIFY(bft1 != NULL);
	VERIFY(kern_packet_add_buflet(ph_mb, bft0, bft1) == 0);
	VERIFY(kern_packet_get_buflet_count(ph_mb) == 1);
	VERIFY(kern_packet_get_next_buflet(ph_mb, bft0) == bft1);
	VERIFY(kern_packet_get_next_buflet(ph_mb, bft1) == NULL);
	VERIFY(kern_buflet_get_data_address(bft1) != NULL);
	VERIFY(kern_buflet_get_object_address(bft1) != NULL);
	VERIFY(kern_buflet_get_data_limit(bft1) != 0);
	VERIFY(kern_buflet_get_data_length(bft1) == 0);
	VERIFY(kern_packet_finalize(ph_mb) == 0);
	kern_pbufpool_free(pp_mb, ph_mb);
	ph_mb = 0;
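
	/*
	 * A minimal sketch of the add-buflet pattern exercised above,
	 * assuming a pool created with KBIF_BUFFER_ON_DEMAND and a
	 * packet "pkt" holding at most one buflet (so its first buflet,
	 * or NULL, is also its last):
	 *
	 *	kern_buflet_t bprev, bnew;
	 *	VERIFY(kern_pbufpool_alloc_buflet(pool, &bnew) == 0);
	 *	bprev = kern_packet_get_next_buflet(pkt, NULL);
	 *	VERIFY(kern_packet_add_buflet(pkt, bprev, bnew) == 0);
	 *	VERIFY(kern_packet_finalize(pkt) == 0);
	 */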

	bzero(&pp_init, sizeof(pp_init));
	pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
	pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
	(void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
	    "%s", "skmem_packet_tests");
	pp_init.kbi_flags = flags;
	pp_init.kbi_packets = 64;
	pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
	pp_init.kbi_max_frags = 1;
	pp_init.kbi_buflets = (64 * 2);
	pp_init.kbi_ctx = NULL;
	pp_init.kbi_ctx_retain = NULL;
	pp_init.kbi_ctx_release = NULL;

	/* validate multi-buflet packet checksum/copy+checksum routines */
	VERIFY(kern_pbufpool_create(&pp_init, &pp, &pp_mem_info) == 0);
	VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
	VERIFY(kern_packet_get_buflet_count(ph) == 1);

	VERIFY((buflet = kern_packet_get_next_buflet(ph, NULL)) != NULL);
	VERIFY((buffer = kern_buflet_get_data_address(buflet)) != NULL);
	len = SKMEM_TEST_BUFSIZE;
	for (i = 0; i < len; i++) {
		ref_buffer[i] = (i & 0xff);
	}
	/* enforce load/store byte for device memory case */
	volatile uint8_t *bufp = buffer;
	for (i = 0; i < len; i++) {
		bufp[i] = ref_buffer[i];
	}
	VERIFY(kern_buflet_set_data_length(buflet, len) == 0);
	VERIFY(__packet_finalize(ph) == 0);

	/* calculate and validate reference value */
	csum_eee_ref = __packet_cksum(buffer, len, 0);
	VERIFY(skmem_reference_sum(ref_buffer, len, 0) == csum_eee_ref);
	csum_eoe_ref = __packet_cksum(buffer, len - 2, 0);
	VERIFY(skmem_reference_sum(ref_buffer, len - 2, 0) == csum_eoe_ref);
	csum_eoo_ref = csum_eeo_ref = __packet_cksum(buffer, len - 1, 0);
	VERIFY(skmem_reference_sum(ref_buffer, len - 1, 0) == csum_eoo_ref);
	csum_oeo_ref = csum_ooo_ref = __packet_cksum(buffer + 1, len - 1, 0);
	VERIFY(skmem_reference_sum(ref_buffer + 1, len - 1, 0) == csum_oeo_ref);
	csum_ooe_ref = csum_oee_ref = __packet_cksum(buffer + 1, len - 2, 0);
	VERIFY(skmem_reference_sum(ref_buffer + 1, len - 2, 0) == csum_ooe_ref);

	/* sanity tests */
	VERIFY(skmem_reference_sum(ref_buffer + 2, len - 2, 0) ==
	    __packet_cksum(buffer + 2, len - 2, 0));
	VERIFY(skmem_reference_sum(ref_buffer + 3, len - 3, 0) ==
	    __packet_cksum(buffer + 3, len - 3, 0));
	VERIFY(skmem_reference_sum(ref_buffer + 4, len - 4, 0) ==
	    __packet_cksum(buffer + 4, len - 4, 0));
	VERIFY(skmem_reference_sum(ref_buffer + 5, len - 5, 0) ==
	    __packet_cksum(buffer + 5, len - 5, 0));
	VERIFY(skmem_reference_sum(ref_buffer + 6, len - 6, 0) ==
	    __packet_cksum(buffer + 6, len - 6, 0));
	VERIFY(skmem_reference_sum(ref_buffer + 7, len - 7, 0) ==
	    __packet_cksum(buffer + 7, len - 7, 0));

	VERIFY(mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_HEADER, &m) == 0);
	VERIFY(mbuf_copyback(m, 0, len, buffer, MBUF_WAITOK) == 0);

	/* verify copy-checksum between packets */
	VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
	VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
	pkt_copypkt_sum(ph, 0, ph_mb, 0, len - 1, &csum, TRUE);
	METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
	VERIFY(__packet_finalize(ph_mb) == 0);
	if (csum_eeo_ref != csum) {
		SK_ERR("pkt_copypkt_sum: csum_eeo_mismatch 0x%x, "
		    "0x%x, 0x%llx", csum_eeo_ref, csum,
		    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
	}
	VERIFY(csum_eeo_ref == csum);
	kern_pbufpool_free(pp_mb, ph_mb);
	ph_mb = 0;

	if (test_unaligned) {
		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		pkt_copypkt_sum(ph, 0, ph_mb, 1, len - 2, &csum, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_eoe_ref != csum) {
			SK_ERR("pkt_copypkt_sum: csum_eoe_mismatch 0x%x, "
			    "0x%x, 0x%llx", csum_eoe_ref, csum,
			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
		}
		VERIFY(csum_eoe_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;

		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		pkt_copypkt_sum(ph, 0, ph_mb, 1, len - 1, &csum, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_eoo_ref != csum) {
			SK_ERR("pkt_copypkt_sum: csum_eoo_mismatch 0x%x, "
			    "0x%x, 0x%llx", csum_eoo_ref, csum,
			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
		}
		VERIFY(csum_eoo_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;

		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		pkt_copypkt_sum(ph, 1, ph_mb, 0, len - 1, &csum, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_oeo_ref != csum) {
			SK_ERR("pkt_copypkt_sum: csum_oeo_mismatch 0x%x, "
			    "0x%x, 0x%llx", csum_oeo_ref, csum,
			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
		}
		VERIFY(csum_oeo_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;

		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		pkt_copypkt_sum(ph, 1, ph_mb, 1, len - 1, &csum, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_ooo_ref != csum) {
			SK_ERR("pkt_copypkt_sum: csum_ooo_mismatch 0x%x, "
			    "0x%x, 0x%llx", csum_ooo_ref, csum,
			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
		}
		VERIFY(csum_ooo_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;

		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		pkt_copypkt_sum(ph, 1, ph_mb, 1, len - 2, &csum, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_ooe_ref != csum) {
			SK_ERR("pkt_copypkt_sum: csum_ooe_mismatch 0x%x, "
			    "0x%x, 0x%llx", csum_ooe_ref, csum,
			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
		}
		VERIFY(csum_ooe_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;

		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		pkt_copypkt_sum(ph, 1, ph_mb, 0, len - 2, &csum, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_oee_ref != csum) {
			SK_ERR("pkt_copypkt_sum: csum_oee_mismatch 0x%x, "
			    "0x%x, 0x%llx", csum_oee_ref, csum,
			    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
		}
		VERIFY(csum_oee_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;
	}

	VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
	VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
	pkt_copypkt_sum(ph, 0, ph_mb, 0, len, &csum, TRUE);
	METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
	SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
	SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
	VERIFY(__packet_finalize(ph_mb) == 0);
	if (csum_eee_ref != csum) {
		SK_ERR("pkt_copypkt_sum: csum_eee_mismatch 0x%x, "
		    "0x%x, 0x%llx", csum_eee_ref, csum,
		    SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)));
	}
	VERIFY(csum_eee_ref == csum);

	/* verify copy-checksum from packet to buffer */
	csum = pkt_copyaddr_sum(ph_mb, 0, buffer, len - 1, TRUE, 0, NULL);
	if (csum_eeo_ref != csum) {
		SK_ERR("pkt_copyaddr_sum: csum_eeo_mismatch "
		    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eeo_ref,
		    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
		    SK_KVA(buffer));
	}
	VERIFY(csum_eeo_ref == csum);

	if (test_unaligned) {
		csum = pkt_copyaddr_sum(ph_mb, 0, buffer + 1, len - 1, TRUE, 0, NULL);
		if (csum_eoo_ref != csum) {
			SK_ERR("pkt_copyaddr_sum: csum_eoo_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoo_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(buffer));
		}
		VERIFY(csum_eoo_ref == csum);

		csum = pkt_copyaddr_sum(ph_mb, 0, buffer + 1, len - 2, TRUE, 0, NULL);
		if (csum_eoe_ref != csum) {
			SK_ERR("pkt_copyaddr_sum: csum_eoe_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoe_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(buffer));
		}
		VERIFY(csum_eoe_ref == csum);

		csum = pkt_copyaddr_sum(ph_mb, 1, buffer + 1, len - 2, TRUE, 0, NULL);
		if (csum_ooe_ref != csum) {
			SK_ERR("pkt_copyaddr_sum: csum_ooe_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooe_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(buffer));
		}
		VERIFY(csum_ooe_ref == csum);

		csum = pkt_copyaddr_sum(ph_mb, 1, buffer, len - 2, TRUE, 0, NULL);
		if (csum_oee_ref != csum) {
			SK_ERR("pkt_copyaddr_sum: csum_oee_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oee_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(buffer));
		}
		VERIFY(csum_oee_ref == csum);

		csum = pkt_copyaddr_sum(ph_mb, 1, buffer, len - 1, TRUE, 0, NULL);
		if (csum_oeo_ref != csum) {
			SK_ERR("pkt_copyaddr_sum: csum_oeo_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oeo_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(buffer));
		}
		VERIFY(csum_oeo_ref == csum);

		csum = pkt_copyaddr_sum(ph_mb, 1, buffer + 1, len - 1, TRUE, 0, NULL);
		if (csum_ooo_ref != csum) {
			SK_ERR("pkt_copyaddr_sum: csum_ooo_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooo_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(buffer));
		}
		VERIFY(csum_ooo_ref == csum);
	}

	csum = pkt_copyaddr_sum(ph_mb, 0, buffer, len, TRUE, 0, NULL);
	if (csum_eee_ref != csum) {
		SK_ERR("pkt_copyaddr_sum: csum_eee_mismatch "
		    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eee_ref,
		    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
		    SK_KVA(buffer));
	}
	VERIFY(csum_eee_ref == csum);

	for (i = 0; i < len; i++) {
		VERIFY(buffer[i] == (i & 0xff));
	}
	kern_pbufpool_free(pp_mb, ph_mb);
	ph_mb = 0;

	if (test_unaligned) {
		/* verify copy-checksum from mbuf to packet */
		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
		csum = pkt_mcopypkt_sum(m, 0, ph_mb, 0, len, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_eee_ref != csum) {
			SK_ERR("pkt_mcopypkt_sum: csum_eee_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eee_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(m));
		}
		VERIFY(csum_eee_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;

		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
		csum = pkt_mcopypkt_sum(m, 0, ph_mb, 1, len - 2, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_eoe_ref != csum) {
			SK_ERR("pkt_mcopypkt_sum: csum_eoe_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoe_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(m));
		}
		VERIFY(csum_eoe_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;

		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
		csum = pkt_mcopypkt_sum(m, 0, ph_mb, 1, len - 1, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_eoo_ref != csum) {
			SK_ERR("pkt_mcopypkt_sum: csum_eoo_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eoo_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(m));
		}
		VERIFY(csum_eoo_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;
	}

	VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
	VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
	csum = pkt_mcopypkt_sum(m, 0, ph_mb, 0, len - 1, TRUE);
	METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
	SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
	SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
	VERIFY(__packet_finalize(ph_mb) == 0);
	if (csum_eeo_ref != csum) {
		SK_ERR("pkt_mcopypkt_sum: csum_eeo_mismatch "
		    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_eeo_ref,
		    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
		    SK_KVA(m));
	}
	VERIFY(csum_eeo_ref == csum);
	kern_pbufpool_free(pp_mb, ph_mb);
	ph_mb = 0;

	if (test_unaligned) {
		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
		csum = pkt_mcopypkt_sum(m, 1, ph_mb, 0, len - 1, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_oeo_ref != csum) {
			SK_ERR("pkt_mcopypkt_sum: csum_oeo_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oeo_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(m));
		}
		VERIFY(csum_oeo_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;

		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
		csum = pkt_mcopypkt_sum(m, 1, ph_mb, 0, len - 2, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 0);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 0;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_oee_ref != csum) {
			SK_ERR("pkt_mcopypkt_sum: csum_oee_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_oee_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(m));
		}
		VERIFY(csum_oee_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;

		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
		csum = pkt_mcopypkt_sum(m, 1, ph_mb, 1, len - 2, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_ooe_ref != csum) {
			SK_ERR("pkt_mcopypkt_sum: csum_ooe_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooe_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(m));
		}
		VERIFY(csum_ooe_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;

		VERIFY(kern_pbufpool_alloc(pp_mb, 4, &ph_mb) == 0);
		VERIFY(kern_packet_get_buflet_count(ph_mb) == 4);
		csum = pkt_mcopypkt_sum(m, 1, ph_mb, 1, len - 1, TRUE);
		METADATA_ADJUST_LEN(SK_PTR_ADDR_KQUM(ph_mb), 0, 1);
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_headroom = 1;
		SK_PTR_ADDR_KPKT(ph_mb)->pkt_l2_len = 0;
		VERIFY(__packet_finalize(ph_mb) == 0);
		if (csum_ooo_ref != csum) {
			SK_ERR("pkt_mcopypkt_sum: csum_ooo_mismatch "
			    "0x%x, 0x%x, 0x%llx, 0x%llx", csum_ooo_ref,
			    csum, SK_KVA(SK_PTR_ADDR_KQUM(ph_mb)),
			    SK_KVA(m));
		}
		VERIFY(csum_ooo_ref == csum);
		kern_pbufpool_free(pp_mb, ph_mb);
		ph_mb = 0;
	}

	kern_pbufpool_free(pp, ph);
	ph = 0;
	m_freem(m);
	m = NULL;
	kern_pbufpool_destroy(pp_mb);
	pp_mb = NULL;
	kern_pbufpool_destroy(pp);
	pp = NULL;

	kfree_data(ref_buffer, SKMEM_TEST_BUFSIZE);
	ref_buffer = NULL;
}

static void
skmem_quantum_tests(uint32_t flags)
{
	struct kern_pbufpool_init pp_init;
	struct kern_pbufpool_memory_info pp_mem_info;
	kern_pbufpool_t pp = NULL;
	kern_packet_t *phary = NULL;
	uint32_t phcnt = 0;
	kern_packet_t ph = 0;
	uint32_t i;
	errno_t err;

	flags |= KBIF_QUANTUM;

	SK_ERR("flags 0x%x", flags);

	phary = (kern_packet_t *) kalloc_data(sizeof(kern_packet_t) * MAX_PH_ARY,
	    Z_WAITOK | Z_ZERO);

	bzero(&pp_init, sizeof(pp_init));
	pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
	pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
	(void) snprintf((char *)pp_init.kbi_name, sizeof(pp_init.kbi_name),
	    "%s", "skmem_quantum_tests");
	pp_init.kbi_flags = (KBIF_QUANTUM | flags);
	pp_init.kbi_packets = 64;
	pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
	pp_init.kbi_buflets = (64 * 2);
	pp_init.kbi_ctx = NULL;
	pp_init.kbi_ctx_retain = NULL;
	pp_init.kbi_ctx_release = NULL;

	pp_init.kbi_max_frags = 4;
	/* max_frags must be 1 for quantum type */
	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
	pp_init.kbi_max_frags = 1;
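	/* a quantum pool combined with on-demand buffers must be rejected */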
	if ((flags & KBIF_QUANTUM) && (flags & KBIF_BUFFER_ON_DEMAND)) {
		VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == EINVAL);
		goto done;
	}
	VERIFY(kern_pbufpool_create(&pp_init, &pp, NULL) == 0);
	bzero(&pp_mem_info, sizeof(pp_mem_info));
	VERIFY(kern_pbufpool_get_memory_info(pp, &pp_mem_info) == 0);
	VERIFY(pp_mem_info.kpm_flags & KPMF_EXTERNAL);
	VERIFY(pp_mem_info.kpm_buflets >= pp_mem_info.kpm_packets);
	VERIFY(pp_mem_info.kpm_packets >= 64);
	VERIFY(pp_mem_info.kpm_packets <= MAX_PH_ARY);
	VERIFY(pp_mem_info.kpm_max_frags == 1);
	VERIFY(pp_mem_info.kpm_buflets >= 64);
	VERIFY(pp_mem_info.kpm_bufsize == SKMEM_TEST_BUFSIZE);
	VERIFY(kern_pbufpool_alloc(pp, 4, &ph) == EINVAL);
	/* allocate and free one at a time */
	for (i = 0, phcnt = 0; i < pp_mem_info.kpm_packets; i++) {
		boolean_t stop = FALSE;
		/*
		 * This may fail if skmem_region_mtbf is set, or if
		 * the system is short on memory. Perform retries
		 * at this layer to get at least 64 packets.
		 */
		while ((err = kern_pbufpool_alloc_nosleep(pp, 1, &ph)) != 0) {
			VERIFY(err == ENOMEM);
			if (phcnt < 64) {
				SK_ERR("retrying alloc for quantum %u", phcnt);
				delay(250 * NSEC_PER_USEC); /* 1/4 sec */
				continue;
			}
			stop = TRUE;
			break;
		}
		if (stop) {
			break;
		}
		VERIFY(ph != 0);
		VERIFY(kern_packet_get_data_length(ph) == 0);
		VERIFY(kern_packet_get_buflet_count(ph) == 1);
		phary[phcnt++] = ph;
	}
	VERIFY(phcnt >= 64);
	for (i = 0; i < phcnt; i++) {
		kern_pbufpool_free(pp, phary[i]);
		phary[i] = 0;
	}
	/* allocate and free in batch */
	phcnt = pp_mem_info.kpm_packets;
	for (;;) {
		err = kern_pbufpool_alloc_batch_nosleep(pp, 1, phary, &phcnt);
		VERIFY(err != EINVAL && err != ENOTSUP);
		if (err == ENOMEM) {
			phcnt = pp_mem_info.kpm_packets;
			SK_ERR("retrying batch alloc for %u quantums", phcnt);
			delay(250 * NSEC_PER_USEC); /* 1/4 sec */
		} else if (err == EAGAIN) {
			SK_ERR("batch alloc for %u quantums only returned %u",
			    pp_mem_info.kpm_packets, phcnt);
			break;
		} else {
			VERIFY(err == 0);
			break;
		}
	}
	VERIFY(phcnt > 0);
	for (i = 0; i < phcnt; i++) {
		VERIFY(phary[i] != 0);
		VERIFY(kern_packet_get_data_length(phary[i]) == 0);
		VERIFY(kern_packet_get_buflet_count(phary[i]) == 1);
	}
	kern_pbufpool_free_batch(pp, phary, phcnt);
	/* allocate and free one at a time (blocking) */
	for (i = 0, phcnt = 0; i < pp_mem_info.kpm_packets; i++) {
		VERIFY(kern_pbufpool_alloc(pp, 1, &ph) == 0);
		VERIFY(ph != 0);
		VERIFY(kern_packet_get_data_length(ph) == 0);
		VERIFY(kern_packet_get_buflet_count(ph) == 1);
		phary[phcnt++] = ph;
	}
	VERIFY(phcnt >= 64);
	for (i = 0; i < phcnt; i++) {
		kern_pbufpool_free(pp, phary[i]);
		phary[i] = 0;
	}
	/* allocate and free in batch (blocking) */
	bzero(&skmt_alloccb_ctx, sizeof(skmt_alloccb_ctx));
	skmt_alloccb_ctx.stc_req = phcnt;
	VERIFY(kern_pbufpool_alloc_batch_callback(pp, 1, phary, &phcnt,
	    skmem_test_alloccb, &skmt_alloccb_ctx) == 0);
	VERIFY(skmt_alloccb_ctx.stc_idx == phcnt);
	kern_pbufpool_free_batch(pp, phary, phcnt);
	kern_pbufpool_destroy(pp);
	pp = NULL;
done:
	kfree_data(phary, sizeof(kern_packet_t) * MAX_PH_ARY);
	phary = NULL;
}

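/*
 * Sweep the pool-creation flag matrix (persistence, magazines,
 * monolithic/user-mapped segments, physical contiguity, on-demand
 * buffers, cache inhibition, virtual device) across the buflet,
 * packet, and quantum test bodies above.
 */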
1408static void
1409skmem_basic_tests(void)
1410{
1411 /* basic sanity (alloc/free) tests on packet buflet KPIs */
1412 skmem_buflet_tests(0);
1413 skmem_buflet_tests(KBIF_PERSISTENT);
1414 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1415 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1416 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS |
1417 KBIF_USER_ACCESS);
1418 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1419 KBIF_USER_ACCESS);
1420 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1421 skmem_buflet_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1422 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1423 KBIF_BUFFER_ON_DEMAND);
1424 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1425 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1426 skmem_buflet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1427 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1428 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1429 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1430 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1431 KBIF_NO_MAGAZINES);
1432 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS |
1433 KBIF_USER_ACCESS);
1434 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1435 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1436 TEST_OPTION_INHIBIT_CACHE);
1437 skmem_buflet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1438 TEST_OPTION_INHIBIT_CACHE);
1439 skmem_buflet_tests(KBIF_BUFFER_ON_DEMAND | KBIF_NO_MAGAZINES);
1440 skmem_buflet_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1441
1442 /* basic sanity (alloc/free) tests on packet buflet KPIs (vdev) */
1443 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE);
1444 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
1445 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
1446 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
1447 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1448 KBIF_PHYS_CONTIGUOUS);
1449 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1450 KBIF_MONOLITHIC | KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1451 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1452 KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1453 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1454 KBIF_BUFFER_ON_DEMAND);
1455 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1456 TEST_OPTION_INHIBIT_CACHE);
1457 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1458 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1459 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1460 KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1461 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
1462 KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1463 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1464 KBIF_USER_ACCESS);
1465 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1466 KBIF_PHYS_CONTIGUOUS);
1467 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1468 KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
1469 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1470 KBIF_BUFFER_ON_DEMAND);
1471 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1472 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1473 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
1474 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1475 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
1476 skmem_buflet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
1477 TEST_OPTION_INHIBIT_CACHE);
1478
1479 /* check packet KPIs (also touches data) */
1480 skmem_packet_tests(0);
1481 skmem_packet_tests(KBIF_PHYS_CONTIGUOUS);
1482 skmem_packet_tests(KBIF_PERSISTENT);
1483 skmem_packet_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
1484 skmem_packet_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
1485 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1486 KBIF_PHYS_CONTIGUOUS | KBIF_USER_ACCESS);
1487 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1488 KBIF_USER_ACCESS);
1489 skmem_packet_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
1490 skmem_packet_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
1491 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1492 KBIF_BUFFER_ON_DEMAND);
1493 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1494 KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
1495 skmem_packet_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
1496 KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1497 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
1498 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
1499 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1500 KBIF_NO_MAGAZINES);
1501 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1502 KBIF_PHYS_CONTIGUOUS);
1503 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
1504#if 0
1505 /* XXX: commented out failed tests on ARM64e platforms */
1506 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
1507 TEST_OPTION_INHIBIT_CACHE);
1508 skmem_packet_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
1509 TEST_OPTION_INHIBIT_CACHE);
1510 skmem_packet_tests(KBIF_BUFFER_ON_DEMAND);
1511 skmem_packet_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
1512#endif

	/* check packet KPIs (also touches data) (vdev) */
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_PHYS_CONTIGUOUS);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_MONOLITHIC | KBIF_USER_ACCESS);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_BUFFER_ON_DEMAND);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    TEST_OPTION_INHIBIT_CACHE);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_PHYS_CONTIGUOUS);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_USER_ACCESS);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_BUFFER_ON_DEMAND);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_BUFFER_ON_DEMAND | KBIF_PHYS_CONTIGUOUS);
#if 0
	/* XXX: disabled; these tests fail on ARM64e platforms */
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
	skmem_packet_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
	    TEST_OPTION_INHIBIT_CACHE);
#endif

	/* check quantum KPIs */
	skmem_quantum_tests(0);
	skmem_quantum_tests(KBIF_PHYS_CONTIGUOUS);
	skmem_quantum_tests(KBIF_PERSISTENT);
	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_NO_MAGAZINES);
	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_PHYS_CONTIGUOUS);
	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
	    KBIF_USER_ACCESS);
	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_BUFFER_ON_DEMAND);
	skmem_quantum_tests(KBIF_PERSISTENT | TEST_OPTION_INHIBIT_CACHE);
	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
	    KBIF_BUFFER_ON_DEMAND);
	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
	    KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
	skmem_quantum_tests(KBIF_PERSISTENT | KBIF_MONOLITHIC |
	    KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_PHYS_CONTIGUOUS);
	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS);
	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
	    KBIF_PHYS_CONTIGUOUS);
	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_USER_ACCESS |
	    TEST_OPTION_INHIBIT_CACHE);
	skmem_quantum_tests(KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND |
	    TEST_OPTION_INHIBIT_CACHE);
	skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND);
	skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | KBIF_NO_MAGAZINES);
	skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | KBIF_PHYS_CONTIGUOUS);
	skmem_quantum_tests(KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);

	/* check quantum KPIs (vdev) */
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_NO_MAGAZINES);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PHYS_CONTIGUOUS);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_MONOLITHIC | KBIF_USER_ACCESS);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_BUFFER_ON_DEMAND);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    TEST_OPTION_INHIBIT_CACHE);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_MONOLITHIC | KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_PERSISTENT |
	    KBIF_MONOLITHIC | KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_PHYS_CONTIGUOUS);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_USER_ACCESS | KBIF_PHYS_CONTIGUOUS);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_USER_ACCESS);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_BUFFER_ON_DEMAND);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_USER_ACCESS | TEST_OPTION_INHIBIT_CACHE);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_MONOLITHIC |
	    KBIF_BUFFER_ON_DEMAND | TEST_OPTION_INHIBIT_CACHE);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
	    KBIF_PHYS_CONTIGUOUS);
	skmem_quantum_tests(KBIF_VIRTUAL_DEVICE | KBIF_BUFFER_ON_DEMAND |
	    TEST_OPTION_INHIBIT_CACHE);
}

static void
skmem_advanced_tests(int n, int32_t th_max, uint32_t mode, boolean_t nosleep,
    uint32_t flags)
{
	struct kern_pbufpool_init pp_init;
	kern_packet_t mph = 0;
	kern_buflet_t buflet = 0;
	int i;

	VERIFY(skmth_pp == NULL);
	VERIFY(skmth_cnt == 0);

	bzero(&pp_init, sizeof(pp_init));
	pp_init.kbi_version = KERN_PBUFPOOL_CURRENT_VERSION;
	pp_init.kbi_buf_seg_size = skmem_usr_buf_seg_size;
	pp_init.kbi_flags |= flags;
	(void) snprintf((char *)pp_init.kbi_name,
	    sizeof(pp_init.kbi_name), "%s", "skmem_advanced");
	/*
	 * Prepare: all modes share the same pool geometry and differ
	 * only in creation flags; mode 8 additionally sizes the pool
	 * for the clone tests.
	 */
	pp_init.kbi_packets = th_max;
	pp_init.kbi_bufsize = SKMEM_TEST_BUFSIZE;
	pp_init.kbi_max_frags = 1;

	switch (mode) {
	case 0:
		pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS;
		break;

	case 1:
		pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
		    KBIF_VIRTUAL_DEVICE;
		break;

	case 2:
		pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
		    KBIF_PERSISTENT;
		break;

	case 3:
		pp_init.kbi_flags |= KBIF_MONOLITHIC | KBIF_USER_ACCESS |
		    KBIF_PERSISTENT | KBIF_VIRTUAL_DEVICE;
		break;

	case 4:
		pp_init.kbi_flags |= KBIF_PERSISTENT | KBIF_USER_ACCESS;
		break;

	case 5:
		pp_init.kbi_flags |= KBIF_PERSISTENT | KBIF_VIRTUAL_DEVICE;
		break;

	case 6:
		/* default flags only */
		break;

	case 7:
		pp_init.kbi_flags |= KBIF_VIRTUAL_DEVICE;
		break;

	case 8:
		/* headroom: up to one packet and one clone per thread, plus the master */
		pp_init.kbi_packets = (th_max * 2) + 1;
		pp_init.kbi_flags |= KBIF_BUFFER_ON_DEMAND;
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	VERIFY(kern_pbufpool_create(&pp_init, &skmth_pp, NULL) == 0);

	SK_ERR("%d: th_max %d mode %u nosleep %u nomagazines %u",
	    n, th_max, mode, nosleep, !!(flags & KBIF_NO_MAGAZINES));

	if (pp_init.kbi_flags & KBIF_BUFFER_ON_DEMAND) {
		/* create one master packet for the worker threads to clone */
		VERIFY(kern_pbufpool_alloc(skmth_pp, 1, &mph) == 0);
		VERIFY((buflet = kern_packet_get_next_buflet(mph, NULL)) != NULL);
		VERIFY(kern_buflet_set_data_length(buflet,
		    SKMEM_TEST_BUFSIZE) == 0);
		VERIFY(__packet_finalize(mph) == 0);
	}

	bzero(skmth_info, skmth_info_size);

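	/*
	 * Hand each worker the master packet (if any) as its clone
	 * source; skmem_test_func() exercises it on its first pass.
	 */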
	/* spawn as many threads as there are CPUs */
	for (i = 0; i < th_max; i++) {
		skmth_info[i].sti_mph = mph;
		skmth_info[i].sti_nosleep = nosleep;
		if (kernel_thread_start(skmem_test_func, (void *)(uintptr_t)i,
		    &skmth_info[i].sti_thread) != KERN_SUCCESS) {
			panic("Failed to create skmem test thread");
			/* NOTREACHED */
			__builtin_unreachable();
		}
	}

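	/*
	 * Startup rendezvous: each worker increments skmth_cnt and then
	 * blocks on skmth_run; wait here until all of them have checked
	 * in before releasing them at once.
	 */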
	lck_mtx_lock(&skmt_lock);
	do {
		/* 100 ms (expressed in nanoseconds) */
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * USEC_PER_SEC };
		(void) msleep(&skmth_cnt, &skmt_lock, (PZERO - 1),
		    "skmtstartw", &ts);
	} while (skmth_cnt < th_max);
	VERIFY(skmth_cnt == th_max);
	lck_mtx_unlock(&skmt_lock);

	lck_mtx_lock(&skmt_lock);
	VERIFY(!skmth_run);
	skmth_run = TRUE;
	wakeup((caddr_t)&skmth_run);
	lck_mtx_unlock(&skmt_lock);

	/* wait until all threads are done */
	lck_mtx_lock(&skmt_lock);
	do {
		/* 100 ms (expressed in nanoseconds) */
		struct timespec ts = { .tv_sec = 0, .tv_nsec = 100 * USEC_PER_SEC };
		(void) msleep(&skmth_cnt, &skmt_lock, (PZERO - 1),
		    "skmtstopw", &ts);
	} while (skmth_cnt != 0);
	skmth_run = FALSE;
	lck_mtx_unlock(&skmt_lock);

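	/*
	 * With all clones freed by the workers, the master must hold
	 * the buffer's only remaining reference.
	 */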
	if (mph != 0) {
		VERIFY((buflet = kern_packet_get_next_buflet(mph, NULL)) != NULL);
		VERIFY(buflet->buf_ctl->bc_usecnt == 1);
		kern_pbufpool_free(skmth_pp, mph);
		mph = 0;
	}
	kern_pbufpool_destroy(skmth_pp);
	skmth_pp = NULL;
}

__attribute__((noreturn))
static void
skmem_test_func(void *v, wait_result_t w)
{
#pragma unused(w)
	int i = (int)(uintptr_t)v, c;
	kern_packet_t ph = 0;

	/* let skmem_test_start() know we're ready */
	lck_mtx_lock(&skmt_lock);
	os_atomic_inc(&skmth_cnt, relaxed);
	wakeup((caddr_t)&skmth_cnt);
	do {
		(void) msleep(&skmth_run, &skmt_lock, (PZERO - 1),
		    "skmtfuncw", NULL);
	} while (!skmth_run);
	lck_mtx_unlock(&skmt_lock);

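	/*
	 * Fixed number of stress passes; each pass allocates and frees
	 * a packet, optionally clones the master once, and then forces
	 * cache reaps to exercise the purge paths.
	 */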
	for (c = 0; c < 41; c++) {
		/* run alloc tests */
		VERIFY(skmth_pp != NULL);
		if (skmth_info[i].sti_nosleep) {
			errno_t err = kern_pbufpool_alloc_nosleep(skmth_pp,
			    1, &ph);
			VERIFY(ph != 0 || err != 0);
		} else {
			VERIFY(kern_pbufpool_alloc(skmth_pp, 1, &ph) == 0);
		}

		if (ph != 0) {
			kern_pbufpool_free(skmth_pp, ph);
			ph = 0;
		}

		/* run clone tests */
		if (skmth_info[i].sti_mph != 0) {
			kern_buflet_t buflet, buflet2;
			kern_obj_idx_seg_t buf_idx_seg, buf2_idx_seg;

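			/*
			 * A light clone (KPKT_COPY_LIGHT) shares buffer
			 * state with its source; the checks below verify
			 * that both packets see identical buffer
			 * addresses, limits, offsets, lengths and
			 * segment indices.
			 */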
			if (skmth_info[i].sti_nosleep) {
				errno_t err;
				err = kern_packet_clone_nosleep(skmth_info[i].sti_mph,
				    &skmth_info[i].sti_mpc, KPKT_COPY_LIGHT);
				VERIFY(skmth_info[i].sti_mpc != 0 || err != 0);
			} else {
				VERIFY(kern_packet_clone(skmth_info[i].sti_mph,
				    &skmth_info[i].sti_mpc, KPKT_COPY_LIGHT) == 0);
			}
			if (skmth_info[i].sti_mpc != 0) {
				VERIFY(!(QUM_ADDR(skmth_info[i].sti_mpc)->qum_qflags &
				    QUM_F_FINALIZED));
				VERIFY((buflet = kern_packet_get_next_buflet(
					skmth_info[i].sti_mph, NULL)) != NULL);
				VERIFY((buflet2 = kern_packet_get_next_buflet(
					skmth_info[i].sti_mpc, NULL)) != NULL);
				VERIFY(kern_buflet_get_object_address(buflet) ==
				    kern_buflet_get_object_address(buflet2));
				VERIFY(kern_buflet_get_data_address(buflet) ==
				    kern_buflet_get_data_address(buflet2));
				VERIFY(kern_buflet_get_data_limit(buflet) ==
				    kern_buflet_get_data_limit(buflet2));
				VERIFY(kern_buflet_get_data_offset(buflet) ==
				    kern_buflet_get_data_offset(buflet2));
				VERIFY(kern_buflet_get_data_length(buflet) ==
				    kern_buflet_get_data_length(buflet2));
				VERIFY(kern_buflet_get_object_segment(buflet,
				    &buf_idx_seg) ==
				    kern_buflet_get_object_segment(buflet2,
				    &buf2_idx_seg));
				VERIFY(buf_idx_seg == buf2_idx_seg);
				VERIFY(buflet->buf_ctl == buflet2->buf_ctl);
				VERIFY(__packet_finalize(skmth_info[i].sti_mpc) == 0);
				kern_pbufpool_free(skmth_pp, skmth_info[i].sti_mpc);
				skmth_info[i].sti_mpc = 0;
			}
			/* clear the source so the clone test runs only once */
			skmth_info[i].sti_mph = 0;
		}

		/* force cache purges to exercise related code paths */
		if (skmth_pp->pp_kmd_cache != NULL) {
			skmem_cache_reap_now(skmth_pp->pp_kmd_cache, TRUE);
		}
		if (PP_BUF_CACHE_DEF(skmth_pp) != NULL) {
			skmem_cache_reap_now(PP_BUF_CACHE_DEF(skmth_pp), TRUE);
		}
		if (PP_KBFT_CACHE_DEF(skmth_pp) != NULL) {
			skmem_cache_reap_now(PP_KBFT_CACHE_DEF(skmth_pp), TRUE);
		}
	}

	/* let skmem_test_start() know we're finished */
	lck_mtx_lock(&skmt_lock);
	VERIFY(os_atomic_dec_orig(&skmth_cnt, relaxed) != 0);
	wakeup((caddr_t)&skmth_cnt);
	lck_mtx_unlock(&skmt_lock);

	/* for the extra refcnt from kernel_thread_start() */
	thread_deallocate(current_thread());

	thread_terminate(current_thread());
	/* NOTREACHED */
	__builtin_unreachable();
}

static int skmem_test_objs;

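/*
 * Test object: the constructor fills sto_val[] with cookies derived
 * from the object's own address, and the destructor re-checks them to
 * catch corruption and unbalanced construct/destruct pairs.
 */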
struct skmem_test_obj {
	uint64_t sto_val[2];
};

static int
skmem_test_ctor(struct skmem_obj_info *oi, struct skmem_obj_info *oim,
    void *arg, uint32_t skmflag)
{
#pragma unused(skmflag)
	struct skmem_test_obj *sto = SKMEM_OBJ_ADDR(oi);

	VERIFY(oim == NULL);
	VERIFY(arg == &skmem_test_init);
	VERIFY(SKMEM_OBJ_SIZE(oi) >= sizeof(struct skmem_test_obj));
	sto->sto_val[0] = (uint64_t)(void *)sto ^
	    (uint64_t)(void *)&sto->sto_val[0];
	sto->sto_val[1] = (uint64_t)(void *)sto ^
	    (uint64_t)(void *)&sto->sto_val[1];
	os_atomic_inc(&skmem_test_objs, relaxed);

	return 0;
}

static void
skmem_test_dtor(void *addr, void *arg)
{
	struct skmem_test_obj *sto = addr;

	VERIFY(arg == &skmem_test_init);
	VERIFY((sto->sto_val[0] ^ (uint64_t)(void *)&sto->sto_val[0]) ==
	    (uint64_t)(void *)sto);
	VERIFY((sto->sto_val[1] ^ (uint64_t)(void *)&sto->sto_val[1]) ==
	    (uint64_t)(void *)sto);
	VERIFY(skmem_test_objs > 0);
	os_atomic_dec(&skmem_test_objs, relaxed);
}

static void
skmem_tests(uint32_t align)
{
	struct skmem_cache *skm;
	uint32_t bufsize = sizeof(struct skmem_test_obj);

	uint32_t objary_max = (uint32_t)MAX_PH_ARY;
	void **objary = NULL;
	char name[64];

	VERIFY(align != 0);

	SK_ERR("bufsize %u align %u", bufsize, align);

	objary = kalloc_type(void *, objary_max, Z_WAITOK | Z_ZERO);

	(void) snprintf(name, sizeof(name), "skmem_test.%u.%u", bufsize, align);

	skm = skmem_cache_create(name, bufsize, align, skmem_test_ctor,
	    skmem_test_dtor, NULL, &skmem_test_init, NULL, 0);

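	/*
	 * Fill the whole array, checking each object's alignment, then
	 * free everything; skmem_test_objs must be back to zero once
	 * the cache is destroyed, proving ctor/dtor calls balanced.
	 */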
	VERIFY(skmem_test_objs == 0);
	for (uint32_t i = 0; i < objary_max; i++) {
		objary[i] = skmem_cache_alloc(skm, SKMEM_SLEEP);
		VERIFY(objary[i] != NULL);
		VERIFY(IS_P2ALIGNED(objary[i], align));
	}
	for (uint32_t i = 0; i < objary_max; i++) {
		VERIFY(objary[i] != NULL);
		skmem_cache_free(skm, objary[i]);
		objary[i] = NULL;
	}
	skmem_cache_destroy(skm);
	VERIFY(skmem_test_objs == 0);

	kfree_type(void *, objary_max, objary);
	objary = NULL;
}

static void
skmem_test_start(void *v, wait_result_t w)
{
	int32_t ncpus = ml_wait_max_cpus();
	int error = 0, n;
	uint32_t flags;
	uint64_t mtbf_saved;

	lck_mtx_lock(&skmt_lock);
	VERIFY(!skmt_busy);
	skmt_busy = 1;
	skmem_cache_test_start(1);	/* 1 second update interval */
	lck_mtx_unlock(&skmt_lock);

	VERIFY(skmth_info == NULL);
	skmth_info_size = sizeof(struct skmt_thread_info) * ncpus;
	skmth_info = (struct skmt_thread_info *) kalloc_data(skmth_info_size,
	    Z_WAITOK | Z_ZERO);

	/*
	 * Sanity checks: exercise skmem_cache_magazine_max() across a
	 * range of sizes.
	 */
	(void) skmem_cache_magazine_max(1);
	(void) skmem_cache_magazine_max(32);
	(void) skmem_cache_magazine_max(64);
	(void) skmem_cache_magazine_max(128);
	(void) skmem_cache_magazine_max(256);
	(void) skmem_cache_magazine_max(512);
	(void) skmem_cache_magazine_max(1024);
	(void) skmem_cache_magazine_max(2048);
	(void) skmem_cache_magazine_max(4096);
	(void) skmem_cache_magazine_max(8192);
	(void) skmem_cache_magazine_max(16384);
	(void) skmem_cache_magazine_max(32768);
	(void) skmem_cache_magazine_max(65536);

	/*
	 * skmem allocator tests
	 */
	skmem_tests(8);
	skmem_tests(16);
	skmem_tests(32);
	skmem_tests(64);
	skmem_tests(128);

	/*
	 * Basic packet buffer pool sanity tests
	 */
	skmem_basic_tests();

	/*
	 * Multi-threaded alloc and free tests.  Modes 0-3 run both the
	 * blocking and the non-blocking (nosleep) variants here; the
	 * non-blocking variants of modes 4-5 are deferred until the
	 * MTBF is lowered below.  Odd-numbered rounds bypass the
	 * magazines.
	 */
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 0, FALSE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 0, TRUE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 1, FALSE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 1, TRUE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 2, FALSE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 2, TRUE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 3, FALSE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 3, TRUE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 4, FALSE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 5, FALSE, flags);
	}

	/*
	 * Modes 4-5 deal with persistent/mirrored regions.  To maximize
	 * the chance of exercising the allocation-failure handling,
	 * lower the MTBF (if set) to the minimum possible; it is
	 * restored to the saved value afterwards.
	 */
	mtbf_saved = skmem_region_get_mtbf();
	if (mtbf_saved != 0) {
		skmem_region_set_mtbf(SKMEM_REGION_MTBF_MIN);
	}

	/*
	 * Multi-threaded alloc and free tests (non-blocking).
	 */

	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 4, TRUE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 5, TRUE, flags);
	}

	/*
	 * Restore the MTBF to its previously set value.
	 */
	if (mtbf_saved != 0) {
		skmem_region_set_mtbf(mtbf_saved);
	}

	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 6, FALSE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 6, TRUE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 7, FALSE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 7, TRUE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 8, FALSE, flags);
	}
	for (n = 0; n < 7; n++) {
		flags = ((n & 1) ? KBIF_NO_MAGAZINES : 0);
		skmem_advanced_tests(n, ncpus, 8, TRUE, flags);
	}

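	/* all tests passed; mark the tester enabled and wake the sysctl waiter */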
	lck_mtx_lock(&skmt_lock);
	skmt_enabled = 1;
	wakeup((caddr_t)&skmt_enabled);
	lck_mtx_unlock(&skmt_lock);

	if (error != 0) {
		skmem_test_stop(v, w);
	}
}

static void
skmem_test_stop(void *v, wait_result_t w)
{
#pragma unused(v, w)

	if (skmth_info != NULL) {
		kfree_data(skmth_info, skmth_info_size);
		skmth_info = NULL;
	}

	lck_mtx_lock(&skmt_lock);
	skmem_cache_test_stop();
	VERIFY(skmt_busy);
	skmt_busy = 0;
	skmt_enabled = 0;
	wakeup((caddr_t)&skmt_enabled);
	lck_mtx_unlock(&skmt_lock);
}

static int
sysctl_skmem_test(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, newvalue, changed;

	lck_mtx_lock(&skmt_lock);
	if ((error = sysctl_io_number(req, skmt_enabled, sizeof(int),
	    &newvalue, &changed)) != 0) {
		goto done;
	}

	if (changed && skmt_enabled != newvalue) {
		thread_t th;
		thread_continue_t func;

		if (newvalue && skmt_busy) {
			SK_ERR("Older skmem test instance is still active");
			error = EBUSY;
			goto done;
		}

		if (newvalue) {
			func = skmem_test_start;
		} else {
			func = skmem_test_stop;
		}

		if (kernel_thread_start(func, NULL, &th) != KERN_SUCCESS) {
			SK_ERR("Failed to create skmem test action thread");
			error = EBUSY;
			goto done;
		}
		do {
			SK_DF(SK_VERB_MEM, "Waiting for %s to complete",
			    newvalue ? "startup" : "shutdown");
			error = msleep(&skmt_enabled, &skmt_lock,
			    PWAIT | PCATCH, "skmtw", NULL);
			/* BEGIN CSTYLED */
			/*
			 * Loop exit conditions:
			 * - we were interrupted
			 * OR
			 * - we are starting up and are enabled
			 *   (Startup complete)
			 * OR
			 * - we are starting up and are not busy
			 *   (Failed startup)
			 * OR
			 * - we are shutting down and are not busy
			 *   (Shutdown complete)
			 */
			/* END CSTYLED */
		} while (!((error == EINTR) || (newvalue && skmt_enabled) ||
		    (newvalue && !skmt_busy) || (!newvalue && !skmt_busy)));

		thread_deallocate(th);
	}

done:
	lck_mtx_unlock(&skmt_lock);
	return error;
}

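/*
 * Usage (DEVELOPMENT/DEBUG kernels), e.g.:
 *	sysctl kern.skywalk.mem.test=1	# start the tests
 *	sysctl kern.skywalk.mem.test=0	# tear down
 */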
SYSCTL_PROC(_kern_skywalk_mem, OID_AUTO, test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
    sysctl_skmem_test, "I", "Start Skywalk memory test");

__typed_allocators_ignore_pop

#endif /* DEVELOPMENT || DEBUG */