/*
 * Copyright (c) 2018-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if (DEVELOPMENT || DEBUG)

#pragma clang optimize off

#include <libkern/OSAtomic.h>
#include <os/refcnt.h>
#include <skywalk/os_skywalk_private.h>
#include <skywalk/lib/cuckoo_hashtable.h>

#define CUCKOO_TEST_TAG "com.apple.skywalk.libcuckoo.test"
SKMEM_TAG_DEFINE(cuckoo_test_tag, CUCKOO_TEST_TAG);

os_refgrp_decl(static, cht_obj_refgrp, "CuckooTestRefGroup", NULL);
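
/*
 * Self-test for the Skywalk Cuckoo hashtable (see
 * skywalk/lib/cuckoo_hashtable.h).  The test is driven from userspace
 * through the sysctl handler at the bottom of this file: writing a
 * non-zero value spawns cuckoo_test_start(), which runs the
 * single-threaded basic tests followed by three concurrent suites
 * (add-only, delete-only, and mixed add/delete); writing zero spawns
 * cuckoo_test_stop() to tear everything down.
 */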

static void cuckoo_test_start(void *, wait_result_t);
static void cuckoo_test_stop(void *, wait_result_t);

extern unsigned int ml_wait_max_cpus(void);

// threading related
static int cht_inited = 0;
static int cht_enabled;
static int cht_busy;

decl_lck_mtx_data(static, cht_lock);

static struct cuckoo_hashtable *h = NULL;

struct cht_thread_conf {
	thread_t ctc_thread;    /* thread instance */
	uint32_t ctc_nthreads;  /* number of threads */
	uint32_t ctc_id;        /* thread id */
} __attribute__((aligned(CHANNEL_CACHE_ALIGN_MAX)));

static struct cht_thread_conf *chth_confs;
static uint32_t chth_nthreads;
static uint32_t chth_cnt;
static boolean_t chth_run;

typedef enum {
	COS_NOT_ADDED = 0,      /* not inserted, available for insertion */
	COS_BUSY = -1,          /* being inserted/deleted */
	COS_ADDED = 1,          /* inserted, available for deletion */
} co_state_t;
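
/*
 * co_state transitions are NOT_ADDED -> BUSY -> ADDED and back.  The
 * concurrent mixed test claims an object by atomically moving it to
 * COS_BUSY (os_atomic_cmpxchg) before touching the table, so only one
 * thread ever inserts or deletes a given object at a time.
 */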

// Cuckoo hashtable key object

struct cht_obj {
	struct cuckoo_node co_cnode;    // cuckoo node
	int64_t co_key;                 // unique key
	uint32_t co_hash;               // dummy hash value (not collision-free)
	os_refcnt_t co_refcnt;          // reference count
	volatile int32_t co_state;      // co_state_t
	uint32_t co_seen;               // number of times seen
};

#if XNU_PLATFORM_WatchOS
static const uint32_t CHT_OBJ_MAX = 16 * 1024;
#else /* XNU_PLATFORM_WatchOS */
static const uint32_t CHT_OBJ_MAX = 512 * 1024;
#endif /* !XNU_PLATFORM_WatchOS */
static struct cht_obj *cht_objs;

static int
cht_obj_cmp__(struct cuckoo_node *node, void *key)
{
	struct cht_obj *co = container_of(node, struct cht_obj, co_cnode);
	int64_t key1 = *(int64_t *)key;

	if (co->co_key < key1) {
		return -1;
	} else if (co->co_key > key1) {
		return 1;
	}

	return 0;
}

static void
cht_obj_retain(struct cht_obj *co)
{
	(void)os_ref_retain(&co->co_refcnt);
}

static void
cht_obj_retain__(struct cuckoo_node *node)
{
	struct cht_obj *co = container_of(node, struct cht_obj, co_cnode);
	cht_obj_retain(co);
}

static void
cht_obj_release(struct cht_obj *co)
{
	(void)os_ref_release(&co->co_refcnt);
}

static void
cht_obj_release__(struct cuckoo_node *node)
{
	struct cht_obj *co = container_of(node, struct cht_obj, co_cnode);
	cht_obj_release(co);
}

static int
cht_obj_refcnt(struct cht_obj *co)
{
	return os_ref_get_count(&co->co_refcnt);
}

static struct cuckoo_hashtable_params params_template = {
	.cht_capacity = 1024,
	.cht_obj_cmp = cht_obj_cmp__,
	.cht_obj_retain = cht_obj_retain__,
	.cht_obj_release = cht_obj_release__,
};
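
/*
 * The retain/release callbacks let the table manage object lifetime.
 * Judging by the reference-count assertions in the tests below, the table
 * takes a reference when a node is added and for every successful lookup,
 * and drops its reference when the node is deleted.
 */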

void
cht_test_init(void)
{
	if (OSCompareAndSwap(0, 1, &cht_inited)) {
		lck_mtx_init(&cht_lock, &sk_lock_group, &sk_lock_attr);
	}
}

void
cht_test_fini(void)
{
	lck_mtx_destroy(&cht_lock, &sk_lock_group);
}

static void
cht_obj_init(void)
{
	// init testing objects
	cht_objs = sk_alloc_type_array(struct cht_obj, CHT_OBJ_MAX,
	    Z_WAITOK, cuckoo_test_tag);
	VERIFY(cht_objs != NULL);

	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		cht_objs[i].co_key = i;
		do {
			read_random(&cht_objs[i].co_hash, sizeof(cht_objs[i].co_hash));
		} while (cht_objs[i].co_hash == 0);
		os_ref_init(&cht_objs[i].co_refcnt, &cht_obj_refgrp);
		cht_objs[i].co_state = COS_NOT_ADDED;
	}
}

static void
cht_obj_fini(void)
{
	VERIFY(cht_objs != NULL);
	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		ASSERT(os_ref_release(&cht_objs[i].co_refcnt) == 0);
		cht_objs[i].co_state = COS_NOT_ADDED;
		cht_objs[i].co_seen = 0;
	}
	// free testing objects
	sk_free_type_array(struct cht_obj, CHT_OBJ_MAX, cht_objs);
}

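/*
 * Single-threaded sanity tests: create a table, exercise add/find/delete
 * with two nodes sharing the same hash, then fill the table with all
 * CHT_OBJ_MAX objects, walk it with cuckoo_hashtable_foreach(), empty it
 * again, verify that cuckoo_hashtable_try_shrink() reduces the memory
 * footprint, run the health check, and free the table.
 */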
static void
cht_basic_tests(void)
{
	SK_ERR("start");

	// Cuckoo hashtable creation
	h = cuckoo_hashtable_create(&params_template);

	// basic add/del
	struct cht_obj co1 = {
		.co_cnode = {NULL},
		.co_key = -1,
		.co_hash = 1,
		.co_state = COS_NOT_ADDED,
		.co_seen = 0
	};
	struct cht_obj co2 = {
		.co_cnode = {NULL},
		.co_key = -2,
		.co_hash = 1,
		.co_state = COS_NOT_ADDED,
		.co_seen = 0
	};
	os_ref_init(&co1.co_refcnt, &cht_obj_refgrp);
	os_ref_init(&co2.co_refcnt, &cht_obj_refgrp);

	struct cuckoo_node *node = NULL;
	__block struct cht_obj *co = NULL;
	int error = 0;

	// add objs with duplicate hash
	error = cuckoo_hashtable_add_with_hash(h, &co1.co_cnode, co1.co_hash);
	ASSERT(error == 0);

	error = cuckoo_hashtable_add_with_hash(h, &co2.co_cnode, co2.co_hash);
	ASSERT(error == 0);

	ASSERT(cuckoo_hashtable_entries(h) == 2);

	node = cuckoo_hashtable_find_with_hash(h, &co1.co_key, co1.co_hash);
	ASSERT(node != NULL);
	ASSERT(node == &co1.co_cnode);

	node = cuckoo_hashtable_find_with_hash(h, &co2.co_key, co2.co_hash);
	ASSERT(node != NULL);
	ASSERT(node == &co2.co_cnode);

	cuckoo_hashtable_del(h, &co1.co_cnode, co1.co_hash);

	node = cuckoo_hashtable_find_with_hash(h, &co1.co_key, co1.co_hash);
	ASSERT(node == NULL);

	node = cuckoo_hashtable_find_with_hash(h, &co2.co_key, co2.co_hash);
	ASSERT(node != NULL);
	ASSERT(node == &co2.co_cnode);

	cuckoo_hashtable_del(h, &co2.co_cnode, co2.co_hash);
	node = cuckoo_hashtable_find_with_hash(h, &co2.co_key, co2.co_hash);
	ASSERT(node == NULL);

	ASSERT(cuckoo_hashtable_entries(h) == 0);

	// add all objs
	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		co = &cht_objs[i];
		error = cuckoo_hashtable_add_with_hash(h, &co->co_cnode, co->co_hash);
		ASSERT(error == 0);
		ASSERT(cuckoo_hashtable_entries(h) == i + 1);
		co->co_state = COS_ADDED;
	}

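	/*
	 * At this point each object holds one reference from initialization
	 * plus one taken by the table on insertion; a successful find takes
	 * a third, which is dropped right away so the count returns to two.
	 */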
	// find all objs
	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		co = &cht_objs[i];
		ASSERT(co->co_state == COS_ADDED);
		node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
		ASSERT(node != NULL);
		ASSERT(node == &co->co_cnode);
		ASSERT(cht_obj_refcnt(co) == 3);
		cht_obj_release(co);
	}

	// walk all objs
	cuckoo_hashtable_foreach(h, ^(struct cuckoo_node *curr_node, uint32_t curr_hash) {
		co = container_of(curr_node, struct cht_obj, co_cnode);
		ASSERT(co->co_hash == curr_hash);
		ASSERT(cht_obj_refcnt(co) == 2);
		co->co_seen++;
	});

	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		co = &cht_objs[i];
		ASSERT(co->co_seen == 1);
	}

	size_t memory_use_before_shrink = cuckoo_hashtable_memory_footprint(h);

	// del all objs
	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		co = &cht_objs[i];
		ASSERT(co->co_state == COS_ADDED);
		node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
		ASSERT(cht_obj_refcnt(co) == 3);
		cuckoo_hashtable_del(h, &co->co_cnode, co->co_hash);
		cht_obj_release(co);
		ASSERT(cht_obj_refcnt(co) == 1);
		ASSERT(cuckoo_hashtable_entries(h) == CHT_OBJ_MAX - i - 1);
		co->co_seen = 0;
	}

	// shrink
	cuckoo_hashtable_try_shrink(h);

	ASSERT(cuckoo_hashtable_memory_footprint(h) < memory_use_before_shrink);

	// self health check
	cuckoo_hashtable_health_check(h);

	cuckoo_hashtable_free(h);

	SK_ERR("done");
}

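/*
 * Start/stop rendezvous between the worker threads and
 * cht_concurrent_tests(): each worker bumps chth_cnt and sleeps until the
 * orchestrator sets chth_run, then decrements chth_cnt when it is done so
 * the orchestrator knows all workers have finished.
 */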
static void
cht_concurrent_ops_begin(void)
{
	/* let cht_concurrent_tests() know we're ready */
	lck_mtx_lock(&cht_lock);
	os_atomic_inc(&chth_cnt, relaxed);
	wakeup((caddr_t)&chth_cnt);

	do {
		(void) msleep(&chth_run, &cht_lock, (PZERO - 1),
		    "chthfuncw", NULL);
	} while (!chth_run);
	lck_mtx_unlock(&cht_lock);
}

static void
cht_concurrent_ops_done(void)
{
	/* let cht_concurrent_tests() know we're finished */
	lck_mtx_lock(&cht_lock);
	VERIFY(os_atomic_dec_orig(&chth_cnt, relaxed) != 0);
	wakeup((caddr_t)&chth_cnt);
	lck_mtx_unlock(&cht_lock);
}

static void
cht_concurrent_add_init(void)
{
	h = cuckoo_hashtable_create(&params_template);
}

static void
cht_concurrent_add(void *v, wait_result_t w)
{
#pragma unused(w)
	cht_concurrent_ops_begin();

	struct cht_thread_conf *conf = v;
	uint32_t objs_per_cpu = CHT_OBJ_MAX / conf->ctc_nthreads;
	uint32_t objs_start_idx = objs_per_cpu * conf->ctc_id;
	uint32_t objs_to_add = objs_per_cpu;

	// the last thread adds any trailing objs
	if (conf->ctc_id == conf->ctc_nthreads - 1) {
		objs_to_add += (CHT_OBJ_MAX % conf->ctc_nthreads);
	}

	for (uint32_t i = 0; i < objs_to_add; i++) {
		struct cht_obj *co = &cht_objs[objs_start_idx + i];
		int error = cuckoo_hashtable_add_with_hash(h, &co->co_cnode, co->co_hash);
		ASSERT(error == 0);
		co->co_state = COS_ADDED;

		struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
		ASSERT(node != NULL);
		ASSERT(node == &co->co_cnode);
		cht_obj_release(co);
	}

	cht_concurrent_ops_done();
}

static void
cht_concurrent_add_check(void)
{
	__block struct cht_obj *co = NULL;
	struct cuckoo_node *node = NULL;

	// find all objs
	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		co = &cht_objs[i];
		ASSERT(co->co_state == COS_ADDED);
		node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
		ASSERT(node != NULL);
		ASSERT(node == &co->co_cnode);
		ASSERT(cht_obj_refcnt(co) == 3);
		cht_obj_release(co);
	}

	// walk all objs
	cuckoo_hashtable_foreach(h, ^(struct cuckoo_node *curr_node, uint32_t curr_hash) {
		co = container_of(curr_node, struct cht_obj, co_cnode);
		ASSERT(co->co_hash == curr_hash);
		ASSERT(cht_obj_refcnt(co) == 2);
		co->co_seen++;
	});

	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		co = &cht_objs[i];
		//ASSERT(co->co_seen == 1);
	}
}

static void
cht_concurrent_add_fini(void)
{
	struct cht_obj *co = NULL;
	struct cuckoo_node *node = NULL;

	// del all objs
	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		co = &cht_objs[i];
		ASSERT(co->co_state == COS_ADDED);
		node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
		ASSERT(cht_obj_refcnt(co) == 3);
		cuckoo_hashtable_del(h, &co->co_cnode, co->co_hash);
		cht_obj_release(co);
		ASSERT(cht_obj_refcnt(co) == 1);
		ASSERT(cuckoo_hashtable_entries(h) == CHT_OBJ_MAX - i - 1);
	}

	cuckoo_hashtable_free(h);
}


static void
cht_concurrent_del_init(void)
{
	h = cuckoo_hashtable_create(&params_template);

	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		struct cht_obj *co = &cht_objs[i];
		int error = cuckoo_hashtable_add_with_hash(h, &co->co_cnode, co->co_hash);
		ASSERT(error == 0);
		ASSERT(cuckoo_hashtable_entries(h) == i + 1);
		co->co_state = COS_ADDED;
	}
}

static void
cht_concurrent_del(void *v, wait_result_t w)
{
#pragma unused(w)
	cht_concurrent_ops_begin();

	struct cht_thread_conf *conf = v;
	uint32_t objs_per_cpu = CHT_OBJ_MAX / conf->ctc_nthreads;
	uint32_t objs_start_idx = objs_per_cpu * conf->ctc_id;
	uint32_t objs_to_del = objs_per_cpu;

	// the last thread deletes any trailing objs
	if (conf->ctc_id == conf->ctc_nthreads - 1) {
		objs_to_del += (CHT_OBJ_MAX % conf->ctc_nthreads);
	}

	for (uint32_t i = 0; i < objs_to_del; i++) {
		struct cht_obj *co = &cht_objs[objs_start_idx + i];
		int error = cuckoo_hashtable_del(h, &co->co_cnode, co->co_hash);
		ASSERT(error == 0);
		co->co_state = COS_NOT_ADDED;

		struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
		ASSERT(node == NULL);
		ASSERT(cht_obj_refcnt(co) == 1);
	}

	cht_concurrent_ops_done();
}

static void
cht_concurrent_del_check(void)
{
	ASSERT(cuckoo_hashtable_entries(h) == 0);

	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		struct cht_obj *co = &cht_objs[i];
		struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
		ASSERT(node == NULL);
		ASSERT(cht_obj_refcnt(co) == 1);
	}
}

static void
cht_concurrent_del_fini(void)
{
	cuckoo_hashtable_free(h);
}

static void
cht_concurrent_duo_init(void)
{
	struct cuckoo_hashtable_params p = params_template;
	p.cht_capacity = CHT_OBJ_MAX / 2;
	h = cuckoo_hashtable_create(&p);

	// populate 1/3 of the objects
	for (uint32_t i = 0; i < CHT_OBJ_MAX; i += 3) {
		struct cht_obj *co = &cht_objs[i];
		int error = cuckoo_hashtable_add_with_hash(h, &co->co_cnode, co->co_hash);
		ASSERT(error == 0);
		co->co_state = COS_ADDED;
	}
}

static void
cht_concurrent_duo(void *v, wait_result_t w)
{
#pragma unused(v, w)
#define DUO_ITERATIONS (2 * CHT_OBJ_MAX)

#define DUO_OPS_MASK 0x0000000f
#define DUO_OPS_ADD 0x9

#define DUO_IDX_MASK 0xfffffff0
#define DUO_IDX_SHIFT 0x8
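	/*
	 * Each random word selects both an operation and a target object:
	 * the low nibble picks an add for values below DUO_OPS_ADD (roughly
	 * 9 out of every 16 iterations) and a delete otherwise, while the
	 * higher-order bits pick the object index modulo CHT_OBJ_MAX.
	 */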

	cht_concurrent_ops_begin();

	uint32_t *rands;
	rands = sk_alloc_data(sizeof(uint32_t) * DUO_ITERATIONS, Z_WAITOK, cuckoo_test_tag);
	VERIFY(rands != NULL);
	read_random(rands, sizeof(uint32_t) * DUO_ITERATIONS);

	for (uint32_t i = 0; i < DUO_ITERATIONS; i++) {
		uint32_t rand, ops, idx;
		rand = rands[i];
		ops = rand & DUO_OPS_MASK;
		idx = (rand >> DUO_IDX_SHIFT) % CHT_OBJ_MAX;

		// choose an op (add or del)
		if (ops < DUO_OPS_ADD) {
			struct cht_obj *co = &cht_objs[idx];
			if (os_atomic_cmpxchg(&co->co_state, COS_NOT_ADDED, COS_BUSY, acq_rel)) {
				struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
				ASSERT(node == NULL);
				int error = cuckoo_hashtable_add_with_hash(h, &co->co_cnode, co->co_hash);
				ASSERT(error == 0);
				ASSERT(cht_obj_refcnt(co) == 2);

				co->co_state = COS_ADDED;
			}
		} else {
			struct cht_obj *co = &cht_objs[idx];
			if (os_atomic_cmpxchg(&co->co_state, COS_ADDED, COS_BUSY, acq_rel)) {
				struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
				ASSERT(node != NULL);
				ASSERT(node == &co->co_cnode);
				int error = cuckoo_hashtable_del(h, &co->co_cnode, co->co_hash);
				ASSERT(error == 0);
				ASSERT(cht_obj_refcnt(co) == 2);
				cht_obj_release(co);

				co->co_state = COS_NOT_ADDED;
			}
		}
	}

	sk_free_data(rands, sizeof(uint32_t) * DUO_ITERATIONS);
	cht_concurrent_ops_done();
}

static void
cht_concurrent_duo_check(void)
{
	size_t added = 0;
	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		struct cht_obj *co = &cht_objs[i];
		if (co->co_state == COS_ADDED) {
			struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
			ASSERT(node != NULL);
			ASSERT(node == &co->co_cnode);
			added++;
			cht_obj_release(co);
		} else {
			struct cuckoo_node *node = cuckoo_hashtable_find_with_hash(h, &co->co_key, co->co_hash);
			ASSERT(node == NULL);
		}
	}

	ASSERT(added == cuckoo_hashtable_entries(h));
}

static void
cht_concurrent_duo_fini(void)
{
	for (uint32_t i = 0; i < CHT_OBJ_MAX; i++) {
		struct cht_obj *co = &cht_objs[i];
		if (co->co_state == COS_ADDED) {
			int error = cuckoo_hashtable_del(h, &co->co_cnode, co->co_hash);
			ASSERT(error == 0);
		}
	}

	ASSERT(cuckoo_hashtable_entries(h) == 0);

	cuckoo_hashtable_free(h);
}

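/*
 * Generic driver for the concurrent test suites: run the suite-specific
 * init, spawn worker threads running `cht_concurrent_ops' on roughly 3/4
 * of the available CPUs (at least two), wait for all of them to check in,
 * release them together by setting chth_run, wait for the last one to
 * check out, then run the suite-specific check and fini steps.
 */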
static void
cht_concurrent_tests(
	void (*cht_concurrent_init)(void),
	void (*cht_concurrent_ops)(void *v, wait_result_t w),
	void (*cht_concurrent_check)(void),
	void (*cht_concurrent_fini)(void))
{
	uint32_t nthreads = MAX(2, ml_wait_max_cpus() * 3 / 4);

	SK_ERR("start, nthreads %d", nthreads);

	cht_concurrent_init();

	// init multithread test config
	if (chth_confs == NULL) {
		chth_nthreads = nthreads;
		chth_confs = sk_alloc_type_array(struct cht_thread_conf, nthreads,
		    Z_WAITOK | Z_NOFAIL, cuckoo_test_tag);
	}

	for (uint32_t i = 0; i < nthreads; i++) {
		chth_confs[i].ctc_nthreads = nthreads;
		chth_confs[i].ctc_id = i;
		if (kernel_thread_start(cht_concurrent_ops, (void *)&chth_confs[i],
		    &chth_confs[i].ctc_thread) != KERN_SUCCESS) {
			panic("failed to create cuckoo test thread");
			__builtin_unreachable();
		}
	}

	// wait for threads to spawn
	lck_mtx_lock(&cht_lock);
	do {
		struct timespec ts = { 0, 100 * USEC_PER_SEC };
		(void) msleep(&chth_cnt, &cht_lock, (PZERO - 1),
		    "skmtstartw", &ts);
	} while (chth_cnt < nthreads);
	VERIFY(chth_cnt == nthreads);
	lck_mtx_unlock(&cht_lock);

	// signal threads to run
	lck_mtx_lock(&cht_lock);
	VERIFY(!chth_run);
	chth_run = TRUE;
	wakeup((caddr_t)&chth_run);
	lck_mtx_unlock(&cht_lock);

	// wait until all threads are done
	lck_mtx_lock(&cht_lock);
	do {
		struct timespec ts = { 0, 100 * USEC_PER_SEC };
		(void) msleep(&chth_cnt, &cht_lock, (PZERO - 1),
		    "skmtstopw", &ts);
	} while (chth_cnt != 0);
	chth_run = FALSE;
	lck_mtx_unlock(&cht_lock);

	// check results
	cht_concurrent_check();

	cht_concurrent_fini();

	SK_ERR("done");
}

static void
cuckoo_test_start(void *v, wait_result_t w)
{
#pragma unused(v, w)
	lck_mtx_lock(&cht_lock);
	VERIFY(!cht_busy);
	cht_busy = 1;
	lck_mtx_unlock(&cht_lock);

	cht_obj_init();

	cht_basic_tests();

	cht_concurrent_tests(cht_concurrent_add_init, cht_concurrent_add,
	    cht_concurrent_add_check, cht_concurrent_add_fini);
	cht_concurrent_tests(cht_concurrent_del_init, cht_concurrent_del,
	    cht_concurrent_del_check, cht_concurrent_del_fini);
	cht_concurrent_tests(cht_concurrent_duo_init, cht_concurrent_duo,
	    cht_concurrent_duo_check, cht_concurrent_duo_fini);

	lck_mtx_lock(&cht_lock);
	cht_enabled = 1;
	wakeup((caddr_t)&cht_enabled);
	lck_mtx_unlock(&cht_lock);
}

static void
cuckoo_test_stop(void *v, wait_result_t w)
{
#pragma unused(v, w)

	if (chth_confs != NULL) {
		sk_free_type_array(struct cht_thread_conf, chth_nthreads, chth_confs);
		chth_confs = NULL;
		chth_nthreads = 0;
	}

	cht_obj_fini();

	lck_mtx_lock(&cht_lock);
	VERIFY(cht_busy);
	cht_busy = 0;
	cht_enabled = 0;
	wakeup((caddr_t)&cht_enabled);
	lck_mtx_unlock(&cht_lock);
}

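/*
 * sysctl entry point.  Given the parent node name, this is presumably
 * reachable as something like:
 *
 *	sysctl kern.skywalk.libcuckoo.test=1	# start the test
 *	sysctl kern.skywalk.libcuckoo.test=0	# tear it down
 *
 * The handler blocks until the corresponding start/stop thread reports
 * completion (or the sleep is interrupted).
 */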
static int
sysctl_cuckoo_test(__unused struct sysctl_oid *oidp,
    __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error, newvalue, changed;
	thread_t th;
	thread_continue_t func;

	lck_mtx_lock(&cht_lock);
	if ((error = sysctl_io_number(req, cht_enabled, sizeof(int),
	    &newvalue, &changed)) != 0) {
		SK_ERR("failed to get new sysctl value");
		goto done;
	}

	if (changed && cht_enabled != newvalue) {
		if (newvalue && cht_busy) {
			SK_ERR("previous cuckoo test instance is still active");
			error = EBUSY;
			goto done;
		}

		if (newvalue) {
			func = cuckoo_test_start;
		} else {
			func = cuckoo_test_stop;
		}

		if (kernel_thread_start(func, NULL, &th) != KERN_SUCCESS) {
			SK_ERR("failed to create cuckoo test action thread");
			error = EBUSY;
			goto done;
		}
		do {
			SK_ERR("waiting for %s to complete",
			    newvalue ? "startup" : "shutdown");
			error = msleep(&cht_enabled, &cht_lock,
			    PWAIT | PCATCH, "skmtw", NULL);
			/* BEGIN CSTYLED */
			/*
			 * Loop exit conditions:
			 * - we were interrupted
			 * OR
			 * - we are starting up and are enabled
			 *   (Startup complete)
			 * OR
			 * - we are starting up and are not busy
			 *   (Failed startup)
			 * OR
			 * - we are shutting down and are not busy
			 *   (Shutdown complete)
			 */
			/* END CSTYLED */
		} while (!((error == EINTR) || (newvalue && cht_enabled) ||
		    (newvalue && !cht_busy) || (!newvalue && !cht_busy)));

		SK_ERR("exited from msleep");
		thread_deallocate(th);
	}

done:
	lck_mtx_unlock(&cht_lock);
	return error;
}

SYSCTL_PROC(_kern_skywalk_libcuckoo, OID_AUTO, test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, NULL, 0,
    sysctl_cuckoo_test, "I", "Start Cuckoo hashtable test");

#endif /* DEVELOPMENT || DEBUG */