1/*
2 * Copyright (c) 2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30#ifndef _KERN_SMR_HASH_H_
31#define _KERN_SMR_HASH_H_
32
33#include <kern/counter.h>
34#include <kern/lock_mtx.h>
35#include <kern/lock_ptr.h>
36#include <kern/smr.h>
37#include <os/hash.h>
38
39__BEGIN_DECLS
40
41
/*!
 * @typedef smrh_key_t
 *
 * @brief
 * A union that can represent several kinds of keys for SMR Hash Tables.
 *
 * @discussion
 * For strings or opaque structs, @c smrk_len must hold the correct key size.
 * For scalars (using the smrk_u64 field), the length is more advisory.
 */
typedef struct {
	union {
		const char *smrk_string;        /* C-string keys */
		const void *smrk_opaque;        /* byte-array / opaque struct keys */
		uint64_t    smrk_u64;           /* scalar keys */
	};
	size_t smrk_len;                        /* key size in bytes (see above) */
} smrh_key_t;
60
61
/*!
 * @struct smrh_traits
 *
 * @brief
 * This structure parametrizes the behavior of SMR hash tables.
 *
 * @discussion
 * Such structures are typically made with @c SMRH_TRAITS_DEFINE_*.
 *
 * Traits must be static and const in the same translation unit that implements
 * the hash table, and possibly export methods to other modules. This design is
 * not unlike C++ traits structure used in template parametrization, and rely on
 * the constness of all structures for the compiler to actually elide most
 * function calls, while maintaining decently good ergonomics.
 *
 *
 * @field link_offset offset of the @c smrq_slink linkage inside hashed objects.
 *
 * @field key_hash (automatic) computes a hash for a given key.
 * @field key_equ (automatic) returns if two keys are equal
 * @field obj_hash (automatic) computes a hash for a given object.
 * @field obj_equ (automatic) returns if an object has the specified key
 *
 * @field domain the SMR domain which protects elements.
 *
 * @field obj_try_get function which attempts to acquire a reference on an
 * object, or returns failure. This function is used
 * to verify objects are "live" for the smrh_*_get()
 * verbs.
 */
struct smrh_traits {
	unsigned long link_offset;
	smr_t domain;
	uint32_t (*key_hash)(smrh_key_t, uint32_t);
	bool (*key_equ)(smrh_key_t, smrh_key_t);
	uint32_t (*obj_hash)(const struct smrq_slink *, uint32_t);
	bool (*obj_equ)(const struct smrq_slink *, smrh_key_t);
	bool (*obj_try_get)(void *);
};
typedef const struct smrh_traits *smrh_traits_t;
100
101
102#pragma mark SMR hash keys
103
/*!
 * @macro SMRH_SCALAR_KEY()
 *
 * @brief
 * Generates a @c smrh_key_t value out of a scalar.
 *
 * @discussion
 * @c smrk_len is set to the scalar's size so that traits helpers
 * can pick the 32-bit or 64-bit hashing function.
 */
#define SMRH_SCALAR_KEY(e) \
	(smrh_key_t){ .smrk_u64 = (e), .smrk_len = sizeof(e) }
112
113
114/*!
115 * @function smrh_key_hash_u32
116 *
117 * @brief
118 * Hashing function to use as a @c key_hash trait for 32bit scalars.
119 */
120__pure2
121static inline uint32_t
122smrh_key_hash_u32(smrh_key_t key, uint32_t seed)
123{
124 uint32_t x = (uint32_t)key.smrk_u64 + seed;
125
126 x ^= x >> 16;
127 x *= 0x7feb352dU;
128 x ^= x >> 15;
129 x *= 0x846ca68bU;
130 x ^= x >> 16;
131
132 return x;
133}
134
135/*!
136 * @function smrh_key_hash_u64
137 *
138 * @brief
139 * Hashing function to use as a @c key_hash trait for 64bit scalars.
140 */
141__pure2
142static inline uint32_t
143smrh_key_hash_u64(smrh_key_t key, uint32_t seed)
144{
145 uint64_t x = key.smrk_u64 + seed;
146
147 x ^= x >> 30;
148 x *= 0xbf58476d1ce4e5b9U;
149 x ^= x >> 27;
150 x *= 0x94d049bb133111ebU;
151 x ^= x >> 31;
152
153 return (uint32_t)x;
154}
155
156/*!
157 * @function smrh_key_hash_mem
158 *
159 * @brief
160 * Hashing function to use as a @c key_hash trait for byte arrays.
161 */
162__stateful_pure
163static inline uint32_t
164smrh_key_hash_mem(smrh_key_t key, uint32_t seed)
165{
166 return os_hash_jenkins(data: key.smrk_opaque, length: key.smrk_len, seed);
167}
168
169/*!
170 * @function smrh_key_hash_str
171 *
172 * @brief
173 * Hashing function to use as a @c key_hash trait for C strings.
174 */
175__stateful_pure
176static inline uint32_t
177smrh_key_hash_str(smrh_key_t key, uint32_t seed)
178{
179 return os_hash_jenkins(data: key.smrk_opaque, length: key.smrk_len, seed);
180}
181
182
183/*!
184 * @function smrh_key_equ_scalar
185 *
186 * @brief
187 * Equality function to use as @c key_equ for scalars.
188 */
189static inline bool
190smrh_key_equ_scalar(smrh_key_t k1, smrh_key_t k2)
191{
192 return k1.smrk_u64 == k2.smrk_u64;
193}
194
195/*!
196 * @function smrh_key_equ_mem
197 *
198 * @brief
199 * Equality function to use as @c key_equ for byte arrays.
200 */
201static inline bool
202smrh_key_equ_mem(smrh_key_t k1, smrh_key_t k2)
203{
204 assert(k1.smrk_len == k2.smrk_len);
205 return memcmp(s1: k1.smrk_opaque, s2: k2.smrk_opaque, n: k1.smrk_len) == 0;
206}
207
208/*!
209 * @function smrh_key_equ_str
210 *
211 * @brief
212 * Equality function to use as @c key_equ for strings.
213 */
214static inline bool
215smrh_key_equ_str(smrh_key_t k1, smrh_key_t k2)
216{
217 return k1.smrk_len == k2.smrk_len &&
218 memcmp(s1: k1.smrk_opaque, s2: k2.smrk_opaque, n: k1.smrk_len) == 0;
219}
220
221
#pragma mark SMR hash traits

/*
 * Storage class for traits globals: constexpr in C++ so the traits can be
 * used in constant expressions; static const (kept alive with __used) in C.
 */
#if __cplusplus
#define __smrh_traits_storage static constexpr
#else
#define __smrh_traits_storage static const __used
#endif
229
/*!
 * @macro SMRH_TRAITS_DEFINE()
 *
 * @brief
 * Defines a relatively naked typed SMR Hash traits structure.
 *
 * @discussion
 * Clients must provide:
 * - domain,
 * - key_hash,
 * - key_equ,
 * - obj_hash,
 * - obj_equ.
 *
 * Clients might provide:
 * - obj_try_get.
 *
 * @param name the name of the global to create.
 * @param type_t the type of objects that will be hashed
 * @param link_field the linkage used to link elements
 */
/*
 * The zero-sized smrht_obj_type array records type_t in the traits type
 * without occupying storage, so accessor macros can recover the object
 * type at compile time.
 */
#define SMRH_TRAITS_DEFINE(name, type_t, link_field, ...) \
	__smrh_traits_storage struct name { \
	        type_t *smrht_obj_type[0]; \
	        struct smrh_traits smrht; \
	} name = { .smrht = { \
	        .link_offset = offsetof(type_t, link_field), \
	        __VA_ARGS__ \
	} }
259
/*!
 * @macro SMRH_TRAITS_DEFINE_SCALAR()
 *
 * @brief
 * Defines a relatively typed SMR Hash traits structure for scalar keys.
 *
 * @discussion
 * Clients must provide:
 * - domain.
 *
 * Clients might provide:
 * - obj_try_get.
 *
 * This macro generates @c name##_obj_hash and @c name##_obj_equ helpers
 * that derive the key from @c key_field, and picks the 32-bit or 64-bit
 * key hashing function based on the key field's size.
 *
 * @param name the name of the global to create.
 * @param type_t the type of objects that will be hashed
 * @param key_field the field holding the key
 * @param link_field the linkage used to link elements
 */
#define SMRH_TRAITS_DEFINE_SCALAR(name, type_t, key_field, link_field, ...) \
	static uint32_t \
	name ## _obj_hash(const struct smrq_slink *link, uint32_t seed) \
	{ \
	        __auto_type o = __container_of(link, const type_t, link_field); \
	        smrh_key_t k = SMRH_SCALAR_KEY(o->key_field); \
	                                                                        \
	        if (k.smrk_len > sizeof(uint32_t)) { \
	                return smrh_key_hash_u64(k, seed); \
	        } else { \
	                return smrh_key_hash_u32(k, seed); \
	        } \
	} \
	\
	static bool \
	name ## _obj_equ(const struct smrq_slink *link, smrh_key_t key) \
	{ \
	        __auto_type o = __container_of(link, const type_t, link_field); \
	                                                                        \
	        return smrh_key_equ_scalar(SMRH_SCALAR_KEY(o->key_field), key); \
	} \
	\
	SMRH_TRAITS_DEFINE(name, type_t, link_field, \
	    .key_hash = sizeof(((type_t *)NULL)->key_field) > 4 \
	        ? smrh_key_hash_u64 : smrh_key_hash_u32, \
	    .key_equ = smrh_key_equ_scalar, \
	    .obj_hash = name ## _obj_hash, \
	    .obj_equ = name ## _obj_equ, \
	    __VA_ARGS__ \
	)
308
/*!
 * @macro SMRH_TRAITS_DEFINE_STR()
 *
 * @brief
 * Defines a basic typed SMR Hash traits structure for string keys.
 *
 * @discussion
 * Clients must provide:
 * - domain,
 * - obj_hash,
 * - obj_equ.
 *
 * Clients might provide:
 * - obj_try_get.
 *
 * key_hash/key_equ are pre-filled with the string helpers
 * (@c smrh_key_hash_str / @c smrh_key_equ_str).
 *
 * @param name the name of the global to create.
 * @param type_t the type of objects that will be hashed
 * @param link_field the linkage used to link elements
 */
#define SMRH_TRAITS_DEFINE_STR(name, type_t, link_field, ...) \
	SMRH_TRAITS_DEFINE(name, type_t, link_field, \
	    .key_hash = smrh_key_hash_str, \
	    .key_equ = smrh_key_equ_str, \
	    __VA_ARGS__ \
	)
334
/*!
 * @macro SMRH_TRAITS_DEFINE_MEM()
 *
 * @brief
 * Defines a basic typed SMR Hash traits structure for byte array keys.
 *
 * @discussion
 * Clients must provide:
 * - domain,
 * - obj_hash,
 * - obj_equ.
 *
 * Clients might provide:
 * - obj_try_get.
 *
 * key_hash/key_equ are pre-filled with the byte-array helpers
 * (@c smrh_key_hash_mem / @c smrh_key_equ_mem).
 *
 * @param name the name of the global to create.
 * @param type_t the type of objects that will be hashed
 * @param link_field the linkage used to link elements
 */
#define SMRH_TRAITS_DEFINE_MEM(name, type_t, link_field, ...) \
	SMRH_TRAITS_DEFINE(name, type_t, link_field, \
	    .key_hash = smrh_key_hash_mem, \
	    .key_equ = smrh_key_equ_mem, \
	    __VA_ARGS__ \
	)
360
/*!
 * @macro smrht_enter()
 *
 * @brief
 * Convenience macro to enter the domain associated
 * with a specified hash table traits
 */
#define smrht_enter(traits) \
	smr_enter((traits)->smrht.domain)
370
/*!
 * @macro smrht_leave()
 *
 * @brief
 * Convenience macro to leave the domain associated
 * with a specified hash table traits
 */
#define smrht_leave(traits) \
	smr_leave((traits)->smrht.domain)
380
381
382#pragma mark - SMR hash tables
383
384
/*!
 * @struct smr_hash
 *
 * @brief
 * This type implements simple closed addressing hash table.
 *
 * @discussion
 * Using such a table allows for concurrent readers,
 * but assumes external synchronization for mutations.
 *
 * In particular it means that insertions and deletions
 * might block behind resizing the table.
 *
 * These hash tables aren't meant to be robust to attackers
 * trying to cause hash collisions.
 *
 * Resizing is possible concurrently to readers implementing
 * the relativistic hash table growth scheme.
 * (https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf)
 */
struct smr_hash {
#define SMRH_ARRAY_ORDER_SHIFT  (48)
#define SMRH_ARRAY_ORDER_MASK   (0xfffful << SMRH_ARRAY_ORDER_SHIFT)
	uintptr_t smrh_array;           /* bucket array pointer, order packed in top 16 bits */
	uint32_t  smrh_count;           /* number of elements in the table */
	bool      smrh_resizing;        /* true while a resize is in progress */
	uint8_t   smrh_unused1;         /* padding */
	uint16_t  smrh_unused2;         /* padding */
};
414
415#pragma mark SMR hash tables: initialization and accessors
416
/*!
 * @function smr_hash_init()
 *
 * @brief
 * Initializes a hash with the specified size.
 *
 * @discussion
 * This function never fails but requires for `size` to be
 * smaller than KALLOC_SAFE_ALLOC_SIZE / sizeof(struct smrq_slist_head)
 * (or to be called during early boot).
 */
extern void smr_hash_init(
	struct smr_hash        *smrh,
	size_t                  size);
431
/*!
 * @function smr_hash_destroy()
 *
 * @brief
 * Destroys a hash initialized with smr_hash_init().
 *
 * @discussion
 * This doesn't clean up the table from any elements it still contains.
 * @c smr_hash_serialized_clear() must be called first if needed.
 */
extern void smr_hash_destroy(
	struct smr_hash        *smrh);
444
/*!
 * @struct smr_hash_array
 *
 * @brief
 * The array pointer of a hash is packed with its size for atomicity reasons,
 * this type is used for decoding / setting this pointer.
 */
struct smr_hash_array {
	struct smrq_slist_head *smrh_array;     /* unpacked bucket array pointer */
	uint16_t                smrh_order;     /* 64 - log2(number of buckets) */
};
456
/*!
 * @function smr_hash_array_decode
 *
 * @brief
 * Decodes the array pointer of a hash table.
 */
static inline struct smr_hash_array
smr_hash_array_decode(const struct smr_hash *smrh)
{
	struct smr_hash_array array;
	uintptr_t ptr = os_atomic_load(&smrh->smrh_array, relaxed);

	/* the top 16 bits of the packed word hold the table "order" */
	array.smrh_order = (uint16_t)(ptr >> SMRH_ARRAY_ORDER_SHIFT);
	/*
	 * Restore the top 16 bits to all-ones to recover the canonical
	 * pointer (assumes kernel pointers have those bits set, which is
	 * what allows the order to be packed there -- TODO confirm).
	 */
	ptr |= SMRH_ARRAY_ORDER_MASK;
	array.smrh_array = (struct smrq_slist_head *)ptr;

	return array;
}
475
476/*!
477 * @function smr_hash_size()
478 *
479 * @brief
480 * Returns the number of buckets in the hash table.
481 */
482__attribute__((overloadable, always_inline))
483static inline unsigned long
484smr_hash_size(struct smr_hash_array array)
485{
486 return 1ul << (64 - array.smrh_order);
487}
488__attribute__((overloadable, always_inline))
489static inline unsigned long
490smr_hash_size(const struct smr_hash *smrh)
491{
492 return smr_hash_size(array: smr_hash_array_decode(smrh));
493}
494
495
496#pragma mark SMR hash tables: read operations
497
/*!
 * @function smr_hash_get()
 *
 * @brief
 * Convenience function for simple lookups.
 *
 * @discussion
 * The SMR domain for this table must not be entered.
 *
 * This function doesn't require any synchronization and will enter/leave
 * the SMR domain protecting elements automatically, and call the @c obj_try_get
 * trait to validate/retain the element.
 *
 * @param smrh the hash table
 * @param key the key to lookup
 * @param traits the traits for the hash table
 */
#define smr_hash_get(smrh, key, traits) ({ \
	(smrht_obj_t(traits))__smr_hash_get(smrh, key, &(traits)->smrht); \
})
519
/*!
 * @function smr_hash_contains()
 *
 * @brief
 * Convenience function for simple contains checks.
 *
 * @discussion
 * The SMR domain for this table must not be entered.
 *
 * This function doesn't require any synchronization and will enter/leave
 * the SMR domain protecting elements automatically.
 *
 * @param smrh the hash table
 * @param key the key to lookup
 * @param traits the traits for the hash table
 */
#define smr_hash_contains(smrh, key, traits) ({ \
	smrh_traits_t __smrht = &(traits)->smrht; \
	const struct smr_hash *__h = (smrh); \
	bool __contains; \
	\
	smr_enter(__smrht->domain); \
	__contains = (__smr_hash_entered_find(__h, key, __smrht) != NULL); \
	smr_leave(__smrht->domain); \
	\
	__contains; \
})
546
/*!
 * @function smr_hash_entered_find()
 *
 * @brief
 * Lookups an element in the table by key.
 *
 * @discussion
 * The SMR domain for this table must be entered.
 *
 * This function returns the first element found that matches the key.
 * This element might be about to be deleted or stale, and it is up
 * to the client to make that determination if required.
 *
 * There might be more elements that can be found for that key,
 * but because elements are inserted at the head of buckets,
 * other matches should all be staler entries than the one returned.
 *
 * @param smrh the hash table
 * @param key the key to lookup
 * @param traits the traits for the hash table
 */
#define smr_hash_entered_find(smrh, key, traits) ({ \
	smrh_traits_t __smrht = &(traits)->smrht; \
	struct smrq_slist_head *__hd = __smr_hash_bucket(smrh, key, __smrht); \
	\
	(smrht_obj_t(traits))__smr_hash_entered_find(__hd, key, __smrht); \
})
574
/*!
 * @function smr_hash_serialized_find()
 *
 * @brief
 * Lookups an element in the table by key.
 *
 * @discussion
 * The SMR domain for this table must NOT be entered.
 * This function requires external serialization with other mutations.
 *
 * This function returns the first element found that matches the key.
 * This element might be about to be deleted or stale, and it is up
 * to the client to make that determination if required.
 *
 * There might be more elements that can be found for that key,
 * but because elements are inserted at the head of buckets,
 * other matches should all be staler entries than the one returned.
 *
 * @param smrh the hash table
 * @param key the key to lookup
 * @param traits the traits for the hash table
 */
#define smr_hash_serialized_find(smrh, key, traits) ({ \
	smrh_traits_t __smrht = &(traits)->smrht; \
	struct smrq_slist_head *__hd = __smr_hash_bucket(smrh, key, __smrht); \
	\
	(smrht_obj_t(traits))__smr_hash_serialized_find(__hd, key, __smrht); \
})
603
604
605#pragma mark SMR hash tables: mutations
606
/*!
 * @function smr_hash_serialized_insert()
 *
 * @brief
 * Insert an object in the hash table.
 *
 * @discussion
 * The SMR domain for this table must NOT be entered.
 * This function requires external serialization with other mutations.
 *
 * Clients of this call must have checked there is no previous entry
 * for this key in the hash table.
 *
 * The element count is updated and the element is inserted at the
 * head of its bucket (newest entries are found first).
 *
 * @param smrh the hash table
 * @param link the pointer to the linkage to insert.
 * @param traits the traits for the hash table
 */
#define smr_hash_serialized_insert(smrh, link, traits) ({ \
	smrh_traits_t __smrht = &(traits)->smrht; \
	struct smr_hash *__h = (smrh); \
	struct smrq_slink *__link = (link); \
	struct smrq_slist_head *__hd; \
	\
	__hd = __smr_hash_bucket(__h, __link, __smrht); \
	__h->smrh_count++; \
	smrq_serialized_insert_head(__hd, __link); \
})
634
/*!
 * @function smr_hash_serialized_get_or_insert()
 *
 * @brief
 * Insert an object in the hash table, or get the conflicting element.
 *
 * @discussion
 * The SMR domain for this table must NOT be entered.
 * This function requires external serialization with other mutations.
 *
 * @param smrh the hash table
 * @param key the key of the object being inserted
 * @param link the pointer to the linkage to insert.
 * @param traits the traits for the hash table
 */
#define smr_hash_serialized_get_or_insert(smrh, key, link, traits) ({ \
	(smrht_obj_t(traits))__smr_hash_serialized_get_or_insert(smrh, key, \
	    link, &(traits)->smrht); \
})
653
/*!
 * @function smr_hash_serialized_remove()
 *
 * @brief
 * Remove an object from the hash table.
 *
 * @discussion
 * The SMR domain for this table must NOT be entered.
 * This function requires external serialization with other mutations.
 *
 * Note that the object once removed must be retired via SMR,
 * and not freed immediately.
 *
 * If the object isn't in the hash table, this function will crash with
 * a NULL deref while walking the bucket where the element should belong.
 *
 * @param smrh the hash table
 * @param link the pointer to the linkage to remove.
 * @param traits the traits for the hash table
 */
#define smr_hash_serialized_remove(smrh, link, traits) ({ \
	smrh_traits_t __smrht = &(traits)->smrht; \
	struct smr_hash *__h = (smrh); \
	struct smrq_slink *__link = (link); \
	struct smrq_slist_head *__hd; \
	\
	__hd = __smr_hash_bucket(__h, __link, __smrht); \
	__h->smrh_count--; \
	smrq_serialized_remove(__hd, __link); \
})
684
/*!
 * @function smr_hash_serialized_replace()
 *
 * @brief
 * Replaces an object in the hash
 *
 * @discussion
 * The SMR domain for this table must NOT be entered.
 * This function requires external serialization with other mutations.
 *
 * Note that the old object once removed must be retired via SMR,
 * and not freed immediately.
 *
 * If the old object isn't in the hash table, this function will crash with
 * a NULL deref while walking the bucket where the element should belong.
 *
 * The new object MUST have the same key as the object it replaces,
 * otherwise behavior is undefined.
 *
 * The element count is unchanged (one element out, one element in).
 *
 * @param smrh the hash table
 * @param old_link the pointer to the linkage to remove.
 * @param new_link the pointer to the linkage to insert.
 * @param traits the traits for the hash table
 */
#define smr_hash_serialized_replace(smrh, old_link, new_link, traits) ({ \
	smrh_traits_t __smrht = &(traits)->smrht; \
	struct smrq_slink *__link = (old_link); \
	struct smrq_slist_head *__hd; \
	\
	__hd = __smr_hash_bucket(smrh, __link, __smrht); \
	smrq_serialized_replace(__hd, __link, (new_link)); \
})
717
/*!
 * @function smr_hash_serialized_clear()
 *
 * @brief
 * Empties an SMR hash table.
 *
 * @discussion
 * This function requires external serialization with other mutations.
 *
 * @param smrh the hash table to clear
 * @param traits the traits for this hash table
 * @param free a block to call on each element in the table.
 */
#define smr_hash_serialized_clear(smrh, traits, free...) \
	__smr_hash_serialized_clear(smrh, &(traits)->smrht, free)
733
734
735#pragma mark SMR hash tables: resizing
736
/*
 * Implementing the growth policy is not builtin because
 * there are a LOT of ways to do it, with some variants
 * (such as doing it asynchronously) requiring a lot of bookkeeping
 * which would grow the structure and prevent lean clients
 * from using it without any growth policy.
 */
744
745/*!
746 * @function smr_hash_serialized_should_shrink()
747 *
748 * @brief
749 * Allows implementing a typical policy for shrinking.
750 *
751 * @discussion
752 * Returns whether the table is at least @c min_size large,
753 * and whether the table has more than @c size_factor buckets
754 * per @c count_factor elements.
755 */
756static inline bool
757smr_hash_serialized_should_shrink(
758 const struct smr_hash *smrh,
759 uint32_t min_size,
760 uint32_t size_factor,
761 uint32_t count_factor)
762{
763 size_t size = smr_hash_size(smrh);
764
765 if (size > min_size && !smrh->smrh_resizing) {
766 return size * count_factor > smrh->smrh_count * size_factor;
767 }
768 return false;
769}
770
771/*!
772 * @function smr_hash_serialized_should_grow()
773 *
774 * @brief
775 * Allows implementing a typical policy for shrinking.
776 *
777 * @discussion
778 * Returns whether the table has less than @c size_factor buckets
779 * per @c count_factor elements.
780 */
781static inline bool
782smr_hash_serialized_should_grow(
783 const struct smr_hash *smrh,
784 uint32_t size_factor,
785 uint32_t count_factor)
786{
787 size_t size = smr_hash_size(smrh);
788
789 if (!smrh->smrh_resizing) {
790 return size * count_factor < smrh->smrh_count * size_factor;
791 }
792 return false;
793}
794
/*!
 * @function smr_hash_shrink_and_unlock()
 *
 * @brief
 * Shrinks a hash table (halves the number of buckets).
 *
 * @discussion
 * This function synchronizes with other mutations using
 * the passed in mutex.
 *
 * Shrinking is a relatively fast operation (however it still
 * is mostly linear in the number of elements in the hash).
 *
 * This function doesn't perform any policy checks such as
 * "minimal size" being sane or density of buckets being good.
 *
 * This function assumes it is called with the lock held,
 * and returns with it unlocked.
 *
 * @returns
 * - KERN_SUCCESS: the resize was successful.
 * - KERN_RESOURCE_SHORTAGE: the system was out of memory.
 * - KERN_FAILURE: the hash table was already resizing.
 */
#define smr_hash_shrink_and_unlock(smrh, mutex, traits) \
	__smr_hash_shrink_and_unlock(smrh, mutex, &(traits)->smrht)
821
822
/*!
 * @function smr_hash_grow_and_unlock()
 *
 * @brief
 * Grows a hash table (doubles the number of buckets).
 *
 * @discussion
 * This function synchronizes with other mutations using
 * the passed in mutex.
 *
 * Growing is relatively expensive, as it will rehash all elements,
 * and call smr_synchronize() several times over the course
 * of the operation. And mutations are delayed while this growth is
 * occurring.
 *
 * This function doesn't perform any policy checks such as
 * "maximal size" being sane or density of buckets being good.
 *
 * This function assumes it is called with the lock held,
 * and returns with it unlocked.
 *
 * @returns
 * - KERN_SUCCESS: the resize was successful.
 * - KERN_RESOURCE_SHORTAGE: the system was out of memory.
 * - KERN_FAILURE: the hash table was already resizing.
 */
#define smr_hash_grow_and_unlock(smrh, mutex, traits) \
	__smr_hash_grow_and_unlock(smrh, mutex, &(traits)->smrht)
851
852
853#pragma mark SMR hash tables: iteration
854
/*!
 * @struct smr_hash_iterator
 *
 * @brief
 * Structure used for SMR hash iterations.
 *
 * @discussion
 * Do not manipulate internal fields directly, use the accessors instead.
 *
 * Using iteration can be done either under an entered SMR domain,
 * or under serialization.
 *
 * However erasure is only supported under serialization.
 *
 * Note that entered enumeration is done with preemption disabled
 * and should be used in a limited capacity. Such enumerations
 * might observe elements already removed from the table (due
 * to concurrent deletions) or elements twice (due to concurrent resizes).
 */
struct smr_hash_iterator {
	struct smrq_slist_head *hd_next;        /* next bucket to visit */
	struct smrq_slist_head *hd_last;        /* one past the last bucket */
	__smrq_slink_t         *prev;           /* pointer to the link slot of the current element */
	struct smrq_slink      *link;           /* current element, NULL at the end */
};
880
/*!
 * @function smr_hash_iter_begin()
 *
 * @brief
 * Initialize an SMR iterator, and advance it to the first element.
 *
 * @discussion
 * This function must be used in either serialized or entered context.
 */
static inline struct smr_hash_iterator
smr_hash_iter_begin(struct smr_hash *smrh)
{
	struct smr_hash_array array = smr_hash_array_decode(smrh);
	struct smr_hash_iterator it = {
		.hd_next = array.smrh_array,
		.hd_last = array.smrh_array + smr_hash_size(array),
	};

	/* skip over empty buckets until the first element (or the table end) */
	do {
		it.prev = &it.hd_next->first;
		it.link = smr_entered_load(it.prev);
		it.hd_next++;
	} while (it.link == NULL && it.hd_next < it.hd_last);

	return it;
}
907
/*!
 * @function smr_hash_iter_get()
 *
 * @brief
 * Returns the current value of the iterator, or NULL.
 *
 * @discussion
 * This function must be used in either serialized or entered context.
 *
 * NULL is returned once the iterator has walked past the last element,
 * which is what terminates smr_hash_foreach().
 */
#define smr_hash_iter_get(it, traits) ({ \
	struct smr_hash_iterator __smrh_it = (it); \
	void *__obj = NULL; \
	\
	if (__smrh_it.link) { \
	        __obj = __smrht_link_to_obj(&(traits)->smrht, __smrh_it.link); \
	} \
	\
	(smrht_obj_t(traits))__obj; \
})
927
/*!
 * @function smr_hash_iter_advance()
 *
 * @brief
 * Advance the iterator to the next element.
 *
 * @description
 * This function must be used in either serialized or entered context.
 */
static inline void
smr_hash_iter_advance(struct smr_hash_iterator *it)
{
	/* step to the successor of the current element within its bucket */
	it->prev = &it->link->next;

	/* on reaching the end of a chain, skip over empty buckets */
	while ((it->link = smr_entered_load(it->prev)) == NULL) {
		if (it->hd_next == it->hd_last) {
			break;
		}
		it->prev = &it->hd_next->first;
		it->hd_next++;
	}
}
950
/*!
 * @function smr_hash_iter_serialized_erase()
 *
 * @brief
 * Erases the current item from the hash and advance the cursor.
 *
 * @description
 * This function requires external serialization with other mutations.
 *
 * The object once removed must be retired via SMR,
 * and not freed immediately.
 *
 * NOTE(review): this does not decrement smrh_count — presumably the
 * caller is expected to adjust the count; confirm against callers.
 */
static inline void
smr_hash_iter_serialized_erase(struct smr_hash_iterator *it)
{
	/* unlink the current element by pointing *prev at its successor */
	it->link = smr_serialized_load(&it->link->next);
	smr_serialized_store_relaxed(it->prev, it->link);

	/* if that exhausted the bucket, move on to the next non-empty one */
	while (it->link == NULL) {
		if (it->hd_next == it->hd_last) {
			break;
		}
		it->prev = &it->hd_next->first;
		it->link = smr_entered_load(it->prev);
		it->hd_next++;
	}
}
978
/*!
 * @function smr_hash_foreach()
 *
 * @brief
 * Enumerates all elements in a hash table.
 *
 * @discussion
 * This function must be used in either serialized or entered context.
 *
 * When used in entered context, the enumeration might observe stale objects
 * that haven't been removed yet, or elements twice (due to concurrent resizes),
 * and the disambiguation must be done by the client if it matters.
 *
 * It is not permitted to erase elements during this enumeration,
 * manual use of the iterator APIs must be used if this is desired.
 *
 * @param obj the enumerator variable
 * @param smrh the hash table to enumerate
 * @param traits the traits for the hash table
 */
#define smr_hash_foreach(obj, smrh, traits) \
	for (struct smr_hash_iterator __it = smr_hash_iter_begin(smrh); \
	    ((obj) = smr_hash_iter_get(__it, traits)); \
	    smr_hash_iter_advance(&__it))
1003
1004
1005#pragma mark - SMR scalable hash tables
1006
1007
/*!
 * @typedef smrsh_state_t
 *
 * @brief
 * Atomic state for the scalable SMR hash table.
 *
 * @discussion
 * Scalable hash tables have 2 sets of seeds and bucket arrays,
 * which are used for concurrent rehashing.
 *
 * Each growth/shrink/re-seed operation will swap sizes
 * and set of seed/array atomically by changing the state.
 */
typedef struct {
	uint8_t curidx;         /* index (0/1) of the current seed/array set */
	uint8_t curshift;       /* size encoding for the current array */
	uint8_t newidx;         /* index (0/1) of the set being rehashed into */
	uint8_t newshift;       /* size encoding for the new array */
} smrsh_state_t;
1027
1028
/*!
 * @typedef smrsh_rehash_t
 *
 * @brief
 * Internal state management for various rehashing operations.
 *
 * @discussion
 * The RESEED/SHRINK/GROW bits record which operation is requested,
 * RUNNING is set while a rehash is in flight.
 */
__options_closed_decl(smrsh_rehash_t, uint8_t, {
	SMRSH_REHASH_NONE       = 0x00,
	SMRSH_REHASH_RESEED     = 0x01,
	SMRSH_REHASH_SHRINK     = 0x02,
	SMRSH_REHASH_GROW       = 0x04,
	SMRSH_REHASH_RUNNING    = 0x08,
});
1042
1043
/*!
 * @enum smrsh_policy_t
 *
 * @brief
 * Describes the growth/shrink policies for scalable SMR hash tables.
 *
 * @description
 * In general, singleton global hash tables that are central
 * to the performance of the system likely want to use
 * @c SMRSH_BALANCED_NOSHRINK or @c SMRSH_FASTEST.
 *
 * Hash tables that tend to be instantiated multiple times,
 * or have bursty behaviors should use more conservative policies.
 *
 * @const SMRSH_COMPACT
 * Choose a policy that is very memory conscious and will favour aggressive
 * shrinking and allow relatively long hash chains.
 *
 * @const SMRSH_BALANCED
 * Choose a balanced policy that allows for medium sized hash chains,
 * and shrinks less aggressively than @c SMRSH_COMPACT.
 *
 * @const SMRSH_BALANCED_NOSHRINK
 * This policy is the same as @c SMRSH_BALANCED, but the hash table
 * will not be allowed to shrink.
 *
 * @const SMRSH_FASTEST
 * This policy grows aggressively, only tolerating relatively short
 * hash chains, and will never shrink.
 */
__enum_closed_decl(smrsh_policy_t, uint32_t, {
	SMRSH_COMPACT,
	SMRSH_BALANCED,
	SMRSH_BALANCED_NOSHRINK,
	SMRSH_FASTEST,
});
1080
1081
1082/*!
1083 * @struct smr_shash
1084 *
1085 * @brief
1086 * Type for scalable SMR hash table.
1087 *
1088 * @description
1089 * Unlike its little brother @c smr_hash, these kinds of hash tables
1090 * allow for concurrent mutations (with fined grained per-bucket locks)
1091 * of the hash table.
1092 *
1093 * It also observes high collision rates and tries to adjust the hash
1094 * seeds in order to rebalance the hash tables when this happens.
1095 *
1096 * All this goodness however comes at a cost:
1097 * - these hash tables can't be enumerated
1098 * - these hash tables are substantially bigger (@c smr_hash is 2 pointers big,
1099 * where @c smr_shash is bigger and allocates a thread_call and a scalable
1100 * counter).
1101 */
struct smr_shash {
	hw_lck_ptr_t *_Atomic smrsh_array[2];	/* cur/new bucket arrays, indexed per smrsh_state */
	uint32_t _Atomic smrsh_seed[2];		/* cur/new hash seeds */
	smrsh_state_t _Atomic smrsh_state;	/* which idx/shift pair is current vs new */
	smrsh_rehash_t _Atomic smrsh_rehashing;	/* pending/running rehash operations */
	smrsh_policy_t smrsh_policy;		/* growth/shrink policy (see smrsh_policy_t) */
	uint16_t smrsh_min_shift : 5;		/* derived from init's min_size: shrink floor */
	uint16_t __unused_bits : 11;
	scalable_counter_t smrsh_count;		/* number of elements in the table */
	struct thread_call *smrsh_callout;	/* callout used for async rehash work */
};
1113
/* tag bit set on a bucket-head "stop" pointer terminating a chain (see __smr_shash_bucket_stop()) */
#define SMRSH_BUCKET_STOP_BIT 0x1ul
1115
1116
1117
1118#pragma mark SMR scalable hash tables: initialization and accessors
1119
1120/*!
1121 * @function smr_shash_init()
1122 *
1123 * @brief
1124 * Initializes a scalable hash table.
1125 *
1126 * @param smrh the scalable hash table struct to initialize.
1127 * @param policy the growth policy to use (see @c smrsh_policy_t).
1128 * @param min_size the number of buckets the table should not shrink below.
1129 */
/* out of line: allocates the thread_call and scalable counter (see struct smr_shash) */
extern void smr_shash_init(
	struct smr_shash       *smrh,
	smrsh_policy_t          policy,
	size_t                  min_size);
1134
1135/*!
1136 * @function smr_shash_destroy()
1137 *
1138 * @brief
1139 * Releases the resources held by a table.
1140 *
1141 * @param smrh the scalable hash table struct to destroy.
1142 * @param traits the SMR hash traits for this table.
1143 * @param free an optional callback to call on each element
1144 * still in the hash table.
1145 */
/* `free` is an optional trailing block argument forwarded verbatim */
#define smr_shash_destroy(smrh, traits, free...) \
	__smr_shash_destroy(smrh, &(traits)->smrht, free)
1148
1149
1150#pragma mark SMR scalable hash tables: read operations
1151
1152/*!
1153 * @function smr_shash_entered_find()
1154 *
1155 * @brief
1156 * Looks up an element by key in the hash table.
1157 *
1158 * @discussion
1159 * The SMR domain protecting the hash table elements must have been entered
1160 * to call this function.
1161 *
1162 * This function returns an object for which the @c obj_try_get
1163 * callback hasn't been called, which means that accessing the element
1164 * is only valid inside the current SMR critical section, or until
1165 * further action to "retain" the element has been taken.
1166 *
1167 * @param smrh the scalable hash table.
1168 * @param key the key to lookup.
1169 * @param traits the SMR hash traits for this table.
1170 *
1171 * @returns the first found element or NULL.
1172 */
/*
 * Statement-expression wrapper: performs the typeless lookup,
 * then casts the result back to the traits' declared object type.
 */
#define smr_shash_entered_find(smrh, key, traits) ({ \
	void *__obj; \
	\
	__obj = __smr_shash_entered_find(smrh, key, &(traits)->smrht); \
	\
	(smrht_obj_t(traits))__obj; \
})
1180
1181
1182/*!
1183 * @function smr_shash_entered_get()
1184 *
1185 * @brief
1186 * Looks up an element by key in the hash table.
1187 *
1188 * @discussion
1189 * The SMR domain protecting the hash table elements must have been entered
1190 * to call this function.
1191 *
1192 * This function returns an object for which the @c obj_try_get
1193 * callback has been called, which ensures it is valid even
1194 * after the current SMR critical section ends.
1195 *
1196 * @param smrh the scalable hash table.
1197 * @param key the key to lookup.
1198 * @param traits the SMR hash traits for this table.
1199 *
1200 * @returns the first found element or NULL.
1201 */
/*
 * Like smr_shash_entered_find(), but the returned object (if any)
 * already had its obj_try_get callback applied.
 */
#define smr_shash_entered_get(smrh, key, traits) ({ \
	void *__obj; \
	\
	__obj = __smr_shash_entered_get(smrh, key, &(traits)->smrht); \
	\
	(smrht_obj_t(traits))__obj; \
})
1209
1210/*!
1211 * @function smr_shash_get()
1212 *
1213 * @brief
1214 * Looks up an element by key in the hash table.
1215 *
1216 * @discussion
1217 * Conveniency wrapper for @c smr_shash_entered_get()
1218 * which creates an SMR critical section around its call.
1219 *
1220 * The SMR domain protecting the hash table must NOT have been entered
1221 * to call this function.
1222 *
1223 * @param smrh the scalable hash table.
1224 * @param key the key to lookup.
1225 * @param traits the SMR hash traits for this table.
1226 *
1227 * @returns the first found element or NULL.
1228 */
/*
 * Wraps __smr_shash_entered_get() in an SMR critical section
 * (smrht_enter/smrht_leave); caller must NOT already be entered.
 */
#define smr_shash_get(smrh, key, traits) ({ \
	void *__obj; \
	\
	smrht_enter(traits); \
	__obj = __smr_shash_entered_get(smrh, key, &(traits)->smrht); \
	smrht_leave(traits); \
	\
	(smrht_obj_t(traits))__obj; \
})
1238
1239
1240#pragma mark SMR scalable hash tables: mutations
1241
1242/*!
1243 * @function smr_shash_entered_get_or_insert()
1244 *
1245 * @brief
1246 * Inserts an element in the hash table, or return a pre-existing element
1247 * in the hash table for that key.
1248 *
1249 * @discussion
1250 * The SMR domain protecting the hash table elements must have been entered
1251 * to call this function.
1252 *
1253 * This function either returns an object for which the @c obj_try_get
1254 * callback has been called, or inserts the passed in element.
1255 *
1256 * @param smrh the scalable hash table.
1257 * @param key the key to lookup.
1258 * @param link the element to insert (its "key" must be @c key).
1259 * @param traits the SMR hash traits for this table.
1260 *
1261 * @returns NULL if the insertion happened,
1262 * or the "retained" colliding element otherwise.
1263 */
/*
 * Statement-expression wrapper around __smr_shash_entered_get_or_insert():
 * returns NULL when `link` was inserted, or the retained pre-existing
 * element for `key` otherwise, cast to the traits' object type.
 *
 * (A previously captured-but-unused `__smrht` local was removed;
 * the traits pointer is passed directly, like the sibling macros do.)
 */
#define smr_shash_entered_get_or_insert(smrh, key, link, traits) ({ \
	void *__obj; \
	\
	__obj = __smr_shash_entered_get_or_insert(smrh, key, link, \
	    &(traits)->smrht); \
	\
	(smrht_obj_t(traits))__obj; \
})
1273
1274/*!
1275 * @function smr_shash_get_or_insert()
1276 *
1277 * @brief
1278 * Looks up an element by key in the hash table.
1279 *
1280 * @discussion
1281 * Conveniency wrapper for @c smr_shash_entered_get_or_insert()
1282 * which creates an SMR critical section around its call.
1283 *
1284 * The SMR domain protecting the hash table must NOT have been entered
1285 * to call this function.
1286 *
1287 * @param smrh the scalable hash table.
1288 * @param key the key to lookup.
1289 * @param link the element to insert (its "key" must be @c key).
1290 * @param traits the SMR hash traits for this table.
1291 *
1292 * @returns NULL if the insertion happened,
1293 * or the "retained" colliding element otherwise.
1294 */
/*
 * Wraps __smr_shash_entered_get_or_insert() in an SMR critical section
 * (smrht_enter/smrht_leave); caller must NOT already be entered.
 */
#define smr_shash_get_or_insert(smrh, key, link, traits) ({ \
	void *__obj; \
	\
	smrht_enter(traits); \
	__obj = __smr_shash_entered_get_or_insert(smrh, key, link, \
	    &(traits)->smrht); \
	smrht_leave(traits); \
	\
	(smrht_obj_t(traits))__obj; \
})
1305
1306
1307/*!
1308 * @function smr_shash_entered_remove()
1309 *
1310 * @brief
1311 * Removes an element from the hash table.
1312 *
1313 * @discussion
1314 * The SMR domain protecting the hash table must have been entered
1315 * to call this function.
1316 *
1317 * The removed element can't be added back to the hash table
1318 * and must be retired via SMR and not freed immediately.
1319 *
1320 * @param smrh the scalable hash table.
1321 * @param link the element to remove from the hash table.
1322 * @param traits the SMR hash traits for this table.
1323 */
/*
 * Removal is implemented as: take a mutation cursor for the element
 * (locks its bucket), then erase through the cursor.
 */
#define smr_shash_entered_remove(smrh, link, traits) ({ \
	smr_shash_mut_cursor_t __cursor; \
	struct smrq_slink *__link = (link); \
	struct smr_shash *__smrh = (smrh); \
	\
	__cursor = smr_shash_entered_mut_begin(__smrh, __link, traits); \
	smr_shash_entered_mut_erase(__smrh, __cursor, __link, traits); \
})
1332
1333/*!
1334 * @function smr_shash_remove()
1335 *
1336 * @brief
1337 * Removes an element from the hash table.
1338 *
1339 * @discussion
1340 * Conveniency wrapper for @c smr_shash_entered_remove()
1341 * which creates an SMR critical section around its call.
1342 *
1343 * The SMR domain protecting the hash table must NOT have been entered
1344 * to call this function.
1345 *
1346 * The removed element can't be added back to the hash table
1347 * and must be retired via SMR and not freed immediately.
1348 *
1349 * @param smrh the scalable hash table.
1350 * @param link the element to remove from the hash table.
1351 * @param traits the SMR hash traits for this table.
1352 */
/*
 * Wraps smr_shash_entered_remove() in an SMR critical section
 * (smrht_enter/smrht_leave); caller must NOT already be entered.
 */
#define smr_shash_remove(smrh, link, traits) ({ \
	smrht_enter(traits); \
	smr_shash_entered_remove(smrh, link, traits); \
	smrht_leave(traits); \
})
1358
1359
1360/*!
1361 * @function smr_shash_entered_replace()
1362 *
1363 * @brief
1364 * Replaces an element in the hash table with another.
1365 *
1366 * @discussion
1367 * Elements must have the same key, otherwise the behavior is undefined.
1368 *
1369 * The SMR domain protecting the hash table must have been entered
1370 * to call this function.
1371 *
1372 * The removed element can't be added back to the hash table
1373 * and must be retired via SMR and not freed immediately.
1374 *
1375 * @param smrh the scalable hash table.
1376 * @param old_link the element to remove from the hash table.
1377 * @param new_link the element to insert in the hash table.
1378 * @param traits the SMR hash traits for this table.
1379 */
/*
 * Replacement is implemented as: take a mutation cursor for the old
 * element (locks its bucket), then splice the new element in through
 * the cursor.
 */
#define smr_shash_entered_replace(smrh, old_link, new_link, traits) ({ \
	smr_shash_mut_cursor_t __cursor; \
	struct smrq_slink *__link = (old_link); \
	\
	__cursor = smr_shash_entered_mut_begin(smrh, __link, traits); \
	smr_shash_entered_mut_replace(__cursor, __link, new_link); \
})
1387
1388/*!
1389 * @function smr_shash_replace()
1390 *
1391 * @brief
1392 * Replaces an element in the hash table with another.
1393 *
1394 * @discussion
1395 * Conveniency wrapper for @c smr_shash_entered_replace()
1396 * which creates an SMR critical section around its call.
1397 *
1398 * Elements must have the same key, otherwise the behavior is undefined.
1399 *
1400 * The SMR domain protecting the hash table must NOT have been entered
1401 * to call this function.
1402 *
1403 * The removed element can't be added back to the hash table
1404 * and must be retired via SMR and not freed immediately.
1405 *
1406 * @param smrh the scalable hash table.
1407 * @param old_link the element to remove from the hash table.
1408 * @param new_link the element to insert in the hash table.
1409 * @param traits the SMR hash traits for this table.
1410 */
/*
 * Wraps smr_shash_entered_replace() in an SMR critical section
 * (smrht_enter/smrht_leave); caller must NOT already be entered.
 */
#define smr_shash_replace(smrh, old_link, new_link, traits) ({ \
	smrht_enter(traits); \
	smr_shash_entered_replace(smrh, old_link, new_link, traits); \
	smrht_leave(traits); \
})
1416
1417
1418#pragma mark SMR scalable hash tables: advanced mutations
1419
1420/*!
1421 * @typedef smr_shash_mut_cursor_t
1422 *
1423 * @brief
1424 * Cursor used for advanced mutations.
1425 */
typedef struct {
	hw_lck_ptr_t   *head;	/* locked bucket head the element was found under */
	__smrq_slink_t *prev;	/* link field whose value points at the element */
} smr_shash_mut_cursor_t;
1430
1431
1432/*!
1433 * @macro smr_shash_entered_mut_begin()
1434 *
1435 * @brief
1436 * Creates a mutation cursor for the specified element.
1437 *
1438 * @discussion
1439 * A mutation cursor allows to erase or replace an element
1440 * in the hash table.
1441 *
1442 * The cursor returned by this function holds a lock,
1443 * and it is undefined to have two live cursors at a time
1444 * (it will typically deadlock, and unlike typical locks,
1445 * there's no a priori lock ordering that can be derived
1446 * to prevent it).
1447 *
1448 * The SMR domain protecting the hash table must have been entered
1449 * to call this function.
1450 *
1451 * One and exactly one of these three calls must be performed
1452 * on a cursor before the SMR transaction is ended:
1453 * - smr_shash_entered_mut_erase() to erase the element it was made for,
1454 * - smr_shash_entered_mut_replace() to replace the element it was made for,
1455 * - smr_shash_entered_mut_abort() to abandon the cursor without mutation.
1456 *
1457 * @param smrh the scalable hash table.
1458 * @param link the element to create a cursor for (must be in the hash).
1459 * @param traits the SMR hash traits for this table.
1460 */
/* returns a cursor holding the bucket lock; see the block comment above */
#define smr_shash_entered_mut_begin(smrh, link, traits) \
	__smr_shash_entered_mut_begin(smrh, link, &(traits)->smrht)
1463
1464
1465/*!
1466 * @macro smr_shash_entered_mut_erase()
1467 *
1468 * @brief
1469 * Erases the element used to make the cursor.
1470 *
1471 * @discussion
1472 * The passed in element must be the same that was used to make the cursor.
1473 *
1474 * The call must be made in the same SMR transaction that was entered
1475 * to make the cursor.
1476 *
1477 * The cursor is invalidated once this call returns.
1478 *
1479 * The removed element can't be added back to the hash table
1480 * and must be retired via SMR and not freed immediately.
1481 *
1482 * @param smrh the scalable hash table.
1483 * @param cursor the cursor made for the element to remove.
1484 * @param link the element used to create @c cursor.
1485 * @param traits the SMR hash traits for this table.
1486 */
/* consumes the cursor; `link` must be the element the cursor was made for */
#define smr_shash_entered_mut_erase(smrh, cursor, link, traits) \
	__smr_shash_entered_mut_erase(smrh, cursor, link, &(traits)->smrht)
1489
1490
1491/*!
1492 * @macro smr_shash_entered_mut_replace()
1493 *
1494 * @brief
1495 * Replaces the element used to make the cursor.
1496 *
1497 * @discussion
1498 * The passed in element must be the same that was used to make the cursor.
1499 *
1500 * The call must be made in the same SMR transaction that was entered
1501 * to make the cursor.
1502 *
1503 * The cursor is invalidated once this call returns.
1504 *
1505 * The removed element can't be added back to the hash table
1506 * and must be retired via SMR and not freed immediately.
1507 *
1508 * The new object MUST have the same key as the object it replaces,
1509 * otherwise behavior is undefined.
1510 *
1511 * @param smrh the scalable hash table.
1512 * @param cursor the cursor made for the element to remove.
1513 * @param old_link the element used to create @c cursor.
1514 * @param new_link the element to replace @c old_link with.
1515 * @param traits the SMR hash traits for this table.
1516 */
1517#define smr_shash_entered_mut_replace(cursor, old_link, new_link, traits) \
1518 __smr_shash_entered_mut_replace(cursor, old_link, new_link, &(traits)->smrht)
1519
1520
1521/*!
1522 * @macro smr_shash_entered_mut_abort()
1523 *
1524 * @brief
1525 * Invalidates a cursor made with @c smr_shash_entered_mut_begin()
1526 *
1527 * @discussion
1528 * The call must be made in the same SMR transaction that was entered
1529 * to make the cursor.
1530 *
1531 * @param cursor the cursor to invalidate.
1532 */
/* releases the cursor (and the bucket lock it holds) without mutating the chain */
#define smr_shash_entered_mut_abort(cursor) \
	__smr_shash_entered_mut_abort(cursor)
1535
1536
1537#pragma mark - implementation details
1538#pragma mark SMR hash traits
1539
/* recovers the object pointer type a traits struct was declared for */
#define smrht_obj_t(traits) \
	typeof((traits)->smrht_obj_type[0])
1542
1543static inline void *
1544__smrht_link_to_obj(smrh_traits_t traits, const struct smrq_slink *link)
1545{
1546 void *ptr = (void *)((uintptr_t)link - traits->link_offset);
1547
1548 __builtin_assume(ptr != NULL);
1549 return ptr;
1550}
1551
1552
1553#pragma mark SMR hash tables
1554
1555static inline unsigned long
1556__smr_hash_mask(struct smr_hash_array array)
1557{
1558 return ~0ul >> array.smrh_order;
1559}
1560
1561
1562__attribute__((overloadable))
1563static inline struct smrq_slist_head *
1564__smr_hash_bucket(
1565 const struct smr_hash *smrh,
1566 struct smrq_slink *link,
1567 smrh_traits_t smrht)
1568{
1569 struct smr_hash_array array = smr_hash_array_decode(smrh);
1570 uint32_t index = __smr_hash_mask(array) & smrht->obj_hash(link, 0);
1571
1572 return &array.smrh_array[index];
1573}
1574
1575__attribute__((overloadable))
1576static inline struct smrq_slist_head *
1577__smr_hash_bucket(
1578 const struct smr_hash *smrh,
1579 smrh_key_t key,
1580 smrh_traits_t smrht)
1581{
1582 struct smr_hash_array array = smr_hash_array_decode(smrh);
1583 uint32_t index = __smr_hash_mask(array) & smrht->key_hash(key, 0);
1584
1585 return &array.smrh_array[index];
1586}
1587
1588static inline void *
1589__smr_hash_entered_find(
1590 const struct smrq_slist_head *head,
1591 smrh_key_t key,
1592 smrh_traits_t smrht)
1593{
1594 for (struct smrq_slink *link = smr_entered_load(&head->first);
1595 link; link = smr_entered_load(&link->next)) {
1596 if (smrht->obj_equ(link, key)) {
1597 return __smrht_link_to_obj(traits: smrht, link);
1598 }
1599 }
1600
1601 return NULL;
1602}
1603
1604static inline void *
1605__smr_hash_serialized_find(
1606 const struct smrq_slist_head *head,
1607 smrh_key_t key,
1608 smrh_traits_t smrht)
1609{
1610 for (struct smrq_slink *link = smr_serialized_load(&head->first);
1611 link; link = smr_serialized_load(&link->next)) {
1612 if (smrht->obj_equ(link, key)) {
1613 return __smrht_link_to_obj(traits: smrht, link);
1614 }
1615 }
1616
1617 return NULL;
1618}
1619
1620static inline void *
1621__smr_hash_get(
1622 const struct smr_hash *smrh,
1623 smrh_key_t key,
1624 smrh_traits_t smrht)
1625{
1626 struct smrq_slist_head *head;
1627 void *obj = NULL;
1628
1629 smr_enter(smr: smrht->domain);
1630 head = __smr_hash_bucket(smrh, key, smrht);
1631 obj = __smr_hash_entered_find(head, key, smrht);
1632 if (obj && !smrht->obj_try_get(obj)) {
1633 obj = NULL;
1634 }
1635 smr_leave(smr: smrht->domain);
1636
1637 return obj;
1638}
1639
/*
 * Under the table's serialization, either returns a retained
 * pre-existing element for `key`, or inserts `link` at the bucket
 * head (bumping smrh_count) and returns NULL.
 *
 * NOTE: when a matching element exists but its obj_try_get() fails
 * (element presumably being torn down), `link` is inserted anyway
 * and NULL is returned, exactly as if no match had been found.
 */
static inline void *
__smr_hash_serialized_get_or_insert(
	struct smr_hash *smrh,
	smrh_key_t key,
	struct smrq_slink *link,
	smrh_traits_t smrht)
{
	struct smrq_slist_head *head;
	void *obj = NULL;

	head = __smr_hash_bucket(smrh, key, smrht);
	obj = __smr_hash_serialized_find(head, key, smrht);
	if (!obj || !smrht->obj_try_get(obj)) {
		smrh->smrh_count++;
		smrq_serialized_insert_head(head, link);
		obj = NULL;
	}

	return obj;
}
1660
/* Empties the table under serialization, calling `free` (if set) on each element. */
extern void __smr_hash_serialized_clear(
	struct smr_hash *smrh,
	smrh_traits_t smrht,
	void (^free)(void *obj));

/* Resizes the bucket array down; the `_and_unlock` suffix indicates `lock` is released before returning. */
extern kern_return_t __smr_hash_shrink_and_unlock(
	struct smr_hash *smrh,
	lck_mtx_t *lock,
	smrh_traits_t smrht);

/* Resizes the bucket array up; the `_and_unlock` suffix indicates `lock` is released before returning. */
extern kern_return_t __smr_hash_grow_and_unlock(
	struct smr_hash *smrh,
	lck_mtx_t *lock,
	smrh_traits_t smrht);
1675
1676
1677#pragma mark SMR scalable hash tables
1678
/*
 * Selects which of the two bucket arrays (current, or new migration
 * target) a lookup should use; consumed by __smr_shash_bucket().
 */
__enum_closed_decl(smrsh_sel_t, uint8_t, {
	SMRSH_CUR,	/* use smrsh_state's curidx/curshift */
	SMRSH_NEW,	/* use smrsh_state's newidx/newshift */
});
1683
/*
 * Loads smrsh_seed[idx] through an opaque pointer so the compiler
 * cannot turn the indexed load into a branchy select (see below).
 */
__attribute__((always_inline))
static inline uint32_t
__smr_shash_load_seed(
	const struct smr_shash *smrh,
	size_t                  idx)
{
	uintptr_t addr = (uintptr_t)smrh->smrsh_seed;

	/*
	 * prevent the optimizer from thinking it knows _anything_
	 * about `smrsh_seed` to avoid codegen like this:
	 *
	 *     return idx ? smrh->smrsh_seed[1] : smrh->smrsh_seed[0]
	 *
	 * This only has a control dependency which doesn't provide
	 * the proper ordering. (control dependencies order
	 * writes-after-dependency and not loads).
	 */

	return os_atomic_load(&((const uint32_t _Atomic *)addr)[idx], relaxed);
}
1705
/*
 * Loads smrsh_array[idx] through an opaque pointer so the compiler
 * cannot turn the indexed load into a branchy select (see below).
 */
__attribute__((always_inline))
static inline hw_lck_ptr_t *
__smr_shash_load_array(
	const struct smr_shash *smrh,
	size_t                  idx)
{
	uintptr_t addr = (uintptr_t)smrh->smrsh_array;

	/*
	 * prevent the optimizer from thinking it knows _anything_
	 * about `smrsh_array` to avoid codegen like this:
	 *
	 *     return idx ? smrh->smrsh_array[1] : smrh->smrsh_array[0]
	 *
	 * This only has a control dependency which doesn't provide
	 * the proper ordering. (control dependencies order
	 * writes-after-dependency and not loads).
	 */

	return os_atomic_load(&((hw_lck_ptr_t * _Atomic const *)addr)[idx], relaxed);
}
1727
1728__attribute__((always_inline, overloadable))
1729static inline uint32_t
1730__smr_shash_hash(
1731 const struct smr_shash *smrh,
1732 size_t idx,
1733 smrh_key_t key,
1734 smrh_traits_t traits)
1735{
1736 return traits->key_hash(key, __smr_shash_load_seed(smrh, idx));
1737}
1738
1739__attribute__((always_inline, overloadable))
1740static inline uint32_t
1741__smr_shash_hash(
1742 const struct smr_shash *smrh,
1743 size_t idx,
1744 const struct smrq_slink *link,
1745 smrh_traits_t traits)
1746{
1747 return traits->obj_hash(link, __smr_shash_load_seed(smrh, idx));
1748}
1749
1750static inline hw_lck_ptr_t *
1751__smr_shash_bucket(
1752 const struct smr_shash *smrh,
1753 smrsh_state_t state,
1754 smrsh_sel_t sel,
1755 uint32_t hash)
1756{
1757 hw_lck_ptr_t *array;
1758 uint8_t shift;
1759
1760 switch (sel) {
1761 case SMRSH_CUR:
1762 array = __smr_shash_load_array(smrh, idx: state.curidx);
1763 shift = state.curshift;
1764 break;
1765 case SMRSH_NEW:
1766 array = __smr_shash_load_array(smrh, idx: state.newidx);
1767 shift = state.newshift;
1768 break;
1769 }
1770
1771 return &array[hash >> shift];
1772}
1773
1774static inline bool
1775__smr_shash_is_stop(struct smrq_slink *link)
1776{
1777 return (uintptr_t)link & SMRSH_BUCKET_STOP_BIT;
1778}
1779
1780static inline struct smrq_slink *
1781__smr_shash_bucket_stop(const hw_lck_ptr_t *head)
1782{
1783 return (struct smrq_slink *)((uintptr_t)head | SMRSH_BUCKET_STOP_BIT);
1784}
1785
/* out-of-line slow path taken when a chain walk did not end on this bucket's own stop marker */
extern void *__smr_shash_entered_find_slow(
	const struct smr_shash *smrh,
	smrh_key_t              key,
	hw_lck_ptr_t           *head,
	smrh_traits_t           traits);
1791
1792static inline void *
1793__smr_shash_entered_find(
1794 const struct smr_shash *smrh,
1795 smrh_key_t key,
1796 smrh_traits_t traits)
1797{
1798 struct smrq_slink *link;
1799 smrsh_state_t state;
1800 hw_lck_ptr_t *head;
1801 uint32_t hash;
1802
1803 state = os_atomic_load(&smrh->smrsh_state, dependency);
1804 hash = __smr_shash_hash(smrh, idx: state.curidx, key, traits);
1805 head = __smr_shash_bucket(smrh, state, sel: SMRSH_CUR, hash);
1806
1807 link = (struct smrq_slink *)hw_lck_ptr_value(lck: head);
1808 while (!__smr_shash_is_stop(link)) {
1809 if (traits->obj_equ(link, key)) {
1810 return __smrht_link_to_obj(traits, link);
1811 }
1812 link = smr_entered_load(&link->next);
1813 }
1814
1815 if (__probable(link == __smr_shash_bucket_stop(head))) {
1816 return NULL;
1817 }
1818 return __smr_shash_entered_find_slow(smrh, key, head, traits);
1819}
1820
1821static inline void *
1822__smr_shash_entered_get(
1823 const struct smr_shash *smrh,
1824 smrh_key_t key,
1825 smrh_traits_t traits)
1826{
1827 void *obj = __smr_shash_entered_find(smrh, key, traits);
1828
1829 return obj && traits->obj_try_get(obj) ? obj : NULL;
1830}
1831
/* Tears the table down, calling `free` (if set) on each remaining element. */
extern void __smr_shash_destroy(
	struct smr_shash *smrh,
	smrh_traits_t traits,
	void (^free)(void *));

/* Out-of-line body behind smr_shash_entered_get_or_insert(). */
extern void *__smr_shash_entered_get_or_insert(
	struct smr_shash *smrh,
	smrh_key_t key,
	struct smrq_slink *link,
	smrh_traits_t traits);

/* Locates `link`'s bucket, locks it, and returns a mutation cursor for it. */
extern smr_shash_mut_cursor_t __smr_shash_entered_mut_begin(
	struct smr_shash *smrh,
	struct smrq_slink *link,
	smrh_traits_t traits);

/* Unlinks `link` through `cursor` and consumes the cursor. */
extern void __smr_shash_entered_mut_erase(
	struct smr_shash *smrh,
	smr_shash_mut_cursor_t cursor,
	struct smrq_slink *link,
	smrh_traits_t traits);

/* Splices `new_link` in place of `old_link` through `cursor`; consumes the cursor. */
extern void __smr_shash_entered_mut_replace(
	smr_shash_mut_cursor_t cursor,
	struct smrq_slink *old_link,
	struct smrq_slink *new_link);

/* Abandons `cursor` without mutating the chain. */
extern void __smr_shash_entered_mut_abort(
	smr_shash_mut_cursor_t cursor);
1861
1862__END_DECLS
1863
1864#endif /* _KERN_SMR_HASH_H_ */
1865