/*
 * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _SKYWALK_PACKET_PBUFPOOLVAR_H_
#define _SKYWALK_PACKET_PBUFPOOLVAR_H_

#ifdef BSD_KERNEL_PRIVATE
#include <skywalk/core/skywalk_var.h>

struct __kern_quantum;
struct __kern_packet;

/*
 * User packet pool hash bucket. Packets allocated by user space are
 * kept in this hash table. This allows the kernel to determine whether
 * a given packet object is valid or has already been freed, and to take
 * the appropriate measure during internalization.
 */
struct kern_pbufpool_u_bkt {
        SLIST_HEAD(, __kern_quantum) upp_head;
};

struct kern_pbufpool_u_bft_bkt {
        SLIST_HEAD(, __kern_buflet_ext) upp_head;
};
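
/*
 * The buflet bucket above plays the same role for buflets allocated by
 * user space. Illustrative sketch only (not the actual implementation):
 * a metadata object index selects a bucket, whose singly-linked list is
 * then searched for the matching object. The table size and SLIST link
 * field name used here are assumptions for illustration.
 *
 *      struct kern_pbufpool_u_bkt *bkt;
 *      struct __kern_quantum *kqum;
 *
 *      bkt = &pp->pp_u_hash_table[md_idx % UPP_HASH_TABLE_SIZE];
 *      SLIST_FOREACH(kqum, &bkt->upp_head, qum_upp_link) {
 *              // match kqum against md_idx here
 *      }
 */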

#define PBUFPOOL_MAX_BUF_REGIONS 2
#define PBUFPOOL_BUF_IDX_DEF 0
#define PBUFPOOL_BUF_IDX_LARGE 1

struct kern_pbufpool {
        decl_lck_mtx_data(, pp_lock);
        uint32_t pp_refcnt;
        uint32_t pp_flags;
        uint32_t pp_buf_obj_size[PBUFPOOL_MAX_BUF_REGIONS];
        uint32_t pp_buf_size[PBUFPOOL_MAX_BUF_REGIONS];
        uint16_t pp_max_frags;

        /*
         * Caches
         */
        struct skmem_cache *pp_buf_cache[PBUFPOOL_MAX_BUF_REGIONS];
        struct skmem_cache *pp_kmd_cache;
        struct skmem_cache *pp_kbft_cache[PBUFPOOL_MAX_BUF_REGIONS];

        /*
         * Regions
         */
        struct skmem_region *pp_buf_region[PBUFPOOL_MAX_BUF_REGIONS];
        struct skmem_region *pp_kmd_region;
        struct skmem_region *pp_umd_region;
        struct skmem_region *pp_ubft_region;
        struct skmem_region *pp_kbft_region;

        /*
         * User packet pool: packet metadata hash table
         */
        struct kern_pbufpool_u_bkt *pp_u_hash_table;
        uint64_t pp_u_bufinuse;

        /*
         * User packet pool: buflet hash table
         */
        struct kern_pbufpool_u_bft_bkt *pp_u_bft_hash_table;
        uint64_t pp_u_bftinuse;

        void *pp_ctx;
        pbuf_ctx_retain_fn_t pp_ctx_retain;
        pbuf_ctx_release_fn_t pp_ctx_release;
        nexus_meta_type_t pp_md_type;
        nexus_meta_subtype_t pp_md_subtype;
        uint32_t pp_midx_start;
        uint32_t pp_bidx_start;
        pbufpool_name_t pp_name;
        pbuf_seg_ctor_fn_t pp_pbuf_seg_ctor;
        pbuf_seg_dtor_fn_t pp_pbuf_seg_dtor;
};

/* valid values for pp_flags */
#define PPF_EXTERNAL 0x1 /* externally configured */
#define PPF_CLOSED 0x2 /* closed; awaiting final destruction */
#define PPF_MONOLITHIC 0x4 /* non-slab-based buffer region */
/* buflet is truncated and may not contain the full payload */
#define PPF_TRUNCATED_BUF 0x8
#define PPF_KERNEL 0x10 /* kernel only, no user region(s) */
#define PPF_BUFFER_ON_DEMAND 0x20 /* attach buffers to packet on demand */
#define PPF_BATCH 0x40 /* capable of batch alloc/free */
#define PPF_DYNAMIC 0x80 /* capable of magazine resizing */
#define PPF_LARGE_BUF 0x100 /* configured with large buffers */

#define PP_KERNEL_ONLY(_pp) \
        (((_pp)->pp_flags & PPF_KERNEL) != 0)

#define PP_HAS_TRUNCATED_BUF(_pp) \
        (((_pp)->pp_flags & PPF_TRUNCATED_BUF) != 0)

#define PP_HAS_BUFFER_ON_DEMAND(_pp) \
        (((_pp)->pp_flags & PPF_BUFFER_ON_DEMAND) != 0)

#define PP_BATCH_CAPABLE(_pp) \
        (((_pp)->pp_flags & PPF_BATCH) != 0)

#define PP_DYNAMIC(_pp) \
        (((_pp)->pp_flags & PPF_DYNAMIC) != 0)

#define PP_HAS_LARGE_BUF(_pp) \
        (((_pp)->pp_flags & PPF_LARGE_BUF) != 0)

#define PP_LOCK(_pp) \
        lck_mtx_lock(&(_pp)->pp_lock)
#define PP_LOCK_ASSERT_HELD(_pp) \
        LCK_MTX_ASSERT(&(_pp)->pp_lock, LCK_MTX_ASSERT_OWNED)
#define PP_LOCK_ASSERT_NOTHELD(_pp) \
        LCK_MTX_ASSERT(&(_pp)->pp_lock, LCK_MTX_ASSERT_NOTOWNED)
#define PP_UNLOCK(_pp) \
        lck_mtx_unlock(&(_pp)->pp_lock)

#define PP_BUF_SIZE_DEF(_pp) ((_pp)->pp_buf_size[PBUFPOOL_BUF_IDX_DEF])
#define PP_BUF_SIZE_LARGE(_pp) ((_pp)->pp_buf_size[PBUFPOOL_BUF_IDX_LARGE])

#define PP_BUF_OBJ_SIZE_DEF(_pp) \
        ((_pp)->pp_buf_obj_size[PBUFPOOL_BUF_IDX_DEF])
#define PP_BUF_OBJ_SIZE_LARGE(_pp) \
        ((_pp)->pp_buf_obj_size[PBUFPOOL_BUF_IDX_LARGE])

#define PP_BUF_REGION_DEF(_pp) ((_pp)->pp_buf_region[PBUFPOOL_BUF_IDX_DEF])
#define PP_BUF_REGION_LARGE(_pp) ((_pp)->pp_buf_region[PBUFPOOL_BUF_IDX_LARGE])

#define PP_BUF_CACHE_DEF(_pp) ((_pp)->pp_buf_cache[PBUFPOOL_BUF_IDX_DEF])
#define PP_BUF_CACHE_LARGE(_pp) ((_pp)->pp_buf_cache[PBUFPOOL_BUF_IDX_LARGE])

#define PP_KBFT_CACHE_DEF(_pp) ((_pp)->pp_kbft_cache[PBUFPOOL_BUF_IDX_DEF])
#define PP_KBFT_CACHE_LARGE(_pp) ((_pp)->pp_kbft_cache[PBUFPOOL_BUF_IDX_LARGE])
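
/*
 * Illustrative usage of the accessors above (sketch only; "pp" and
 * "bufsz" are hypothetical locals, and the lock is taken here purely
 * for illustration):
 *
 *      uint32_t bufsz;
 *
 *      PP_LOCK(pp);
 *      if (PP_HAS_LARGE_BUF(pp)) {
 *              bufsz = PP_BUF_SIZE_LARGE(pp);
 *      } else {
 *              bufsz = PP_BUF_SIZE_DEF(pp);
 *      }
 *      PP_UNLOCK(pp);
 */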

__BEGIN_DECLS
extern int pp_init(void);
extern void pp_fini(void);
extern void pp_close(struct kern_pbufpool *);

/* create flags for pp_create() */
#define PPCREATEF_EXTERNAL 0x1 /* externally requested */
#define PPCREATEF_KERNEL_ONLY 0x2 /* kernel-only */
#define PPCREATEF_TRUNCATED_BUF 0x4 /* compat-only (buf is short) */
#define PPCREATEF_ONDEMAND_BUF 0x8 /* buf alloc/free is decoupled */
#define PPCREATEF_DYNAMIC 0x10 /* dynamic per-CPU magazines */

extern struct kern_pbufpool *pp_create(const char *name,
    struct skmem_region_params *srp_array, pbuf_seg_ctor_fn_t buf_seg_ctor,
    pbuf_seg_dtor_fn_t buf_seg_dtor, const void *ctx,
    pbuf_ctx_retain_fn_t ctx_retain, pbuf_ctx_release_fn_t ctx_release,
    uint32_t ppcreatef);
extern void pp_destroy(struct kern_pbufpool *);
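
/*
 * Illustrative sketch of pool creation (not taken from an actual caller).
 * "srp" is assumed to be an skmem_region_params array previously sized via
 * pp_regions_params_adjust(); the constructor/destructor, context and flag
 * choices below are arbitrary for illustration.
 *
 *      struct kern_pbufpool *pp;
 *
 *      pp = pp_create("example_pool", srp, NULL, NULL, NULL, NULL, NULL,
 *          PPCREATEF_KERNEL_ONLY);
 *      if (pp != NULL) {
 *              // ... use the pool ...
 *              pp_destroy(pp);
 *      }
 *
 * Teardown via pp_destroy() is shown only for symmetry; callers may instead
 * go through pp_close()/pp_release().
 */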

extern int pp_init_upp(struct kern_pbufpool *, boolean_t);
extern void pp_insert_upp(struct kern_pbufpool *, struct __kern_quantum *,
    pid_t);
extern void pp_insert_upp_locked(struct kern_pbufpool *,
    struct __kern_quantum *, pid_t);
extern void pp_insert_upp_batch(struct kern_pbufpool *pp, pid_t pid,
    uint64_t *array, uint32_t num);
extern struct __kern_quantum *pp_remove_upp(struct kern_pbufpool *, obj_idx_t,
    int *);
extern struct __kern_quantum *pp_remove_upp_locked(struct kern_pbufpool *,
    obj_idx_t, int *);
extern struct __kern_quantum *pp_find_upp(struct kern_pbufpool *, obj_idx_t);
extern void pp_purge_upp(struct kern_pbufpool *, pid_t);
extern struct __kern_buflet *pp_remove_upp_bft(struct kern_pbufpool *,
    obj_idx_t, int *);
extern void pp_insert_upp_bft(struct kern_pbufpool *, struct __kern_buflet *,
    pid_t);
extern boolean_t pp_isempty_upp(struct kern_pbufpool *);
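
/*
 * Illustrative sketch of the user packet pool bookkeeping (names "kqum",
 * "md_idx", "pid" and "err" are hypothetical locals):
 *
 *      pp_insert_upp(pp, kqum, pid);   // track packet handed to user space
 *      ...
 *      int err = 0;
 *      struct __kern_quantum *kq = pp_remove_upp(pp, md_idx, &err);
 *      if (kq == NULL || err != 0) {
 *              // user space supplied a stale or already-freed object index
 *      }
 */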

extern void pp_retain_locked(struct kern_pbufpool *);
extern void pp_retain(struct kern_pbufpool *);
extern boolean_t pp_release_locked(struct kern_pbufpool *);
extern boolean_t pp_release(struct kern_pbufpool *);

/* flags for pp_regions_params_adjust() */
/* configure packet pool regions for RX only */
#define PP_REGION_CONFIG_BUF_IODIR_IN 0x00000001
/* configure packet pool regions for TX only */
#define PP_REGION_CONFIG_BUF_IODIR_OUT 0x00000002
/* configure packet pool regions for bidirectional operation */
#define PP_REGION_CONFIG_BUF_IODIR_BIDIR \
        (PP_REGION_CONFIG_BUF_IODIR_IN | PP_REGION_CONFIG_BUF_IODIR_OUT)
/* configure packet pool metadata regions as persistent (wired) */
#define PP_REGION_CONFIG_MD_PERSISTENT 0x00000004
/* configure packet pool buffer regions as persistent (wired) */
#define PP_REGION_CONFIG_BUF_PERSISTENT 0x00000008
/* enable magazine layer (per-CPU caches) for packet pool metadata regions */
#define PP_REGION_CONFIG_MD_MAGAZINE_ENABLE 0x00000010
/* configure packet pool regions required for kernel-only operations */
#define PP_REGION_CONFIG_KERNEL_ONLY 0x00000020
/* configure packet pool buflet regions */
#define PP_REGION_CONFIG_BUFLET 0x00000040
/* configure packet pool buffer region as user read-only */
#define PP_REGION_CONFIG_BUF_UREADONLY 0x00000080
/* configure packet pool buffer region as kernel read-only */
#define PP_REGION_CONFIG_BUF_KREADONLY 0x00000100
/* configure packet pool buffer region as a single segment */
#define PP_REGION_CONFIG_BUF_MONOLITHIC 0x00000200
/* configure packet pool buffer region as physically contiguous segment */
#define PP_REGION_CONFIG_BUF_SEGPHYSCONTIG 0x00000400
/* configure packet pool buffer region as cache-inhibiting */
#define PP_REGION_CONFIG_BUF_NOCACHE 0x00000800
/* configure packet pool buffer region (backing IOMD) as thread safe */
#define PP_REGION_CONFIG_BUF_THREADSAFE 0x00002000

extern void pp_regions_params_adjust(struct skmem_region_params *,
    nexus_meta_type_t, nexus_meta_subtype_t, uint32_t, uint16_t, uint32_t,
    uint32_t, uint32_t, uint32_t, uint32_t);
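
/*
 * The flags above are OR-ed together into a single configuration word for
 * pp_regions_params_adjust(). For example, a bidirectional pool with
 * per-CPU metadata magazines and buflet regions might pass:
 *
 *      uint32_t ppcfg = PP_REGION_CONFIG_BUF_IODIR_BIDIR |
 *          PP_REGION_CONFIG_MD_MAGAZINE_ENABLE |
 *          PP_REGION_CONFIG_BUFLET;
 */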

extern uint64_t pp_alloc_packet(struct kern_pbufpool *, uint16_t, uint32_t);
extern uint64_t pp_alloc_packet_by_size(struct kern_pbufpool *, uint32_t,
    uint32_t);
extern int pp_alloc_packet_batch(struct kern_pbufpool *, uint16_t, uint64_t *,
    uint32_t *, boolean_t, alloc_cb_func_t, const void *, uint32_t);
extern int pp_alloc_pktq(struct kern_pbufpool *, uint16_t, struct pktq *,
    uint32_t, alloc_cb_func_t, const void *, uint32_t);
extern void pp_free_packet(struct kern_pbufpool *, uint64_t);
extern void pp_free_packet_batch(struct kern_pbufpool *, uint64_t *, uint32_t);
extern void pp_free_packet_single(struct __kern_packet *);
extern void pp_free_packet_chain(struct __kern_packet *, int *);
extern void pp_free_pktq(struct pktq *);
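
/*
 * Illustrative sketch of single-packet allocation and free. SKMEM_NOSLEEP
 * is assumed to be an acceptable skmflag value here, and the uint16_t
 * argument is taken to be the number of buflets requested.
 *
 *      uint64_t ph;
 *
 *      ph = pp_alloc_packet(pp, pp->pp_max_frags, SKMEM_NOSLEEP);
 *      if (ph != 0) {
 *              // ... fill in and use the packet ...
 *              pp_free_packet(pp, ph);
 *      }
 */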
extern errno_t pp_alloc_buffer(const kern_pbufpool_t, mach_vm_address_t *,
    kern_segment_t *, kern_obj_idx_seg_t *, uint32_t);
extern void pp_free_buffer(const kern_pbufpool_t, mach_vm_address_t);
extern errno_t pp_alloc_buflet(struct kern_pbufpool *pp, kern_buflet_t *kbft,
    uint32_t skmflag, bool large);
extern errno_t pp_alloc_buflet_batch(struct kern_pbufpool *pp, uint64_t *array,
    uint32_t *size, uint32_t skmflag, bool large);
extern void pp_free_buflet(const kern_pbufpool_t, kern_buflet_t);
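
/*
 * Illustrative sketch of buflet allocation and free (assumes SKMEM_NOSLEEP
 * as the skmflag; "large" selects the large buffer region when the pool
 * was configured with one):
 *
 *      kern_buflet_t bft = NULL;
 *
 *      if (pp_alloc_buflet(pp, &bft, SKMEM_NOSLEEP, false) == 0) {
 *              // ... attach to a packet or use directly ...
 *              pp_free_buflet(pp, bft);
 *      }
 */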
extern void pp_reap_caches(boolean_t);
__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* !_SKYWALK_PACKET_PBUFPOOLVAR_H_ */