/*
 * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _SKYWALK_MEM_SKMEMARENAVAR_H
#define _SKYWALK_MEM_SKMEMARENAVAR_H

#ifdef BSD_KERNEL_PRIVATE
#include <skywalk/core/skywalk_var.h>

/*
 * Arena types.
 */
typedef enum {
        SKMEM_ARENA_TYPE_NEXUS,         /* skmem_arena_nexus */
        SKMEM_ARENA_TYPE_NECP,          /* skmem_arena_necp */
        SKMEM_ARENA_TYPE_SYSTEM,        /* skmem_arena_system */
} skmem_arena_type_t;

struct skmem_arena_mmap_info;

/*
 * Structure common to all arena types.
 */
struct skmem_arena {
        decl_lck_mtx_data(, ar_lock);   /* arena lock */
        uint32_t ar_refcnt;             /* reference count */

        /*
         * Arena properties.
         */
        TAILQ_ENTRY(skmem_arena) ar_link; /* skmem_region linkage */
        char ar_name[64];               /* arena name */
        skmem_arena_type_t ar_type;     /* arena type */
        uint32_t ar_flags;              /* ARF_* */
        size_t ar_zsize;                /* zone object size */
        IOSKArenaRef ar_ar;             /* backing IOSKArena */

        /*
         * Regions.
         */
        struct skmem_region *ar_regions[SKMEM_REGIONS]; /* arena regions */

        /*
         * ar_mapsize gets set the first time the arena is mapped to a task;
         * it is an estimate since we don't update it on subsequent mappings.
         * We use it only for statistics purposes.
         */
        mach_vm_size_t ar_mapsize;      /* estimated mmap size */
        uint32_t ar_mapcnt;             /* # of active mmap on arena */
        uint32_t ar_maprdrcnt;          /* # of redirected mmap */
        SLIST_HEAD(, skmem_arena_mmap_info) ar_map_head; /* list of mmap info */
};

/* valid values for ar_flags */
#define ARF_ACTIVE      0x1             /* arena is active */
#define ARF_DEFUNCT     (1U << 31)      /* arena is defunct */

#define ARF_BITS        "\020\01ACTIVE\040DEFUNCT"

#define AR_LOCK(_ar)                    \
        lck_mtx_lock(&(_ar)->ar_lock)
#define AR_LOCK_ASSERT_HELD(_ar)        \
        LCK_MTX_ASSERT(&(_ar)->ar_lock, LCK_MTX_ASSERT_OWNED)
#define AR_LOCK_ASSERT_NOTHELD(_ar)     \
        LCK_MTX_ASSERT(&(_ar)->ar_lock, LCK_MTX_ASSERT_NOTOWNED)
#define AR_UNLOCK(_ar)                  \
        lck_mtx_unlock(&(_ar)->ar_lock)
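
/*
 * Illustrative only: a minimal sketch of checking arena state under the
 * arena lock, using only the fields and macros defined above.  The helper
 * name is hypothetical and not part of the Skywalk code.
 */
static inline boolean_t
skmem_arena_is_defunct_example(struct skmem_arena *ar)
{
        boolean_t defunct;

        AR_LOCK(ar);
        /* ar_flags is stable while ar_lock is held */
        defunct = ((ar->ar_flags & ARF_DEFUNCT) != 0);
        AR_UNLOCK(ar);

        return defunct;
}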

#define AR_MEM_TOTAL(_ar, _id)          \
        ((_ar)->ar_regions[_id]->skr_memtotal)
#define AR_MEM_INUSE(_ar, _id)          \
        ((_ar)->ar_regions[_id]->skr_meminuse)
#define AR_MEM_WIRED_INUSE(_ar, _id)    \
        ((_ar)->ar_regions[_id]->skr_w_meminuse)
#define AR_MEM_SEGSIZE(_ar, _id)        \
        ((_ar)->ar_regions[_id]->skr_seg_size)
#define AR_MEM_SEGCNT(_ar, _id)         \
        ((_ar)->ar_regions[_id]->skr_seg_max_cnt)
#define AR_MEM_OBJCNT_R(_ar, _id)       \
        ((_ar)->ar_regions[_id]->skr_r_obj_cnt)
#define AR_MEM_OBJCNT_C(_ar, _id)       \
        ((_ar)->ar_regions[_id]->skr_c_obj_cnt)
#define AR_MEM_OBJSIZE_R(_ar, _id)      \
        ((_ar)->ar_regions[_id]->skr_r_obj_size)
#define AR_MEM_OBJSIZE_C(_ar, _id)      \
        ((_ar)->ar_regions[_id]->skr_c_obj_size)
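
/*
 * Illustrative only: a hedged sketch of totalling arena memory with the
 * accessor macros above.  It assumes that unused ar_regions[] slots are
 * NULL (the macros themselves do not check) and that the caller holds
 * ar_lock.  The helper name is hypothetical and not part of the Skywalk
 * code; skmem_arena_get_stats() below is presumably the real interface
 * for this kind of query.
 */
static inline void
skmem_arena_mem_totals_example(struct skmem_arena *ar, uint64_t *total,
    uint64_t *inuse)
{
        uint32_t i;

        AR_LOCK_ASSERT_HELD(ar);
        *total = *inuse = 0;
        for (i = 0; i < SKMEM_REGIONS; i++) {
                if (ar->ar_regions[i] == NULL) {
                        continue;       /* region not present in this arena */
                }
                *total += AR_MEM_TOTAL(ar, i);
                *inuse += AR_MEM_INUSE(ar, i);
        }
}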

/*
 * Arena Task Map Information.
 */
struct skmem_arena_mmap_info {
        SLIST_ENTRY(skmem_arena_mmap_info) ami_link;
        struct skmem_arena *ami_arena;  /* backing arena */
        IOSKMapperRef ami_mapref;       /* IOSKMapper handle */
        task_t ami_maptask;             /* task the arena is mapped into */
        mach_vm_address_t ami_mapaddr;  /* start address in task */
        mach_vm_size_t ami_mapsize;     /* size of memory map */
        boolean_t ami_redirect;         /* map is redirected */
};
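
/*
 * Illustrative only: a hedged sketch of walking the arena's mmap list to
 * count redirected maps, assuming the caller holds ar_lock.  The arena
 * already tracks a similar count in ar_maprdrcnt; the helper name is
 * hypothetical and not part of the Skywalk code.
 */
static inline uint32_t
skmem_arena_count_redirected_example(struct skmem_arena *ar)
{
        struct skmem_arena_mmap_info *ami;
        uint32_t cnt = 0;

        AR_LOCK_ASSERT_HELD(ar);
        SLIST_FOREACH(ami, &ar->ar_map_head, ami_link) {
                if (ami->ami_redirect) {
                        cnt++;
                }
        }
        return cnt;
}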

/*
 * Nexus Adapter Arena.
 */
struct skmem_arena_nexus {
        struct skmem_arena arn_cmn;     /* common arena struct */

        struct kern_pbufpool *arn_rx_pp; /* rx ppool handle */
        struct kern_pbufpool *arn_tx_pp; /* tx ppool handle */
        uint32_t arn_mode;              /* mode flags */
        nexus_meta_type_t arn_md_type;  /* mdata regions type */
        nexus_meta_subtype_t arn_md_subtype; /* mdata regions subtype */
        /*
         * For arenas used by adapters with external rings, slot callbacks,
         * or invocations via KPIs that access kernel slot descriptors, we
         * need to make sure the ksd region is kept intact during defunct.
         * A non-zero value indicates that we leave the ksd region alone
         * until the arena is torn down for good.
         */
        int arn_ksd_nodefunct;

        /*
         * Caches.
         */
        struct skmem_cache *arn_schema_cache; /* schema object cache */
        struct skmem_cache *arn_ring_cache;   /* ring object cache */
        struct skmem_cache *arn_txaksd_cache; /* tx/alloc slots cache */
        struct skmem_cache *arn_rxfksd_cache; /* rx/free slots cache */

        /*
         * Statistics.
         *
         * This may be NULL if the arena was created without a statistics
         * region.  Otherwise, this value contains the segment address of
         * the object that we allocate from that region.  An arena contains
         * at most one monolithic stats region.
         */
        void *arn_stats_obj;            /* adapter stats object */

        /*
         * Flow advisory.
         *
         * This may be NULL if the arena was created without a flow advisory
         * region.  Otherwise, this value contains the segment address of
         * the object that we allocate from that region.  An arena contains
         * at most one monolithic flow advisory region.
         */
        struct __flowadv_entry *arn_flowadv_obj;

        /*
         * Nexus advisory.
         *
         * This may be NULL if the arena was created without a nexus advisory
         * region.  Otherwise, this value contains the segment address of
         * the object that we allocate from that region.  An arena contains
         * at most one monolithic nexus advisory region, which is nexus-wide.
         */
        void *arn_nexusadv_obj;
};

/* valid flags for arn_mode */
#define AR_NEXUS_MODE_EXTERNAL_PPOOL    0x1     /* external packet pool */

/*
 * Given an arena, return its nexus variant (if applicable).
 */
__attribute__((always_inline))
static inline struct skmem_arena_nexus *
skmem_arena_nexus(struct skmem_arena *ar)
{
        if (__improbable(ar->ar_type != SKMEM_ARENA_TYPE_NEXUS)) {
                return NULL;
        }

        return (struct skmem_arena_nexus *)ar;
}
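
/*
 * Illustrative only: a hedged sketch of using the accessor above.  The
 * conversion yields NULL for non-nexus arenas, and the optional per-region
 * objects may themselves be NULL, so both must be checked.  The helper
 * name is hypothetical and not part of the Skywalk code.
 */
static inline struct __flowadv_entry *
skmem_arena_flowadv_example(struct skmem_arena *ar)
{
        struct skmem_arena_nexus *arn = skmem_arena_nexus(ar);

        if (arn == NULL) {
                return NULL;    /* not a nexus arena */
        }
        /* NULL if the arena was created without a flow advisory region */
        return arn->arn_flowadv_obj;
}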

/*
 * NECP Arena.
 */
struct skmem_arena_necp {
        struct skmem_arena arc_cmn;     /* common arena struct */

        /*
         * Caches.
         */
        /* stats cache (kernel master mirrored with slave ustats) */
        struct skmem_cache *arc_kstats_cache;
};

/*
 * System Arena.
 */
struct skmem_arena_system {
        struct skmem_arena ars_cmn;     /* common arena struct */

        /*
         * sysctls.
         *
         * This value contains the kernel virtual address of the system-wide
         * sysctls object.  This object is persistent, i.e. it does not get
         * allocated or freed along with the arena.
         */
        void *ars_sysctls_obj;
        size_t ars_sysctls_objsize;
};

struct kern_nexus_advisory;

__BEGIN_DECLS
extern struct skmem_arena *skmem_arena_create_for_nexus(
    const struct nexus_adapter *, struct skmem_region_params[SKMEM_REGIONS],
    struct kern_pbufpool **, struct kern_pbufpool **, boolean_t, boolean_t,
    struct kern_nexus_advisory *, int *);
extern void skmem_arena_nexus_sd_set_noidle(struct skmem_arena_nexus *, int);
extern boolean_t skmem_arena_nexus_sd_idle(struct skmem_arena_nexus *);
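
/*
 * Illustrative only: a hedged sketch of bracketing external access to
 * kernel slot descriptors so the ksd region is not defuncted underneath
 * the caller.  It assumes the int argument of
 * skmem_arena_nexus_sd_set_noidle() is a delta applied to
 * arn_ksd_nodefunct (see the comment in struct skmem_arena_nexus); the
 * helper name and callback are hypothetical.
 */
static inline void
skmem_arena_ksd_access_example(struct skmem_arena_nexus *arn,
    void (*access_cb)(struct skmem_arena_nexus *))
{
        skmem_arena_nexus_sd_set_noidle(arn, 1);  /* pin ksd region */
        access_cb(arn);                 /* touch kernel slot descriptors */
        skmem_arena_nexus_sd_set_noidle(arn, -1); /* unpin */
}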

extern struct skmem_arena *skmem_arena_create_for_necp(const char *,
    struct skmem_region_params *, struct skmem_region_params *, int *);
extern struct skmem_arena_necp *skmem_arena_necp(struct skmem_arena *);

extern struct skmem_arena *skmem_arena_create_for_system(const char *, int *);
extern struct skmem_arena_system *skmem_arena_system(struct skmem_arena *);
extern void *skmem_arena_system_sysctls_obj_addr(struct skmem_arena *);
extern size_t skmem_arena_system_sysctls_obj_size(struct skmem_arena *);
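
/*
 * Illustrative only: a hedged sketch of retrieving the persistent
 * system-wide sysctls object through the accessors above.  It assumes the
 * arena passed in is a system arena; the helper name is hypothetical and
 * not part of the Skywalk code.
 */
static inline void
skmem_arena_sysctls_example(struct skmem_arena *ar, void **buf, size_t *len)
{
        *buf = skmem_arena_system_sysctls_obj_addr(ar);
        *len = skmem_arena_system_sysctls_obj_size(ar);
}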

extern void skmem_arena_retain(struct skmem_arena *);
extern boolean_t skmem_arena_release(struct skmem_arena *);
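
/*
 * Illustrative only: a hedged sketch of the retain/release pairing around
 * a caller-supplied use of the arena.  skmem_arena_release() returns a
 * boolean_t, presumably indicating whether the last reference was dropped;
 * that semantic lives in the implementation, not this header.  The helper
 * name and callback are hypothetical.
 */
static inline void
skmem_arena_borrow_example(struct skmem_arena *ar,
    void (*use_cb)(struct skmem_arena *))
{
        skmem_arena_retain(ar);         /* hold a reference across use */
        use_cb(ar);
        (void) skmem_arena_release(ar); /* drop it; arena may go away here */
}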
extern int skmem_arena_mmap(struct skmem_arena *, struct proc *,
    struct skmem_arena_mmap_info *);
extern void skmem_arena_munmap(struct skmem_arena *,
    struct skmem_arena_mmap_info *);
extern void skmem_arena_munmap_channel(struct skmem_arena *,
    struct kern_channel *);
extern int skmem_arena_mredirect(struct skmem_arena *,
    struct skmem_arena_mmap_info *, struct proc *, boolean_t *);
extern int skmem_arena_defunct(struct skmem_arena *);
extern void skmem_arena_get_stats(struct skmem_arena *, uint64_t *,
    uint64_t *);
extern mach_vm_offset_t skmem_arena_get_region_offset(struct skmem_arena *,
    skmem_region_id_t);
extern void skmem_arena_reap(struct skmem_arena *, boolean_t);
__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* _SKYWALK_MEM_SKMEMARENAVAR_H */