1 | /* |
2 | * Copyright (c) 2015-2021 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #include <skywalk/os_skywalk_private.h> |
30 | #include <skywalk/nexus/upipe/nx_user_pipe.h> |
31 | #include <skywalk/nexus/kpipe/nx_kernel_pipe.h> |
32 | #include <skywalk/nexus/flowswitch/nx_flowswitch.h> |
33 | #include <skywalk/nexus/netif/nx_netif.h> |
34 | #include <skywalk/nexus/monitor/nx_monitor.h> |
35 | |
36 | static STAILQ_HEAD(, nxdom) nexus_domains = |
37 | STAILQ_HEAD_INITIALIZER(nexus_domains); |
38 | |
39 | static void nxdom_attach(struct nxdom *); |
40 | static void nxdom_detach(struct nxdom *); |
41 | static void nxdom_init(struct nxdom *); |
42 | static void nxdom_terminate(struct nxdom *); |
43 | static void nxdom_fini(struct nxdom *); |
44 | static void nxdom_del_provider_final(struct kern_nexus_domain_provider *); |
45 | |
46 | static int nxdom_prov_ext_init(struct kern_nexus_domain_provider *); |
47 | static void nxdom_prov_ext_fini(struct kern_nexus_domain_provider *); |
48 | static struct kern_nexus_domain_provider *nxdom_prov_alloc(zalloc_flags_t); |
49 | static void nxdom_prov_free(struct kern_nexus_domain_provider *); |
50 | |
51 | static uint32_t nxprov_bound_var(uint32_t *, uint32_t, uint32_t, uint32_t, |
52 | const char *); |
53 | static void nxprov_detaching_enqueue(struct kern_nexus_domain_provider *); |
54 | static struct kern_nexus_domain_provider *nxprov_detaching_dequeue(void); |
55 | static void nxprov_detacher(void *, wait_result_t); |
56 | static int nxprov_detacher_cont(int); |
57 | |
58 | static struct nexus_controller *ncd_alloc(zalloc_flags_t); |
59 | static void ncd_free(struct nexus_controller *); |
60 | |
61 | static struct nexus_attr *nxa_alloc(zalloc_flags_t); |
62 | static void nxa_free(struct nexus_attr *); |
63 | |
64 | static int _kern_nexus_ifattach(struct nxctl *nxctl, const uuid_t nx_uuid, |
65 | struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host, |
66 | uuid_t *nx_if_uuid); |
67 | |
68 | static SKMEM_TYPE_DEFINE(ncd_zone, struct nexus_controller); |
69 | |
70 | static SKMEM_TYPE_DEFINE(nxdom_prov_zone, struct kern_nexus_domain_provider); |
71 | |
72 | static SKMEM_TYPE_DEFINE(nxa_zone, struct nexus_attr); |
73 | |
74 | static int __nxdom_inited = 0; |
75 | static STAILQ_HEAD(, kern_nexus_domain_provider) nxprov_detaching_head = |
76 | STAILQ_HEAD_INITIALIZER(nxprov_detaching_head); |
77 | static uint32_t nxprov_detaching_cnt; |
78 | static void *nxprov_detach_wchan; /* wait channel for detacher */ |
79 | |
80 | /* |
81 | * Array of default nexus domain providers. Initialized once during |
82 | * domain attach time; no lock is needed to read as they can be treated |
83 | * as immutables, since default providers imply built-in ones and they |
84 | * never detach in practice. |
85 | */ |
86 | struct kern_nexus_domain_provider *nxdom_prov_default[NEXUS_TYPE_MAX]; |
87 | |
88 | void |
89 | nxdom_attach_all(void) |
90 | { |
91 | struct nxdom *nxdom; |
92 | thread_t tp = THREAD_NULL; |
93 | |
94 | SK_LOCK_ASSERT_HELD(); |
95 | ASSERT(!__nxdom_inited); |
96 | ASSERT(STAILQ_EMPTY(&nexus_domains)); |
97 | |
98 | #if CONFIG_NEXUS_FLOWSWITCH |
99 | nxdom_attach(&nx_flowswitch_dom_s); |
100 | #endif /* CONFIG_NEXUS_FLOWSWITCH */ |
101 | #if CONFIG_NEXUS_USER_PIPE |
102 | nxdom_attach(&nx_upipe_dom_s); |
103 | #endif /* CONFIG_NEXUS_USER_PIPE */ |
104 | #if CONFIG_NEXUS_KERNEL_PIPE |
105 | nxdom_attach(&nx_kpipe_dom_s); |
106 | #endif /* CONFIG_NEXUS_KERNEL_PIPE */ |
107 | #if CONFIG_NEXUS_NETIF |
108 | nxdom_attach(&nx_netif_dom_s); |
109 | #endif /* CONFIG_NEXUS_NETIF */ |
110 | #if CONFIG_NEXUS_MONITOR |
111 | nxdom_attach(&nx_monitor_dom_s); |
112 | #endif /* CONFIG_NEXUS_MONITOR */ |
113 | |
114 | /* ask domains to initialize */ |
	STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link) {
		nxdom_init(nxdom);
	}
117 | |
	if (kernel_thread_start(nxprov_detacher, NULL, &tp) != KERN_SUCCESS) {
		panic_plain("%s: couldn't create detacher thread", __func__);
		/* NOTREACHED */
		__builtin_unreachable();
	}
	thread_deallocate(tp);
124 | |
125 | __nxdom_inited = 1; |
126 | } |
127 | |
128 | void |
129 | nxdom_detach_all(void) |
130 | { |
131 | struct nxdom *nxdom, *tnxdom; |
132 | |
133 | SK_LOCK_ASSERT_HELD(); |
134 | |
135 | if (__nxdom_inited) { |
136 | STAILQ_FOREACH_SAFE(nxdom, &nexus_domains, nxdom_link, tnxdom) { |
137 | nxdom_terminate(nxdom); |
138 | nxdom_fini(nxdom); |
139 | nxdom_detach(nxdom); |
140 | } |
141 | |
142 | /* |
143 | * TODO: adi@apple.com -- terminate detacher thread. |
144 | */ |
145 | |
146 | __nxdom_inited = 0; |
147 | } |
148 | ASSERT(STAILQ_EMPTY(&nexus_domains)); |
149 | } |
150 | |
151 | #define ASSERT_NXDOM_PARAMS(_dom, _var) do { \ |
152 | ASSERT(NXDOM_MIN(_dom, _var) <= NXDOM_MAX(_dom, _var)); \ |
153 | ASSERT(NXDOM_DEF(_dom, _var) >= NXDOM_MIN(_dom, _var)); \ |
154 | ASSERT(NXDOM_DEF(_dom, _var) <= NXDOM_MAX(_dom, _var)); \ |
155 | } while (0) |
156 | |
157 | static void |
158 | nxdom_attach(struct nxdom *nxdom) |
159 | { |
160 | struct nxdom *nxdom1; |
161 | |
162 | SK_LOCK_ASSERT_HELD(); |
163 | ASSERT(!(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED)); |
164 | |
165 | STAILQ_FOREACH(nxdom1, &nexus_domains, nxdom_link) { |
166 | if (nxdom1->nxdom_type == nxdom->nxdom_type) { |
167 | /* type must be unique; this is a programming error */ |
168 | VERIFY(0); |
169 | /* NOTREACHED */ |
170 | __builtin_unreachable(); |
171 | } |
172 | } |
173 | |
174 | /* verify this is a valid type */ |
175 | switch (nxdom->nxdom_type) { |
176 | case NEXUS_TYPE_USER_PIPE: |
177 | case NEXUS_TYPE_KERNEL_PIPE: |
178 | case NEXUS_TYPE_NET_IF: |
179 | case NEXUS_TYPE_FLOW_SWITCH: |
180 | case NEXUS_TYPE_MONITOR: |
181 | break; |
182 | |
183 | default: |
184 | VERIFY(0); |
185 | /* NOTREACHED */ |
186 | __builtin_unreachable(); |
187 | } |
188 | |
189 | /* verify this is a valid metadata type */ |
190 | switch (nxdom->nxdom_md_type) { |
191 | case NEXUS_META_TYPE_QUANTUM: |
192 | case NEXUS_META_TYPE_PACKET: |
193 | break; |
194 | |
195 | default: |
196 | VERIFY(0); |
197 | /* NOTREACHED */ |
198 | __builtin_unreachable(); |
199 | } |
200 | |
201 | /* verify this is a valid metadata subtype */ |
202 | switch (nxdom->nxdom_md_subtype) { |
203 | case NEXUS_META_SUBTYPE_PAYLOAD: |
204 | case NEXUS_META_SUBTYPE_RAW: |
205 | break; |
206 | |
207 | default: |
208 | VERIFY(0); |
209 | /* NOTREACHED */ |
210 | __builtin_unreachable(); |
211 | } |
212 | |
213 | #if (DEVELOPMENT || DEBUG) |
214 | /* |
215 | * Override the default ring sizes for flowswitch if configured |
216 | * via boot-args. Each nexus provider instance can still change |
217 | * the values if so desired. |
218 | */ |
219 | if (nxdom->nxdom_type == NEXUS_TYPE_FLOW_SWITCH) { |
220 | if (sk_txring_sz != 0) { |
221 | if (sk_txring_sz < NXDOM_MIN(nxdom, tx_slots)) { |
222 | sk_txring_sz = NXDOM_MIN(nxdom, tx_slots); |
223 | } else if (sk_txring_sz > NXDOM_MAX(nxdom, tx_slots)) { |
224 | sk_txring_sz = NXDOM_MAX(nxdom, tx_slots); |
225 | } |
226 | NXDOM_DEF(nxdom, tx_slots) = sk_txring_sz; |
227 | } |
228 | if (sk_rxring_sz != 0) { |
229 | if (sk_rxring_sz < NXDOM_MIN(nxdom, rx_slots)) { |
230 | sk_rxring_sz = NXDOM_MIN(nxdom, rx_slots); |
231 | } else if (sk_rxring_sz > NXDOM_MAX(nxdom, rx_slots)) { |
232 | sk_rxring_sz = NXDOM_MAX(nxdom, rx_slots); |
233 | } |
234 | NXDOM_DEF(nxdom, rx_slots) = sk_rxring_sz; |
235 | } |
236 | } |
237 | /* |
238 | * Override the default ring sizes for netif if configured |
239 | * via boot-args. Each nexus provider instance can still change |
240 | * the values if so desired. |
241 | */ |
242 | if (nxdom->nxdom_type == NEXUS_TYPE_NET_IF) { |
243 | if (sk_net_txring_sz != 0) { |
244 | if (sk_net_txring_sz < NXDOM_MIN(nxdom, tx_slots)) { |
245 | sk_net_txring_sz = NXDOM_MIN(nxdom, tx_slots); |
246 | } else if (sk_net_txring_sz > NXDOM_MAX(nxdom, tx_slots)) { |
247 | sk_net_txring_sz = NXDOM_MAX(nxdom, tx_slots); |
248 | } |
249 | NXDOM_DEF(nxdom, tx_slots) = sk_net_txring_sz; |
250 | } |
251 | if (sk_net_rxring_sz != 0) { |
252 | if (sk_net_rxring_sz < NXDOM_MIN(nxdom, rx_slots)) { |
253 | sk_net_rxring_sz = NXDOM_MIN(nxdom, rx_slots); |
254 | } else if (sk_net_rxring_sz > NXDOM_MAX(nxdom, rx_slots)) { |
255 | sk_net_rxring_sz = NXDOM_MAX(nxdom, rx_slots); |
256 | } |
257 | NXDOM_DEF(nxdom, rx_slots) = sk_net_rxring_sz; |
258 | } |
259 | } |
260 | |
261 | #endif /* DEVELOPMENT || DEBUG */ |
262 | |
263 | /* verify that parameters are sane */ |
264 | ASSERT(NXDOM_MAX(nxdom, ports) > 0); |
265 | ASSERT(NXDOM_MAX(nxdom, ports) <= NEXUS_PORT_MAX); |
266 | ASSERT_NXDOM_PARAMS(nxdom, ports); |
267 | ASSERT_NXDOM_PARAMS(nxdom, tx_rings); |
268 | ASSERT_NXDOM_PARAMS(nxdom, rx_rings); |
269 | ASSERT(NXDOM_MAX(nxdom, tx_slots) > 0); |
270 | ASSERT_NXDOM_PARAMS(nxdom, tx_slots); |
271 | ASSERT(NXDOM_MAX(nxdom, rx_slots) > 0); |
272 | ASSERT_NXDOM_PARAMS(nxdom, rx_slots); |
273 | ASSERT_NXDOM_PARAMS(nxdom, buf_size); |
274 | ASSERT_NXDOM_PARAMS(nxdom, meta_size); |
275 | ASSERT_NXDOM_PARAMS(nxdom, pipes); |
276 | ASSERT_NXDOM_PARAMS(nxdom, extensions); |
277 | |
278 | /* these must exist */ |
279 | ASSERT(nxdom->nxdom_bind_port != NULL); |
280 | ASSERT(nxdom->nxdom_unbind_port != NULL); |
281 | ASSERT(nxdom->nxdom_connect != NULL); |
282 | ASSERT(nxdom->nxdom_disconnect != NULL); |
283 | ASSERT(nxdom->nxdom_defunct != NULL); |
284 | ASSERT(nxdom->nxdom_defunct_finalize != NULL); |
285 | |
286 | STAILQ_INSERT_TAIL(&nexus_domains, nxdom, nxdom_link); |
287 | nxdom->nxdom_flags |= NEXUSDOMF_ATTACHED; |
288 | } |
289 | |
#undef ASSERT_NXDOM_PARAMS
291 | |
292 | static void |
293 | nxdom_detach(struct nxdom *nxdom) |
294 | { |
295 | SK_LOCK_ASSERT_HELD(); |
296 | ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED); |
297 | |
298 | STAILQ_REMOVE(&nexus_domains, nxdom, nxdom, nxdom_link); |
299 | nxdom->nxdom_flags &= ~NEXUSDOMF_ATTACHED; |
300 | } |
301 | |
302 | static void |
303 | nxdom_init(struct nxdom *nxdom) |
304 | { |
305 | ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED); |
306 | |
307 | SK_LOCK_ASSERT_HELD(); |
308 | |
309 | if (!(nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED)) { |
310 | if (nxdom->nxdom_init != NULL) { |
311 | nxdom->nxdom_init(nxdom); |
312 | } |
313 | nxdom->nxdom_flags |= NEXUSDOMF_INITIALIZED; |
314 | } |
315 | } |
316 | |
317 | static void |
318 | nxdom_terminate(struct nxdom *nxdom) |
319 | { |
320 | ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED); |
321 | |
322 | SK_LOCK_ASSERT_HELD(); |
323 | |
324 | if ((nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED) && |
325 | !(nxdom->nxdom_flags & NEXUSDOMF_TERMINATED)) { |
326 | if (nxdom->nxdom_terminate != NULL) { |
327 | nxdom->nxdom_terminate(nxdom); |
328 | } |
329 | nxdom->nxdom_flags |= NEXUSDOMF_TERMINATED; |
330 | } |
331 | } |
332 | |
333 | static void |
334 | nxdom_fini(struct nxdom *nxdom) |
335 | { |
336 | ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED); |
337 | |
338 | if (nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED) { |
339 | if (nxdom->nxdom_fini != NULL) { |
340 | nxdom->nxdom_fini(nxdom); |
341 | } |
342 | nxdom->nxdom_flags &= ~NEXUSDOMF_INITIALIZED; |
343 | } |
344 | } |
345 | |
346 | int |
347 | nxdom_prov_add(struct nxdom *nxdom, |
348 | struct kern_nexus_domain_provider *nxdom_prov) |
349 | { |
350 | struct kern_nexus_domain_provider *nxprov1; |
351 | nexus_type_t type = nxdom->nxdom_type; |
352 | boolean_t builtin; |
353 | int err = 0; |
354 | |
355 | SK_LOCK_ASSERT_HELD(); |
356 | ASSERT(type < NEXUS_TYPE_MAX); |
357 | |
358 | builtin = !(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT); |
359 | |
360 | STAILQ_FOREACH(nxprov1, &nxdom->nxdom_prov_head, nxdom_prov_link) { |
361 | /* |
362 | * We can be a little more strict in the kernel and |
363 | * avoid namespace collision (even though each domain |
364 | * provider has UUID; this also guarantees that external |
365 | * providers won't conflict with the builtin ones. |
366 | */ |
367 | if (strcmp(s1: nxprov1->nxdom_prov_name, |
368 | s2: nxdom_prov->nxdom_prov_name) == 0) { |
369 | return EEXIST; |
370 | } |
371 | } |
372 | |
373 | VERIFY(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_ATTACHED)); |
374 | VERIFY(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_INITIALIZED)); |
375 | |
	uuid_generate_random(nxdom_prov->nxdom_prov_uuid);
377 | nxdom_prov->nxdom_prov_dom = nxdom; |
378 | if (nxdom_prov->nxdom_prov_init != NULL) { |
379 | err = nxdom_prov->nxdom_prov_init(nxdom_prov); |
380 | } |
381 | |
382 | if (err == 0) { |
383 | nxdom_prov->nxdom_prov_flags |= |
384 | (NXDOMPROVF_ATTACHED | NXDOMPROVF_INITIALIZED); |
385 | STAILQ_INSERT_TAIL(&nxdom->nxdom_prov_head, nxdom_prov, |
386 | nxdom_prov_link); |
387 | /* for being in the list */ |
388 | nxdom_prov_retain_locked(nxdom_prov); |
389 | |
390 | if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT) { |
391 | VERIFY(builtin && nxdom_prov_default[type] == NULL); |
392 | nxdom_prov_default[type] = nxdom_prov; |
393 | /* for being in the array */ |
394 | nxdom_prov_retain_locked(nxdom_prov); |
395 | } |
396 | |
397 | SK_D("nxdom_prov 0x%llx (%s) dom %s" , |
398 | SK_KVA(nxdom_prov), nxdom_prov->nxdom_prov_name, |
399 | nxdom->nxdom_name); |
400 | } else { |
		uuid_clear(nxdom_prov->nxdom_prov_uuid);
402 | nxdom_prov->nxdom_prov_dom = NULL; |
403 | } |
404 | |
405 | return err; |
406 | } |
407 | |
408 | void |
409 | nxdom_prov_del(struct kern_nexus_domain_provider *nxdom_prov) |
410 | { |
411 | struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom; |
412 | nexus_type_t type = nxdom->nxdom_type; |
413 | |
414 | SK_LOCK_ASSERT_HELD(); |
415 | ASSERT(type < NEXUS_TYPE_MAX); |
416 | ASSERT(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_ATTACHED); |
417 | |
418 | if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DETACHING) { |
419 | return; |
420 | } |
421 | |
422 | SK_D("nxdom_prov 0x%llx (%s:%s)" , SK_KVA(nxdom_prov), nxdom->nxdom_name, |
423 | nxdom_prov->nxdom_prov_name); |
424 | |
425 | /* keep the reference around for the detaching list (see below) */ |
426 | STAILQ_REMOVE(&nxdom->nxdom_prov_head, nxdom_prov, |
427 | kern_nexus_domain_provider, nxdom_prov_link); |
428 | nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_ATTACHED; |
429 | nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_DETACHING; |
430 | |
431 | /* there can only be one default and it must match this one */ |
432 | if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT) { |
433 | ASSERT(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT)); |
434 | VERIFY(nxdom_prov_default[type] == nxdom_prov); |
435 | nxdom_prov_default[type] = NULL; |
436 | /* |
437 | * Release reference held for the array; this must |
438 | * not be the last reference, as there is still at |
439 | * least one which we kept for the detaching list. |
440 | */ |
441 | VERIFY(!nxdom_prov_release_locked(nxdom_prov)); |
442 | } |
443 | |
444 | /* add to detaching list and wake up detacher */ |
445 | nxprov_detaching_enqueue(nxdom_prov); |
446 | } |
447 | |
448 | static void |
449 | nxdom_del_provider_final(struct kern_nexus_domain_provider *nxdom_prov) |
450 | { |
451 | #if (DEBUG || DEVELOPMENT) |
452 | struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom; |
453 | #endif /* DEBUG || DEVELOPMENT */ |
454 | |
455 | SK_LOCK_ASSERT_HELD(); |
456 | |
457 | ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED | |
458 | NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING); |
459 | ASSERT(nxdom != NULL); |
460 | |
461 | SK_D("nxdom_prov 0x%llx (%s:%s)" , SK_KVA(nxdom_prov), nxdom->nxdom_name, |
462 | nxdom_prov->nxdom_prov_name); |
463 | |
464 | nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_DETACHING; |
465 | |
466 | /* |
467 | * Release reference held for detaching list; if this is the last |
468 | * reference, the domain provider's nxdom_prov_fini() callback will |
469 | * be called (if applicable) within the detacher thread's context. |
470 | * Otherwise, this will occur when the last nexus provider for that |
471 | * domain provider has been released. |
472 | */ |
473 | (void) nxdom_prov_release_locked(nxdom_prov); |
474 | } |
475 | |
476 | struct nxdom * |
477 | nxdom_find(nexus_type_t type) |
478 | { |
479 | struct nxdom *nxdom; |
480 | |
481 | SK_LOCK_ASSERT_HELD(); |
482 | ASSERT(type < NEXUS_TYPE_MAX); |
483 | |
484 | STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link) { |
485 | if (nxdom->nxdom_type == type) { |
486 | break; |
487 | } |
488 | } |
489 | |
490 | return nxdom; |
491 | } |
492 | |
493 | struct kern_nexus_domain_provider * |
494 | nxdom_prov_find(const struct nxdom *nxdom, const char *name) |
495 | { |
496 | struct kern_nexus_domain_provider *nxdom_prov = NULL; |
497 | |
498 | SK_LOCK_ASSERT_HELD(); |
499 | |
500 | if (name != NULL) { |
501 | STAILQ_FOREACH(nxdom_prov, &nxdom->nxdom_prov_head, |
502 | nxdom_prov_link) { |
			if (strcmp(nxdom_prov->nxdom_prov_name, name) == 0) {
504 | break; |
505 | } |
506 | } |
507 | } |
508 | |
509 | if (nxdom_prov != NULL) { |
510 | nxdom_prov_retain_locked(nxdom_prov); /* for caller */ |
511 | } |
512 | return nxdom_prov; |
513 | } |
514 | |
515 | struct kern_nexus_domain_provider * |
516 | nxdom_prov_find_uuid(const uuid_t dom_prov_uuid) |
517 | { |
518 | struct kern_nexus_domain_provider *nxdom_prov = NULL; |
519 | struct nxdom *nxdom; |
520 | |
521 | SK_LOCK_ASSERT_HELD(); |
522 | ASSERT(dom_prov_uuid != NULL && !uuid_is_null(dom_prov_uuid)); |
523 | |
524 | STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link) { |
525 | STAILQ_FOREACH(nxdom_prov, &nxdom->nxdom_prov_head, |
526 | nxdom_prov_link) { |
527 | ASSERT(!uuid_is_null(nxdom_prov->nxdom_prov_uuid)); |
			if (uuid_compare(nxdom_prov->nxdom_prov_uuid,
			    dom_prov_uuid) == 0) {
530 | break; |
531 | } |
532 | } |
533 | if (nxdom_prov != NULL) { |
534 | nxdom_prov_retain_locked(nxdom_prov); /* for caller */ |
535 | break; |
536 | } |
537 | } |
538 | |
539 | return nxdom_prov; |
540 | } |
541 | |
542 | errno_t |
543 | kern_nexus_register_domain_provider(const nexus_type_t type, |
544 | const nexus_domain_provider_name_t name, |
545 | const struct kern_nexus_domain_provider_init *init, |
546 | const uint32_t init_len, uuid_t *dom_prov_uuid) |
547 | { |
548 | struct kern_nexus_domain_provider *nxdom_prov = NULL; |
549 | struct nxdom *nxdom; |
550 | errno_t err = 0; |
551 | |
552 | _CASSERT(sizeof(*init) == sizeof(nxdom_prov->nxdom_prov_ext)); |
553 | |
554 | if (type >= NEXUS_TYPE_MAX || dom_prov_uuid == NULL) { |
555 | return EINVAL; |
556 | } |
557 | |
	uuid_clear(*dom_prov_uuid);
559 | |
560 | if (name == NULL || init == NULL || init_len < sizeof(*init) || |
561 | init->nxdpi_version != KERN_NEXUS_DOMAIN_PROVIDER_CURRENT_VERSION) { |
562 | return EINVAL; |
563 | } |
564 | |
565 | /* |
566 | * init, fini are required. |
567 | */ |
568 | if (init->nxdpi_init == NULL || init->nxdpi_fini == NULL) { |
569 | return EINVAL; |
570 | } |
571 | |
572 | SK_LOCK(); |
573 | if (nxdom_prov_default[type] == NULL) { |
574 | err = ENXIO; |
575 | goto done; |
576 | } |
577 | |
578 | nxdom = nxdom_find(type); |
579 | if (nxdom == NULL) { |
580 | err = ENXIO; |
581 | goto done; |
582 | } |
583 | |
584 | /* |
585 | * Allow only kernel pipe and netif external domain providers for |
586 | * now, until we understand the implications and requirements for |
587 | * supporting other domain types. For all other types, using |
588 | * the built-in domain providers and registering nexus should |
589 | * suffice. |
590 | */ |
591 | if (nxdom->nxdom_type != NEXUS_TYPE_KERNEL_PIPE && |
592 | nxdom->nxdom_type != NEXUS_TYPE_NET_IF) { |
593 | err = EINVAL; |
594 | goto done; |
595 | } |
596 | |
597 | nxdom_prov = nxdom_prov_alloc(Z_WAITOK); |
598 | |
599 | /* |
600 | * Point all callback routines to the default provider for this |
601 | * domain; for nxdom_prov{init,fini}, refer to externally-provided |
602 | * callback routines, if applicable. |
603 | */ |
604 | bcopy(src: init, dst: &nxdom_prov->nxdom_prov_ext, n: sizeof(*init)); |
605 | bcopy(src: &nxdom_prov_default[type]->nxdom_prov_cb, |
606 | dst: &nxdom_prov->nxdom_prov_cb, n: sizeof(struct nxdom_prov_cb)); |
607 | nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_EXT; |
608 | nxdom_prov->nxdom_prov_init = nxdom_prov_ext_init; |
609 | nxdom_prov->nxdom_prov_fini = nxdom_prov_ext_fini; |
610 | (void) snprintf(nxdom_prov->nxdom_prov_name, |
611 | count: sizeof(nxdom_prov->nxdom_prov_name), "%s" , name); |
612 | |
613 | ASSERT(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT)); |
614 | err = nxdom_prov_add(nxdom, nxdom_prov); |
615 | if (err != 0) { |
616 | nxdom_prov_free(nxdom_prov); |
617 | nxdom_prov = NULL; |
618 | } |
619 | |
620 | done: |
621 | if (nxdom_prov != NULL) { |
622 | ASSERT(err == 0 && !uuid_is_null(nxdom_prov->nxdom_prov_uuid)); |
		uuid_copy(*dom_prov_uuid, nxdom_prov->nxdom_prov_uuid);
624 | } |
625 | SK_UNLOCK(); |
626 | |
627 | return err; |
628 | } |
629 | |
630 | errno_t |
631 | kern_nexus_deregister_domain_provider(const uuid_t dom_prov_uuid) |
632 | { |
633 | struct kern_nexus_domain_provider *nxdom_prov = NULL; |
634 | errno_t err = 0; |
635 | |
	if (dom_prov_uuid == NULL || uuid_is_null(dom_prov_uuid)) {
637 | return EINVAL; |
638 | } |
639 | |
640 | SK_LOCK(); |
641 | nxdom_prov = nxdom_prov_find_uuid(dom_prov_uuid); |
642 | if (nxdom_prov == NULL) { |
643 | err = ENXIO; |
644 | goto done; |
645 | } |
646 | |
647 | /* don't allow external request for built-in domain providers */ |
648 | if (!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT)) { |
649 | err = EINVAL; |
650 | goto done; |
651 | } |
652 | |
653 | /* schedule this to be deleted */ |
654 | nxdom_prov_del(nxdom_prov); |
655 | done: |
656 | /* release reference from nxdom_prov_find_uuid */ |
657 | if (nxdom_prov != NULL) { |
658 | (void) nxdom_prov_release_locked(nxdom_prov); |
659 | } |
660 | SK_UNLOCK(); |
661 | |
662 | return err; |
663 | } |
664 | |
665 | errno_t |
666 | kern_nexus_get_default_domain_provider(const nexus_type_t type, |
667 | uuid_t *dom_prov_uuid) |
668 | { |
669 | struct kern_nexus_domain_provider *nxdom_prov; |
670 | |
671 | if (type >= NEXUS_TYPE_MAX || dom_prov_uuid == NULL) { |
672 | return EINVAL; |
673 | } |
674 | |
	uuid_clear(*dom_prov_uuid);
676 | |
677 | /* no lock is needed; array is immutable */ |
678 | if ((nxdom_prov = nxdom_prov_default[type]) == NULL) { |
679 | return ENXIO; |
680 | } |
681 | |
	uuid_copy(*dom_prov_uuid, nxdom_prov->nxdom_prov_uuid);
683 | |
684 | return 0; |
685 | } |
686 | |
687 | static int |
688 | nxdom_prov_ext_init(struct kern_nexus_domain_provider *nxdom_prov) |
689 | { |
690 | int err = 0; |
691 | |
692 | SK_D("initializing %s" , nxdom_prov->nxdom_prov_name); |
693 | |
694 | ASSERT(nxdom_prov->nxdom_prov_ext.nxdpi_init != NULL); |
695 | if ((err = nxdom_prov->nxdom_prov_ext.nxdpi_init(nxdom_prov)) == 0) { |
696 | nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_EXT_INITED; |
697 | } |
698 | |
699 | return err; |
700 | } |
701 | |
702 | static void |
703 | nxdom_prov_ext_fini(struct kern_nexus_domain_provider *nxdom_prov) |
704 | { |
705 | SK_D("destroying %s" , nxdom_prov->nxdom_prov_name); |
706 | |
707 | if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT_INITED) { |
708 | ASSERT(nxdom_prov->nxdom_prov_ext.nxdpi_fini != NULL); |
709 | nxdom_prov->nxdom_prov_ext.nxdpi_fini(nxdom_prov); |
710 | nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_EXT_INITED; |
711 | } |
712 | } |
713 | |
714 | static struct nexus_attr * |
715 | nxa_alloc(zalloc_flags_t how) |
716 | { |
717 | return zalloc_flags(nxa_zone, how | Z_ZERO); |
718 | } |
719 | |
720 | static void |
721 | nxa_free(struct nexus_attr *nxa) |
722 | { |
	SK_DF(SK_VERB_MEM, "nxa 0x%llx FREE", SK_KVA(nxa));
724 | zfree(nxa_zone, nxa); |
725 | } |
726 | |
727 | errno_t |
728 | kern_nexus_attr_create(nexus_attr_t *nxa) |
729 | { |
730 | errno_t err = 0; |
731 | |
732 | if (nxa == NULL) { |
733 | err = EINVAL; |
734 | } else { |
		*nxa = nxa_alloc(Z_WAITOK);
736 | } |
737 | return err; |
738 | } |
739 | |
740 | errno_t |
741 | kern_nexus_attr_clone(const nexus_attr_t nxa, nexus_attr_t *nnxa) |
742 | { |
743 | errno_t err = 0; |
744 | |
745 | if (nnxa == NULL) { |
746 | err = EINVAL; |
747 | } else { |
		err = kern_nexus_attr_create(nnxa);
		if (err == 0 && nxa != NULL) {
			ASSERT(*nnxa != NULL);
			bcopy(nxa, *nnxa, sizeof(**nnxa));
752 | } |
753 | } |
754 | return err; |
755 | } |
756 | |
757 | errno_t |
758 | kern_nexus_attr_set(const nexus_attr_t nxa, |
759 | const nexus_attr_type_t type, const uint64_t value) |
760 | { |
761 | return __nexus_attr_set(nxa, type, value); |
762 | } |
763 | |
764 | errno_t |
765 | kern_nexus_attr_get(nexus_attr_t nxa, const nexus_attr_type_t type, |
766 | uint64_t *value) |
767 | { |
768 | return __nexus_attr_get(nxa, type, value); |
769 | } |
770 | |
771 | void |
772 | kern_nexus_attr_destroy(nexus_attr_t nxa) |
773 | { |
774 | nxa_free(nxa); |
775 | } |
776 | |
777 | static struct nexus_controller * |
778 | ncd_alloc(zalloc_flags_t how) |
779 | { |
780 | return zalloc_flags(ncd_zone, how | Z_ZERO); |
781 | } |
782 | |
783 | static void |
784 | ncd_free(struct nexus_controller *ncd) |
785 | { |
	SK_DF(SK_VERB_MEM, "ncd 0x%llx FREE", SK_KVA(ncd));
787 | zfree(ncd_zone, ncd); |
788 | } |
789 | |
790 | nexus_controller_t |
791 | kern_nexus_shared_controller(void) |
792 | { |
793 | return &kernnxctl; |
794 | } |
795 | |
796 | errno_t |
797 | kern_nexus_controller_create(nexus_controller_t *ncd) |
798 | { |
799 | struct nxctl *nxctl = NULL; |
800 | uuid_t nxctl_uuid; |
801 | errno_t err = 0; |
802 | |
	uuid_generate_random(nxctl_uuid);
804 | |
805 | if (ncd == NULL) { |
806 | err = EINVAL; |
807 | goto done; |
808 | } else { |
809 | *ncd = NULL; |
810 | } |
811 | |
812 | nxctl = nxctl_create(kernproc, NULL, nxctl_uuid, &err); |
813 | if (nxctl == NULL) { |
814 | ASSERT(err != 0); |
815 | goto done; |
816 | } |
817 | |
	*ncd = ncd_alloc(Z_WAITOK);
819 | (*ncd)->ncd_nxctl = nxctl; /* ref from nxctl_create */ |
820 | |
821 | done: |
822 | if (err != 0) { |
823 | if (nxctl != NULL) { |
824 | nxctl_dtor(nxctl); |
825 | nxctl = NULL; |
826 | } |
827 | if (ncd != NULL && *ncd != NULL) { |
			ncd_free(*ncd);
829 | *ncd = NULL; |
830 | } |
831 | } |
832 | |
833 | return err; |
834 | } |
835 | |
836 | #define NXPI_INVALID_CB_PAIRS(cb1, cb2) \ |
837 | (!(init->nxpi_##cb1 == NULL && init->nxpi_##cb2 == NULL) && \ |
838 | ((init->nxpi_##cb1 == NULL) ^ (init->nxpi_##cb2 == NULL))) |
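/*
 * Note: the above evaluates to true iff exactly one callback of the
 * pair is NULL, i.e. the pair is asymmetrical; each pair must be
 * supplied either in full or not at all.
 */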
839 | |
840 | static errno_t |
841 | nexus_controller_register_provider_validate_init_params( |
842 | const struct kern_nexus_provider_init *init, uint32_t init_len, |
843 | nexus_type_t nxdom_type) |
844 | { |
845 | errno_t err = 0; |
846 | struct kern_nexus_netif_provider_init *netif_init; |
847 | |
848 | _CASSERT(__builtin_offsetof(struct kern_nexus_provider_init, |
849 | nxpi_version) == 0); |
850 | _CASSERT(sizeof(init->nxpi_version) == sizeof(uint32_t)); |
851 | |
852 | if (init == NULL) { |
853 | return 0; |
854 | } |
855 | |
856 | if (init_len < sizeof(uint32_t)) { |
857 | return EINVAL; |
858 | } |
859 | |
860 | switch (init->nxpi_version) { |
861 | case KERN_NEXUS_PROVIDER_VERSION_1: |
862 | if (init_len != sizeof(struct kern_nexus_provider_init)) { |
863 | err = EINVAL; |
864 | break; |
865 | } |
866 | /* |
867 | * sync_{tx,rx} callbacks are required; the rest of the |
868 | * callback pairs are optional, but must be symmetrical. |
869 | */ |
870 | if (init->nxpi_sync_tx == NULL || init->nxpi_sync_rx == NULL || |
871 | init->nxpi_pre_connect == NULL || |
872 | init->nxpi_connected == NULL || |
873 | init->nxpi_pre_disconnect == NULL || |
874 | init->nxpi_disconnected == NULL || |
875 | NXPI_INVALID_CB_PAIRS(ring_init, ring_fini) || |
876 | NXPI_INVALID_CB_PAIRS(slot_init, slot_fini)) { |
877 | err = EINVAL; |
878 | break; |
879 | } |
880 | /* |
881 | * Tx doorbell interface is only supported for netif and |
882 | * Tx doorbell is mandatory for netif |
883 | */ |
884 | if (((init->nxpi_tx_doorbell != NULL) && |
885 | (nxdom_type != NEXUS_TYPE_NET_IF)) || |
886 | ((nxdom_type == NEXUS_TYPE_NET_IF) && |
887 | (init->nxpi_tx_doorbell == NULL))) { |
888 | err = EINVAL; |
889 | break; |
890 | } |
891 | /* |
892 | * Capabilities configuration interface is only supported for |
893 | * netif. |
894 | */ |
895 | if ((init->nxpi_config_capab != NULL) && |
896 | (nxdom_type != NEXUS_TYPE_NET_IF)) { |
897 | err = EINVAL; |
898 | break; |
899 | } |
900 | break; |
901 | |
902 | case KERN_NEXUS_PROVIDER_VERSION_NETIF: |
903 | if (init_len != sizeof(struct kern_nexus_netif_provider_init)) { |
904 | err = EINVAL; |
905 | break; |
906 | } |
907 | if (nxdom_type != NEXUS_TYPE_NET_IF) { |
908 | err = EINVAL; |
909 | break; |
910 | } |
911 | netif_init = |
912 | __DECONST(struct kern_nexus_netif_provider_init *, init); |
913 | if (netif_init->nxnpi_pre_connect == NULL || |
914 | netif_init->nxnpi_connected == NULL || |
915 | netif_init->nxnpi_pre_disconnect == NULL || |
916 | netif_init->nxnpi_disconnected == NULL || |
917 | netif_init->nxnpi_qset_init == NULL || |
918 | netif_init->nxnpi_qset_fini == NULL || |
919 | netif_init->nxnpi_queue_init == NULL || |
920 | netif_init->nxnpi_queue_fini == NULL || |
921 | netif_init->nxnpi_tx_qset_notify == NULL || |
922 | netif_init->nxnpi_config_capab == NULL) { |
923 | err = EINVAL; |
924 | break; |
925 | } |
926 | break; |
927 | |
928 | default: |
929 | err = EINVAL; |
930 | break; |
931 | } |
932 | return err; |
933 | } |
934 | |
935 | errno_t |
936 | kern_nexus_controller_register_provider(const nexus_controller_t ncd, |
937 | const uuid_t dom_prov_uuid, const nexus_name_t name, |
938 | const struct kern_nexus_provider_init *init, uint32_t init_len, |
939 | const nexus_attr_t nxa, uuid_t *prov_uuid) |
940 | { |
941 | struct kern_nexus_domain_provider *nxdom_prov = NULL; |
942 | struct kern_nexus_provider *nxprov = NULL; |
943 | nexus_type_t nxdom_type; |
944 | struct nxprov_reg reg; |
945 | struct nxctl *nxctl; |
946 | errno_t err = 0; |
947 | |
948 | if (prov_uuid == NULL) { |
949 | return EINVAL; |
950 | } |
951 | |
	uuid_clear(*prov_uuid);
953 | |
954 | if (ncd == NULL || |
	    dom_prov_uuid == NULL || uuid_is_null(dom_prov_uuid)) {
956 | return EINVAL; |
957 | } |
958 | |
959 | nxctl = ncd->ncd_nxctl; |
960 | NXCTL_LOCK(nxctl); |
961 | SK_LOCK(); |
962 | nxdom_prov = nxdom_prov_find_uuid(dom_prov_uuid); |
963 | if (nxdom_prov == NULL) { |
964 | SK_UNLOCK(); |
965 | err = ENXIO; |
966 | goto done; |
967 | } |
968 | |
969 | nxdom_type = nxdom_prov->nxdom_prov_dom->nxdom_type; |
970 | ASSERT(nxdom_type < NEXUS_TYPE_MAX); |
971 | |
972 | err = nexus_controller_register_provider_validate_init_params(init, |
973 | init_len, nxdom_type); |
974 | if (err != 0) { |
975 | SK_UNLOCK(); |
976 | err = EINVAL; |
977 | goto done; |
978 | } |
979 | |
	if ((err = __nexus_provider_reg_prepare(&reg, name,
	    nxdom_type, nxa)) != 0) {
982 | SK_UNLOCK(); |
983 | goto done; |
984 | } |
985 | |
986 | if (init && init->nxpi_version == KERN_NEXUS_PROVIDER_VERSION_NETIF) { |
987 | reg.nxpreg_params.nxp_flags |= NXPF_NETIF_LLINK; |
988 | } |
989 | |
990 | /* callee will hold reference on nxdom_prov upon success */ |
	if ((nxprov = nxprov_create_kern(nxctl, nxdom_prov, &reg,
	    init, &err)) == NULL) {
993 | SK_UNLOCK(); |
994 | ASSERT(err != 0); |
995 | goto done; |
996 | } |
997 | SK_UNLOCK(); |
998 | |
	uuid_copy(*prov_uuid, nxprov->nxprov_uuid);
1000 | |
1001 | done: |
1002 | SK_LOCK_ASSERT_NOTHELD(); |
1003 | NXCTL_UNLOCK(nxctl); |
1004 | |
1005 | if (err != 0 && nxprov != NULL) { |
1006 | err = nxprov_close(nxprov, FALSE); |
1007 | } |
1008 | |
1009 | /* release extra ref from nxprov_create_kern */ |
1010 | if (nxprov != NULL) { |
1011 | nxprov_release(nxprov); |
1012 | } |
1013 | /* release extra ref from nxdom_prov_find_uuid */ |
1014 | if (nxdom_prov != NULL) { |
1015 | (void) nxdom_prov_release(nxdom_prov); |
1016 | } |
1017 | |
1018 | return err; |
1019 | } |
1020 | |
1021 | #undef NXPI_INVALID_CB_PAIRS |
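
/*
 * Example (illustrative sketch; the my_* callbacks are hypothetical,
 * error handling is omitted, and a NULL attribute pointer is assumed
 * to select the domain defaults):
 *
 *	struct kern_nexus_provider_init prov_init = {
 *		.nxpi_version = KERN_NEXUS_PROVIDER_VERSION_1,
 *		.nxpi_sync_tx = my_sync_tx,
 *		.nxpi_sync_rx = my_sync_rx,
 *		.nxpi_pre_connect = my_pre_connect,
 *		.nxpi_connected = my_connected,
 *		.nxpi_pre_disconnect = my_pre_disconnect,
 *		.nxpi_disconnected = my_disconnected,
 *	};
 *	nexus_controller_t ncd = NULL;
 *	uuid_t dp_uuid, prov_uuid;
 *
 *	(void) kern_nexus_controller_create(&ncd);
 *	(void) kern_nexus_get_default_domain_provider(
 *	    NEXUS_TYPE_KERNEL_PIPE, &dp_uuid);
 *	(void) kern_nexus_controller_register_provider(ncd, dp_uuid,
 *	    "com.example.kpipe.prov", &prov_init, sizeof(prov_init),
 *	    NULL, &prov_uuid);
 */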
1022 | |
1023 | errno_t |
1024 | kern_nexus_controller_deregister_provider(const nexus_controller_t ncd, |
1025 | const uuid_t prov_uuid) |
1026 | { |
1027 | errno_t err; |
1028 | |
	if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid)) {
1030 | err = EINVAL; |
1031 | } else { |
1032 | struct nxctl *nxctl = ncd->ncd_nxctl; |
1033 | NXCTL_LOCK(nxctl); |
1034 | err = nxprov_destroy(nxctl, prov_uuid); |
1035 | NXCTL_UNLOCK(nxctl); |
1036 | } |
1037 | return err; |
1038 | } |
1039 | |
1040 | errno_t |
1041 | kern_nexus_controller_alloc_provider_instance(const nexus_controller_t ncd, |
1042 | const uuid_t prov_uuid, const void *nx_ctx, |
1043 | nexus_ctx_release_fn_t nx_ctx_release, uuid_t *nx_uuid, |
1044 | const struct kern_nexus_init *init) |
1045 | { |
1046 | struct kern_nexus *nx = NULL; |
1047 | struct nxctl *nxctl; |
1048 | errno_t err = 0; |
1049 | |
	if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
1051 | nx_uuid == NULL || init == NULL || |
1052 | init->nxi_version != KERN_NEXUS_CURRENT_VERSION || |
1053 | (init->nxi_rx_pbufpool != NULL && |
1054 | init->nxi_rx_pbufpool != init->nxi_tx_pbufpool)) { |
1055 | err = EINVAL; |
1056 | goto done; |
1057 | } |
1058 | |
1059 | nxctl = ncd->ncd_nxctl; |
1060 | NXCTL_LOCK(nxctl); |
1061 | nx = nx_create(nxctl, prov_uuid, NEXUS_TYPE_UNDEFINED, nx_ctx, |
1062 | nx_ctx_release, init->nxi_tx_pbufpool, init->nxi_rx_pbufpool, &err); |
1063 | NXCTL_UNLOCK(nxctl); |
1064 | if (nx == NULL) { |
1065 | ASSERT(err != 0); |
1066 | goto done; |
1067 | } |
1068 | ASSERT(err == 0); |
	uuid_copy(*nx_uuid, nx->nx_uuid);
1070 | |
1071 | done: |
1072 | /* release extra ref from nx_create */ |
1073 | if (nx != NULL) { |
1074 | (void) nx_release(nx); |
1075 | } |
1076 | |
1077 | return err; |
1078 | } |
1079 | |
1080 | errno_t |
1081 | kern_nexus_controller_alloc_net_provider_instance( |
1082 | const nexus_controller_t ncd, const uuid_t prov_uuid, const void *nx_ctx, |
1083 | nexus_ctx_release_fn_t nx_ctx_release, uuid_t *nx_uuid, |
1084 | const struct kern_nexus_net_init *init, struct ifnet **pifp) |
1085 | { |
1086 | struct kern_nexus *nx = NULL; |
1087 | struct ifnet *ifp = NULL; |
1088 | struct nxctl *nxctl; |
1089 | boolean_t nxctl_locked = FALSE; |
1090 | errno_t err = 0; |
1091 | |
	if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
1093 | nx_uuid == NULL || init == NULL || |
1094 | init->nxneti_version != KERN_NEXUS_NET_CURRENT_VERSION || |
1095 | init->nxneti_eparams == NULL || pifp == NULL) { |
1096 | err = EINVAL; |
1097 | goto done; |
1098 | } |
1099 | |
1100 | /* |
1101 | * Skywalk native interface doesn't support legacy model. |
1102 | */ |
1103 | if ((init->nxneti_eparams->start != NULL) || |
1104 | (init->nxneti_eparams->flags & IFNET_INIT_LEGACY) || |
1105 | (init->nxneti_eparams->flags & IFNET_INIT_INPUT_POLL)) { |
1106 | err = EINVAL; |
1107 | goto done; |
1108 | } |
1109 | |
1110 | /* create an embryonic ifnet */ |
	err = ifnet_allocate_extended(init->nxneti_eparams, &ifp);
1112 | if (err != 0) { |
1113 | goto done; |
1114 | } |
1115 | |
1116 | nxctl = ncd->ncd_nxctl; |
1117 | NXCTL_LOCK(nxctl); |
1118 | nxctl_locked = TRUE; |
1119 | |
1120 | nx = nx_create(nxctl, prov_uuid, NEXUS_TYPE_NET_IF, nx_ctx, |
1121 | nx_ctx_release, init->nxneti_tx_pbufpool, init->nxneti_rx_pbufpool, |
1122 | &err); |
1123 | if (nx == NULL) { |
1124 | ASSERT(err != 0); |
1125 | goto done; |
1126 | } |
1127 | |
1128 | if (NX_LLINK_PROV(nx)) { |
1129 | if (init->nxneti_llink == NULL) { |
1130 | SK_ERR("logical link configuration required" ); |
1131 | err = EINVAL; |
1132 | goto done; |
1133 | } |
1134 | err = nx_netif_default_llink_config(NX_NETIF_PRIVATE(nx), |
1135 | init->nxneti_llink); |
1136 | if (err != 0) { |
1137 | goto done; |
1138 | } |
1139 | } |
1140 | |
1141 | /* prepare this ifnet instance if needed */ |
1142 | if (init->nxneti_prepare != NULL) { |
1143 | err = init->nxneti_prepare(nx, ifp); |
1144 | if (err != 0) { |
1145 | goto done; |
1146 | } |
1147 | } |
1148 | |
1149 | /* attach embryonic ifnet to nexus */ |
	err = _kern_nexus_ifattach(nxctl, nx->nx_uuid, ifp, NULL, FALSE, NULL);
1151 | |
1152 | if (err != 0) { |
1153 | goto done; |
1154 | } |
1155 | |
1156 | /* and finalize the ifnet attach */ |
1157 | ASSERT(nxctl_locked); |
1158 | NXCTL_UNLOCK(nxctl); |
1159 | nxctl_locked = FALSE; |
1160 | |
	err = ifnet_attach(ifp, init->nxneti_lladdr);
1162 | if (err != 0) { |
1163 | goto done; |
1164 | } |
1165 | |
1166 | ASSERT(err == 0); |
1167 | /* |
1168 | * Return ifnet reference held by ifnet_allocate_extended(); |
1169 | * caller is expected to retain this reference until its ifnet |
1170 | * detach callback is called. |
1171 | */ |
1172 | *pifp = ifp; |
	uuid_copy(*nx_uuid, nx->nx_uuid);
1174 | |
1175 | done: |
1176 | if (nxctl_locked) { |
1177 | NXCTL_UNLOCK(nxctl); |
1178 | } |
1179 | |
1180 | /* release extra ref from nx_create */ |
1181 | if (nx != NULL) { |
1182 | SK_LOCK(); |
1183 | if (err != 0) { |
1184 | (void) nx_close(nx, TRUE); |
1185 | } |
1186 | (void) nx_release_locked(nx); |
1187 | SK_UNLOCK(); |
1188 | } |
1189 | if (err != 0 && ifp != NULL) { |
		ifnet_release(ifp);
1191 | } |
1192 | |
1193 | return err; |
1194 | } |
1195 | |
1196 | errno_t |
1197 | kern_nexus_controller_free_provider_instance(const nexus_controller_t ncd, |
1198 | const uuid_t nx_uuid) |
1199 | { |
1200 | errno_t err; |
1201 | |
	if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid)) {
1203 | err = EINVAL; |
1204 | } else { |
1205 | struct nxctl *nxctl = ncd->ncd_nxctl; |
1206 | NXCTL_LOCK(nxctl); |
1207 | err = nx_destroy(nxctl, nx_uuid); |
1208 | NXCTL_UNLOCK(nxctl); |
1209 | } |
1210 | return err; |
1211 | } |
1212 | |
1213 | errno_t |
1214 | kern_nexus_controller_bind_provider_instance(const nexus_controller_t ncd, |
1215 | const uuid_t nx_uuid, nexus_port_t *port, const pid_t pid, |
1216 | const uuid_t exec_uuid, const void *key, const uint32_t key_len, |
1217 | const uint32_t bind_flags) |
1218 | { |
1219 | struct nx_bind_req nbr; |
1220 | struct sockopt sopt; |
1221 | struct nxctl *nxctl; |
1222 | int err = 0; |
1223 | |
	if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1225 | port == NULL) { |
1226 | return EINVAL; |
1227 | } |
1228 | |
	__nexus_bind_req_prepare(&nbr, nx_uuid, *port, pid, exec_uuid,
	    key, key_len, bind_flags);
1231 | |
	bzero(&sopt, sizeof(sopt));
1233 | sopt.sopt_dir = SOPT_SET; |
1234 | sopt.sopt_name = NXOPT_NEXUS_BIND; |
1235 | sopt.sopt_val = (user_addr_t)&nbr; |
1236 | sopt.sopt_valsize = sizeof(nbr); |
1237 | sopt.sopt_p = kernproc; |
1238 | |
1239 | nxctl = ncd->ncd_nxctl; |
1240 | NXCTL_LOCK(nxctl); |
1241 | err = nxctl_set_opt(nxctl, &sopt); |
1242 | NXCTL_UNLOCK(nxctl); |
1243 | |
1244 | if (err == 0) { |
1245 | *port = nbr.nb_port; |
1246 | } |
1247 | |
1248 | return err; |
1249 | } |
1250 | |
1251 | errno_t |
1252 | kern_nexus_controller_unbind_provider_instance(const nexus_controller_t ncd, |
1253 | const uuid_t nx_uuid, const nexus_port_t port) |
1254 | { |
1255 | struct nx_unbind_req nbu; |
1256 | struct sockopt sopt; |
1257 | struct nxctl *nxctl; |
1258 | int err = 0; |
1259 | |
	if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid)) {
1261 | return EINVAL; |
1262 | } |
1263 | |
	__nexus_unbind_req_prepare(&nbu, nx_uuid, port);
1265 | |
	bzero(&sopt, sizeof(sopt));
1267 | sopt.sopt_dir = SOPT_SET; |
1268 | sopt.sopt_name = NXOPT_NEXUS_UNBIND; |
1269 | sopt.sopt_val = (user_addr_t)&nbu; |
1270 | sopt.sopt_valsize = sizeof(nbu); |
1271 | sopt.sopt_p = kernproc; |
1272 | |
1273 | nxctl = ncd->ncd_nxctl; |
1274 | NXCTL_LOCK(nxctl); |
1275 | err = nxctl_set_opt(nxctl, &sopt); |
1276 | NXCTL_UNLOCK(nxctl); |
1277 | |
1278 | return err; |
1279 | } |
1280 | |
1281 | errno_t |
1282 | kern_nexus_controller_read_provider_attr(const nexus_controller_t ncd, |
1283 | const uuid_t prov_uuid, nexus_attr_t nxa) |
1284 | { |
1285 | struct nxprov_reg_ent nre; |
1286 | struct nxprov_params *p = &nre.npre_prov_params; |
1287 | struct sockopt sopt; |
1288 | struct nxctl *nxctl; |
1289 | int err = 0; |
1290 | |
	if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
1292 | nxa == NULL) { |
1293 | return EINVAL; |
1294 | } |
1295 | |
	bzero(&nre, sizeof(nre));
	bcopy(prov_uuid, nre.npre_prov_uuid, sizeof(uuid_t));
1298 | |
	bzero(&sopt, sizeof(sopt));
1300 | sopt.sopt_dir = SOPT_GET; |
1301 | sopt.sopt_name = NXOPT_NEXUS_PROV_ENTRY; |
1302 | sopt.sopt_val = (user_addr_t)&nre; |
1303 | sopt.sopt_valsize = sizeof(nre); |
1304 | sopt.sopt_p = kernproc; |
1305 | |
1306 | nxctl = ncd->ncd_nxctl; |
1307 | NXCTL_LOCK(nxctl); |
1308 | err = nxctl_get_opt(nxctl, &sopt); |
1309 | NXCTL_UNLOCK(nxctl); |
1310 | |
1311 | if (err == 0) { |
1312 | __nexus_attr_from_params(nxa, p); |
1313 | } |
1314 | |
1315 | return err; |
1316 | } |
1317 | |
1318 | void |
1319 | kern_nexus_controller_destroy(nexus_controller_t ncd) |
1320 | { |
1321 | struct nxctl *nxctl; |
1322 | |
1323 | if (ncd == NULL) { |
1324 | return; |
1325 | } |
1326 | |
1327 | nxctl = ncd->ncd_nxctl; |
1328 | ASSERT(nxctl != NULL); |
1329 | ncd->ncd_nxctl = NULL; |
1330 | nxctl_dtor(nxctl); |
1331 | |
1332 | ncd_free(ncd); |
1333 | } |
1334 | |
1335 | void * |
1336 | kern_nexus_get_context(const kern_nexus_t nx) |
1337 | { |
1338 | return nx->nx_ctx; |
1339 | } |
1340 | |
1341 | void |
1342 | kern_nexus_stop(const kern_nexus_t nx) |
1343 | { |
1344 | SK_LOCK(); |
1345 | nx_stop(nx); |
1346 | SK_UNLOCK(); |
1347 | } |
1348 | |
1349 | errno_t |
1350 | kern_nexus_get_pbufpool(const kern_nexus_t nx, kern_pbufpool_t *ptx_pp, |
1351 | kern_pbufpool_t *prx_pp) |
1352 | { |
1353 | kern_pbufpool_t tpp = NULL, rpp = NULL; |
1354 | int err = 0; |
1355 | |
1356 | if (ptx_pp == NULL && prx_pp == NULL) { |
1357 | return EINVAL; |
1358 | } |
1359 | |
1360 | if (NX_DOM_PROV(nx)->nxdom_prov_nx_mem_info == NULL) { |
1361 | err = ENOTSUP; |
1362 | } else { |
1363 | err = NX_DOM_PROV(nx)->nxdom_prov_nx_mem_info(nx, &tpp, &rpp); |
1364 | } |
1365 | |
1366 | if (ptx_pp != NULL) { |
1367 | *ptx_pp = tpp; |
1368 | } |
1369 | if (prx_pp != NULL) { |
1370 | *prx_pp = rpp; |
1371 | } |
1372 | |
1373 | return err; |
1374 | } |
1375 | |
1376 | static int |
1377 | _kern_nexus_ifattach(struct nxctl *nxctl, const uuid_t nx_uuid, |
1378 | struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host, |
1379 | uuid_t *nx_if_uuid) |
1380 | { |
1381 | struct nx_cfg_req ncr; |
1382 | struct nx_spec_req nsr; |
1383 | struct sockopt sopt; |
1384 | int err = 0; |
1385 | |
1386 | NXCTL_LOCK_ASSERT_HELD(nxctl); |
1387 | |
	if (nx_uuid == NULL || uuid_is_null(nx_uuid)) {
1389 | return EINVAL; |
1390 | } |
1391 | |
	bzero(&nsr, sizeof(nsr));
1393 | if (ifp != NULL) { |
1394 | if (nx_uuid_attachee != NULL) { |
1395 | return EINVAL; |
1396 | } |
1397 | |
1398 | nsr.nsr_flags = NXSPECREQ_IFP; |
1399 | nsr.nsr_ifp = ifp; |
1400 | } else { |
1401 | if (nx_uuid_attachee == NULL) { |
1402 | return EINVAL; |
1403 | } |
1404 | |
1405 | nsr.nsr_flags = NXSPECREQ_UUID; |
1406 | if (host) { |
1407 | nsr.nsr_flags |= NXSPECREQ_HOST; |
1408 | } |
1409 | |
		uuid_copy(nsr.nsr_uuid, nx_uuid_attachee);
1411 | } |
	__nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_ATTACH,
	    &nsr, sizeof(nsr));
1414 | |
	bzero(&sopt, sizeof(sopt));
1416 | sopt.sopt_dir = SOPT_SET; |
1417 | sopt.sopt_name = NXOPT_NEXUS_CONFIG; |
1418 | sopt.sopt_val = (user_addr_t)&ncr; |
1419 | sopt.sopt_valsize = sizeof(ncr); |
1420 | sopt.sopt_p = kernproc; |
1421 | |
1422 | err = nxctl_set_opt(nxctl, &sopt); |
1423 | if (err == 0 && nx_if_uuid != NULL) { |
		uuid_copy(*nx_if_uuid, nsr.nsr_if_uuid);
1425 | } |
1426 | |
1427 | return err; |
1428 | } |
1429 | |
1430 | int |
1431 | kern_nexus_ifattach(nexus_controller_t ncd, const uuid_t nx_uuid, |
1432 | struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host, |
1433 | uuid_t *nx_if_uuid) |
1434 | { |
1435 | struct nxctl *nxctl; |
1436 | int err = 0; |
1437 | |
1438 | if (ncd == NULL) { |
1439 | return EINVAL; |
1440 | } |
1441 | |
1442 | nxctl = ncd->ncd_nxctl; |
1443 | ASSERT(nxctl != NULL); |
1444 | NXCTL_LOCK(nxctl); |
1445 | err = _kern_nexus_ifattach(nxctl, nx_uuid, ifp, nx_uuid_attachee, |
1446 | host, nx_if_uuid); |
1447 | NXCTL_UNLOCK(nxctl); |
1448 | |
1449 | return err; |
1450 | } |
1451 | |
1452 | int |
1453 | kern_nexus_ifdetach(const nexus_controller_t ncd, |
1454 | const uuid_t nx_uuid, const uuid_t nx_if_uuid) |
1455 | { |
1456 | struct nx_cfg_req ncr; |
1457 | struct nx_spec_req nsr; |
1458 | struct sockopt sopt; |
1459 | struct nxctl *nxctl; |
1460 | int err = 0; |
1461 | |
	if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid) ||
	    nx_if_uuid == NULL || uuid_is_null(nx_if_uuid)) {
1464 | return EINVAL; |
1465 | } |
1466 | |
	bzero(&nsr, sizeof(nsr));
	uuid_copy(nsr.nsr_if_uuid, nx_if_uuid);
1469 | |
	__nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_DETACH,
	    &nsr, sizeof(nsr));
1472 | |
	bzero(&sopt, sizeof(sopt));
1474 | sopt.sopt_dir = SOPT_SET; |
1475 | sopt.sopt_name = NXOPT_NEXUS_CONFIG; |
1476 | sopt.sopt_val = (user_addr_t)&ncr; |
1477 | sopt.sopt_valsize = sizeof(ncr); |
1478 | sopt.sopt_p = kernproc; |
1479 | |
1480 | nxctl = ncd->ncd_nxctl; |
1481 | NXCTL_LOCK(nxctl); |
1482 | err = nxctl_set_opt(nxctl, &sopt); |
1483 | NXCTL_UNLOCK(nxctl); |
1484 | |
1485 | return err; |
1486 | } |
1487 | |
1488 | int |
1489 | kern_nexus_get_netif_instance(struct ifnet *ifp, uuid_t nx_uuid) |
1490 | { |
1491 | struct nexus_netif_adapter *if_na; |
1492 | int err = 0; |
1493 | |
1494 | SK_LOCK(); |
1495 | if_na = ifp->if_na; |
1496 | if (if_na != NULL) { |
		uuid_copy(nx_uuid, if_na->nifna_up.na_nx->nx_uuid);
1498 | } else { |
1499 | err = ENXIO; |
1500 | } |
1501 | SK_UNLOCK(); |
1502 | if (err != 0) { |
		uuid_clear(nx_uuid);
1504 | } |
1505 | |
1506 | return err; |
1507 | } |
1508 | |
1509 | int |
1510 | kern_nexus_get_flowswitch_instance(struct ifnet *ifp, uuid_t nx_uuid) |
1511 | { |
1512 | struct nexus_netif_adapter *if_na; |
1513 | struct nx_flowswitch *fsw = NULL; |
1514 | int err = 0; |
1515 | |
1516 | SK_LOCK(); |
1517 | if_na = ifp->if_na; |
1518 | if (if_na != NULL) { |
		fsw = if_na->nifna_netif->nif_fsw;
1520 | } |
1521 | if (fsw != NULL) { |
		uuid_copy(nx_uuid, fsw->fsw_nx->nx_uuid);
1523 | } else { |
1524 | err = ENXIO; |
1525 | } |
1526 | SK_UNLOCK(); |
1527 | if (err != 0) { |
		uuid_clear(nx_uuid);
1529 | } |
1530 | |
1531 | return err; |
1532 | } |
1533 | |
1534 | static void |
1535 | kern_nexus_netagent_add(struct kern_nexus *nx, void *arg0) |
1536 | { |
1537 | #pragma unused(arg0) |
1538 | nx_fsw_netagent_add(nx); |
1539 | } |
1540 | |
1541 | static void |
1542 | kern_nexus_netagent_remove(struct kern_nexus *nx, void *arg0) |
1543 | { |
1544 | #pragma unused(arg0) |
1545 | nx_fsw_netagent_remove(nx); |
1546 | } |
1547 | |
1548 | static void |
1549 | kern_nexus_netagent_update(struct kern_nexus *nx, void *arg0) |
1550 | { |
1551 | #pragma unused(arg0) |
1552 | nx_fsw_netagent_update(nx); |
1553 | } |
1554 | |
1555 | void |
1556 | kern_nexus_register_netagents(void) |
1557 | { |
1558 | kern_nexus_walktree(kern_nexus_netagent_add, NULL, FALSE); |
1559 | } |
1560 | |
1561 | void |
1562 | kern_nexus_deregister_netagents(void) |
1563 | { |
1564 | kern_nexus_walktree(kern_nexus_netagent_remove, NULL, FALSE); |
1565 | } |
1566 | |
1567 | void |
1568 | kern_nexus_update_netagents(void) |
1569 | { |
1570 | kern_nexus_walktree(kern_nexus_netagent_update, NULL, FALSE); |
1571 | } |
1572 | |
1573 | static int |
1574 | _interface_add_remove_netagent(struct ifnet *ifp, bool add) |
1575 | { |
1576 | struct nexus_netif_adapter *if_na; |
1577 | int err = ENXIO; |
1578 | |
1579 | SK_LOCK(); |
1580 | if_na = ifp->if_na; |
1581 | if (if_na != NULL) { |
1582 | struct nx_flowswitch *fsw; |
1583 | |
1584 | fsw = if_na->nifna_netif->nif_fsw; |
1585 | if (fsw != NULL) { |
1586 | if (add) { |
				err = nx_fsw_netagent_add(fsw->fsw_nx);
			} else {
				err = nx_fsw_netagent_remove(fsw->fsw_nx);
1590 | } |
1591 | } |
1592 | } |
1593 | SK_UNLOCK(); |
1594 | return err; |
1595 | } |
1596 | |
1597 | int |
1598 | kern_nexus_interface_add_netagent(struct ifnet *ifp) |
1599 | { |
1600 | return _interface_add_remove_netagent(ifp, true); |
1601 | } |
1602 | |
1603 | int |
1604 | kern_nexus_interface_remove_netagent(struct ifnet *ifp) |
1605 | { |
1606 | return _interface_add_remove_netagent(ifp, false); |
1607 | } |
1608 | |
1609 | int |
1610 | kern_nexus_set_netif_input_tbr_rate(struct ifnet *ifp, uint64_t rate) |
1611 | { |
	/* input TBR is only functional with an active netif attachment */
1613 | if (ifp->if_na == NULL) { |
1614 | if (rate != 0) { |
1615 | return EINVAL; |
1616 | } else { |
1617 | return 0; |
1618 | } |
1619 | } |
1620 | |
1621 | ifp->if_na->nifna_netif->nif_input_rate = rate; |
1622 | return 0; |
1623 | } |
1624 | |
1625 | int |
1626 | kern_nexus_set_if_netem_params(const nexus_controller_t ncd, |
1627 | const uuid_t nx_uuid, void *data, size_t data_len) |
1628 | { |
1629 | struct nx_cfg_req ncr; |
1630 | struct sockopt sopt; |
1631 | struct nxctl *nxctl; |
1632 | int err = 0; |
1633 | |
	if (nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1635 | data_len < sizeof(struct if_netem_params)) { |
1636 | return EINVAL; |
1637 | } |
1638 | |
	__nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_NETEM,
	    data, data_len);
	bzero(&sopt, sizeof(sopt));
1642 | sopt.sopt_dir = SOPT_SET; |
1643 | sopt.sopt_name = NXOPT_NEXUS_CONFIG; |
1644 | sopt.sopt_val = (user_addr_t)&ncr; |
1645 | sopt.sopt_valsize = sizeof(ncr); |
1646 | sopt.sopt_p = kernproc; |
1647 | |
1648 | nxctl = ncd->ncd_nxctl; |
1649 | NXCTL_LOCK(nxctl); |
1650 | err = nxctl_set_opt(nxctl, &sopt); |
1651 | NXCTL_UNLOCK(nxctl); |
1652 | |
1653 | return err; |
1654 | } |
1655 | |
1656 | static int |
1657 | _kern_nexus_flow_config(const nexus_controller_t ncd, const uuid_t nx_uuid, |
1658 | const nxcfg_cmd_t cmd, void *data, size_t data_len) |
1659 | { |
1660 | struct nx_cfg_req ncr; |
1661 | struct sockopt sopt; |
1662 | struct nxctl *nxctl; |
1663 | int err = 0; |
1664 | |
	if (nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1666 | data_len < sizeof(struct nx_flow_req)) { |
1667 | return EINVAL; |
1668 | } |
1669 | |
	__nexus_config_req_prepare(&ncr, nx_uuid, cmd, data, data_len);
1671 | |
	bzero(&sopt, sizeof(sopt));
1673 | sopt.sopt_dir = SOPT_SET; |
1674 | sopt.sopt_name = NXOPT_NEXUS_CONFIG; |
1675 | sopt.sopt_val = (user_addr_t)&ncr; |
1676 | sopt.sopt_valsize = sizeof(ncr); |
1677 | sopt.sopt_p = kernproc; |
1678 | |
1679 | nxctl = ncd->ncd_nxctl; |
1680 | NXCTL_LOCK(nxctl); |
1681 | err = nxctl_set_opt(nxctl, &sopt); |
1682 | NXCTL_UNLOCK(nxctl); |
1683 | |
1684 | return err; |
1685 | } |
1686 | |
1687 | int |
1688 | kern_nexus_flow_add(const nexus_controller_t ncd, const uuid_t nx_uuid, |
1689 | void *data, size_t data_len) |
1690 | { |
	return _kern_nexus_flow_config(ncd, nx_uuid, NXCFG_CMD_FLOW_ADD, data,
1692 | data_len); |
1693 | } |
1694 | |
1695 | int |
1696 | kern_nexus_flow_del(const nexus_controller_t ncd, const uuid_t nx_uuid, |
1697 | void *data, size_t data_len) |
1698 | { |
	return _kern_nexus_flow_config(ncd, nx_uuid, NXCFG_CMD_FLOW_DEL, data,
1700 | data_len); |
1701 | } |
1702 | |
1703 | static struct kern_nexus_domain_provider * |
1704 | nxdom_prov_alloc(zalloc_flags_t how) |
1705 | { |
1706 | SK_LOCK_ASSERT_HELD(); |
1707 | |
1708 | return zalloc_flags(nxdom_prov_zone, how | Z_ZERO); |
1709 | } |
1710 | |
1711 | static void |
1712 | nxdom_prov_free(struct kern_nexus_domain_provider *nxdom_prov) |
1713 | { |
1714 | SK_LOCK_ASSERT_HELD(); |
1715 | |
1716 | ASSERT(nxdom_prov->nxdom_prov_refcnt == 0); |
1717 | ASSERT(!(nxdom_prov->nxdom_prov_flags & |
1718 | (NXDOMPROVF_ATTACHED | NXDOMPROVF_DETACHING))); |
1719 | |
1720 | if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_INITIALIZED) { |
1721 | /* |
1722 | * Tell the domain provider that we're done with this |
1723 | * instance, and it is now free to go away. |
1724 | */ |
1725 | if (nxdom_prov->nxdom_prov_fini != NULL) { |
1726 | nxdom_prov->nxdom_prov_fini(nxdom_prov); |
1727 | } |
1728 | nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_INITIALIZED; |
1729 | } |
	uuid_clear(nxdom_prov->nxdom_prov_uuid);
1731 | nxdom_prov->nxdom_prov_dom = NULL; |
1732 | |
	SK_DF(SK_VERB_MEM, "nxdom_prov 0x%llx %s", SK_KVA(nxdom_prov),
	    ((nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT) ?
	    "FREE" : "DESTROY"));
1736 | if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT) { |
1737 | zfree(nxdom_prov_zone, nxdom_prov); |
1738 | } |
1739 | } |
1740 | |
1741 | void |
1742 | nxdom_prov_retain_locked(struct kern_nexus_domain_provider *nxdom_prov) |
1743 | { |
1744 | SK_LOCK_ASSERT_HELD(); |
1745 | |
1746 | nxdom_prov->nxdom_prov_refcnt++; |
1747 | ASSERT(nxdom_prov->nxdom_prov_refcnt != 0); |
1748 | } |
1749 | |
1750 | void |
1751 | nxdom_prov_retain(struct kern_nexus_domain_provider *nxdom_prov) |
1752 | { |
1753 | SK_LOCK(); |
1754 | nxdom_prov_retain_locked(nxdom_prov); |
1755 | SK_UNLOCK(); |
1756 | } |
1757 | |
1758 | static int |
1759 | nxdom_prov_params_default(struct kern_nexus_domain_provider *nxdom_prov, |
1760 | const uint32_t req, const struct nxprov_params *nxp0, |
1761 | struct nxprov_params *nxp, struct skmem_region_params srp[SKMEM_REGIONS], |
1762 | uint32_t pp_region_config_flags) |
1763 | { |
1764 | struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom; |
1765 | |
1766 | return nxprov_params_adjust(nxdom_prov, req, nxp0, nxp, srp, |
1767 | nxdom, nxdom, nxdom, pp_region_config_flags, NULL); |
1768 | } |
1769 | |
1770 | int |
1771 | nxdom_prov_validate_params(struct kern_nexus_domain_provider *nxdom_prov, |
1772 | const struct nxprov_reg *reg, struct nxprov_params *nxp, |
1773 | struct skmem_region_params srp[SKMEM_REGIONS], const uint32_t oflags, |
1774 | uint32_t pp_region_config_flags) |
1775 | { |
1776 | const struct nxprov_params *nxp0 = ®->nxpreg_params; |
1777 | const uint32_t req = reg->nxpreg_requested; |
1778 | int i, err = 0; |
1779 | |
1780 | ASSERT(reg->nxpreg_version == NXPROV_REG_CURRENT_VERSION && |
1781 | nxp0->nxp_namelen != 0 && |
1782 | nxp0->nxp_namelen <= sizeof(nexus_name_t)); |
1783 | |
1784 | /* fill in with default values and let the nexus override them */ |
	bzero(nxp, sizeof(*nxp));
	bcopy(&nxp0->nxp_name, &nxp->nxp_name, sizeof(nxp->nxp_name));
1787 | nxp->nxp_name[sizeof(nxp->nxp_name) - 1] = '\0'; |
1788 | nxp->nxp_namelen = nxp0->nxp_namelen; |
1789 | nxp->nxp_type = nxp0->nxp_type; |
1790 | nxp->nxp_md_type = nxdom_prov->nxdom_prov_dom->nxdom_md_type; |
1791 | nxp->nxp_md_subtype = nxdom_prov->nxdom_prov_dom->nxdom_md_subtype; |
1792 | nxp->nxp_flags = (nxp0->nxp_flags & NXPF_MASK); |
1793 | nxp->nxp_flags |= oflags; /* override */ |
1794 | nxp->nxp_format = nxp0->nxp_format; |
1795 | nxp->nxp_ifindex = nxp0->nxp_ifindex; |
1796 | nxp->nxp_reject_on_close = nxp0->nxp_reject_on_close; |
1797 | |
1798 | /* inherit default region parameters */ |
1799 | for (i = 0; i < SKMEM_REGIONS; i++) { |
1800 | srp[i] = *skmem_get_default(i); |
1801 | } |
1802 | |
1803 | if (nxdom_prov->nxdom_prov_params != NULL) { |
1804 | err = nxdom_prov->nxdom_prov_params(nxdom_prov, req, nxp0, |
1805 | nxp, srp, pp_region_config_flags); |
1806 | } else { |
1807 | err = nxdom_prov_params_default(nxdom_prov, req, nxp0, |
1808 | nxp, srp, pp_region_config_flags); |
1809 | } |
1810 | return err; |
1811 | } |

boolean_t
nxdom_prov_release_locked(struct kern_nexus_domain_provider *nxdom_prov)
{
	int oldref = nxdom_prov->nxdom_prov_refcnt;

	SK_LOCK_ASSERT_HELD();

	ASSERT(nxdom_prov->nxdom_prov_refcnt != 0);
	if (--nxdom_prov->nxdom_prov_refcnt == 0) {
		nxdom_prov_free(nxdom_prov);
	}

	return oldref == 1;
}

boolean_t
nxdom_prov_release(struct kern_nexus_domain_provider *nxdom_prov)
{
	boolean_t lastref;

	SK_LOCK();
	lastref = nxdom_prov_release_locked(nxdom_prov);
	SK_UNLOCK();

	return lastref;
}
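
/*
 * Illustrative retain/release pairing (a sketch, not an actual call site):
 *
 *	nxdom_prov_retain(nxdom_prov);		// take a reference
 *	...use the provider...
 *	if (nxdom_prov_release(nxdom_prov)) {
 *		// TRUE means the last reference was just dropped and
 *		// nxdom_prov_free() has already run; don't touch it again.
 *	}
 */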

static uint32_t
nxprov_bound_var(uint32_t *v, uint32_t dflt, uint32_t lo, uint32_t hi,
    const char *msg)
{
#pragma unused(msg)
	uint32_t oldv = *v;
	const char *op = NULL;

	if (dflt < lo) {
		dflt = lo;
	}
	if (dflt > hi) {
		dflt = hi;
	}
	if (oldv < lo) {
		*v = dflt;
		op = "bump";
	} else if (oldv > hi) {
		*v = hi;
		op = "clamp";
	}
#if SK_LOG
	if (op != NULL && msg != NULL) {
		SK_ERR("%s %s to %u (was %u)", op, msg, *v, oldv);
	}
#endif /* SK_LOG */
	return *v;
}
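
/*
 * Example (illustrative values): with lo = 2, hi = 64 and dflt = 8, a
 * request of 0 is bumped to the default (8), while a request of 100 is
 * clamped to 64.  Only a clamp can return a value smaller than what was
 * requested, which NXPROV_PARAMS_ADJUST below turns into ENOMEM.
 */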

#define NXPROV_PARAMS_ADJUST(flag, param) do {				\
	uint32_t _v0, _v;						\
	if (req & (flag))						\
		_v = nxp0->nxp_##param;					\
	else								\
		_v = NXDOM_DEF(nxdom_def, param);			\
	_v0 = _v;							\
	if (nxprov_bound_var(&_v, NXDOM_DEF(nxdom_def, param),		\
	    NXDOM_MIN(nxdom_min, param), NXDOM_MAX(nxdom_max, param),	\
	    "nxp_" #param) < _v0) {					\
		err = ENOMEM;						\
		goto error;						\
	}								\
	nxp->nxp_##param = _v;						\
} while (0)
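
/*
 * For instance, NXPROV_PARAMS_ADJUST(NXPREQ_TX_RINGS, tx_rings) takes the
 * caller-requested nxp0->nxp_tx_rings when NXPREQ_TX_RINGS is set (the
 * domain default otherwise), bounds it to the domain's [min, max] range,
 * and fails with ENOMEM if the request had to be clamped down.
 */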

#define MUL(x, y, z) do {						\
	if (__builtin_mul_overflow((x), (y), (z))) {			\
		overflowline = __LINE__;				\
		goto error;						\
	}								\
} while (0)

#define ADD(x, y, z) do {						\
	if (__builtin_add_overflow((x), (y), (z))) {			\
		overflowline = __LINE__;				\
		goto error;						\
	}								\
} while (0)
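
/*
 * MUL and ADD wrap the compiler's checked-arithmetic builtins: each
 * computes x * y or x + y into *z and branches to the error label on
 * overflow, recording __LINE__ so the EOVERFLOW report below can point
 * at the exact computation that failed.
 */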

int
nxprov_params_adjust(struct kern_nexus_domain_provider *nxdom_prov,
    const uint32_t req, const struct nxprov_params *nxp0,
    struct nxprov_params *nxp, struct skmem_region_params srp[SKMEM_REGIONS],
    const struct nxdom *nxdom_def, const struct nxdom *nxdom_min,
    const struct nxdom *nxdom_max, uint32_t pp_region_config_flags,
    int (*adjust_fn)(const struct kern_nexus_domain_provider *,
    const struct nxprov_params *, struct nxprov_adjusted_params *))
{
	uint32_t buf_cnt;
	uint32_t stats_size;
	uint32_t flowadv_max;
	uint32_t nexusadv_size;
	uint32_t capabs;
	uint32_t tx_rings, rx_rings;
	uint32_t alloc_rings = 0, free_rings = 0, ev_rings = 0;
	uint32_t tx_slots, rx_slots;
	uint32_t alloc_slots = 0, free_slots = 0, ev_slots = 0;
	uint32_t buf_size, buf_region_segment_size, max_buffers = 0;
	uint32_t tmp1, tmp2, tmp3, tmp4xpipes, tmpsumrings;
	uint32_t tmpsumall, tmp4xpipesplusrings;
	uint32_t large_buf_size;
	int overflowline = 0;
	int err = 0;

	NXPROV_PARAMS_ADJUST(NXPREQ_TX_RINGS, tx_rings);
	NXPROV_PARAMS_ADJUST(NXPREQ_RX_RINGS, rx_rings);
	NXPROV_PARAMS_ADJUST(NXPREQ_TX_SLOTS, tx_slots);
	NXPROV_PARAMS_ADJUST(NXPREQ_RX_SLOTS, rx_slots);
	NXPROV_PARAMS_ADJUST(NXPREQ_BUF_SIZE, buf_size);
	NXPROV_PARAMS_ADJUST(NXPREQ_LARGE_BUF_SIZE, large_buf_size);
	NXPROV_PARAMS_ADJUST(NXPREQ_STATS_SIZE, stats_size);
	NXPROV_PARAMS_ADJUST(NXPREQ_FLOWADV_MAX, flowadv_max);
	NXPROV_PARAMS_ADJUST(NXPREQ_NEXUSADV_SIZE, nexusadv_size);
	NXPROV_PARAMS_ADJUST(NXPREQ_PIPES, pipes);
	NXPROV_PARAMS_ADJUST(NXPREQ_EXTENSIONS, extensions);
	NXPROV_PARAMS_ADJUST(NXPREQ_MHINTS, mhints);
	NXPROV_PARAMS_ADJUST(NXPREQ_CAPABILITIES, capabilities);
	NXPROV_PARAMS_ADJUST(NXPREQ_QMAP, qmap);
	NXPROV_PARAMS_ADJUST(NXPREQ_MAX_FRAGS, max_frags);

	capabs = NXDOM_DEF(nxdom_def, capabilities);
	if (req & NXPREQ_USER_CHANNEL) {
		if (nxp->nxp_flags & NXPF_USER_CHANNEL) {
			capabs |= NXPCAP_USER_CHANNEL;
		} else {
			capabs &= ~NXPCAP_USER_CHANNEL;
		}
	} else {
		if (capabs & NXPCAP_USER_CHANNEL) {
			nxp->nxp_flags |= NXPF_USER_CHANNEL;
		} else {
			nxp->nxp_flags &= ~NXPF_USER_CHANNEL;
		}
	}

	if (NXDOM_MIN(nxdom_min, capabilities) != 0 &&
	    !(capabs & NXDOM_MIN(nxdom_min, capabilities))) {
		SK_ERR("%s: caps 0x%b < min 0x%b",
		    nxdom_prov->nxdom_prov_name, capabs, NXPCAP_BITS,
		    NXDOM_MIN(nxdom_min, capabilities), NXPCAP_BITS);
		err = EINVAL;
		goto error;
	} else if (NXDOM_MAX(nxdom_max, capabilities) != 0 &&
	    (capabs & ~NXDOM_MAX(nxdom_max, capabilities))) {
		SK_ERR("%s: caps 0x%b > max 0x%b",
		    nxdom_prov->nxdom_prov_name, capabs, NXPCAP_BITS,
		    NXDOM_MAX(nxdom_max, capabilities), NXPCAP_BITS);
		err = EINVAL;
		goto error;
	}

	stats_size = nxp->nxp_stats_size;
	flowadv_max = nxp->nxp_flowadv_max;
	nexusadv_size = nxp->nxp_nexusadv_size;
	tx_rings = nxp->nxp_tx_rings;
	rx_rings = nxp->nxp_rx_rings;
	tx_slots = nxp->nxp_tx_slots;
	rx_slots = nxp->nxp_rx_slots;
	buf_size = nxp->nxp_buf_size;
	large_buf_size = nxp->nxp_large_buf_size;
	buf_region_segment_size = skmem_usr_buf_seg_size;
	ASSERT(pp_region_config_flags & PP_REGION_CONFIG_MD_MAGAZINE_ENABLE);

	if (adjust_fn != NULL) {
		struct nxprov_adjusted_params adj = {
			.adj_md_subtype = &nxp->nxp_md_subtype,
			.adj_stats_size = &stats_size,
			.adj_flowadv_max = &flowadv_max,
			.adj_nexusadv_size = &nexusadv_size,
			.adj_caps = &capabs,
			.adj_tx_rings = &tx_rings,
			.adj_rx_rings = &rx_rings,
			.adj_tx_slots = &tx_slots,
			.adj_rx_slots = &rx_slots,
			.adj_alloc_rings = &alloc_rings,
			.adj_free_rings = &free_rings,
			.adj_alloc_slots = &alloc_slots,
			.adj_free_slots = &free_slots,
			.adj_buf_size = &buf_size,
			.adj_buf_region_segment_size = &buf_region_segment_size,
			.adj_pp_region_config_flags = &pp_region_config_flags,
			.adj_max_frags = &nxp->nxp_max_frags,
			.adj_event_rings = &ev_rings,
			.adj_event_slots = &ev_slots,
			.adj_max_buffers = &max_buffers,
			.adj_large_buf_size = &large_buf_size,
		};
		err = adjust_fn(nxdom_prov, nxp, &adj);
		if (err != 0) {
			goto error;
		}

		ASSERT(capabs >= NXDOM_MIN(nxdom_min, capabilities));
		ASSERT(capabs <= NXDOM_MAX(nxdom_max, capabilities));
	}

	if (nxp->nxp_max_frags > UINT16_MAX) {
		SK_ERR("invalid configuration for max frags %d",
		    nxp->nxp_max_frags);
		err = EINVAL;
	}

	if (nxp->nxp_type == NEXUS_TYPE_USER_PIPE) {
		if (tx_rings != rx_rings) {
			SK_ERR("invalid configuration: {rx,tx} rings must be "
			    "in pairs for user pipe rx_rings(%d) tx_rings(%d)",
			    rx_rings, tx_rings);
			err = EINVAL;
		}
	} else {
		if (nxp->nxp_pipes != 0) {
			SK_ERR("invalid configuration: pipe configuration is "
			    "only valid for user pipe nexus, type %d, pipes %d",
			    nxp->nxp_type, nxp->nxp_pipes);
			err = EINVAL;
		}
	}
	if (err != 0) {
		goto error;
	}

	/* leading and trailing guard pages (if applicable) */
	if (sk_guard) {
		srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size = SKMEM_PAGE_SIZE;
		srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt = sk_headguard_sz;
		skmem_region_params_config(&srp[SKMEM_REGION_GUARD_HEAD]);
		srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size = SKMEM_PAGE_SIZE;
		srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt = sk_tailguard_sz;
		skmem_region_params_config(&srp[SKMEM_REGION_GUARD_TAIL]);
	} else {
		srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size = 0;
		srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt = 0;
		srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size = 0;
		srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt = 0;
	}

	/* update to the adjusted/configured values */
	nxp->nxp_buf_size = buf_size;
	nxp->nxp_tx_slots = tx_slots;
	nxp->nxp_rx_slots = rx_slots;
	nxp->nxp_large_buf_size = large_buf_size;

	SK_D("nxdom \"%s\" (0x%llx) type %d",
	    nxdom_prov->nxdom_prov_dom->nxdom_name,
	    SK_KVA(nxdom_prov->nxdom_prov_dom),
	    nxdom_prov->nxdom_prov_dom->nxdom_type);
	SK_D("nxp \"%s\" (0x%llx) flags 0x%b",
	    nxp->nxp_name, SK_KVA(nxp), nxp->nxp_flags, NXPF_BITS);
	SK_D(" req 0x%b rings %u/%u/%u/%u/%u slots %u/%u/%u/%u/%u buf %u "
	    "type %u subtype %u stats %u flowadv_max %u nexusadv_size %u "
	    "capabs 0x%b pipes %u extensions %u max_frags %u headguard %u "
	    "tailguard %u large_buf %u", req, NXPREQ_BITS, tx_rings, rx_rings,
	    alloc_rings, free_rings, ev_rings, tx_slots, rx_slots, alloc_slots,
	    free_slots, ev_slots, nxp->nxp_buf_size, nxp->nxp_md_type,
	    nxp->nxp_md_subtype, stats_size, flowadv_max, nexusadv_size,
	    capabs, NXPCAP_BITS, nxp->nxp_pipes, nxp->nxp_extensions,
	    nxp->nxp_max_frags, srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size *
	    srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt,
	    srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size *
	    srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt,
	    nxp->nxp_large_buf_size);

	/*
	 * tmp4xpipes = 4 * nxp->nxp_pipes
	 */
	MUL(4, nxp->nxp_pipes, &tmp4xpipes);

	/*
	 * tmp4xpipesplusrings = tx_rings + (4 * nxp->nxp_pipes)
	 */
	VERIFY((tmp4xpipes == 0) || (rx_rings == tx_rings));
	ADD(tx_rings, tmp4xpipes, &tmp4xpipesplusrings);

	/*
	 * tmpsumrings = tx_rings + rx_rings + alloc_rings + free_rings +
	 * ev_rings
	 */
	ADD(tx_rings, rx_rings, &tmpsumrings);
	ADD(tmpsumrings, alloc_rings, &tmpsumrings);
	ADD(tmpsumrings, free_rings, &tmpsumrings);
	ADD(tmpsumrings, ev_rings, &tmpsumrings);

	/*
	 * tmpsumall = (tx_rings + rx_rings +
	 * alloc_rings + free_rings + ev_rings + (4 * nxp->nxp_pipes))
	 */
	ADD(tmpsumrings, tmp4xpipes, &tmpsumall);

	/* possibly increase them to fit user request */
	VERIFY(CHANNEL_SCHEMA_SIZE(tmpsumrings) <= UINT32_MAX);
	srp[SKMEM_REGION_SCHEMA].srp_r_obj_size =
	    (uint32_t)CHANNEL_SCHEMA_SIZE(tmpsumrings);
	/* worst case is one channel bound to each ring pair */
	srp[SKMEM_REGION_SCHEMA].srp_r_obj_cnt = tmp4xpipesplusrings;

	skmem_region_params_config(&srp[SKMEM_REGION_SCHEMA]);

	srp[SKMEM_REGION_RING].srp_r_obj_size =
	    sizeof(struct __user_channel_ring);
	/* each pipe endpoint needs two tx rings and two rx rings */
	srp[SKMEM_REGION_RING].srp_r_obj_cnt = tmpsumall;
	skmem_region_params_config(&srp[SKMEM_REGION_RING]);

	/*
	 * For each pipe we only need the buffers for the "real" rings.
	 * On the other hand, the pipe ring dimension may differ from
	 * the parent port ring dimension. As a compromise, we allocate twice
	 * the space actually needed if the pipe rings were the same size as
	 * the parent rings.
	 *
	 * buf_cnt = ((4 * nxp->nxp_pipes) + rx_rings) * rx_slots +
	 *     ((4 * nxp->nxp_pipes) + tx_rings) * tx_slots +
	 *     (ev_rings * ev_slots);
	 */
	if (nxp->nxp_type == NEXUS_TYPE_USER_PIPE) {
		MUL(tmp4xpipesplusrings, rx_slots, &tmp1);
		MUL(tmp4xpipesplusrings, tx_slots, &tmp2);
		ASSERT(ev_rings == 0);
		tmp3 = 0;
	} else {
		MUL(rx_rings, rx_slots, &tmp1);
		MUL(tx_rings, tx_slots, &tmp2);
		MUL(ev_rings, ev_slots, &tmp3);
	}
	ADD(tmp1, tmp2, &buf_cnt);
	ADD(tmp3, buf_cnt, &buf_cnt);

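	/*
	 * With multi-buflet packets (max_frags > 1), scale the buffer
	 * count so that each packet can gather enough buflets to reach
	 * NX_MAX_AGGR_PKT_SIZE, capped at max_frags buffers per packet,
	 * whichever is smaller.
	 */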
	if (nxp->nxp_max_frags > 1) {
		pp_region_config_flags |= PP_REGION_CONFIG_BUFLET;
		buf_cnt = MIN((((uint32_t)P2ROUNDUP(NX_MAX_AGGR_PKT_SIZE,
		    nxp->nxp_buf_size) / nxp->nxp_buf_size) * buf_cnt),
		    (buf_cnt * nxp->nxp_max_frags));
	}

	if (max_buffers != 0) {
		buf_cnt = MIN(max_buffers, buf_cnt);
	}

	if ((nxp->nxp_flags & NXPF_USER_CHANNEL) == 0) {
		pp_region_config_flags |= PP_REGION_CONFIG_KERNEL_ONLY;
	}

	/* # of metadata objects is same as the # of buffer objects */
	ASSERT(buf_region_segment_size != 0);
	pp_regions_params_adjust(srp, nxp->nxp_md_type, nxp->nxp_md_subtype,
	    buf_cnt, (uint16_t)nxp->nxp_max_frags, nxp->nxp_buf_size,
	    nxp->nxp_large_buf_size, buf_cnt, buf_region_segment_size,
	    pp_region_config_flags);

	/* statistics region size */
	if (stats_size != 0) {
		srp[SKMEM_REGION_USTATS].srp_r_obj_size = stats_size;
		srp[SKMEM_REGION_USTATS].srp_r_obj_cnt = 1;
		skmem_region_params_config(&srp[SKMEM_REGION_USTATS]);
	} else {
		srp[SKMEM_REGION_USTATS].srp_r_obj_size = 0;
		srp[SKMEM_REGION_USTATS].srp_r_obj_cnt = 0;
		srp[SKMEM_REGION_USTATS].srp_c_obj_size = 0;
		srp[SKMEM_REGION_USTATS].srp_c_obj_cnt = 0;
	}

	/* flow advisory region size */
	if (flowadv_max != 0) {
		_CASSERT(NX_FLOWADV_DEFAULT * sizeof(struct __flowadv_entry) <=
		    SKMEM_MIN_SEG_SIZE);
		MUL(sizeof(struct __flowadv_entry), flowadv_max, &tmp1);
		srp[SKMEM_REGION_FLOWADV].srp_r_obj_size = tmp1;
		srp[SKMEM_REGION_FLOWADV].srp_r_obj_cnt = 1;
		skmem_region_params_config(&srp[SKMEM_REGION_FLOWADV]);
	} else {
		srp[SKMEM_REGION_FLOWADV].srp_r_obj_size = 0;
		srp[SKMEM_REGION_FLOWADV].srp_r_obj_cnt = 0;
		srp[SKMEM_REGION_FLOWADV].srp_c_obj_size = 0;
		srp[SKMEM_REGION_FLOWADV].srp_c_obj_cnt = 0;
	}

	/* nexus advisory region size */
	if (nexusadv_size != 0) {
		srp[SKMEM_REGION_NEXUSADV].srp_r_obj_size = nexusadv_size +
		    sizeof(struct __kern_nexus_adv_metadata);
		srp[SKMEM_REGION_NEXUSADV].srp_r_obj_cnt = 1;
		skmem_region_params_config(&srp[SKMEM_REGION_NEXUSADV]);
	} else {
		srp[SKMEM_REGION_NEXUSADV].srp_r_obj_size = 0;
		srp[SKMEM_REGION_NEXUSADV].srp_r_obj_cnt = 0;
		srp[SKMEM_REGION_NEXUSADV].srp_c_obj_size = 0;
		srp[SKMEM_REGION_NEXUSADV].srp_c_obj_cnt = 0;
	}

	/* sysctls region is not applicable to nexus */
	srp[SKMEM_REGION_SYSCTLS].srp_r_obj_size = 0;
	srp[SKMEM_REGION_SYSCTLS].srp_r_obj_cnt = 0;
	srp[SKMEM_REGION_SYSCTLS].srp_c_obj_size = 0;
	srp[SKMEM_REGION_SYSCTLS].srp_c_obj_cnt = 0;

	/*
	 * Since the tx/alloc/event slots share the same region and cache,
	 * we will use the same object size for all three types of slots.
	 */
	srp[SKMEM_REGION_TXAKSD].srp_r_obj_size =
	    (MAX(MAX(tx_slots, alloc_slots), ev_slots)) * SLOT_DESC_SZ;
	srp[SKMEM_REGION_TXAKSD].srp_r_obj_cnt = tx_rings + alloc_rings +
	    ev_rings;
	skmem_region_params_config(&srp[SKMEM_REGION_TXAKSD]);

	/* USD and KSD objects share the same size and count */
	srp[SKMEM_REGION_TXAUSD].srp_r_obj_size =
	    srp[SKMEM_REGION_TXAKSD].srp_r_obj_size;
	srp[SKMEM_REGION_TXAUSD].srp_r_obj_cnt =
	    srp[SKMEM_REGION_TXAKSD].srp_r_obj_cnt;
	skmem_region_params_config(&srp[SKMEM_REGION_TXAUSD]);

	/*
	 * Since the rx/free slots share the same region and cache,
	 * we will use the same object size for both types of slots.
	 */
	srp[SKMEM_REGION_RXFKSD].srp_r_obj_size =
	    MAX(rx_slots, free_slots) * SLOT_DESC_SZ;
	srp[SKMEM_REGION_RXFKSD].srp_r_obj_cnt = rx_rings + free_rings;
	skmem_region_params_config(&srp[SKMEM_REGION_RXFKSD]);

	/* USD and KSD objects share the same size and count */
	srp[SKMEM_REGION_RXFUSD].srp_r_obj_size =
	    srp[SKMEM_REGION_RXFKSD].srp_r_obj_size;
	srp[SKMEM_REGION_RXFUSD].srp_r_obj_cnt =
	    srp[SKMEM_REGION_RXFKSD].srp_r_obj_cnt;
	skmem_region_params_config(&srp[SKMEM_REGION_RXFUSD]);

	/* update these based on the adjusted/configured values */
	nxp->nxp_meta_size = srp[SKMEM_REGION_KMD].srp_c_obj_size;
	nxp->nxp_stats_size = stats_size;
	nxp->nxp_flowadv_max = flowadv_max;
	nxp->nxp_nexusadv_size = nexusadv_size;
	nxp->nxp_capabilities = capabs;

error:
	if (overflowline) {
		err = EOVERFLOW;
		SK_ERR("math overflow in %s on line %d",
		    __func__, overflowline);
	}
	return err;
}

#undef ADD
#undef MUL
#undef NXPROV_PARAMS_ADJUST

static void
nxprov_detaching_enqueue(struct kern_nexus_domain_provider *nxdom_prov)
{
	SK_LOCK_ASSERT_HELD();

	ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
	    NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);

	++nxprov_detaching_cnt;
	ASSERT(nxprov_detaching_cnt != 0);
	/*
	 * Insert this into the detaching list; the caller is expected
	 * to have held a reference, most likely the same one that was
	 * used for the per-domain provider list.
	 */
	STAILQ_INSERT_TAIL(&nxprov_detaching_head, nxdom_prov,
	    nxdom_prov_detaching_link);
	wakeup((caddr_t)&nxprov_detach_wchan);
}

static struct kern_nexus_domain_provider *
nxprov_detaching_dequeue(void)
{
	struct kern_nexus_domain_provider *nxdom_prov;

	SK_LOCK_ASSERT_HELD();

	nxdom_prov = STAILQ_FIRST(&nxprov_detaching_head);
	ASSERT(nxprov_detaching_cnt != 0 || nxdom_prov == NULL);
	if (nxdom_prov != NULL) {
		ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
		    NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);
		ASSERT(nxprov_detaching_cnt != 0);
		--nxprov_detaching_cnt;
		STAILQ_REMOVE(&nxprov_detaching_head, nxdom_prov,
		    kern_nexus_domain_provider, nxdom_prov_detaching_link);
	}
	return nxdom_prov;
}

__attribute__((noreturn))
static void
nxprov_detacher(void *v, wait_result_t w)
{
#pragma unused(v, w)
	SK_LOCK();
	(void) msleep0(&nxprov_detach_wchan, &sk_lock, (PZERO - 1),
	    __func__, 0, nxprov_detacher_cont);
	/*
	 * msleep0() shouldn't have returned as PCATCH was not set;
	 * therefore assert in this case.
	 */
	SK_UNLOCK();
	VERIFY(0);
	/* NOTREACHED */
	__builtin_unreachable();
}

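/*
 * Continuation for the detacher thread.  msleep0() with a continuation
 * does not return to its caller: when the wait channel is signalled, the
 * scheduler re-enters nxprov_detacher_cont() from the top.  That is why
 * the sleep below sits inside an apparently infinite loop and is marked
 * NOTREACHED.
 */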
static int
nxprov_detacher_cont(int err)
{
#pragma unused(err)
	struct kern_nexus_domain_provider *nxdom_prov;

	for (;;) {
		SK_LOCK_ASSERT_HELD();
		while (nxprov_detaching_cnt == 0) {
			(void) msleep0(&nxprov_detach_wchan, &sk_lock,
			    (PZERO - 1), __func__, 0, nxprov_detacher_cont);
			/* NOTREACHED */
		}

		ASSERT(STAILQ_FIRST(&nxprov_detaching_head) != NULL);

		nxdom_prov = nxprov_detaching_dequeue();
		if (nxdom_prov != NULL) {
			nxdom_del_provider_final(nxdom_prov);
		}
	}
}
