1 | /* |
2 | * Copyright (c) 2015-2021 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | /* |
30 | * Copyright (C) 2014 Giuseppe Lettieri. All rights reserved. |
31 | * |
32 | * Redistribution and use in source and binary forms, with or without |
33 | * modification, are permitted provided that the following conditions |
34 | * are met: |
35 | * 1. Redistributions of source code must retain the above copyright |
36 | * notice, this list of conditions and the following disclaimer. |
37 | * 2. Redistributions in binary form must reproduce the above copyright |
38 | * notice, this list of conditions and the following disclaimer in the |
39 | * documentation and/or other materials provided with the distribution. |
40 | * |
41 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
42 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
43 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
44 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE |
45 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
46 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
47 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
48 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
49 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
50 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
51 | * SUCH DAMAGE. |
52 | */ |
53 | |
54 | #include <skywalk/os_skywalk_private.h> |
55 | #include <skywalk/nexus/upipe/nx_user_pipe.h> |
56 | |
57 | #define NX_UPIPE_RINGSIZE 128 /* default ring size */ |
58 | #define NX_UPIPE_MAXRINGS NX_MAX_NUM_RING_PAIR |
59 | #define NX_UPIPE_MINSLOTS 2 /* XXX same as above */ |
60 | #define NX_UPIPE_MAXSLOTS 4096 /* XXX same as above */ |
61 | #define NX_UPIPE_BUFSIZE (2 * 1024) |
62 | #define NX_UPIPE_MINBUFSIZE 1024 |
63 | #define NX_UPIPE_MAXBUFSIZE (16 * 1024) |
64 | #define NX_UPIPE_MHINTS NEXUS_MHINTS_NORMAL |
65 | |
66 | static int nx_upipe_na_alloc(struct nexus_adapter *, uint32_t); |
67 | static struct nexus_upipe_adapter *nx_upipe_find(struct nexus_adapter *, |
68 | uint32_t); |
69 | static int nx_upipe_na_add(struct nexus_adapter *, |
70 | struct nexus_upipe_adapter *); |
71 | static void nx_upipe_na_remove(struct nexus_adapter *, |
72 | struct nexus_upipe_adapter *); |
73 | static int nx_upipe_na_txsync(struct __kern_channel_ring *, |
74 | struct proc *, uint32_t); |
75 | static int nx_upipe_na_txsync_locked(struct __kern_channel_ring *, |
76 | struct proc *, uint32_t, int *, boolean_t); |
77 | static int nx_upipe_na_rxsync(struct __kern_channel_ring *, |
78 | struct proc *, uint32_t); |
79 | static int nx_upipe_na_krings_create(struct nexus_adapter *, |
80 | struct kern_channel *); |
81 | static int nx_upipe_na_activate(struct nexus_adapter *, na_activate_mode_t); |
82 | static void nx_upipe_na_krings_delete(struct nexus_adapter *, |
83 | struct kern_channel *, boolean_t); |
84 | static void nx_upipe_na_dtor(struct nexus_adapter *); |
85 | |
86 | static void nx_upipe_dom_init(struct nxdom *); |
87 | static void nx_upipe_dom_terminate(struct nxdom *); |
88 | static void nx_upipe_dom_fini(struct nxdom *); |
89 | static int nx_upipe_dom_bind_port(struct kern_nexus *, nexus_port_t *, |
90 | struct nxbind *, void *); |
91 | static int nx_upipe_dom_unbind_port(struct kern_nexus *, nexus_port_t); |
92 | static int nx_upipe_dom_connect(struct kern_nexus_domain_provider *, |
93 | struct kern_nexus *, struct kern_channel *, struct chreq *, |
94 | struct kern_channel *, struct nxbind *, struct proc *); |
95 | static void nx_upipe_dom_disconnect(struct kern_nexus_domain_provider *, |
96 | struct kern_nexus *, struct kern_channel *); |
97 | static void nx_upipe_dom_defunct(struct kern_nexus_domain_provider *, |
98 | struct kern_nexus *, struct kern_channel *, struct proc *); |
99 | static void nx_upipe_dom_defunct_finalize(struct kern_nexus_domain_provider *, |
100 | struct kern_nexus *, struct kern_channel *, boolean_t); |
101 | |
102 | static int nx_upipe_prov_init(struct kern_nexus_domain_provider *); |
103 | static int nx_upipe_prov_params_adjust( |
104 | const struct kern_nexus_domain_provider *, const struct nxprov_params *, |
105 | struct nxprov_adjusted_params *); |
106 | static int nx_upipe_prov_params(struct kern_nexus_domain_provider *, |
107 | const uint32_t, const struct nxprov_params *, struct nxprov_params *, |
108 | struct skmem_region_params[SKMEM_REGIONS], uint32_t); |
109 | static int nx_upipe_prov_mem_new(struct kern_nexus_domain_provider *, |
110 | struct kern_nexus *, struct nexus_adapter *); |
111 | static void nx_upipe_prov_fini(struct kern_nexus_domain_provider *); |
112 | static int nx_upipe_prov_nx_ctor(struct kern_nexus *); |
113 | static void nx_upipe_prov_nx_dtor(struct kern_nexus *); |
114 | |
115 | static struct nexus_upipe_adapter *na_upipe_alloc(zalloc_flags_t); |
116 | static void na_upipe_free(struct nexus_adapter *); |
117 | |
118 | static struct nx_upipe *nx_upipe_alloc(zalloc_flags_t); |
119 | static void nx_upipe_free(struct nx_upipe *); |
120 | |
121 | #if (DEVELOPMENT || DEBUG) |
122 | static uint32_t nx_upipe_mhints = 0; |
SYSCTL_NODE(_kern_skywalk, OID_AUTO, upipe, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "Skywalk upipe tuning");
SYSCTL_UINT(_kern_skywalk_upipe, OID_AUTO, nx_mhints,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nx_upipe_mhints, 0,
    "upipe nexus memory usage hints");
128 | #endif /* (DEVELOPMENT || DEBUG) */ |
129 | |
130 | struct nxdom nx_upipe_dom_s = { |
131 | .nxdom_prov_head = |
132 | STAILQ_HEAD_INITIALIZER(nx_upipe_dom_s.nxdom_prov_head), |
133 | .nxdom_type = NEXUS_TYPE_USER_PIPE, |
134 | .nxdom_md_type = NEXUS_META_TYPE_QUANTUM, |
135 | .nxdom_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD, |
	.nxdom_name = "upipe",
137 | .nxdom_ports = { |
138 | .nb_def = 2, |
139 | .nb_min = 2, |
140 | .nb_max = 2, |
141 | }, |
142 | .nxdom_tx_rings = { |
143 | .nb_def = 1, |
144 | .nb_min = 1, |
145 | .nb_max = NX_UPIPE_MAXRINGS, |
146 | }, |
147 | .nxdom_rx_rings = { |
148 | .nb_def = 1, |
149 | .nb_min = 1, |
150 | .nb_max = NX_UPIPE_MAXRINGS, |
151 | }, |
152 | .nxdom_tx_slots = { |
153 | .nb_def = NX_UPIPE_RINGSIZE, |
154 | .nb_min = NX_UPIPE_MINSLOTS, |
155 | .nb_max = NX_UPIPE_MAXSLOTS, |
156 | }, |
157 | .nxdom_rx_slots = { |
158 | .nb_def = NX_UPIPE_RINGSIZE, |
159 | .nb_min = NX_UPIPE_MINSLOTS, |
160 | .nb_max = NX_UPIPE_MAXSLOTS, |
161 | }, |
162 | .nxdom_buf_size = { |
163 | .nb_def = NX_UPIPE_BUFSIZE, |
164 | .nb_min = NX_UPIPE_MINBUFSIZE, |
165 | .nb_max = NX_UPIPE_MAXBUFSIZE, |
166 | }, |
167 | .nxdom_large_buf_size = { |
168 | .nb_def = 0, |
169 | .nb_min = 0, |
170 | .nb_max = 0, |
171 | }, |
172 | .nxdom_meta_size = { |
173 | .nb_def = NX_METADATA_OBJ_MIN_SZ, |
174 | .nb_min = NX_METADATA_OBJ_MIN_SZ, |
175 | .nb_max = NX_METADATA_USR_MAX_SZ, |
176 | }, |
177 | .nxdom_stats_size = { |
178 | .nb_def = 0, |
179 | .nb_min = 0, |
180 | .nb_max = NX_STATS_MAX_SZ, |
181 | }, |
182 | .nxdom_pipes = { |
183 | .nb_def = 0, |
184 | .nb_min = 0, |
185 | .nb_max = NX_UPIPE_MAXPIPES, |
186 | }, |
187 | .nxdom_mhints = { |
188 | .nb_def = NX_UPIPE_MHINTS, |
189 | .nb_min = NEXUS_MHINTS_NORMAL, |
190 | .nb_max = (NEXUS_MHINTS_NORMAL | NEXUS_MHINTS_WILLNEED | |
191 | NEXUS_MHINTS_LOWLATENCY | NEXUS_MHINTS_HIUSE), |
192 | }, |
193 | .nxdom_flowadv_max = { |
194 | .nb_def = 0, |
195 | .nb_min = 0, |
196 | .nb_max = NX_FLOWADV_MAX, |
197 | }, |
198 | .nxdom_nexusadv_size = { |
199 | .nb_def = 0, |
200 | .nb_min = 0, |
201 | .nb_max = NX_NEXUSADV_MAX_SZ, |
202 | }, |
203 | .nxdom_capabilities = { |
204 | .nb_def = NXPCAP_USER_CHANNEL, |
205 | .nb_min = NXPCAP_USER_CHANNEL, |
206 | .nb_max = NXPCAP_USER_CHANNEL, |
207 | }, |
208 | .nxdom_qmap = { |
209 | .nb_def = NEXUS_QMAP_TYPE_INVALID, |
210 | .nb_min = NEXUS_QMAP_TYPE_INVALID, |
211 | .nb_max = NEXUS_QMAP_TYPE_INVALID, |
212 | }, |
213 | .nxdom_max_frags = { |
214 | .nb_def = NX_PBUF_FRAGS_DEFAULT, |
215 | .nb_min = NX_PBUF_FRAGS_MIN, |
216 | .nb_max = NX_PBUF_FRAGS_DEFAULT, |
217 | }, |
218 | .nxdom_init = nx_upipe_dom_init, |
219 | .nxdom_terminate = nx_upipe_dom_terminate, |
220 | .nxdom_fini = nx_upipe_dom_fini, |
221 | .nxdom_find_port = NULL, |
222 | .nxdom_port_is_reserved = NULL, |
223 | .nxdom_bind_port = nx_upipe_dom_bind_port, |
224 | .nxdom_unbind_port = nx_upipe_dom_unbind_port, |
225 | .nxdom_connect = nx_upipe_dom_connect, |
226 | .nxdom_disconnect = nx_upipe_dom_disconnect, |
227 | .nxdom_defunct = nx_upipe_dom_defunct, |
228 | .nxdom_defunct_finalize = nx_upipe_dom_defunct_finalize, |
229 | }; |
230 | |
231 | static struct kern_nexus_domain_provider nx_upipe_prov_s = { |
232 | .nxdom_prov_name = NEXUS_PROVIDER_USER_PIPE, |
233 | .nxdom_prov_flags = NXDOMPROVF_DEFAULT, |
234 | .nxdom_prov_cb = { |
235 | .dp_cb_init = nx_upipe_prov_init, |
236 | .dp_cb_fini = nx_upipe_prov_fini, |
237 | .dp_cb_params = nx_upipe_prov_params, |
238 | .dp_cb_mem_new = nx_upipe_prov_mem_new, |
239 | .dp_cb_config = NULL, |
240 | .dp_cb_nx_ctor = nx_upipe_prov_nx_ctor, |
241 | .dp_cb_nx_dtor = nx_upipe_prov_nx_dtor, |
242 | .dp_cb_nx_mem_info = NULL, |
243 | .dp_cb_nx_mib_get = NULL, |
244 | .dp_cb_nx_stop = NULL, |
245 | }, |
246 | }; |
247 | |
248 | static SKMEM_TYPE_DEFINE(na_upipe_zone, struct nexus_upipe_adapter); |
249 | |
250 | static SKMEM_TYPE_DEFINE(nx_upipe_zone, struct nx_upipe); |
251 | |
252 | #define SKMEM_TAG_PIPES "com.apple.skywalk.pipes" |
253 | static SKMEM_TAG_DEFINE(skmem_tag_pipes, SKMEM_TAG_PIPES); |
254 | |
255 | static void |
256 | nx_upipe_dom_init(struct nxdom *nxdom) |
257 | { |
258 | SK_LOCK_ASSERT_HELD(); |
259 | ASSERT(!(nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED)); |
260 | |
261 | (void) nxdom_prov_add(nxdom, &nx_upipe_prov_s); |
262 | } |
263 | |
264 | static void |
265 | nx_upipe_dom_terminate(struct nxdom *nxdom) |
266 | { |
267 | struct kern_nexus_domain_provider *nxdom_prov, *tnxdp; |
268 | |
269 | STAILQ_FOREACH_SAFE(nxdom_prov, &nxdom->nxdom_prov_head, |
270 | nxdom_prov_link, tnxdp) { |
271 | (void) nxdom_prov_del(nxdom_prov); |
272 | } |
273 | } |
274 | |
275 | static void |
276 | nx_upipe_dom_fini(struct nxdom *nxdom) |
277 | { |
278 | #pragma unused(nxdom) |
279 | } |
280 | |
281 | static int |
282 | nx_upipe_prov_init(struct kern_nexus_domain_provider *nxdom_prov) |
283 | { |
284 | #pragma unused(nxdom_prov) |
	SK_D("initializing %s", nxdom_prov->nxdom_prov_name);
286 | return 0; |
287 | } |
288 | |
289 | static int |
290 | nx_upipe_prov_params_adjust(const struct kern_nexus_domain_provider *nxdom_prov, |
291 | const struct nxprov_params *nxp, struct nxprov_adjusted_params *adj) |
292 | { |
293 | #pragma unused(nxdom_prov, nxp) |
	/*
	 * A user pipe requires twice the number of rings.
	 * The ring counts must also be symmetrical.
	 */
298 | if (*(adj->adj_tx_rings) != *(adj->adj_rx_rings)) { |
		SK_ERR("rings: tx (%u) != rx (%u)", *(adj->adj_tx_rings),
		    *(adj->adj_rx_rings));
301 | return EINVAL; |
302 | } |
303 | *(adj->adj_tx_rings) *= 2; |
304 | *(adj->adj_rx_rings) *= 2; |
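	/*
	 * e.g. a provider asking for 1 TX and 1 RX ring has 2 of each
	 * accounted for here: one TX/RX pair for the master endpoint
	 * and one for the slave, both backed by the same parent adapter.
	 */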
305 | return 0; |
306 | } |
307 | |
308 | static int |
309 | nx_upipe_prov_params(struct kern_nexus_domain_provider *nxdom_prov, |
310 | const uint32_t req, const struct nxprov_params *nxp0, |
311 | struct nxprov_params *nxp, struct skmem_region_params srp[SKMEM_REGIONS], |
312 | uint32_t pp_region_config_flags) |
313 | { |
314 | struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom; |
315 | int err; |
316 | |
	err = nxprov_params_adjust(nxdom_prov, req, nxp0, nxp, srp,
	    nxdom, nxdom, nxdom, pp_region_config_flags,
	    nx_upipe_prov_params_adjust);
320 | #if (DEVELOPMENT || DEBUG) |
321 | /* sysctl override */ |
322 | if ((err == 0) && (nx_upipe_mhints != 0)) { |
323 | nxp->nxp_mhints = nx_upipe_mhints; |
324 | } |
325 | #endif /* (DEVELOPMENT || DEBUG) */ |
326 | return err; |
327 | } |
328 | |
329 | static int |
330 | nx_upipe_prov_mem_new(struct kern_nexus_domain_provider *nxdom_prov, |
331 | struct kern_nexus *nx, struct nexus_adapter *na) |
332 | { |
333 | #pragma unused(nxdom_prov) |
334 | int err = 0; |
335 | |
	SK_DF(SK_VERB_USER_PIPE,
	    "nx 0x%llx (\"%s\":\"%s\") na \"%s\" (0x%llx)", SK_KVA(nx),
	    NX_DOM(nx)->nxdom_name, nxdom_prov->nxdom_prov_name, na->na_name,
	    SK_KVA(na));
340 | |
341 | ASSERT(na->na_arena == NULL); |
342 | ASSERT(NX_USER_CHANNEL_PROV(nx)); |
343 | /* |
344 | * The underlying nexus adapters already share the same memory |
345 | * allocator, and thus we don't care about storing the pp in |
346 | * the nexus. |
347 | * |
348 | * This means that clients calling kern_nexus_get_pbufpool() |
349 | * will get NULL, but this is fine since we don't expose the |
350 | * user pipe to external kernel clients. |
351 | */ |
352 | na->na_arena = skmem_arena_create_for_nexus(na, |
353 | NX_PROV(nx)->nxprov_region_params, NULL, NULL, FALSE, |
354 | FALSE, NULL, &err); |
355 | ASSERT(na->na_arena != NULL || err != 0); |
356 | |
357 | return err; |
358 | } |
359 | |
360 | static void |
361 | nx_upipe_prov_fini(struct kern_nexus_domain_provider *nxdom_prov) |
362 | { |
363 | #pragma unused(nxdom_prov) |
	SK_D("destroying %s", nxdom_prov->nxdom_prov_name);
365 | } |
366 | |
367 | static int |
368 | nx_upipe_prov_nx_ctor(struct kern_nexus *nx) |
369 | { |
370 | SK_LOCK_ASSERT_HELD(); |
371 | ASSERT(nx->nx_arg == NULL); |
372 | |
	SK_D("nexus 0x%llx (%s)", SK_KVA(nx), NX_DOM_PROV(nx)->nxdom_prov_name);
374 | |
375 | nx->nx_arg = nx_upipe_alloc(Z_WAITOK); |
	SK_D("create new upipe 0x%llx for nexus 0x%llx",
	    SK_KVA(NX_UPIPE_PRIVATE(nx)), SK_KVA(nx));
378 | |
379 | return 0; |
380 | } |
381 | |
382 | static void |
383 | nx_upipe_prov_nx_dtor(struct kern_nexus *nx) |
384 | { |
385 | struct nx_upipe *u = NX_UPIPE_PRIVATE(nx); |
386 | |
387 | SK_LOCK_ASSERT_HELD(); |
388 | |
	SK_D("nexus 0x%llx (%s) upipe 0x%llx", SK_KVA(nx),
	    NX_DOM_PROV(nx)->nxdom_prov_name, SK_KVA(u));
391 | |
392 | if (u->nup_cli_nxb != NULL) { |
393 | nxb_free(u->nup_cli_nxb); |
394 | u->nup_cli_nxb = NULL; |
395 | } |
396 | if (u->nup_srv_nxb != NULL) { |
397 | nxb_free(u->nup_srv_nxb); |
398 | u->nup_srv_nxb = NULL; |
399 | } |
400 | |
	SK_DF(SK_VERB_USER_PIPE, "marking upipe 0x%llx as free", SK_KVA(u));
402 | nx_upipe_free(u); |
403 | nx->nx_arg = NULL; |
404 | } |
405 | |
406 | static struct nexus_upipe_adapter * |
407 | na_upipe_alloc(zalloc_flags_t how) |
408 | { |
409 | struct nexus_upipe_adapter *pna; |
410 | |
411 | _CASSERT(offsetof(struct nexus_upipe_adapter, pna_up) == 0); |
412 | |
413 | pna = zalloc_flags(na_upipe_zone, how | Z_ZERO); |
414 | if (pna) { |
415 | pna->pna_up.na_type = NA_USER_PIPE; |
416 | pna->pna_up.na_free = na_upipe_free; |
417 | } |
418 | return pna; |
419 | } |
420 | |
421 | static void |
422 | na_upipe_free(struct nexus_adapter *na) |
423 | { |
424 | struct nexus_upipe_adapter *pna = (struct nexus_upipe_adapter *)na; |
425 | |
426 | ASSERT(pna->pna_up.na_refcount == 0); |
	SK_DF(SK_VERB_MEM, "pna 0x%llx FREE", SK_KVA(pna));
	bzero(pna, sizeof(*pna));
429 | zfree(na_upipe_zone, pna); |
430 | } |
431 | |
432 | static int |
433 | nx_upipe_dom_bind_port(struct kern_nexus *nx, nexus_port_t *nx_port, |
434 | struct nxbind *nxb0, void *info) |
435 | { |
436 | #pragma unused(info) |
437 | struct nx_upipe *u = NX_UPIPE_PRIVATE(nx); |
438 | struct nxbind *nxb = NULL; |
439 | int error = 0; |
440 | |
441 | ASSERT(nx_port != NULL); |
442 | ASSERT(nxb0 != NULL); |
443 | |
444 | switch (*nx_port) { |
445 | case NEXUS_PORT_USER_PIPE_CLIENT: |
446 | case NEXUS_PORT_USER_PIPE_SERVER: |
447 | if ((*nx_port == NEXUS_PORT_USER_PIPE_CLIENT && |
448 | u->nup_cli_nxb != NULL) || |
449 | (*nx_port == NEXUS_PORT_USER_PIPE_SERVER && |
450 | u->nup_srv_nxb != NULL)) { |
451 | error = EEXIST; |
452 | break; |
453 | } |
454 | |
455 | nxb = nxb_alloc(Z_WAITOK); |
456 | nxb_move(nxb0, nxb); |
457 | if (*nx_port == NEXUS_PORT_USER_PIPE_CLIENT) { |
458 | u->nup_cli_nxb = nxb; |
459 | } else { |
460 | u->nup_srv_nxb = nxb; |
461 | } |
462 | |
463 | ASSERT(error == 0); |
464 | break; |
465 | |
466 | default: |
467 | error = EDOM; |
468 | break; |
469 | } |
470 | |
471 | return error; |
472 | } |
473 | |
474 | static int |
475 | nx_upipe_dom_unbind_port(struct kern_nexus *nx, nexus_port_t nx_port) |
476 | { |
477 | struct nx_upipe *u = NX_UPIPE_PRIVATE(nx); |
478 | struct nxbind *nxb = NULL; |
479 | int error = 0; |
480 | |
481 | ASSERT(nx_port != NEXUS_PORT_ANY); |
482 | |
483 | switch (nx_port) { |
484 | case NEXUS_PORT_USER_PIPE_CLIENT: |
485 | case NEXUS_PORT_USER_PIPE_SERVER: |
486 | if ((nx_port == NEXUS_PORT_USER_PIPE_CLIENT && |
487 | u->nup_cli_nxb == NULL) || |
488 | (nx_port == NEXUS_PORT_USER_PIPE_SERVER && |
489 | u->nup_srv_nxb == NULL)) { |
490 | error = ENOENT; |
491 | break; |
492 | } |
493 | |
494 | if (nx_port == NEXUS_PORT_USER_PIPE_CLIENT) { |
495 | nxb = u->nup_cli_nxb; |
496 | u->nup_cli_nxb = NULL; |
497 | } else { |
498 | nxb = u->nup_srv_nxb; |
499 | u->nup_srv_nxb = NULL; |
500 | } |
501 | nxb_free(nxb); |
502 | ASSERT(error == 0); |
503 | break; |
504 | |
505 | default: |
506 | error = EDOM; |
507 | break; |
508 | } |
509 | |
510 | return error; |
511 | } |
512 | |
513 | static int |
514 | nx_upipe_dom_connect(struct kern_nexus_domain_provider *nxdom_prov, |
515 | struct kern_nexus *nx, struct kern_channel *ch, struct chreq *chr, |
516 | struct kern_channel *ch0, struct nxbind *nxb, struct proc *p) |
517 | { |
518 | #pragma unused(nxdom_prov) |
519 | nexus_port_t port = chr->cr_port; |
520 | int err = 0; |
521 | |
522 | SK_LOCK_ASSERT_HELD(); |
523 | |
524 | ASSERT(NX_DOM_PROV(nx) == nxdom_prov); |
525 | ASSERT(nx->nx_prov->nxprov_params->nxp_type == |
526 | nxdom_prov->nxdom_prov_dom->nxdom_type && |
527 | nx->nx_prov->nxprov_params->nxp_type == NEXUS_TYPE_USER_PIPE); |
528 | |
529 | /* |
530 | * XXX: channel in user packet pool mode is not supported for |
531 | * user-pipe for now. |
532 | */ |
533 | if (chr->cr_mode & CHMODE_USER_PACKET_POOL) { |
		SK_ERR("User packet pool mode not supported for upipe");
535 | err = ENOTSUP; |
536 | goto done; |
537 | } |
538 | |
539 | if (chr->cr_mode & CHMODE_EVENT_RING) { |
		SK_ERR("event ring is not supported for upipe");
541 | err = ENOTSUP; |
542 | goto done; |
543 | } |
544 | |
545 | if (chr->cr_mode & CHMODE_LOW_LATENCY) { |
		SK_ERR("low latency is not supported for upipe");
547 | err = ENOTSUP; |
548 | goto done; |
549 | } |
550 | |
551 | if (port == NEXUS_PORT_USER_PIPE_SERVER) { |
552 | chr->cr_real_endpoint = CH_ENDPOINT_USER_PIPE_MASTER; |
553 | } else if (port == NEXUS_PORT_USER_PIPE_CLIENT) { |
554 | chr->cr_real_endpoint = CH_ENDPOINT_USER_PIPE_SLAVE; |
555 | } else { |
556 | err = EINVAL; |
557 | goto done; |
558 | } |
559 | |
560 | chr->cr_endpoint = chr->cr_real_endpoint; |
561 | chr->cr_ring_set = RING_SET_DEFAULT; |
562 | chr->cr_pipe_id = 0; |
	(void) snprintf(chr->cr_name, sizeof(chr->cr_name), "upipe:%llu:%.*s",
	    nx->nx_id, (int)nx->nx_prov->nxprov_params->nxp_namelen,
	    nx->nx_prov->nxprov_params->nxp_name);
566 | |
567 | err = na_connect(nx, ch, chr, ch0, nxb, p); |
568 | done: |
569 | return err; |
570 | } |
571 | |
572 | static void |
573 | nx_upipe_dom_disconnect(struct kern_nexus_domain_provider *nxdom_prov, |
574 | struct kern_nexus *nx, struct kern_channel *ch) |
575 | { |
576 | #pragma unused(nxdom_prov) |
577 | SK_LOCK_ASSERT_HELD(); |
578 | |
	SK_D("channel 0x%llx -!- nexus 0x%llx (%s:\"%s\":%u:%d)", SK_KVA(ch),
	    SK_KVA(nx), nxdom_prov->nxdom_prov_name, ch->ch_na->na_name,
	    ch->ch_info->cinfo_nx_port, (int)ch->ch_info->cinfo_ch_ring_id);
582 | |
583 | na_disconnect(nx, ch); |
584 | /* |
585 | * Set NXF_REJECT on the nexus which would cause any channel on the |
586 | * peer adapter to cease to function. |
587 | */ |
588 | if (NX_PROV(nx)->nxprov_params->nxp_reject_on_close) { |
589 | os_atomic_or(&nx->nx_flags, NXF_REJECT, relaxed); |
590 | } |
591 | } |
592 | |
593 | static void |
594 | nx_upipe_dom_defunct(struct kern_nexus_domain_provider *nxdom_prov, |
595 | struct kern_nexus *nx, struct kern_channel *ch, struct proc *p) |
596 | { |
597 | #pragma unused(nxdom_prov, nx) |
598 | struct nexus_adapter *na = ch->ch_na; |
599 | struct nexus_upipe_adapter *pna = (struct nexus_upipe_adapter *)na; |
600 | ring_id_t qfirst = ch->ch_first[NR_TX]; |
601 | ring_id_t qlast = ch->ch_last[NR_TX]; |
602 | uint32_t i; |
603 | |
604 | LCK_MTX_ASSERT(&ch->ch_lock, LCK_MTX_ASSERT_OWNED); |
605 | ASSERT(!(ch->ch_flags & CHANF_KERNEL)); |
606 | ASSERT(na->na_type == NA_USER_PIPE); |
607 | |
608 | /* |
609 | * Inform the peer receiver thread in nx_upipe_na_rxsync() or the |
610 | * peer transmit thread in nx_upipe_na_txsync() about |
611 | * this endpoint going defunct. We utilize the TX ring's |
612 | * lock for serialization, since that is what's being used |
613 | * by the receiving endpoint. |
614 | */ |
615 | for (i = qfirst; i < qlast; i++) { |
616 | /* |
617 | * For maintaining lock ordering between the two channels of |
618 | * user pipe. |
619 | */ |
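		/*
		 * Note: because the pipe krings are cross-linked (this
		 * endpoint's RX kring's ckr_pipe is the peer's TX kring),
		 * both roles end up acquiring the same two kring locks in
		 * the same global order, which avoids a deadlock when the
		 * two endpoints go defunct concurrently.
		 */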
620 | if (pna->pna_role == CH_ENDPOINT_USER_PIPE_MASTER) { |
			(void) kr_enter(&NAKR(na, NR_TX)[i], TRUE);
			(void) kr_enter(NAKR(na, NR_RX)[i].ckr_pipe, TRUE);
		} else {
			(void) kr_enter(NAKR(na, NR_RX)[i].ckr_pipe, TRUE);
			(void) kr_enter(&NAKR(na, NR_TX)[i], TRUE);
626 | } |
627 | } |
628 | |
629 | na_ch_rings_defunct(ch, p); |
630 | |
631 | for (i = qfirst; i < qlast; i++) { |
632 | if (pna->pna_role == CH_ENDPOINT_USER_PIPE_MASTER) { |
			(void) kr_exit(NAKR(na, NR_RX)[i].ckr_pipe);
			(void) kr_exit(&NAKR(na, NR_TX)[i]);
		} else {
			(void) kr_exit(&NAKR(na, NR_TX)[i]);
			(void) kr_exit(NAKR(na, NR_RX)[i].ckr_pipe);
638 | } |
639 | } |
640 | } |
641 | |
642 | static void |
643 | nx_upipe_dom_defunct_finalize(struct kern_nexus_domain_provider *nxdom_prov, |
644 | struct kern_nexus *nx, struct kern_channel *ch, boolean_t locked) |
645 | { |
646 | #pragma unused(nxdom_prov) |
647 | struct nexus_upipe_adapter *pna = |
648 | (struct nexus_upipe_adapter *)ch->ch_na; |
649 | |
650 | if (!locked) { |
651 | SK_LOCK_ASSERT_NOTHELD(); |
652 | SK_LOCK(); |
653 | LCK_MTX_ASSERT(&ch->ch_lock, LCK_MTX_ASSERT_NOTOWNED); |
654 | } else { |
655 | SK_LOCK_ASSERT_HELD(); |
656 | LCK_MTX_ASSERT(&ch->ch_lock, LCK_MTX_ASSERT_OWNED); |
657 | } |
658 | |
659 | ASSERT(!(ch->ch_flags & CHANF_KERNEL)); |
660 | ASSERT(ch->ch_na->na_type == NA_USER_PIPE); |
661 | |
662 | /* |
663 | * At this point, we know that the arena shared by the master and |
664 | * slave adapters has no more valid mappings on the channels opened |
665 | * to them. We need to invoke na_defunct() on both adapters to |
666 | * release any remaining slots attached to their rings. |
667 | * |
668 | * Note that the 'ch' that we pass in here is irrelevant as we |
669 | * don't support user packet pool for user pipe. |
670 | */ |
671 | na_defunct(nx, ch, &pna->pna_up, locked); |
672 | if (pna->pna_peer != NULL) { |
673 | na_defunct(nx, ch, &pna->pna_peer->pna_up, locked); |
674 | } |
675 | |
676 | /* |
677 | * And if their parent adapter (the memory owner) is a pseudo |
678 | * nexus adapter that we initially created in nx_upipe_na_find(), |
679 | * invoke na_defunct() on it now to do the final teardown on |
680 | * the arena. |
681 | */ |
682 | if (pna->pna_parent->na_type == NA_PSEUDO) { |
683 | na_defunct(nx, ch, pna->pna_parent, locked); |
684 | } |
685 | |
	SK_D("%s(%d): ch 0x%llx -/- nx 0x%llx (%s:\"%s\":%u:%d)",
	    ch->ch_name, ch->ch_pid, SK_KVA(ch), SK_KVA(nx),
	    nxdom_prov->nxdom_prov_name, ch->ch_na->na_name,
	    ch->ch_info->cinfo_nx_port, (int)ch->ch_info->cinfo_ch_ring_id);
690 | |
691 | if (!locked) { |
692 | LCK_MTX_ASSERT(&ch->ch_lock, LCK_MTX_ASSERT_NOTOWNED); |
693 | SK_UNLOCK(); |
694 | } else { |
695 | LCK_MTX_ASSERT(&ch->ch_lock, LCK_MTX_ASSERT_OWNED); |
696 | SK_LOCK_ASSERT_HELD(); |
697 | } |
698 | } |
699 | |
700 | /* allocate the pipe array in the parent adapter */ |
701 | static int |
702 | nx_upipe_na_alloc(struct nexus_adapter *na, uint32_t npipes) |
703 | { |
704 | struct nexus_upipe_adapter **npa; |
705 | |
706 | if (npipes <= na->na_max_pipes) { |
		/* we already have at least as many entries as requested */
708 | return 0; |
709 | } |
710 | if (npipes < na->na_next_pipe || npipes > NX_UPIPE_MAXPIPES) { |
711 | return EINVAL; |
712 | } |
713 | |
714 | npa = sk_realloc_type_array(struct nexus_upipe_adapter *, |
715 | na->na_max_pipes, npipes, na->na_pipes, Z_WAITOK, skmem_tag_pipes); |
716 | if (npa == NULL) { |
717 | return ENOMEM; |
718 | } |
719 | |
720 | na->na_pipes = npa; |
721 | na->na_max_pipes = npipes; |
722 | |
723 | return 0; |
724 | } |
725 | |
/* deallocate the pipe array in the parent adapter */
727 | void |
728 | nx_upipe_na_dealloc(struct nexus_adapter *na) |
729 | { |
730 | if (na->na_pipes) { |
731 | if (na->na_next_pipe > 0) { |
			SK_ERR("freeing non-empty pipe array for %s "
			    "(%u dangling pipes)!", na->na_name,
			    na->na_next_pipe);
735 | } |
736 | sk_free_type_array(struct nexus_upipe_adapter *, |
737 | na->na_max_pipes, na->na_pipes); |
738 | na->na_pipes = NULL; |
739 | na->na_max_pipes = 0; |
740 | na->na_next_pipe = 0; |
741 | } |
742 | } |
743 | |
744 | /* find a pipe endpoint with the given id among the parent's pipes */ |
745 | static struct nexus_upipe_adapter * |
746 | nx_upipe_find(struct nexus_adapter *parent, uint32_t pipe_id) |
747 | { |
748 | uint32_t i; |
749 | struct nexus_upipe_adapter *na; |
750 | |
751 | for (i = 0; i < parent->na_next_pipe; i++) { |
752 | na = parent->na_pipes[i]; |
753 | if (na->pna_id == pipe_id) { |
754 | return na; |
755 | } |
756 | } |
757 | return NULL; |
758 | } |
759 | |
760 | /* add a new pipe endpoint to the parent array */ |
761 | static int |
762 | nx_upipe_na_add(struct nexus_adapter *parent, struct nexus_upipe_adapter *na) |
763 | { |
764 | if (parent->na_next_pipe >= parent->na_max_pipes) { |
765 | uint32_t npipes = parent->na_max_pipes ? |
766 | 2 * parent->na_max_pipes : 2; |
		int error = nx_upipe_na_alloc(parent, npipes);
768 | if (error) { |
769 | return error; |
770 | } |
771 | } |
772 | |
773 | parent->na_pipes[parent->na_next_pipe] = na; |
774 | na->pna_parent_slot = parent->na_next_pipe; |
775 | parent->na_next_pipe++; |
776 | return 0; |
777 | } |
778 | |
779 | /* remove the given pipe endpoint from the parent array */ |
780 | static void |
781 | nx_upipe_na_remove(struct nexus_adapter *parent, struct nexus_upipe_adapter *na) |
782 | { |
783 | uint32_t n; |
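	/*
	 * O(1) removal: shrink the array by one and, if the departing
	 * endpoint was not in the last slot, move the last entry into
	 * its place (updating that entry's parent-slot index) so the
	 * array stays dense.
	 */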
784 | n = --parent->na_next_pipe; |
785 | if (n != na->pna_parent_slot) { |
786 | struct nexus_upipe_adapter **p = |
787 | &parent->na_pipes[na->pna_parent_slot]; |
788 | *p = parent->na_pipes[n]; |
789 | (*p)->pna_parent_slot = na->pna_parent_slot; |
790 | } |
791 | parent->na_pipes[n] = NULL; |
792 | } |
793 | |
794 | static int |
795 | nx_upipe_na_txsync(struct __kern_channel_ring *txkring, struct proc *p, |
796 | uint32_t flags) |
797 | { |
798 | struct __kern_channel_ring *rxkring = txkring->ckr_pipe; |
799 | volatile uint64_t *tx_tsync, *tx_tnote, *rx_tsync; |
800 | int sent = 0, ret = 0; |
801 | |
802 | SK_DF(SK_VERB_USER_PIPE | SK_VERB_SYNC | SK_VERB_TX, |
803 | "%s(%d) kr \"%s\" (0x%llx) krflags 0x%b ring %u " |
	    "flags 0x%x -> kr \"%s\" (0x%llx) krflags 0x%b ring %u",
805 | sk_proc_name_address(p), sk_proc_pid(p), txkring->ckr_name, |
806 | SK_KVA(txkring), txkring->ckr_flags, CKRF_BITS, |
807 | txkring->ckr_ring_id, flags, rxkring->ckr_name, SK_KVA(rxkring), |
808 | rxkring->ckr_flags, CKRF_BITS, rxkring->ckr_ring_id); |
809 | |
810 | /* |
811 | * Serialize write access to the transmit ring, since another |
812 | * thread coming down for rxsync might pick up pending slots. |
813 | */ |
814 | ASSERT(txkring->ckr_owner == current_thread()); |
815 | |
816 | /* |
817 | * Record the time of sync and grab sync time of other side; |
818 | * use atomic store and load since we're not holding the |
819 | * lock used by the receive ring. This allows us to avoid |
820 | * the potentially costly os_atomic_thread_fence(seq_cst). |
821 | */ |
822 | /* deconst */ |
823 | tx_tsync = __DECONST(uint64_t *, &txkring->ckr_ring->ring_sync_time); |
824 | os_atomic_store(tx_tsync, txkring->ckr_sync_time, release); |
825 | |
826 | /* |
827 | * Read from the peer's kring, not its user ring; the peer's channel |
828 | * may be defunct, in which case it's unsafe to access its user ring. |
829 | */ |
830 | rx_tsync = __DECONST(uint64_t *, &rxkring->ckr_sync_time); |
831 | tx_tnote = __DECONST(uint64_t *, &txkring->ckr_ring->ring_notify_time); |
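	/* an atomic add of 0 doubles as an atomic load of the peer's sync time */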
832 | *tx_tnote = os_atomic_add_orig(rx_tsync, 0, relaxed); |
833 | |
834 | if (__probable(txkring->ckr_rhead != txkring->ckr_khead)) { |
835 | sent = nx_upipe_na_txsync_locked(txkring, p, flags, |
836 | &ret, FALSE); |
837 | } |
838 | |
839 | if (sent != 0) { |
840 | (void) rxkring->ckr_na_notify(rxkring, p, 0); |
841 | } |
842 | |
843 | return ret; |
844 | } |
845 | |
846 | int |
847 | nx_upipe_na_txsync_locked(struct __kern_channel_ring *txkring, struct proc *p, |
848 | uint32_t flags, int *ret, boolean_t rx) |
849 | { |
850 | #pragma unused(p, flags, rx) |
851 | struct __kern_channel_ring *rxkring = txkring->ckr_pipe; |
852 | const slot_idx_t lim_tx = txkring->ckr_lim; |
853 | const slot_idx_t lim_rx = rxkring->ckr_lim; |
854 | slot_idx_t j, k; |
855 | int n, m, b, sent = 0; |
856 | uint32_t byte_count = 0; |
857 | int limit; /* max # of slots to transfer */ |
858 | |
859 | *ret = 0; |
860 | |
861 | SK_DF(SK_VERB_USER_PIPE | SK_VERB_SYNC | SK_VERB_TX, |
862 | "%s(%d) kr \"%s\", kh %3u kt %3u | " |
	    "rh %3u rt %3u [pre%s]", sk_proc_name_address(p),
864 | sk_proc_pid(p), txkring->ckr_name, txkring->ckr_khead, |
865 | txkring->ckr_ktail, txkring->ckr_rhead, |
	    txkring->ckr_rtail, rx ? "*" : "");
867 | SK_DF(SK_VERB_USER_PIPE | SK_VERB_SYNC | SK_VERB_TX, |
868 | "%s(%d) kr \"%s\", kh %3u kt %3u | " |
	    "rh %3u rt %3u [pre%s]", sk_proc_name_address(p),
870 | sk_proc_pid(p), rxkring->ckr_name, rxkring->ckr_khead, |
871 | rxkring->ckr_ktail, rxkring->ckr_rhead, |
	    rxkring->ckr_rtail, rx ? "*" : "");
873 | |
874 | if (__improbable(KR_DROP(txkring) || KR_DROP(rxkring))) { |
875 | *ret = ENXIO; |
876 | goto done; |
877 | } |
878 | |
879 | j = rxkring->ckr_ktail; /* RX */ |
880 | k = txkring->ckr_khead; /* TX */ |
881 | |
882 | /* # of new tx slots */ |
883 | n = txkring->ckr_rhead - txkring->ckr_khead; |
884 | if (n < 0) { |
885 | n += txkring->ckr_num_slots; |
886 | } |
887 | limit = n; |
888 | |
889 | /* # of rx busy (unclaimed) slots */ |
890 | b = j - rxkring->ckr_khead; |
891 | if (b < 0) { |
892 | b += rxkring->ckr_num_slots; |
893 | } |
894 | |
895 | /* # of rx avail free slots (subtract busy from max) */ |
896 | m = lim_rx - b; |
897 | if (m < limit) { |
898 | limit = m; |
899 | } |
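	/*
	 * i.e. limit = min(new TX slots, free RX slots); the indices wrap,
	 * hence the "+= num_slots" above.  e.g. with 128-slot rings
	 * (lim == 127), 10 pending TX slots and 120 busy RX slots, only
	 * min(10, 127 - 120) == 7 slots can move on this pass.
	 */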
900 | |
901 | SK_DF(SK_VERB_USER_PIPE | SK_VERB_SYNC | SK_VERB_TX, |
902 | "%s(%d) kr \"%s\" -> new %u, kr \"%s\" " |
	    "-> free %u", sk_proc_name_address(p), sk_proc_pid(p),
904 | txkring->ckr_name, n, rxkring->ckr_name, m); |
905 | |
906 | /* rxring is full, or nothing to send? */ |
907 | if (__improbable((sent = limit) == 0)) { |
908 | SK_DF(SK_VERB_USER_PIPE | SK_VERB_SYNC | SK_VERB_TX, |
		    "%s(%d) kr \"%s\" -> %s%s",
		    sk_proc_name_address(p), sk_proc_pid(p), (n > m) ?
		    rxkring->ckr_name : txkring->ckr_name, ((n > m) ?
		    "no room avail" : "no new slots"),
		    (rx ? " (lost race, ok)" : ""));
914 | goto done; |
915 | } |
916 | |
917 | ASSERT(limit > 0); |
918 | while (limit--) { |
919 | struct __kern_slot_desc *ksd_tx = KR_KSD(txkring, k); |
920 | struct __user_slot_desc *usd_tx = KR_USD(txkring, k); |
921 | struct __kern_slot_desc *ksd_rx = KR_KSD(rxkring, j); |
922 | struct __user_slot_desc *usd_rx = KR_USD(rxkring, j); |
923 | struct __kern_quantum *kqum; |
924 | |
925 | kqum = ksd_tx->sd_qum; |
926 | /* |
927 | * Packets failing internalization should be dropped in |
928 | * TX sync prologue. |
929 | */ |
930 | ASSERT((kqum->qum_qflags & (QUM_F_INTERNALIZED | |
931 | QUM_F_FINALIZED)) == (QUM_F_INTERNALIZED | |
932 | QUM_F_FINALIZED)); |
933 | |
934 | byte_count += kqum->qum_len; |
935 | |
936 | /* |
937 | * Swap the slots. |
938 | * |
939 | * XXX: adi@apple.com -- this bypasses the slot attach/detach |
940 | * interface, and needs to be changed when upipe adopts the |
941 | * packet APIs. SD_SWAP() will perform a block copy of the |
942 | * swap, and will readjust the kernel slot descriptor's sd_user |
943 | * accordingly. |
944 | */ |
945 | SD_SWAP(ksd_rx, usd_rx, ksd_tx, usd_tx); |
946 | |
		j = SLOT_NEXT(j, lim_rx);
		k = SLOT_NEXT(k, lim_tx);
949 | } |
950 | |
	kr_update_stats(rxkring, sent, byte_count);
952 | if (__improbable(kr_stat_enable != 0)) { |
953 | txkring->ckr_stats = rxkring->ckr_stats; |
954 | } |
955 | |
956 | /* |
957 | * Make sure the slots are updated before ckr_ktail reach global |
958 | * visibility, since we are not holding rx ring's kr_enter(). |
959 | */ |
960 | os_atomic_thread_fence(seq_cst); |
961 | |
962 | rxkring->ckr_ktail = j; |
963 | txkring->ckr_khead = k; |
	txkring->ckr_ktail = SLOT_PREV(k, lim_tx);
965 | |
966 | done: |
967 | SK_DF(SK_VERB_USER_PIPE | SK_VERB_SYNC | SK_VERB_TX, |
968 | "%s(%d) kr \"%s\", kh %3u kt %3u | " |
	    "rh %3u rt %3u [post%s]", sk_proc_name_address(p),
970 | sk_proc_pid(p), txkring->ckr_name, txkring->ckr_khead, |
971 | txkring->ckr_ktail, txkring->ckr_rhead, |
	    txkring->ckr_rtail, rx ? "*" : "");
973 | SK_DF(SK_VERB_USER_PIPE | SK_VERB_SYNC | SK_VERB_TX, |
974 | "%s(%d) kr \"%s\", kh %3u kt %3u | " |
	    "rh %3u rt %3u [post%s]", sk_proc_name_address(p),
976 | sk_proc_pid(p), rxkring->ckr_name, rxkring->ckr_khead, |
977 | rxkring->ckr_ktail, rxkring->ckr_rhead, |
	    rxkring->ckr_rtail, rx ? "*" : "");
979 | |
980 | return sent; |
981 | } |
982 | |
983 | static int |
984 | nx_upipe_na_rxsync(struct __kern_channel_ring *rxkring, struct proc *p, |
985 | uint32_t flags) |
986 | { |
987 | #pragma unused(p) |
988 | struct __kern_channel_ring *txkring = rxkring->ckr_pipe; |
989 | volatile uint64_t *rx_tsync, *rx_tnote, *tx_tsync; |
990 | const slot_idx_t lim_rx = rxkring->ckr_lim; |
991 | int n; /* new slots from transmit side */ |
992 | int m, b, ret = 0; |
993 | uint32_t r; |
994 | |
995 | SK_DF(SK_VERB_USER_PIPE | SK_VERB_SYNC | SK_VERB_RX, |
996 | "%s(%d) kr \"%s\" (0x%llx) krflags 0x%b ring %u " |
	    "flags 0x%x <- kr \"%s\" (0x%llx) krflags 0x%b ring %u",
998 | sk_proc_name_address(p), sk_proc_pid(p), rxkring->ckr_name, |
999 | SK_KVA(rxkring), rxkring->ckr_flags, CKRF_BITS, |
1000 | rxkring->ckr_ring_id, flags, txkring->ckr_name, SK_KVA(txkring), |
1001 | txkring->ckr_flags, CKRF_BITS, txkring->ckr_ring_id); |
1002 | |
1003 | ASSERT(rxkring->ckr_owner == current_thread()); |
1004 | |
1005 | /* reclaim and get # of rx reclaimed slots */ |
	r = kr_reclaim(rxkring);
1007 | |
1008 | /* # of rx busy (unclaimed) slots */ |
1009 | b = rxkring->ckr_ktail - rxkring->ckr_khead; |
1010 | if (b < 0) { |
1011 | b += rxkring->ckr_num_slots; |
1012 | } |
1013 | |
1014 | /* # of rx avail free slots (subtract busy from max) */ |
1015 | m = lim_rx - b; |
1016 | |
1017 | /* |
1018 | * Check if there's any new slots on transmit ring; do this |
1019 | * first without acquiring that ring's ckr_qlock, and use |
1020 | * the memory barrier (paired with second one in txsync.) |
1021 | * If we missed the race we'd just pay the cost of acquiring |
1022 | * ckr_qlock and potentially returning from "internal txsync" |
1023 | * without anything to process, which is okay. |
1024 | */ |
1025 | os_atomic_thread_fence(seq_cst); |
1026 | n = txkring->ckr_rhead - txkring->ckr_khead; |
1027 | if (n < 0) { |
1028 | n += txkring->ckr_num_slots; |
1029 | } |
1030 | |
1031 | SK_DF(SK_VERB_USER_PIPE | SK_VERB_SYNC | SK_VERB_RX, |
	    "%s(%d) kr \"%s\" <- free %u, kr \"%s\" <- new %u",
1033 | sk_proc_name_address(p), sk_proc_pid(p), |
1034 | rxkring->ckr_name, m, txkring->ckr_name, n); |
1035 | |
	/*
	 * Record the time of sync and grab sync time of other side;
	 * use atomic store and load since we're not holding the
	 * lock used by the transmit (peer) ring. This allows us to
	 * avoid the potentially costly os_atomic_thread_fence(seq_cst).
	 */
1042 | /* deconst */ |
1043 | rx_tsync = __DECONST(uint64_t *, &rxkring->ckr_ring->ring_sync_time); |
1044 | os_atomic_store(rx_tsync, rxkring->ckr_sync_time, release); |
1045 | |
1046 | /* |
1047 | * Read from the peer's kring, not its user ring; the peer's channel |
1048 | * may be defunct, in which case it's unsafe to access its user ring. |
1049 | */ |
1050 | tx_tsync = __DECONST(uint64_t *, &txkring->ckr_sync_time); |
1051 | rx_tnote = __DECONST(uint64_t *, &rxkring->ckr_ring->ring_notify_time); |
1052 | *rx_tnote = os_atomic_add_orig(tx_tsync, 0, relaxed); |
1053 | |
	/*
	 * If we have slots to pick up from the transmit side and we
	 * have space available, perform an equivalent of "internal txsync".
	 *
	 * Acquire write access to the transmit (peer) ring and serialize
	 * access to it, since another thread coming down for txsync
	 * might add new slots.
	 * If we fail to get the kring lock, then don't worry because
	 * there's already a transmit sync in progress to move packets.
	 */
1064 | if (__probable(n != 0 && m != 0 && (flags & NA_SYNCF_MONITOR) == 0)) { |
1065 | (void) kr_enter(txkring, TRUE); |
		n = nx_upipe_na_txsync_locked(txkring, p, flags, &ret, TRUE);
1067 | kr_exit(txkring); |
1068 | } else { |
1069 | n = 0; |
1070 | } |
1071 | |
1072 | /* |
1073 | * If we have reclaimed some slots or transferred new slots |
1074 | * from the transmit side, notify the other end. Also notify |
1075 | * ourselves to pick up newly transferred ones, if any. |
1076 | */ |
1077 | if (__probable(r != 0 || n != 0)) { |
1078 | SK_DF(SK_VERB_USER_PIPE | SK_VERB_SYNC | SK_VERB_RX, |
1079 | "%s(%d) kr \"%s\", kh %3u kt %3u | " |
		    "rh %3u rt %3u [rel %u new %u]",
1081 | sk_proc_name_address(p), sk_proc_pid(p), rxkring->ckr_name, |
1082 | rxkring->ckr_khead, rxkring->ckr_ktail, |
1083 | rxkring->ckr_rhead, rxkring->ckr_rtail, r, n); |
1084 | |
1085 | (void) txkring->ckr_na_notify(txkring, p, 0); |
1086 | } |
1087 | |
1088 | return ret; |
1089 | } |
1090 | |
1091 | static int |
1092 | nx_upipe_na_rings_create(struct nexus_adapter *na, struct kern_channel *ch) |
1093 | { |
1094 | struct nexus_upipe_adapter *pna = (struct nexus_upipe_adapter *)na; |
1095 | struct nexus_adapter *ona = &pna->pna_peer->pna_up; |
1096 | int error = 0; |
1097 | enum txrx t; |
1098 | uint32_t i; |
1099 | |
1100 | /* |
1101 | * Create krings and all the rings for this end; |
1102 | * we'll update ckr_save_ring pointers below. |
1103 | */ |
1104 | error = na_rings_mem_setup(na, FALSE, ch); |
1105 | if (error != 0) { |
1106 | goto err; |
1107 | } |
1108 | |
1109 | /* update our hidden ring pointers */ |
1110 | for_rx_tx(t) { |
1111 | for (i = 0; i < na_get_nrings(na, t); i++) { |
1112 | NAKR(na, t)[i].ckr_save_ring = |
1113 | NAKR(na, t)[i].ckr_ring; |
1114 | } |
1115 | } |
1116 | |
1117 | /* now, create krings and rings of the other end */ |
1118 | error = na_rings_mem_setup(ona, FALSE, ch); |
1119 | if (error != 0) { |
1120 | na_rings_mem_teardown(na, ch, FALSE); /* this end */ |
1121 | goto err; |
1122 | } |
1123 | |
1124 | for_rx_tx(t) { |
		for (i = 0; i < na_get_nrings(ona, t); i++) {
			NAKR(ona, t)[i].ckr_save_ring =
			    NAKR(ona, t)[i].ckr_ring;
1128 | } |
1129 | } |
1130 | |
1131 | /* cross link the krings */ |
1132 | for_rx_tx(t) { |
1133 | /* swap NR_TX <-> NR_RX (skip host ring) */ |
1134 | enum txrx r = sk_txrx_swap(t); |
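		/*
		 * Pair this endpoint's TX kring i with the peer's RX kring i
		 * (and vice versa) via ckr_pipe, so each sync call can reach
		 * its counterpart ring directly.
		 */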
1135 | for (i = 0; i < na_get_nrings(na, t); i++) { |
			NAKR(na, t)[i].ckr_pipe =
			    NAKR(&pna->pna_peer->pna_up, r) + i;
			NAKR(&pna->pna_peer->pna_up, r)[i].ckr_pipe =
			    NAKR(na, t) + i;
1140 | } |
1141 | } |
1142 | err: |
1143 | return error; |
1144 | } |
1145 | |
1146 | /* |
 * Pipe endpoints are created and destroyed together, so that endpoints do not
1148 | * have to check for the existence of their peer at each ?xsync. |
1149 | * |
1150 | * To play well with the existing nexus adapter infrastructure (refcounts etc.), |
1151 | * we adopt the following strategy: |
1152 | * |
1153 | * 1) The first endpoint that is created also creates the other endpoint and |
1154 | * grabs a reference to it. |
1155 | * |
1156 | * state A) user1 --> endpoint1 --> endpoint2 |
1157 | * |
1158 | * 2) If, starting from state A, endpoint2 is then registered, endpoint1 gives |
1159 | * its reference to the user: |
1160 | * |
1161 | * state B) user1 --> endpoint1 endpoint2 <--- user2 |
1162 | * |
 * 3) Assume that, starting from state B, endpoint2 is closed. In the unregister
1164 | * callback endpoint2 notes that endpoint1 is still active and adds a reference |
1165 | * from endpoint1 to itself. When user2 then releases her own reference, |
1166 | * endpoint2 is not destroyed and we are back to state A. A symmetrical state |
1167 | * would be reached if endpoint1 were released instead. |
1168 | * |
1169 | * 4) If, starting from state A, endpoint1 is closed, the destructor notes that |
1170 | * it owns a reference to endpoint2 and releases it. |
1171 | * |
1172 | * Something similar goes on for the creation and destruction of the krings. |
1173 | */ |
1174 | |
1175 | |
1176 | /* |
1177 | * nx_upipe_na_krings_create. |
1178 | * |
1179 | * There are two cases: |
1180 | * |
1181 | * 1) state is |
1182 | * |
1183 | * usr1 --> e1 --> e2 |
1184 | * |
1185 | * and we are e1. We have to create both sets |
1186 | * of krings. |
1187 | * |
1188 | * 2) state is |
1189 | * |
1190 | * usr1 --> e1 --> e2 |
1191 | * |
1192 | * and we are e2. e1 is certainly registered and our |
1193 | * krings already exist, but they may be hidden. |
1194 | */ |
1195 | static int |
1196 | nx_upipe_na_krings_create(struct nexus_adapter *na, struct kern_channel *ch) |
1197 | { |
1198 | struct nexus_upipe_adapter *pna = (struct nexus_upipe_adapter *)na; |
1199 | int error = 0; |
1200 | enum txrx t; |
1201 | uint32_t i; |
1202 | |
1203 | /* |
1204 | * Verify symmetrical ring counts; validated |
1205 | * at nexus provider registration time. |
1206 | */ |
1207 | ASSERT(na_get_nrings(na, NR_TX) == na_get_nrings(na, NR_RX)); |
1208 | |
1209 | if (pna->pna_peer_ref) { |
1210 | /* case 1) above */ |
1211 | SK_DF(SK_VERB_USER_PIPE, |
		    "0x%llx: case 1, create everything", SK_KVA(na));
1213 | error = nx_upipe_na_rings_create(na, ch); |
1214 | } else { |
1215 | /* case 2) above */ |
1216 | /* recover the hidden rings */ |
1217 | SK_DF(SK_VERB_USER_PIPE, |
		    "0x%llx: case 2, hidden rings", SK_KVA(na));
1219 | for_rx_tx(t) { |
1220 | for (i = 0; i < na_get_nrings(na, t); i++) { |
1221 | NAKR(na, t)[i].ckr_ring = |
1222 | NAKR(na, t)[i].ckr_save_ring; |
1223 | } |
1224 | } |
1225 | } |
1226 | |
1227 | ASSERT(error == 0 || (na->na_tx_rings == NULL && |
1228 | na->na_rx_rings == NULL && na->na_slot_ctxs == NULL)); |
1229 | ASSERT(error == 0 || (pna->pna_peer->pna_up.na_tx_rings == NULL && |
1230 | pna->pna_peer->pna_up.na_rx_rings == NULL && |
1231 | pna->pna_peer->pna_up.na_slot_ctxs == NULL)); |
1232 | |
1233 | return error; |
1234 | } |
1235 | |
1236 | /* |
1237 | * nx_upipe_na_activate. |
1238 | * |
1239 | * There are two cases on registration (onoff==1) |
1240 | * |
1241 | * 1.a) state is |
1242 | * |
1243 | * usr1 --> e1 --> e2 |
1244 | * |
1245 | * and we are e1. Nothing special to do. |
1246 | * |
1247 | * 1.b) state is |
1248 | * |
1249 | * usr1 --> e1 --> e2 <-- usr2 |
1250 | * |
1251 | * and we are e2. Drop the ref e1 is holding. |
1252 | * |
1253 | * There are two additional cases on unregister (onoff==0) |
1254 | * |
1255 | * 2.a) state is |
1256 | * |
1257 | * usr1 --> e1 --> e2 |
1258 | * |
1259 | * and we are e1. Nothing special to do, e2 will |
1260 | * be cleaned up by the destructor of e1. |
1261 | * |
1262 | * 2.b) state is |
1263 | * |
1264 | * usr1 --> e1 e2 <-- usr2 |
1265 | * |
1266 | * and we are either e1 or e2. Add a ref from the |
1267 | * other end and hide our rings. |
1268 | */ |
1269 | static int |
1270 | nx_upipe_na_activate(struct nexus_adapter *na, na_activate_mode_t mode) |
1271 | { |
1272 | struct nexus_upipe_adapter *pna = (struct nexus_upipe_adapter *)na; |
1273 | |
1274 | SK_LOCK_ASSERT_HELD(); |
1275 | |
	SK_DF(SK_VERB_USER_PIPE, "na \"%s\" (0x%llx) %s", na->na_name,
1277 | SK_KVA(na), na_activate_mode2str(mode)); |
1278 | |
1279 | switch (mode) { |
1280 | case NA_ACTIVATE_MODE_ON: |
1281 | os_atomic_or(&na->na_flags, NAF_ACTIVE, relaxed); |
1282 | break; |
1283 | |
1284 | case NA_ACTIVATE_MODE_DEFUNCT: |
1285 | break; |
1286 | |
1287 | case NA_ACTIVATE_MODE_OFF: |
1288 | os_atomic_andnot(&na->na_flags, NAF_ACTIVE, relaxed); |
1289 | break; |
1290 | |
1291 | default: |
1292 | VERIFY(0); |
1293 | /* NOTREACHED */ |
1294 | __builtin_unreachable(); |
1295 | } |
1296 | |
1297 | if (pna->pna_peer_ref) { |
1298 | SK_DF(SK_VERB_USER_PIPE, |
		    "0x%llx: case 1.a or 2.a, nothing to do", SK_KVA(na));
1300 | return 0; |
1301 | } |
1302 | |
1303 | switch (mode) { |
1304 | case NA_ACTIVATE_MODE_ON: |
1305 | SK_DF(SK_VERB_USER_PIPE, |
		    "0x%llx: case 1.b, drop peer", SK_KVA(na));
1307 | if (pna->pna_peer->pna_peer_ref) { |
1308 | pna->pna_peer->pna_peer_ref = FALSE; |
1309 | (void) na_release_locked(na); |
1310 | } |
1311 | break; |
1312 | |
1313 | case NA_ACTIVATE_MODE_OFF: |
1314 | SK_DF(SK_VERB_USER_PIPE, |
		    "0x%llx: case 2.b, grab peer", SK_KVA(na));
1316 | if (!pna->pna_peer->pna_peer_ref) { |
1317 | na_retain_locked(na); |
1318 | pna->pna_peer->pna_peer_ref = TRUE; |
1319 | } |
1320 | break; |
1321 | |
1322 | default: |
1323 | break; |
1324 | } |
1325 | |
1326 | return 0; |
1327 | } |
1328 | |
1329 | /* |
1330 | * nx_upipe_na_krings_delete. |
1331 | * |
1332 | * There are two cases: |
1333 | * |
1334 | * 1) state is |
1335 | * |
1336 | * usr1 --> e1 --> e2 |
1337 | * |
1338 | * and we are e1 (e2 is not bound, so krings_delete cannot be |
1339 | * called on it); |
1340 | * |
1341 | * 2) state is |
1342 | * |
1343 | * usr1 --> e1 e2 <-- usr2 |
1344 | * |
1345 | * and we are either e1 or e2. |
1346 | * |
1347 | * In the former case we have to also delete the krings of e2; |
1348 | * in the latter case we do nothing (note that our krings |
1349 | * have already been hidden in the unregister callback). |
1350 | */ |
1351 | static void |
1352 | nx_upipe_na_krings_delete(struct nexus_adapter *na, struct kern_channel *ch, |
1353 | boolean_t defunct) |
1354 | { |
1355 | struct nexus_upipe_adapter *pna = (struct nexus_upipe_adapter *)na; |
1356 | struct nexus_adapter *ona; /* na of the other end */ |
1357 | uint32_t i; |
1358 | enum txrx t; |
1359 | |
1360 | SK_LOCK_ASSERT_HELD(); |
1361 | |
1362 | if (!pna->pna_peer_ref) { |
1363 | SK_DF(SK_VERB_USER_PIPE, |
		    "0x%llx: case 2, kept alive by peer", SK_KVA(na));
1365 | /* |
1366 | * If adapter is defunct (note the explicit test against |
1367 | * NAF_DEFUNCT, and not the "defunct" parameter passed in |
1368 | * by the caller), then the peer's channel has gone defunct. |
1369 | * We get here because this channel was not defuncted, and |
1370 | * that this is the last active reference to the adapter. |
1371 | * At this point we tear everything down, since the caller |
1372 | * will proceed to destroying the memory regions. |
1373 | */ |
1374 | if (na->na_flags & NAF_DEFUNCT) { |
1375 | na_rings_mem_teardown(na, ch, defunct); |
1376 | } |
1377 | return; |
1378 | } |
1379 | |
1380 | /* case 1) above */ |
1381 | SK_DF(SK_VERB_USER_PIPE, |
	    "0x%llx: case 1, deleting everything", SK_KVA(na));
1383 | |
1384 | ASSERT(na->na_channels == 0 || (na->na_flags & NAF_DEFUNCT)); |
1385 | |
1386 | /* restore the ring to be deleted on the peer */ |
1387 | ona = &pna->pna_peer->pna_up; |
1388 | if (ona->na_tx_rings == NULL) { |
		/*
		 * Already deleted; we must be on a cleanup-after-error
		 * path.  Just delete this end.
		 */
1394 | na_rings_mem_teardown(na, ch, defunct); |
1395 | return; |
1396 | } |
1397 | |
1398 | /* delete the memory rings */ |
1399 | na_rings_mem_teardown(na, ch, defunct); |
1400 | |
1401 | if (!defunct) { |
1402 | for_rx_tx(t) { |
			for (i = 0; i < na_get_nrings(ona, t); i++) {
				NAKR(ona, t)[i].ckr_ring =
				    NAKR(ona, t)[i].ckr_save_ring;
1406 | } |
1407 | } |
1408 | } |
1409 | |
	/* delete the memory rings of the other end */
1411 | na_rings_mem_teardown(ona, ch, defunct); |
1412 | } |
1413 | |
1414 | static void |
1415 | nx_upipe_na_dtor(struct nexus_adapter *na) |
1416 | { |
1417 | struct nexus_upipe_adapter *pna = (struct nexus_upipe_adapter *)na; |
1418 | struct nx_upipe *u = NX_UPIPE_PRIVATE(na->na_nx); |
1419 | |
1420 | SK_LOCK_ASSERT_HELD(); |
1421 | |
	SK_DF(SK_VERB_USER_PIPE, "0x%llx", SK_KVA(na));
1423 | if (pna->pna_peer_ref) { |
1424 | SK_DF(SK_VERB_USER_PIPE, |
		    "0x%llx: clean up peer 0x%llx", SK_KVA(na),
1426 | SK_KVA(&pna->pna_peer->pna_up)); |
1427 | pna->pna_peer_ref = FALSE; |
		(void) na_release_locked(&pna->pna_peer->pna_up);
1429 | } |
1430 | if (pna->pna_role == CH_ENDPOINT_USER_PIPE_MASTER) { |
		nx_upipe_na_remove(pna->pna_parent, pna);
	}
	(void) na_release_locked(pna->pna_parent);
1434 | pna->pna_parent = NULL; |
1435 | |
1436 | /* release reference to parent adapter held by nx_upipe_na_find() */ |
1437 | ASSERT(u->nup_pna_users != 0); |
1438 | if (--u->nup_pna_users == 0) { |
1439 | ASSERT(u->nup_pna != NULL); |
		SK_DF(SK_VERB_USER_PIPE, "release parent: \"%s\" (0x%llx)",
		    u->nup_pna->na_name, SK_KVA(u->nup_pna));
		na_release_locked(u->nup_pna);
1443 | u->nup_pna = NULL; |
1444 | } |
1445 | } |
1446 | |
1447 | int |
1448 | nx_upipe_na_find(struct kern_nexus *nx, struct kern_channel *ch, |
1449 | struct chreq *chr, struct nxbind *nxb, struct proc *p, |
1450 | struct nexus_adapter **na, boolean_t create) |
1451 | { |
1452 | #pragma unused(ch, p) |
1453 | struct nx_upipe *u = NX_UPIPE_PRIVATE(nx); |
1454 | struct nxprov_params *nxp = NX_PROV(nx)->nxprov_params; |
1455 | struct nexus_adapter *pna = NULL; /* parent adapter */ |
1456 | boolean_t anon = NX_ANONYMOUS_PROV(nx); |
1457 | struct nexus_upipe_adapter *mna, *sna, *req; |
1458 | ch_endpoint_t ep = chr->cr_endpoint; |
1459 | uint32_t pipe_id; |
1460 | int error; |
1461 | |
1462 | SK_LOCK_ASSERT_HELD(); |
1463 | *na = NULL; |
1464 | |
1465 | #if SK_LOG |
1466 | uuid_string_t uuidstr; |
1467 | SK_D("name \"%s\" spec_uuid \"%s\" port %d mode 0x%b pipe_id %u " |
	    "ring_id %d ring_set %u ep_type %u:%u create %u%s",
1469 | chr->cr_name, sk_uuid_unparse(chr->cr_spec_uuid, uuidstr), |
1470 | (int)chr->cr_port, chr->cr_mode, CHMODE_BITS, |
1471 | chr->cr_pipe_id, (int)chr->cr_ring_id, chr->cr_ring_set, |
1472 | chr->cr_real_endpoint, chr->cr_endpoint, create, |
1473 | (ep != CH_ENDPOINT_USER_PIPE_MASTER && |
	    ep != CH_ENDPOINT_USER_PIPE_SLAVE) ? " (skipped)" : "");
1475 | #endif /* SK_LOG */ |
1476 | |
1477 | if (ep != CH_ENDPOINT_USER_PIPE_MASTER && |
1478 | ep != CH_ENDPOINT_USER_PIPE_SLAVE) { |
1479 | return 0; |
1480 | } |
1481 | |
1482 | /* |
1483 | * Check client credentials. |
1484 | */ |
1485 | if (chr->cr_port == NEXUS_PORT_USER_PIPE_SERVER) { |
1486 | if (!anon && (u->nup_srv_nxb == NULL || nxb == NULL || |
1487 | !nxb_is_equal(u->nup_srv_nxb, nxb))) { |
1488 | return EACCES; |
1489 | } |
1490 | } else { |
1491 | ASSERT(chr->cr_port == NEXUS_PORT_USER_PIPE_CLIENT); |
1492 | if (!anon && (u->nup_cli_nxb == NULL || nxb == NULL || |
1493 | !nxb_is_equal(u->nup_cli_nxb, nxb))) { |
1494 | return EACCES; |
1495 | } |
1496 | } |
1497 | |
1498 | /* |
1499 | * First, try to find a previously-created parent adapter |
1500 | * for this nexus; else, create one and store it in the |
1501 | * nexus. We'll release this at nexus destructor time. |
1502 | */ |
1503 | if ((pna = u->nup_pna) != NULL) { |
		na_retain_locked(pna);	/* for us */
		SK_DF(SK_VERB_USER_PIPE, "found parent: \"%s\" (0x%llx)",
1506 | pna->na_name, SK_KVA(pna)); |
1507 | } else { |
1508 | /* callee will hold a reference for us upon success */ |
1509 | error = na_pseudo_create(nx, chr, &pna); |
1510 | if (error != 0) { |
			SK_ERR("parent create failed: %d", error);
1512 | return error; |
1513 | } |
1514 | /* hold an extra reference for nx_upipe */ |
1515 | u->nup_pna = pna; |
		na_retain_locked(pna);
		SK_DF(SK_VERB_USER_PIPE, "created parent: \"%s\" (0x%llx)",
		    pna->na_name, SK_KVA(pna));
1519 | } |
1520 | |
1521 | /* next, lookup the pipe id in the parent list */ |
1522 | req = NULL; |
1523 | pipe_id = chr->cr_pipe_id; |
	mna = nx_upipe_find(pna, pipe_id);
1525 | if (mna != NULL) { |
1526 | if (mna->pna_role == ep) { |
1527 | SK_DF(SK_VERB_USER_PIPE, |
			    "found pipe_id %u directly at slot %u",
1529 | pipe_id, mna->pna_parent_slot); |
1530 | req = mna; |
1531 | } else { |
1532 | SK_DF(SK_VERB_USER_PIPE, |
			    "found pipe_id %u indirectly at slot %u",
1534 | pipe_id, mna->pna_parent_slot); |
1535 | req = mna->pna_peer; |
1536 | } |
1537 | /* |
1538 | * The pipe we have found already holds a ref to the parent, |
1539 | * so we need to drop the one we got from above. |
1540 | */ |
		(void) na_release_locked(pna);
1542 | goto found; |
1543 | } |
1544 | SK_DF(SK_VERB_USER_PIPE, |
	    "pipe_id %u not found, create %u", pipe_id, create);
1546 | if (!create) { |
1547 | error = ENODEV; |
1548 | goto put_out; |
1549 | } |
1550 | /* |
1551 | * We create both master and slave. |
1552 | * The endpoint we were asked for holds a reference to |
1553 | * the other one. |
1554 | */ |
	mna = na_upipe_alloc(Z_WAITOK);
1556 | |
1557 | ASSERT(mna->pna_up.na_type == NA_USER_PIPE); |
1558 | ASSERT(mna->pna_up.na_free == na_upipe_free); |
1559 | |
	(void) snprintf(mna->pna_up.na_name, sizeof(mna->pna_up.na_name),
	    "%s{%u", pna->na_name, pipe_id);
	uuid_generate_random(mna->pna_up.na_uuid);
1563 | |
1564 | mna->pna_id = pipe_id; |
1565 | mna->pna_role = CH_ENDPOINT_USER_PIPE_MASTER; |
1566 | mna->pna_parent = pna; |
1567 | mna->pna_up.na_txsync = nx_upipe_na_txsync; |
1568 | mna->pna_up.na_rxsync = nx_upipe_na_rxsync; |
1569 | mna->pna_up.na_activate = nx_upipe_na_activate; |
1570 | mna->pna_up.na_dtor = nx_upipe_na_dtor; |
1571 | mna->pna_up.na_krings_create = nx_upipe_na_krings_create; |
1572 | mna->pna_up.na_krings_delete = nx_upipe_na_krings_delete; |
1573 | mna->pna_up.na_arena = pna->na_arena; |
1574 | skmem_arena_retain((&mna->pna_up)->na_arena); |
1575 | os_atomic_or(&mna->pna_up.na_flags, NAF_MEM_LOANED, relaxed); |
1576 | *(nexus_meta_type_t *)(uintptr_t)&mna->pna_up.na_md_type = |
1577 | pna->na_md_type; |
1578 | *(nexus_meta_subtype_t *)(uintptr_t)&mna->pna_up.na_md_subtype = |
1579 | pna->na_md_subtype; |
1580 | |
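/*
 * User pipes expose no nexus statistics; the flow advisory table
 * size comes straight from the nexus provider parameters.
 */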
1581 | *(nexus_stats_type_t *)(uintptr_t)&mna->pna_up.na_stats_type = |
1582 | NEXUS_STATS_TYPE_INVALID; |
1583 | *(uint32_t *)(uintptr_t)&mna->pna_up.na_flowadv_max = |
1584 | nxp->nxp_flowadv_max; |
1585 | ASSERT(mna->pna_up.na_flowadv_max == 0 || |
1586 | skmem_arena_nexus(mna->pna_up.na_arena)->arn_flowadv_obj != NULL); |
1587 | |
1588 | /* |
1589 | * Parent adapter parameters must match the nexus provider's by the |
1590 | * time we get here, since na_find() above shouldn't return |
1591 | * one otherwise. |
1592 | */ |
1593 | na_set_nrings(na: &mna->pna_up, t: NR_TX, v: nxp->nxp_tx_rings); |
1594 | na_set_nrings(na: &mna->pna_up, t: NR_RX, v: nxp->nxp_rx_rings); |
1595 | na_set_nslots(na: &mna->pna_up, t: NR_TX, v: nxp->nxp_tx_slots); |
1596 | na_set_nslots(na: &mna->pna_up, t: NR_RX, v: nxp->nxp_rx_slots); |
1597 | ASSERT(na_get_nrings(&mna->pna_up, NR_TX) == na_get_nrings(pna, NR_TX)); |
1598 | ASSERT(na_get_nrings(&mna->pna_up, NR_RX) == na_get_nrings(pna, NR_RX)); |
1599 | ASSERT(na_get_nslots(&mna->pna_up, NR_TX) == na_get_nslots(pna, NR_TX)); |
1600 | ASSERT(na_get_nslots(&mna->pna_up, NR_RX) == na_get_nslots(pna, NR_RX)); |
1601 | |
1602 | na_attach_common(&mna->pna_up, nx, &nx_upipe_prov_s); |
1603 | |
1604 | /* register the master with the parent */ |
error = nx_upipe_na_add(pna, mna);
1606 | if (error != 0) { |
1607 | goto free_mna; |
1608 | } |
1609 | |
1610 | /* create the slave */ |
sna = na_upipe_alloc(Z_WAITOK);
1612 | |
1613 | /* most fields are the same, copy from master and then fix */ |
bcopy(mna, sna, sizeof(*sna));
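/*
 * The bcopy above duplicated the master's arena pointer and flags
 * verbatim, so take a separate arena reference on the slave's behalf.
 */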
1615 | skmem_arena_retain((&sna->pna_up)->na_arena); |
1616 | os_atomic_or(&sna->pna_up.na_flags, NAF_MEM_LOANED, relaxed); |
1617 | |
1618 | ASSERT(sna->pna_up.na_type == NA_USER_PIPE); |
1619 | ASSERT(sna->pna_up.na_free == na_upipe_free); |
1620 | |
(void) snprintf(sna->pna_up.na_name, sizeof(sna->pna_up.na_name),
"%s}%u", pna->na_name, pipe_id);
uuid_generate_random(sna->pna_up.na_uuid);
1624 | |
1625 | sna->pna_role = CH_ENDPOINT_USER_PIPE_SLAVE; |
1626 | na_attach_common(&sna->pna_up, nx, &nx_upipe_prov_s); |
1627 | |
1628 | /* join the two endpoints */ |
1629 | mna->pna_peer = sna; |
1630 | sna->pna_peer = mna; |
1631 | |
1632 | /* |
1633 | * We already have a reference to the parent, but we |
1634 | * need another one for the other endpoint we created |
1635 | */ |
na_retain_locked(pna);
1637 | |
1638 | if ((chr->cr_mode & CHMODE_DEFUNCT_OK) != 0) { |
1639 | os_atomic_or(&pna->na_flags, NAF_DEFUNCT_OK, relaxed); |
1640 | } |
1641 | |
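/*
 * The endpoint being returned to the caller is the one that keeps
 * the reference on its peer (pna_peer_ref); the peer thus stays
 * alive for as long as the requested endpoint does.
 */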
1642 | if (ep == CH_ENDPOINT_USER_PIPE_MASTER) { |
1643 | req = mna; |
1644 | mna->pna_peer_ref = TRUE; |
1645 | na_retain_locked(na: &sna->pna_up); |
1646 | } else { |
1647 | req = sna; |
1648 | sna->pna_peer_ref = TRUE; |
1649 | na_retain_locked(na: &mna->pna_up); |
1650 | } |
1651 | |
1652 | /* parent adapter now has two users (mna and sna) */ |
1653 | u->nup_pna_users += 2; |
1654 | |
1655 | #if SK_LOG |
1656 | SK_DF(SK_VERB_USER_PIPE, "created master 0x%llx and slave 0x%llx" , |
1657 | SK_KVA(mna), SK_KVA(sna)); |
1658 | SK_DF(SK_VERB_USER_PIPE, "mna: \"%s\"" , mna->pna_up.na_name); |
1659 | SK_DF(SK_VERB_USER_PIPE, " UUID: %s" , |
1660 | sk_uuid_unparse(mna->pna_up.na_uuid, uuidstr)); |
1661 | SK_DF(SK_VERB_USER_PIPE, " nx: 0x%llx (\"%s\":\"%s\")" , |
1662 | SK_KVA(mna->pna_up.na_nx), NX_DOM(mna->pna_up.na_nx)->nxdom_name, |
1663 | NX_DOM_PROV(mna->pna_up.na_nx)->nxdom_prov_name); |
1664 | SK_DF(SK_VERB_USER_PIPE, " flags: 0x%b" , |
1665 | mna->pna_up.na_flags, NAF_BITS); |
1666 | SK_DF(SK_VERB_USER_PIPE, " flowadv_max: %u" , |
1667 | mna->pna_up.na_flowadv_max); |
1668 | SK_DF(SK_VERB_USER_PIPE, " rings: tx %u rx %u" , |
1669 | na_get_nrings(&mna->pna_up, NR_TX), |
1670 | na_get_nrings(&mna->pna_up, NR_RX)); |
1671 | SK_DF(SK_VERB_USER_PIPE, " slots: tx %u rx %u" , |
1672 | na_get_nslots(&mna->pna_up, NR_TX), |
1673 | na_get_nslots(&mna->pna_up, NR_RX)); |
1674 | SK_DF(SK_VERB_USER_PIPE, " next_pipe: %u" , mna->pna_up.na_next_pipe); |
1675 | SK_DF(SK_VERB_USER_PIPE, " max_pipes: %u" , mna->pna_up.na_max_pipes); |
1676 | SK_DF(SK_VERB_USER_PIPE, " parent: \"%s\"" , |
1677 | mna->pna_parent->na_name); |
1678 | SK_DF(SK_VERB_USER_PIPE, " id: %u" , mna->pna_id); |
1679 | SK_DF(SK_VERB_USER_PIPE, " role: %u" , mna->pna_role); |
1680 | SK_DF(SK_VERB_USER_PIPE, " peer_ref: %u" , mna->pna_peer_ref); |
1681 | SK_DF(SK_VERB_USER_PIPE, " parent_slot: %u" , mna->pna_parent_slot); |
1682 | SK_DF(SK_VERB_USER_PIPE, "sna: \"%s\"" , sna->pna_up.na_name); |
1683 | SK_DF(SK_VERB_USER_PIPE, " UUID: %s" , |
1684 | sk_uuid_unparse(sna->pna_up.na_uuid, uuidstr)); |
1685 | SK_DF(SK_VERB_USER_PIPE, " nx: 0x%llx (\"%s\":\"%s\")" , |
1686 | SK_KVA(sna->pna_up.na_nx), NX_DOM(sna->pna_up.na_nx)->nxdom_name, |
1687 | NX_DOM_PROV(sna->pna_up.na_nx)->nxdom_prov_name); |
1688 | SK_DF(SK_VERB_USER_PIPE, " flags: 0x%b" , |
1689 | sna->pna_up.na_flags, NAF_BITS); |
1690 | SK_DF(SK_VERB_USER_PIPE, " flowadv_max: %u" , |
1691 | sna->pna_up.na_flowadv_max); |
1692 | SK_DF(SK_VERB_USER_PIPE, " rings: tx %u rx %u" , |
1693 | na_get_nrings(&sna->pna_up, NR_TX), |
1694 | na_get_nrings(&sna->pna_up, NR_RX)); |
1695 | SK_DF(SK_VERB_USER_PIPE, " slots: tx %u rx %u" , |
1696 | na_get_nslots(&sna->pna_up, NR_TX), |
1697 | na_get_nslots(&sna->pna_up, NR_RX)); |
1698 | SK_DF(SK_VERB_USER_PIPE, " next_pipe: %u" , sna->pna_up.na_next_pipe); |
1699 | SK_DF(SK_VERB_USER_PIPE, " max_pipes: %u" , sna->pna_up.na_max_pipes); |
1700 | SK_DF(SK_VERB_USER_PIPE, " parent: \"%s\"" , |
1701 | sna->pna_parent->na_name); |
1702 | SK_DF(SK_VERB_USER_PIPE, " id: %u" , sna->pna_id); |
1703 | SK_DF(SK_VERB_USER_PIPE, " role: %u" , sna->pna_role); |
1704 | SK_DF(SK_VERB_USER_PIPE, " peer_ref: %u" , sna->pna_peer_ref); |
1705 | SK_DF(SK_VERB_USER_PIPE, " parent_slot: %u" , sna->pna_parent_slot); |
1706 | #endif /* SK_LOG */ |
1707 | |
1708 | found: |
1709 | |
1710 | SK_DF(SK_VERB_USER_PIPE, "pipe_id %u role %s at 0x%llx" , pipe_id, |
1711 | (req->pna_role == CH_ENDPOINT_USER_PIPE_MASTER ? |
1712 | "master" : "slave" ), SK_KVA(req)); |
1713 | if ((chr->cr_mode & CHMODE_DEFUNCT_OK) == 0) { |
1714 | os_atomic_andnot(&pna->na_flags, NAF_DEFUNCT_OK, relaxed); |
1715 | } |
1716 | *na = &req->pna_up; |
na_retain_locked(*na);
1718 | |
1719 | /* |
1720 | * Keep the reference to the parent; it will be released |
1721 | * by the adapter's destructor. |
1722 | */ |
1723 | return 0; |
1724 | |
1725 | free_mna: |
1726 | if (mna->pna_up.na_arena != NULL) { |
1727 | skmem_arena_release((&mna->pna_up)->na_arena); |
1728 | mna->pna_up.na_arena = NULL; |
1729 | } |
1730 | NA_FREE(&mna->pna_up); |
1731 | put_out: |
(void) na_release_locked(pna);
1733 | return error; |
1734 | } |
1735 | |
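/*
 * Allocate a zeroed nx_upipe private structure from its zone.
 * The skywalk lock must be held by the caller.
 */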
1736 | static struct nx_upipe * |
1737 | nx_upipe_alloc(zalloc_flags_t how) |
1738 | { |
1739 | struct nx_upipe *u; |
1740 | |
1741 | SK_LOCK_ASSERT_HELD(); |
1742 | |
1743 | u = zalloc_flags(nx_upipe_zone, how | Z_ZERO); |
1744 | if (u) { |
1745 | SK_DF(SK_VERB_MEM, "upipe 0x%llx ALLOC" , SK_KVA(u)); |
1746 | } |
1747 | return u; |
1748 | } |
1749 | |
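/*
 * Free an nx_upipe.  By this point the parent adapter reference and
 * both client/server bind structures must already have been released,
 * as the assertions below verify.
 */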
1750 | static void |
1751 | nx_upipe_free(struct nx_upipe *u) |
1752 | { |
1753 | ASSERT(u->nup_pna == NULL); |
1754 | ASSERT(u->nup_pna_users == 0); |
1755 | ASSERT(u->nup_cli_nxb == NULL); |
1756 | ASSERT(u->nup_srv_nxb == NULL); |
1757 | |
1758 | SK_DF(SK_VERB_MEM, "upipe 0x%llx FREE" , SK_KVA(u)); |
1759 | zfree(nx_upipe_zone, u); |
1760 | } |
1761 | |