/*
 * Copyright (c) 1999-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Kernel Control domain - allows control connections to kernel
 * controllers and the exchange of data over those connections.
 *
 * Vincent Lubet, 040506
 * Christophe Allie, 010928
 * Justin C. Walker, 990319
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
#include <sys/kauth.h>
#include <sys/sysctl.h>
#include <sys/proc_info.h>
#include <net/if_var.h>

#include <mach/vm_types.h>

#include <kern/thread.h>

struct kctl {
    TAILQ_ENTRY(kctl) next;         /* controller chain */
    kern_ctl_ref kctlref;

    /* controller information provided when registering */
    char name[MAX_KCTL_NAME];       /* unique identifier */
    u_int32_t id;
    u_int32_t reg_unit;

    /* misc communication information */
    u_int32_t flags;                /* support flags */
    u_int32_t recvbufsize;          /* request more than the default buffer size */
    u_int32_t sendbufsize;          /* request more than the default buffer size */

    /* Dispatch functions */
    ctl_setup_func setup;           /* Setup contact */
    ctl_bind_func bind;             /* Prepare contact */
    ctl_connect_func connect;       /* Make contact */
    ctl_disconnect_func disconnect; /* Break contact */
    ctl_send_func send;             /* Send data to nke */
    ctl_send_list_func send_list;   /* Send list of packets */
    ctl_setopt_func setopt;         /* set kctl configuration */
    ctl_getopt_func getopt;         /* get kctl configuration */
    ctl_rcvd_func rcvd;             /* Notify nke when client reads data */

    TAILQ_HEAD(, ctl_cb) kcb_head;
    u_int32_t lastunit;
};

#if DEVELOPMENT || DEBUG
enum ctl_status {
    KCTL_DISCONNECTED = 0,
    KCTL_CONNECTING = 1,
    KCTL_CONNECTED = 2
};
#endif /* DEVELOPMENT || DEBUG */

struct ctl_cb {
    TAILQ_ENTRY(ctl_cb) next;       /* controller chain */
    lck_mtx_t mtx;
    struct socket *so;              /* controlling socket */
    struct kctl *kctl;              /* back pointer to controller */
    void *userdata;
    struct sockaddr_ctl sac;
    uint32_t usecount;
    uint32_t kcb_usecount;
    uint32_t require_clearing_count;
#if DEVELOPMENT || DEBUG
    enum ctl_status status;
#endif /* DEVELOPMENT || DEBUG */
};

#ifndef ROUNDUP64
#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t))
#endif

#ifndef ADVANCE64
#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n))
#endif

/*
 * Definitions and vars for the controls we support
 */

#define CTL_SENDSIZE (2 * 1024) /* default send buffer size */
#define CTL_RECVSIZE (8 * 1024) /* default receive buffer size */

const u_int32_t ctl_maxunit = 65536;
static LCK_ATTR_DECLARE(ctl_lck_attr, 0, 0);
static LCK_GRP_DECLARE(ctl_lck_grp, "Kernel Control Protocol");
static LCK_MTX_DECLARE_ATTR(ctl_mtx, &ctl_lck_grp, &ctl_lck_attr);

/* all the controllers are chained */
TAILQ_HEAD(kctl_list, kctl) ctl_head = TAILQ_HEAD_INITIALIZER(ctl_head);

static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_sofreelastref(struct socket *so);
static int ctl_bind(struct socket *, struct sockaddr *, struct proc *);
static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
static int ctl_disconnect(struct socket *);
static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p);
static int ctl_send(struct socket *, int, struct mbuf *,
    struct sockaddr *, struct mbuf *, struct proc *);
static int ctl_send_list(struct socket *, struct mbuf *, u_int *, int);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
static int ctl_usr_rcvd(struct socket *so, int flags);

static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);

static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
static void ctl_post_msg(u_int32_t event_code, u_int32_t id);

static int ctl_lock(struct socket *, int, void *);
static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);

static struct pr_usrreqs ctl_usrreqs = {
    .pru_attach = ctl_attach,
    .pru_bind = ctl_bind,
    .pru_connect = ctl_connect,
    .pru_control = ctl_ioctl,
    .pru_detach = ctl_detach,
    .pru_disconnect = ctl_disconnect,
    .pru_peeraddr = ctl_peeraddr,
    .pru_rcvd = ctl_usr_rcvd,
    .pru_send = ctl_send,
    .pru_send_list = ctl_send_list,
    .pru_sosend = sosend,
    .pru_sosend_list = sosend_list,
    .pru_soreceive = soreceive,
};

static struct protosw kctlsw[] = {
    {
        .pr_type = SOCK_DGRAM,
        .pr_protocol = SYSPROTO_CONTROL,
        .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
        .pr_ctloutput = ctl_ctloutput,
        .pr_usrreqs = &ctl_usrreqs,
        .pr_lock = ctl_lock,
        .pr_unlock = ctl_unlock,
        .pr_getlock = ctl_getlock,
    },
    {
        .pr_type = SOCK_STREAM,
        .pr_protocol = SYSPROTO_CONTROL,
        .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD,
        .pr_ctloutput = ctl_ctloutput,
        .pr_usrreqs = &ctl_usrreqs,
        .pr_lock = ctl_lock,
        .pr_unlock = ctl_unlock,
        .pr_getlock = ctl_getlock,
    }
};

__private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_pcblist SYSCTL_HANDLER_ARGS;
__private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS;

SYSCTL_NODE(_net_systm, OID_AUTO, kctl,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family");

struct kctlstat kctlstat;
SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_getstat, "S,kctlstat", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_reg_list, "S,xkctl_reg", "");

SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0,
    kctl_pcblist, "S,xkctlpcb", "");

u_int32_t ctl_autorcvbuf_max = 256 * 1024;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, "");

u_int32_t ctl_autorcvbuf_high = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, "");

u_int32_t ctl_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");

#if DEVELOPMENT || DEBUG
u_int32_t ctl_panic_debug = 0;
SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
#endif /* DEVELOPMENT || DEBUG */

#define KCTL_TBL_INC 16

static uintptr_t kctl_tbl_size = 0;
static u_int32_t kctl_tbl_growing = 0;
static u_int32_t kctl_tbl_growing_waiting = 0;
static uintptr_t kctl_tbl_count = 0;
static struct kctl **kctl_table = NULL;
static uintptr_t kctl_ref_gencnt = 0;

static void kctl_tbl_grow(void);
static kern_ctl_ref kctl_make_ref(struct kctl *kctl);
static void kctl_delete_ref(kern_ctl_ref);
static struct kctl *kctl_from_ref(kern_ctl_ref);

/*
 * Install the protosw's for the Kernel Control manager.
 */
__private_extern__ void
kern_control_init(struct domain *dp)
{
    struct protosw *pr;
    int i;
    int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw));

    VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
    VERIFY(dp == systemdomain);

    for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
        net_add_proto(pr, dp, 1);
    }
}

static void
kcb_delete(struct ctl_cb *kcb)
{
    if (kcb != 0) {
        lck_mtx_destroy(&kcb->mtx, &ctl_lck_grp);
        kfree_type(struct ctl_cb, kcb);
    }
}

/*
 * Kernel Controller user-request functions
 * attach function must exist and succeed
 * detach not necessary
 * we need a pcb for the per socket mutex
 */
static int
ctl_attach(struct socket *so, int proto, struct proc *p)
{
#pragma unused(proto, p)
    struct ctl_cb *kcb = 0;

    kcb = kalloc_type(struct ctl_cb, Z_WAITOK | Z_ZERO | Z_NOFAIL);

    lck_mtx_init(&kcb->mtx, &ctl_lck_grp, &ctl_lck_attr);
    kcb->so = so;
    so->so_pcb = (caddr_t)kcb;

    /*
     * For datagram, use character count for sbspace as its value
     * may be used for packetization and we do not want to
     * drop packets based on the sbspace hint that was just provided
     */
    if (SOCK_CHECK_TYPE(so, SOCK_DGRAM)) {
        so->so_rcv.sb_flags |= SB_KCTL;
        so->so_snd.sb_flags |= SB_KCTL;
    }
    return 0;
}

static int
ctl_sofreelastref(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    so->so_pcb = 0;

    if (kcb != 0) {
        struct kctl *kctl;
        if ((kctl = kcb->kctl) != 0) {
            lck_mtx_lock(&ctl_mtx);
            TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
            kctlstat.kcs_pcbcount--;
            kctlstat.kcs_gencnt++;
            lck_mtx_unlock(&ctl_mtx);
        }
        kcb_delete(kcb);
    }
    sofreelastref(so, 1);
    return 0;
}

/*
 * Use this function and ctl_kcb_require_clearing to serialize
 * critical calls into the kctl subsystem
 */
static void
ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
    LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
    while (kcb->require_clearing_count > 0) {
        msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH,
            "kcb_require_clearing", NULL);
    }
    kcb->kcb_usecount++;
}

static void
ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
{
    assert(kcb->kcb_usecount != 0);
    kcb->require_clearing_count++;
    kcb->kcb_usecount--;
    while (kcb->kcb_usecount > 0) { /* wait until no one else is running */
        msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH,
            "kcb_usecount", NULL);
    }
    kcb->kcb_usecount++;
}

static void
ctl_kcb_done_clearing(struct ctl_cb *kcb)
{
    assert(kcb->require_clearing_count != 0);
    kcb->require_clearing_count--;
    wakeup((caddr_t)&kcb->require_clearing_count);
}

static void
ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
{
    assert(kcb->kcb_usecount != 0);
    kcb->kcb_usecount--;
    if (kcb->require_clearing_count != 0) {
        wakeup((caddr_t)&kcb->kcb_usecount);
    }
}
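
/*
 * Typical usage of the four helpers above (a minimal sketch; the callers
 * in this file, e.g. ctl_detach() and ctl_bind(), follow this pattern):
 *
 *	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
 *	ctl_kcb_increment_use_count(kcb, mtx_held);  // join active callers
 *	ctl_kcb_require_clearing(kcb, mtx_held);     // wait to run alone
 *	// ...critical section: safe to tear down or rebind the kcb...
 *	ctl_kcb_done_clearing(kcb);                  // release blocked waiters
 *	ctl_kcb_decrement_use_count(kcb);            // drop our use count
 *
 * Non-exclusive callers such as ctl_send() take only the increment and
 * decrement calls and skip the require/done clearing pair.
 */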

static int
ctl_detach(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb == 0) {
        return 0;
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);
    ctl_kcb_require_clearing(kcb, mtx_held);

    if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
        kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
        /*
         * The unit was bound, but not connected.
         * Invoke the disconnect callback to clean up.
         */
        if (kcb->kctl->disconnect != NULL) {
            socket_unlock(so, 0);
            (*kcb->kctl->disconnect)(kcb->kctl->kctlref,
                kcb->sac.sc_unit, kcb->userdata);
            socket_lock(so, 0);
        }
    }

    soisdisconnected(so);
#if DEVELOPMENT || DEBUG
    kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
    so->so_flags |= SOF_PCBCLEARING;
    ctl_kcb_done_clearing(kcb);
    ctl_kcb_decrement_use_count(kcb);
    return 0;
}

static int
ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    struct kctl *kctl = NULL;
    int error = 0;
    struct sockaddr_ctl sa;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct ctl_cb *kcb_next = NULL;

    if (kcb == 0) {
        panic("ctl_setup_kctl so_pcb null");
    }

    if (kcb->kctl != NULL) {
        /* Already set up, skip */
        return 0;
    }

    if (nam->sa_len != sizeof(struct sockaddr_ctl)) {
        return EINVAL;
    }

    bcopy(nam, &sa, sizeof(struct sockaddr_ctl));

    lck_mtx_lock(&ctl_mtx);
    kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
    if (kctl == NULL) {
        lck_mtx_unlock(&ctl_mtx);
        return ENOENT;
    }

    if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
        (so->so_type != SOCK_STREAM)) ||
        (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
        (so->so_type != SOCK_DGRAM))) {
        lck_mtx_unlock(&ctl_mtx);
        return EPROTOTYPE;
    }

    if (kctl->flags & CTL_FLAG_PRIVILEGED) {
        if (p == 0) {
            lck_mtx_unlock(&ctl_mtx);
            return EINVAL;
        }
        if (kauth_cred_issuser(kauth_cred_get()) == 0) {
            lck_mtx_unlock(&ctl_mtx);
            return EPERM;
        }
    }

    if (kctl->setup != NULL) {
        error = (*kctl->setup)(&sa.sc_unit, &kcb->userdata);
        if (error != 0) {
            lck_mtx_unlock(&ctl_mtx);
            return error;
        }
    } else if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
        if (kcb_find(kctl, sa.sc_unit) != NULL) {
            lck_mtx_unlock(&ctl_mtx);
            return EBUSY;
        }
    } else {
        /* Find an unused unit, assumes the units are in order */
        u_int32_t unit = 1;

        TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
            if (kcb_next->sac.sc_unit > unit) {
                /* Found a gap, let's fill it in */
                break;
            }
            unit = kcb_next->sac.sc_unit + 1;
            if (unit == ctl_maxunit) {
                break;
            }
        }

        if (unit == ctl_maxunit) {
            lck_mtx_unlock(&ctl_mtx);
            return EBUSY;
        }

        sa.sc_unit = unit;
    }

    bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl));
    kcb->kctl = kctl;
    if (kcb_next != NULL) {
        TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
    } else {
        TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
    }
    kctlstat.kcs_pcbcount++;
    kctlstat.kcs_gencnt++;
    kctlstat.kcs_connections++;
    lck_mtx_unlock(&ctl_mtx);

    error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
    if (error) {
#if (DEBUG || DEVELOPMENT)
        if (ctl_debug) {
            printf("%s - soreserve(%llu, %u, %u) error %d\n",
                __func__, so->so_gencnt,
                kctl->sendbufsize, kctl->recvbufsize, error);
        }
#endif /* (DEBUG || DEVELOPMENT) */
        goto done;
    }

done:
    if (error) {
        soisdisconnected(so);
#if DEVELOPMENT || DEBUG
        kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
        lck_mtx_lock(&ctl_mtx);
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        kcb->kctl = NULL;
        kcb->sac.sc_unit = 0;
        kctlstat.kcs_pcbcount--;
        kctlstat.kcs_gencnt++;
        kctlstat.kcs_conn_fail++;
        lck_mtx_unlock(&ctl_mtx);
    }
    return error;
}

static int
ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb == NULL) {
        panic("ctl_bind so_pcb null");
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);
    ctl_kcb_require_clearing(kcb, mtx_held);

    error = ctl_setup_kctl(so, nam, p);
    if (error) {
        goto out;
    }

    if (kcb->kctl == NULL) {
        panic("ctl_bind kctl null");
    }

    if (kcb->kctl->bind == NULL) {
        error = EINVAL;
        goto out;
    }

    socket_unlock(so, 0);
    error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
    socket_lock(so, 0);

out:
    ctl_kcb_done_clearing(kcb);
    ctl_kcb_decrement_use_count(kcb);
    return error;
}

static int
ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
{
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb == NULL) {
        panic("ctl_connect so_pcb null");
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);
    ctl_kcb_require_clearing(kcb, mtx_held);

#if DEVELOPMENT || DEBUG
    if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
        panic("kctl already connecting/connected");
    }
    kcb->status = KCTL_CONNECTING;
#endif /* DEVELOPMENT || DEBUG */

    error = ctl_setup_kctl(so, nam, p);
    if (error) {
        goto out;
    }

    if (kcb->kctl == NULL) {
        panic("ctl_connect kctl null");
    }

    soisconnecting(so);
    socket_unlock(so, 0);
    error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
    socket_lock(so, 0);
    if (error) {
        goto end;
    }
    soisconnected(so);
#if DEVELOPMENT || DEBUG
    kcb->status = KCTL_CONNECTED;
#endif /* DEVELOPMENT || DEBUG */

end:
    if (error && kcb->kctl->disconnect) {
        /*
         * XXX Do not check the return value of disconnect here.
         * ipsec/utun_ctl_disconnect will return an error when
         * disconnect gets called after a connect failure.
         * If we ever decide to check the disconnect return value
         * here, make sure to revisit ipsec/utun_ctl_disconnect.
         */
        socket_unlock(so, 0);
        (*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata);
        socket_lock(so, 0);
    }
    if (error) {
        soisdisconnected(so);
#if DEVELOPMENT || DEBUG
        kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */
        lck_mtx_lock(&ctl_mtx);
        TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
        kcb->kctl = NULL;
        kcb->sac.sc_unit = 0;
        kctlstat.kcs_pcbcount--;
        kctlstat.kcs_gencnt++;
        kctlstat.kcs_conn_fail++;
        lck_mtx_unlock(&ctl_mtx);
    }
out:
    ctl_kcb_done_clearing(kcb);
    ctl_kcb_decrement_use_count(kcb);
    return error;
}

static int
ctl_disconnect(struct socket *so)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (kcb != NULL) {
        lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
        ctl_kcb_increment_use_count(kcb, mtx_held);
        ctl_kcb_require_clearing(kcb, mtx_held);
        struct kctl *kctl = kcb->kctl;

        if (kctl && kctl->disconnect) {
            socket_unlock(so, 0);
            (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit,
                kcb->userdata);
            socket_lock(so, 0);
        }

        soisdisconnected(so);
#if DEVELOPMENT || DEBUG
        kcb->status = KCTL_DISCONNECTED;
#endif /* DEVELOPMENT || DEBUG */

        socket_unlock(so, 0);
        lck_mtx_lock(&ctl_mtx);
        kcb->kctl = 0;
        kcb->sac.sc_unit = 0;
        while (kcb->usecount != 0) {
            msleep(&kcb->usecount, &ctl_mtx, 0, "kcb->usecount", 0);
        }
        TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
        kctlstat.kcs_pcbcount--;
        kctlstat.kcs_gencnt++;
        lck_mtx_unlock(&ctl_mtx);
        socket_lock(so, 0);
        ctl_kcb_done_clearing(kcb);
        ctl_kcb_decrement_use_count(kcb);
    }
    return 0;
}

static int
ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;
    struct sockaddr_ctl sc;

    if (kcb == NULL) { /* sanity check */
        return ENOTCONN;
    }

    if ((kctl = kcb->kctl) == NULL) {
        return EINVAL;
    }

    bzero(&sc, sizeof(struct sockaddr_ctl));
    sc.sc_len = sizeof(struct sockaddr_ctl);
    sc.sc_family = AF_SYSTEM;
    sc.ss_sysaddr = AF_SYS_CONTROL;
    sc.sc_id = kctl->id;
    sc.sc_unit = kcb->sac.sc_unit;

    *nam = dup_sockaddr((struct sockaddr *)&sc, 1);

    return 0;
}

static void
ctl_sbrcv_trim(struct socket *so)
{
    struct sockbuf *sb = &so->so_rcv;

    if (sb->sb_hiwat > sb->sb_idealsize) {
        u_int32_t diff;
        int32_t trim;

        /*
         * The difference between the ideal size and the
         * current size is the upper bound of the trimming
         */
        diff = sb->sb_hiwat - sb->sb_idealsize;
        /*
         * We cannot trim below the outstanding data
         */
        trim = sb->sb_hiwat - sb->sb_cc;

        trim = imin(trim, (int32_t)diff);

        if (trim > 0) {
            sbreserve(sb, sb->sb_hiwat - trim);

            if (ctl_debug) {
                printf("%s - shrunk to %d\n",
                    __func__, sb->sb_hiwat);
            }
        }
    }
}

static int
ctl_usr_rcvd(struct socket *so, int flags)
{
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;

    if (kcb == NULL) {
        return ENOTCONN;
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);

    if ((kctl = kcb->kctl) == NULL) {
        error = EINVAL;
        goto out;
    }

    if (kctl->rcvd) {
        socket_unlock(so, 0);
        (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags);
        socket_lock(so, 0);
    }

    ctl_sbrcv_trim(so);

out:
    ctl_kcb_decrement_use_count(kcb);
    return error;
}

static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control,
    struct proc *p)
{
#pragma unused(addr, p)
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;

    if (control) {
        m_freem(control);
    }

    if (kcb == NULL) { /* sanity check */
        m_freem(m);
        return ENOTCONN;
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);

    if (error == 0 && (kctl = kcb->kctl) == NULL) {
        error = EINVAL;
    }

    if (error == 0 && kctl->send) {
        so_tc_update_stats(m, so, m_get_service_class(m));
        socket_unlock(so, 0);
        error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
            m, flags);
        socket_lock(so, 0);
    } else {
        m_freem(m);
        if (error == 0) {
            error = ENOTSUP;
        }
    }
    if (error != 0) {
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
    }
    ctl_kcb_decrement_use_count(kcb);

    return error;
}

static int
ctl_send_list(struct socket *so, struct mbuf *m, u_int *pktcnt, int flags)
{
    int error = 0;
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;
    const bool update_tc = SOCK_DOM(so) == PF_INET || SOCK_DOM(so) == PF_INET6;

    if (kcb == NULL) { /* sanity check */
        m_freem_list(m);
        return ENOTCONN;
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);

    if ((kctl = kcb->kctl) == NULL) {
        error = EINVAL;
        goto done;
    }

    if (kctl->send_list != NULL) {
        struct mbuf *nxt;

        for (nxt = m; update_tc && nxt != NULL; nxt = nxt->m_nextpkt) {
            so_tc_update_stats(nxt, so, m_get_service_class(nxt));
        }

        socket_unlock(so, 0);
        error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
            kcb->userdata, m, flags);
        socket_lock(so, 0);
    } else {
        *pktcnt = 0;
        while (m != NULL && error == 0) {
            struct mbuf *nextpkt = m->m_nextpkt;

            m->m_nextpkt = NULL;

            if (update_tc) {
                so_tc_update_stats(m, so, m_get_service_class(m));
            }
            socket_unlock(so, 0);
            error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
                kcb->userdata, m, flags);
            socket_lock(so, 0);
            m = nextpkt;
            if (error == 0) {
                *pktcnt += 1;
            }
        }
        if (m != NULL) {
            m_freem_list(m);
        }
    }
done:
    if (error != 0) {
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
    }
    ctl_kcb_decrement_use_count(kcb);

    return error;
}

static errno_t
ctl_rcvbspace(struct socket *so, size_t datasize,
    u_int32_t kctlflags, u_int32_t flags)
{
    struct sockbuf *sb = &so->so_rcv;
    u_int32_t space = sbspace(sb);
    errno_t error;

    if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
        if ((u_int32_t) space >= datasize) {
            error = 0;
        } else {
            error = ENOBUFS;
        }
    } else if ((flags & CTL_DATA_CRIT) == 0) {
        /*
         * Reserve 25% for critical messages
         */
        if (space < (sb->sb_hiwat >> 2) ||
            space < datasize) {
            error = ENOBUFS;
        } else {
            error = 0;
        }
    } else {
        size_t autorcvbuf_max;

        /*
         * Allow overcommit of 25%
         */
        autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
            ctl_autorcvbuf_max);

        if ((u_int32_t) space >= datasize) {
            error = 0;
        } else if (sb->sb_hiwat < autorcvbuf_max) {
            /*
             * Grow with a little bit of leeway
             */
            size_t grow = datasize - space + _MSIZE;
            u_int32_t cc = (u_int32_t)MIN(MIN((sb->sb_hiwat + grow), autorcvbuf_max), UINT32_MAX);

            if (sbreserve(sb, cc) == 1) {
                if (sb->sb_hiwat > ctl_autorcvbuf_high) {
                    ctl_autorcvbuf_high = sb->sb_hiwat;
                }

                /*
                 * A final check
                 */
                if ((u_int32_t) sbspace(sb) >= datasize) {
                    error = 0;
                } else {
                    error = ENOBUFS;
                }

                if (ctl_debug) {
                    printf("%s - grown to %d error %d\n",
                        __func__, sb->sb_hiwat, error);
                }
            } else {
                error = ENOBUFS;
            }
        } else {
            error = ENOBUFS;
        }
    }
    return error;
}
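
/*
 * Worked example of the policy above, assuming sb_hiwat = 8192 bytes
 * (CTL_RECVSIZE) and sbspace() = 1500 bytes free:
 *
 *  - kctl registered without CTL_FLAG_REG_CRIT: a 1000-byte enqueue
 *    fits (1500 >= 1000), a 2000-byte enqueue fails with ENOBUFS.
 *  - CTL_FLAG_REG_CRIT set, message without CTL_DATA_CRIT: the bottom
 *    quarter of the buffer (8192 >> 2 = 2048 bytes) is kept in reserve,
 *    so even the 1000-byte enqueue fails (1500 < 2048).
 *  - CTL_FLAG_REG_CRIT set, message with CTL_DATA_CRIT: the buffer may
 *    grow up to min(sb_idealsize + 25%, ctl_autorcvbuf_max) so the
 *    critical message can still be queued.
 */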

errno_t
ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
    u_int32_t flags)
{
    struct socket *so;
    errno_t error = 0;
    int len = m->m_pkthdr.len;
    u_int32_t kctlflags;

    so = kcb_find_socket(kctlref, unit, &kctlflags);
    if (so == NULL) {
        return EINVAL;
    }

    if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
        error = ENOBUFS;
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
        goto bye;
    }
    if ((flags & CTL_DATA_EOR)) {
        m->m_flags |= M_EOR;
    }

    so_recv_data_stat(so, m, 0);
    if (sbappend_nodrop(&so->so_rcv, m) != 0) {
        if ((flags & CTL_DATA_NOWAKEUP) == 0) {
            sorwakeup(so);
        }
    } else {
        error = ENOBUFS;
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
    }
bye:
    if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
        printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
            __func__, error, len,
            so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
    }

    socket_unlock(so, 1);
    if (error != 0) {
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
    }

    return error;
}

/*
 * Compute space occupied by mbuf like sbappendrecord
 */
static int
m_space(struct mbuf *m)
{
    int space = 0;
    struct mbuf *nxt;

    for (nxt = m; nxt != NULL; nxt = nxt->m_next) {
        space += nxt->m_len;
    }

    return space;
}

errno_t
ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
    u_int32_t flags, struct mbuf **m_remain)
{
    struct socket *so = NULL;
    errno_t error = 0;
    struct mbuf *m, *nextpkt;
    int needwakeup = 0;
    int len = 0;
    u_int32_t kctlflags;

    /*
     * Need to point to the beginning of the list in case of early exit
     */
    m = m_list;

    /*
     * kcb_find_socket takes the socket lock with a reference
     */
    so = kcb_find_socket(kctlref, unit, &kctlflags);
    if (so == NULL) {
        error = EINVAL;
        goto done;
    }

    if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
        error = EOPNOTSUPP;
        goto done;
    }
    if (flags & CTL_DATA_EOR) {
        error = EINVAL;
        goto done;
    }

    for (m = m_list; m != NULL; m = nextpkt) {
        nextpkt = m->m_nextpkt;

        if (m->m_pkthdr.len == 0 && ctl_debug) {
            struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
            struct kctl *kctl = kcb == NULL ? NULL : kcb->kctl;
            uint32_t id = kctl == NULL ? -1 : kctl->id;

            printf("%s: %u:%u m_pkthdr.len is 0",
                __func__, id, unit);
        }

        /*
         * The mbuf is either appended or freed by sbappendrecord()
         * so it must not be referenced after the call
         */
        len = m_space(m);
        if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
            error = ENOBUFS;
            OSIncrementAtomic64(
                (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
            break;
        } else {
            /*
             * Unlink from the list, m is on its own
             */
            m->m_nextpkt = NULL;
            so_recv_data_stat(so, m, 0);
            if (sbappendrecord_nodrop(&so->so_rcv, m) != 0) {
                needwakeup = 1;
            } else {
                /*
                 * We free or return the remaining
                 * mbufs in the list
                 */
                m = nextpkt;
                error = ENOBUFS;
                OSIncrementAtomic64(
                    (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
                break;
            }
        }
    }
    if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
        sorwakeup(so);
    }

done:
    if (so != NULL) {
        if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
            printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
                __func__, error, len,
                so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
        }

        socket_unlock(so, 1);
    }
    if (m_remain) {
        *m_remain = m;

#if (DEBUG || DEVELOPMENT)
        if (m != NULL && socket_debug && so != NULL &&
            (so->so_options & SO_DEBUG)) {
            struct mbuf *n;

            printf("%s m_list %llx\n", __func__,
                (uint64_t) VM_KERNEL_ADDRPERM(m_list));
            for (n = m; n != NULL; n = n->m_nextpkt) {
                printf(" remain %llx m_next %llx\n",
                    (uint64_t) VM_KERNEL_ADDRPERM(n),
                    (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
            }
        }
#endif /* (DEBUG || DEVELOPMENT) */
    } else {
        if (m != NULL) {
            m_freem_list(m);
        }
    }
    if (error != 0) {
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
    }
    return error;
}

errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
    u_int32_t flags)
{
    struct socket *so;
    struct mbuf *m;
    errno_t error = 0;
    unsigned int num_needed;
    struct mbuf *n;
    size_t curlen = 0;
    u_int32_t kctlflags;

    so = kcb_find_socket(kctlref, unit, &kctlflags);
    if (so == NULL) {
        return EINVAL;
    }

    if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
        error = ENOBUFS;
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
        goto bye;
    }

    num_needed = 1;
    m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
    if (m == NULL) {
        kctlstat.kcs_enqdata_mb_alloc_fail++;
        if (ctl_debug) {
            printf("%s: m_allocpacket_internal(%lu) failed\n",
                __func__, len);
        }
        error = ENOMEM;
        goto bye;
    }

    for (n = m; n != NULL; n = n->m_next) {
        size_t mlen = mbuf_maxlen(n);

        if (mlen + curlen > len) {
            mlen = len - curlen;
        }
        n->m_len = (int32_t)mlen;
        bcopy((char *)data + curlen, m_mtod_current(n), mlen);
        curlen += mlen;
    }
    mbuf_pkthdr_setlen(m, curlen);

    if ((flags & CTL_DATA_EOR)) {
        m->m_flags |= M_EOR;
    }
    so_recv_data_stat(so, m, 0);
    /*
     * No need to call the "nodrop" variant of sbappend
     * because the mbuf is local to the scope of the function
     */
    if (sbappend(&so->so_rcv, m) != 0) {
        if ((flags & CTL_DATA_NOWAKEUP) == 0) {
            sorwakeup(so);
        }
    } else {
        kctlstat.kcs_enqdata_sbappend_fail++;
        error = ENOBUFS;
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
    }

bye:
    if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
        printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
            __func__, error, (int)len,
            so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
    }

    socket_unlock(so, 1);
    if (error != 0) {
        OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
    }
    return error;
}
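
/*
 * Example call from a kernel control owner (a hypothetical sketch;
 * my_ctlref and my_unit stand for the values obtained from
 * ctl_register() and the connect callback, they are not defined here):
 *
 *	char msg[] = "status: ok";
 *	errno_t err = ctl_enqueuedata(my_ctlref, my_unit, msg, sizeof(msg),
 *	    CTL_DATA_EOR);
 *	if (err == ENOBUFS) {
 *		// client is not reading fast enough; retry once the
 *		// ctl_rcvd_func callback reports that space was freed
 *	}
 */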

errno_t
ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt)
{
    struct socket *so;
    u_int32_t cnt;
    struct mbuf *m1;

    if (pcnt == NULL) {
        return EINVAL;
    }

    so = kcb_find_socket(kctlref, unit, NULL);
    if (so == NULL) {
        return EINVAL;
    }

    cnt = 0;
    m1 = so->so_rcv.sb_mb;
    while (m1 != NULL) {
        if (m_has_mtype(m1, MTF_DATA | MTF_HEADER | MTF_OOBDATA)) {
            cnt += 1;
        }
        m1 = m1->m_nextpkt;
    }
    *pcnt = cnt;

    socket_unlock(so, 1);

    return 0;
}

errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
    struct socket *so;
    long avail;

    if (space == NULL) {
        return EINVAL;
    }

    so = kcb_find_socket(kctlref, unit, NULL);
    if (so == NULL) {
        return EINVAL;
    }

    avail = sbspace(&so->so_rcv);
    *space = (avail < 0) ? 0 : avail;
    socket_unlock(so, 1);

    return 0;
}

errno_t
ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit,
    u_int32_t *difference)
{
    struct socket *so;

    if (difference == NULL) {
        return EINVAL;
    }

    so = kcb_find_socket(kctlref, unit, NULL);
    if (so == NULL) {
        return EINVAL;
    }

    if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) {
        *difference = 0;
    } else {
        *difference = (so->so_rcv.sb_lowat - so->so_rcv.sb_cc);
    }
    socket_unlock(so, 1);

    return 0;
}

static int
ctl_ctloutput(struct socket *so, struct sockopt *sopt)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kctl *kctl;
    int error = 0;
    void *data = NULL;
    size_t data_len = 0;
    size_t len;

    if (sopt->sopt_level != SYSPROTO_CONTROL) {
        return EINVAL;
    }

    if (kcb == NULL) { /* sanity check */
        return ENOTCONN;
    }

    if ((kctl = kcb->kctl) == NULL) {
        return EINVAL;
    }

    lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
    ctl_kcb_increment_use_count(kcb, mtx_held);

    switch (sopt->sopt_dir) {
    case SOPT_SET:
        if (kctl->setopt == NULL) {
            error = ENOTSUP;
            goto out;
        }
        if (sopt->sopt_valsize != 0) {
            data_len = sopt->sopt_valsize;
            data = kalloc_data(data_len, Z_WAITOK | Z_ZERO);
            if (data == NULL) {
                data_len = 0;
                error = ENOMEM;
                goto out;
            }
            error = sooptcopyin(sopt, data,
                sopt->sopt_valsize, sopt->sopt_valsize);
        }
        if (error == 0) {
            socket_unlock(so, 0);
            error = (*kctl->setopt)(kctl->kctlref,
                kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name,
                data, sopt->sopt_valsize);
            socket_lock(so, 0);
        }

        kfree_data(data, data_len);
        break;

    case SOPT_GET:
        if (kctl->getopt == NULL) {
            error = ENOTSUP;
            goto out;
        }

        if (sopt->sopt_valsize && sopt->sopt_val) {
            data_len = sopt->sopt_valsize;
            data = kalloc_data(data_len, Z_WAITOK | Z_ZERO);
            if (data == NULL) {
                data_len = 0;
                error = ENOMEM;
                goto out;
            }
            /*
             * 4108337 - copy user data in case the
             * kernel control needs it
             */
            error = sooptcopyin(sopt, data,
                sopt->sopt_valsize, sopt->sopt_valsize);
        }

        if (error == 0) {
            len = sopt->sopt_valsize;
            socket_unlock(so, 0);
            error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit,
                kcb->userdata, sopt->sopt_name,
                data, &len);
            if (data != NULL && len > sopt->sopt_valsize) {
                panic_plain("ctl_ctloutput: ctl %s returned "
                    "len (%lu) > sopt_valsize (%lu)\n",
                    kcb->kctl->name, len,
                    sopt->sopt_valsize);
            }
            socket_lock(so, 0);
            if (error == 0) {
                if (data != NULL) {
                    error = sooptcopyout(sopt, data, len);
                } else {
                    sopt->sopt_valsize = len;
                }
            }
        }

        kfree_data(data, data_len);
        break;
    }

out:
    ctl_kcb_decrement_use_count(kcb);
    return error;
}

static int
ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct proc *p)
{
#pragma unused(so, ifp, p)
    int error = ENOTSUP;

    switch (cmd) {
    /* get the number of controllers */
    case CTLIOCGCOUNT: {
        struct kctl *kctl;
        u_int32_t n = 0;

        lck_mtx_lock(&ctl_mtx);
        TAILQ_FOREACH(kctl, &ctl_head, next) {
            n++;
        }
        lck_mtx_unlock(&ctl_mtx);

        bcopy(&n, data, sizeof(n));
        error = 0;
        break;
    }
    case CTLIOCGINFO: {
        struct ctl_info ctl_info;
        struct kctl *kctl = 0;
        size_t name_len;

        bcopy(data, &ctl_info, sizeof(ctl_info));
        name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);

        if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
            error = EINVAL;
            break;
        }
        lck_mtx_lock(&ctl_mtx);
        kctl = ctl_find_by_name(ctl_info.ctl_name);
        lck_mtx_unlock(&ctl_mtx);
        if (kctl == 0) {
            error = ENOENT;
            break;
        }
        ctl_info.ctl_id = kctl->id;
        bcopy(&ctl_info, data, sizeof(ctl_info));
        error = 0;
        break;
    }

        /* add controls to get list of NKEs */
    }

    return error;
}
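
/*
 * For reference, the conventional user-space path that reaches the
 * CTLIOCGINFO case above (a sketch using the public API from
 * <sys/kern_control.h>; the control name is a made-up example):
 *
 *	struct ctl_info info;
 *	struct sockaddr_ctl addr;
 *	int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
 *
 *	bzero(&info, sizeof(info));
 *	strlcpy(info.ctl_name, "com.example.mycontrol", sizeof(info.ctl_name));
 *	ioctl(fd, CTLIOCGINFO, &info);	// resolves the name to ctl_id
 *
 *	bzero(&addr, sizeof(addr));
 *	addr.sc_len = sizeof(addr);
 *	addr.sc_family = AF_SYSTEM;
 *	addr.ss_sysaddr = AF_SYS_CONTROL;
 *	addr.sc_id = info.ctl_id;
 *	addr.sc_unit = 0;		// 0 asks the kernel to pick a unit
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */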

static void
kctl_tbl_grow(void)
{
    struct kctl **new_table;
    uintptr_t new_size;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    if (kctl_tbl_growing) {
        /* Another thread is allocating */
        kctl_tbl_growing_waiting++;

        do {
            (void) msleep((caddr_t) &kctl_tbl_growing, &ctl_mtx,
                PSOCK | PCATCH, "kctl_tbl_growing", 0);
        } while (kctl_tbl_growing);
        kctl_tbl_growing_waiting--;
    }
    /* Another thread grew the table */
    if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) {
        return;
    }

    /* Verify we have a sane size */
    if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) {
        kctlstat.kcs_tbl_size_too_big++;
        if (ctl_debug) {
            printf("%s kctl_tbl_size %lu too big\n",
                __func__, kctl_tbl_size);
        }
        return;
    }
    kctl_tbl_growing = 1;

    new_size = kctl_tbl_size + KCTL_TBL_INC;

    lck_mtx_unlock(&ctl_mtx);
    new_table = kalloc_type(struct kctl *, new_size, Z_WAITOK | Z_ZERO);
    lck_mtx_lock(&ctl_mtx);

    if (new_table != NULL) {
        if (kctl_table != NULL) {
            bcopy(kctl_table, new_table,
                kctl_tbl_size * sizeof(struct kctl *));

            kfree_type(struct kctl *, kctl_tbl_size, kctl_table);
        }
        kctl_table = new_table;
        kctl_tbl_size = new_size;
    }

    kctl_tbl_growing = 0;

    if (kctl_tbl_growing_waiting) {
        wakeup(&kctl_tbl_growing);
    }
}

#define KCTLREF_INDEX_MASK 0x0000FFFF
#define KCTLREF_GENCNT_MASK 0xFFFF0000
#define KCTLREF_GENCNT_SHIFT 16

static kern_ctl_ref
kctl_make_ref(struct kctl *kctl)
{
    uintptr_t i;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    if (kctl_tbl_count >= kctl_tbl_size) {
        kctl_tbl_grow();
    }

    kctl->kctlref = NULL;
    for (i = 0; i < kctl_tbl_size; i++) {
        if (kctl_table[i] == NULL) {
            uintptr_t ref;

            /*
             * Reference is index plus one
             */
            kctl_ref_gencnt += 1;

            /*
             * Add generation count as salt to reference to prevent
             * use after deregister
             */
            ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) &
                KCTLREF_GENCNT_MASK) +
                ((i + 1) & KCTLREF_INDEX_MASK);

            kctl->kctlref = (void *)(ref);
            kctl_table[i] = kctl;
            kctl_tbl_count++;
            break;
        }
    }

    if (kctl->kctlref == NULL) {
        panic("%s no space in table", __func__);
    }

    if (ctl_debug > 0) {
        printf("%s %p for %p\n",
            __func__, kctl->kctlref, kctl);
    }

    return kctl->kctlref;
}
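
/*
 * Worked example of the reference encoding: if slot i = 4 is free and
 * the generation counter has advanced to 0x1234, the reference is
 *
 *	ref = ((0x1234 << KCTLREF_GENCNT_SHIFT) & KCTLREF_GENCNT_MASK)
 *	    + ((4 + 1) & KCTLREF_INDEX_MASK)
 *	    = 0x12340005
 *
 * kctl_from_ref() below recovers the slot as
 * (ref & KCTLREF_INDEX_MASK) - 1 = 4 and then compares the full salted
 * value against kctl->kctlref, so a stale reference left over from
 * before a deregister/register cycle is rejected.
 */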

static void
kctl_delete_ref(kern_ctl_ref kctlref)
{
    /*
     * Reference is index plus one
     */
    uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    if (i < kctl_tbl_size) {
        struct kctl *kctl = kctl_table[i];

        if (kctl->kctlref == kctlref) {
            kctl_table[i] = NULL;
            kctl_tbl_count--;
        } else {
            kctlstat.kcs_bad_kctlref++;
        }
    } else {
        kctlstat.kcs_bad_kctlref++;
    }
}

static struct kctl *
kctl_from_ref(kern_ctl_ref kctlref)
{
    /*
     * Reference is index plus one
     */
    uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
    struct kctl *kctl = NULL;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    if (i >= kctl_tbl_size) {
        kctlstat.kcs_bad_kctlref++;
        return NULL;
    }
    kctl = kctl_table[i];
    if (kctl->kctlref != kctlref) {
        kctlstat.kcs_bad_kctlref++;
        return NULL;
    }
    return kctl;
}

/*
 * Register/unregister a NKE
 */
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
{
    struct kctl *kctl = NULL;
    struct kctl *kctl_next = NULL;
    u_int32_t id = 1;
    size_t name_len;
    int is_extended = 0;
    int is_setup = 0;

    if (userkctl == NULL) { /* sanity check */
        return EINVAL;
    }
    if (userkctl->ctl_connect == NULL) {
        return EINVAL;
    }
    name_len = strlen(userkctl->ctl_name);
    if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
        return EINVAL;
    }

    kctl = kalloc_type(struct kctl, Z_WAITOK | Z_ZERO | Z_NOFAIL);

    lck_mtx_lock(&ctl_mtx);

    if (kctl_make_ref(kctl) == NULL) {
        lck_mtx_unlock(&ctl_mtx);
        kfree_type(struct kctl, kctl);
        return ENOMEM;
    }

    /*
     * Kernel Control IDs
     *
     * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
     * static. If they do not exist, add them to the list in order. If the
     * flag is not set, we must find a new unique value. We assume the
     * list is in order. We find the last item in the list and add one. If
     * this leads to wrapping the id around, we start at the front of the
     * list and look for a gap.
     */

    if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
        /* Must dynamically assign an unused ID */

        /* Verify the same name isn't already registered */
        if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
            kctl_delete_ref(kctl->kctlref);
            lck_mtx_unlock(&ctl_mtx);
            kfree_type(struct kctl, kctl);
            return EEXIST;
        }

        /* Start with 1 in case the list is empty */
        id = 1;
        kctl_next = TAILQ_LAST(&ctl_head, kctl_list);

        if (kctl_next != NULL) {
            /* List was not empty, add one to the last item */
            id = kctl_next->id + 1;
            kctl_next = NULL;

            /*
             * If this wrapped the id number, start looking at
             * the front of the list for an unused id.
             */
            if (id == 0) {
                /* Find the next unused ID */
                id = 1;

                TAILQ_FOREACH(kctl_next, &ctl_head, next) {
                    if (kctl_next->id > id) {
                        /* We found a gap */
                        break;
                    }

                    id = kctl_next->id + 1;
                }
            }
        }

        userkctl->ctl_id = id;
        kctl->id = id;
        kctl->reg_unit = -1;
    } else {
        TAILQ_FOREACH(kctl_next, &ctl_head, next) {
            if (kctl_next->id > userkctl->ctl_id) {
                break;
            }
        }

        if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
            kctl_delete_ref(kctl->kctlref);
            lck_mtx_unlock(&ctl_mtx);
            kfree_type(struct kctl, kctl);
            return EEXIST;
        }
        kctl->id = userkctl->ctl_id;
        kctl->reg_unit = userkctl->ctl_unit;
    }

    is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
    is_setup = (userkctl->ctl_flags & CTL_FLAG_REG_SETUP);

    strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
    kctl->flags = userkctl->ctl_flags;

    /*
     * Let the caller know the default send and receive sizes
     */
    if (userkctl->ctl_sendsize == 0) {
        kctl->sendbufsize = CTL_SENDSIZE;
        userkctl->ctl_sendsize = kctl->sendbufsize;
    } else {
        kctl->sendbufsize = userkctl->ctl_sendsize;
    }
    if (userkctl->ctl_recvsize == 0) {
        kctl->recvbufsize = CTL_RECVSIZE;
        userkctl->ctl_recvsize = kctl->recvbufsize;
    } else {
        kctl->recvbufsize = userkctl->ctl_recvsize;
    }

    if (is_setup) {
        kctl->setup = userkctl->ctl_setup;
    }
    kctl->bind = userkctl->ctl_bind;
    kctl->connect = userkctl->ctl_connect;
    kctl->disconnect = userkctl->ctl_disconnect;
    kctl->send = userkctl->ctl_send;
    kctl->setopt = userkctl->ctl_setopt;
    kctl->getopt = userkctl->ctl_getopt;
    if (is_extended) {
        kctl->rcvd = userkctl->ctl_rcvd;
        kctl->send_list = userkctl->ctl_send_list;
    }

    TAILQ_INIT(&kctl->kcb_head);

    if (kctl_next) {
        TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
    } else {
        TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
    }

    kctlstat.kcs_reg_count++;
    kctlstat.kcs_gencnt++;

    lck_mtx_unlock(&ctl_mtx);

    *kctlref = kctl->kctlref;

    ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
    return 0;
}
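
/*
 * Registration sketch for a kernel extension (hypothetical callback
 * names; only ctl_connect is mandatory, per the check at the top of
 * ctl_register above):
 *
 *	static kern_ctl_ref my_ctlref;
 *	struct kern_ctl_reg reg;
 *
 *	bzero(&reg, sizeof(reg));
 *	strlcpy(reg.ctl_name, "com.example.mycontrol", sizeof(reg.ctl_name));
 *	reg.ctl_flags = 0;              // dynamic id: CTL_FLAG_REG_ID_UNIT unset
 *	reg.ctl_connect = my_connect;   // required
 *	reg.ctl_disconnect = my_disconnect;
 *	reg.ctl_send = my_send;
 *
 *	errno_t err = ctl_register(&reg, &my_ctlref);
 *	// On success, reg.ctl_id holds the assigned control id and
 *	// reg.ctl_sendsize/ctl_recvsize report the default buffer sizes.
 */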

errno_t
ctl_deregister(void *kctlref)
{
    struct kctl *kctl;

    lck_mtx_lock(&ctl_mtx);
    if ((kctl = kctl_from_ref(kctlref)) == NULL) {
        kctlstat.kcs_bad_kctlref++;
        lck_mtx_unlock(&ctl_mtx);
        if (ctl_debug != 0) {
            printf("%s invalid kctlref %p\n",
                __func__, kctlref);
        }
        return EINVAL;
    }

    if (!TAILQ_EMPTY(&kctl->kcb_head)) {
        lck_mtx_unlock(&ctl_mtx);
        return EBUSY;
    }

    TAILQ_REMOVE(&ctl_head, kctl, next);

    kctlstat.kcs_reg_count--;
    kctlstat.kcs_gencnt++;

    kctl_delete_ref(kctl->kctlref);
    lck_mtx_unlock(&ctl_mtx);

    ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
    kfree_type(struct kctl, kctl);
    return 0;
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_name(const char *name)
{
    struct kctl *kctl;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
            return kctl;
        }
    }

    return NULL;
}

u_int32_t
ctl_id_by_name(const char *name)
{
    u_int32_t ctl_id = 0;
    struct kctl *kctl;

    lck_mtx_lock(&ctl_mtx);
    kctl = ctl_find_by_name(name);
    if (kctl) {
        ctl_id = kctl->id;
    }
    lck_mtx_unlock(&ctl_mtx);

    return ctl_id;
}

errno_t
ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize)
{
    int found = 0;
    struct kctl *kctl;

    lck_mtx_lock(&ctl_mtx);
    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl->id == id) {
            break;
        }
    }

    if (kctl) {
        if (maxsize > MAX_KCTL_NAME) {
            maxsize = MAX_KCTL_NAME;
        }
        strlcpy(out_name, kctl->name, maxsize);
        found = 1;
    }
    lck_mtx_unlock(&ctl_mtx);

    return found ? 0 : ENOENT;
}

/*
 * Must be called with global ctl_mtx lock taken
 */
static struct kctl *
ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
    struct kctl *kctl;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    TAILQ_FOREACH(kctl, &ctl_head, next) {
        if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
            return kctl;
        } else if (kctl->id == id && kctl->reg_unit == unit) {
            return kctl;
        }
    }
    return NULL;
}

/*
 * Must be called with kernel controller lock taken
 */
static struct ctl_cb *
kcb_find(struct kctl *kctl, u_int32_t unit)
{
    struct ctl_cb *kcb;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);

    TAILQ_FOREACH(kcb, &kctl->kcb_head, next) {
        if (kcb->sac.sc_unit == unit) {
            return kcb;
        }
    }

    return NULL;
}

static struct socket *
kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags)
{
    struct socket *so = NULL;
    struct ctl_cb *kcb;
    void *lr_saved;
    struct kctl *kctl;
    int i;

    lr_saved = __builtin_return_address(0);

    lck_mtx_lock(&ctl_mtx);
    /*
     * First validate the kctlref
     */
    if ((kctl = kctl_from_ref(kctlref)) == NULL) {
        kctlstat.kcs_bad_kctlref++;
        lck_mtx_unlock(&ctl_mtx);
        if (ctl_debug != 0) {
            printf("%s invalid kctlref %p\n",
                __func__, kctlref);
        }
        return NULL;
    }

    kcb = kcb_find(kctl, unit);
    if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
        lck_mtx_unlock(&ctl_mtx);
        return NULL;
    }
    /*
     * This prevents the socket from being closed
     */
    kcb->usecount++;
    /*
     * Respect lock ordering: socket before ctl_mtx
     */
    lck_mtx_unlock(&ctl_mtx);

    socket_lock(so, 1);
    /*
     * The socket lock history is more useful if we store
     * the address of the caller.
     */
    i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
    so->lock_lr[i] = lr_saved;

    lck_mtx_lock(&ctl_mtx);

    if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
        lck_mtx_unlock(&ctl_mtx);
        socket_unlock(so, 1);
        so = NULL;
        lck_mtx_lock(&ctl_mtx);
    } else if (kctlflags != NULL) {
        *kctlflags = kctl->flags;
    }

    kcb->usecount--;
    if (kcb->usecount == 0 && kcb->require_clearing_count != 0) {
        wakeup((event_t)&kcb->usecount);
    }

    lck_mtx_unlock(&ctl_mtx);

    return so;
}

static void
ctl_post_msg(u_int32_t event_code, u_int32_t id)
{
    struct ctl_event_data ctl_ev_data;
    struct kev_msg ev_msg;

    lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);

    bzero(&ev_msg, sizeof(struct kev_msg));
    ev_msg.vendor_code = KEV_VENDOR_APPLE;

    ev_msg.kev_class = KEV_SYSTEM_CLASS;
    ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
    ev_msg.event_code = event_code;

    /* common nke subclass data */
    bzero(&ctl_ev_data, sizeof(ctl_ev_data));
    ctl_ev_data.ctl_id = id;
    ev_msg.dv[0].data_ptr = &ctl_ev_data;
    ev_msg.dv[0].data_length = sizeof(ctl_ev_data);

    ev_msg.dv[1].data_length = 0;

    kev_post_msg(&ev_msg);
}

static int
ctl_lock(struct socket *so, int refcount, void *lr)
{
    void *lr_saved;

    if (lr == NULL) {
        lr_saved = __builtin_return_address(0);
    } else {
        lr_saved = lr;
    }

    if (so->so_pcb != NULL) {
        lck_mtx_lock(&((struct ctl_cb *)so->so_pcb)->mtx);
    } else {
        panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s",
            so, lr_saved, solockhistory_nr(so));
        /* NOTREACHED */
    }

    if (so->so_usecount < 0) {
        panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s",
            so, so->so_pcb, lr_saved, so->so_usecount,
            solockhistory_nr(so));
        /* NOTREACHED */
    }

    if (refcount) {
        so->so_usecount++;
    }

    so->lock_lr[so->next_lock_lr] = lr_saved;
    so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX;
    return 0;
}

static int
ctl_unlock(struct socket *so, int refcount, void *lr)
{
    void *lr_saved;
    lck_mtx_t *mutex_held;

    if (lr == NULL) {
        lr_saved = __builtin_return_address(0);
    } else {
        lr_saved = lr;
    }

#if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG))
    printf("ctl_unlock: so=%llx sopcb=%llx lock=%llx ref=%u lr=%llx\n",
        (uint64_t)VM_KERNEL_ADDRPERM(so),
        (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
        (uint64_t)VM_KERNEL_ADDRPERM(&((struct ctl_cb *)so->so_pcb)->mtx),
        so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
    if (refcount) {
        so->so_usecount--;
    }

    if (so->so_usecount < 0) {
        panic("ctl_unlock: so=%p usecount=%x lrh= %s",
            so, so->so_usecount, solockhistory_nr(so));
        /* NOTREACHED */
    }
    if (so->so_pcb == NULL) {
        panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s",
            so, so->so_usecount, (void *)lr_saved,
            solockhistory_nr(so));
        /* NOTREACHED */
    }
    mutex_held = &((struct ctl_cb *)so->so_pcb)->mtx;

    lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
    so->unlock_lr[so->next_unlock_lr] = lr_saved;
    so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
    lck_mtx_unlock(mutex_held);

    if (so->so_usecount == 0) {
        ctl_sofreelastref(so);
    }

    return 0;
}

static lck_mtx_t *
ctl_getlock(struct socket *so, int flags)
{
#pragma unused(flags)
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;

    if (so->so_pcb) {
        if (so->so_usecount < 0) {
            panic("ctl_getlock: so=%p usecount=%x lrh= %s",
                so, so->so_usecount, solockhistory_nr(so));
        }
        return &kcb->mtx;
    } else {
        panic("ctl_getlock: so=%p NULL NO so_pcb %s",
            so, solockhistory_nr(so));
        return so->so_proto->pr_domain->dom_mtx;
    }
}

__private_extern__ int
kctl_reg_list SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;
    u_int64_t i, n;
    struct xsystmgen xsg;
    void *buf = NULL;
    struct kctl *kctl;
    size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));

    buf = kalloc_data(item_size, Z_WAITOK | Z_ZERO | Z_NOFAIL);

    lck_mtx_lock(&ctl_mtx);

    n = kctlstat.kcs_reg_count;

    if (req->oldptr == USER_ADDR_NULL) {
        req->oldidx = (size_t)(n + n / 8) * sizeof(struct xkctl_reg);
        goto done;
    }
    if (req->newptr != USER_ADDR_NULL) {
        error = EPERM;
        goto done;
    }
    bzero(&xsg, sizeof(xsg));
    xsg.xg_len = sizeof(xsg);
    xsg.xg_count = n;
    xsg.xg_gen = kctlstat.kcs_gencnt;
    xsg.xg_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
    if (error) {
        goto done;
    }
    /*
     * We are done if there is no pcb
     */
    if (n == 0) {
        goto done;
    }

    for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
        i < n && kctl != NULL;
        i++, kctl = TAILQ_NEXT(kctl, next)) {
        struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
        struct ctl_cb *kcb;
        u_int32_t pcbcount = 0;

        TAILQ_FOREACH(kcb, &kctl->kcb_head, next) {
            pcbcount++;
        }

        bzero(buf, item_size);

        xkr->xkr_len = sizeof(struct xkctl_reg);
        xkr->xkr_kind = XSO_KCREG;
        xkr->xkr_id = kctl->id;
        xkr->xkr_reg_unit = kctl->reg_unit;
        xkr->xkr_flags = kctl->flags;
        xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
        xkr->xkr_recvbufsize = kctl->recvbufsize;
        xkr->xkr_sendbufsize = kctl->sendbufsize;
        xkr->xkr_lastunit = kctl->lastunit;
        xkr->xkr_pcbcount = pcbcount;
        xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
        xkr->xkr_disconnect =
            (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
        xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
        xkr->xkr_send_list =
            (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
        xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
        xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
        xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
        strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));

        error = SYSCTL_OUT(req, buf, item_size);
    }

    if (error == 0) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        bzero(&xsg, sizeof(xsg));
        xsg.xg_len = sizeof(xsg);
        xsg.xg_count = n;
        xsg.xg_gen = kctlstat.kcs_gencnt;
        xsg.xg_sogen = so_gencnt;
        error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
        if (error) {
            goto done;
        }
    }

done:
    lck_mtx_unlock(&ctl_mtx);

    kfree_data(buf, item_size);

    return error;
}

__private_extern__ int
kctl_pcblist SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;
    u_int64_t n, i;
    struct xsystmgen xsg;
    void *buf = NULL;
    struct kctl *kctl;
    size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
        ROUNDUP64(sizeof(struct xsocket_n)) +
        2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
        ROUNDUP64(sizeof(struct xsockstat_n));

    buf = kalloc_data(item_size, Z_WAITOK | Z_ZERO | Z_NOFAIL);

    lck_mtx_lock(&ctl_mtx);

    n = kctlstat.kcs_pcbcount;

    if (req->oldptr == USER_ADDR_NULL) {
        req->oldidx = (size_t)(n + n / 8) * item_size;
        goto done;
    }
    if (req->newptr != USER_ADDR_NULL) {
        error = EPERM;
        goto done;
    }
    bzero(&xsg, sizeof(xsg));
    xsg.xg_len = sizeof(xsg);
    xsg.xg_count = n;
    xsg.xg_gen = kctlstat.kcs_gencnt;
    xsg.xg_sogen = so_gencnt;
    error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
    if (error) {
        goto done;
    }
    /*
     * We are done if there is no pcb
     */
    if (n == 0) {
        goto done;
    }

    for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
        i < n && kctl != NULL;
        kctl = TAILQ_NEXT(kctl, next)) {
        struct ctl_cb *kcb;

        for (kcb = TAILQ_FIRST(&kctl->kcb_head);
            i < n && kcb != NULL;
            i++, kcb = TAILQ_NEXT(kcb, next)) {
            struct xkctlpcb *xk = (struct xkctlpcb *)buf;
            struct xsocket_n *xso = (struct xsocket_n *)
                ADVANCE64(xk, sizeof(*xk));
            struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
                ADVANCE64(xso, sizeof(*xso));
            struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
                ADVANCE64(xsbrcv, sizeof(*xsbrcv));
            struct xsockstat_n *xsostats = (struct xsockstat_n *)
                ADVANCE64(xsbsnd, sizeof(*xsbsnd));

            bzero(buf, item_size);

            xk->xkp_len = sizeof(struct xkctlpcb);
            xk->xkp_kind = XSO_KCB;
            xk->xkp_unit = kcb->sac.sc_unit;
            xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRHASH(kcb);
            xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRHASH(kctl);
            xk->xkp_kctlid = kctl->id;
            strlcpy(xk->xkp_kctlname, kctl->name,
                sizeof(xk->xkp_kctlname));

            sotoxsocket_n(kcb->so, xso);
            sbtoxsockbuf_n(kcb->so ?
                &kcb->so->so_rcv : NULL, xsbrcv);
            sbtoxsockbuf_n(kcb->so ?
                &kcb->so->so_snd : NULL, xsbsnd);
            sbtoxsockstat_n(kcb->so, xsostats);

            error = SYSCTL_OUT(req, buf, item_size);
        }
    }

    if (error == 0) {
        /*
         * Give the user an updated idea of our state.
         * If the generation differs from what we told
         * her before, she knows that something happened
         * while we were processing this request, and it
         * might be necessary to retry.
         */
        bzero(&xsg, sizeof(xsg));
        xsg.xg_len = sizeof(xsg);
        xsg.xg_count = n;
        xsg.xg_gen = kctlstat.kcs_gencnt;
        xsg.xg_sogen = so_gencnt;
        error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
        if (error) {
            goto done;
        }
    }

done:
    lck_mtx_unlock(&ctl_mtx);

    kfree_data(buf, item_size);
    return error;
}

int
kctl_getstat SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;

    lck_mtx_lock(&ctl_mtx);

    if (req->newptr != USER_ADDR_NULL) {
        error = EPERM;
        goto done;
    }
    if (req->oldptr == USER_ADDR_NULL) {
        req->oldidx = sizeof(struct kctlstat);
        goto done;
    }

    error = SYSCTL_OUT(req, &kctlstat,
        MIN(sizeof(struct kctlstat), req->oldlen));
done:
    lck_mtx_unlock(&ctl_mtx);
    return error;
}

void
kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
{
    struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
    struct kern_ctl_info *kcsi =
        &si->soi_proto.pri_kern_ctl;
    struct kctl *kctl = kcb->kctl;

    si->soi_kind = SOCKINFO_KERN_CTL;

    if (kctl == 0) {
        return;
    }

    kcsi->kcsi_id = kctl->id;
    kcsi->kcsi_reg_unit = kctl->reg_unit;
    kcsi->kcsi_flags = kctl->flags;
    kcsi->kcsi_recvbufsize = kctl->recvbufsize;
    kcsi->kcsi_sendbufsize = kctl->sendbufsize;
    kcsi->kcsi_unit = kcb->sac.sc_unit;
    strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
}