/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */

/*
 * Copyright (c) 1994 Adam Glass and Charles Hannum.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by Adam Glass and Charles
 *        Hannum.
 * 4. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005-2006 SPARTA, Inc.
 */


#include <sys/appleapiopts.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/shm_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/ipcs.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#include <security/audit/audit.h>

#include <mach/mach_types.h>
#include <mach/vm_inherit.h>
#include <mach/vm_map.h>

#include <mach/mach_vm.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>

#include <kern/locks.h>
#include <os/overflow.h>

/* Uncomment this line to see MAC debugging output. */
/* #define MAC_DEBUG */
#if CONFIG_MACF_DEBUG
#define MPRINTF(a) printf a
#else
#define MPRINTF(a)
#endif
108
109#if SYSV_SHM
110static int shminit(void);
111
112static LCK_GRP_DECLARE(sysv_shm_subsys_lck_grp, "sysv_shm_subsys_lock");
113static LCK_MTX_DECLARE(sysv_shm_subsys_mutex, &sysv_shm_subsys_lck_grp);
114
115#define SYSV_SHM_SUBSYS_LOCK() lck_mtx_lock(&sysv_shm_subsys_mutex)
116#define SYSV_SHM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_shm_subsys_mutex)
117
118static int oshmctl(void *p, void *uap, void *retval);
119static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, int * retval);
120static int shmget_existing(struct shmget_args *uap, int mode, int segnum, int * retval);
121static void shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out);
122static void shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out);
123
124/* XXX casting to (sy_call_t *) is bogus, as usual. */
125static sy_call_t* const shmcalls[] = {
126 (sy_call_t *)shmat, (sy_call_t *)oshmctl,
127 (sy_call_t *)shmdt, (sy_call_t *)shmget,
128 (sy_call_t *)shmctl
129};
130
131#define SHMSEG_FREE 0x0200
132#define SHMSEG_REMOVED 0x0400
133#define SHMSEG_ALLOCATED 0x0800
134#define SHMSEG_WANTED 0x1000
135
136static int shm_last_free, shm_nused, shm_committed;
137struct shmid_kernel *shmsegs; /* 64 bit version */
138static int shm_inited = 0;
139
140/*
141 * Since anonymous memory chunks are limited to ANON_MAX_SIZE bytes,
142 * we have to keep a list of chunks when we want to handle a shared memory
143 * segment bigger than ANON_MAX_SIZE.
144 * Each chunk points to a VM named entry of up to ANON_MAX_SIZE bytes
145 * of anonymous memory.
146 */
147struct shm_handle {
148 void * shm_object; /* named entry for this chunk*/
149 memory_object_size_t shm_handle_size; /* size of this chunk */
150 struct shm_handle *shm_handle_next; /* next chunk */
151};
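
/*
 * Illustrative sketch (an editorial note, not part of the implementation):
 * with a hypothetical ANON_MAX_SIZE of exactly 4 GB, a 9 GB segment would
 * be carved into a three-element chunk list:
 *
 *     shm_internal -> [4 GB] -> [4 GB] -> [1 GB] -> NULL
 *
 * mirroring the allocation loop in shmget_allocate_segment() below:
 *
 *     for (alloc_size = 0; alloc_size < total_size; alloc_size += size) {
 *         size = MIN(total_size - alloc_size, ANON_MAX_SIZE);
 *         // create a named entry of 'size' bytes and append it
 *     }
 */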

struct shmmap_state {
    mach_vm_address_t va; /* user address */
    int shmid;            /* segment id */
};

static void shm_deallocate_segment(struct shmid_kernel *);
static int shm_find_segment_by_key(key_t);
static struct shmid_kernel *shm_find_segment_by_shmid(int);
static int shm_delete_mapping(struct proc *, struct shmmap_state *, int);

#ifdef __APPLE_API_PRIVATE
#define DEFAULT_SHMMAX (4 * 1024 * 1024)
#define DEFAULT_SHMMIN 1
#define DEFAULT_SHMMNI 32
#define DEFAULT_SHMSEG 8
#define DEFAULT_SHMALL 1024

struct shminfo shminfo = {
    .shmmax = DEFAULT_SHMMAX,
    .shmmin = DEFAULT_SHMMIN,
    .shmmni = DEFAULT_SHMMNI,
    .shmseg = DEFAULT_SHMSEG,
    .shmall = DEFAULT_SHMALL
};

#define SHMID_IS_VALID(x) ((x) >= 0)
#define SHMID_UNALLOCATED (-1)
#define SHMID_SENTINEL    (-2)

#endif /* __APPLE_API_PRIVATE */

static __inline__ time_t
sysv_shmtime(void)
{
    struct timeval tv;
    microtime(&tv);
    return tv.tv_sec;
}

/*
 * This conversion is safe, since if we are converting for a 32 bit process,
 * then the value of its (struct shmid_ds)->shm_segsz will never exceed 4G.
 *
 * NOTE: Source and target may *NOT* overlap! (target is smaller)
 */
static void
shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out)
{
    out->shm_perm = in->shm_perm;
    out->shm_segsz = in->shm_segsz;
    out->shm_lpid = in->shm_lpid;
    out->shm_cpid = in->shm_cpid;
    out->shm_nattch = in->shm_nattch;
    out->shm_atime = in->shm_atime;
    out->shm_dtime = in->shm_dtime;
    out->shm_ctime = in->shm_ctime;
    out->shm_internal = CAST_DOWN_EXPLICIT(int, in->shm_internal);
}

/*
 * NOTE: Source and target are permitted to overlap! (source is smaller);
 * this works because we copy fields in order from the end of the struct to
 * the beginning.
 */
static void
shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out)
{
    out->shm_internal = in->shm_internal;
    out->shm_ctime = in->shm_ctime;
    out->shm_dtime = in->shm_dtime;
    out->shm_atime = in->shm_atime;
    out->shm_nattch = in->shm_nattch;
    out->shm_cpid = in->shm_cpid;
    out->shm_lpid = in->shm_lpid;
    out->shm_segsz = in->shm_segsz;
    out->shm_perm = in->shm_perm;
}
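
/*
 * Worked illustration of the ordering argument above (an editorial note):
 * assume the 32 bit image occupies a prefix of the same buffer as the
 * 64 bit one.  shm_perm sits at offset 0 in both layouts, and every other
 * field lives at an equal or higher offset in the 64 bit layout than in
 * the 32 bit one.  Copying from the last field back to the first therefore
 * always reads each 32 bit source field before the growing 64 bit image
 * can overwrite it; copying front to back could clobber source fields that
 * had not been read yet.
 */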


static int
shm_find_segment_by_key(key_t key)
{
    int i;

    for (i = 0; i < shminfo.shmmni; i++) {
        if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
            shmsegs[i].u.shm_perm._key == key) {
            return i;
        }
    }
    return -1;
}

static struct shmid_kernel *
shm_find_segment_by_shmid(int shmid)
{
    int segnum;
    struct shmid_kernel *shmseg;

    segnum = IPCID_TO_IX(shmid);
    if (segnum < 0 || segnum >= shminfo.shmmni) {
        return NULL;
    }
    shmseg = &shmsegs[segnum];
    if ((shmseg->u.shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
        != SHMSEG_ALLOCATED ||
        shmseg->u.shm_perm._seq != IPCID_TO_SEQ(shmid)) {
        return NULL;
    }
    return shmseg;
}
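
/*
 * For reference (an assumption based on the conventional SysV IPC macros,
 * which are defined elsewhere, not restated from this file):
 * IPCID_TO_IX() and IPCID_TO_SEQ() split an id roughly as
 *
 *     ix  = shmid & 0xffff;         // slot in shmsegs[]
 *     seq = (shmid >> 16) & 0xffff; // generation number
 *
 * so the _seq comparison above rejects a stale shmid whose slot has since
 * been recycled for a different segment.
 */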

static void
shm_deallocate_segment(struct shmid_kernel *shmseg)
{
    struct shm_handle *shm_handle, *shm_handle_next;
    mach_vm_size_t size;

    for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
        shm_handle != NULL;
        shm_handle = shm_handle_next) {
        shm_handle_next = shm_handle->shm_handle_next;
        mach_memory_entry_port_release(shm_handle->shm_object);
        kfree_type(struct shm_handle, shm_handle);
    }
    shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
    size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map()));
    shm_committed -= btoc(size);
    shm_nused--;
    shmseg->u.shm_perm.mode = SHMSEG_FREE;
#if CONFIG_MACF
    /* Reset the MAC label */
    mac_sysvshm_label_recycle(shmseg);
#endif
}

static int
shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s,
    int deallocate)
{
    struct shmid_kernel *shmseg;
    int segnum, result;
    mach_vm_size_t size;

    segnum = IPCID_TO_IX(shmmap_s->shmid);
    shmseg = &shmsegs[segnum];
    size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map())); /* XXX done for us? */
    if (deallocate) {
        result = mach_vm_deallocate(current_map(), shmmap_s->va, size);
        if (result != KERN_SUCCESS) {
            return EINVAL;
        }
    }
    shmmap_s->shmid = SHMID_UNALLOCATED;
    shmseg->u.shm_dtime = sysv_shmtime();
    if ((--shmseg->u.shm_nattch <= 0) &&
        (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
        shm_deallocate_segment(shmseg);
        shm_last_free = segnum;
    }
    return 0;
}

int
shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval)
{
#if CONFIG_MACF
    struct shmid_kernel *shmsegptr;
#endif
    struct shmmap_state *shmmap_s;
    int i;
    int shmdtret = 0;

    AUDIT_ARG(svipc_addr, uap->shmaddr);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmdtret = shminit())) {
        goto shmdt_out;
    }

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    if (shmmap_s == NULL) {
        shmdtret = EINVAL;
        goto shmdt_out;
    }

    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        if (SHMID_IS_VALID(shmmap_s->shmid) &&
            shmmap_s->va == (mach_vm_offset_t)uap->shmaddr) {
            break;
        }
    }

    if (!SHMID_IS_VALID(shmmap_s->shmid)) {
        shmdtret = EINVAL;
        goto shmdt_out;
    }

#if CONFIG_MACF
    /*
     * XXX: It might be useful to move this into the shm_delete_mapping
     * function
     */
    shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
    shmdtret = mac_sysvshm_check_shmdt(kauth_cred_get(), shmsegptr);
    if (shmdtret) {
        goto shmdt_out;
    }
#endif
    i = shm_delete_mapping(p, shmmap_s, 1);

    if (i == 0) {
        *retval = 0;
    }
    shmdtret = i;
shmdt_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmdtret;
}

int
shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval)
{
    int error, flags;
    struct shmid_kernel *shmseg;
    struct shmmap_state *shmmap_s = NULL;
    struct shm_handle *shm_handle;
    mach_vm_address_t attach_va; /* attach address in/out */
    mach_vm_address_t shmlba;
    mach_vm_size_t map_size;     /* size of map entry */
    mach_vm_size_t mapped_size;
    vm_prot_t prot;
    kern_return_t rv;
    int shmat_ret;
    vm_map_kernel_flags_t vmk_flags;

    shmat_ret = 0;

    AUDIT_ARG(svipc_id, uap->shmid);
    AUDIT_ARG(svipc_addr, uap->shmaddr);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmat_ret = shminit())) {
        goto shmat_out;
    }

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    if (shmmap_s == NULL) {
        /* lazily allocate the shm map */

        int nsegs = shminfo.shmseg;
        if (nsegs <= 0) {
            shmat_ret = EMFILE;
            goto shmat_out;
        }

        /* +1 for the sentinel */
        shmmap_s = kalloc_type(struct shmmap_state, nsegs + 1, Z_WAITOK);
        if (shmmap_s == NULL) {
            shmat_ret = ENOMEM;
            goto shmat_out;
        }

        /* initialize the entries */
        for (int i = 0; i < nsegs; i++) {
            shmmap_s[i].shmid = SHMID_UNALLOCATED;
        }
        shmmap_s[nsegs].shmid = SHMID_SENTINEL;

        p->vm_shm = (caddr_t)shmmap_s;
    }
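
    /*
     * Example (editorial note): with shminfo.shmseg == 8 the freshly
     * built map is a 9-entry array
     *
     *     [ -1, -1, -1, -1, -1, -1, -1, -1, -2 ]
     *
     * (eight SHMID_UNALLOCATED slots followed by the SHMID_SENTINEL), so
     * the scans over it can run without carrying a length.
     */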

    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        shmat_ret = EINVAL;
        goto shmat_out;
    }

    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
    error = ipcperm(kauth_cred_get(), &shmseg->u.shm_perm,
        (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R | IPC_W);
    if (error) {
        shmat_ret = error;
        goto shmat_out;
    }

#if CONFIG_MACF
    error = mac_sysvshm_check_shmat(kauth_cred_get(), shmseg, uap->shmflg);
    if (error) {
        shmat_ret = error;
        goto shmat_out;
    }
#endif

    /* find a free shmid */
    while (SHMID_IS_VALID(shmmap_s->shmid)) {
        shmmap_s++;
    }
    if (shmmap_s->shmid != SHMID_UNALLOCATED) {
        /* no free shmids */
        shmat_ret = EMFILE;
        goto shmat_out;
    }

    map_size = vm_map_round_page(shmseg->u.shm_segsz,
        vm_map_page_mask(current_map()));
    prot = VM_PROT_READ;
    if ((uap->shmflg & SHM_RDONLY) == 0) {
        prot |= VM_PROT_WRITE;
    }
    flags = MAP_ANON | MAP_SHARED;
    if (uap->shmaddr) {
        flags |= MAP_FIXED;
    }

    attach_va = (mach_vm_address_t)uap->shmaddr;
    shmlba = vm_map_page_size(current_map()); /* XXX instead of SHMLBA */
    if (uap->shmflg & SHM_RND) {
        attach_va &= ~(shmlba - 1);
    } else if ((attach_va & (shmlba - 1)) != 0) {
        shmat_ret = EINVAL;
        goto shmat_out;
    }
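
    /*
     * Worked example (editorial note) of the rounding rule above, assuming
     * a 16 KB page map (shmlba == 0x4000): shmaddr == 0x6000 with SHM_RND
     * is truncated to 0x4000, while the same address without SHM_RND fails
     * with EINVAL because it is not shmlba-aligned.  shmaddr == 0 passes
     * trivially and, since MAP_FIXED is not set in that case, the kernel
     * chooses the attach address.
     */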

    if (flags & MAP_FIXED) {
        vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED();
    } else {
        vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
    }

    mapped_size = 0;

    /* first reserve enough space... */
    rv = mach_vm_map_kernel(current_map(),
        &attach_va,
        map_size,
        0,
        vmk_flags,
        IPC_PORT_NULL,
        0,
        FALSE,
        VM_PROT_NONE,
        VM_PROT_NONE,
        VM_INHERIT_NONE);
    if (rv != KERN_SUCCESS) {
        goto out;
    }

    shmmap_s->va = attach_va;

    /* ... then map the shared memory over the reserved space */
    for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
        shm_handle != NULL;
        shm_handle = shm_handle->shm_handle_next) {
        vm_map_size_t chunk_size;

        assert(mapped_size < map_size);
        chunk_size = shm_handle->shm_handle_size;
        if (chunk_size > map_size - mapped_size) {
            /*
             * Partial mapping of last chunk due to
             * page size mismatch.
             */
            assert(vm_map_page_shift(current_map()) < PAGE_SHIFT);
            assert(shm_handle->shm_handle_next == NULL);
            chunk_size = map_size - mapped_size;
        }
        rv = vm_map_enter_mem_object(
            current_map(),       /* process map */
            &attach_va,          /* attach address */
            chunk_size,          /* size to map */
            (mach_vm_offset_t)0, /* alignment mask */
            VM_MAP_KERNEL_FLAGS_FIXED(.vmf_overwrite = true),
            shm_handle->shm_object,
            (mach_vm_offset_t)0,
            FALSE,
            prot,
            prot,
            VM_INHERIT_SHARE);
        if (rv != KERN_SUCCESS) {
            goto out;
        }

        mapped_size += chunk_size;
        attach_va = attach_va + chunk_size;
    }

    shmmap_s->shmid = uap->shmid;
    shmseg->u.shm_lpid = proc_getpid(p);
    shmseg->u.shm_atime = sysv_shmtime();
    shmseg->u.shm_nattch++;
    *retval = shmmap_s->va; /* XXX return -1 on error */
    shmat_ret = 0;
    goto shmat_out;
out:
    if (mapped_size > 0) {
        (void) mach_vm_deallocate(current_map(),
            shmmap_s->va,
            mapped_size);
    }
    switch (rv) {
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        shmat_ret = ENOMEM;
        break;
    case KERN_PROTECTION_FAILURE:
        shmat_ret = EACCES;
        break;
    default:
        shmat_ret = EINVAL;
        break;
    }
shmat_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmat_ret;
}

static int
oshmctl(__unused void *p, __unused void *uap, __unused void *retval)
{
    return EINVAL;
}

/*
 * Returns:  0  Success
 *           EINVAL
 *           copyout:EFAULT
 *           copyin:EFAULT
 *           ipcperm:EPERM
 *           ipcperm:EACCES
 */
int
shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval)
{
    int error;
    kauth_cred_t cred = kauth_cred_get();
    struct user_shmid_ds inbuf;
    struct shmid_kernel *shmseg;

    int shmctl_ret = 0;

    AUDIT_ARG(svipc_cmd, uap->cmd);
    AUDIT_ARG(svipc_id, uap->shmid);

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmctl_ret = shminit())) {
        goto shmctl_out;
    }

    shmseg = shm_find_segment_by_shmid(uap->shmid);
    if (shmseg == NULL) {
        shmctl_ret = EINVAL;
        goto shmctl_out;
    }

    /* XXAUDIT: This is the perms BEFORE any change by this call. This
     * may not be what is desired.
     */
    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);

#if CONFIG_MACF
    error = mac_sysvshm_check_shmctl(cred, shmseg, uap->cmd);
    if (error) {
        shmctl_ret = error;
        goto shmctl_out;
    }
#endif
    switch (uap->cmd) {
    case IPC_STAT:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_R);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }

        if (IS_64BIT_PROCESS(p)) {
            struct user_shmid_ds shmid_ds = {};
            memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds));

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds.shm_internal = USER_ADDR_NULL;

            error = copyout(&shmid_ds, uap->buf, sizeof(shmid_ds));
        } else {
            struct user32_shmid_ds shmid_ds32 = {};
            shmid_ds_64to32(&shmseg->u, &shmid_ds32);

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds32.shm_internal = (user32_addr_t)0;

            error = copyout(&shmid_ds32, uap->buf, sizeof(shmid_ds32));
        }
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        break;
    case IPC_SET:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        if (IS_64BIT_PROCESS(p)) {
            error = copyin(uap->buf, &inbuf, sizeof(struct user_shmid_ds));
        } else {
            struct user32_shmid_ds shmid_ds32;
            error = copyin(uap->buf, &shmid_ds32, sizeof(shmid_ds32));
            /* convert in place; ugly, but safe */
            shmid_ds_32to64(&shmid_ds32, &inbuf);
        }
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        shmseg->u.shm_perm.uid = inbuf.shm_perm.uid;
        shmseg->u.shm_perm.gid = inbuf.shm_perm.gid;
        shmseg->u.shm_perm.mode =
            (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
            (inbuf.shm_perm.mode & ACCESSPERMS);
        shmseg->u.shm_ctime = sysv_shmtime();
        break;
    case IPC_RMID:
        error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
        if (error) {
            shmctl_ret = error;
            goto shmctl_out;
        }
        shmseg->u.shm_perm._key = IPC_PRIVATE;
        shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
        if (shmseg->u.shm_nattch <= 0) {
            shm_deallocate_segment(shmseg);
            shm_last_free = IPCID_TO_IX(uap->shmid);
        }
        break;
#if 0
    case SHM_LOCK:
    case SHM_UNLOCK:
#endif
    default:
        shmctl_ret = EINVAL;
        goto shmctl_out;
    }
    *retval = 0;
    shmctl_ret = 0;
shmctl_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmctl_ret;
}

static int
shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval)
{
    struct shmid_kernel *shmseg;
    int error = 0;

    shmseg = &shmsegs[segnum];
    if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
        /*
         * This segment is in the process of being allocated.  Wait
         * until it's done, and look the key up again (in case the
         * allocation failed or it was freed).
         */
        shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
        error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
        if (error) {
            return error;
        }
        return EAGAIN;
    }

    /*
     * The low 9 bits of shmflg are the mode bits being requested, which
     * are the actual mode bits desired on the segment, and not in IPC_R
     * form; therefore it would be incorrect to call ipcperm() to validate
     * them; instead, we AND the existing mode with the requested mode, and
     * verify that it matches the requested mode; otherwise, we fail with
     * EACCES (access denied).
     */
    if ((shmseg->u.shm_perm.mode & mode) != mode) {
        return EACCES;
    }
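
    /*
     * Worked example (editorial note): for an existing segment with mode
     * 0640, a caller requesting 0600 succeeds (0640 & 0600 == 0600), while
     * a caller requesting 0660 fails with EACCES (0640 & 0660 == 0640),
     * because the group write bit it asked for is absent from the segment.
     */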

#if CONFIG_MACF
    error = mac_sysvshm_check_shmget(kauth_cred_get(), shmseg, uap->shmflg);
    if (error) {
        return error;
    }
#endif

    if (uap->size && uap->size > shmseg->u.shm_segsz) {
        return EINVAL;
    }

    if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL)) {
        return EEXIST;
    }

    *retval = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
    return 0;
}

static int
shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode,
    int *retval)
{
    int i, segnum, shmid;
    kauth_cred_t cred = kauth_cred_get();
    struct shmid_kernel *shmseg;
    struct shm_handle *shm_handle;
    kern_return_t kret;
    mach_vm_size_t total_size, size = 0, alloc_size;
    void *mem_object;
    struct shm_handle *shm_handle_next, **shm_handle_next_p;

    if (uap->size <= 0 ||
        uap->size < (user_size_t)shminfo.shmmin ||
        uap->size > (user_size_t)shminfo.shmmax) {
        return EINVAL;
    }
    if (shm_nused >= shminfo.shmmni) { /* any shmids left? */
        return ENOSPC;
    }
    if (mach_vm_round_page_overflow(uap->size, &total_size)) {
        return EINVAL;
    }
    if ((user_ssize_t)(shm_committed + btoc(total_size)) > shminfo.shmall) {
        return ENOMEM;
    }
    if (shm_last_free < 0) {
        for (i = 0; i < shminfo.shmmni; i++) {
            if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE) {
                break;
            }
        }
        if (i == shminfo.shmmni) {
            panic("shmseg free count inconsistent");
        }
        segnum = i;
    } else {
        segnum = shm_last_free;
        shm_last_free = -1;
    }
    shmseg = &shmsegs[segnum];

    /*
     * In case we sleep in malloc(), mark the segment present but deleted
     * so that no one else tries to create the same key.
     * XXX but we don't release the global lock !?
     */
    shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
    shmseg->u.shm_perm._key = uap->key;
    shmseg->u.shm_perm._seq = (shmseg->u.shm_perm._seq + 1) & 0x7fff;

    shm_handle_next_p = NULL;
    for (alloc_size = 0;
        alloc_size < total_size;
        alloc_size += size) {
        size = MIN(total_size - alloc_size, ANON_MAX_SIZE);
        kret = mach_make_memory_entry_64(
            VM_MAP_NULL,
            (memory_object_size_t *) &size,
            (memory_object_offset_t) 0,
            MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
            (ipc_port_t *) &mem_object, 0);
        if (kret != KERN_SUCCESS) {
            goto out;
        }

        shm_handle = kalloc_type(struct shm_handle, Z_WAITOK | Z_NOFAIL);
        shm_handle->shm_object = mem_object;
        shm_handle->shm_handle_size = size;
        shm_handle->shm_handle_next = NULL;
        if (shm_handle_next_p == NULL) {
            shmseg->u.shm_internal = CAST_USER_ADDR_T(shm_handle); /* tunnel */
        } else {
            *shm_handle_next_p = shm_handle;
        }
        shm_handle_next_p = &shm_handle->shm_handle_next;
    }

    shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);

    shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = kauth_cred_getuid(cred);
    shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = kauth_cred_getgid(cred);
    shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
        (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
    shmseg->u.shm_segsz = uap->size;
    shmseg->u.shm_cpid = proc_getpid(p);
    shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
    shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
#if CONFIG_MACF
    mac_sysvshm_label_associate(cred, shmseg);
#endif
    shmseg->u.shm_ctime = sysv_shmtime();
    /*
     * Charge the full rounded size, not just the last chunk's, so the
     * refund in shm_deallocate_segment() balances.
     */
    shm_committed += btoc(total_size);
    shm_nused++;
    AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
    if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
        /*
         * Somebody else wanted this key while we were asleep.  Wake
         * them up now.
         */
        shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
        wakeup((caddr_t)shmseg);
    }
    *retval = shmid;
    AUDIT_ARG(svipc_id, shmid);
    return 0;
out:
    if (kret != KERN_SUCCESS) {
        for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal); /* tunnel */
            shm_handle != NULL;
            shm_handle = shm_handle_next) {
            shm_handle_next = shm_handle->shm_handle_next;
            mach_memory_entry_port_release(shm_handle->shm_object);
            kfree_type(struct shm_handle, shm_handle);
        }
        shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
    }

    switch (kret) {
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return ENOMEM;
    case KERN_PROTECTION_FAILURE:
        return EACCES;
    default:
        return EINVAL;
    }
}

int
shmget(struct proc *p, struct shmget_args *uap, int32_t *retval)
{
    int segnum, mode, error;
    int shmget_ret = 0;

    /* Auditing is actually done in shmget_allocate_segment() */

    SYSV_SHM_SUBSYS_LOCK();

    if ((shmget_ret = shminit())) {
        goto shmget_out;
    }

    mode = uap->shmflg & ACCESSPERMS;
    if (uap->key != IPC_PRIVATE) {
again:
        segnum = shm_find_segment_by_key(uap->key);
        if (segnum >= 0) {
            error = shmget_existing(uap, mode, segnum, retval);
            if (error == EAGAIN) {
                goto again;
            }
            shmget_ret = error;
            goto shmget_out;
        }
        if ((uap->shmflg & IPC_CREAT) == 0) {
            shmget_ret = ENOENT;
            goto shmget_out;
        }
    }
    shmget_ret = shmget_allocate_segment(p, uap, mode, retval);
shmget_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return shmget_ret;
}

/*
 * shmsys
 *
 * Entry point for all SHM calls: shmat, oshmctl, shmdt, shmget, shmctl
 *
 * Parameters:  p       Process requesting the call
 *              uap     User argument descriptor (see below)
 *              retval  Return value of the selected shm call
 *
 * Indirect parameters: uap->which  shm call to invoke (index in array of shm calls)
 *                      uap->a2     User argument descriptor
 *
 * Returns:     0       Success
 *              !0      Not success
 *
 * Implicit returns: retval  Return value of the selected shm call
 *
 * DEPRECATED:  This interface should not be used to call the other SHM
 *              functions (shmat, oshmctl, shmdt, shmget, shmctl). The correct
 *              usage is to call the other SHM functions directly.
 */
int
shmsys(struct proc *p, struct shmsys_args *uap, int32_t *retval)
{
    /* The routine that we are dispatching already does this */

    if (uap->which >= sizeof(shmcalls) / sizeof(shmcalls[0])) {
        return EINVAL;
    }
    return (*shmcalls[uap->which])(p, &uap->a2, retval);
}
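
/*
 * A minimal userland sketch (an editorial note, not part of the kernel) of
 * the preferred direct interface; the key and size are arbitrary and error
 * handling is abbreviated:
 *
 *     #include <sys/ipc.h>
 *     #include <sys/shm.h>
 *     #include <string.h>
 *
 *     int
 *     shm_example(void)
 *     {
 *         int id = shmget((key_t)0x1234, 4096, IPC_CREAT | 0600);
 *         if (id < 0) {
 *             return -1;
 *         }
 *         void *addr = shmat(id, NULL, 0);
 *         if (addr == (void *)-1) {
 *             return -1;
 *         }
 *         memset(addr, 0, 4096);            // use the segment
 *         (void)shmdt(addr);                // detach
 *         (void)shmctl(id, IPC_RMID, NULL); // mark for removal
 *         return 0;
 *     }
 */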

/*
 * Return 0 on success, 1 on failure.
 */
int
shmfork(struct proc *p1, struct proc *p2)
{
    struct shmmap_state *shmmap_s;
    int nsegs = 0;
    int ret = 0;

    SYSV_SHM_SUBSYS_LOCK();

    if (shminit()) {
        ret = 1;
        goto shmfork_out;
    }

    struct shmmap_state *src = (struct shmmap_state *)p1->vm_shm;
    assert(src);

    /* count number of shmid entries in src */
    for (struct shmmap_state *s = src; s->shmid != SHMID_SENTINEL; s++) {
        nsegs++;
    }

    shmmap_s = kalloc_type(struct shmmap_state, nsegs + 1, Z_WAITOK);
    if (shmmap_s == NULL) {
        ret = 1;
        goto shmfork_out;
    }

    bcopy(src, (caddr_t)shmmap_s, (nsegs + 1) * sizeof(struct shmmap_state));
    p2->vm_shm = (caddr_t)shmmap_s;
    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        if (SHMID_IS_VALID(shmmap_s->shmid)) {
            shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
        }
    }

shmfork_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return ret;
}

static void
shmcleanup(struct proc *p, int deallocate)
{
    struct shmmap_state *shmmap_s;
    int nsegs = 0;

    SYSV_SHM_SUBSYS_LOCK();

    shmmap_s = (struct shmmap_state *)p->vm_shm;
    for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
        nsegs++;
        if (SHMID_IS_VALID(shmmap_s->shmid)) {
            /*
             * XXX: Should the MAC framework enforce its
             * check here as well?
             */
            shm_delete_mapping(p, shmmap_s, deallocate);
        }
    }

    kfree_type(struct shmmap_state, nsegs + 1, p->vm_shm);
    SYSV_SHM_SUBSYS_UNLOCK();
}

void
shmexit(struct proc *p)
{
    shmcleanup(p, 1);
}

/*
 * shmexec() is like shmexit(), only it doesn't delete the mappings,
 * since the old address space has already been destroyed and the new
 * one instantiated.  Instead, it just does the housekeeping work we
 * need to do to keep the System V shared memory subsystem sane.
 */
__private_extern__ void
shmexec(struct proc *p)
{
    shmcleanup(p, 0);
}

int
shminit(void)
{
    size_t sz;
    int i;

    if (!shm_inited) {
        /*
         * we store internally 64 bit, since if we didn't, we would
         * be unable to represent a segment size in excess of 32 bits
         * with the (struct shmid_ds)->shm_segsz field; also, POSIX
         * dictates this field be a size_t, which is 64 bits when
         * running 64 bit binaries.
         */
        if (os_mul_overflow(shminfo.shmmni, sizeof(struct shmid_kernel), &sz)) {
            return ENOMEM;
        }

        shmsegs = zalloc_permanent(sz, ZALIGN_PTR);
        if (shmsegs == NULL) {
            return ENOMEM;
        }
        for (i = 0; i < shminfo.shmmni; i++) {
            shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
            shmsegs[i].u.shm_perm._seq = 0;
#if CONFIG_MACF
            mac_sysvshm_label_init(&shmsegs[i]);
#endif
        }
        shm_last_free = 0;
        shm_nused = 0;
        shm_committed = 0;
        shm_inited = 1;
    }

    return 0;
}

/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
 *  struct sysctl_req *req) */
static int
sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error = 0;
    int sysctl_shminfo_ret = 0;
    int64_t saved_shmmax;
    int64_t saved_shmmin;
    int64_t saved_shmseg;
    int64_t saved_shmmni;
    int64_t saved_shmall;

    error = SYSCTL_OUT(req, arg1, sizeof(int64_t));
    if (error || req->newptr == USER_ADDR_NULL) {
        return error;
    }

    SYSV_SHM_SUBSYS_LOCK();

    /* shmmni cannot be changed after SysV SHM has been initialized */
    if (shm_inited && arg1 == &shminfo.shmmni) {
        sysctl_shminfo_ret = EPERM;
        goto sysctl_shminfo_out;
    }
    saved_shmmax = shminfo.shmmax;
    saved_shmmin = shminfo.shmmin;
    saved_shmseg = shminfo.shmseg;
    saved_shmmni = shminfo.shmmni;
    saved_shmall = shminfo.shmall;

    if ((error = SYSCTL_IN(req, arg1, sizeof(int64_t))) != 0) {
        sysctl_shminfo_ret = error;
        goto sysctl_shminfo_out;
    }

    if (arg1 == &shminfo.shmmax) {
        /* shmmax needs to be page-aligned */
        if (shminfo.shmmax & PAGE_MASK_64 || shminfo.shmmax < 0) {
            shminfo.shmmax = saved_shmmax;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmmin) {
        if (shminfo.shmmin < 0) {
            shminfo.shmmin = saved_shmmin;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmseg) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmseg > INT32_MAX || shminfo.shmseg < 0) {
            shminfo.shmseg = saved_shmseg;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmmni) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmmni > INT32_MAX || shminfo.shmmni < 0) {
            shminfo.shmmni = saved_shmmni;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    } else if (arg1 == &shminfo.shmall) {
        /* add a sanity check - 20847256 */
        if (shminfo.shmall > INT32_MAX || shminfo.shmall < 0) {
            shminfo.shmall = saved_shmall;
            sysctl_shminfo_ret = EINVAL;
            goto sysctl_shminfo_out;
        }
    }
    sysctl_shminfo_ret = 0;
sysctl_shminfo_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return sysctl_shminfo_ret;
}

static int
IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error;
    int cursor;
    union {
        struct user32_IPCS_command u32;
        struct user_IPCS_command u64;
    } ipcs = { };
    struct user32_shmid_ds shmid_ds32 = { }; /* post conversion, 32 bit version */
    struct user_shmid_ds shmid_ds = { };     /* 64 bit version */
    void *shmid_dsp;
    size_t ipcs_sz = sizeof(struct user_IPCS_command);
    size_t shmid_ds_sz = sizeof(struct user_shmid_ds);
    struct proc *p = current_proc();

    SYSV_SHM_SUBSYS_LOCK();

    if ((error = shminit())) {
        goto ipcs_shm_sysctl_out;
    }

    if (!IS_64BIT_PROCESS(p)) {
        ipcs_sz = sizeof(struct user32_IPCS_command);
        shmid_ds_sz = sizeof(struct user32_shmid_ds);
    }

    /* Copy in the command structure */
    if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
        goto ipcs_shm_sysctl_out;
    }

    if (!IS_64BIT_PROCESS(p)) { /* convert in place */
        ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
    }

    /* Let us version this interface... */
    if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
        error = EINVAL;
        goto ipcs_shm_sysctl_out;
    }

    switch (ipcs.u64.ipcs_op) {
    case IPCS_SHM_CONF: /* Obtain global configuration data */
        if (ipcs.u64.ipcs_datalen != sizeof(struct shminfo)) {
            if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */
                error = ENOMEM;
                break;
            }
            error = ERANGE;
            break;
        }
        error = copyout(&shminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
        break;

    case IPCS_SHM_ITER: /* Iterate over existing segments */
        cursor = ipcs.u64.ipcs_cursor;
        if (cursor < 0 || cursor >= shminfo.shmmni) {
            error = ERANGE;
            break;
        }
        if (ipcs.u64.ipcs_datalen != (int)shmid_ds_sz) {
            error = EINVAL;
            break;
        }
        for (; cursor < shminfo.shmmni; cursor++) {
            if (shmsegs[cursor].u.shm_perm.mode & SHMSEG_ALLOCATED) {
                break;
            }
        }
        if (cursor == shminfo.shmmni) {
            error = ENOENT;
            break;
        }

        shmid_dsp = &shmsegs[cursor]; /* default: 64 bit */

        /*
         * If necessary, convert the 64 bit kernel segment
         * descriptor to a 32 bit user one.
         */
        if (!IS_64BIT_PROCESS(p)) {
            shmid_ds_64to32(shmid_dsp, &shmid_ds32);

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds32.shm_internal = (user32_addr_t)0;

            shmid_dsp = &shmid_ds32;
        } else {
            memcpy(&shmid_ds, shmid_dsp, sizeof(shmid_ds));

            /* Clear kernel reserved pointer before copying to user space */
            shmid_ds.shm_internal = USER_ADDR_NULL;

            shmid_dsp = &shmid_ds;
        }
        error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
        if (!error) {
            /* update cursor */
            ipcs.u64.ipcs_cursor = cursor + 1;

            if (!IS_64BIT_PROCESS(p)) { /* convert in place */
                ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t, ipcs.u64.ipcs_data);
            }

            error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
        }
        break;

    default:
        error = EINVAL;
        break;
    }
ipcs_shm_sysctl_out:
    SYSV_SHM_SUBSYS_UNLOCK();
    return error;
}
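
/*
 * Hedged sketch (an editorial note) of how userland such as ipcs(1) might
 * drive the iterator above: the command structure is passed both as the
 * new value and as the output buffer of the same sysctl call, so the
 * kernel can hand back the advanced cursor.  The exact userland struct
 * name and layout come from <sys/ipcs.h>; field usage follows the handler
 * above:
 *
 *     struct shmid_ds ds;
 *     struct IPCS_command ic = {
 *         .ipcs_magic   = IPCS_MAGIC,
 *         .ipcs_op      = IPCS_SHM_ITER,
 *         .ipcs_cursor  = 0,
 *         .ipcs_datalen = sizeof(ds),
 *         .ipcs_data    = &ds,
 *     };
 *     size_t len = sizeof(ic);
 *     while (sysctlbyname("kern.sysv.ipcs.shm", &ic, &len, &ic, len) == 0) {
 *         // one SHMSEG_ALLOCATED segment per call; cursor advanced
 *     }
 *     // the loop terminates with ENOENT once the table is exhausted
 */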

SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSV");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmax, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmax, 0, &sysctl_shminfo, "Q", "shmmax");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmin, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmin, 0, &sysctl_shminfo, "Q", "shmmin");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmni, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmmni, 0, &sysctl_shminfo, "Q", "shmmni");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmseg, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmseg, 0, &sysctl_shminfo, "Q", "shmseg");

SYSCTL_PROC(_kern_sysv, OID_AUTO, shmall, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
    &shminfo.shmall, 0, &sysctl_shminfo, "Q", "shmall");
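
/*
 * These tunables can be inspected and, subject to the validation in
 * sysctl_shminfo() above, adjusted from the shell, e.g.:
 *
 *     sysctl kern.sysv.shmmax              # read the current limit
 *     sysctl -w kern.sysv.shmmax=16777216  # new value must be page-aligned
 *     sysctl -w kern.sysv.shmmni=64        # EPERM once the subsystem is up
 */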

SYSCTL_NODE(_kern_sysv, OID_AUTO, ipcs, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSVIPCS");

SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, shm, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
    0, 0, IPCS_shm_sysctl,
    "S,IPCS_shm_command",
    "ipcs shm command interface");
#endif /* SYSV_SHM */

/* DSEP Review Done pl-20051108-v02 @2743,@2908,@2913,@3009 */
