1/*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* $NetBSD: sysv_shm.c,v 1.23 1994/07/04 23:25:12 glass Exp $ */
29
30/*
31 * Copyright (c) 1994 Adam Glass and Charles Hannum. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 * must display the following acknowledgement:
43 * This product includes software developed by Adam Glass and Charles
44 * Hannum.
45 * 4. The names of the authors may not be used to endorse or promote products
46 * derived from this software without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
49 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
50 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
51 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
52 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
53 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
54 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
55 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
56 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
57 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 */
59/*
60 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
61 * support for mandatory and extensible security protections. This notice
62 * is included in support of clause 2.2 (b) of the Apple Public License,
63 * Version 2.0.
64 * Copyright (c) 2005-2006 SPARTA, Inc.
65*/
66
67
68#include <sys/appleapiopts.h>
69#include <sys/param.h>
70#include <sys/systm.h>
71#include <sys/kernel.h>
72#include <sys/shm_internal.h>
73#include <sys/proc_internal.h>
74#include <sys/kauth.h>
75#include <sys/malloc.h>
76#include <sys/mman.h>
77#include <sys/stat.h>
78#include <sys/sysctl.h>
79#include <sys/ipcs.h>
80#include <sys/sysent.h>
81#include <sys/sysproto.h>
82#if CONFIG_MACF
83#include <security/mac_framework.h>
84#endif
85
86#include <security/audit/audit.h>
87
88#include <mach/mach_types.h>
89#include <mach/vm_inherit.h>
90#include <mach/vm_map.h>
91
92#include <mach/mach_vm.h>
93
94#include <vm/vm_map.h>
95#include <vm/vm_protos.h>
96#include <vm/vm_kern.h>
97
98#include <kern/locks.h>
99#include <os/overflow.h>
100
101/* MPRINTF() emits MAC debugging output when built with CONFIG_MACF_DEBUG. */
102/* #define MAC_DEBUG */
103#if CONFIG_MACF_DEBUG
104#define MPRINTF(a) printf a
105#else
106#define MPRINTF(a)
107#endif
108
109#if SYSV_SHM
110static int shminit(void);
111
112static lck_grp_t *sysv_shm_subsys_lck_grp;
113static lck_grp_attr_t *sysv_shm_subsys_lck_grp_attr;
114static lck_attr_t *sysv_shm_subsys_lck_attr;
115static lck_mtx_t sysv_shm_subsys_mutex;
116
117#define SYSV_SHM_SUBSYS_LOCK() lck_mtx_lock(&sysv_shm_subsys_mutex)
118#define SYSV_SHM_SUBSYS_UNLOCK() lck_mtx_unlock(&sysv_shm_subsys_mutex)
119
120static int oshmctl(void *p, void *uap, void *retval);
121static int shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode, int * retval);
122static int shmget_existing(struct shmget_args *uap, int mode, int segnum, int * retval);
123static void shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out);
124static void shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out);
125
126/* XXX casting to (sy_call_t *) is bogus, as usual. */
127static sy_call_t *shmcalls[] = {
128 (sy_call_t *)shmat, (sy_call_t *)oshmctl,
129 (sy_call_t *)shmdt, (sy_call_t *)shmget,
130 (sy_call_t *)shmctl
131};
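/*
 * shmsys() dispatches on uap->which as an index into this table, so
 * which == 0 invokes shmat, 1 oshmctl, 2 shmdt, 3 shmget and 4 shmctl
 * (see shmsys() below).
 */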
132
133#define SHMSEG_FREE 0x0200
134#define SHMSEG_REMOVED 0x0400
135#define SHMSEG_ALLOCATED 0x0800
136#define SHMSEG_WANTED 0x1000
137
138static int shm_last_free, shm_nused, shm_committed;
139struct shmid_kernel *shmsegs; /* 64 bit version */
140static int shm_inited = 0;
141
142/*
143 * Since anonymous memory chunks are limited to ANON_MAX_SIZE bytes,
144 * we have to keep a list of chunks when we want to handle a shared memory
145 * segment bigger than ANON_MAX_SIZE.
146 * Each chunk points to a VM named entry of up to ANON_MAX_SIZE bytes
147 * of anonymous memory.
148 */
149struct shm_handle {
150 void * shm_object; /* named entry for this chunk */
151 memory_object_size_t shm_handle_size; /* size of this chunk */
152 struct shm_handle *shm_handle_next; /* next chunk */
153};
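/*
 * Illustrative sketch: a segment whose rounded size exceeds ANON_MAX_SIZE
 * is represented as a chain of these chunks hanging off
 * shmid_kernel.u.shm_internal.  For example, a request of roughly
 * 2.5 * ANON_MAX_SIZE bytes would be built by shmget_allocate_segment()
 * as three shm_handle entries of ANON_MAX_SIZE, ANON_MAX_SIZE and about
 * 0.5 * ANON_MAX_SIZE bytes, each owning one named memory entry and
 * linked through shm_handle_next, terminated by NULL.
 */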
154
155struct shmmap_state {
156 mach_vm_address_t va; /* user address */
157 int shmid; /* segment id */
158};
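/*
 * Per-process attach table (p->vm_shm), created lazily by shmat() and
 * copied at fork by shmfork(): an array of shminfo.shmseg entries plus a
 * terminator.  Free slots hold SHMID_UNALLOCATED, attached slots hold the
 * segment's shmid and its attach address, and the array always ends with
 * an entry whose shmid is SHMID_SENTINEL.
 */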
159
160static void shm_deallocate_segment(struct shmid_kernel *);
161static int shm_find_segment_by_key(key_t);
162static struct shmid_kernel *shm_find_segment_by_shmid(int);
163static int shm_delete_mapping(struct proc *, struct shmmap_state *, int);
164
165#ifdef __APPLE_API_PRIVATE
166#define DEFAULT_SHMMAX (4 * 1024 * 1024)
167#define DEFAULT_SHMMIN 1
168#define DEFAULT_SHMMNI 32
169#define DEFAULT_SHMSEG 8
170#define DEFAULT_SHMALL 1024
171
172struct shminfo shminfo = {
173 DEFAULT_SHMMAX,
174 DEFAULT_SHMMIN,
175 DEFAULT_SHMMNI,
176 DEFAULT_SHMSEG,
177 DEFAULT_SHMALL
178};
179
180#define SHMID_IS_VALID(x) ((x) >= 0)
181#define SHMID_UNALLOCATED (-1)
182#define SHMID_SENTINEL (-2)
183
184#endif /* __APPLE_API_PRIVATE */
185
186void sysv_shm_lock_init(void);
187
188static __inline__ time_t
189sysv_shmtime(void)
190{
191 struct timeval tv;
192 microtime(&tv);
193 return (tv.tv_sec);
194}
195
196/*
197 * This conversion is safe: if we are converting for a 32 bit process,
198 * then the value of its (struct shmid_ds)->shm_segsz will never exceed 4G.
199 *
200 * NOTE: Source and target may *NOT* overlap! (target is smaller)
201 */
202static void
203shmid_ds_64to32(struct user_shmid_ds *in, struct user32_shmid_ds *out)
204{
205 out->shm_perm = in->shm_perm;
206 out->shm_segsz = in->shm_segsz;
207 out->shm_lpid = in->shm_lpid;
208 out->shm_cpid = in->shm_cpid;
209 out->shm_nattch = in->shm_nattch;
210 out->shm_atime = in->shm_atime;
211 out->shm_dtime = in->shm_dtime;
212 out->shm_ctime = in->shm_ctime;
213 out->shm_internal = CAST_DOWN_EXPLICIT(int,in->shm_internal);
214}
215
216/*
217 * NOTE: Source and target are permitted to overlap! (source is smaller);
218 * this works because we copy fields in order from the end of the struct to
219 * the beginning.
220 */
221static void
222shmid_ds_32to64(struct user32_shmid_ds *in, struct user_shmid_ds *out)
223{
224 out->shm_internal = in->shm_internal;
225 out->shm_ctime = in->shm_ctime;
226 out->shm_dtime = in->shm_dtime;
227 out->shm_atime = in->shm_atime;
228 out->shm_nattch = in->shm_nattch;
229 out->shm_cpid = in->shm_cpid;
230 out->shm_lpid = in->shm_lpid;
231 out->shm_segsz = in->shm_segsz;
232 out->shm_perm = in->shm_perm;
233}
234
235
236static int
237shm_find_segment_by_key(key_t key)
238{
239 int i;
240
241 for (i = 0; i < shminfo.shmmni; i++)
242 if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
243 shmsegs[i].u.shm_perm._key == key)
244 return i;
245 return -1;
246}
247
248static struct shmid_kernel *
249shm_find_segment_by_shmid(int shmid)
250{
251 int segnum;
252 struct shmid_kernel *shmseg;
253
254 segnum = IPCID_TO_IX(shmid);
255 if (segnum < 0 || segnum >= shminfo.shmmni)
256 return NULL;
257 shmseg = &shmsegs[segnum];
258 if ((shmseg->u.shm_perm.mode & (SHMSEG_ALLOCATED | SHMSEG_REMOVED))
259 != SHMSEG_ALLOCATED ||
260 shmseg->u.shm_perm._seq != IPCID_TO_SEQ(shmid))
261 return NULL;
262 return shmseg;
263}
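/*
 * A shmid handed to user space packs both the slot index and the slot's
 * sequence number (via the IXSEQ_TO_IPCID()/IPCID_TO_IX()/IPCID_TO_SEQ()
 * macros from sys/ipc.h, conventionally (seq << 16) | index).  Because
 * shmget_allocate_segment() bumps _seq whenever a slot is reused, a stale
 * shmid naming a recycled slot fails the _seq comparison above and is
 * rejected instead of silently referring to the new segment.
 */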
264
265static void
266shm_deallocate_segment(struct shmid_kernel *shmseg)
267{
268 struct shm_handle *shm_handle, *shm_handle_next;
269 mach_vm_size_t size;
270
271 for (shm_handle = CAST_DOWN(void *,shmseg->u.shm_internal); /* tunnel */
272 shm_handle != NULL;
273 shm_handle = shm_handle_next) {
274 shm_handle_next = shm_handle->shm_handle_next;
275 mach_memory_entry_port_release(shm_handle->shm_object);
276 FREE((caddr_t) shm_handle, M_SHM);
277 }
278 shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
279 size = mach_vm_round_page(shmseg->u.shm_segsz);
280 shm_committed -= btoc(size);
281 shm_nused--;
282 shmseg->u.shm_perm.mode = SHMSEG_FREE;
283#if CONFIG_MACF
284 /* Reset the MAC label */
285 mac_sysvshm_label_recycle(shmseg);
286#endif
287}
288
289static int
290shm_delete_mapping(__unused struct proc *p, struct shmmap_state *shmmap_s,
291 int deallocate)
292{
293 struct shmid_kernel *shmseg;
294 int segnum, result;
295 mach_vm_size_t size;
296
297 segnum = IPCID_TO_IX(shmmap_s->shmid);
298 shmseg = &shmsegs[segnum];
299 size = mach_vm_round_page(shmseg->u.shm_segsz); /* XXX done for us? */
300 if (deallocate) {
301 result = mach_vm_deallocate(current_map(), shmmap_s->va, size);
302 if (result != KERN_SUCCESS)
303 return EINVAL;
304 }
305 shmmap_s->shmid = SHMID_UNALLOCATED;
306 shmseg->u.shm_dtime = sysv_shmtime();
307 if ((--shmseg->u.shm_nattch <= 0) &&
308 (shmseg->u.shm_perm.mode & SHMSEG_REMOVED)) {
309 shm_deallocate_segment(shmseg);
310 shm_last_free = segnum;
311 }
312 return 0;
313}
314
315int
316shmdt(struct proc *p, struct shmdt_args *uap, int32_t *retval)
317{
318#if CONFIG_MACF
319 struct shmid_kernel *shmsegptr;
320#endif
321 struct shmmap_state *shmmap_s;
322 int i;
323 int shmdtret = 0;
324
325 AUDIT_ARG(svipc_addr, uap->shmaddr);
326
327 SYSV_SHM_SUBSYS_LOCK();
328
329 if ((shmdtret = shminit())) {
330 goto shmdt_out;
331 }
332
333 shmmap_s = (struct shmmap_state *)p->vm_shm;
334 if (shmmap_s == NULL) {
335 shmdtret = EINVAL;
336 goto shmdt_out;
337 }
338
339 for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
340 if (SHMID_IS_VALID(shmmap_s->shmid) &&
341 shmmap_s->va == (mach_vm_offset_t)uap->shmaddr) {
342 break;
343 }
344 }
345
346 if (!SHMID_IS_VALID(shmmap_s->shmid)) {
347 shmdtret = EINVAL;
348 goto shmdt_out;
349 }
350
351#if CONFIG_MACF
352 /*
353 * XXX: It might be useful to move this into the shm_delete_mapping
354 * function
355 */
356 shmsegptr = &shmsegs[IPCID_TO_IX(shmmap_s->shmid)];
357 shmdtret = mac_sysvshm_check_shmdt(kauth_cred_get(), shmsegptr);
358 if (shmdtret)
359 goto shmdt_out;
360#endif
361 i = shm_delete_mapping(p, shmmap_s, 1);
362
363 if (i == 0)
364 *retval = 0;
365 shmdtret = i;
366shmdt_out:
367 SYSV_SHM_SUBSYS_UNLOCK();
368 return shmdtret;
369}
370
371int
372shmat(struct proc *p, struct shmat_args *uap, user_addr_t *retval)
373{
374 int error, i, flags;
375 struct shmid_kernel *shmseg;
376 struct shmmap_state *shmmap_s = NULL;
377 struct shm_handle *shm_handle;
378 mach_vm_address_t attach_va; /* attach address in/out */
379 mach_vm_size_t map_size; /* size of map entry */
380 mach_vm_size_t mapped_size;
381 vm_prot_t prot;
382 size_t size;
383 kern_return_t rv;
384 int shmat_ret;
385 int vm_flags;
386
387 shmat_ret = 0;
388
389 AUDIT_ARG(svipc_id, uap->shmid);
390 AUDIT_ARG(svipc_addr, uap->shmaddr);
391
392 SYSV_SHM_SUBSYS_LOCK();
393
394 if ((shmat_ret = shminit())) {
395 goto shmat_out;
396 }
397
398 shmmap_s = (struct shmmap_state *)p->vm_shm;
399 if (shmmap_s == NULL) {
400 /* lazily allocate the shm map */
401
402 int nsegs = shminfo.shmseg;
403 if (nsegs <= 0) {
404 shmat_ret = EMFILE;
405 goto shmat_out;
406 }
407
408 /* +1 for the sentinel */
409 if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
410 shmat_ret = ENOMEM;
411 goto shmat_out;
412 }
413
414 MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK | M_NULL);
415 if (shmmap_s == NULL) {
416 shmat_ret = ENOMEM;
417 goto shmat_out;
418 }
419
420 /* initialize the entries */
421 for (i = 0; i < nsegs; i++) {
422 shmmap_s[i].shmid = SHMID_UNALLOCATED;
423 }
424 shmmap_s[i].shmid = SHMID_SENTINEL;
425
426 p->vm_shm = (caddr_t)shmmap_s;
427 }
428
429 shmseg = shm_find_segment_by_shmid(uap->shmid);
430 if (shmseg == NULL) {
431 shmat_ret = EINVAL;
432 goto shmat_out;
433 }
434
435 AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
436 error = ipcperm(kauth_cred_get(), &shmseg->u.shm_perm,
437 (uap->shmflg & SHM_RDONLY) ? IPC_R : IPC_R|IPC_W);
438 if (error) {
439 shmat_ret = error;
440 goto shmat_out;
441 }
442
443#if CONFIG_MACF
444 error = mac_sysvshm_check_shmat(kauth_cred_get(), shmseg, uap->shmflg);
445 if (error) {
446 shmat_ret = error;
447 goto shmat_out;
448 }
449#endif
450
451 /* find a free shmid */
452 while (SHMID_IS_VALID(shmmap_s->shmid)) {
453 shmmap_s++;
454 }
455 if (shmmap_s->shmid != SHMID_UNALLOCATED) {
456 /* no free shmids */
457 shmat_ret = EMFILE;
458 goto shmat_out;
459 }
460
461 map_size = mach_vm_round_page(shmseg->u.shm_segsz);
462 prot = VM_PROT_READ;
463 if ((uap->shmflg & SHM_RDONLY) == 0)
464 prot |= VM_PROT_WRITE;
465 flags = MAP_ANON | MAP_SHARED;
466 if (uap->shmaddr)
467 flags |= MAP_FIXED;
468
469 attach_va = (mach_vm_address_t)uap->shmaddr;
470 if (uap->shmflg & SHM_RND)
471 attach_va &= ~(SHMLBA-1);
472 else if ((attach_va & (SHMLBA-1)) != 0) {
473 shmat_ret = EINVAL;
474 goto shmat_out;
475 }
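/*
 * Worked example (hypothetical addresses, assuming SHMLBA is 4096):
 * shmaddr 0x20001003 with SHM_RND is rounded down and attached at
 * 0x20001000, while the same address without SHM_RND fails the
 * alignment check above with EINVAL.
 */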
476
477 if (flags & MAP_FIXED) {
478 vm_flags = VM_FLAGS_FIXED;
479 } else {
480 vm_flags = VM_FLAGS_ANYWHERE;
481 }
482
483 mapped_size = 0;
484
485 /* first reserve enough space... */
486 rv = mach_vm_map_kernel(current_map(),
487 &attach_va,
488 map_size,
489 0,
490 vm_flags,
491 VM_MAP_KERNEL_FLAGS_NONE,
492 VM_KERN_MEMORY_NONE,
493 IPC_PORT_NULL,
494 0,
495 FALSE,
496 VM_PROT_NONE,
497 VM_PROT_NONE,
498 VM_INHERIT_NONE);
499 if (rv != KERN_SUCCESS) {
500 goto out;
501 }
502
503 shmmap_s->va = attach_va;
504
505 /* ... then map the shared memory over the reserved space */
506 for (shm_handle = CAST_DOWN(void *, shmseg->u.shm_internal);/* tunnel */
507 shm_handle != NULL;
508 shm_handle = shm_handle->shm_handle_next) {
509
510 rv = vm_map_enter_mem_object(
511 current_map(), /* process map */
512 &attach_va, /* attach address */
513 shm_handle->shm_handle_size, /* segment size */
514 (mach_vm_offset_t)0, /* alignment mask */
515 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
516 VM_MAP_KERNEL_FLAGS_NONE,
517 VM_KERN_MEMORY_NONE,
518 shm_handle->shm_object,
519 (mach_vm_offset_t)0,
520 FALSE,
521 prot,
522 prot,
523 VM_INHERIT_SHARE);
524 if (rv != KERN_SUCCESS)
525 goto out;
526
527 mapped_size += shm_handle->shm_handle_size;
528 attach_va = attach_va + shm_handle->shm_handle_size;
529 }
530
531 shmmap_s->shmid = uap->shmid;
532 shmseg->u.shm_lpid = p->p_pid;
533 shmseg->u.shm_atime = sysv_shmtime();
534 shmseg->u.shm_nattch++;
535 *retval = shmmap_s->va; /* XXX return -1 on error */
536 shmat_ret = 0;
537 goto shmat_out;
538out:
539 if (mapped_size > 0) {
540 (void) mach_vm_deallocate(current_map(),
541 shmmap_s->va,
542 mapped_size);
543 }
544 switch (rv) {
545 case KERN_INVALID_ADDRESS:
546 case KERN_NO_SPACE:
547 shmat_ret = ENOMEM;
548 break;
549 case KERN_PROTECTION_FAILURE:
550 shmat_ret = EACCES;
551 break;
552 default:
553 shmat_ret = EINVAL;
554 break;
555 }
556shmat_out:
557 SYSV_SHM_SUBSYS_UNLOCK();
558 return shmat_ret;
559}
560
561static int
562oshmctl(__unused void *p, __unused void *uap, __unused void *retval)
563{
564 return EINVAL;
565}
566
567/*
568 * Returns: 0 Success
569 * EINVAL
570 * copyout:EFAULT
571 * copyin:EFAULT
572 * ipcperm:EPERM
573 * ipcperm:EACCES
574 */
575int
576shmctl(__unused struct proc *p, struct shmctl_args *uap, int32_t *retval)
577{
578 int error;
579 kauth_cred_t cred = kauth_cred_get();
580 struct user_shmid_ds inbuf;
581 struct shmid_kernel *shmseg;
582
583 int shmctl_ret = 0;
584
585 AUDIT_ARG(svipc_cmd, uap->cmd);
586 AUDIT_ARG(svipc_id, uap->shmid);
587
588 SYSV_SHM_SUBSYS_LOCK();
589
590 if ((shmctl_ret = shminit())) {
591 goto shmctl_out;
592 }
593
594 shmseg = shm_find_segment_by_shmid(uap->shmid);
595 if (shmseg == NULL) {
596 shmctl_ret = EINVAL;
597 goto shmctl_out;
598 }
599
600 /* XXAUDIT: This is the perms BEFORE any change by this call. This
601 * may not be what is desired.
602 */
603 AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
604
605#if CONFIG_MACF
606 error = mac_sysvshm_check_shmctl(cred, shmseg, uap->cmd);
607 if (error) {
608 shmctl_ret = error;
609 goto shmctl_out;
610 }
611#endif
612 switch (uap->cmd) {
613 case IPC_STAT:
614 error = ipcperm(cred, &shmseg->u.shm_perm, IPC_R);
615 if (error) {
616 shmctl_ret = error;
617 goto shmctl_out;
618 }
619
620 if (IS_64BIT_PROCESS(p)) {
621 struct user_shmid_ds shmid_ds;
622 memcpy(&shmid_ds, &shmseg->u, sizeof(struct user_shmid_ds));
623
624 /* Clear kernel reserved pointer before copying to user space */
625 shmid_ds.shm_internal = USER_ADDR_NULL;
626
627 error = copyout(&shmid_ds, uap->buf, sizeof(shmid_ds));
628 } else {
629 struct user32_shmid_ds shmid_ds32 = {};
630 shmid_ds_64to32(&shmseg->u, &shmid_ds32);
631
632 /* Clear kernel reserved pointer before copying to user space */
633 shmid_ds32.shm_internal = (user32_addr_t)0;
634
635 error = copyout(&shmid_ds32, uap->buf, sizeof(shmid_ds32));
636 }
637 if (error) {
638 shmctl_ret = error;
639 goto shmctl_out;
640 }
641 break;
642 case IPC_SET:
643 error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
644 if (error) {
645 shmctl_ret = error;
646 goto shmctl_out;
647 }
648 if (IS_64BIT_PROCESS(p)) {
649 error = copyin(uap->buf, &inbuf, sizeof(struct user_shmid_ds));
650 } else {
651 struct user32_shmid_ds shmid_ds32;
652 error = copyin(uap->buf, &shmid_ds32, sizeof(shmid_ds32));
653 /* convert the user's 32-bit structure into the 64-bit in-kernel form */
654 shmid_ds_32to64(&shmid_ds32, &inbuf);
655 }
656 if (error) {
657 shmctl_ret = error;
658 goto shmctl_out;
659 }
660 shmseg->u.shm_perm.uid = inbuf.shm_perm.uid;
661 shmseg->u.shm_perm.gid = inbuf.shm_perm.gid;
662 shmseg->u.shm_perm.mode =
663 (shmseg->u.shm_perm.mode & ~ACCESSPERMS) |
664 (inbuf.shm_perm.mode & ACCESSPERMS);
665 shmseg->u.shm_ctime = sysv_shmtime();
666 break;
667 case IPC_RMID:
668 error = ipcperm(cred, &shmseg->u.shm_perm, IPC_M);
669 if (error) {
670 shmctl_ret = error;
671 goto shmctl_out;
672 }
673 shmseg->u.shm_perm._key = IPC_PRIVATE;
674 shmseg->u.shm_perm.mode |= SHMSEG_REMOVED;
675 if (shmseg->u.shm_nattch <= 0) {
676 shm_deallocate_segment(shmseg);
677 shm_last_free = IPCID_TO_IX(uap->shmid);
678 }
679 break;
680#if 0
681 case SHM_LOCK:
682 case SHM_UNLOCK:
683#endif
684 default:
685 shmctl_ret = EINVAL;
686 goto shmctl_out;
687 }
688 *retval = 0;
689 shmctl_ret = 0;
690shmctl_out:
691 SYSV_SHM_SUBSYS_UNLOCK();
692 return shmctl_ret;
693}
694
695static int
696shmget_existing(struct shmget_args *uap, int mode, int segnum, int *retval)
697{
698 struct shmid_kernel *shmseg;
699 int error = 0;
700
701 shmseg = &shmsegs[segnum];
702 if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
703 /*
704 * This segment is in the process of being allocated. Wait
705 * until it's done, and look the key up again (in case the
706 * allocation failed or it was freed).
707 */
708 shmseg->u.shm_perm.mode |= SHMSEG_WANTED;
709 error = tsleep((caddr_t)shmseg, PLOCK | PCATCH, "shmget", 0);
710 if (error)
711 return error;
712 return EAGAIN;
713 }
714
715 /*
716 * The low 9 bits of shmflg are the permission bits being requested: the
717 * actual mode bits desired on the segment, not IPC_R-style flags, so it
718 * would be incorrect to call ipcperm() to validate them. Instead, we
719 * AND the existing mode with the requested mode and verify that the
720 * result matches the requested mode; otherwise, we fail with
721 * EACCES (access denied).
722 */
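/*
 * For example (octal modes): an existing segment with mode 0644 satisfies
 * a shmget() request for 0600 or 0444, since ANDing either with 0644
 * leaves the request intact, but a request for 0666 yields 0644 != 0666
 * and fails with EACCES.
 */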
723 if ((shmseg->u.shm_perm.mode & mode) != mode)
724 return EACCES;
725
726#if CONFIG_MACF
727 error = mac_sysvshm_check_shmget(kauth_cred_get(), shmseg, uap->shmflg);
728 if (error)
729 return (error);
730#endif
731
732 if (uap->size && uap->size > shmseg->u.shm_segsz)
733 return EINVAL;
734
735 if ((uap->shmflg & (IPC_CREAT | IPC_EXCL)) == (IPC_CREAT | IPC_EXCL))
736 return EEXIST;
737
738 *retval = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
739 return 0;
740}
741
742static int
743shmget_allocate_segment(struct proc *p, struct shmget_args *uap, int mode,
744 int *retval)
745{
746 int i, segnum, shmid;
747 kauth_cred_t cred = kauth_cred_get();
748 struct shmid_kernel *shmseg;
749 struct shm_handle *shm_handle;
750 kern_return_t kret;
751 mach_vm_size_t total_size, size, alloc_size;
752 void * mem_object;
753 struct shm_handle *shm_handle_next, **shm_handle_next_p;
754
755 if (uap->size <= 0 ||
756 uap->size < (user_size_t)shminfo.shmmin ||
757 uap->size > (user_size_t)shminfo.shmmax) {
758 return EINVAL;
759 }
760 if (shm_nused >= shminfo.shmmni) /* any shmids left? */
761 return ENOSPC;
762 if (mach_vm_round_page_overflow(uap->size, &total_size)) {
763 return EINVAL;
764 }
765 if ((user_ssize_t)(shm_committed + btoc(total_size)) > shminfo.shmall)
766 return ENOMEM;
767 if (shm_last_free < 0) {
768 for (i = 0; i < shminfo.shmmni; i++)
769 if (shmsegs[i].u.shm_perm.mode & SHMSEG_FREE)
770 break;
771 if (i == shminfo.shmmni)
772 panic("shmseg free count inconsistent");
773 segnum = i;
774 } else {
775 segnum = shm_last_free;
776 shm_last_free = -1;
777 }
778 shmseg = &shmsegs[segnum];
779
780 /*
781 * In case we sleep in malloc(), mark the segment present but deleted
782 * so that no one else tries to create the same key.
783 * XXX but we don't release the global lock !?
784 */
785 shmseg->u.shm_perm.mode = SHMSEG_ALLOCATED | SHMSEG_REMOVED;
786 shmseg->u.shm_perm._key = uap->key;
787 shmseg->u.shm_perm._seq = (shmseg->u.shm_perm._seq + 1) & 0x7fff;
788
789 shm_handle_next_p = NULL;
790 for (alloc_size = 0;
791 alloc_size < total_size;
792 alloc_size += size) {
793 size = MIN(total_size - alloc_size, ANON_MAX_SIZE);
794 kret = mach_make_memory_entry_64(
795 VM_MAP_NULL,
796 (memory_object_size_t *) &size,
797 (memory_object_offset_t) 0,
798 MAP_MEM_NAMED_CREATE | VM_PROT_DEFAULT,
799 (ipc_port_t *) &mem_object, 0);
800 if (kret != KERN_SUCCESS)
801 goto out;
802
803 MALLOC(shm_handle, struct shm_handle *, sizeof(struct shm_handle), M_SHM, M_WAITOK);
804 if (shm_handle == NULL) {
805 kret = KERN_NO_SPACE;
806 mach_memory_entry_port_release(mem_object);
807 mem_object = NULL;
808 goto out;
809 }
810 shm_handle->shm_object = mem_object;
811 shm_handle->shm_handle_size = size;
812 shm_handle->shm_handle_next = NULL;
813 if (shm_handle_next_p == NULL) {
814 shmseg->u.shm_internal = CAST_USER_ADDR_T(shm_handle);/* tunnel */
815 } else {
816 *shm_handle_next_p = shm_handle;
817 }
818 shm_handle_next_p = &shm_handle->shm_handle_next;
819 }
820
821 shmid = IXSEQ_TO_IPCID(segnum, shmseg->u.shm_perm);
822
823 shmseg->u.shm_perm.cuid = shmseg->u.shm_perm.uid = kauth_cred_getuid(cred);
824 shmseg->u.shm_perm.cgid = shmseg->u.shm_perm.gid = kauth_cred_getgid(cred);
825 shmseg->u.shm_perm.mode = (shmseg->u.shm_perm.mode & SHMSEG_WANTED) |
826 (mode & ACCESSPERMS) | SHMSEG_ALLOCATED;
827 shmseg->u.shm_segsz = uap->size;
828 shmseg->u.shm_cpid = p->p_pid;
829 shmseg->u.shm_lpid = shmseg->u.shm_nattch = 0;
830 shmseg->u.shm_atime = shmseg->u.shm_dtime = 0;
831#if CONFIG_MACF
832 mac_sysvshm_label_associate(cred, shmseg);
833#endif
834 shmseg->u.shm_ctime = sysv_shmtime();
835 shm_committed += btoc(total_size); /* account for every chunk, matching the btoc(total_size) admission check above */
836 shm_nused++;
837 AUDIT_ARG(svipc_perm, &shmseg->u.shm_perm);
838 if (shmseg->u.shm_perm.mode & SHMSEG_WANTED) {
839 /*
840 * Somebody else wanted this key while we were asleep. Wake
841 * them up now.
842 */
843 shmseg->u.shm_perm.mode &= ~SHMSEG_WANTED;
844 wakeup((caddr_t)shmseg);
845 }
846 *retval = shmid;
847 AUDIT_ARG(svipc_id, shmid);
848 return 0;
849out:
850 if (kret != KERN_SUCCESS) {
851 for (shm_handle = CAST_DOWN(void *,shmseg->u.shm_internal); /* tunnel */
852 shm_handle != NULL;
853 shm_handle = shm_handle_next) {
854 shm_handle_next = shm_handle->shm_handle_next;
855 mach_memory_entry_port_release(shm_handle->shm_object);
856 FREE((caddr_t) shm_handle, M_SHM);
857 }
858 shmseg->u.shm_internal = USER_ADDR_NULL; /* tunnel */
859 }
860
861 switch (kret) {
862 case KERN_INVALID_ADDRESS:
863 case KERN_NO_SPACE:
864 return (ENOMEM);
865 case KERN_PROTECTION_FAILURE:
866 return (EACCES);
867 default:
868 return (EINVAL);
869 }
870
871}
872
873int
874shmget(struct proc *p, struct shmget_args *uap, int32_t *retval)
875{
876 int segnum, mode, error;
877 int shmget_ret = 0;
878
879 /* Auditing is actually done in shmget_allocate_segment() */
880
881 SYSV_SHM_SUBSYS_LOCK();
882
883 if ((shmget_ret = shminit())) {
884 goto shmget_out;
885 }
886
887 mode = uap->shmflg & ACCESSPERMS;
888 if (uap->key != IPC_PRIVATE) {
889 again:
890 segnum = shm_find_segment_by_key(uap->key);
891 if (segnum >= 0) {
892 error = shmget_existing(uap, mode, segnum, retval);
893 if (error == EAGAIN)
894 goto again;
895 shmget_ret = error;
896 goto shmget_out;
897 }
898 if ((uap->shmflg & IPC_CREAT) == 0) {
899 shmget_ret = ENOENT;
900 goto shmget_out;
901 }
902 }
903 shmget_ret = shmget_allocate_segment(p, uap, mode, retval);
904shmget_out:
905 SYSV_SHM_SUBSYS_UNLOCK();
906 return shmget_ret;
907}
908
909/*
910 * shmsys
911 *
912 * Entry point for all SHM calls: shmat, oshmctl, shmdt, shmget, shmctl
913 *
914 * Parameters: p Process requesting the call
915 * uap User argument descriptor (see below)
916 * retval Return value of the selected shm call
917 *
918 * Indirect parameters: uap->which msg call to invoke (index in array of shm calls)
919 * uap->a2 User argument descriptor
920 *
921 * Returns: 0 Success
922 * !0 Not success
923 *
924 * Implicit returns: retval Return value of the selected shm call
925 *
926 * DEPRECATED: This interface should not be used to call the other SHM
927 * functions (shmat, oshmctl, shmdt, shmget, shmctl). The correct
928 * usage is to call the other SHM functions directly.
929 */
930int
931shmsys(struct proc *p, struct shmsys_args *uap, int32_t *retval)
932{
933
934 /* Argument auditing is handled by the routine we dispatch to */
935
936 if (uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
937 return EINVAL;
938 return ((*shmcalls[uap->which])(p, &uap->a2, retval));
939}
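/*
 * Since shmsys() above is deprecated, user code calls these functions
 * directly through libc.  A minimal user-space sketch (illustrative only;
 * error handling omitted, key and size are arbitrary example values):
 *
 *     #include <sys/ipc.h>
 *     #include <sys/shm.h>
 *
 *     key_t key = 0x1234;
 *     int shmid = shmget(key, 65536, IPC_CREAT | 0600);
 *     void *addr = shmat(shmid, NULL, 0);
 *     ... use the 64 KB mapping ...
 *     shmdt(addr);
 *     shmctl(shmid, IPC_RMID, NULL);
 */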
940
941/*
942 * Return 0 on success, 1 on failure.
943 */
944int
945shmfork(struct proc *p1, struct proc *p2)
946{
947 struct shmmap_state *shmmap_s;
948 size_t size;
949 int nsegs = 0;
950 int ret = 0;
951
952 SYSV_SHM_SUBSYS_LOCK();
953
954 if (shminit()) {
955 ret = 1;
956 goto shmfork_out;
957 }
958
959 struct shmmap_state *src = (struct shmmap_state *)p1->vm_shm;
960 assert(src);
961
962 /* count number of shmid entries in src */
963 for (struct shmmap_state *s = src; s->shmid != SHMID_SENTINEL; s++) {
964 nsegs++;
965 }
966
967 if (os_add_and_mul_overflow(nsegs, 1, sizeof(struct shmmap_state), &size)) {
968 ret = 1;
969 goto shmfork_out;
970 }
971 MALLOC(shmmap_s, struct shmmap_state *, size, M_SHM, M_WAITOK);
972 if (shmmap_s == NULL) {
973 ret = 1;
974 goto shmfork_out;
975 }
976
977 bcopy(src, (caddr_t)shmmap_s, size);
978 p2->vm_shm = (caddr_t)shmmap_s;
979 for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
980 if (SHMID_IS_VALID(shmmap_s->shmid)) {
981 shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
982 }
983 }
984
985shmfork_out:
986 SYSV_SHM_SUBSYS_UNLOCK();
987 return ret;
988}
989
990static void
991shmcleanup(struct proc *p, int deallocate)
992{
993 struct shmmap_state *shmmap_s;
994
995 SYSV_SHM_SUBSYS_LOCK();
996
997 shmmap_s = (struct shmmap_state *)p->vm_shm;
998 for (; shmmap_s->shmid != SHMID_SENTINEL; shmmap_s++) {
999 if (SHMID_IS_VALID(shmmap_s->shmid)) {
1000 /*
1001 * XXX: Should the MAC framework enforce a
1002 * check here as well?
1003 */
1004 shm_delete_mapping(p, shmmap_s, deallocate);
1005 }
1006 }
1007
1008 FREE((caddr_t)p->vm_shm, M_SHM);
1009 p->vm_shm = NULL;
1010 SYSV_SHM_SUBSYS_UNLOCK();
1011}
1012
1013void
1014shmexit(struct proc *p)
1015{
1016 shmcleanup(p, 1);
1017}
1018
1019/*
1020 * shmexec() is like shmexit(), only it doesn't delete the mappings,
1021 * since the old address space has already been destroyed and the new
1022 * one instantiated. Instead, it just does the housekeeping work we
1023 * need to do to keep the System V shared memory subsystem sane.
1024 */
1025__private_extern__ void
1026shmexec(struct proc *p)
1027{
1028 shmcleanup(p, 0);
1029}
1030
1031int
1032shminit(void)
1033{
1034 size_t sz;
1035 int i;
1036
1037 if (!shm_inited) {
1038 /*
1039 * We store 64 bit values internally; if we didn't, we would be
1040 * unable to represent a segment size in excess of 32 bits with
1041 * the (struct shmid_ds)->shm_segsz field; also, POSIX dictates
1042 * that this field be a size_t, which is 64 bits when running
1043 * 64 bit binaries.
1044 */
1045 if (os_mul_overflow(shminfo.shmmni, sizeof(struct shmid_kernel), &sz)) {
1046 return ENOMEM;
1047 }
1048
1049 MALLOC(shmsegs, struct shmid_kernel *, sz, M_SHM, M_WAITOK);
1050 if (shmsegs == NULL) {
1051 return ENOMEM;
1052 }
1053 for (i = 0; i < shminfo.shmmni; i++) {
1054 shmsegs[i].u.shm_perm.mode = SHMSEG_FREE;
1055 shmsegs[i].u.shm_perm._seq = 0;
1056#if CONFIG_MACF
1057 mac_sysvshm_label_init(&shmsegs[i]);
1058#endif
1059 }
1060 shm_last_free = 0;
1061 shm_nused = 0;
1062 shm_committed = 0;
1063 shm_inited = 1;
1064 }
1065
1066 return 0;
1067}
1068
1069/* Initialize the mutex governing access to the SysV shm subsystem */
1070__private_extern__ void
1071sysv_shm_lock_init( void )
1072{
1073
1074 sysv_shm_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
1075
1076 sysv_shm_subsys_lck_grp = lck_grp_alloc_init("sysv_shm_subsys_lock", sysv_shm_subsys_lck_grp_attr);
1077
1078 sysv_shm_subsys_lck_attr = lck_attr_alloc_init();
1079 lck_mtx_init(&sysv_shm_subsys_mutex, sysv_shm_subsys_lck_grp, sysv_shm_subsys_lck_attr);
1080}
1081
1082/* (struct sysctl_oid *oidp, void *arg1, int arg2, \
1083 struct sysctl_req *req) */
1084static int
1085sysctl_shminfo(__unused struct sysctl_oid *oidp, void *arg1,
1086 __unused int arg2, struct sysctl_req *req)
1087{
1088 int error = 0;
1089 int sysctl_shminfo_ret = 0;
1090 int64_t saved_shmmax;
1091 int64_t saved_shmmin;
1092 int64_t saved_shmseg;
1093 int64_t saved_shmmni;
1094 int64_t saved_shmall;
1095
1096 error = SYSCTL_OUT(req, arg1, sizeof(int64_t));
1097 if (error || req->newptr == USER_ADDR_NULL)
1098 return(error);
1099
1100 SYSV_SHM_SUBSYS_LOCK();
1101
1102 /* shmmni cannot be changed after SysV SHM has been initialized */
1103 if (shm_inited && arg1 == &shminfo.shmmni) {
1104 sysctl_shminfo_ret = EPERM;
1105 goto sysctl_shminfo_out;
1106 }
1107 saved_shmmax = shminfo.shmmax;
1108 saved_shmmin = shminfo.shmmin;
1109 saved_shmseg = shminfo.shmseg;
1110 saved_shmmni = shminfo.shmmni;
1111 saved_shmall = shminfo.shmall;
1112
1113 if ((error = SYSCTL_IN(req, arg1, sizeof(int64_t))) != 0) {
1114 sysctl_shminfo_ret = error;
1115 goto sysctl_shminfo_out;
1116 }
1117
1118 if (arg1 == &shminfo.shmmax) {
1119 /* shmmax needs to be page-aligned */
1120 if (shminfo.shmmax & PAGE_MASK_64 || shminfo.shmmax < 0) {
1121 shminfo.shmmax = saved_shmmax;
1122 sysctl_shminfo_ret = EINVAL;
1123 goto sysctl_shminfo_out;
1124 }
1125 }
1126 else if (arg1 == &shminfo.shmmin) {
1127 if (shminfo.shmmin < 0) {
1128 shminfo.shmmin = saved_shmmin;
1129 sysctl_shminfo_ret = EINVAL;
1130 goto sysctl_shminfo_out;
1131 }
1132 }
1133 else if (arg1 == &shminfo.shmseg) {
1134 /* add a sanity check - 20847256 */
1135 if (shminfo.shmseg > INT32_MAX || shminfo.shmseg < 0) {
1136 shminfo.shmseg = saved_shmseg;
1137 sysctl_shminfo_ret = EINVAL;
1138 goto sysctl_shminfo_out;
1139 }
1140 }
1141 else if (arg1 == &shminfo.shmmni) {
1142 /* add a sanity check - 20847256 */
1143 if (shminfo.shmmni > INT32_MAX || shminfo.shmmni < 0) {
1144 shminfo.shmmni = saved_shmmni;
1145 sysctl_shminfo_ret = EINVAL;
1146 goto sysctl_shminfo_out;
1147 }
1148 }
1149 else if (arg1 == &shminfo.shmall) {
1150 /* add a sanity check - 20847256 */
1151 if (shminfo.shmall > INT32_MAX || shminfo.shmall < 0) {
1152 shminfo.shmall = saved_shmall;
1153 sysctl_shminfo_ret = EINVAL;
1154 goto sysctl_shminfo_out;
1155 }
1156 }
1157 sysctl_shminfo_ret = 0;
1158sysctl_shminfo_out:
1159 SYSV_SHM_SUBSYS_UNLOCK();
1160 return sysctl_shminfo_ret;
1161}
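/*
 * These limits surface as the kern.sysv.* sysctls declared below.  For
 * example, an administrator might raise the maximum segment size and the
 * global page limit with something like (illustrative values only):
 *
 *     sysctl -w kern.sysv.shmmax=16777216
 *     sysctl -w kern.sysv.shmall=4096
 *
 * subject to the checks in this handler (shmmax must remain page aligned,
 * and shmmni cannot be changed once the subsystem has been initialized).
 */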
1162
1163static int
1164IPCS_shm_sysctl(__unused struct sysctl_oid *oidp, __unused void *arg1,
1165 __unused int arg2, struct sysctl_req *req)
1166{
1167 int error;
1168 int cursor;
1169 union {
1170 struct user32_IPCS_command u32;
1171 struct user_IPCS_command u64;
1172 } ipcs;
1173 struct user32_shmid_ds shmid_ds32 = {}; /* post conversion, 32 bit version */
1174 struct user_shmid_ds shmid_ds; /* 64 bit version */
1175 void *shmid_dsp;
1176 size_t ipcs_sz = sizeof(struct user_IPCS_command);
1177 size_t shmid_ds_sz = sizeof(struct user_shmid_ds);
1178 struct proc *p = current_proc();
1179
1180 SYSV_SHM_SUBSYS_LOCK();
1181
1182 if ((error = shminit())) {
1183 goto ipcs_shm_sysctl_out;
1184 }
1185
1186 if (!IS_64BIT_PROCESS(p)) {
1187 ipcs_sz = sizeof(struct user32_IPCS_command);
1188 shmid_ds_sz = sizeof(struct user32_shmid_ds);
1189 }
1190
1191 /* Copy in the command structure */
1192 if ((error = SYSCTL_IN(req, &ipcs, ipcs_sz)) != 0) {
1193 goto ipcs_shm_sysctl_out;
1194 }
1195
1196 if (!IS_64BIT_PROCESS(p)) /* convert in place */
1197 ipcs.u64.ipcs_data = CAST_USER_ADDR_T(ipcs.u32.ipcs_data);
1198
1199 /* Let us version this interface... */
1200 if (ipcs.u64.ipcs_magic != IPCS_MAGIC) {
1201 error = EINVAL;
1202 goto ipcs_shm_sysctl_out;
1203 }
1204
1205 switch(ipcs.u64.ipcs_op) {
1206 case IPCS_SHM_CONF: /* Obtain global configuration data */
1207 if (ipcs.u64.ipcs_datalen != sizeof(struct shminfo)) {
1208 if (ipcs.u64.ipcs_cursor != 0) { /* fwd. compat. */
1209 error = ENOMEM;
1210 break;
1211 }
1212 error = ERANGE;
1213 break;
1214 }
1215 error = copyout(&shminfo, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
1216 break;
1217
1218 case IPCS_SHM_ITER: /* Iterate over existing segments */
1219 cursor = ipcs.u64.ipcs_cursor;
1220 if (cursor < 0 || cursor >= shminfo.shmmni) {
1221 error = ERANGE;
1222 break;
1223 }
1224 if (ipcs.u64.ipcs_datalen != (int)shmid_ds_sz) {
1225 error = EINVAL;
1226 break;
1227 }
1228 for( ; cursor < shminfo.shmmni; cursor++) {
1229 if (shmsegs[cursor].u.shm_perm.mode & SHMSEG_ALLOCATED)
1230 break;
1231 continue;
1232 }
1233 if (cursor == shminfo.shmmni) {
1234 error = ENOENT;
1235 break;
1236 }
1237
1238 shmid_dsp = &shmsegs[cursor]; /* default: 64 bit */
1239
1240 /*
1241 * If necessary, convert the 64 bit kernel segment
1242 * descriptor to a 32 bit user one.
1243 */
1244 if (!IS_64BIT_PROCESS(p)) {
1245 shmid_ds_64to32(shmid_dsp, &shmid_ds32);
1246
1247 /* Clear kernel reserved pointer before copying to user space */
1248 shmid_ds32.shm_internal = (user32_addr_t)0;
1249
1250 shmid_dsp = &shmid_ds32;
1251 } else {
1252 memcpy(&shmid_ds, shmid_dsp, sizeof(shmid_ds));
1253
1254 /* Clear kernel reserved pointer before copying to user space */
1255 shmid_ds.shm_internal = USER_ADDR_NULL;
1256
1257 shmid_dsp = &shmid_ds;
1258 }
1259 error = copyout(shmid_dsp, ipcs.u64.ipcs_data, ipcs.u64.ipcs_datalen);
1260 if (!error) {
1261 /* update cursor */
1262 ipcs.u64.ipcs_cursor = cursor + 1;
1263
1264 if (!IS_64BIT_PROCESS(p)) /* convert in place */
1265 ipcs.u32.ipcs_data = CAST_DOWN_EXPLICIT(user32_addr_t,ipcs.u64.ipcs_data);
1266
1267 error = SYSCTL_OUT(req, &ipcs, ipcs_sz);
1268 }
1269 break;
1270
1271 default:
1272 error = EINVAL;
1273 break;
1274 }
1275ipcs_shm_sysctl_out:
1276 SYSV_SHM_SUBSYS_UNLOCK();
1277 return(error);
1278}
1279
1280SYSCTL_NODE(_kern, KERN_SYSV, sysv, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSV");
1281
1282SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmax, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
1283 &shminfo.shmmax, 0, &sysctl_shminfo ,"Q","shmmax");
1284
1285SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmin, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
1286 &shminfo.shmmin, 0, &sysctl_shminfo ,"Q","shmmin");
1287
1288SYSCTL_PROC(_kern_sysv, OID_AUTO, shmmni, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
1289 &shminfo.shmmni, 0, &sysctl_shminfo ,"Q","shmmni");
1290
1291SYSCTL_PROC(_kern_sysv, OID_AUTO, shmseg, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
1292 &shminfo.shmseg, 0, &sysctl_shminfo ,"Q","shmseg");
1293
1294SYSCTL_PROC(_kern_sysv, OID_AUTO, shmall, CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
1295 &shminfo.shmall, 0, &sysctl_shminfo ,"Q","shmall");
1296
1297SYSCTL_NODE(_kern_sysv, OID_AUTO, ipcs, CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY, 0, "SYSVIPCS");
1298
1299SYSCTL_PROC(_kern_sysv_ipcs, OID_AUTO, shm, CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
1300 0, 0, IPCS_shm_sysctl,
1301 "S,IPCS_shm_command",
1302 "ipcs shm command interface");
1303#endif /* SYSV_SHM */
1304
1305/* DSEP Review Done pl-20051108-v02 @2743,@2908,@2913,@3009 */
1306