/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 */
/*
 * File:    ipc/ipc_space.c
 * Author:  Rich Draves
 * Date:    1989
 *
 * Functions to manipulate IPC capability spaces.
 */

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/assert.h>
#include <kern/sched_prim.h>
#include <kern/zalloc.h>
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_right.h>
#include <prng/random.h>
#include <string.h>

/* Remove this in the future so port names are less predictable. */
#define CONFIG_SEMI_RANDOM_ENTRIES
#ifdef CONFIG_SEMI_RANDOM_ENTRIES
#define NUM_SEQ_ENTRIES 8
#endif

os_refgrp_decl(static, is_refgrp, "is", NULL);
static ZONE_DEFINE_TYPE(ipc_space_zone, "ipc spaces",
    struct ipc_space, ZC_ZFREE_CLEARMEM);

SECURITY_READ_ONLY_LATE(ipc_space_t) ipc_space_kernel;
SECURITY_READ_ONLY_LATE(ipc_space_t) ipc_space_reply;

/*
 * Allocate and lock-initialize an ipc_space structure.
 * The allocation is zeroed and cannot fail (Z_ZERO | Z_NOFAIL).
 */
static ipc_space_t
ipc_space_alloc(void)
{
    ipc_space_t space;

    space = zalloc_flags(ipc_space_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
    lck_ticket_init(&space->is_lock, &ipc_lck_grp);

    return space;
}

/* Destroy an inactive space once its last reference has been dropped. */
__attribute__((noinline))
static void
ipc_space_free(ipc_space_t space)
{
    assert(!is_active(space));
    lck_ticket_destroy(&space->is_lock, &ipc_lck_grp);
    zfree(ipc_space_zone, space);
}

/* SMR callback: actually free a retired entry table. */
static void
ipc_space_free_table(smr_node_t node)
{
    ipc_entry_t entry = __container_of(node, struct ipc_entry, ie_smr_node);
    ipc_entry_table_t table = entry->ie_self;

    ipc_entry_table_free_noclear(table);
}

/*
 * Retire an entry table: defer freeing it until concurrent SMR readers
 * can no longer observe it.
 */
void
ipc_space_retire_table(ipc_entry_table_t table)
{
    ipc_entry_t base;
    vm_size_t size;

    base = ipc_entry_table_base(table);
    size = ipc_entry_table_size(table);
    base->ie_self = table;
    smr_ipc_call(&base->ie_smr_node, size, ipc_space_free_table);
}

void
ipc_space_reference(
    ipc_space_t space)
{
    os_ref_retain_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp);
}

void
ipc_space_release(
    ipc_space_t space)
{
    if (os_ref_release_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp) == 0) {
        ipc_space_free(space);
    }
}

void
ipc_space_lock(
    ipc_space_t space)
{
    lck_ticket_lock(&space->is_lock, &ipc_lck_grp);
}

void
ipc_space_unlock(
    ipc_space_t space)
{
    lck_ticket_unlock(&space->is_lock);
}

void
ipc_space_lock_sleep(
    ipc_space_t space)
{
    lck_ticket_sleep_with_inheritor(&space->is_lock, &ipc_lck_grp,
        LCK_SLEEP_DEFAULT, (event_t)space, space->is_grower,
        THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
}

/*
 * Routine: ipc_space_get_rollpoint
 * Purpose:
 * Generate a new gencount rollover point from a space's entropy pool.
 */
ipc_entry_bits_t
ipc_space_get_rollpoint(
    ipc_space_t space)
{
    return random_bool_gen_bits(
        &space->bool_gen,
        &space->is_entropy[0],
        IS_ENTROPY_CNT,
        IE_BITS_ROLL_BITS);
}

/*
 * Routine: ipc_space_rand_freelist
 * Purpose:
 * Pseudo-randomly permute the order of entries in an IPC space.
 * Arguments:
 * space: the ipc space to initialize.
 * table: the corresponding ipc table to initialize.
 * the table is 0-initialized.
 * bottom: the start of the range to initialize (inclusive).
 * size: the end of the range to initialize (exclusive).
 */
void
ipc_space_rand_freelist(
    ipc_space_t space,
    ipc_entry_t table,
    mach_port_index_t bottom,
    mach_port_index_t size)
{
    int at_start = (bottom == 0);
#ifdef CONFIG_SEMI_RANDOM_ENTRIES
    /*
     * Only make sequential entries at the start of the table, and not when
     * we're growing the space.
     */
    ipc_entry_num_t total = 0;
#endif

    /* First entry in the free list is always free, and is the start of the free list. */
    mach_port_index_t curr = bottom;
    mach_port_index_t top = size;

    bottom++;
    top--;

    /*
     * Initialize the free list in the table.
     * Add the entries in pseudo-random order and randomly set the generation
     * number, in order to frustrate attacks involving port name reuse.
     */
    while (bottom <= top) {
        ipc_entry_t entry = &table[curr];
        int which;
#ifdef CONFIG_SEMI_RANDOM_ENTRIES
        /*
         * XXX: This is a horrible hack to make sure that randomizing the port
         * doesn't break programs that might have (sad) hard-coded values for
         * certain port names.
         */
        if (at_start && total++ < NUM_SEQ_ENTRIES) {
            which = 0;
        } else
#endif
        which = random_bool_gen_bits(
            &space->bool_gen,
            &space->is_entropy[0],
            IS_ENTROPY_CNT,
            1);

        mach_port_index_t next;
        if (which) {
            next = top;
            top--;
        } else {
            next = bottom;
            bottom++;
        }

        /*
         * The entry's gencount will roll over on its first allocation, at which
         * point a random rollover will be set for the entry.
         */
        entry->ie_bits = IE_BITS_GEN_MASK;
        entry->ie_next = next;
        curr = next;
    }
    table[curr].ie_bits = IE_BITS_GEN_MASK;
}
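
/*
 * For illustration (values are hypothetical, derived only from the loop
 * above): suppose the routine is called while growing a table, with
 * bottom = 8 and size = 14, and the per-entry coin flips come out
 * 0, 1, 0, 1, 0. Each flip links either the lowest or the highest
 * remaining index, so the free list threaded through ie_next becomes
 *
 *     8 -> 9 -> 13 -> 10 -> 12 -> 11
 *
 * i.e. a pseudo-random interleaving of the two ends of the range, with
 * every entry's generation bits preset to IE_BITS_GEN_MASK so the first
 * allocation rolls the gencount over.
 */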

/*
 * Routine: ipc_space_create
 * Purpose:
 * Creates a new IPC space.
 *
 * The new space has two references, one for the caller
 * and one because it is active.
 * Conditions:
 * Nothing locked. Allocates memory.
 * Returns:
 * KERN_SUCCESS Created a space.
 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
 */

kern_return_t
ipc_space_create(
    ipc_label_t label,
    ipc_space_t *spacep)
{
    ipc_space_t space;
    ipc_entry_table_t table;
    ipc_entry_num_t count;

    table = ipc_entry_table_alloc_by_count(IPC_ENTRY_TABLE_MIN,
        Z_WAITOK | Z_ZERO | Z_NOFAIL);
    space = ipc_space_alloc();
    count = ipc_entry_table_count(table);

    random_bool_init(&space->bool_gen);
    ipc_space_rand_freelist(space, ipc_entry_table_base(table), 0, count);

    os_ref_init_count_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp, 2, 0);
    space->is_table_free = count - 1;
    space->is_label = label;
    space->is_low_mod = count;
    space->is_node_id = HOST_LOCAL_NODE; /* HOST_LOCAL_NODE, except proxy spaces */
    smr_init_store(&space->is_table, table);

    *spacep = space;
    return KERN_SUCCESS;
}
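
/*
 * Minimal usage sketch (hypothetical caller; in practice the space is
 * created during task IPC initialization, and IPC_LABEL_NONE is assumed
 * here to be the default label value):
 *
 *     ipc_space_t space;
 *
 *     if (ipc_space_create(IPC_LABEL_NONE, &space) == KERN_SUCCESS) {
 *         // publish the space (e.g. hang it off the owning task), then
 *         // drop the caller's reference when it is no longer needed:
 *         ipc_space_release(space);
 *     }
 *
 * The space itself stays alive until ipc_space_terminate() releases the
 * "active" reference and all remaining references are dropped.
 */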

/*
 * Routine: ipc_space_label
 * Purpose:
 * Modify the label on a space. The desired
 * label must be a super-set of the current
 * label for the space (as rights may already
 * have been copied out under the old label
 * value).
 * Conditions:
 * Nothing locked.
 * Returns:
 * KERN_SUCCESS Updated the label
 * KERN_INVALID_VALUE label not a superset of old
 */
kern_return_t
ipc_space_label(
    ipc_space_t space,
    ipc_label_t label)
{
    is_write_lock(space);
    if (!is_active(space)) {
        is_write_unlock(space);
        return KERN_SUCCESS;
    }

    if ((space->is_label & label) != space->is_label) {
        is_write_unlock(space);
        return KERN_INVALID_VALUE;
    }
    space->is_label = label;
    is_write_unlock(space);
    return KERN_SUCCESS;
}
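
/*
 * The superset check above, by example (label values are illustrative bit
 * masks, not constants defined in this file): if the space currently carries
 * label 0x1, passing 0x3 (0x1 | 0x2) succeeds because every old bit is kept,
 * while passing 0x2 alone fails with KERN_INVALID_VALUE since
 * (0x1 & 0x2) != 0x1.
 */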

/*
 * Routine: ipc_space_add_label
 * Purpose:
 * Modify the label on a space. The desired
 * label is added to the labels already set
 * on the space.
 * Conditions:
 * Nothing locked.
 * Returns:
 * KERN_SUCCESS Updated the label
 */
kern_return_t
ipc_space_add_label(
    ipc_space_t space,
    ipc_label_t label)
{
    is_write_lock(space);
    if (!is_active(space)) {
        is_write_unlock(space);
        return KERN_SUCCESS;
    }

    space->is_label |= label;
    is_write_unlock(space);
    return KERN_SUCCESS;
}

/*
 * Routine: ipc_space_create_special
 * Purpose:
 * Create a special space. A special space
 * doesn't hold rights in the normal way.
 * Instead it is a placeholder for holding
 * disembodied (naked) receive rights.
 * See ipc_port_alloc_special/ipc_port_dealloc_special.
 * Conditions:
 * Nothing locked.
 * Returns:
 * KERN_SUCCESS Created a space.
 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
 */

kern_return_t
ipc_space_create_special(
    ipc_space_t *spacep)
{
    ipc_space_t space;

    space = ipc_space_alloc();
    os_ref_init_count_mask(&space->is_bits, IS_FLAGS_BITS, &is_refgrp, 1, 0);
    space->is_label = IPC_LABEL_SPECIAL;
    space->is_node_id = HOST_LOCAL_NODE; /* HOST_LOCAL_NODE, except proxy spaces */

    *spacep = space;
    return KERN_SUCCESS;
}
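
/*
 * Typical early-boot use (illustrative; the exact call sites live outside
 * this file): the two special spaces declared at the top of this file are
 * presumably set up by the IPC bootstrap code along the lines of
 *
 *     kern_return_t kr;
 *
 *     kr = ipc_space_create_special(&ipc_space_kernel);
 *     assert(kr == KERN_SUCCESS);
 *
 *     kr = ipc_space_create_special(&ipc_space_reply);
 *     assert(kr == KERN_SUCCESS);
 *
 * Ports in these spaces never get user-visible names; they are managed via
 * ipc_port_alloc_special()/ipc_port_dealloc_special().
 */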

/*
 * Routine: ipc_space_terminate
 * Purpose:
 * Marks the space as dead and cleans up the entries.
 * Does nothing if the space is already dead.
 * Conditions:
 * Nothing locked.
 */

void
ipc_space_terminate(
    ipc_space_t space)
{
    ipc_entry_table_t table;

    assert(space != IS_NULL);

    is_write_lock(space);
    if (!is_active(space)) {
        is_write_unlock(space);
        return;
    }

    table = smr_serialized_load(&space->is_table);
    smr_clear_store(&space->is_table);

    /*
     * If somebody is trying to grow the table,
     * we must wait until they finish and figure
     * out the space died.
     */
    while (is_growing(space)) {
        is_write_sleep(space);
    }

    is_write_unlock(space);

    /*
     * Now we can futz with it unlocked.
     *
     * First destroy receive rights, then the rest.
     * This will cut down the number of notifications
     * being sent when the notification destination
     * was a receive right in this space.
     */

    for (mach_port_index_t index = 1;
        ipc_entry_table_contains(table, index);
        index++) {
        ipc_entry_t entry = ipc_entry_table_get_nocheck(table, index);
        mach_port_type_t type;

        type = IE_BITS_TYPE(entry->ie_bits);
        if (type != MACH_PORT_TYPE_NONE) {
            mach_port_name_t name;

            name = MACH_PORT_MAKE(index,
                IE_BITS_GEN(entry->ie_bits));
            ipc_right_terminate(space, name, entry);
        }
    }

    ipc_space_retire_table(table);
    space->is_table_free = 0;

    /*
     * Because the space is now dead,
     * we must release the "active" reference for it.
     * Our caller still has his reference.
     */
    is_release(space);
}

#if CONFIG_PROC_RESOURCE_LIMITS
/*
 * ipc_space_set_table_size_limits:
 *
 * Set the table size's soft and hard limits.
 */
kern_return_t
ipc_space_set_table_size_limits(
    ipc_space_t space,
    ipc_entry_num_t soft_limit,
    ipc_entry_num_t hard_limit)
{
    if (space == IS_NULL) {
        return KERN_INVALID_TASK;
    }

    is_write_lock(space);

    if (!is_active(space)) {
        is_write_unlock(space);
        return KERN_INVALID_TASK;
    }

    /*
     * A soft limit at or above the hard limit can never fire before the
     * hard limit does, so discard it.
     */
    if (hard_limit && soft_limit >= hard_limit) {
        soft_limit = 0;
    }

    space->is_table_size_soft_limit = soft_limit;
    space->is_table_size_hard_limit = hard_limit;

    is_write_unlock(space);

    return KERN_SUCCESS;
}

/*
 * Check if port space has exceeded its limits.
 * Should be called with the space write lock held.
 */
void
ipc_space_check_limit_exceeded(ipc_space_t space)
{
    size_t size = ipc_entry_table_count(is_active_table(space));

    if (!is_above_soft_limit_notify(space) && space->is_table_size_soft_limit &&
        ((size - space->is_table_free) > space->is_table_size_soft_limit)) {
        is_above_soft_limit_send_notification(space);
        act_set_astproc_resource(current_thread());
    } else if (!is_above_hard_limit_notify(space) && space->is_table_size_hard_limit &&
        ((size - space->is_table_free) > space->is_table_size_hard_limit)) {
        is_above_hard_limit_send_notification(space);
        act_set_astproc_resource(current_thread());
    }
}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

/*
 * Routine: ipc_space_check_table_size_limit
 * Purpose:
 * Query the current size, soft_limit, and hard_limit for the ipc space.
 * Returns true if a notification should be sent because a limit has
 * been exceeded. If it returns true but the reported soft/hard limit
 * values are zero, the system-wide maximum has been exceeded; see
 * is_at_max_limit_send_notification.
 * Conditions:
 * Nothing locked on entry.
 * Nothing locked on exit.
 * Returns TRUE if a limit has been exceeded.
 */
bool
ipc_space_check_table_size_limit(
    ipc_space_t space,
    ipc_entry_num_t *current_size,
    ipc_entry_num_t *soft_limit,
    ipc_entry_num_t *hard_limit)
{
    ipc_entry_table_t table;
    bool should_notify = false;

    if (space == IS_NULL) {
        return false;
    }

    is_write_lock(space);

    if (!is_active(space)) {
        goto exit;
    }
    /* space is locked and active */

    table = is_active_table(space);
    *current_size = ipc_entry_table_count(table) - space->is_table_free;
    if (is_at_max_limit_notify(space)) {
        if (!is_at_max_limit_already_notified(space)) {
            *soft_limit = 0;
            *hard_limit = 0;
            is_at_max_limit_notified(space);
            should_notify = true;
        }
        goto exit;
    }

#if CONFIG_PROC_RESOURCE_LIMITS
    *soft_limit = space->is_table_size_soft_limit;
    *hard_limit = space->is_table_size_hard_limit;

    if (!*soft_limit && !*hard_limit) {
        should_notify = false;
        goto exit;
    }

    /*
     * Check if the thread sending the soft limit notification arrives after
     * the one that sent the hard limit notification.
     */
    if (is_hard_limit_already_notified(space)) {
        goto exit;
    }

    if (*hard_limit > 0 && *current_size >= *hard_limit) {
        *soft_limit = 0;
        should_notify = true;
        is_hard_limit_notified(space);
    } else {
        if (is_soft_limit_already_notified(space)) {
            goto exit;
        }
        if (*soft_limit > 0 && *current_size >= *soft_limit) {
            *hard_limit = 0;
            should_notify = true;
            is_soft_limit_notified(space);
        }
    }
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

exit:
    is_write_unlock(space);
    return should_notify;
}
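
/*
 * A hypothetical caller (helper names are illustrative, not from this file)
 * that turns the result into a resource-limit notification might look like:
 *
 *     ipc_entry_num_t size, soft, hard;
 *
 *     if (ipc_space_check_table_size_limit(space, &size, &soft, &hard)) {
 *         if (soft == 0 && hard == 0) {
 *             // The space hit the system-wide maximum table size.
 *             send_port_space_full_notification(task, size);
 *         } else {
 *             // A per-task limit was crossed; exactly one of soft/hard is
 *             // nonzero, identifying which limit fired.
 *             send_port_space_limit_notification(task, size, soft, hard);
 *         }
 *     }
 */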

/*
 * Set an ast if port space is at its max limit.
 * Should be called with the space write lock held.
 */
void
ipc_space_set_at_max_limit(ipc_space_t space)
{
    if (!is_at_max_limit_notify(space)) {
        is_at_max_limit_send_notification(space);
        act_set_astproc_resource(current_thread());
    }
}