1 | /* |
2 | * Copyright (c) 2023 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | |
29 | #if CONFIG_EXCLAVES |
30 | |
31 | #include <stdint.h> |
32 | #include <stdbool.h> |
33 | |
34 | #include <mach/exclaves.h> |
35 | #include <mach/kern_return.h> |
36 | |
37 | #include <string.h> |
38 | |
39 | #include <kern/assert.h> |
40 | #include <kern/bits.h> |
41 | #include <kern/queue.h> |
42 | #include <kern/kalloc.h> |
43 | #include <kern/locks.h> |
44 | #include <kern/task.h> |
45 | #include <kern/thread_call.h> |
46 | |
47 | #include <vm/pmap.h> |
48 | |
49 | |
50 | #include <kern/ipc_kobject.h> |
51 | |
52 | #include <os/hash.h> |
53 | |
54 | #include <libxnuproxy/messages.h> |
55 | |
56 | #include <mach/mach_traps.h> |
57 | #include <mach/mach_port.h> |
58 | |
59 | #include <sys/event.h> |
60 | |
61 | #include "exclaves_conclave.h" |
62 | #include "exclaves_debug.h" |
63 | #include "exclaves_resource.h" |
64 | #include "exclaves_sensor.h" |
65 | #include "exclaves_shared_memory.h" |
66 | |
67 | /* Use the new version of xnuproxy_msg_t. */ |
68 | #define xnuproxy_msg_t xnuproxy_msg_new_t |
69 | |
static LCK_GRP_DECLARE(resource_lck_grp, "exclaves_resource");
71 | |
72 | extern kern_return_t exclaves_xnu_proxy_send(xnuproxy_msg_t *, void *); |
73 | |
74 | /* |
75 | * A cache of service ids in the kernel domain |
76 | */ |
77 | static bitmap_t |
78 | kernel_service_bitmap[BITMAP_LEN(CONCLAVE_SERVICE_MAX)] = {0}; |
79 | |
80 | /* |
81 | * Exclave Resources |
82 | * |
83 | * Exclaves provide a fixed static set of resources available to XNU. Some |
84 | * examples of types of resources: |
85 | * - Conclave managers |
86 | * - Services |
87 | * - Named buffers |
88 | * - Audio buffers |
89 | * ... |
90 | * |
91 | * Each resource has a name, a type and a corresponding identifier which is |
92 | * shared between XNU and Exclaves. Resources are scoped by what entities are |
93 | * allowed to access them. |
94 | * Resources are discovered during boot and made available in a two-level table |
95 | * scheme. The root table collects resources by their scope, with the |
96 | * second-level tables listing the actual resources. |
97 | * |
98 | * |
99 | * Root Table |
100 | * ┌────────────────────────────┐ |
101 | * │ ┌────────────────────────┐ │ |
102 | * │ │ "com.apple.kernel" │─┼─────┐ |
103 | * │ └────────────────────────┘ │ │ |
104 | * │ ┌────────────────────────┐ │ │ |
105 | * │ │"com.apple.conclave.a" │─┼─┐ │ |
106 | * │ └────────────────────────┘ │ │ │ |
107 | * │ ┌────────────────────────┐ │ │ │ |
108 | * │ │"com.apple.conclave.b" │ │ │ │ |
109 | * │ └────────────────────────┘ │ │ │ |
110 | * │ ┌────────────────────────┐ │ │ │ |
111 | * │ │ "com.apple.driver.a" │ │ │ │ |
112 | * │ └────────────────────────┘ │ │ │ |
113 | * │ ... │ │ │ |
114 | * │ │ │ │ |
115 | * └────────────────────────────┘ │ │ |
116 | * ┌─────────────────────────┘ │ |
117 | * │ │ |
118 | * │ ┌─────────────────────────┘ |
119 | * │ │ |
120 | * │ │ |
121 | * │ │ |
122 | * │ └──▶ "com.apple.kernel" |
123 | * │ ┌─────────────────────────────────────────────────────┐ |
124 | * │ │┌───────────────────────┬──────────────────┬────────┐│ |
125 | * │ ││"com.apple.conclave.a" │ CONCLAVE_MANAGER │ 0x1234 ││ |
126 | * │ │└───────────────────────┴──────────────────┴────────┘│ |
127 | * │ │┌───────────────────────┬──────────────────┬────────┐│ |
128 | * │ ││"com.apple.conclave.b" │ CONCLAVE_MANAGER │ 0x7654 ││ |
129 | * │ │└───────────────────────┴──────────────────┴────────┘│ |
130 | * │ │ │ |
131 | * │ │ ... │ |
132 | * │ └─────────────────────────────────────────────────────┘ |
133 | * │ |
134 | * └─────▶ "com.apple.conclave.a" |
135 | * ┌─────────────────────────────────────────────────────┐ |
136 | * │┌───────────────────────┬──────────────────┬────────┐│ |
137 | * ││ "audio_buf" │ AUDIO_BUFFER │ 0x9999 ││ |
138 | * │└───────────────────────┴──────────────────┴────────┘│ |
139 | * │┌───────────────────────┬──────────────────┬────────┐│ |
140 | * ││ "service_x" │ SERVICE │ 0x1111 ││ |
141 | * │└───────────────────────┴──────────────────┴────────┘│ |
142 | * │┌───────────────────────┬──────────────────┬────────┐│ |
143 | * ││ "named_buffer_x" │ NAMED_BUFFER │0x66565 ││ |
144 | * │└───────────────────────┴──────────────────┴────────┘│ |
145 | * │ ... │ |
146 | * └─────────────────────────────────────────────────────┘ |
147 | * |
148 | * ... |
149 | * |
150 | * |
151 | * Resources can be looked up by first finding the root table entry (the |
152 | * "domain") and then searching for the identifier in that domain. |
 * For example, to look up the conclave manager ID for "com.apple.conclave.a",
 * the "com.apple.kernel" domain is found first and then, within that domain,
 * the search continues using the conclave name and the CONCLAVE_MANAGER type.
156 | * Every conclave domain has a corresponding CONCLAVE_MANAGER resource in the |
157 | * "com.apple.kernel" domain. |
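 *
 * As an illustration (using the placeholder names from the diagram above,
 * not real identifiers), such a lookup amounts to something like:
 *
 *   exclaves_resource_t *cm = exclaves_resource_lookup_by_name(
 *       EXCLAVES_DOMAIN_KERNEL, "com.apple.conclave.a",
 *       XNUPROXY_RESOURCE_CONCLAVE_MANAGER);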
158 | */ |
159 | |
160 | /* -------------------------------------------------------------------------- */ |
161 | #pragma mark Hash Table |
162 | |
163 | #define TABLE_LEN 64 |
164 | |
165 | /* |
166 | * A table item is what ends up being stored in the hash table. It has a key and |
167 | * a value. |
168 | */ |
169 | typedef struct { |
170 | const void *i_key; |
171 | size_t i_key_len; |
172 | void *i_value; |
173 | |
174 | queue_chain_t i_chain; |
175 | } table_item_t; |
176 | |
177 | /* |
178 | * The hash table consists of an array of buckets (queues). The hashing function |
179 | * will choose in which bucket a particular item belongs. |
180 | */ |
181 | typedef struct { |
182 | queue_head_t *t_buckets; |
183 | size_t t_buckets_count; |
184 | } table_t; |
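/*
 * Note: the bucket count must be a power of two; get_bucket() below masks the
 * hash with (t_buckets_count - 1) rather than taking a modulo, and both
 * table_init() and table_alloc() assert this.
 */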
185 | |
186 | /* |
187 | * Given a key, return the corresponding bucket. |
188 | */ |
189 | static queue_head_t * |
190 | get_bucket(table_t *table, const void *key, size_t key_len) |
191 | { |
192 | const uint32_t idx = os_hash_jenkins(key, key_len) & |
193 | (table->t_buckets_count - 1); |
194 | return &table->t_buckets[idx]; |
195 | } |
196 | |
197 | /* |
198 | * Insert a new table item associated with 'key' into a table. |
199 | */ |
200 | static void |
201 | table_put(table_t *table, const void *key, size_t key_len, table_item_t *item) |
202 | { |
203 | assert3p(item->i_chain.next, ==, NULL); |
204 | assert3p(item->i_chain.prev, ==, NULL); |
205 | assert3p(item->i_value, !=, NULL); |
206 | |
207 | queue_head_t *head = get_bucket(table, key, key_len); |
208 | enqueue(head, &item->i_chain); |
209 | } |
210 | |
211 | /* |
 * Iterate through all items matching 'key', calling 'cb' for each.
 * Iteration stops early if 'cb' returns true.
213 | */ |
214 | static void |
215 | table_get(table_t *table, const void *key, size_t key_len, bool (^cb)(void *)) |
216 | { |
217 | const queue_head_t *head = get_bucket(table, key, key_len); |
218 | table_item_t *elem = NULL; |
219 | |
220 | assert3p(head, !=, NULL); |
221 | |
222 | qe_foreach_element(elem, head, i_chain) { |
223 | if (elem->i_key_len == key_len && |
224 | memcmp(elem->i_key, key, elem->i_key_len) == 0) { |
225 | if (cb(elem->i_value)) { |
226 | return; |
227 | } |
228 | } |
229 | } |
230 | |
231 | return; |
232 | } |
233 | |
234 | /* |
235 | * Initialize the queues. |
236 | */ |
237 | static void |
238 | table_init(table_t *table) |
239 | { |
240 | assert3u(table->t_buckets_count & (table->t_buckets_count - 1), ==, 0); |
241 | |
242 | /* Initialise each bucket. */ |
243 | for (size_t i = 0; i < table->t_buckets_count; i++) { |
244 | queue_init(&table->t_buckets[i]); |
245 | } |
246 | } |
247 | |
248 | /* |
249 | * Allocate a new table with the specified number of buckets. |
250 | */ |
251 | static table_t * |
252 | table_alloc(size_t nbuckets) |
253 | { |
254 | assert3u(nbuckets, >, 0); |
255 | assert3u(nbuckets & (nbuckets - 1), ==, 0); |
256 | |
257 | table_t *table = kalloc_type(table_t, Z_WAITOK | Z_ZERO | Z_NOFAIL); |
258 | |
259 | table->t_buckets_count = nbuckets; |
260 | table->t_buckets = kalloc_type(queue_head_t, nbuckets, |
261 | Z_WAITOK | Z_ZERO | Z_NOFAIL); |
262 | |
263 | return table; |
264 | } |
265 | |
266 | static void |
267 | table_iterate(table_t *table, |
268 | bool (^cb)(const void *key, size_t key_len, void *value)) |
269 | { |
270 | for (size_t i = 0; i < table->t_buckets_count; i++) { |
271 | const queue_head_t *head = &table->t_buckets[i]; |
272 | table_item_t *elem = NULL; |
273 | |
274 | qe_foreach_element(elem, head, i_chain) { |
275 | if (cb(elem->i_key, elem->i_key_len, elem->i_value)) { |
276 | return; |
277 | } |
278 | } |
279 | } |
280 | } |
281 | |
282 | |
283 | /* -------------------------------------------------------------------------- */ |
284 | #pragma mark Root Table |
285 | |
286 | /* |
287 | * The root table is a hash table which contains an entry for every top-level |
288 | * domain. |
289 | * Domains scope resources. For example a conclave domain will contain a list of |
290 | * services available in that conclave. The kernel itself gets its own domain |
291 | * which holds conclave managers and other resources the kernel communicates |
292 | * with directly. |
293 | */ |
294 | table_t root_table = { |
	.t_buckets = (queue_head_t *)(queue_head_t[TABLE_LEN]){},
296 | .t_buckets_count = TABLE_LEN, |
297 | }; |
298 | |
299 | /* |
 * Entries in the root table. Each is itself a domain holding the resources
 * available in that domain, indexed both by name and by ID.
302 | */ |
303 | typedef struct { |
304 | char d_name[XNUPROXY_RESOURCE_NAME_MAX]; |
305 | table_t *d_table_name; |
306 | table_t *d_table_id; |
307 | } exclaves_resource_domain_t; |
308 | |
309 | static exclaves_resource_domain_t * |
310 | lookup_domain(const char *domain_name) |
311 | { |
312 | __block exclaves_resource_domain_t *domain = NULL; |
313 | table_get(&root_table, domain_name, strlen(domain_name), ^bool (void *data) { |
314 | domain = data; |
315 | return true; |
316 | }); |
317 | |
318 | return domain; |
319 | } |
320 | |
321 | static void |
322 | iterate_domains(bool (^cb)(exclaves_resource_domain_t *)) |
323 | { |
324 | table_iterate(&root_table, |
325 | ^(__unused const void *key, __unused size_t key_len, void *value) { |
326 | exclaves_resource_domain_t *domain = value; |
327 | return cb(domain); |
328 | }); |
329 | } |
330 | |
331 | static void |
332 | iterate_resources(exclaves_resource_domain_t *domain, |
333 | bool (^cb)(exclaves_resource_t *)) |
334 | { |
335 | table_iterate(domain->d_table_name, |
336 | ^(__unused const void *key, __unused size_t key_len, void *value) { |
337 | exclaves_resource_t *resource = value; |
338 | return cb(resource); |
339 | }); |
340 | } |
341 | |
342 | static exclaves_resource_t * |
343 | lookup_resource_by_name(exclaves_resource_domain_t *domain, const char *name, |
344 | xnuproxy_resource_t type) |
345 | { |
346 | __block exclaves_resource_t *resource = NULL; |
347 | table_get(domain->d_table_name, name, strlen(name), ^bool (void *data) { |
348 | exclaves_resource_t *tmp = data; |
349 | if (tmp->r_type == type) { |
350 | resource = data; |
351 | return true; |
352 | } |
353 | return false; |
354 | }); |
355 | |
356 | return resource; |
357 | } |
358 | |
359 | static exclaves_resource_t * |
360 | lookup_resource_by_id(exclaves_resource_domain_t *domain, uint64_t id, |
361 | xnuproxy_resource_t type) |
362 | { |
363 | __block exclaves_resource_t *resource = NULL; |
364 | table_get(domain->d_table_id, &id, sizeof(id), ^bool (void *data) { |
365 | exclaves_resource_t *tmp = data; |
366 | if (tmp->r_type == type) { |
367 | resource = data; |
368 | return true; |
369 | } |
370 | return false; |
371 | }); |
372 | |
373 | return resource; |
374 | } |
375 | |
376 | static exclaves_resource_domain_t * |
377 | exclaves_resource_domain_alloc(const char *scope) |
378 | { |
379 | assert3u(strlen(scope), >, 0); |
380 | assert3u(strlen(scope), <=, XNUPROXY_RESOURCE_NAME_MAX); |
381 | |
382 | exclaves_resource_domain_t *domain = kalloc_type( |
383 | exclaves_resource_domain_t, Z_WAITOK | Z_ZERO | Z_NOFAIL); |
384 | (void) strlcpy(domain->d_name, scope, |
385 | sizeof(domain->d_name)); |
386 | |
387 | domain->d_table_name = table_alloc(TABLE_LEN); |
388 | table_init(domain->d_table_name); |
389 | |
390 | domain->d_table_id = table_alloc(TABLE_LEN); |
391 | table_init(domain->d_table_id); |
392 | |
393 | table_item_t *item = kalloc_type(table_item_t, |
394 | Z_WAITOK | Z_ZERO | Z_NOFAIL); |
395 | item->i_key = domain->d_name; |
396 | item->i_key_len = strlen(domain->d_name); |
397 | item->i_value = domain; |
398 | |
399 | table_put(&root_table, scope, strlen(scope), item); |
400 | |
401 | return domain; |
402 | } |
403 | |
404 | static exclaves_resource_t * |
405 | exclaves_resource_alloc(xnuproxy_resource_t type, const char *name, uint64_t id, |
406 | exclaves_resource_domain_t *domain) |
407 | { |
408 | exclaves_resource_t *resource = kalloc_type(exclaves_resource_t, |
409 | Z_WAITOK | Z_ZERO | Z_NOFAIL); |
410 | |
411 | resource->r_type = type; |
412 | resource->r_id = id; |
413 | resource->r_active = false; |
414 | os_atomic_store(&resource->r_usecnt, 0, relaxed); |
415 | |
416 | /* |
417 | * Each resource has an associated kobject of type |
418 | * IKOT_EXCLAVES_RESOURCE. |
419 | */ |
420 | ipc_port_t port = ipc_kobject_alloc_port((ipc_kobject_t)resource, |
421 | IKOT_EXCLAVES_RESOURCE, IPC_KOBJECT_ALLOC_NSREQUEST); |
422 | resource->r_port = port; |
423 | |
424 | lck_mtx_init(&resource->r_mutex, &resource_lck_grp, NULL); |
425 | |
426 | (void) strlcpy(resource->r_name, name, sizeof(resource->r_name)); |
427 | |
428 | |
429 | /* Stick the newly created resource into the name table. */ |
430 | table_item_t *name_item = kalloc_type(table_item_t, |
431 | Z_WAITOK | Z_ZERO | Z_NOFAIL); |
432 | |
433 | name_item->i_key = resource->r_name; |
434 | name_item->i_key_len = strlen(resource->r_name); |
435 | name_item->i_value = resource; |
436 | |
437 | assert(lookup_resource_by_name(domain, name, type) == NULL); |
438 | table_put(domain->d_table_name, name, strlen(name), name_item); |
439 | |
440 | /* |
 * Some types also need to be looked up by ID in addition to being looked up
 * by name.
443 | */ |
444 | switch (type) { |
445 | case XNUPROXY_RESOURCE_NOTIFICATION: { |
446 | /* Stick the newly created resource into the ID table. */ |
447 | table_item_t *id_item = kalloc_type(table_item_t, |
448 | Z_WAITOK | Z_ZERO | Z_NOFAIL); |
449 | id_item->i_key = &resource->r_id; |
450 | id_item->i_key_len = sizeof(resource->r_id); |
451 | id_item->i_value = resource; |
452 | |
453 | assert(lookup_resource_by_id(domain, id, type) == NULL); |
454 | table_put(domain->d_table_id, &id, sizeof(id), id_item); |
455 | break; |
456 | } |
457 | |
458 | default: |
459 | break; |
460 | } |
461 | |
462 | return resource; |
463 | } |
464 | |
465 | /* -------------------------------------------------------------------------- */ |
466 | #pragma mark Exclaves Resources |
467 | |
468 | static void exclaves_resource_no_senders(ipc_port_t port, |
469 | mach_port_mscount_t mscount); |
470 | |
471 | IPC_KOBJECT_DEFINE(IKOT_EXCLAVES_RESOURCE, |
472 | .iko_op_stable = true, |
473 | .iko_op_no_senders = exclaves_resource_no_senders); |
474 | |
475 | static void exclaves_conclave_init(exclaves_resource_t *resource); |
476 | static void exclaves_notification_init(exclaves_resource_t *resource); |
477 | static void exclaves_named_buffer_unmap(exclaves_resource_t *resource); |
478 | static void exclaves_audio_buffer_delete(exclaves_resource_t *resource); |
479 | static void exclaves_resource_sensor_reset(exclaves_resource_t *resource); |
480 | static void exclaves_resource_shared_memory_unmap(exclaves_resource_t *resource); |
481 | static void exclaves_resource_audio_memory_unmap(exclaves_resource_t *resource); |
482 | |
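/*
 * Walk every domain and record which service IDs belong to it: a service in a
 * conclave domain is set in that conclave manager's service bitmap, while a
 * service in the EXCLAVES_DOMAIN_KERNEL or EXCLAVES_DOMAIN_DARWIN domain is
 * set in the global kernel_service_bitmap.
 */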
483 | static void |
484 | populate_conclave_services(void) |
485 | { |
486 | /* BEGIN IGNORE CODESTYLE */ |
487 | iterate_domains(^(exclaves_resource_domain_t *domain) { |
488 | |
489 | const bool is_kernel_domain = |
490 | (strcmp(domain->d_name, EXCLAVES_DOMAIN_KERNEL) == 0 || |
491 | strcmp(domain->d_name, EXCLAVES_DOMAIN_DARWIN) == 0); |
492 | |
493 | exclaves_resource_t *cm = exclaves_resource_lookup_by_name( |
494 | EXCLAVES_DOMAIN_KERNEL, domain->d_name, |
495 | XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
496 | |
497 | iterate_resources(domain, ^(exclaves_resource_t *resource) { |
498 | if (resource->r_type != XNUPROXY_RESOURCE_SERVICE) { |
499 | return (bool)false; |
500 | } |
501 | |
502 | if (cm != NULL) { |
503 | conclave_resource_t *c = &cm->r_conclave; |
504 | bitmap_set(c->c_service_bitmap, |
505 | (uint32_t)resource->r_id); |
506 | return (bool)false; |
507 | } |
508 | |
509 | if (is_kernel_domain) { |
510 | bitmap_set(kernel_service_bitmap, |
511 | (uint32_t)resource->r_id); |
512 | return (bool)false; |
513 | |
514 | } |
515 | |
516 | /* |
517 | * Ignore services that are in unknown domains. This can |
518 | * happen if a conclave manager doesn't have a populated |
519 | * endpoint (for example during bringup). |
520 | */ |
521 | return (bool)false; |
522 | }); |
523 | |
524 | return (bool)false; |
525 | }); |
526 | /* END IGNORE CODESTYLE */ |
527 | } |
528 | |
529 | /* |
 * Discover all the static exclaves resources, populating the resource tables
 * as we go.
532 | */ |
533 | kern_return_t |
534 | exclaves_resource_init(void) |
535 | { |
536 | /* Initialize the root table. */ |
537 | table_init(&root_table); |
538 | |
539 | for (uint32_t i = 0;; i++) { |
540 | /* Get info about the 'i'th resource. */ |
541 | xnuproxy_msg_t msg = { |
542 | .cmd = XNUPROXY_CMD_RESOURCE_INFO, |
543 | .cmd_resource_info = (xnuproxy_cmd_resource_info_t) { |
544 | .request.index = i, |
545 | }, |
546 | }; |
547 | |
548 | kern_return_t kr = exclaves_xnu_proxy_send(&msg, NULL); |
549 | if (kr != KERN_SUCCESS) { |
550 | return kr; |
551 | } |
552 | |
553 | /* |
554 | * An empty name indicates there are no resources left to |
555 | * enumerate. |
556 | */ |
557 | if (msg.cmd_resource_info.response.name[0] == '\0') { |
558 | break; |
559 | } |
560 | |
561 | xnuproxy_resource_t type = msg.cmd_resource_info.response.type; |
562 | const char *name = |
563 | (const char *)&msg.cmd_resource_info.response.name; |
564 | const uint64_t id = msg.cmd_resource_info.response.id; |
565 | const char *scope = |
566 | (const char *)&msg.cmd_resource_info.response.domain; |
567 | |
568 | /* |
 * Every resource is scoped to a specific domain; find the
 * domain (or create it if it doesn't exist).
571 | */ |
572 | exclaves_resource_domain_t *domain = lookup_domain(scope); |
573 | if (domain == NULL) { |
574 | domain = exclaves_resource_domain_alloc(scope); |
575 | } |
576 | |
577 | /* Allocate a new resource in the domain. */ |
578 | exclaves_resource_t *resource = exclaves_resource_alloc(type, |
579 | name, id, domain); |
580 | |
581 | /* |
582 | * Type specific initialization. |
583 | */ |
584 | switch (type) { |
585 | case XNUPROXY_RESOURCE_CONCLAVE_MANAGER: |
586 | exclaves_conclave_init(resource); |
587 | break; |
588 | |
589 | case XNUPROXY_RESOURCE_NOTIFICATION: |
590 | exclaves_notification_init(resource); |
591 | break; |
592 | |
593 | case XNUPROXY_RESOURCE_SERVICE: |
594 | assert3u(resource->r_id, <, CONCLAVE_SERVICE_MAX); |
595 | break; |
596 | |
597 | default: |
598 | break; |
599 | } |
600 | } |
601 | |
602 | /* Populate the conclave service ID bitmaps. */ |
603 | populate_conclave_services(); |
604 | |
605 | return KERN_SUCCESS; |
606 | } |
607 | |
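/*
 * Find a resource of the given type by name within the specified domain.
 * Returns NULL if either the domain or the resource doesn't exist.
 */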
608 | exclaves_resource_t * |
609 | exclaves_resource_lookup_by_name(const char *domain_name, const char *name, |
610 | xnuproxy_resource_t type) |
611 | { |
612 | assert3u(strlen(domain_name), >, 0); |
613 | assert3u(strlen(name), >, 0); |
614 | |
615 | exclaves_resource_domain_t *domain = lookup_domain(domain_name); |
616 | if (domain == NULL) { |
617 | return NULL; |
618 | } |
619 | |
620 | return lookup_resource_by_name(domain, name, type); |
621 | } |
622 | |
623 | static exclaves_resource_t * |
624 | exclaves_resource_lookup_by_id(const char *domain_name, uint64_t id, |
625 | xnuproxy_resource_t type) |
626 | { |
627 | assert3u(strlen(domain_name), >, 0); |
628 | |
629 | exclaves_resource_domain_t *domain = lookup_domain(domain_name); |
630 | if (domain == NULL) { |
631 | return NULL; |
632 | } |
633 | |
634 | return lookup_resource_by_id(domain, id, type); |
635 | } |
636 | |
637 | const char * |
638 | exclaves_resource_name(const exclaves_resource_t *resource) |
639 | { |
640 | return resource->r_name; |
641 | } |
642 | |
643 | /* |
644 | * Notes on use-count management |
645 | * For the most part everything is done under the resource lock. |
646 | * In some cases, it's necessary to grab/release a use count without |
647 | * holding the lock - for example the realtime audio paths doing copyin/copyout |
648 | * of named buffers/audio buffers. |
 * To protect against races, initialization/de-initialization should always
650 | * recheck the use-count under the lock. |
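 *
 * A typical (purely illustrative) usage pattern is:
 *
 *     exclaves_resource_retain(resource);
 *     ...access the resource...
 *     exclaves_resource_release(resource);
 *
 * exclaves_resource_from_port_name() returns with a use count already held,
 * so only the release is needed in that case.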
651 | */ |
652 | uint32_t |
653 | exclaves_resource_retain(exclaves_resource_t *resource) |
654 | { |
655 | uint32_t orig = |
656 | os_atomic_inc_orig(&resource->r_usecnt, relaxed); |
657 | assert3u(orig, <, UINT32_MAX); |
658 | |
659 | return orig; |
660 | } |
661 | |
662 | void |
663 | exclaves_resource_release(exclaves_resource_t *resource) |
664 | { |
665 | /* |
666 | * Drop the use count without holding the lock (this path may be called |
667 | * by RT threads and should be RT-safe). |
668 | */ |
669 | uint32_t orig = os_atomic_dec_orig(&resource->r_usecnt, relaxed); |
670 | assert3u(orig, !=, 0); |
671 | if (orig != 1) { |
672 | return; |
673 | } |
674 | |
675 | /* |
676 | * Now grab the lock. The RT-safe paths calling this function shouldn't |
677 | * end up here unless there's a bug or mis-behaving user code (like |
678 | * deallocating an in-use mach port). |
679 | */ |
680 | lck_mtx_lock(&resource->r_mutex); |
681 | |
682 | /* |
683 | * Re-check the use count - as a second user of the resource |
684 | * may have snuck in in the meantime. |
685 | */ |
686 | if (os_atomic_load(&resource->r_usecnt, relaxed) > 0) { |
687 | lck_mtx_unlock(&resource->r_mutex); |
688 | return; |
689 | } |
690 | |
691 | switch (resource->r_type) { |
692 | case XNUPROXY_RESOURCE_NAMED_BUFFER: |
693 | exclaves_named_buffer_unmap(resource); |
694 | break; |
695 | |
696 | case XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER: |
697 | exclaves_audio_buffer_delete(resource); |
698 | break; |
699 | |
700 | case XNUPROXY_RESOURCE_SENSOR: |
701 | exclaves_resource_sensor_reset(resource); |
702 | break; |
703 | |
704 | case XNUPROXY_RESOURCE_SHARED_MEMORY: |
705 | exclaves_resource_shared_memory_unmap(resource); |
706 | break; |
707 | |
708 | case XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY: |
709 | exclaves_resource_audio_memory_unmap(resource); |
710 | break; |
711 | |
712 | default: |
713 | break; |
714 | } |
715 | |
716 | lck_mtx_unlock(&resource->r_mutex); |
717 | } |
718 | |
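/*
 * Translate a port name in the given IPC space into the exclaves resource it
 * denotes, taking a use-count reference on success. The caller is expected to
 * drop that reference with exclaves_resource_release() when done.
 */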
719 | kern_return_t |
720 | exclaves_resource_from_port_name(ipc_space_t space, mach_port_name_t name, |
721 | exclaves_resource_t **out) |
722 | { |
723 | kern_return_t kr = KERN_SUCCESS; |
724 | ipc_port_t port = IPC_PORT_NULL; |
725 | |
726 | if (!MACH_PORT_VALID(name)) { |
727 | return KERN_INVALID_NAME; |
728 | } |
729 | |
730 | kr = ipc_port_translate_send(space, name, &port); |
731 | if (kr != KERN_SUCCESS) { |
732 | return kr; |
733 | } |
734 | |
735 | /* port is locked */ |
736 | assert(IP_VALID(port)); |
737 | |
738 | exclaves_resource_t *resource = ipc_kobject_get_stable(port, |
739 | IKOT_EXCLAVES_RESOURCE); |
740 | |
741 | /* The port is valid, but doesn't denote an exclaves resource. */ |
742 | if (resource == NULL) { |
743 | ip_mq_unlock(port); |
744 | return KERN_INVALID_CAPABILITY; |
745 | } |
746 | |
747 | /* Grab a reference while the port is good and the ipc lock is held. */ |
748 | __assert_only uint32_t orig = exclaves_resource_retain(resource); |
749 | assert3u(orig, >, 0); |
750 | |
751 | ip_mq_unlock(port); |
752 | *out = resource; |
753 | |
754 | return KERN_SUCCESS; |
755 | } |
756 | |
757 | /* |
 * Consumes a reference to the resource. On success the resource reference is
759 | * associated with the lifetime of the port. |
760 | */ |
761 | kern_return_t |
762 | exclaves_resource_create_port_name(exclaves_resource_t *resource, ipc_space_t space, |
763 | mach_port_name_t *name) |
764 | { |
765 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0); |
766 | |
767 | ipc_port_t port = resource->r_port; |
768 | |
769 | ip_mq_lock(port); |
770 | |
771 | /* Create an armed send right. */ |
772 | kern_return_t ret = ipc_kobject_make_send_nsrequest_locked(port, |
773 | resource, IKOT_EXCLAVES_RESOURCE); |
774 | if (ret != KERN_SUCCESS && |
775 | ret != KERN_ALREADY_WAITING) { |
776 | ip_mq_unlock(port); |
777 | exclaves_resource_release(resource); |
778 | return ret; |
779 | } |
780 | |
781 | /* |
782 | * If there was already a send right, then the port already has an |
783 | * associated use count so drop this one. |
784 | */ |
785 | if (port->ip_srights > 1) { |
786 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 1); |
787 | exclaves_resource_release(resource); |
788 | } |
789 | |
790 | ip_mq_unlock(port); |
791 | |
792 | *name = ipc_port_copyout_send(port, space); |
793 | if (!MACH_PORT_VALID(*name)) { |
794 | /* |
795 | * ipc_port_copyout_send() releases the send right on failure |
796 | * (possibly calling exclaves_resource_no_senders() in the |
797 | * process). |
798 | */ |
799 | return KERN_RESOURCE_SHORTAGE; |
800 | } |
801 | |
802 | return KERN_SUCCESS; |
803 | } |
804 | |
805 | static void |
806 | exclaves_resource_no_senders(ipc_port_t port, |
807 | __unused mach_port_mscount_t mscount) |
808 | { |
809 | exclaves_resource_t *resource = ipc_kobject_get_stable(port, |
810 | IKOT_EXCLAVES_RESOURCE); |
811 | |
812 | exclaves_resource_release(resource); |
813 | } |
814 | |
815 | /* -------------------------------------------------------------------------- */ |
816 | #pragma mark Named Buffers |
817 | |
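/*
 * Perform I/O over a named buffer which may be physically discontiguous:
 * 'cb' is called once per contiguous range covering [offset, offset + len),
 * and iteration stops early if 'cb' returns non-zero.
 */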
818 | int |
819 | exclaves_named_buffer_io(exclaves_resource_t *resource, off_t offset, |
820 | size_t len, int (^cb)(char *, size_t)) |
821 | { |
822 | assert(resource->r_type == XNUPROXY_RESOURCE_NAMED_BUFFER || |
823 | resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER); |
824 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0); |
825 | |
826 | named_buffer_resource_t *nb = &resource->r_named_buffer; |
827 | assert3u(nb->nb_nranges, >, 0); |
828 | assert3u(nb->nb_size, !=, 0); |
829 | assert3u(offset + len, <=, nb->nb_size); |
830 | |
831 | for (int i = 0; i < nb->nb_nranges; i++) { |
832 | /* Skip forward to the starting range. */ |
833 | if (offset >= nb->nb_range[i].npages * PAGE_SIZE) { |
834 | offset -= nb->nb_range[i].npages * PAGE_SIZE; |
835 | continue; |
836 | } |
837 | |
838 | size_t size = MIN((nb->nb_range[i].npages * PAGE_SIZE) - offset, len); |
839 | int ret = cb(nb->nb_range[i].address + offset, size); |
840 | if (ret != 0) { |
841 | return ret; |
842 | } |
843 | |
844 | offset = 0; |
845 | len -= size; |
846 | |
847 | if (len == 0) { |
848 | break; |
849 | } |
850 | } |
851 | assert3u(len, ==, 0); |
852 | |
853 | return 0; |
854 | } |
855 | |
856 | static kern_return_t |
857 | exclaves_named_buffer_io_copyin(exclaves_resource_t *resource, |
858 | user_addr_t _src, off_t offset, size_t len) |
859 | { |
860 | assert3u(resource->r_named_buffer.nb_perm & EXCLAVES_BUFFER_PERM_WRITE, |
861 | !=, 0); |
862 | |
863 | __block user_addr_t src = _src; |
864 | return exclaves_named_buffer_io(resource, offset, len, |
865 | ^(char *buffer, size_t size) { |
866 | if (copyin(src, buffer, size) != 0) { |
867 | return KERN_FAILURE; |
868 | } |
869 | |
870 | src += size; |
871 | return KERN_SUCCESS; |
872 | }); |
873 | } |
874 | |
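/*
 * Copy user data into a named buffer. Two (size, offset) pairs are accepted
 * so that a single call can fill two distinct regions of the buffer; the
 * second region is sourced from the user buffer immediately after the first
 * 'size1' bytes.
 */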
875 | kern_return_t |
876 | exclaves_named_buffer_copyin(exclaves_resource_t *resource, |
877 | user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1, |
878 | mach_vm_size_t size2, mach_vm_size_t offset2) |
879 | { |
880 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0); |
881 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_NAMED_BUFFER); |
882 | |
883 | mach_vm_size_t umax = 0; |
884 | kern_return_t kr = KERN_FAILURE; |
885 | |
886 | if (buffer == USER_ADDR_NULL || size1 == 0) { |
887 | return KERN_INVALID_ARGUMENT; |
888 | } |
889 | |
890 | named_buffer_resource_t *nb = &resource->r_named_buffer; |
891 | assert3u(nb->nb_nranges, >, 0); |
892 | assert3u(nb->nb_size, !=, 0); |
893 | |
894 | if (os_add_overflow(offset1, size1, &umax) || umax > nb->nb_size) { |
895 | return KERN_INVALID_ARGUMENT; |
896 | } |
897 | |
898 | if (os_add_overflow(offset2, size2, &umax) || umax > nb->nb_size) { |
899 | return KERN_INVALID_ARGUMENT; |
900 | } |
901 | |
902 | if ((nb->nb_perm & EXCLAVES_BUFFER_PERM_WRITE) == 0) { |
903 | return KERN_PROTECTION_FAILURE; |
904 | } |
905 | |
906 | kr = exclaves_named_buffer_io_copyin(resource, buffer, offset1, size1); |
907 | if (kr != KERN_SUCCESS) { |
908 | return kr; |
909 | } |
910 | |
911 | kr = exclaves_named_buffer_io_copyin(resource, buffer + size1, offset2, |
912 | size2); |
913 | if (kr != KERN_SUCCESS) { |
914 | return kr; |
915 | } |
916 | |
917 | return KERN_SUCCESS; |
918 | } |
919 | |
920 | static kern_return_t |
921 | exclaves_named_buffer_io_copyout(exclaves_resource_t *resource, |
922 | user_addr_t _dst, off_t offset, size_t len) |
923 | { |
924 | assert3u(resource->r_named_buffer.nb_perm & EXCLAVES_BUFFER_PERM_READ, |
925 | !=, 0); |
926 | |
927 | __block user_addr_t dst = _dst; |
928 | return exclaves_named_buffer_io(resource, offset, len, |
929 | ^(char *buffer, size_t size) { |
930 | if (copyout(buffer, dst, size) != 0) { |
931 | return KERN_FAILURE; |
932 | } |
933 | |
934 | dst += size; |
935 | return KERN_SUCCESS; |
936 | }); |
937 | } |
938 | |
939 | kern_return_t |
940 | exclaves_named_buffer_copyout(exclaves_resource_t *resource, |
941 | user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1, |
942 | mach_vm_size_t size2, mach_vm_size_t offset2) |
943 | { |
944 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0); |
945 | assert(resource->r_type == XNUPROXY_RESOURCE_NAMED_BUFFER || |
946 | resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER); |
947 | |
948 | mach_vm_size_t umax = 0; |
949 | kern_return_t kr = KERN_FAILURE; |
950 | |
951 | if (buffer == USER_ADDR_NULL || size1 == 0) { |
952 | return KERN_INVALID_ARGUMENT; |
953 | } |
954 | |
955 | named_buffer_resource_t *nb = &resource->r_named_buffer; |
956 | assert3u(nb->nb_nranges, >, 0); |
957 | assert3u(nb->nb_size, !=, 0); |
958 | |
959 | if (os_add_overflow(offset1, size1, &umax) || umax > nb->nb_size) { |
960 | return KERN_INVALID_ARGUMENT; |
961 | } |
962 | |
963 | if (os_add_overflow(offset2, size2, &umax) || umax > nb->nb_size) { |
964 | return KERN_INVALID_ARGUMENT; |
965 | } |
966 | |
967 | if ((nb->nb_perm & EXCLAVES_BUFFER_PERM_READ) == 0) { |
968 | return KERN_PROTECTION_FAILURE; |
969 | } |
970 | |
971 | kr = exclaves_named_buffer_io_copyout(resource, buffer, offset1, size1); |
972 | if (kr != KERN_SUCCESS) { |
973 | return kr; |
974 | } |
975 | |
976 | kr = exclaves_named_buffer_io_copyout(resource, buffer + size1, |
977 | offset2, size2); |
978 | if (kr != KERN_SUCCESS) { |
979 | return kr; |
980 | } |
981 | |
982 | return KERN_SUCCESS; |
983 | } |
984 | |
985 | static void |
986 | named_buffer_unmap(exclaves_resource_t *resource) |
987 | { |
988 | assert(resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER || |
989 | resource->r_type == XNUPROXY_RESOURCE_NAMED_BUFFER); |
990 | LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED); |
991 | |
992 | /* BEGIN IGNORE CODESTYLE */ |
993 | resource->r_type == XNUPROXY_RESOURCE_NAMED_BUFFER ? |
994 | exclaves_named_buffer_unmap(resource) : |
995 | exclaves_audio_buffer_delete(resource); |
996 | /* END IGNORE CODESTYLE */ |
997 | } |
998 | |
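/*
 * Map a named buffer or arbitrated audio buffer of at least 'size' bytes
 * (rounded up to a whole number of pages). The buffer is mapped with an
 * XNUPROXY_CMD_{NAMED,AUDIO}_BUFFER_MAP message and its physical layout is
 * then discovered with the corresponding _LAYOUT messages. On success a
 * use-count reference is held on the resource.
 */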
999 | static kern_return_t |
1000 | named_buffer_map(exclaves_resource_t *resource, size_t size, |
1001 | exclaves_buffer_perm_t perm) |
1002 | { |
1003 | assert(resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER || |
1004 | resource->r_type == XNUPROXY_RESOURCE_NAMED_BUFFER); |
1005 | assert3u(perm & ~(EXCLAVES_BUFFER_PERM_READ | EXCLAVES_BUFFER_PERM_WRITE), ==, 0); |
1006 | |
1007 | xnuproxy_cmd_t cmd = 0; |
1008 | kern_return_t kr = KERN_FAILURE; |
1009 | uint32_t status = 0; |
1010 | |
1011 | if (size == 0) { |
1012 | return KERN_INVALID_ARGUMENT; |
1013 | } |
1014 | |
1015 | /* round size up to nearest page */ |
1016 | mach_vm_offset_t rounded_size = 0; |
1017 | if (mach_vm_round_page_overflow(size, &rounded_size)) { |
1018 | return KERN_INVALID_ARGUMENT; |
1019 | } |
1020 | |
1021 | lck_mtx_lock(&resource->r_mutex); |
1022 | |
1023 | /* |
 * If already active, check that the perms and size are compatible, bump
 * the use count and return. Checking the use count alone is insufficient
 * here as it can race with a non-locked use-count release.
1027 | */ |
1028 | if (resource->r_active) { |
1029 | const named_buffer_resource_t *nb = &resource->r_named_buffer; |
1030 | |
1031 | /* |
1032 | * When only inbound and outbound buffers are supported, the |
1033 | * perm check should be updated to ensure that the perms match |
1034 | * (rather than being a subset). */ |
1035 | if (nb->nb_size < rounded_size || |
1036 | (nb->nb_perm & perm) == 0) { |
1037 | lck_mtx_unlock(&resource->r_mutex); |
1038 | return KERN_INVALID_ARGUMENT; |
1039 | } |
1040 | |
1041 | exclaves_resource_retain(resource); |
1042 | lck_mtx_unlock(&resource->r_mutex); |
1043 | return KERN_SUCCESS; |
1044 | } |
1045 | |
1046 | cmd = resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER ? |
	    XNUPROXY_CMD_AUDIO_BUFFER_MAP :
1048 | XNUPROXY_CMD_NAMED_BUFFER_MAP; |
1049 | xnuproxy_msg_t msg = { |
1050 | .cmd = cmd, |
1051 | .cmd_named_buf_map = (xnuproxy_cmd_named_buf_map_t) { |
1052 | .request.buffer_id = resource->r_id, |
1053 | .request.buffer_size = rounded_size, |
1054 | } |
1055 | }; |
1056 | |
1057 | kr = exclaves_xnu_proxy_send(&msg, NULL); |
1058 | if (kr != KERN_SUCCESS) { |
1059 | lck_mtx_unlock(&resource->r_mutex); |
1060 | return kr; |
1061 | } |
1062 | status = msg.cmd_named_buf_map.response.status; |
1063 | if (status != XNUPROXY_NAMED_BUFFER_SUCCESS) { |
1064 | lck_mtx_unlock(&resource->r_mutex); |
1065 | return status == XNUPROXY_NAMED_BUFFER_EINVAL ? |
1066 | KERN_INVALID_ARGUMENT : KERN_FAILURE; |
1067 | } |
1068 | |
1069 | /* |
1070 | * From this point on named_buffer_unmap() must be called if |
1071 | * something goes wrong so that the buffer will be properly unmapped. |
1072 | */ |
1073 | const bool ro = msg.cmd_named_buf_map.response.readonly != 0; |
1074 | switch (perm) { |
1075 | case EXCLAVES_BUFFER_PERM_READ: |
1076 | if (!ro) { |
1077 | named_buffer_unmap(resource); |
1078 | lck_mtx_unlock(&resource->r_mutex); |
1079 | return KERN_PROTECTION_FAILURE; |
1080 | } |
1081 | break; |
1082 | case EXCLAVES_BUFFER_PERM_WRITE: |
1083 | if (ro) { |
1084 | named_buffer_unmap(resource); |
1085 | lck_mtx_unlock(&resource->r_mutex); |
1086 | return KERN_PROTECTION_FAILURE; |
1087 | } |
1088 | break; |
1089 | /* Maintain backwards compatibility for named buffers (READ|WRITE) */ |
1090 | case EXCLAVES_BUFFER_PERM_READ | EXCLAVES_BUFFER_PERM_WRITE: |
1091 | if (ro) { |
1092 | perm &= ~EXCLAVES_BUFFER_PERM_WRITE; |
1093 | } |
1094 | break; |
1095 | } |
1096 | |
1097 | named_buffer_resource_t *nb = &resource->r_named_buffer; |
1098 | nb->nb_size = rounded_size; |
1099 | nb->nb_perm = perm; |
1100 | |
1101 | /* |
1102 | * The named buffer is now accessible by xnu. Discover the |
1103 | * layout of the memory. |
1104 | */ |
1105 | const uint64_t count = rounded_size / PAGE_SIZE; |
1106 | uint32_t page = 0; |
1107 | cmd = resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER ? |
	    XNUPROXY_CMD_AUDIO_BUFFER_LAYOUT :
1109 | XNUPROXY_CMD_NAMED_BUFFER_LAYOUT; |
1110 | while (page < count) { |
1111 | xnuproxy_msg_t layout_msg = { |
1112 | .cmd = cmd, |
1113 | .cmd_named_buf_layout = (xnuproxy_cmd_named_buf_layout_t) { |
1114 | .request.buffer_id = resource->r_id, |
1115 | .request.start = page, |
1116 | .request.npages = (uint32_t)count - page, |
1117 | } |
1118 | }; |
1119 | |
1120 | kr = exclaves_xnu_proxy_send(&layout_msg, NULL); |
1121 | if (kr != KERN_SUCCESS) { |
1122 | named_buffer_unmap(resource); |
1123 | lck_mtx_unlock(&resource->r_mutex); |
1124 | return kr; |
1125 | } |
1126 | |
1127 | status = layout_msg.cmd_named_buf_layout.response.status; |
1128 | switch (status) { |
1129 | case XNUPROXY_NAMED_BUFFER_SUCCESS: |
1130 | case XNUPROXY_NAMED_BUFFER_ENOSPC: |
1131 | break; |
1132 | |
1133 | case XNUPROXY_NAMED_BUFFER_EINVAL: |
1134 | named_buffer_unmap(resource); |
1135 | lck_mtx_unlock(&resource->r_mutex); |
1136 | return KERN_INVALID_ARGUMENT; |
1137 | |
1138 | default: |
1139 | named_buffer_unmap(resource); |
1140 | lck_mtx_unlock(&resource->r_mutex); |
1141 | return KERN_FAILURE; |
1142 | } |
1143 | |
1144 | xnuproxy_named_buf_range_t *range = |
1145 | layout_msg.cmd_named_buf_layout.response.range; |
1146 | uint32_t nranges = |
1147 | layout_msg.cmd_named_buf_layout.response.nranges; |
1148 | |
1149 | if (nb->nb_nranges + nranges > EXCLAVES_SHARED_BUFFER_MAX_RANGES) { |
1150 | named_buffer_unmap(resource); |
1151 | lck_mtx_unlock(&resource->r_mutex); |
1152 | exclaves_debug_printf(show_errors, "exclaves: " |
			    "fragmented named buffer can't fit\n");
1154 | return KERN_FAILURE; |
1155 | } |
1156 | |
1157 | for (uint32_t i = 0; i < nranges; i++) { |
1158 | nb->nb_range[nb->nb_nranges].address = |
1159 | (char *)phystokv(range[i].address); |
1160 | nb->nb_range[nb->nb_nranges].npages = range[i].npages; |
1161 | |
1162 | assert3p(nb->nb_range[nb->nb_nranges].address, !=, |
1163 | NULL); |
1164 | |
1165 | nb->nb_nranges++; |
1166 | page += range[i].npages; |
1167 | assert3u(page, <=, count); |
1168 | } |
1169 | } |
1170 | |
1171 | exclaves_resource_retain(resource); |
1172 | resource->r_active = true; |
1173 | |
1174 | lck_mtx_unlock(&resource->r_mutex); |
1175 | |
1176 | return KERN_SUCCESS; |
1177 | } |
1178 | |
1179 | kern_return_t |
1180 | exclaves_named_buffer_map(const char *domain, const char *name, size_t size, |
1181 | exclaves_buffer_perm_t perm, exclaves_resource_t **out) |
1182 | { |
1183 | assert3p(out, !=, NULL); |
1184 | |
1185 | exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain, |
1186 | name, XNUPROXY_RESOURCE_NAMED_BUFFER); |
1187 | if (resource == NULL) { |
1188 | return KERN_NOT_FOUND; |
1189 | } |
1190 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_NAMED_BUFFER); |
1191 | |
1192 | kern_return_t kr = named_buffer_map(resource, size, perm); |
1193 | if (kr != KERN_SUCCESS) { |
1194 | return kr; |
1195 | } |
1196 | |
1197 | *out = resource; |
1198 | return KERN_SUCCESS; |
1199 | } |
1200 | |
1201 | static void |
1202 | exclaves_named_buffer_unmap(exclaves_resource_t *resource) |
1203 | { |
1204 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_NAMED_BUFFER); |
1205 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0); |
1206 | LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED); |
1207 | |
1208 | xnuproxy_msg_t msg = { |
1209 | .cmd = XNUPROXY_CMD_NAMED_BUFFER_DELETE, |
1210 | .cmd_named_buf_delete.request.buffer_id = resource->r_id, |
1211 | }; |
1212 | |
1213 | kern_return_t kr = exclaves_xnu_proxy_send(&msg, NULL); |
1214 | if (kr != KERN_SUCCESS) { |
1215 | exclaves_debug_printf(show_errors, |
		    "exclaves: failed to delete named buffer: %s\n",
1217 | resource->r_name); |
1218 | return; |
1219 | } |
1220 | uint8_t status = msg.cmd_named_buf_delete.response.status; |
1221 | |
1222 | if (status != XNUPROXY_NAMED_BUFFER_SUCCESS) { |
1223 | exclaves_debug_printf(show_errors, |
1224 | "exclaves: failed to delete named buffer: %s, " |
		    "status: %d\n", resource->r_name, status);
1226 | return; |
1227 | } |
1228 | |
1229 | bzero(&resource->r_named_buffer, sizeof(resource->r_named_buffer)); |
1230 | |
1231 | resource->r_active = false; |
1232 | } |
1233 | |
1234 | /* -------------------------------------------------------------------------- */ |
1235 | #pragma mark Audio buffers |
1236 | |
1237 | kern_return_t |
1238 | exclaves_audio_buffer_map(const char *domain, const char *name, size_t size, |
1239 | exclaves_resource_t **out) |
1240 | { |
1241 | assert3p(out, !=, NULL); |
1242 | |
1243 | exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain, |
1244 | name, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER); |
1245 | if (resource == NULL) { |
1246 | return KERN_NOT_FOUND; |
1247 | } |
1248 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER); |
1249 | |
1250 | kern_return_t kr = named_buffer_map(resource, size, |
1251 | EXCLAVES_BUFFER_PERM_READ); |
1252 | if (kr != KERN_SUCCESS) { |
1253 | return kr; |
1254 | } |
1255 | |
1256 | *out = resource; |
1257 | return KERN_SUCCESS; |
1258 | } |
1259 | |
1260 | static void |
1261 | exclaves_audio_buffer_delete(exclaves_resource_t *resource) |
1262 | { |
1263 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER); |
1264 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0); |
1265 | LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED); |
1266 | |
1267 | xnuproxy_msg_t msg = { |
1268 | .cmd = XNUPROXY_CMD_AUDIO_BUFFER_DELETE, |
1269 | .cmd_audio_buf_delete.request.buffer_id = resource->r_id, |
1270 | }; |
1271 | |
1272 | kern_return_t kr = exclaves_xnu_proxy_send(&msg, NULL); |
1273 | if (kr != KERN_SUCCESS) { |
1274 | exclaves_debug_printf(show_errors, |
		    "exclaves: failed to delete audio buffer: %s\n",
1276 | resource->r_name); |
1277 | return; |
1278 | } |
1279 | uint8_t status = msg.cmd_audio_buf_delete.response.status; |
1280 | |
1281 | if (status != XNUPROXY_NAMED_BUFFER_SUCCESS) { |
1282 | exclaves_debug_printf(show_errors, |
1283 | "exclaves: failed to delete audio buffer: %s, " |
		    "status: %d\n", resource->r_name, status);
1285 | return; |
1286 | } |
1287 | |
1288 | bzero(&resource->r_named_buffer, sizeof(resource->r_named_buffer)); |
1289 | resource->r_active = false; |
1290 | } |
1291 | |
1292 | kern_return_t |
1293 | exclaves_audio_buffer_copyout(exclaves_resource_t *resource, |
1294 | user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1, |
1295 | mach_vm_size_t size2, mach_vm_size_t offset2) |
1296 | { |
1297 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0); |
1298 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER); |
1299 | |
1300 | xnuproxy_msg_t msg = { |
1301 | .cmd = XNUPROXY_CMD_AUDIO_BUFFER_COPYOUT, |
1302 | .cmd_audio_buf_copyout.request.buffer_id = resource->r_id, |
1303 | .cmd_audio_buf_copyout.request.size1 = size1, |
1304 | .cmd_audio_buf_copyout.request.offset1 = offset1, |
1305 | .cmd_audio_buf_copyout.request.size2 = size2, |
1306 | .cmd_audio_buf_copyout.request.offset2 = offset2, |
1307 | }; |
1308 | |
1309 | kern_return_t kr = exclaves_xnu_proxy_send(&msg, NULL); |
1310 | if (kr != KERN_SUCCESS) { |
1311 | return kr; |
1312 | } |
1313 | uint8_t status = msg.cmd_audio_buf_copyout.response.status; |
1314 | |
1315 | if (status != XNUPROXY_NAMED_BUFFER_SUCCESS) { |
1316 | if (status == XNUPROXY_NAMED_BUFFER_EINVAL) { |
1317 | return KERN_INVALID_ARGUMENT; |
1318 | } |
1319 | return KERN_FAILURE; |
1320 | } |
1321 | |
1322 | return exclaves_named_buffer_copyout(resource, buffer, size1, offset1, |
1323 | size2, offset2); |
1324 | } |
1325 | |
1326 | /* -------------------------------------------------------------------------- */ |
1327 | #pragma mark Conclave Manager |
1328 | |
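/*
 * Conclave manager lifecycle (states are the CONCLAVE_S_* values; every
 * transition below is made while holding the resource mutex):
 *
 *   attach:  NONE -> ATTACHED
 *   launch:  ATTACHED -> LAUNCHING -> LAUNCHED (or -> STOPPED if a stop was
 *            requested while the launch was in flight)
 *   stop:    LAUNCHED -> STOPPING -> STOPPED (a stop that arrives during
 *            launch is recorded as STOP_REQUESTED and completed by the
 *            launching thread)
 *   detach:  ATTACHED/STOPPED -> NONE
 */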
1329 | static void |
1330 | exclaves_conclave_init(exclaves_resource_t *resource) |
1331 | { |
1332 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1333 | |
1334 | tb_client_connection_t connection = NULL; |
1335 | __assert_only kern_return_t kr = exclaves_conclave_launcher_init(resource->r_id, |
1336 | &connection); |
1337 | assert3u(kr, ==, KERN_SUCCESS); |
1338 | |
1339 | conclave_resource_t *conclave = &resource->r_conclave; |
1340 | |
1341 | conclave->c_control = connection; |
1342 | conclave->c_state = CONCLAVE_S_NONE; |
1343 | conclave->c_task = TASK_NULL; |
1344 | } |
1345 | |
1346 | kern_return_t |
1347 | exclaves_conclave_attach(const char *domain, const char *name, task_t task) |
1348 | { |
1349 | assert3p(task, !=, TASK_NULL); |
1350 | |
1351 | exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain, |
1352 | name, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1353 | if (resource == NULL) { |
1354 | return KERN_INVALID_ARGUMENT; |
1355 | } |
1356 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1357 | |
1358 | conclave_resource_t *conclave = &resource->r_conclave; |
1359 | |
1360 | lck_mtx_lock(&resource->r_mutex); |
1361 | |
1362 | if (conclave->c_state != CONCLAVE_S_NONE) { |
1363 | lck_mtx_unlock(&resource->r_mutex); |
1364 | return KERN_INVALID_ARGUMENT; |
1365 | } |
1366 | |
1367 | task_reference(task); |
1368 | |
1369 | task->conclave = resource; |
1370 | |
1371 | conclave->c_task = task; |
1372 | conclave->c_state = CONCLAVE_S_ATTACHED; |
1373 | |
1374 | lck_mtx_unlock(&resource->r_mutex); |
1375 | |
1376 | return KERN_SUCCESS; |
1377 | } |
1378 | |
1379 | kern_return_t |
1380 | exclaves_conclave_detach(exclaves_resource_t *resource, task_t task) |
1381 | { |
1382 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1383 | |
1384 | conclave_resource_t *conclave = &resource->r_conclave; |
1385 | |
1386 | lck_mtx_lock(&resource->r_mutex); |
1387 | |
1388 | if (conclave->c_state != CONCLAVE_S_ATTACHED && |
1389 | conclave->c_state != CONCLAVE_S_STOPPED) { |
1390 | panic("Task %p trying to detach a conclave %p but it is in a " |
		    "weird state", task, conclave);
1392 | } |
1393 | |
1394 | assert3p(task->conclave, !=, NULL); |
1395 | assert3p(resource, ==, task->conclave); |
1396 | |
1397 | task->conclave = NULL; |
1398 | conclave->c_task = TASK_NULL; |
1399 | |
1400 | conclave->c_state = CONCLAVE_S_NONE; |
1401 | |
1402 | lck_mtx_unlock(&resource->r_mutex); |
1403 | |
1404 | task_deallocate(task); |
1405 | |
1406 | return KERN_SUCCESS; |
1407 | } |
1408 | |
1409 | kern_return_t |
1410 | exclaves_conclave_inherit(exclaves_resource_t *resource, task_t old_task, |
1411 | task_t new_task) |
1412 | { |
1413 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1414 | |
1415 | conclave_resource_t *conclave = &resource->r_conclave; |
1416 | |
1417 | lck_mtx_lock(&resource->r_mutex); |
1418 | |
1419 | assert3u(conclave->c_state, !=, CONCLAVE_S_NONE); |
1420 | |
1421 | assert3p(new_task->conclave, ==, NULL); |
1422 | assert3p(old_task->conclave, !=, NULL); |
1423 | assert3p(resource, ==, old_task->conclave); |
1424 | |
1425 | /* Only allow inheriting the conclave if it has not yet started. */ |
1426 | if (conclave->c_state != CONCLAVE_S_ATTACHED) { |
1427 | lck_mtx_unlock(&resource->r_mutex); |
1428 | return KERN_FAILURE; |
1429 | } |
1430 | |
1431 | old_task->conclave = NULL; |
1432 | |
1433 | task_reference(new_task); |
1434 | new_task->conclave = resource; |
1435 | |
1436 | conclave->c_task = new_task; |
1437 | |
1438 | lck_mtx_unlock(&resource->r_mutex); |
1439 | task_deallocate(old_task); |
1440 | |
1441 | return KERN_SUCCESS; |
1442 | } |
1443 | |
1444 | bool |
1445 | exclaves_conclave_is_attached(const exclaves_resource_t *resource) |
1446 | { |
1447 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1448 | const conclave_resource_t *conclave = &resource->r_conclave; |
1449 | |
1450 | return conclave->c_state == CONCLAVE_S_ATTACHED; |
1451 | } |
1452 | |
1453 | kern_return_t |
1454 | exclaves_conclave_launch(exclaves_resource_t *resource) |
1455 | { |
1456 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1457 | |
1458 | conclave_resource_t *conclave = &resource->r_conclave; |
1459 | |
1460 | lck_mtx_lock(&resource->r_mutex); |
1461 | |
1462 | if (conclave->c_state != CONCLAVE_S_ATTACHED) { |
1463 | lck_mtx_unlock(&resource->r_mutex); |
1464 | return KERN_FAILURE; |
1465 | } |
1466 | |
1467 | conclave->c_state = CONCLAVE_S_LAUNCHING; |
1468 | lck_mtx_unlock(&resource->r_mutex); |
1469 | |
1470 | __assert_only kern_return_t ret = |
1471 | exclaves_conclave_launcher_launch(conclave->c_control); |
1472 | assert3u(ret, ==, KERN_SUCCESS); |
1473 | |
1474 | lck_mtx_lock(&resource->r_mutex); |
1475 | /* Check if conclave stop is requested */ |
1476 | if (conclave->c_state == CONCLAVE_S_STOP_REQUESTED) { |
1477 | conclave->c_state = CONCLAVE_S_STOPPING; |
1478 | lck_mtx_unlock(&resource->r_mutex); |
1479 | |
1480 | ret = exclaves_conclave_launcher_stop(conclave->c_control, |
1481 | CONCLAVE_LAUNCHER_CONCLAVESTOPREASON_EXIT); |
1482 | assert3u(ret, ==, KERN_SUCCESS); |
1483 | |
1484 | lck_mtx_lock(&resource->r_mutex); |
1485 | conclave->c_state = CONCLAVE_S_STOPPED; |
1486 | } else if (conclave->c_state == CONCLAVE_S_LAUNCHING) { |
1487 | conclave->c_state = CONCLAVE_S_LAUNCHED; |
1488 | } |
1489 | lck_mtx_unlock(&resource->r_mutex); |
1490 | |
1491 | return KERN_SUCCESS; |
1492 | } |
1493 | |
1494 | /* |
1495 | * Return the domain associated with the current conclave. |
1496 | * If not joined to a conclave, return the KERNEL domain. This implies that the |
1497 | * calling task is sufficiently privileged. |
1498 | */ |
1499 | const char * |
1500 | exclaves_conclave_get_domain(exclaves_resource_t *resource) |
1501 | { |
1502 | if (resource != NULL) { |
1503 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1504 | return resource->r_name; |
1505 | } |
1506 | |
1507 | assert(exclaves_has_priv(current_task(), EXCLAVES_PRIV_KERNEL_DOMAIN)); |
1508 | return EXCLAVES_DOMAIN_KERNEL; |
1509 | } |
1510 | |
1511 | kern_return_t |
1512 | exclaves_conclave_stop(exclaves_resource_t *resource, bool gather_crash_bt) |
1513 | { |
1514 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1515 | |
1516 | conclave_resource_t *conclave = &resource->r_conclave; |
1517 | |
1518 | uint32_t conclave_stop_reason = gather_crash_bt ? |
1519 | CONCLAVE_LAUNCHER_CONCLAVESTOPREASON_KILLED : |
1520 | CONCLAVE_LAUNCHER_CONCLAVESTOPREASON_EXIT; |
1521 | |
1522 | lck_mtx_lock(&resource->r_mutex); |
1523 | |
1524 | /* TBD Call stop on the conclave manager endpoint. */ |
1525 | if (conclave->c_state == CONCLAVE_S_LAUNCHING) { |
1526 | /* If another thread is launching, just request a stop */ |
1527 | conclave->c_state = CONCLAVE_S_STOP_REQUESTED; |
1528 | lck_mtx_unlock(&resource->r_mutex); |
1529 | return KERN_SUCCESS; |
1530 | } else if (conclave->c_state == CONCLAVE_S_ATTACHED) { |
1531 | /* Change the state to stopped if the conclave was never started */ |
1532 | conclave->c_state = CONCLAVE_S_STOPPED; |
1533 | lck_mtx_unlock(&resource->r_mutex); |
1534 | return KERN_SUCCESS; |
1535 | } else if (conclave->c_state == CONCLAVE_S_STOPPING || |
1536 | conclave->c_state == CONCLAVE_S_STOPPED) { |
1537 | /* Upcall to stop the conclave might be in progress, bail out */ |
1538 | lck_mtx_unlock(&resource->r_mutex); |
1539 | return KERN_SUCCESS; |
1540 | } |
1541 | |
1542 | if (conclave->c_state != CONCLAVE_S_LAUNCHED) { |
1543 | lck_mtx_unlock(&resource->r_mutex); |
1544 | return KERN_FAILURE; |
1545 | } |
1546 | |
1547 | conclave->c_state = CONCLAVE_S_STOPPING; |
1548 | lck_mtx_unlock(&resource->r_mutex); |
1549 | |
1550 | __assert_only kern_return_t kr = |
1551 | exclaves_conclave_launcher_stop(conclave->c_control, |
1552 | conclave_stop_reason); |
1553 | assert3u(kr, ==, KERN_SUCCESS); |
1554 | |
1555 | lck_mtx_lock(&resource->r_mutex); |
1556 | conclave->c_state = CONCLAVE_S_STOPPED; |
1557 | lck_mtx_unlock(&resource->r_mutex); |
1558 | |
1559 | return KERN_SUCCESS; |
1560 | } |
1561 | |
1562 | extern int exit_with_exclave_exception(void *p); |
1563 | |
1564 | kern_return_t |
1565 | exclaves_conclave_stop_upcall(exclaves_resource_t *resource) |
1566 | { |
1567 | assert3p(resource, !=, NULL); |
1568 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1569 | |
1570 | conclave_resource_t *conclave = &resource->r_conclave; |
1571 | thread_t thread = current_thread(); |
1572 | |
1573 | lck_mtx_lock(&resource->r_mutex); |
1574 | |
1575 | if (conclave->c_state == CONCLAVE_S_STOPPING || conclave->c_state == CONCLAVE_S_STOPPED) { |
1576 | /* Upcall to stop the conclave might be in progress, bail out */ |
1577 | lck_mtx_unlock(&resource->r_mutex); |
1578 | return KERN_SUCCESS; |
1579 | } |
1580 | |
1581 | if (conclave->c_state != CONCLAVE_S_LAUNCHED && conclave->c_state != CONCLAVE_S_LAUNCHING |
1582 | && conclave->c_state != CONCLAVE_S_ATTACHED |
1583 | && conclave->c_state != CONCLAVE_S_STOP_REQUESTED) { |
1584 | lck_mtx_unlock(&resource->r_mutex); |
1585 | return KERN_FAILURE; |
1586 | } |
1587 | |
1588 | conclave->c_state = CONCLAVE_S_STOPPING; |
1589 | thread->th_exclaves_state |= TH_EXCLAVES_STOP_UPCALL_PENDING; |
1590 | lck_mtx_unlock(&resource->r_mutex); |
1591 | |
1592 | return KERN_SUCCESS; |
1593 | } |
1594 | |
1595 | kern_return_t |
1596 | exclaves_conclave_stop_upcall_complete(exclaves_resource_t *resource, task_t task) |
1597 | { |
1598 | assert3p(resource, !=, NULL); |
1599 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1600 | |
1601 | conclave_resource_t *conclave = &resource->r_conclave; |
1602 | thread_t thread = current_thread(); |
1603 | |
1604 | thread->th_exclaves_state &= ~TH_EXCLAVES_STOP_UPCALL_PENDING; |
1605 | exit_with_exclave_exception(get_bsdtask_info(task)); |
1606 | |
1607 | lck_mtx_lock(&resource->r_mutex); |
1608 | |
1609 | assert3u(conclave->c_state, ==, CONCLAVE_S_STOPPING); |
1610 | conclave->c_state = CONCLAVE_S_STOPPED; |
1611 | |
1612 | lck_mtx_unlock(&resource->r_mutex); |
1613 | return KERN_SUCCESS; |
1614 | } |
1615 | |
1616 | bool |
1617 | exclaves_conclave_has_service(exclaves_resource_t *resource, uint64_t id) |
1618 | { |
1619 | assert3u(id, <, CONCLAVE_SERVICE_MAX); |
1620 | |
1621 | if (resource == NULL) { |
1622 | /* There's no conclave, fallback to the kernel domain. */ |
1623 | assert(exclaves_has_priv(current_task(), |
1624 | EXCLAVES_PRIV_KERNEL_DOMAIN)); |
1625 | return bitmap_test(kernel_service_bitmap, (uint32_t)id); |
1626 | } |
1627 | |
1628 | assert3p(resource, !=, NULL); |
1629 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER); |
1630 | |
1631 | conclave_resource_t *conclave = &resource->r_conclave; |
1632 | |
1633 | return bitmap_test(conclave->c_service_bitmap, (uint32_t)id); |
1634 | } |
1635 | |
1636 | |
1637 | /* -------------------------------------------------------------------------- */ |
1638 | #pragma mark Sensors |
1639 | |
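/*
 * Sensor resources keep a count of outstanding starts (s_startcount) so that
 * starts and stops stay balanced. When the last use-count reference on the
 * sensor goes away, any outstanding starts are undone here.
 */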
1640 | static void |
1641 | exclaves_resource_sensor_reset(exclaves_resource_t *resource) |
1642 | { |
1643 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SENSOR); |
1644 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0); |
1645 | LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED); |
1646 | |
1647 | exclaves_sensor_status_t status; |
1648 | |
1649 | for (int i = 0; i < resource->r_sensor.s_startcount; i++) { |
1650 | __assert_only kern_return_t kr = exclaves_sensor_stop( |
1651 | (exclaves_sensor_type_t)resource->r_id, 0, &status); |
1652 | assert3u(kr, !=, KERN_INVALID_ARGUMENT); |
1653 | } |
1654 | |
1655 | resource->r_sensor.s_startcount = 0; |
1656 | } |
1657 | |
1658 | kern_return_t |
1659 | exclaves_resource_sensor_open(const char *domain, const char *id_name, |
1660 | exclaves_resource_t **out) |
1661 | { |
1662 | assert3p(out, !=, NULL); |
1663 | |
1664 | exclaves_resource_t *sensor = exclaves_resource_lookup_by_name(domain, |
1665 | id_name, XNUPROXY_RESOURCE_SENSOR); |
1666 | |
1667 | if (sensor == NULL) { |
1668 | return KERN_NOT_FOUND; |
1669 | } |
1670 | |
1671 | assert3u(sensor->r_type, ==, XNUPROXY_RESOURCE_SENSOR); |
1672 | |
1673 | lck_mtx_lock(&sensor->r_mutex); |
1674 | exclaves_resource_retain(sensor); |
1675 | lck_mtx_unlock(&sensor->r_mutex); |
1676 | |
1677 | *out = sensor; |
1678 | |
1679 | return KERN_SUCCESS; |
1680 | } |
1681 | |
1682 | kern_return_t |
1683 | exclaves_resource_sensor_start(exclaves_resource_t *resource, uint64_t flags, |
1684 | exclaves_sensor_status_t *status) |
1685 | { |
1686 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SENSOR); |
1687 | |
1688 | lck_mtx_lock(&resource->r_mutex); |
1689 | if (resource->r_sensor.s_startcount == UINT64_MAX) { |
1690 | lck_mtx_unlock(&resource->r_mutex); |
1691 | return KERN_INVALID_ARGUMENT; |
1692 | } |
1693 | |
1694 | kern_return_t kr = exclaves_sensor_start( |
1695 | (exclaves_sensor_type_t)resource->r_id, flags, status); |
1696 | if (kr == KERN_SUCCESS) { |
1697 | resource->r_sensor.s_startcount += 1; |
1698 | } |
1699 | lck_mtx_unlock(&resource->r_mutex); |
1700 | return kr; |
1701 | } |
1702 | |
1703 | kern_return_t |
1704 | exclaves_resource_sensor_status(exclaves_resource_t *resource, uint64_t flags, |
1705 | exclaves_sensor_status_t *status) |
1706 | { |
1707 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SENSOR); |
1708 | |
1709 | lck_mtx_lock(&resource->r_mutex); |
1710 | kern_return_t kr = exclaves_sensor_status( |
1711 | (exclaves_sensor_type_t)resource->r_id, flags, status); |
1712 | lck_mtx_unlock(&resource->r_mutex); |
1713 | |
1714 | return kr; |
1715 | } |
1716 | |
1717 | kern_return_t |
1718 | exclaves_resource_sensor_stop(exclaves_resource_t *resource, uint64_t flags, |
1719 | exclaves_sensor_status_t *status) |
1720 | { |
1721 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SENSOR); |
1722 | |
1723 | lck_mtx_lock(&resource->r_mutex); |
1724 | if (resource->r_sensor.s_startcount == 0) { |
1725 | lck_mtx_unlock(&resource->r_mutex); |
1726 | return KERN_INVALID_ARGUMENT; |
1727 | } |
1728 | |
1729 | kern_return_t kr = exclaves_sensor_stop( |
1730 | (exclaves_sensor_type_t)resource->r_id, flags, status); |
1731 | if (kr == KERN_SUCCESS) { |
1732 | resource->r_sensor.s_startcount -= 1; |
1733 | } |
1734 | lck_mtx_unlock(&resource->r_mutex); |
1735 | |
1736 | return kr; |
1737 | } |
1738 | |
1739 | /* -------------------------------------------------------------------------- */ |
1740 | #pragma mark Notifications |
1741 | |
1742 | static void |
1743 | exclaves_notification_init(exclaves_resource_t *resource) |
1744 | { |
1745 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION); |
1746 | exclaves_notification_t *notification = &resource->r_notification; |
1747 | klist_init(¬ification->notification_klist); |
1748 | } |
1749 | |
1750 | static int |
1751 | filt_exclaves_notification_attach(struct knote *kn, __unused struct kevent_qos_s *kev) |
1752 | { |
1753 | int error = 0; |
1754 | exclaves_resource_t *exclaves_resource = NULL; |
1755 | kern_return_t kr = exclaves_resource_from_port_name(current_space(), (mach_port_name_t)kn->kn_id, &exclaves_resource); |
1756 | if (kr != KERN_SUCCESS) { |
1757 | error = ENOENT; |
1758 | goto out; |
1759 | } |
1760 | assert3p(exclaves_resource, !=, NULL); |
1761 | if (exclaves_resource->r_type != XNUPROXY_RESOURCE_NOTIFICATION) { |
1762 | exclaves_resource_release(exclaves_resource); |
1763 | error = EINVAL; |
1764 | goto out; |
1765 | } |
1766 | |
1767 | lck_mtx_lock(&exclaves_resource->r_mutex); |
1768 | |
1769 | if (kn->kn_exclaves_resource != NULL) { |
1770 | lck_mtx_unlock(&exclaves_resource->r_mutex); |
1771 | exclaves_resource_release(exclaves_resource); |
1772 | error = EBUSY; |
1773 | goto out; |
1774 | } |
1775 | |
1776 | /* kn_exclaves_resource consumes the ref. */ |
1777 | kn->kn_exclaves_resource = exclaves_resource; |
1778 | KNOTE_ATTACH(&exclaves_resource->r_notification.notification_klist, kn); |
1779 | lck_mtx_unlock(&exclaves_resource->r_mutex); |
1780 | |
1781 | error = 0; |
1782 | out: |
1783 | return error; |
1784 | } |
1785 | |
1786 | static void |
1787 | filt_exclaves_notification_detach(struct knote *kn) |
1788 | { |
1789 | exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource; |
1790 | |
1791 | if (exclaves_resource != NULL) { |
1792 | assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION); |
1793 | lck_mtx_lock(&exclaves_resource->r_mutex); |
1794 | kn->kn_exclaves_resource = NULL; |
1795 | KNOTE_DETACH(&exclaves_resource->r_notification.notification_klist, kn); |
1796 | lck_mtx_unlock(&exclaves_resource->r_mutex); |
1797 | |
1798 | exclaves_resource_release(exclaves_resource); |
1799 | } |
1800 | } |
1801 | |
1802 | static int |
1803 | filt_exclaves_notification_event(struct knote *kn, long hint) |
1804 | { |
1805 | /* ALWAYS CALLED WITH exclaves_resource mutex held */ |
1806 | exclaves_resource_t *exclaves_resource __assert_only = kn->kn_exclaves_resource; |
1807 | LCK_MTX_ASSERT(&exclaves_resource->r_mutex, LCK_MTX_ASSERT_OWNED); |
1808 | |
1809 | /* |
1810 | * if the user is interested in this event, record it. |
1811 | */ |
1812 | if (kn->kn_sfflags & hint) { |
1813 | kn->kn_fflags |= hint; |
1814 | } |
1815 | |
1816 | /* if we have any matching state, activate the knote */ |
1817 | if (kn->kn_fflags != 0) { |
1818 | return FILTER_ACTIVE; |
1819 | } else { |
1820 | return 0; |
1821 | } |
1822 | } |
1823 | |
1824 | static int |
1825 | filt_exclaves_notification_touch(struct knote *kn, struct kevent_qos_s *kev) |
1826 | { |
1827 | int result; |
1828 | exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource; |
1829 | assert3p(exclaves_resource, !=, NULL); |
1830 | assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION); |
1831 | |
1832 | lck_mtx_lock(&exclaves_resource->r_mutex); |
/* Accept the new mask and mask off output events that are no longer interesting. */
1834 | kn->kn_sfflags = kev->fflags; |
1835 | kn->kn_fflags &= kn->kn_sfflags; |
1836 | if (kn->kn_fflags != 0) { |
1837 | result = FILTER_ACTIVE; |
1838 | } else { |
1839 | result = 0; |
1840 | } |
1841 | lck_mtx_unlock(&exclaves_resource->r_mutex); |
1842 | |
1843 | return result; |
1844 | } |
1845 | |
1846 | static int |
1847 | filt_exclaves_notification_process(struct knote *kn, struct kevent_qos_s *kev) |
1848 | { |
1849 | int result = 0; |
1850 | exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource; |
1851 | assert3p(exclaves_resource, !=, NULL); |
1852 | assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION); |
1853 | |
1854 | lck_mtx_lock(&exclaves_resource->r_mutex); |
1855 | if (kn->kn_fflags) { |
1856 | knote_fill_kevent(kn, kev, 0); |
1857 | result = FILTER_ACTIVE; |
1858 | } |
1859 | lck_mtx_unlock(&exclaves_resource->r_mutex); |
1860 | return result; |
1861 | } |
1862 | |
1863 | SECURITY_READ_ONLY_EARLY(struct filterops) exclaves_notification_filtops = { |
1864 | .f_attach = filt_exclaves_notification_attach, |
1865 | .f_detach = filt_exclaves_notification_detach, |
1866 | .f_event = filt_exclaves_notification_event, |
1867 | .f_touch = filt_exclaves_notification_touch, |
1868 | .f_process = filt_exclaves_notification_process, |
1869 | }; |
1870 | |
1871 | kern_return_t |
1872 | exclaves_notification_create(const char *domain, const char *name, |
1873 | exclaves_resource_t **out) |
1874 | { |
1875 | assert3p(out, !=, NULL); |
1876 | |
1877 | exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain, |
1878 | name, XNUPROXY_RESOURCE_NOTIFICATION); |
1879 | |
1880 | if (resource == NULL) { |
1881 | return KERN_NOT_FOUND; |
1882 | } |
1883 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION); |
1884 | |
1885 | lck_mtx_lock(&resource->r_mutex); |
1886 | exclaves_resource_retain(resource); |
1887 | lck_mtx_unlock(&resource->r_mutex); |
1888 | |
1889 | *out = resource; |
1890 | |
1891 | return KERN_SUCCESS; |
1892 | } |
1893 | |
1894 | kern_return_t |
1895 | exclaves_notification_signal(exclaves_resource_t *exclaves_resource, long event_mask) |
1896 | { |
1897 | assert3p(exclaves_resource, !=, NULL); |
1898 | assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION); |
1899 | |
1900 | lck_mtx_lock(&exclaves_resource->r_mutex); |
1901 | KNOTE(&exclaves_resource->r_notification.notification_klist, event_mask); |
1902 | lck_mtx_unlock(&exclaves_resource->r_mutex); |
1903 | |
1904 | return KERN_SUCCESS; |
1905 | } |
1906 | |
1907 | exclaves_resource_t * |
1908 | exclaves_notification_lookup_by_id(const char *domain, uint64_t id) |
1909 | { |
1910 | return exclaves_resource_lookup_by_id(domain, id, |
1911 | XNUPROXY_RESOURCE_NOTIFICATION); |
1912 | } |
1913 | |
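/*
 * Look up a service by name in the given domain and return its id, or
 * UINT64_C(~0) (i.e. UINT64_MAX) if no such service exists.
 */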
1914 | uint64_t |
1915 | exclaves_service_lookup(const char *domain, const char *name) |
1916 | { |
1917 | assert3p(domain, !=, NULL); |
1918 | assert3p(name, !=, NULL); |
1919 | |
1920 | exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain, |
1921 | name, XNUPROXY_RESOURCE_SERVICE); |
1922 | if (resource == NULL) { |
1923 | return UINT64_C(~0); |
1924 | } |
1925 | |
1926 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SERVICE); |
1927 | return resource->r_id; |
1928 | } |
1929 | |
1930 | kern_return_t |
1931 | exclaves_xnu_proxy_check_mem_usage(void) |
1932 | { |
1933 | xnuproxy_msg_t msg = { |
1934 | .cmd = XNUPROXY_CMD_REPORT_MEMORY_USAGE, |
1935 | }; |
1936 | |
1937 | return exclaves_xnu_proxy_send(&msg, NULL); |
1938 | } |
1939 | |
1940 | /* -------------------------------------------------------------------------- */ |
1941 | #pragma mark Shared Memory |
1942 | |
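/*
 * Run the given block over the [offset, offset + len) window of a shared
 * memory buffer, one virtually contiguous range at a time. The window must
 * lie entirely within the buffer (asserted below). A minimal sketch, with a
 * hypothetical caller that zeroes the window:
 *
 *     int err = exclaves_resource_shared_memory_io(res, offset, len,
 *         ^(char *buf, size_t size) {
 *                 bzero(buf, size);
 *                 return 0;
 *         });
 */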
1943 | int |
1944 | exclaves_resource_shared_memory_io(exclaves_resource_t *resource, off_t offset, |
1945 | size_t len, int (^cb)(char *, size_t)) |
1946 | { |
1947 | assert(resource->r_type == XNUPROXY_RESOURCE_SHARED_MEMORY || |
1948 | resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY); |
1949 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0); |
1950 | |
1951 | shared_memory_resource_t *sm = &resource->r_shared_memory; |
1952 | assert3u(sm->sm_nranges, >, 0); |
1953 | assert3u(sm->sm_size, !=, 0); |
1954 | assert3u(offset + len, <=, sm->sm_size); |
1955 | |
1956 | for (int i = 0; i < sm->sm_nranges; i++) { |
1957 | /* Skip forward to the starting range. */ |
1958 | if (offset >= sm->sm_range[i].npages * PAGE_SIZE) { |
1959 | offset -= sm->sm_range[i].npages * PAGE_SIZE; |
1960 | continue; |
1961 | } |
1962 | |
1963 | size_t size = MIN((sm->sm_range[i].npages * PAGE_SIZE) - offset, len); |
1964 | int ret = cb(sm->sm_range[i].address + offset, size); |
1965 | if (ret != 0) { |
1966 | return ret; |
1967 | } |
1968 | |
1969 | offset = 0; |
1970 | len -= size; |
1971 | |
1972 | if (len == 0) { |
1973 | break; |
1974 | } |
1975 | } |
1976 | assert3u(len, ==, 0); |
1977 | |
1978 | return 0; |
1979 | } |
1980 | |
1981 | static kern_return_t |
1982 | exclaves_resource_shared_memory_io_copyin(exclaves_resource_t *resource, |
1983 | user_addr_t _src, off_t offset, size_t len) |
1984 | { |
1985 | assert3u(resource->r_shared_memory.sm_perm & EXCLAVES_BUFFER_PERM_WRITE, |
1986 | !=, 0); |
1987 | |
1988 | __block user_addr_t src = _src; |
1989 | return exclaves_resource_shared_memory_io(resource, offset, len, |
1990 | ^(char *buffer, size_t size) { |
1991 | if (copyin(src, buffer, size) != 0) { |
1992 | return KERN_FAILURE; |
1993 | } |
1994 | |
1995 | src += size; |
1996 | return KERN_SUCCESS; |
1997 | }); |
1998 | } |
1999 | |
2000 | kern_return_t |
2001 | exclaves_resource_shared_memory_copyin(exclaves_resource_t *resource, |
2002 | user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1, |
2003 | mach_vm_size_t size2, mach_vm_size_t offset2) |
2004 | { |
2005 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0); |
2006 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SHARED_MEMORY); |
2007 | |
2008 | mach_vm_size_t umax = 0; |
2009 | kern_return_t kr = KERN_FAILURE; |
2010 | |
2011 | if (buffer == USER_ADDR_NULL || size1 == 0) { |
2012 | return KERN_INVALID_ARGUMENT; |
2013 | } |
2014 | |
2015 | shared_memory_resource_t *sm = &resource->r_shared_memory; |
2016 | assert3u(sm->sm_nranges, >, 0); |
2017 | assert3u(sm->sm_size, !=, 0); |
2018 | |
2019 | if (os_add_overflow(offset1, size1, &umax) || umax > sm->sm_size) { |
2020 | return KERN_INVALID_ARGUMENT; |
2021 | } |
2022 | |
2023 | if (os_add_overflow(offset2, size2, &umax) || umax > sm->sm_size) { |
2024 | return KERN_INVALID_ARGUMENT; |
2025 | } |
2026 | |
2027 | if ((sm->sm_perm & EXCLAVES_BUFFER_PERM_WRITE) == 0) { |
2028 | return KERN_PROTECTION_FAILURE; |
2029 | } |
2030 | |
2031 | kr = exclaves_resource_shared_memory_io_copyin(resource, buffer, offset1, size1); |
2032 | if (kr != KERN_SUCCESS) { |
2033 | return kr; |
2034 | } |
2035 | |
2036 | kr = exclaves_resource_shared_memory_io_copyin(resource, buffer + size1, offset2, |
2037 | size2); |
2038 | if (kr != KERN_SUCCESS) { |
2039 | return kr; |
2040 | } |
2041 | |
2042 | return KERN_SUCCESS; |
2043 | } |
2044 | |
2045 | static kern_return_t |
2046 | exclaves_resource_shared_memory_io_copyout(exclaves_resource_t *resource, |
2047 | user_addr_t _dst, off_t offset, size_t len) |
2048 | { |
2049 | assert3u(resource->r_shared_memory.sm_perm & EXCLAVES_BUFFER_PERM_READ, |
2050 | !=, 0); |
2051 | |
2052 | __block user_addr_t dst = _dst; |
2053 | return exclaves_resource_shared_memory_io(resource, offset, len, |
2054 | ^(char *buffer, size_t size) { |
2055 | if (copyout(buffer, dst, size) != 0) { |
2056 | return KERN_FAILURE; |
2057 | } |
2058 | |
2059 | dst += size; |
2060 | return KERN_SUCCESS; |
2061 | }); |
2062 | } |
2063 | |
2064 | kern_return_t |
2065 | exclaves_resource_shared_memory_copyout(exclaves_resource_t *resource, |
2066 | user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1, |
2067 | mach_vm_size_t size2, mach_vm_size_t offset2) |
2068 | { |
2069 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0); |
2070 | assert(resource->r_type == XNUPROXY_RESOURCE_SHARED_MEMORY || |
2071 | resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY); |
2072 | |
2073 | mach_vm_size_t umax = 0; |
2074 | kern_return_t kr = KERN_FAILURE; |
2075 | |
2076 | if (buffer == USER_ADDR_NULL || size1 == 0) { |
2077 | return KERN_INVALID_ARGUMENT; |
2078 | } |
2079 | |
2080 | shared_memory_resource_t *sm = &resource->r_shared_memory; |
2081 | assert3u(sm->sm_nranges, >, 0); |
2082 | assert3u(sm->sm_size, !=, 0); |
2083 | |
2084 | if (os_add_overflow(offset1, size1, &umax) || umax > sm->sm_size) { |
2085 | return KERN_INVALID_ARGUMENT; |
2086 | } |
2087 | |
2088 | if (os_add_overflow(offset2, size2, &umax) || umax > sm->sm_size) { |
2089 | return KERN_INVALID_ARGUMENT; |
2090 | } |
2091 | |
2092 | if ((sm->sm_perm & EXCLAVES_BUFFER_PERM_READ) == 0) { |
2093 | return KERN_PROTECTION_FAILURE; |
2094 | } |
2095 | |
2096 | kr = exclaves_resource_shared_memory_io_copyout(resource, buffer, offset1, size1); |
2097 | if (kr != KERN_SUCCESS) { |
2098 | return kr; |
2099 | } |
2100 | |
2101 | kr = exclaves_resource_shared_memory_io_copyout(resource, buffer + size1, |
2102 | offset2, size2); |
2103 | if (kr != KERN_SUCCESS) { |
2104 | return kr; |
2105 | } |
2106 | |
2107 | return KERN_SUCCESS; |
2108 | } |
2109 | |
/* The lower 32 bits of the id contain the endpoint id. */
2111 | static uint32_t |
2112 | audio_memory_get_endpoint(exclaves_resource_t *resource) |
2113 | { |
2114 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY); |
2115 | return resource->r_id << 32 >> 32; |
2116 | } |
2117 | |
/* The upper 32 bits of the id contain the buffer id. */
2119 | static uint32_t |
2120 | audio_memory_get_buffer_id(exclaves_resource_t *resource) |
2121 | { |
2122 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY); |
2123 | return resource->r_id >> 32; |
2124 | } |
2125 | |
2126 | static kern_return_t |
2127 | shared_memory_map(exclaves_resource_t *resource, size_t size, |
2128 | exclaves_buffer_perm_t perm) |
2129 | { |
2130 | assert(resource->r_type == XNUPROXY_RESOURCE_SHARED_MEMORY || |
2131 | resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY); |
2132 | assert3u(perm & ~(EXCLAVES_BUFFER_PERM_READ | EXCLAVES_BUFFER_PERM_WRITE), ==, 0); |
2133 | |
2134 | kern_return_t kr = KERN_FAILURE; |
2135 | |
2136 | /* round size up to nearest page */ |
2137 | mach_vm_offset_t rounded_size = 0; |
2138 | if (size == 0 || mach_vm_round_page_overflow(size, &rounded_size)) { |
2139 | return KERN_INVALID_ARGUMENT; |
2140 | } |
2141 | |
2142 | lck_mtx_lock(&resource->r_mutex); |
2143 | |
2144 | __block shared_memory_resource_t *sm = &resource->r_shared_memory; |
2145 | |
2146 | /* |
2147 | * If already active, bump the use count, check that the perms and size |
2148 | * are compatible and return. Checking the use count is insufficient |
2149 | * here as this can race with with a non-locked use count release. |
2150 | */ |
2151 | if (resource->r_active) { |
2152 | /* |
2153 | * Both the permissions and size must match. |
2154 | */ |
2155 | if (sm->sm_size < rounded_size || sm->sm_perm != perm) { |
2156 | lck_mtx_unlock(&resource->r_mutex); |
2157 | return KERN_INVALID_ARGUMENT; |
2158 | } |
2159 | |
2160 | exclaves_resource_retain(resource); |
2161 | lck_mtx_unlock(&resource->r_mutex); |
2162 | return KERN_SUCCESS; |
2163 | } |
2164 | |
2165 | /* This is lazily initialised and never de-initialised. */ |
2166 | if (sm->sm_client.connection == NULL) { |
2167 | uint64_t endpoint = resource->r_type == XNUPROXY_RESOURCE_SHARED_MEMORY ? |
2168 | resource->r_id : |
2169 | audio_memory_get_endpoint(resource); |
2170 | |
2171 | kr = exclaves_shared_memory_init(endpoint, &sm->sm_client); |
2172 | if (kr != KERN_SUCCESS) { |
2173 | lck_mtx_unlock(&resource->r_mutex); |
2174 | return kr; |
2175 | } |
2176 | } |
2177 | |
2178 | const sharedmemorybase_perms_s sm_perm = perm == EXCLAVES_BUFFER_PERM_WRITE ? |
2179 | SHAREDMEMORYBASE_PERMS_READWRITE : SHAREDMEMORYBASE_PERMS_READONLY; |
2180 | sharedmemorybase_mapping_s mapping = 0; |
2181 | kr = exclaves_shared_memory_setup(&sm->sm_client, sm_perm, 0, |
2182 | rounded_size / PAGE_SIZE, &mapping); |
2183 | if (kr != KERN_SUCCESS) { |
2184 | lck_mtx_unlock(&resource->r_mutex); |
2185 | return kr; |
2186 | } |
2187 | |
2188 | /* |
2189 | * From this point on exclaves_shared_memory_teardown() must be called |
2190 | * if something goes wrong so that the buffer will be properly unmapped. |
2191 | */ |
2192 | sm->sm_size = rounded_size; |
2193 | sm->sm_perm = perm; |
2194 | sm->sm_nranges = 0; |
2195 | |
2196 | /* |
2197 | * The shared buffer is now accessible by xnu. Discover the layout of |
2198 | * the memory. |
2199 | */ |
2200 | __block bool success = true; |
2201 | /* BEGIN IGNORE CODESTYLE */ |
2202 | kr = exclaves_shared_memory_iterate(&sm->sm_client, &mapping, 0, |
2203 | rounded_size / PAGE_SIZE, ^(uint64_t pa) { |
2204 | char *vaddr = (char *)phystokv(pa); |
2205 | assert3p(vaddr, !=, NULL); |
2206 | |
2207 | /* |
2208 | * If this virtual address is adjacent to the previous |
2209 | * one, just extend the current range. |
2210 | */ |
2211 | if (sm->sm_nranges > 0) { |
2212 | const size_t len = sm->sm_range[sm->sm_nranges - 1].npages * PAGE_SIZE; |
2213 | const char *addr = sm->sm_range[sm->sm_nranges - 1].address + len; |
2214 | |
2215 | if (vaddr == addr) { |
2216 | sm->sm_range[sm->sm_nranges - 1].npages++; |
2217 | return; |
2218 | } |
2219 | |
2220 | if (sm->sm_nranges == EXCLAVES_SHARED_BUFFER_MAX_RANGES - 1) { |
exclaves_debug_printf(show_errors,
"exclaves: too many ranges, can't fit\n");
2223 | success = false; |
2224 | return; |
2225 | } |
2226 | } |
2227 | |
2228 | /* |
2229 | * Page is not virtually contiguous with the previous one - |
2230 | * stick it in a new range. |
2231 | */ |
2232 | sm->sm_range[sm->sm_nranges].npages = 1; |
2233 | sm->sm_range[sm->sm_nranges].address = vaddr; |
2234 | sm->sm_nranges++; |
2235 | }); |
2236 | /* END IGNORE CODESTYLE */ |
2237 | |
2238 | |
2239 | if (kr != KERN_SUCCESS || !success) { |
2240 | exclaves_shared_memory_teardown(&sm->sm_client, &mapping); |
2241 | lck_mtx_unlock(&resource->r_mutex); |
2242 | return KERN_FAILURE; |
2243 | } |
2244 | |
2245 | sm->sm_mapping = mapping; |
2246 | |
2247 | exclaves_resource_retain(resource); |
2248 | resource->r_active = true; |
2249 | |
2250 | lck_mtx_unlock(&resource->r_mutex); |
2251 | |
2252 | return KERN_SUCCESS; |
2253 | } |
2254 | |
2255 | kern_return_t |
2256 | exclaves_resource_shared_memory_map(const char *domain, const char *name, size_t size, |
2257 | exclaves_buffer_perm_t perm, exclaves_resource_t **out) |
2258 | { |
2259 | assert3p(out, !=, NULL); |
2260 | |
2261 | exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain, |
2262 | name, XNUPROXY_RESOURCE_SHARED_MEMORY); |
2263 | if (resource == NULL) { |
2264 | return KERN_NOT_FOUND; |
2265 | } |
2266 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SHARED_MEMORY); |
2267 | |
2268 | kern_return_t kr = shared_memory_map(resource, size, perm); |
2269 | if (kr != KERN_SUCCESS) { |
2270 | return kr; |
2271 | } |
2272 | |
2273 | *out = resource; |
2274 | return KERN_SUCCESS; |
2275 | } |
2276 | |
2277 | |
2278 | static void |
2279 | exclaves_resource_shared_memory_unmap(exclaves_resource_t *resource) |
2280 | { |
2281 | assert(resource->r_type == XNUPROXY_RESOURCE_SHARED_MEMORY || |
2282 | resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY); |
2283 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0); |
2284 | LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED); |
2285 | |
2286 | shared_memory_resource_t *sm = &resource->r_shared_memory; |
2287 | |
2288 | kern_return_t kr = exclaves_shared_memory_teardown(&sm->sm_client, |
2289 | &sm->sm_mapping); |
2290 | if (kr != KERN_SUCCESS) { |
2291 | exclaves_debug_printf(show_errors, |
2292 | "exclaves: failed to teardown shared memory: %s, \n" , |
2293 | resource->r_name); |
2294 | return; |
2295 | } |
2296 | |
2297 | bzero(&resource->r_shared_memory, sizeof(resource->r_shared_memory)); |
2298 | |
2299 | resource->r_active = false; |
2300 | } |
2301 | |
2302 | |
2303 | /* -------------------------------------------------------------------------- */ |
2304 | #pragma mark Arbitrated Audio Memory |
2305 | |
2306 | kern_return_t |
2307 | exclaves_resource_audio_memory_map(const char *domain, const char *name, |
2308 | size_t size, exclaves_resource_t **out) |
2309 | { |
2310 | assert3p(out, !=, NULL); |
2311 | |
2312 | exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain, |
2313 | name, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY); |
2314 | if (resource == NULL) { |
2315 | return KERN_NOT_FOUND; |
2316 | } |
2317 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY); |
2318 | |
2319 | kern_return_t kr = shared_memory_map(resource, size, |
2320 | EXCLAVES_BUFFER_PERM_READ); |
2321 | if (kr != KERN_SUCCESS) { |
2322 | return kr; |
2323 | } |
2324 | |
2325 | *out = resource; |
2326 | return KERN_SUCCESS; |
2327 | } |
2328 | |
2329 | static void |
2330 | exclaves_resource_audio_memory_unmap(exclaves_resource_t *resource) |
2331 | { |
2332 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY); |
2333 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0); |
2334 | LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED); |
2335 | |
2336 | exclaves_resource_shared_memory_unmap(resource); |
2337 | } |
2338 | |
2339 | static kern_return_t |
2340 | copyout_zero(user_addr_t buffer, mach_vm_size_t size, mach_vm_size_t offset) |
2341 | { |
2342 | static const char zero[PAGE_SIZE] = {0}; |
2343 | |
2344 | while (size > 0) { |
2345 | size_t copy_size = MIN(size, sizeof(zero)); |
2346 | if (copyout(zero, buffer + offset, copy_size) != 0) { |
2347 | return KERN_FAILURE; |
2348 | } |
2349 | |
2350 | offset += copy_size; |
2351 | size -= copy_size; |
2352 | } |
2353 | |
2354 | return KERN_SUCCESS; |
2355 | } |
2356 | |
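/*
 * Copy out from an arbitrated audio memory buffer. exclaves_sensor_copy()
 * arbitrates the access: when the returned status is not
 * EXCLAVES_SENSOR_STATUS_ALLOWED, zeroed pages are copied out in place of the
 * buffer contents (see the comment in the else branch below).
 */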
2357 | kern_return_t |
2358 | exclaves_resource_audio_memory_copyout(exclaves_resource_t *resource, |
2359 | user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1, |
2360 | mach_vm_size_t size2, mach_vm_size_t offset2) |
2361 | { |
2362 | assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0); |
2363 | assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY); |
2364 | |
2365 | kern_return_t kr = KERN_FAILURE; |
2366 | exclaves_sensor_status_t status; |
2367 | const uint32_t id = audio_memory_get_buffer_id(resource); |
2368 | |
2369 | kr = exclaves_sensor_copy(id, size1, offset1, size2, offset2, &status); |
2370 | if (kr != KERN_SUCCESS) { |
2371 | return kr; |
2372 | } |
2373 | |
2374 | if (status == EXCLAVES_SENSOR_STATUS_ALLOWED) { |
2375 | kr = exclaves_resource_shared_memory_copyout(resource, buffer, |
2376 | size1, offset1, size2, offset2); |
2377 | if (kr != KERN_SUCCESS) { |
2378 | return kr; |
2379 | } |
2380 | } else { |
2381 | /* |
2382 | * This should be removed once the audio arbiter is properly |
2383 | * switching buffers and instead we should always rely on the |
2384 | * audio arbiter to do its job and make the data available or |
2385 | * not. |
2386 | */ |
2387 | kr = copyout_zero(buffer, size1, offset1); |
2388 | if (kr != KERN_SUCCESS) { |
2389 | return kr; |
2390 | } |
2391 | |
2392 | kr = copyout_zero(buffer, size2, offset2); |
2393 | if (kr != KERN_SUCCESS) { |
2394 | return kr; |
2395 | } |
2396 | } |
2397 | |
2398 | return KERN_SUCCESS; |
2399 | } |
2400 | |
2401 | #endif /* CONFIG_EXCLAVES */ |
2402 | |