/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if CONFIG_EXCLAVES

#include <stdint.h>
#include <mach/kern_return.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>

#include "exclaves_debug.h"
#include "exclaves_shared_memory.h"
#include "kern/exclaves.tightbeam.h"

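/*
 * Helpers for driving the tightbeam-generated sharedmemorybase "segxnuaccess"
 * interface from xnu: creating a client for an exclaves endpoint, creating
 * and destroying an xnu mapping of a shared memory segment, mapping and
 * unmapping page ranges, and iterating over the physical pages backing a
 * mapping.
 *
 * A rough usage sketch (the endpoint, page count and callback body below are
 * illustrative only and not taken from a real caller):
 *
 *	sharedmemorybase_segxnuaccess_s sm_client = {};
 *	sharedmemorybase_mapping_s mapping = 0;
 *
 *	if (exclaves_shared_memory_init(endpoint, &sm_client) != KERN_SUCCESS) {
 *		return KERN_FAILURE;
 *	}
 *	if (exclaves_shared_memory_setup(&sm_client,
 *	    SHAREDMEMORYBASE_PERMS_READWRITE, 0, npages,
 *	    &mapping) != KERN_SUCCESS) {
 *		return KERN_FAILURE;
 *	}
 *	exclaves_shared_memory_iterate(&sm_client, &mapping, 0, npages,
 *	    ^(uint64_t phys_addr) {
 *		... do something with each physical page address ...
 *	    });
 */

/*
 * Create a tightbeam client for the shared memory endpoint identified by
 * 'endpoint'. On success 'sm_client' can be passed to the other
 * exclaves_shared_memory_* routines.
 */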
kern_return_t
exclaves_shared_memory_init(const uint64_t endpoint,
    sharedmemorybase_segxnuaccess_s *sm_client)
{
	assert3p(sm_client, !=, NULL);

	tb_endpoint_t ep = tb_endpoint_create_with_value(
	    TB_TRANSPORT_TYPE_XNU, endpoint, TB_ENDPOINT_OPTIONS_NONE);
	tb_error_t ret = sharedmemorybase_segxnuaccess__init(sm_client, ep);

	return ret == TB_ERROR_SUCCESS ? KERN_SUCCESS : KERN_FAILURE;
}

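/*
 * Query the segment's xnu access status and check that the requested
 * permissions, xnu mappability and page count are sufficient to map pages up
 * to 'endpage'.
 */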
static kern_return_t
exclaves_shared_memory_access_check(
    const sharedmemorybase_segxnuaccess_s *sm_client,
    const sharedmemorybase_perms_s perm, const uint64_t endpage,
    bool *access_allowed)
{
	assert3p(sm_client, !=, NULL);
	assert3p(access_allowed, !=, NULL);

	tb_error_t ret = TB_ERROR_SUCCESS;

	__block bool allowed = true;
	ret = sharedmemorybase_segxnuaccess_xnuaccessstatus(sm_client,
	    ^(sharedmemorybase_accessstatus_s result) {
		/*
		 * Check permissions.
		 * For the moment just check for writable access (if relevant).
		 */
		if (perm == SHAREDMEMORYBASE_PERMS_READWRITE) {
			allowed = allowed && perm == result.permissions;
		}

		/* Check that it's xnu mappable. */
		allowed = allowed && result.xnu;

		/* Check that there are enough pages. */
		allowed = allowed && endpage <= result.segmentstatus.npages;
	});

	if (ret != TB_ERROR_SUCCESS) {
		return KERN_FAILURE;
	}

	*access_allowed = allowed;
	return KERN_SUCCESS;
}

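/*
 * Create an xnu mapping of the given page range of the shared memory segment
 * with the requested permissions. On success '*mapping' holds a handle for
 * use with the map/unmap/iterate routines below.
 */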
kern_return_t
exclaves_shared_memory_setup(const sharedmemorybase_segxnuaccess_s *sm_client,
    const sharedmemorybase_perms_s perm, const uint64_t startpage,
    const uint64_t endpage, sharedmemorybase_mapping_s *mapping)
{
	assert3p(sm_client, !=, NULL);
	assert3p(mapping, !=, NULL);
	assert3u(startpage, <, endpage);
	assert(perm == SHAREDMEMORYBASE_PERMS_READWRITE ||
	    perm == SHAREDMEMORYBASE_PERMS_READONLY);

	tb_error_t ret = TB_ERROR_SUCCESS;

	/* Do a quick sanity check that this access is allowed. */
	bool allowed = false;
	kern_return_t kret = exclaves_shared_memory_access_check(sm_client,
	    perm, endpage, &allowed);
	if (kret != KERN_SUCCESS) {
		return kret;
	}
	if (!allowed) {
		return KERN_PROTECTION_FAILURE;
	}

	sharedmemorybase_pagerange__opt_s opt_range = {};

	sharedmemorybase_pagerange_s range = {
		.startpage = startpage,
		.endpage = endpage,
	};
	/* Provide the requested range as the initial range to map. */
	sharedmemorybase_pagerange__opt_init(&opt_range, &range);

	*mapping = 0;

	/* BEGIN IGNORE CODESTYLE */
	ret = sharedmemorybase_segxnuaccess_createxnumapping(sm_client, perm,
	    &opt_range,
	    ^(sharedmemorybase_segxnuaccess_createxnumapping__result_s result) {
		sharedmemorybase_accesserror_s *error = NULL;
		error = sharedmemorybase_segxnuaccess_createxnumapping__result_get_failure(&result);
		if (error != NULL) {
			exclaves_debug_printf(show_errors,
			    "%s: failed to create mapping: %u\n", __func__, *error);
			return;
		}

		sharedmemorybase_mappingresult_s *sm_result = NULL;
		sm_result = sharedmemorybase_segxnuaccess_createxnumapping__result_get_success(&result);
		assert3p(sm_result, !=, NULL);

		*mapping = sm_result->mappinginfo.mapping;
		assert3u(*mapping, !=, 0);
	});
	/* END IGNORE CODESTYLE */

	if (ret != TB_ERROR_SUCCESS || *mapping == 0) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

/*
 * Destroy an xnu mapping.
 * Currently unused as the teardown process unmaps instead.
 */
kern_return_t
exclaves_shared_memory_teardown(const sharedmemorybase_segxnuaccess_s *sm_client,
    const sharedmemorybase_mapping_s *mapping)
{
	assert3p(sm_client, !=, NULL);
	assert3p(mapping, !=, NULL);

	tb_error_t ret = TB_ERROR_SUCCESS;
	__block bool success = false;

	/* BEGIN IGNORE CODESTYLE */
	ret = sharedmemorybase_segxnuaccess_mappingdestroy(sm_client, *mapping,
	    ^(sharedmemorybase_segaccessbase_mappingdestroy__result_s result) {
		sharedmemorybase_accesserror_s *error;
		error = sharedmemorybase_segaccessbase_mappingdestroy__result_get_failure(&result);
		if (error != NULL) {
			exclaves_debug_printf(show_errors,
			    "%s: failed to destroy mapping: %u\n", __func__, *error);
			return;
		}

		assert(sharedmemorybase_segaccessbase_mappingdestroy__result_get_success(&result));
		success = true;
	});
	/* END IGNORE CODESTYLE */

	if (ret != TB_ERROR_SUCCESS || !success) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

/*
 * Map a range of pages into an existing xnu mapping.
 * Currently unused as the setup process can provide an initial mapping.
 */
kern_return_t
exclaves_shared_memory_map(const sharedmemorybase_segxnuaccess_s *sm_client,
    const sharedmemorybase_mapping_s *mapping, const uint64_t startpage,
    const uint64_t endpage)
{
	assert3p(sm_client, !=, NULL);
	assert3p(mapping, !=, NULL);
	assert3u(startpage, <, endpage);

	tb_error_t ret = TB_ERROR_SUCCESS;
	__block bool success = false;

	const sharedmemorybase_pagerange_s range = {
		.startpage = startpage,
		.endpage = endpage,
	};

	/* BEGIN IGNORE CODESTYLE */
	ret = sharedmemorybase_segxnuaccess_mappingmap(sm_client, *mapping,
	    &range, ^(sharedmemorybase_segaccessbase_mappingmap__result_s result) {
		sharedmemorybase_accesserror_s *error;
		error = sharedmemorybase_segaccessbase_mappingmap__result_get_failure(&result);
		if (error != NULL) {
			exclaves_debug_printf(show_errors,
			    "%s: failed to map: %u\n", __func__, *error);
			return;
		}

		assert(sharedmemorybase_segaccessbase_mappingmap__result_get_success(&result));
		success = true;
	});
	/* END IGNORE CODESTYLE */

	if (ret != TB_ERROR_SUCCESS || !success) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

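/*
 * Unmap a range of pages from an existing xnu mapping.
 */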
kern_return_t
exclaves_shared_memory_unmap(const sharedmemorybase_segxnuaccess_s *sm_client,
    const sharedmemorybase_mapping_s *mapping, const uint64_t startpage,
    const uint64_t endpage)
{
	assert3p(sm_client, !=, NULL);
	assert3p(mapping, !=, NULL);
	assert3u(startpage, <, endpage);

	tb_error_t ret = TB_ERROR_SUCCESS;
	__block bool success = false;

	const sharedmemorybase_pagerange_s range = {
		.startpage = startpage,
		.endpage = endpage,
	};

	/* BEGIN IGNORE CODESTYLE */
	ret = sharedmemorybase_segxnuaccess_mappingunmap(sm_client, *mapping,
	    &range, ^(sharedmemorybase_segaccessbase_mappingunmap__result_s result) {
		sharedmemorybase_accesserror_s *error;
		error = sharedmemorybase_segaccessbase_mappingunmap__result_get_failure(&result);
		if (error != NULL) {
			exclaves_debug_printf(show_errors, "%s: failed to unmap: %u\n",
			    __func__, *error);
			return;
		}

		assert(sharedmemorybase_segaccessbase_mappingunmap__result_get_success(&result));
		success = true;
	});
	/* END IGNORE CODESTYLE */

	if (ret != TB_ERROR_SUCCESS || !success) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

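/*
 * Invoke 'cb' with the physical address of each page in the given range of
 * the xnu mapping.
 */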
kern_return_t
exclaves_shared_memory_iterate(const sharedmemorybase_segxnuaccess_s *sm_client,
    const sharedmemorybase_mapping_s *mapping, uint64_t startpage, uint64_t endpage,
    void (^cb)(uint64_t))
{
	assert3p(sm_client, !=, NULL);
	assert3p(mapping, !=, NULL);
	assert3u(startpage, <, endpage);

	tb_error_t ret = TB_ERROR_SUCCESS;
	__block bool success = false;

	sharedmemorybase_pagerange_s full_range = {
		.startpage = startpage,
		.endpage = endpage,
	};

	/* BEGIN IGNORE CODESTYLE */
	ret = sharedmemorybase_segxnuaccess_mappinggetphysicaladdresses(sm_client,
	    *mapping, &full_range,
	    ^(sharedmemorybase_segaccessbase_mappinggetphysicaladdresses__result_s result) {
		sharedmemorybase_accesserror_s *error = NULL;
		error = sharedmemorybase_segaccessbase_mappinggetphysicaladdresses__result_get_failure(&result);
		if (error != NULL) {
			exclaves_debug_printf(show_errors,
			    "%s: failed to get physical address: %u\n",
			    __func__, *error);
			return;
		}

		physicaladdress_v_s *phys_addr = NULL;
		phys_addr = sharedmemorybase_segaccessbase_mappinggetphysicaladdresses__result_get_success(&result);
		assert3p(phys_addr, !=, NULL);

		physicaladdress__v_visit(phys_addr,
		    ^(__unused size_t i, const sharedmemorybase_physicaladdress_s item) {
			cb(item);
		});

		success = true;
	});
	/* END IGNORE CODESTYLE */

	if (ret != TB_ERROR_SUCCESS || !success) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

#endif /* CONFIG_EXCLAVES */