/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#include <kdp/kdp_common.h>
#include <kdp/kdp_dyld.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_fault.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>
#include <sys/errno.h>

extern unsigned int not_in_kdp;
extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);
extern addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);

/*
 * Returns the appropriate page size, and optionally sets the page mask, to
 * use when dealing with pages in this map -- it's important that this is a
 * "min" of page size to account for both K16/U4 (Rosetta) and K4/U16
 * (armv7k) environments.
 */
size_t
kdp_vm_map_get_page_size(vm_map_t map, size_t *effective_page_mask)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
		if (effective_page_mask) {
			*effective_page_mask = VM_MAP_PAGE_MASK(map);
		}
		return VM_MAP_PAGE_SIZE(map);
	} else {
		if (effective_page_mask) {
			*effective_page_mask = PAGE_MASK;
		}
		return PAGE_SIZE;
	}
}

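/*
 * memcpy variant that is safe to use from debugger context. On arm64, copies
 * whose destination lies in the panic storage area are performed one byte at
 * a time, since that region does not tolerate unaligned accesses.
 */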
void
kdp_memcpy(void *dst, const void *src, size_t len)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

#if defined(__arm64__)
	/* Identify if destination buffer is in panic storage area */
	if (((vm_offset_t)dst >= gPanicBase) && ((vm_offset_t)dst < (gPanicBase + gPanicSize))) {
		/* Copy over bytes individually to prevent unaligned access */
		uint8_t *dest_bytes = (uint8_t *)dst;
		const uint8_t *src_bytes = (const uint8_t *)src;
		for (size_t i = 0; i < len; i++) {
			dest_bytes[i] = src_bytes[i];
		}
	} else
#endif
	memcpy(dst, src, len);
}

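/*
 * strlcpy variant for debugger context, built on kdp_memcpy. Copies at most
 * maxlen - 1 bytes of src into dst, NUL-terminates the result when maxlen is
 * non-zero, and returns the length of src.
 */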
size_t
kdp_strlcpy(char *dst, const char *src, size_t maxlen)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

	const size_t srclen = strlen(src);

	if (srclen < maxlen) {
		kdp_memcpy(dst, src, srclen + 1);
	} else if (maxlen != 0) {
		kdp_memcpy(dst, src, maxlen - 1);
		dst[maxlen - 1] = '\0';
	}

	return srclen;
}

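/*
 * Walk every entry in the task's vm map and invoke the callback once for
 * each page that has a valid physical translation. Depending on
 * traverse_mappings_flags, the callback is given either the virtual or the
 * physical range of the page. Traversal stops as soon as the callback
 * returns something other than KERN_SUCCESS.
 */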
kern_return_t
kdp_traverse_mappings(
	task_t task,
	kdp_fault_flags_t fault_flags,
	kdp_traverse_mappings_flags_t traverse_mappings_flags,
	kdp_traverse_mappings_callback callback,
	void * context)
{
	vm_map_t map = task->map;
	vm_map_entry_t entry;
	vm_offset_t vcur;
	kern_return_t ret = KERN_SUCCESS;

	/* must be called from debugger context */
	assert(!not_in_kdp);

	size_t effective_page_mask;
	size_t task_page_size = kdp_vm_map_get_page_size(map, &effective_page_mask);

	// Iterate vm map
	for (entry = vm_map_first_entry(map); ret == KERN_SUCCESS && entry != NULL && entry != vm_map_to_entry(map); entry = entry->vme_next) {
		// Found a region, iterate over pages in the region
		for (vcur = entry->vme_start; ret == KERN_SUCCESS && vcur < entry->vme_end; vcur += task_page_size) {
			vm_offset_t vphys = kdp_find_phys(map, vcur, fault_flags, NULL);
			if (vphys) {
				if (traverse_mappings_flags & KDP_TRAVERSE_MAPPINGS_FLAGS_PHYSICAL) {
					ret = callback(vphys, vphys + task_page_size, context);
				} else {
					ret = callback(vcur, vcur + task_page_size, context);
				}
			}
		}
	}

	return ret;
}

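/*
 * Translate a virtual address in the given map to a physical address. If the
 * pmap has no valid translation and KDP_FAULT_FLAGS_ENABLE_FAULTING is set,
 * attempt a lightweight fault, recording timing and outcome flags in
 * fault_results. Returns 0 if no valid physical page can be found, or if the
 * page's cache attributes are not VM_WIMG_DEFAULT.
 */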
vm_offset_t
kdp_find_phys(vm_map_t map, vm_offset_t target_addr, kdp_fault_flags_t fault_flags, struct kdp_fault_result * fault_results)
{
	vm_offset_t cur_phys_addr;

	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (map == VM_MAP_NULL) {
		return 0;
	}

	cur_phys_addr = (vm_offset_t)kdp_vtophys(map->pmap, target_addr);
	if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) {
		if (!(fault_flags & KDP_FAULT_FLAGS_ENABLE_FAULTING)) {
			if (fault_results) {
				fault_results->flags |= KDP_FAULT_RESULT_PAGED_OUT;
			}

			return 0;
		}

		/*
		 * The pmap doesn't have a valid page so we start at the top level
		 * vm map and try a lightweight fault. Update fault path usage stats.
		 */
		uint64_t fault_start_time = mach_absolute_time();
		uint64_t fault_end_time;
		size_t effective_page_mask;
		(void)kdp_vm_map_get_page_size(map, &effective_page_mask);

		cur_phys_addr = kdp_lightweight_fault(map, (target_addr & ~effective_page_mask));
		fault_end_time = mach_absolute_time();

		if (fault_results) {
			fault_results->time_spent_faulting += fault_end_time - fault_start_time;
		}

		cur_phys_addr += (target_addr & effective_page_mask);

		if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) {
			if (fault_results) {
				fault_results->flags |= (KDP_FAULT_RESULT_TRIED_FAULT | KDP_FAULT_RESULT_PAGED_OUT);
			}

			return 0;
		}

		if (fault_results) {
			fault_results->flags |= KDP_FAULT_RESULT_FAULTED_IN;
		}
	} else {
		/*
		 * This check is done in kdp_lightweight_fault for the fault path.
		 */
		unsigned int cur_wimg_bits = pmap_cache_attributes((ppnum_t) atop(cur_phys_addr));

		if ((cur_wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
			return 0;
		}
	}

	return cur_phys_addr;
}

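/*
 * Copy 'size' bytes from the task virtual address 'uaddr' into the kernel
 * buffer 'dest', translating at most one source and one destination page at
 * a time via 'find_phys_fn'. The copy stops quietly at the first page that
 * cannot be translated (the function still returns 0). On arm64, a
 * destination inside the panic storage area is bounds-checked (EINVAL on
 * overflow) and copied through kdp_memcpy.
 */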
int
kdp_generic_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	size_t rem = size;
	char *kvaddr = dest;
	size_t effective_page_mask;
	size_t effective_page_size = kdp_vm_map_get_page_size(map, &effective_page_mask);

	/* must be called from debugger context */
	assert(!not_in_kdp);

#if defined(__arm64__)
	/* Identify if destination buffer is in panic storage area */
	if (!not_in_kdp && ((vm_offset_t)dest >= gPanicBase) && ((vm_offset_t)dest < (gPanicBase + gPanicSize))) {
		if (((vm_offset_t)dest + size) > (gPanicBase + gPanicSize)) {
			return EINVAL;
		}
	}
#endif

	while (rem) {
		uint64_t phys_src = (*find_phys_fn)(map, (vm_offset_t)uaddr, fault_flags, context);
		uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
		uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask);
		uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
		size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
		cur_size = MIN(cur_size, rem);

		if (phys_src && phys_dest) {
#if defined(__arm64__)
			/*
			 * On arm devices the panic buffer is mapped as device memory and doesn't allow
			 * unaligned accesses. To prevent these, we copy over bytes individually here.
			 */
			if (!not_in_kdp) {
				kdp_memcpy(kvaddr, (const void *)phystokv((pmap_paddr_t)phys_src), cur_size);
			} else
#endif /* defined(__arm64__) */
			bcopy_phys(phys_src, phys_dest, cur_size);
		} else {
			break;
		}

		uaddr += cur_size;
		kvaddr += cur_size;
		rem -= cur_size;
	}

	return 0;
}

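/*
 * Copy a single pointer-sized word from the task's address space into
 * 'result'. For 64-bit tasks the word is read directly; for 32-bit tasks a
 * 32-bit value is read and zero-extended.
 */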
int
kdp_generic_copyin_word(
	task_t task, uint64_t addr, uint64_t *result, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (task_has_64Bit_addr(task)) {
		return kdp_generic_copyin(task->map, addr, result, sizeof(uint64_t), fault_flags, find_phys_fn, context);
	} else {
		uint32_t buf;
		int r = kdp_generic_copyin(task->map, addr, &buf, sizeof(uint32_t), fault_flags, find_phys_fn, context);
		if (r == KERN_SUCCESS) {
			*result = buf;
		}
		return r;
	}
}

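/*
 * Copy a NUL-terminated string one byte at a time, re-validating the
 * physical source and destination addresses whenever the previously
 * validated run is exhausted. Returns the number of bytes copied including
 * the NUL terminator, 0 on translation failure, or -1 if the string does not
 * fit in buf_sz bytes.
 */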
static int
kdp_generic_copyin_string_slowpath(
	task_t task, uint64_t addr, char *buf, int buf_sz, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	int i;
	uint64_t validated = 0, valid_from;
	uint64_t phys_src, phys_dest;
	vm_map_t map = task->map;
	size_t effective_page_mask;
	size_t effective_page_size = kdp_vm_map_get_page_size(map, &effective_page_mask);

	/* must be called from debugger context */
	assert(!not_in_kdp);

	for (i = 0; i < buf_sz; i++) {
		if (validated == 0) {
			valid_from = i;
			phys_src = (*find_phys_fn)(map, (vm_offset_t)(addr + i), fault_flags, context);
			phys_dest = kvtophys((vm_offset_t)&buf[i]);
			uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask);
			uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
			if (phys_src && phys_dest) {
				validated = MIN(src_rem, dst_rem);
				if (validated) {
					bcopy_phys(phys_src, phys_dest, 1);
					validated--;
				} else {
					return 0;
				}
			} else {
				return 0;
			}
		} else {
			bcopy_phys(phys_src + (i - valid_from), phys_dest + (i - valid_from), 1);
			validated--;
		}

		if (buf[i] == '\0') {
			return i + 1;
		}
	}

	/* ran out of space */
	return -1;
}

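/*
 * Copy a NUL-terminated string from the task's address space into 'buf'.
 * First attempts an opportunistic 32-byte copy; if that copy fails or the
 * string does not fit, falls back to the byte-by-byte slowpath. Returns the
 * string length including the NUL terminator, or the slowpath's error value.
 */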
int
kdp_generic_copyin_string(
	task_t task, uint64_t addr, char *buf, int buf_sz, kdp_fault_flags_t fault_flags, find_phys_fn_t find_phys_fn, void *context)
{
	/* try to opportunistically copyin 32 bytes, most strings should fit */
	char optbuffer[32] = {0};
	int res;

	/* must be called from debugger context */
	assert(!not_in_kdp);

	res = kdp_generic_copyin(task->map, addr, optbuffer, sizeof(optbuffer), fault_flags, find_phys_fn, context);
	if (res != KERN_SUCCESS || strnlen(optbuffer, sizeof(optbuffer)) == sizeof(optbuffer)) {
		/* try the slowpath */
		return kdp_generic_copyin_string_slowpath(task, addr, buf, buf_sz, fault_flags, find_phys_fn, context);
	}

	/* success */
	return (int) strlcpy(buf, optbuffer, buf_sz) + 1;
}

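/*
 * Convenience wrapper around kdp_generic_copyin that translates addresses
 * with kdp_find_phys.
 */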
static int
kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, kdp_fault_flags_t fault_flags)
{
	return kdp_generic_copyin(map, uaddr, dest, size, fault_flags, (find_phys_fn_t)kdp_find_phys, NULL);
}

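/*
 * Recover dyld's load address and UUID for the given task by reading the
 * task's dyld all_image_infos structure and scanning its UUID array for the
 * entry whose load address matches dyld's. Also reports the task's effective
 * page size. Returns KERN_NOT_FOUND if the image info cannot be read or no
 * matching entry exists.
 */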
kern_return_t
kdp_task_dyld_info(task_t task, kdp_fault_flags_t fault_flags, uint64_t * dyld_load_address, uuid_t dyld_uuid, size_t * task_page_size)
{
	uint32_t uuid_info_count = 0;
	mach_vm_address_t uuid_info_addr = 0;
	mach_vm_address_t dyld_load_addr = 0;
	boolean_t task_64bit_addr = task_has_64Bit_addr(task);

	/* must be called from debugger context */
	assert(!not_in_kdp);

	if (dyld_uuid == NULL || dyld_load_address == NULL || task_page_size == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	*task_page_size = kdp_vm_map_get_page_size(task->map, NULL);

	if (task_64bit_addr) {
		struct user64_dyld_all_image_infos task_image_infos;
		if (kdp_copyin(task->map, task->all_image_info_addr, &task_image_infos,
		    sizeof(struct user64_dyld_all_image_infos), fault_flags) == KERN_SUCCESS) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
			dyld_load_addr = task_image_infos.dyldImageLoadAddress;
		}
	} else {
		struct user32_dyld_all_image_infos task_image_infos;
		if (kdp_copyin(task->map, task->all_image_info_addr, &task_image_infos,
		    sizeof(struct user32_dyld_all_image_infos), fault_flags) == KERN_SUCCESS) {
			uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
			uuid_info_addr = task_image_infos.uuidArray;
			dyld_load_addr = task_image_infos.dyldImageLoadAddress;
		}
	}

	if (uuid_info_count == 0 || uuid_info_addr == 0 || dyld_load_addr == 0) {
		return KERN_NOT_FOUND;
	}

	// Find the UUID of dyld
	for (size_t i = 0; i < uuid_info_count; i++) {
		if (task_64bit_addr) {
			struct user64_dyld_uuid_info uuid_info;
			if (kdp_copyin(task->map, uuid_info_addr + (i * sizeof(struct user64_dyld_uuid_info)), &uuid_info, sizeof(struct user64_dyld_uuid_info), fault_flags) == KERN_SUCCESS) {
				if (uuid_info.imageLoadAddress == dyld_load_addr) {
					uuid_copy(dyld_uuid, uuid_info.imageUUID);
					*dyld_load_address = dyld_load_addr;
					return KERN_SUCCESS;
				}
			}
		} else {
			struct user32_dyld_uuid_info uuid_info;
			if (kdp_copyin(task->map, uuid_info_addr + (i * sizeof(struct user32_dyld_uuid_info)), &uuid_info, sizeof(struct user32_dyld_uuid_info), fault_flags) == KERN_SUCCESS) {
				if (uuid_info.imageLoadAddress == dyld_load_addr) {
					uuid_copy(dyld_uuid, uuid_info.imageUUID);
					*dyld_load_address = dyld_load_addr;
					return KERN_SUCCESS;
				}
			}
		}
	}

	return KERN_NOT_FOUND;
}