/*
 * Copyright (c) 2015-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * The main orchestrator for kernel (and co-processor) coredumps. Here's a very simplistic view of
 * the flow:
 *
 * At kernel initialization time (kdp_core_init):
 * ----------------------------------------------
 *
 * - kdp_core_init() takes care of allocating all necessary data structures and initializes the
 *   coredump output stages
 *
 * At coredump time (do_kern_dump):
 * --------------------------------
 *
 * - Depending on the coredump variant, we chain the necessary output stages together in chain_output_stages()
 * - [Disk only] We initialize the corefile header
 * - [Disk only] We stream the stackshot out through the output stages and update the corefile header
 * - We perform the kernel coredump, streaming it out through the output stages
 * - [Disk only] We update the corefile header
 * - [Disk only] We perform the co-processor coredumps (driven by kern_do_coredump), streaming each out
 *   through the output stages and updating the corefile header.
 * - [Disk only] We save the coredump log to the corefile
 */
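
/*
 * As a sketch, the disk variant (KERN_DUMP_DISK) assembled by chain_output_stages()
 * below streams data through roughly this pipeline (bracketed stages are optional,
 * depending on configuration and architecture):
 *
 *   coredump data -> [memory-backing-aware buffer] -> [lz4] -> progress notify -> [aea encryption] -> disk
 */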

#include <mach/kern_return.h>
#include <mach/vm_types.h>
#include <kdp/core_exclude.h>
#include <kdp/kdp_core.h>
#include <kdp/core_notes.h>

#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <kern/locks.h>
#include <kdp/kdp_internal.h>
#include <kdp/output_stages/output_stages.h>
#include <kdp/processor_core.h>
#include <IOKit/IOTypes.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>
#include <san/kasan.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>
#include <os/atomic_private.h>

#if CONFIG_SPTM
#include <sptm/debug_header.h>
#endif

#if defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <kern/debug.h>
#endif /* defined(__x86_64__) */

#if CONFIG_SPTM
#include <arm64/sptm/sptm.h>
#endif /* CONFIG_SPTM */

kern_return_t kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context);
kern_return_t kdp_core_polled_io_polled_file_unavailable(void);

typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

static kern_return_t kern_dump_init(void *refcon, void *context);
static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
static int kern_dump_save_sw_vers_detail(void *refcon, core_save_sw_vers_detail_cb callback, void *context);
static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);
static kern_return_t kern_dump_save_note_summary(void *refcon, core_save_note_summary_cb callback, void *context);
static kern_return_t kern_dump_save_note_descriptions(void *refcon, core_save_note_descriptions_cb callback, void *context);
static kern_return_t kern_dump_save_note_data(void *refcon, core_save_note_data_cb callback, void *context);

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);
static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

static struct kdp_output_stage disk_output_stage = {};
static struct kdp_output_stage lz4_output_stage = {};
static struct kdp_output_stage zlib_output_stage = {};
static struct kdp_output_stage buffer_output_stage = {};
static struct kdp_output_stage net_output_stage = {};
static struct kdp_output_stage progress_notify_output_stage = {};
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
static struct kdp_output_stage aea_output_stage = {};
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION
#if defined(__arm64__)
static struct kdp_output_stage shmem_output_stage = {};
static struct kdp_output_stage memory_backing_aware_buffer_output_stage = {};
#endif /* defined(__arm64__) */

extern uint32_t kdp_crashdump_pkt_size;

static boolean_t kern_dump_successful = FALSE;

static const size_t kdp_core_header_size = sizeof(struct mach_core_fileheader_v2) + (KERN_COREDUMP_MAX_CORES * sizeof(struct mach_core_details_v2));
static struct mach_core_fileheader_v2 *kdp_core_header = NULL;

static lck_grp_t *kdp_core_initialization_lock_group = NULL;
static lck_mtx_t *kdp_core_disk_stage_lock = NULL;
static bool kdp_core_is_initializing_disk_stage = false;

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
static const size_t PUBLIC_KEY_RESERVED_LENGTH = roundup(4096, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
static void *kdp_core_public_key = NULL;
static lck_mtx_t *kdp_core_encryption_stage_lock = NULL;
static bool kdp_core_is_initializing_encryption_stage = false;

static bool kern_dump_should_enforce_encryption(void);
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

static lck_mtx_t *kdp_core_lz4_stage_lock = NULL;
static bool kdp_core_is_initializing_lz4_stage = false;

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

#define COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY (1 << 0)
#define COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT (1 << 1)
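
/*
 * These bits are parsed from the optional "coredump_encryption" boot-arg (see
 * kern_dump_should_enforce_encryption() below): one bit overrides whether
 * encryption is considered available, the other forces enforcement, i.e.
 * skipping a coredump entirely rather than writing it unencrypted.
 */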

boolean_t
kdp_has_polled_corefile(void)
{
	return NULL != gIOPolledCoreFileVars;
}

kern_return_t
kdp_polled_corefile_error(void)
{
	return gIOPolledCoreFileOpenRet;
}

IOPolledCoreFileMode_t
kdp_polled_corefile_mode(void)
{
	return gIOPolledCoreFileMode;
}

struct kdp_core_excluded_region {
	struct kdp_core_excluded_region *next;
	vm_offset_t addr;
	vm_size_t size;
};

static LCK_GRP_DECLARE(excluded_regions_grp, "kdp-exclude-regions");
static LCK_MTX_DECLARE(excluded_regions_mtx, &excluded_regions_grp);
static struct kdp_core_excluded_region *excluded_regions;

void
kdp_core_exclude_region(vm_offset_t addr, vm_size_t size)
{
	struct kdp_core_excluded_region *region;

	if (addr >= addr + size) {
		panic("%s: cannot exclude region starting at %p with size %zu (zero or overflowing size)",
		    __func__, (void*)addr, (size_t)size);
	}
	if (addr != round_page(addr) || size != round_page(size)) {
		panic("%s: cannot exclude region starting at %p with size %zu (not page aligned)",
		    __func__, (void*)addr, (size_t)size);
	}

	region = kalloc_type(typeof(*region), Z_WAITOK | Z_NOFAIL);
	region->addr = addr;
	region->size = size;

	lck_mtx_lock(&excluded_regions_mtx);
	region->next = excluded_regions;
	excluded_regions = region;
	lck_mtx_unlock(&excluded_regions_mtx);
}

void
kdp_core_unexclude_region(vm_offset_t addr, vm_size_t size)
{
	struct kdp_core_excluded_region *region;
	struct kdp_core_excluded_region **fixup = &excluded_regions;

	lck_mtx_lock(&excluded_regions_mtx);
	for (region = excluded_regions; region; region = region->next) {
		if (region->addr == addr && region->size == size) {
			*fixup = region->next;
			break;
		}
		fixup = &region->next;
	}
	if (!region) {
		panic("%s: cannot unexclude region starting at %p with size %zu (not currently excluded)",
		    __func__, (void*)addr, (size_t)size);
	}
	lck_mtx_unlock(&excluded_regions_mtx);

	// We had exclusive access to the list when we removed the region, and it is no longer
	// reachable from the list, so it is safe to free.
	kfree_type(typeof(*region), region);
}
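
/*
 * A minimal usage sketch (hypothetical caller): a subsystem holding secrets in
 * a page-aligned buffer can keep it out of kernel coredumps by pairing these
 * calls:
 *
 *   kdp_core_exclude_region(buf, round_page(len));    // buf itself must be page aligned
 *   ...
 *   kdp_core_unexclude_region(buf, round_page(len));  // when the buffer is retired
 *
 * Unaligned or zero-sized regions panic, as does unexcluding a region that was
 * never excluded.
 */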

static bool
kernel_vaddr_in_excluded_region(vm_offset_t addr, uint64_t *vincr)
{
	struct kdp_core_excluded_region *region;

	// We check this earlier before attempting to dump the kernel, but verify here.
	assert(!kdp_lck_mtx_lock_spin_is_acquired(&excluded_regions_mtx));

	for (region = excluded_regions; region; region = region->next) {
		if (region->addr <= addr && addr < (region->addr + region->size)) {
			*vincr = region->size;
			return true;
		}
	}

	return false;
}

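/*
 * Push `length` bytes of coredump data into the head of the output-stage
 * chain. By convention, a call with length == 0 and data == NULL signals
 * end-of-stream and flushes the stages (see the flush calls in
 * dump_panic_buffer() below).
 */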
kern_return_t
kdp_core_output(void *kdp_core_out_state, uint64_t length, void * data)
{
	kern_return_t err = KERN_SUCCESS;
	uint64_t percent;
	struct kdp_core_out_state *vars = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&vars->kcos_out_stage);

	if (vars->kcos_error == KERN_SUCCESS) {
#if DEVELOPMENT || DEBUG
		// panic testing: force the write to fail after X number of writes
		if ((panic_test_case & PANIC_TEST_CASE_COREFILE_IO_ERR) && (--panic_test_action_count == 0)) {
			panic_test_case &= ~PANIC_TEST_CASE_COREFILE_IO_ERR;
			length = -1;
		}
#endif

		if ((err = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, length, data)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(kdp_core_output) outproc(KDP_DATA, NULL, 0x%llx, %p) returned 0x%x\n",
			    length, data, err);
			vars->kcos_error = err;
		}
		if (!data && !length) {
			kern_coredump_log(NULL, "100..");
		} else {
			vars->kcos_bytes_written += length;
			percent = (vars->kcos_bytes_written * 100) / vars->kcos_totalbytes;
			if ((percent - vars->kcos_lastpercent) >= 10) {
				vars->kcos_lastpercent = percent;
				kern_coredump_log(NULL, "%lld..\n", percent);
			}
		}
	}
	return err;
}

#if defined(__arm64__)
extern pmap_paddr_t avail_start, avail_end;
extern struct vm_object pmap_object_store;
#endif
extern vm_offset_t c_buffers;
extern vm_size_t c_buffers_size;

static bool
kernel_vaddr_in_coredump_stage(const struct kdp_output_stage *stage, uint64_t vaddr, uint64_t *vincr)
{
	uint64_t start_addr = (uint64_t)stage->kos_data;
	uint64_t end_addr = start_addr + stage->kos_data_size;

	if (!stage->kos_data) {
		return false;
	}

	if (vaddr >= start_addr && vaddr < end_addr) {
		*vincr = stage->kos_data_size - (vaddr - start_addr);
		return true;
	}

	return false;
}

static bool
kernel_vaddr_in_coredump_stages(uint64_t vaddr, uint64_t *vincr)
{
	if (kernel_vaddr_in_coredump_stage(&disk_output_stage, vaddr, vincr)) {
		return true;
	}

	if (kernel_vaddr_in_coredump_stage(&lz4_output_stage, vaddr, vincr)) {
		return true;
	}

	if (kernel_vaddr_in_coredump_stage(&zlib_output_stage, vaddr, vincr)) {
		return true;
	}

	if (kernel_vaddr_in_coredump_stage(&buffer_output_stage, vaddr, vincr)) {
		return true;
	}

	if (kernel_vaddr_in_coredump_stage(&net_output_stage, vaddr, vincr)) {
		return true;
	}

	if (kernel_vaddr_in_coredump_stage(&progress_notify_output_stage, vaddr, vincr)) {
		return true;
	}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	if (kernel_vaddr_in_coredump_stage(&aea_output_stage, vaddr, vincr)) {
		return true;
	}
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

#if defined(__arm64__)
	if (kernel_vaddr_in_coredump_stage(&shmem_output_stage, vaddr, vincr)) {
		return true;
	}
#endif /* defined(__arm64__) */

#if defined(__arm64__)
	if (kernel_vaddr_in_coredump_stage(&memory_backing_aware_buffer_output_stage, vaddr, vincr)) {
		return true;
	}
#endif /* defined(__arm64__) */

	return false;
}

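/*
 * Resolve the physical page backing `vaddr` in the kernel pmap. For VA ranges
 * that must not be dumped (compressor buffers, the output stages' own working
 * memory, the RAM disk, and, on arm64, the physical memory map), this returns
 * 0 and sets *pvincr to the number of bytes the traversal should skip;
 * otherwise it returns the page number and advances by a single page.
 */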
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
	ppnum_t ppn = 0;
	uint64_t vincr = PAGE_SIZE_64;

	assert(!(vaddr & PAGE_MASK_64));

	/* VA ranges to exclude */
	if (vaddr == c_buffers) {
		/* compressor data */
		ppn = 0;
		vincr = c_buffers_size;
	} else if (kernel_vaddr_in_coredump_stages(vaddr, &vincr)) {
		/* coredump output stage working memory */
		ppn = 0;
	} else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr)) {
		ppn = 0;
		vincr = kdp_core_ramdisk_size;
	} else
#if defined(__arm64__)
	if (vaddr == phystokv(avail_start)) {
		/* physical memory map */
		ppn = 0;
		vincr = (avail_end - avail_start);
	} else
#endif /* defined(__arm64__) */
	{
		ppn = (pvphysaddr != NULL ?
		    pmap_find_phys(kernel_pmap, vaddr) :
		    pmap_find_phys_nofault(kernel_pmap, vaddr));
	}

	*pvincr = round_page_64(vincr);

	if (ppn && pvphysaddr) {
		uint64_t phys = ptoa_64(ppn);
		if (physmap_enclosed(phys)) {
			*pvphysaddr = phystokv(phys);
		} else {
			ppn = 0;
		}
	}

	return ppn;
}

static int
pmap_traverse_present_mappings(pmap_t __unused pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context)
{
	IOReturn ret;
	vm_map_offset_t vcurstart, vcur;
	uint64_t vincr = 0;
	vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
	vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));
#if defined(XNU_TARGET_OS_BRIDGE)
	vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base);
	vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size));
#endif

	boolean_t lastvavalid;
#if defined(__arm64__)
	vm_page_t m = VM_PAGE_NULL;
#endif

#if defined(__x86_64__)
	assert(!is_ept_pmap(pmap));
#endif

	/* Assumes pmap is locked, or being called from the kernel debugger */

	if (start > end) {
		return KERN_INVALID_ARGUMENT;
	}

	ret = KERN_SUCCESS;
	lastvavalid = FALSE;
	for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end);) {
		ppnum_t ppn = 0;

#if defined(__arm64__)
		/* We're at the start of the physmap, so pull out the pagetable pages that
		 * are accessed through that region.*/
		if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store)) {
			m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);
		}

		if (m != VM_PAGE_NULL) {
			vm_map_offset_t vprev = vcur;
			ppn = (ppnum_t)atop(avail_end);
			while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m)) {
				/* Ignore pages that come from the static region and have already been dumped.*/
				if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start)) {
					ppn = VM_PAGE_GET_PHYS_PAGE(m);
					break;
				}
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
			}
			vincr = PAGE_SIZE_64;
			if (ppn == atop(avail_end)) {
				vm_object_unlock(&pmap_object_store);
				m = VM_PAGE_NULL;
				// avail_end is not a valid physical address,
				// so phystokv(avail_end) may not produce the expected result.
#if CONFIG_SPTM
				/**
				 * The physical aperture in SPTM systems includes mappings to IO memory,
				 * following the last page of managed memory. Rather than calculating the
				 * end of the physical aperture as a function of the amount of managed memory,
				 * simply advance [vcur] to the point advertised by the SPTM as the end of
				 * the physical aperture.
				 */
				vcur = SPTMArgs->physmap_end;
#else
				vcur = phystokv(avail_start) + (avail_end - avail_start);
#endif
			} else {
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
				vcur = phystokv(ptoa(ppn));
			}
			if (vcur != vprev) {
				ret = callback(vcurstart, vprev, context);
				lastvavalid = FALSE;
			}
		}
		if (m == VM_PAGE_NULL) {
			ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
		}
#else /* defined(__arm64__) */
		ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif
		if (ppn != 0 && kernel_vaddr_in_excluded_region(vcur, &vincr)) {
			/* excluded region */
			ppn = 0;
		}
		if (ppn != 0) {
			if (((vcur < debug_start) || (vcur >= debug_end))
			    && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn))
#if defined(XNU_TARGET_OS_BRIDGE)
			    // include the macOS panic region if it's mapped
			    && ((vcur < macos_panic_start) || (vcur >= macos_panic_end))
#endif
			    ) {
				/* not something we want */
				ppn = 0;
			}
			/* include the phys carveout only if explicitly marked */
			if (debug_is_in_phys_carveout(vcur) &&
			    !debug_can_coredump_phys_carveout()) {
				ppn = 0;
			}
		}

		if (ppn != 0) {
			if (!lastvavalid) {
				/* Start of a new virtual region */
				vcurstart = vcur;
				lastvavalid = TRUE;
			}
		} else {
			if (lastvavalid) {
				/* end of a virtual region */
				ret = callback(vcurstart, vcur, context);
				lastvavalid = FALSE;
			}

#if defined(__x86_64__)
			/* Try to skip by 2MB if possible */
			if ((vcur & PDMASK) == 0) {
				pd_entry_t *pde;
				pde = pmap_pde(pmap, vcur);
				if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
					/* Make sure we wouldn't overflow */
					if (vcur < (end - NBPD)) {
						vincr = NBPD;
					}
				}
			}
#endif /* defined(__x86_64__) */
		}
		vcur += vincr;
	}

	if ((ret == KERN_SUCCESS) && lastvavalid) {
		/* send previous run */
		ret = callback(vcurstart, vcur, context);
	}

#if KASAN
	if (ret == KERN_SUCCESS) {
		ret = kasan_traverse_mappings(callback, context);
	}
#endif

	return ret;
}

struct kern_dump_preflight_context {
	uint32_t region_count;
	uint64_t dumpable_bytes;
};

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
	struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
	IOReturn ret = KERN_SUCCESS;

	kdc->region_count++;
	kdc->dumpable_bytes += (end - start);

	return ret;
}


struct kern_dump_send_seg_desc_context {
	core_save_segment_descriptions_cb callback;
	void *context;
};

int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
	struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
	uint64_t seg_start = (uint64_t) start;
	uint64_t seg_end = (uint64_t) end;

	return kds_context->callback(seg_start, seg_end, kds_context->context);
}

struct kern_dump_send_segdata_context {
	core_save_segment_data_cb callback;
	void *context;
};

int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
	struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;

	return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
}

static kern_return_t
kern_dump_init(__unused void *refcon, void *context)
{
	/* TODO: consider doing mmu flush from an init function */

	// If excluded regions list is locked, it is unsafe to dump the kernel.
	if (kdp_lck_mtx_lock_spin_is_acquired(&excluded_regions_mtx)) {
		kern_coredump_log(context, "%s: skipping kernel because excluded regions list is locked\n",
		    __func__);
#if defined(__arm64__)
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_KERNEL_COREDUMP_SKIPPED_EXCLUDE_REGIONS_UNAVAILABLE;
#else
		panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_KERNEL_COREDUMP_SKIPPED_EXCLUDE_REGIONS_UNAVAILABLE;
#endif
		paniclog_flush();
		return KERN_NODE_DOWN;
	}

	return KERN_SUCCESS;
}

static int
kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
{
	struct kern_dump_preflight_context kdc_preflight = { };
	uint64_t thread_state_size = 0, thread_count = 0;
	vm_map_offset_t vstart = kdp_core_start_addr();
	kern_return_t ret;

	ret = pmap_traverse_present_mappings(kernel_pmap,
	    vstart,
	    VM_MAX_KERNEL_ADDRESS,
	    kern_dump_pmap_traverse_preflight_callback,
	    &kdc_preflight);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
		return ret;
	}

	kern_collectth_state_size(&thread_count, &thread_state_size);

	ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
	    thread_count, thread_state_size, 0, context);
	return ret;
}

static int
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
	vm_map_offset_t vstart = kdp_core_start_addr();
	kern_return_t ret;
	struct kern_dump_send_seg_desc_context kds_context;

	kds_context.callback = callback;
	kds_context.context = context;

	ret = pmap_traverse_present_mappings(kernel_pmap,
	    vstart,
	    VM_MAX_KERNEL_ADDRESS,
	    kern_dump_pmap_traverse_send_segdesc_callback,
	    &kds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
		return ret;
	}

	return KERN_SUCCESS;
}

static int
kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
	kern_return_t ret;
	uint64_t thread_state_size = 0, thread_count = 0;

	kern_collectth_state_size(&thread_count, &thread_state_size);

	if (thread_state_size > 0) {
		void * iter = NULL;
		do {
			kern_collectth_state(current_thread(), buf, thread_state_size, &iter);

			ret = callback(buf, context);
			if (ret != KERN_SUCCESS) {
				return ret;
			}
		} while (iter);
	}

	return KERN_SUCCESS;
}


static int
kern_dump_save_sw_vers_detail(__unused void *refcon, core_save_sw_vers_detail_cb callback, void *context)
{
	return callback(vm_kernel_stext, kernel_uuid, 0, context);
}

static int
kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
{
	vm_map_offset_t vstart = kdp_core_start_addr();
	kern_return_t ret;
	struct kern_dump_send_segdata_context kds_context;

	kds_context.callback = callback;
	kds_context.context = context;

	ret = pmap_traverse_present_mappings(kernel_pmap,
	    vstart,
	    VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
		return ret;
	}

	return KERN_SUCCESS;
}

kern_return_t
kdp_reset_output_vars(void *kdp_core_out_state, uint64_t totalbytes, bool encrypt_core, bool *out_should_skip_coredump)
{
	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage *current_stage = NULL;

	/* Re-initialize kdp_outstate */
	outstate->kcos_totalbytes = totalbytes;
	outstate->kcos_bytes_written = 0;
	outstate->kcos_lastpercent = 0;
	outstate->kcos_error = KERN_SUCCESS;

	/* Reset the output stages */
	STAILQ_FOREACH(current_stage, &outstate->kcos_out_stage, kos_next) {
		current_stage->kos_funcs.kosf_reset(current_stage);
	}

	*out_should_skip_coredump = false;
	if (encrypt_core) {
		if (outstate->kcos_enforce_encryption && !outstate->kcos_encryption_stage) {
			*out_should_skip_coredump = true;
#if defined(__arm64__)
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#else
			panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#endif
			kern_coredump_log(NULL, "(kdp_reset_output_vars) Encryption requested, is unavailable, and enforcement is active. Skipping current core.\n");
		}
	} else if (outstate->kcos_encryption_stage) {
		outstate->kcos_encryption_stage->kos_bypass = true;
	}

	return KERN_SUCCESS;
}

static kern_return_t
kern_dump_update_header(struct kdp_core_out_state *outstate)
{
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	uint64_t foffset;
	kern_return_t ret;

	/* Write the file header -- first seek to the beginning of the file */
	foffset = 0;
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(foffset), &foffset, foffset, ret);
		return ret;
	}

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header_size, kdp_core_header)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
		    kdp_core_header_size, kdp_core_header, ret);
		return ret;
	}

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
		return ret;
	}

#if defined(__arm64__)
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}

kern_return_t
kern_dump_record_file(void *kdp_core_out_state, const char *filename, uint64_t file_offset, uint64_t *out_file_length, uint64_t details_flags)
{
	kern_return_t ret = KERN_SUCCESS;
	uint64_t bytes_written = 0;
	struct mach_core_details_v2 *core_details = NULL;
	struct kdp_output_stage *last_stage;
	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;

	assert(kdp_core_header->num_files < KERN_COREDUMP_MAX_CORES);
	assert(out_file_length != NULL);
	*out_file_length = 0;

	last_stage = STAILQ_LAST(&outstate->kcos_out_stage, kdp_output_stage, kos_next);
	bytes_written = last_stage->kos_bytes_written;

	core_details = &(kdp_core_header->files[kdp_core_header->num_files]);
	core_details->flags = details_flags;
	core_details->offset = file_offset;
	core_details->length = bytes_written;
	strncpy((char *)&core_details->core_name, filename,
	    MACH_CORE_FILEHEADER_NAMELEN);
	core_details->core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';

	kdp_core_header->num_files++;

	ret = kern_dump_update_header(outstate);
	if (ret == KERN_SUCCESS) {
		*out_file_length = bytes_written;
	}

	return ret;
}

kern_return_t
kern_dump_seek_to_next_file(void *kdp_core_out_state, uint64_t next_file_offset)
{
	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	kern_return_t ret;

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
	}

	return ret;
}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION

static kern_return_t
kern_dump_write_public_key(struct kdp_core_out_state *outstate)
{
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	uint64_t foffset;
	uint64_t remainder = PUBLIC_KEY_RESERVED_LENGTH - kdp_core_header->pub_key_length;
	kern_return_t ret;

	if (kdp_core_header->pub_key_offset == 0 || kdp_core_header->pub_key_length == 0) {
		// Nothing to do
		return KERN_SUCCESS;
	}

	/* Write the public key -- first seek to the appropriate offset */
	foffset = kdp_core_header->pub_key_offset;
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(foffset), &foffset, foffset, ret);
		return ret;
	}

	// Write the public key
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
		    kdp_core_header->pub_key_length, kdp_core_public_key, ret);
		return ret;
	}

	// Fill out the remainder of the block with zeroes
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
		    remainder, ret);
		return ret;
	}

	// Do it once more to write the "next" public key
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
		    kdp_core_header->pub_key_length, kdp_core_public_key, ret);
		return ret;
	}

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
		    remainder, ret);
		return ret;
	}

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc data flush returned 0x%x\n", ret);
		return ret;
	}

#if defined(__arm64__)
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc explicit flush returned 0x%x\n", ret);
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}

#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

static kern_return_t
chain_output_stages(enum kern_dump_type kd_variant, struct kdp_core_out_state *outstate, uint64_t *details_flags)
{
	struct kdp_output_stage *current = NULL;

	assert(details_flags);
	*details_flags = 0;

	switch (kd_variant) {
	case KERN_DUMP_STACKSHOT_DISK:
		OS_FALLTHROUGH;
	case KERN_DUMP_DISK:
#if defined(__arm64__)
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &memory_backing_aware_buffer_output_stage, kos_next);
#endif
		if (!kdp_corezip_disabled) {
			if (kdp_core_is_initializing_lz4_stage) {
				kern_coredump_log(NULL, "We were in the middle of initializing LZ4 stage. Cannot write a coredump to disk\n");
				return KERN_FAILURE;
			} else if (!lz4_output_stage.kos_initialized) {
				kern_coredump_log(NULL, "LZ4 stage is not yet initialized. Cannot write a coredump to disk\n");
				return KERN_FAILURE;
			}
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &lz4_output_stage, kos_next);
			*details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_LZ4;
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		if (kdp_core_is_initializing_encryption_stage) {
			kern_coredump_log(NULL, "We were in the middle of initializing encryption. Marking it as unavailable\n");
		} else if (aea_output_stage.kos_initialized) {
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &aea_output_stage, kos_next);
			outstate->kcos_encryption_stage = &aea_output_stage;
			*details_flags |= MACH_CORE_DETAILS_V2_FLAG_ENCRYPTED_AEA;
		}
		outstate->kcos_enforce_encryption = kern_dump_should_enforce_encryption();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION
		if (kdp_core_is_initializing_disk_stage) {
			kern_coredump_log(NULL, "We were in the middle of initializing the disk stage. Cannot write a coredump to disk\n");
			return KERN_FAILURE;
		} else if (disk_output_stage.kos_initialized == false) {
			kern_coredump_log(NULL, "Corefile is not yet initialized. Cannot write a coredump to disk\n");
			return KERN_FAILURE;
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &disk_output_stage, kos_next);
		break;
	case KERN_DUMP_NET:
		if (!kdp_corezip_disabled) {
			if (!zlib_output_stage.kos_initialized) {
				kern_coredump_log(NULL, "Zlib stage is not initialized. Cannot write a coredump to the network\n");
				return KERN_FAILURE;
			}
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
			*details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_ZLIB;
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &buffer_output_stage, kos_next);
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &net_output_stage, kos_next);
		break;
#if defined(__arm64__)
	case KERN_DUMP_HW_SHMEM_DBG:
		if (!kdp_corezip_disabled) {
			if (!zlib_output_stage.kos_initialized) {
				kern_coredump_log(NULL, "Zlib stage is not initialized. Cannot write a coredump to shared memory\n");
				return KERN_FAILURE;
			}
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
			*details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_ZLIB;
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &shmem_output_stage, kos_next);
		break;
#endif /* defined(__arm64__) */
	}

	STAILQ_FOREACH(current, &outstate->kcos_out_stage, kos_next) {
		current->kos_outstate = outstate;
	}

	return KERN_SUCCESS;
}

#if defined(__arm64__)
static kern_return_t
dump_panic_buffer(struct kdp_core_out_state *outstate, char *panic_buf, size_t panic_len,
    uint64_t *foffset, uint64_t details_flags)
{
	kern_return_t ret = KERN_SUCCESS;
	bool should_skip = false;

	kern_coredump_log(NULL, "\nBeginning dump of panic region of size 0x%zx\n", panic_len);

	ret = kdp_reset_output_vars(outstate, panic_len, true, &should_skip);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	if (should_skip) {
		kern_coredump_log(NULL, "Skipping panic region dump\n");
		return ret;
	}

	uint64_t compressed_panic_region_len = 0;
	ret = kdp_core_output(outstate, panic_len, panic_buf);
	if (KERN_SUCCESS != ret) {
		kern_coredump_log(NULL, "Failed to write panic region to file, kdp_coreoutput(outstate, %zu, %p) returned 0x%x\n",
		    panic_len, panic_buf, ret);
		return ret;
	}

	ret = kdp_core_output(outstate, 0, NULL);
	if (KERN_SUCCESS != ret) {
		kern_coredump_log(NULL, "Failed to flush panic region data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", outstate, ret);
		return ret;
	}

	ret = kern_dump_record_file(outstate, "panic_region", *foffset, &compressed_panic_region_len,
	    details_flags);
	if (KERN_SUCCESS != ret) {
		kern_coredump_log(NULL, "Failed to record panic region in corefile header, kern_dump_record_file returned 0x%x\n", ret);
		return ret;
	}

	kern_coredump_log(NULL, "Recorded panic region in corefile at offset 0x%llx, compressed to %llu bytes\n", *foffset, compressed_panic_region_len);
	*foffset = roundup((*foffset + compressed_panic_region_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);

	ret = kern_dump_seek_to_next_file(outstate, *foffset);
	if (KERN_SUCCESS != ret) {
		kern_coredump_log(NULL, "Failed to seek to panic region file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", *foffset, ret);
		return ret;
	}

	return ret;
}
#endif /* defined(__arm64__) */

static int
do_kern_dump(enum kern_dump_type kd_variant)
{
	struct kdp_core_out_state outstate = { };
	struct kdp_output_stage *first_stage = NULL;
	char *coredump_log_start = NULL, *buf = NULL;
	size_t reserved_debug_logsize = 0, prior_debug_logsize = 0;
	uint64_t foffset = 0;
	kern_return_t ret = KERN_SUCCESS;
	boolean_t output_opened = FALSE, dump_succeeded = TRUE;
	uint64_t details_flags = 0;

	/* Initialize output context */

	bzero(&outstate, sizeof(outstate));
	STAILQ_INIT(&outstate.kcos_out_stage);
	ret = chain_output_stages(kd_variant, &outstate, &details_flags);
	if (KERN_SUCCESS != ret) {
		dump_succeeded = FALSE;
		goto exit;
	}
	first_stage = STAILQ_FIRST(&outstate.kcos_out_stage);

	/*
	 * Record the initial panic log buffer length so we can dump the coredump log
	 * and panic log to disk
	 */
	coredump_log_start = debug_buf_ptr;
#if defined(__arm64__)
	assert(panic_info->eph_other_log_offset != 0);
	assert(panic_info->eph_panic_log_len != 0);
	/* Include any data from before the panic log as well */
	prior_debug_logsize = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
	    panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* defined(__arm64__) */
	if (panic_info->mph_panic_log_offset != 0) {
		prior_debug_logsize = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
		    panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
	}
#endif /* defined(__arm64__) */

	assert(prior_debug_logsize <= debug_buf_size);

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		/* Open the file for output */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_WRQ, NULL, 0, NULL)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}
	output_opened = true;

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		const size_t aligned_corefile_header_size = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		const size_t aligned_public_key_size = PUBLIC_KEY_RESERVED_LENGTH * 2;
#else
		const size_t aligned_public_key_size = 0;
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

		reserved_debug_logsize = prior_debug_logsize + KERN_COREDUMP_MAXDEBUGLOGSIZE;

		/* Space for file header, public key, panic log, core log */
		foffset = roundup(aligned_corefile_header_size + aligned_public_key_size + reserved_debug_logsize, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
		kdp_core_header->log_offset = aligned_corefile_header_size + aligned_public_key_size;
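
		/*
		 * Resulting corefile layout, with each boundary rounded up to
		 * KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN:
		 *
		 *   [ file header | public key + spare copy (if encrypting) | panic + coredump logs | core files... ]
		 *
		 * foffset now points at the first core file; the header and logs
		 * are written back near the end of do_kern_dump() once their
		 * final sizes are known.
		 */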

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		/* Write the public key */
		ret = kern_dump_write_public_key(&outstate);
		if (KERN_SUCCESS != ret) {
			kern_coredump_log(NULL, "(do_kern_dump write public key) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

		/* Seek the calculated offset (we'll scrollback later to flush the logs and header) */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}

#if defined(__arm64__)
	flush_mmu_tlb();
#endif

	kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores...\n" :
	    "Transmitting kernel state, please wait:\n");

#if defined (__arm64__)
	char *panic_buf = (char *)gPanicBase;
	size_t panic_len = (vm_offset_t)debug_buf_ptr - gPanicBase;
	if (kd_variant == KERN_DUMP_DISK && (panic_buf && panic_len)) {
		ret = dump_panic_buffer(&outstate, panic_buf, panic_len, &foffset, details_flags);
		if (KERN_SUCCESS != ret) {
			dump_succeeded = FALSE;
		}
	}
#endif

#if defined(__x86_64__)
	if (((kd_variant == KERN_DUMP_STACKSHOT_DISK) || (kd_variant == KERN_DUMP_DISK)) && ((panic_stackshot_buf != 0) && (panic_stackshot_len != 0))) {
		bool should_skip = false;

		kern_coredump_log(NULL, "\nBeginning dump of kernel stackshot\n");

		ret = kdp_reset_output_vars(&outstate, panic_stackshot_len, true, &should_skip);

		if (ret != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to reset outstate for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
			dump_succeeded = FALSE;
		} else if (!should_skip) {
			uint64_t compressed_stackshot_len = 0;
			if ((ret = kdp_core_output(&outstate, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outstate, %lu, %p) returned 0x%x\n",
				    panic_stackshot_len, (void *) panic_stackshot_buf, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kdp_core_output(&outstate, 0, NULL)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outstate, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kern_dump_record_file(&outstate, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len, details_flags)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
				dump_succeeded = FALSE;
			} else {
				kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
				foffset = roundup((foffset + compressed_stackshot_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
				if ((ret = kern_dump_seek_to_next_file(&outstate, foffset)) != KERN_SUCCESS) {
					kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
					dump_succeeded = FALSE;
				}
			}
		} else {
			kern_coredump_log(NULL, "Skipping stackshot dump\n");
		}
	}
#endif

	if (kd_variant == KERN_DUMP_DISK) {
		/*
		 * Dump co-processors as well, foffset will be overwritten with the
		 * offset of the next location in the file to be written to.
		 */
		if (kern_do_coredump(&outstate, FALSE, foffset, &foffset, details_flags) != 0) {
			dump_succeeded = FALSE;
		}
	} else if (kd_variant != KERN_DUMP_STACKSHOT_DISK) {
		/* Only the kernel */
		if (kern_do_coredump(&outstate, TRUE, foffset, &foffset, details_flags) != 0) {
			dump_succeeded = FALSE;
		}
	}

	if (kd_variant == KERN_DUMP_DISK) {
		assert(reserved_debug_logsize != 0);
		size_t remaining_debug_logspace = reserved_debug_logsize;

		/* Write the debug log -- first seek to the end of the corefile header */
		foffset = kdp_core_header->log_offset;
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		/* First flush the data from just the paniclog */
		size_t initial_log_length = 0;
#if defined(__arm64__)
		initial_log_length = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
		    panic_info->eph_panic_log_len;
#else
		if (panic_info->mph_panic_log_offset != 0) {
			initial_log_length = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
			    panic_info->mph_panic_log_len;
		}
#endif

		buf = debug_buf_base;
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, initial_log_length, buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    initial_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		remaining_debug_logspace -= initial_log_length;

		/* Next include any log data from after the stackshot (the beginning of the 'other' log). */
#if defined(__arm64__)
		buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
#else
		/*
		 * There may be no paniclog if we're doing a coredump after a call to Debugger() on x86 if debugger_is_panic was
		 * configured to FALSE based on the boot-args. In that case just start from where the debug buffer was when
		 * we began taking a coredump.
		 */
		if (panic_info->mph_other_log_offset != 0) {
			buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
		} else {
			buf = coredump_log_start;
		}
#endif
		assert(debug_buf_ptr >= buf);

		size_t other_log_length = debug_buf_ptr - buf;
		if (other_log_length > remaining_debug_logspace) {
			other_log_length = remaining_debug_logspace;
		}

		/* Write the coredump log */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, other_log_length, buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    other_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		kdp_core_header->log_length = initial_log_length + other_log_length;
		kern_dump_update_header(&outstate);
	}

exit:
	/* close / last packet */
	if (output_opened && (ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_EOF, NULL, 0, ((void *) 0))) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
		dump_succeeded = FALSE;
	}

	/* If applicable, update the panic header and flush it so we update the CRC */
#if defined(__arm64__)
	panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
	    EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
	paniclog_flush();
#else
	if (panic_info->mph_panic_log_offset != 0) {
		panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
		    MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
		paniclog_flush();
	}
#endif

	return dump_succeeded ? 0 : -1;
}

boolean_t
dumped_kernel_core(void)
{
	return kern_dump_successful;
}

int
kern_dump(enum kern_dump_type kd_variant)
{
	static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
	int ret = -1;
#if KASAN
	kasan_kdp_disable();
#endif
	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		if (dumped_local) {
			return 0;
		}
		if (local_dump_in_progress) {
			return -1;
		}
		local_dump_in_progress = TRUE;
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			dumped_local = TRUE;
			kern_dump_successful = TRUE;
			local_dump_in_progress = FALSE;
		}

		return ret;
#if defined(__arm64__)
	} else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
#endif
	} else {
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
	}
}

static kern_return_t
kdp_core_init_output_stages(void)
{
	kern_return_t ret = KERN_SUCCESS;

	// We only zero-out the disk stage. It will be initialized
	// later on when the corefile is initialized
	bzero(&disk_output_stage, sizeof(disk_output_stage));

	// We only zero-out the LZ4 stage. It will be initialized
	// later on when the kext is loaded.
	bzero(&lz4_output_stage, sizeof(lz4_output_stage));
	lz4_stage_monitor_availability();

	// We only initialize the zlib output stage if we can reach the debugger.
	// This saves us from wasting some wired memory that will never be used
	// in other configurations.
	bzero(&zlib_output_stage, sizeof(zlib_output_stage));
	if (debug_boot_arg && (debug_boot_arg & DB_REBOOT_ALWAYS) == 0) {
		ret = zlib_stage_initialize(&zlib_output_stage);
		if (KERN_SUCCESS != ret) {
			return ret;
		}
	}

	bzero(&buffer_output_stage, sizeof(buffer_output_stage));
	ret = buffer_stage_initialize(&buffer_output_stage, kdp_crashdump_pkt_size);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&net_output_stage, sizeof(net_output_stage));
	ret = net_stage_initialize(&net_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&progress_notify_output_stage, sizeof(progress_notify_output_stage));
	ret = progress_notify_stage_initialize(&progress_notify_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	// We only zero-out the AEA stage. It will be initialized
	// later on, if it's supported and needed
	bzero(&aea_output_stage, sizeof(aea_output_stage));
	aea_stage_monitor_availability();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

#if defined(__arm64__)
	bzero(&shmem_output_stage, sizeof(shmem_output_stage));
	if (PE_consistent_debug_enabled() && PE_i_can_has_debugger(NULL)) {
		ret = shmem_stage_initialize(&shmem_output_stage);
		if (KERN_SUCCESS != ret) {
			return ret;
		}
	}
#endif /* defined(__arm64__) */

#if defined(__arm64__)
	bzero(&memory_backing_aware_buffer_output_stage, sizeof(memory_backing_aware_buffer_output_stage));
	ret = memory_backing_aware_buffer_stage_initialize(&memory_backing_aware_buffer_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}
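
/*
 * For orientation only -- a sketch, not the authoritative chaining logic
 * (which lives with the per-variant coredump paths): once initialized,
 * these stages are linked so that each one feeds the next. A disk dump
 * plausibly flows
 *
 *	[lz4 or zlib compression] -> [aea encryption, when enabled] -> [disk]
 *
 * while a network dump is packetized through the buffer stage into the net
 * stage, and an arm64 hardware shared-memory debug dump terminates in the
 * shmem stage. The progress-notify and memory-backing-aware stages can be
 * spliced in where appropriate.
 */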

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION

static bool
kern_dump_should_enforce_encryption(void)
{
	static int enforce_encryption = -1;

	// Only check once
	if (enforce_encryption == -1) {
		uint32_t coredump_encryption_flags = 0;

		// When set, the boot-arg is the sole decider
		if (!kernel_debugging_restricted() &&
		    PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags))) {
			enforce_encryption = (coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT) != 0 ? 1 : 0;
		} else {
			enforce_encryption = 0;
		}
	}

	return enforce_encryption != 0;
}

static bool
kern_dump_is_encryption_available(void)
{
	// Default to feature enabled unless boot-arg says otherwise
	uint32_t coredump_encryption_flags = COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY;

	if (!kernel_debugging_restricted()) {
		PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags));
	}

	if ((coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY) == 0) {
		return false;
	}

	return aea_stage_is_available();
}

/*
 * Initialize (or de-initialize) the encryption stage. This is done in a way such that if initializing the
 * encryption stage with a new key fails, then the existing encryption stage is left untouched. Once
 * the new stage is initialized, the old stage is uninitialized.
 *
 * This function is called whenever we have a new public key (whether from someone calling our sysctl, or because
 * we read it out of a corefile), or when encryption becomes available.
 *
 * Parameters:
 * - public_key: The public key to use when initializing the encryption stage. Can be NULL to indicate that
 *               the encryption stage should be de-initialized.
 * - public_key_size: The size of the given public key.
 */
static kern_return_t
kdp_core_init_encryption_stage(void *public_key, size_t public_key_size)
{
	kern_return_t ret = KERN_SUCCESS;
	struct kdp_output_stage new_encryption_stage = {};
	struct kdp_output_stage old_encryption_stage = {};

	lck_mtx_assert(kdp_core_encryption_stage_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&new_encryption_stage, sizeof(new_encryption_stage));

	if (public_key && kern_dump_is_encryption_available()) {
		ret = aea_stage_initialize(&new_encryption_stage, public_key, public_key_size);
		if (KERN_SUCCESS != ret) {
			printf("(kdp_core_init_encryption_stage) Failed to initialize the encryption stage. Error 0x%x\n", ret);
			return ret;
		}
	}

	bcopy(&aea_output_stage, &old_encryption_stage, sizeof(aea_output_stage));

	bcopy(&new_encryption_stage, &aea_output_stage, sizeof(new_encryption_stage));

	if (old_encryption_stage.kos_initialized && old_encryption_stage.kos_funcs.kosf_free) {
		old_encryption_stage.kos_funcs.kosf_free(&old_encryption_stage);
	}

	return KERN_SUCCESS;
}

kern_return_t
kdp_core_handle_new_encryption_key(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context)
{
	kern_return_t ret = KERN_SUCCESS;
	struct kdp_core_encryption_key_descriptor *key_descriptor = (struct kdp_core_encryption_key_descriptor *) recipient_context;
	void *old_public_key = NULL;
	size_t old_public_key_size = 0;

	if (!key_descriptor) {
		return kIOReturnBadArgument;
	}

	lck_mtx_lock(kdp_core_encryption_stage_lock);
	kdp_core_is_initializing_encryption_stage = true;

	do {
		// Do the risky part first, and bail out cleanly if it fails
		ret = kdp_core_init_encryption_stage(key_descriptor->kcekd_key, key_descriptor->kcekd_size);
		if (ret != KERN_SUCCESS) {
			printf("kdp_core_handle_new_encryption_key failed to re-initialize encryption stage. Error 0x%x\n", ret);
			break;
		}

		// The rest of this function should technically never fail

		old_public_key = kdp_core_public_key;
		old_public_key_size = kdp_core_header->pub_key_length;

		kdp_core_public_key = key_descriptor->kcekd_key;
		kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
		kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_EXISTING_COREFILE_KEY_FORMAT_MASK;
		if (key_descriptor->kcekd_key) {
			kdp_core_header->flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
			kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(key_descriptor->kcekd_format);
			kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
			kdp_core_header->pub_key_length = key_descriptor->kcekd_size;
		} else {
			kdp_core_header->pub_key_offset = 0;
			kdp_core_header->pub_key_length = 0;
		}

		/*
		 * Return the old key to the caller to free
		 */
		key_descriptor->kcekd_key = old_public_key;
		key_descriptor->kcekd_size = (uint16_t)old_public_key_size;

		// If this stuff fails, we have bigger problems
		struct mach_core_fileheader_v2 existing_header;
		bool used_existing_header = false;
		ret = access_data(access_context, FALSE, 0, sizeof(existing_header), &existing_header);
		if (ret != KERN_SUCCESS) {
			printf("kdp_core_handle_new_encryption_key failed to read the existing corefile header. Error 0x%x\n", ret);
			break;
		}

		if (existing_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
		    && existing_header.version == 2
		    && (existing_header.pub_key_length == 0
		    || kdp_core_header->pub_key_length == 0
		    || existing_header.pub_key_length == kdp_core_header->pub_key_length)) {
			used_existing_header = true;
			existing_header.flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;

			if (kdp_core_public_key) {
				existing_header.flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;

				if (existing_header.pub_key_offset == 0) {
					existing_header.pub_key_offset = kdp_core_header->pub_key_offset;
					existing_header.pub_key_length = kdp_core_header->pub_key_length;
				}
			}

			ret = access_data(access_context, TRUE, 0, sizeof(existing_header), &existing_header);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to update the existing corefile header. Error 0x%x\n", ret);
				break;
			}
		} else {
			ret = access_data(access_context, TRUE, 0, sizeof(struct mach_core_fileheader_v2), kdp_core_header);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to write the corefile header. Error 0x%x\n", ret);
				break;
			}
		}

		if (kdp_core_header->pub_key_length) {
			uint64_t offset = used_existing_header ? existing_header.pub_key_offset : kdp_core_header->pub_key_offset;
			ret = access_data(access_context, TRUE, offset + PUBLIC_KEY_RESERVED_LENGTH, kdp_core_header->pub_key_length, kdp_core_public_key);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to write the next public key. Error 0x%x\n", ret);
				break;
			}

			if (!used_existing_header) {
				// Everything that happens here is optional. It's not the end of the world if this stuff fails, so we don't return
				// any errors.
				// Since we're writing out a completely new header, we make sure to zero-out the region that's reserved for the public key.
				// This allows consumers of the corefile to know for sure that this corefile is not encrypted (yet). Once we actually
				// write out a corefile, we'll overwrite this region with the key that we ended up using at the time.
				// If we fail to zero-out this region, consumers would read garbage data and properly fail to interpret it as a public key,
				// which is why it is OK for us to fail here (it's hard to interpret garbage data as a valid key, and even then, they wouldn't
				// find a matching private key anyway)
				void *empty_key = NULL;
				kern_return_t temp_ret = KERN_SUCCESS;

				empty_key = kalloc_data(PUBLIC_KEY_RESERVED_LENGTH,
				    Z_WAITOK | Z_ZERO | Z_NOFAIL);

				temp_ret = access_data(access_context, TRUE, offset, PUBLIC_KEY_RESERVED_LENGTH, empty_key);
				kfree_data(empty_key, PUBLIC_KEY_RESERVED_LENGTH);

				if (temp_ret != KERN_SUCCESS) {
					printf("kdp_core_handle_new_encryption_key failed to zero-out the public key region. Error 0x%x\n", temp_ret);
					break;
				}
			}
		}
	} while (0);

	kdp_core_is_initializing_encryption_stage = false;
	lck_mtx_unlock(kdp_core_encryption_stage_lock);

	return ret;
}
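
/*
 * A sketch of the IOCoreFileAccessCallback contract as this file uses it,
 * inferred from the call sites above and below (the authoritative typedef
 * lives in the IOKit headers):
 *
 *	ret = access_data(access_context,  // opaque context from IOKit
 *	    FALSE,                         // FALSE = read, TRUE = write
 *	    offset,                        // byte offset into the corefile
 *	    length,                        // number of bytes to transfer
 *	    buffer);                       // destination (or source) buffer
 *
 * Expressing all header and key maintenance through this one callback keeps
 * the underlying file access mechanism (e.g. polled I/O) out of this code.
 */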

kern_return_t
kdp_core_handle_encryption_available(void)
{
	kern_return_t ret;

	lck_mtx_lock(kdp_core_encryption_stage_lock);
	kdp_core_is_initializing_encryption_stage = true;

	ret = kdp_core_init_encryption_stage(kdp_core_public_key, kdp_core_header->pub_key_length);

	kdp_core_is_initializing_encryption_stage = false;
	lck_mtx_unlock(kdp_core_encryption_stage_lock);

	return ret;
}

#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

kern_return_t
kdp_core_handle_lz4_available(void)
{
	kern_return_t ret;
	lck_mtx_lock(kdp_core_lz4_stage_lock);
	kdp_core_is_initializing_lz4_stage = true;

	ret = lz4_stage_initialize(&lz4_output_stage);

	kdp_core_is_initializing_lz4_stage = false;
	lck_mtx_unlock(kdp_core_lz4_stage_lock);

	return ret;
}

kern_return_t
kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, __unused void *recipient_context)
{
	kern_return_t ret = KERN_SUCCESS;

	lck_mtx_lock(kdp_core_disk_stage_lock);
	kdp_core_is_initializing_disk_stage = true;

	ret = disk_stage_initialize(&disk_output_stage);

	kdp_core_is_initializing_disk_stage = false;
	lck_mtx_unlock(kdp_core_disk_stage_lock);

	if (KERN_SUCCESS != ret) {
		return ret;
	}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	// If someone has already provided a new public key,
	// there's no sense in reading the old one from the corefile.
	if (kdp_core_public_key != NULL) {
		return KERN_SUCCESS;
	}

	// The kernel corefile is now available. Let's try to retrieve the public key from its
	// header (if available and supported).

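	// Key region layout assumed below, inferred from the reads and writes in
	// this file: the header reserves PUBLIC_KEY_RESERVED_LENGTH bytes at
	// pub_key_offset for the key the existing corefile contents were
	// encrypted with, immediately followed by the "next" key to use for a
	// future dump:
	//
	//	pub_key_offset                              -> existing key (reserved region)
	//	pub_key_offset + PUBLIC_KEY_RESERVED_LENGTH -> next key (adopted below)
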
	// First let's read the corefile header itself
	struct mach_core_fileheader_v2 temp_header = {};
	ret = access_data(access_context, FALSE, 0, sizeof(temp_header), &temp_header);
	if (KERN_SUCCESS != ret) {
		printf("kdp_core_polled_io_polled_file_available failed to read corefile header. Error 0x%x\n", ret);
		return ret;
	}

	// Check whether the corefile header is initialized, whether it's initialized to values that we support
	// (for backwards and forwards compatibility), and whether the header indicates that the corefile
	// has a public key stashed inside of it.
	if (temp_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
	    && temp_header.version == 2
	    && temp_header.pub_key_offset != 0
	    && temp_header.pub_key_length != 0
	    /* Future-proofing: make sure it's the key format that we support */
	    && (temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK) == MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256
	    /* Some extra sanity checks; not strictly necessary */
	    && temp_header.pub_key_length <= 4096
	    && temp_header.pub_key_offset < 65535) {
		// The corefile header is properly initialized, is supported, and contains a public key.
		// Let's adopt that public key for our encryption needs
		void *public_key = NULL;

		public_key = kalloc_data(temp_header.pub_key_length,
		    Z_ZERO | Z_WAITOK | Z_NOFAIL);

		// Read the public key from the corefile. Note that the key we're trying to adopt is the "next" key, which is
		// PUBLIC_KEY_RESERVED_LENGTH bytes after the public key.
		ret = access_data(access_context, FALSE, temp_header.pub_key_offset + PUBLIC_KEY_RESERVED_LENGTH, temp_header.pub_key_length, public_key);
		if (KERN_SUCCESS != ret) {
			printf("kdp_core_polled_io_polled_file_available failed to read the public key. Error 0x%x\n", ret);
			kfree_data(public_key, temp_header.pub_key_length);
			return ret;
		}

		lck_mtx_lock(kdp_core_encryption_stage_lock);
		kdp_core_is_initializing_encryption_stage = true;

		ret = kdp_core_init_encryption_stage(public_key, temp_header.pub_key_length);
		if (KERN_SUCCESS == ret) {
			kdp_core_header->flags |= temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
			kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(temp_header.flags);
			kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
			kdp_core_header->pub_key_length = temp_header.pub_key_length;
			kdp_core_public_key = public_key;
		}

		kdp_core_is_initializing_encryption_stage = false;
		lck_mtx_unlock(kdp_core_encryption_stage_lock);
	}
#else
#pragma unused(access_data, access_context)
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

	return ret;
}

kern_return_t
kdp_core_polled_io_polled_file_unavailable(void)
{
	lck_mtx_lock(kdp_core_disk_stage_lock);
	kdp_core_is_initializing_disk_stage = true;

	if (disk_output_stage.kos_initialized && disk_output_stage.kos_funcs.kosf_free) {
		disk_output_stage.kos_funcs.kosf_free(&disk_output_stage);
	}

	kdp_core_is_initializing_disk_stage = false;
	lck_mtx_unlock(kdp_core_disk_stage_lock);

	return KERN_SUCCESS;
}

void
kdp_core_init(void)
{
	kern_return_t kr;
	kern_coredump_callback_config core_config = { };

	/* Initialize output stages */
	kr = kdp_core_init_output_stages();
	assert(KERN_SUCCESS == kr);

	kmem_alloc(kernel_map, (vm_offset_t *)&kdp_core_header,
	    kdp_core_header_size,
	    KMA_NOFAIL | KMA_ZERO | KMA_PERMANENT | KMA_KOBJECT | KMA_DATA,
	    VM_KERN_MEMORY_DIAG);

	kdp_core_header->signature = MACH_CORE_FILEHEADER_V2_SIGNATURE;
	kdp_core_header->version = 2;

	kdp_core_initialization_lock_group = lck_grp_alloc_init("KDPCoreStageInit", LCK_GRP_ATTR_NULL);
	kdp_core_disk_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	kdp_core_encryption_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

	(void) kern_dump_should_enforce_encryption();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

	kdp_core_lz4_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

	core_config.kcc_coredump_init = kern_dump_init;
	core_config.kcc_coredump_get_summary = kern_dump_save_summary;
	core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
	core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
	core_config.kcc_coredump_save_sw_vers_detail = kern_dump_save_sw_vers_detail;
	core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
	core_config.kcc_coredump_save_note_summary = kern_dump_save_note_summary;
	core_config.kcc_coredump_save_note_descriptions = kern_dump_save_note_descriptions;
	core_config.kcc_coredump_save_note_data = kern_dump_save_note_data;

	kr = kern_register_xnu_coredump_helper(&core_config);
	assert(KERN_SUCCESS == kr);
}
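
/*
 * For orientation only: when a dump actually runs, the orchestrator first
 * gathers sizes via the summary callbacks registered above, then emits the
 * Mach-O load commands (segment descriptions, thread state, note
 * descriptions), and finally streams the payloads (segment data, note
 * data). The three note callbacks below follow that same
 * summary/description/data split.
 */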

/*
 * Additional LC_NOTES added to the core.
 */

static kern_return_t
kern_dump_save_note_summary(void *refcon __unused, core_save_note_summary_cb callback, void *context)
{
	int count = 1;
	size_t size = sizeof(addrable_bits_note_t);

#ifdef CONFIG_SPTM
	/* Load binary spec note */

	struct debug_header const *debug_header = SPTMArgs != NULL ? SPTMArgs->debug_header : NULL;

	if (debug_header != NULL &&
	    debug_header->magic == DEBUG_HEADER_MAGIC_VAL &&
	    debug_header->version == DEBUG_HEADER_CURRENT_VERSION) {
		/* Also add SPTM, TXM, and xnu kc load binary specs if present */
		count += debug_header->count;
		size += debug_header->count * sizeof(load_binary_spec_note_t);
	}
#endif /* CONFIG_SPTM */

	return callback(count, size, context);
}

static kern_return_t
kern_dump_save_note_descriptions(void *refcon __unused, core_save_note_descriptions_cb callback, void *context)
{
	int max_ret = KERN_SUCCESS;
	int ret;

	max_ret = ret = callback(ADDRABLE_BITS_DATA_OWNER, sizeof(addrable_bits_note_t), context);

#if CONFIG_SPTM
	struct debug_header const *debug_header = SPTMArgs != NULL ? SPTMArgs->debug_header : NULL;

	for (int i = 0; i < (debug_header != NULL ? debug_header->count : 0); i++) {
		ret = callback(LOAD_BINARY_SPEC_DATA_OWNER, sizeof(load_binary_spec_note_t), context);
		max_ret = MAX(ret, max_ret);
	}
#endif /* CONFIG_SPTM */

	return max_ret;
}

static kern_return_t
kern_dump_save_note_data(void *refcon __unused, core_save_note_data_cb callback, void *context)
{
	int max_ret = KERN_SUCCESS;
	int ret;

	addrable_bits_note_t note = {
		.version = ADDRABLE_BITS_VER,
		.addressing_bits = pmap_kernel_va_bits(),
		.unused = 0
	};

	max_ret = ret = callback(&note, sizeof(addrable_bits_note_t), context);

#if CONFIG_SPTM
	struct debug_header const *debug_header = SPTMArgs != NULL ? SPTMArgs->debug_header : NULL;

	for (int i = 0; i < (debug_header != NULL ? debug_header->count : 0); i++) {
		load_binary_spec_note_t load_binary_spec = {
			.version = LOAD_BINARY_SPEC_VERSION,
			.uuid = {0},
			.address = (uint64_t)debug_header->image[i],
			.slide = UINT64_MAX // unknown, load address specified
		};

		char const *name;
		switch (i) {
		case DEBUG_HEADER_ENTRY_SPTM:
			name = "sptm";
			break;
		case DEBUG_HEADER_ENTRY_XNU:
			name = "xnu";
			break;
		case DEBUG_HEADER_ENTRY_TXM:
			name = "txm";
			break;
		default:
			name = "UNKNOWN";
			kern_coredump_log(context, "%s(): encountered unknown debug header entry %d, "
			    "including anyway with name '%s'\n", __func__, i, name);
		}

		strlcpy(load_binary_spec.name_cstring, name, LOAD_BINARY_NAME_BUF_SIZE);

		ret = callback(&load_binary_spec, sizeof(load_binary_spec), context);

		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s(): failed to write load binary spec structure "
			    "for binary #%d ('%s'): callback returned 0x%x\n",
			    __func__, i, name, ret);
			max_ret = MAX(ret, max_ret);
		}
	}
#endif /* CONFIG_SPTM */

	return max_ret;
}
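
/*
 * Illustrative sketch only: a consumer of the "addrable bits" note (a
 * debugger or corefile parser, not this code) could use addressing_bits to
 * canonicalize a kernel pointer whose upper bits carry PAC or other
 * metadata, along these lines:
 *
 *	uint64_t mask  = (1ULL << note->addressing_bits) - 1;
 *	uint64_t vaddr = (raw & mask) | ~mask;	// re-sign-extend a kernel VA
 */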

#else

void
kdp_core_exclude_region(__unused vm_offset_t addr, __unused vm_size_t size)
{
}

void
kdp_core_unexclude_region(__unused vm_offset_t addr, __unused vm_size_t size)
{
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
