/*
 * Copyright (c) 2015-2017 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/zlib.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/processor_core.h>
#include <IOKit/IOPolledInterface.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>
#include <san/kasan.h>

#if defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <kern/debug.h>
#endif /* defined(__x86_64__) */

#if CONFIG_EMBEDDED
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <pexpert/arm/consistent_debug.h>

#if !defined(ROUNDUP)
#define ROUNDUP(a, b) (((a) + ((b) - 1)) & (~((b) - 1)))
#endif

#if !defined(ROUNDDOWN)
#define ROUNDDOWN(a, b) ((a) & ~((b) - 1))
#endif
#endif /* CONFIG_EMBEDDED */

typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
                                      vm_map_offset_t end,
                                      void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
                                          vm_map_offset_t start,
                                          vm_map_offset_t end,
                                          pmap_traverse_callback callback,
                                          void *context);

static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
static int kern_dump_save_sw_vers(void *refcon, core_save_sw_vers_cb callback, void *context);
static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context);
static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context);

static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context);

struct kdp_core_out_vars;
typedef int (*kern_dump_output_proc)(unsigned int request, char *corename,
                                     uint64_t length, void *panic_data);

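/*
 * Per-dump output state shared by the zlib callbacks and the output
 * procedure: 'zoutput' is the routine used to emit compressed bytes,
 * 'zipped' counts bytes handed to the output proc, and 'outbuf'/'outlen'/
 * 'outremain' describe the staging buffer used when output must be
 * packetized (e.g. network dumps).
 */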
struct kdp_core_out_vars
{
    kern_dump_output_proc outproc;
    z_output_func zoutput;
    size_t zipped;
    uint64_t totalbytes;
    uint64_t lastpercent;
    IOReturn error;
    unsigned outremain;
    unsigned outlen;
    unsigned writes;
    Bytef * outbuf;
};

extern uint32_t kdp_crashdump_pkt_size;

static vm_offset_t kdp_core_zmem;
static size_t kdp_core_zsize;
static size_t kdp_core_zoffset;
static z_stream kdp_core_zs;

static uint64_t kdp_core_total_size;
static uint64_t kdp_core_total_size_sent_uncomp;
#if CONFIG_EMBEDDED
struct xnu_hw_shmem_dbg_command_info *hwsd_info = NULL;

#define KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS 2
#define KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE 64 * 1024

/*
 * Astris can read up to 4064 bytes at a time over
 * the probe, so we should try to make our buffer
 * size a multiple of this to make reads by astris
 * (the bottleneck) most efficient.
 */
#define OPTIMAL_ASTRIS_READSIZE 4064

struct kdp_hw_shmem_dbg_buf_elm {
    vm_offset_t khsd_buf;
    uint32_t khsd_data_length;
    STAILQ_ENTRY(kdp_hw_shmem_dbg_buf_elm) khsd_elms;
};

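/*
 * Buffers cycle between two queues: 'free_hw_shmem_dbg_bufs' holds empty
 * buffers available for filling, and 'hw_shmem_dbg_bufs_to_flush' holds
 * full buffers waiting to be handed to the hardware debugger.
 */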
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) free_hw_shmem_dbg_bufs =
    STAILQ_HEAD_INITIALIZER(free_hw_shmem_dbg_bufs);
static STAILQ_HEAD(, kdp_hw_shmem_dbg_buf_elm) hw_shmem_dbg_bufs_to_flush =
    STAILQ_HEAD_INITIALIZER(hw_shmem_dbg_bufs_to_flush);

static struct kdp_hw_shmem_dbg_buf_elm *currently_filling_buf = NULL;
static struct kdp_hw_shmem_dbg_buf_elm *currently_flushing_buf = NULL;

static uint32_t kdp_hw_shmem_dbg_bufsize = 0;

static uint32_t kdp_hw_shmem_dbg_seq_no = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline = 0;
static uint64_t kdp_hw_shmem_dbg_contact_deadline_interval = 0;

#define KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS 30
#endif /* CONFIG_EMBEDDED */

static boolean_t kern_dump_successful = FALSE;

struct mach_core_fileheader kdp_core_header = { };

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

boolean_t kdp_has_polled_corefile(void)
{
    return (NULL != gIOPolledCoreFileVars);
}

kern_return_t kdp_polled_corefile_error(void)
{
    return gIOPolledCoreFileOpenRet;
}
#if CONFIG_EMBEDDED
/*
 * Whenever we start a coredump, make sure the buffers
 * are all on the free queue and the state is as expected.
 * The buffers may have been left in a different state if
 * a previous coredump attempt failed.
 */
static void
kern_dump_hw_shmem_dbg_reset()
{
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL, *tmp_elm = NULL;

    STAILQ_FOREACH(cur_elm, &free_hw_shmem_dbg_bufs, khsd_elms) {
        cur_elm->khsd_data_length = 0;
    }

    if (currently_filling_buf != NULL) {
        currently_filling_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_filling_buf, khsd_elms);
        currently_filling_buf = NULL;
    }

    if (currently_flushing_buf != NULL) {
        currently_flushing_buf->khsd_data_length = 0;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        currently_flushing_buf = NULL;
    }

    STAILQ_FOREACH_SAFE(cur_elm, &hw_shmem_dbg_bufs_to_flush, khsd_elms, tmp_elm) {
        cur_elm->khsd_data_length = 0;

        STAILQ_REMOVE(&hw_shmem_dbg_bufs_to_flush, cur_elm, kdp_hw_shmem_dbg_buf_elm, khsd_elms);
        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_EMPTY;
    kdp_hw_shmem_dbg_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() + kdp_hw_shmem_dbg_contact_deadline_interval;
}

/*
 * Tries to move buffers forward in 'progress'. If
 * the hardware debugger is done consuming the current buffer, we
 * can put the next one on it and move the current
 * buffer back to the free queue.
 */
static int
kern_dump_hw_shmem_dbg_process_buffers()
{
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
    if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR) {
        kern_coredump_log(NULL, "Detected remote error, terminating...\n");
        return -1;
    } else if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BUF_EMPTY) {
        if (hwsd_info->xhsdci_seq_no != (kdp_hw_shmem_dbg_seq_no + 1)) {
            kern_coredump_log(NULL, "Detected stale/invalid seq num. Expected: %d, received %d\n",
                    (kdp_hw_shmem_dbg_seq_no + 1), hwsd_info->xhsdci_seq_no);
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        if (currently_flushing_buf != NULL) {
            currently_flushing_buf->khsd_data_length = 0;
            STAILQ_INSERT_TAIL(&free_hw_shmem_dbg_bufs, currently_flushing_buf, khsd_elms);
        }

        currently_flushing_buf = STAILQ_FIRST(&hw_shmem_dbg_bufs_to_flush);
        if (currently_flushing_buf != NULL) {
            STAILQ_REMOVE_HEAD(&hw_shmem_dbg_bufs_to_flush, khsd_elms);

            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
            hwsd_info->xhsdci_buf_phys_addr = kvtophys(currently_flushing_buf->khsd_buf);
            hwsd_info->xhsdci_buf_data_length = currently_flushing_buf->khsd_data_length;
            hwsd_info->xhsdci_coredump_total_size_uncomp = kdp_core_total_size;
            hwsd_info->xhsdci_coredump_total_size_sent_uncomp = kdp_core_total_size_sent_uncomp;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE);
            hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
            hwsd_info->xhsdci_status = XHSDCI_COREDUMP_BUF_READY;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }

        kdp_hw_shmem_dbg_contact_deadline = mach_absolute_time() +
            kdp_hw_shmem_dbg_contact_deadline_interval;

        return 0;
    } else if (mach_absolute_time() > kdp_hw_shmem_dbg_contact_deadline) {
        kern_coredump_log(NULL, "Kernel timed out waiting for hardware debugger to update handshake structure.");
        kern_coredump_log(NULL, "No contact in %d seconds\n", KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS);

        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_ERROR;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        return -1;
    }

    return 0;
}

/*
 * Populates currently_filling_buf with a new buffer
 * once one becomes available. Returns 0 on success
 * or the value returned by kern_dump_hw_shmem_dbg_process_buffers()
 * if it is non-zero (an error).
 */
static int
kern_dump_hw_shmem_dbg_get_buffer()
{
    int ret = 0;

    assert(currently_filling_buf == NULL);

    while (STAILQ_EMPTY(&free_hw_shmem_dbg_bufs)) {
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        if (ret) {
            return ret;
        }
    }

    currently_filling_buf = STAILQ_FIRST(&free_hw_shmem_dbg_bufs);
    STAILQ_REMOVE_HEAD(&free_hw_shmem_dbg_bufs, khsd_elms);

    assert(currently_filling_buf->khsd_data_length == 0);
    return ret;
}

/*
 * Output procedure for hardware shared memory core dumps
 *
 * Tries to fill up the buffer completely before flushing
 */
static int
kern_dump_hw_shmem_dbg_buffer_proc(unsigned int request, __unused char *corename,
                                   uint64_t length, void * data)
{
    int ret = 0;

    assert(length < UINT32_MAX);
    uint32_t bytes_remaining = (uint32_t) length;
    uint32_t bytes_to_copy;

    if (request == KDP_EOF) {
        assert(currently_filling_buf == NULL);

        /*
         * Wait until we've flushed all the buffers
         * before setting the connection status to done.
         */
        while (!STAILQ_EMPTY(&hw_shmem_dbg_bufs_to_flush) ||
                currently_flushing_buf != NULL) {
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        /*
         * If the last status we saw indicates that the buffer was
         * empty and we didn't flush any new data since then, we expect
         * the sequence number to still match the last we saw.
         */
        if (hwsd_info->xhsdci_seq_no < kdp_hw_shmem_dbg_seq_no) {
            kern_coredump_log(NULL, "EOF Flush: Detected stale/invalid seq num. Expected: %d, received %d\n",
                    kdp_hw_shmem_dbg_seq_no, hwsd_info->xhsdci_seq_no);
            return -1;
        }

        kdp_hw_shmem_dbg_seq_no = hwsd_info->xhsdci_seq_no;

        kern_coredump_log(NULL, "Setting coredump status as done!\n");
        hwsd_info->xhsdci_seq_no = ++kdp_hw_shmem_dbg_seq_no;
        hwsd_info->xhsdci_status = XHSDCI_COREDUMP_STATUS_DONE;
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

        return ret;
    }

    assert(request == KDP_DATA);

    /*
     * The output procedure is called with length == 0 and data == NULL
     * to flush any remaining output at the end of the coredump before
     * we call it a final time to mark the dump as done.
     */
    if (length == 0) {
        assert(data == NULL);

        if (currently_filling_buf != NULL) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;
        }

        /*
         * Move the current buffer along if possible.
         */
        ret = kern_dump_hw_shmem_dbg_process_buffers();
        return ret;
    }

    while (bytes_remaining != 0) {
        /*
         * Make sure we have a buffer to work with.
         */
        while (currently_filling_buf == NULL) {
            ret = kern_dump_hw_shmem_dbg_get_buffer();
            if (ret) {
                return ret;
            }
        }

        assert(kdp_hw_shmem_dbg_bufsize >= currently_filling_buf->khsd_data_length);
        bytes_to_copy = MIN(bytes_remaining, kdp_hw_shmem_dbg_bufsize -
                currently_filling_buf->khsd_data_length);
        bcopy(data, (void *)(currently_filling_buf->khsd_buf + currently_filling_buf->khsd_data_length),
                bytes_to_copy);

        currently_filling_buf->khsd_data_length += bytes_to_copy;

        if (currently_filling_buf->khsd_data_length == kdp_hw_shmem_dbg_bufsize) {
            STAILQ_INSERT_TAIL(&hw_shmem_dbg_bufs_to_flush, currently_filling_buf, khsd_elms);
            currently_filling_buf = NULL;

            /*
             * Move it along if possible.
             */
            ret = kern_dump_hw_shmem_dbg_process_buffers();
            if (ret) {
                return ret;
            }
        }

        bytes_remaining -= bytes_to_copy;
        data = (void *) ((uintptr_t)data + bytes_to_copy);
    }

    return ret;
}
#endif /* CONFIG_EMBEDDED */

static IOReturn
kern_dump_disk_proc(unsigned int request, __unused char *corename,
                    uint64_t length, void * data)
{
    uint64_t noffset;
    uint32_t err = kIOReturnSuccess;

    switch (request)
    {
        case KDP_WRQ:
            err = IOPolledFileSeek(gIOPolledCoreFileVars, 0);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileSeek(gIOPolledCoreFileVars, 0) returned 0x%x\n", err);
                break;
            }
            err = IOPolledFilePollersOpen(gIOPolledCoreFileVars, kIOPolledBeforeSleepState, false);
            break;

        case KDP_SEEK:
            noffset = *((uint64_t *) data);
            err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileWrite (during seek) returned 0x%x\n", err);
                break;
            }
            err = IOPolledFileSeek(gIOPolledCoreFileVars, noffset);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileSeek(0x%llx) returned 0x%x\n", noffset, err);
            }
            break;

        case KDP_DATA:
            err = IOPolledFileWrite(gIOPolledCoreFileVars, data, length, NULL);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileWrite(gIOPolledCoreFileVars, %p, 0x%llx, NULL) returned 0x%x\n",
                        data, length, err);
                break;
            }
            break;

#if CONFIG_EMBEDDED
        /* Only supported on embedded by the underlying polled mode driver */
        case KDP_FLUSH:
            err = IOPolledFileFlush(gIOPolledCoreFileVars);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileFlush() returned 0x%x\n", err);
                break;
            }
            break;
#endif

        case KDP_EOF:
            err = IOPolledFileWrite(gIOPolledCoreFileVars, 0, 0, NULL);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFileWrite (during EOF) returned 0x%x\n", err);
                break;
            }
            err = IOPolledFilePollersClose(gIOPolledCoreFileVars, kIOPolledBeforeSleepState);
            if (kIOReturnSuccess != err) {
                kern_coredump_log(NULL, "IOPolledFilePollersClose (during EOF) returned 0x%x\n", err);
                break;
            }
            break;
    }

    return (err);
}

/*
 * flushes any data to the output proc immediately
 */
static int
kdp_core_zoutput(z_streamp strm, Bytef *buf, unsigned len)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    IOReturn ret;

    vars->zipped += len;

    if (vars->error >= 0)
    {
        if ((ret = (*vars->outproc)(KDP_DATA, NULL, len, buf)) != kIOReturnSuccess)
        {
            kern_coredump_log(NULL, "(kdp_core_zoutput) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
                    len, buf, ret);
            vars->error = ret;
        }
        if (!buf && !len) kern_coredump_log(NULL, "100..");
    }
    return (len);
}

/*
 * tries to fill the buffer with data before flushing it via the output proc.
 */
static int
kdp_core_zoutputbuf(z_streamp strm, Bytef *inbuf, unsigned inlen)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    unsigned remain;
    IOReturn ret;
    unsigned chunk;
    boolean_t flush;

    remain = inlen;
    vars->zipped += inlen;
    flush = (!inbuf && !inlen);

    while ((vars->error >= 0) && (remain || flush))
    {
        chunk = vars->outremain;
        if (chunk > remain) chunk = remain;
        if (!inbuf) bzero(&vars->outbuf[vars->outlen - vars->outremain], chunk);
        else
        {
            bcopy(inbuf, &vars->outbuf[vars->outlen - vars->outremain], chunk);
            inbuf += chunk;
        }
        vars->outremain -= chunk;
        remain -= chunk;

        if (vars->outremain && !flush) break;
        if ((ret = (*vars->outproc)(KDP_DATA, NULL,
                vars->outlen - vars->outremain,
                vars->outbuf)) != kIOReturnSuccess)
        {
            kern_coredump_log(NULL, "(kdp_core_zoutputbuf) outproc(KDP_DATA, NULL, 0x%x, %p) returned 0x%x\n",
                    (vars->outlen - vars->outremain), vars->outbuf, ret);
            vars->error = ret;
        }
        if (flush)
        {
            kern_coredump_log(NULL, "100..");
            flush = false;
        }
        vars->outremain = vars->outlen;
    }
    return (inlen);
}

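/*
 * zlib input callback: copies up to 'size' bytes of pending input into the
 * deflate buffer (or zeroes it when the zero-fill marker is set), maintains
 * the running CRC, and periodically logs overall progress as a percentage.
 */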
static int
kdp_core_zinput(z_streamp strm, Bytef *buf, unsigned size)
{
    struct kdp_core_out_vars * vars = (typeof(vars)) strm->opaque;
    uint64_t percent, total_in = 0;
    unsigned len;

    len = strm->avail_in;
    if (len > size) len = size;
    if (len == 0) return 0;

    if (strm->next_in != (Bytef *) strm) memcpy(buf, strm->next_in, len);
    else bzero(buf, len);
    strm->adler = z_crc32(strm->adler, buf, len);

    strm->avail_in -= len;
    strm->next_in += len;
    strm->total_in += len;

    if (0 == (511 & vars->writes++))
    {
        total_in = strm->total_in;
        kdp_core_total_size_sent_uncomp = strm->total_in;

        percent = (total_in * 100) / vars->totalbytes;
        if ((percent - vars->lastpercent) >= 10)
        {
            vars->lastpercent = percent;
            kern_coredump_log(NULL, "%lld..\n", percent);
        }
    }

    return (int)len;
}

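/*
 * Pushes one chunk of data through the deflate stream (or hands it straight
 * to the output routine when compression is disabled). A call with
 * length == 0 and data == NULL finishes the stream and flushes the output.
 */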
static IOReturn
kdp_core_stream_output_chunk(struct kdp_core_out_vars * vars, unsigned length, void * data)
{
    z_stream * zs;
    int zr;
    boolean_t flush;

    zs = &kdp_core_zs;

    if (kdp_corezip_disabled)
    {
        (*vars->zoutput)(zs, data, length);
    }
    else
    {

        flush = (!length && !data);
        zr = Z_OK;

        assert(!zs->avail_in);

        while (vars->error >= 0)
        {
            if (!zs->avail_in && !flush)
            {
                if (!length) break;
                zs->next_in = data ? data : (Bytef *) zs /* zero marker */;
                zs->avail_in = length;
                length = 0;
            }
            if (!zs->avail_out)
            {
                zs->next_out = (Bytef *) zs;
                zs->avail_out = UINT32_MAX;
            }
            zr = deflate(zs, flush ? Z_FINISH : Z_NO_FLUSH);
            if (Z_STREAM_END == zr) break;
            if (zr != Z_OK)
            {
                kern_coredump_log(NULL, "ZERR %d\n", zr);
                vars->error = zr;
            }
        }

        if (flush) (*vars->zoutput)(zs, NULL, 0);
    }

    return (vars->error);
}

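/*
 * Entry point used by the coredump layer to emit data. Large writes are
 * split into chunks no bigger than kMaxZLibChunk so the lengths passed to
 * the 32-bit zlib interfaces cannot overflow.
 */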
kern_return_t
kdp_core_output(void *kdp_core_out_vars, uint64_t length, void * data)
{
    IOReturn err;
    unsigned int chunk;
    enum { kMaxZLibChunk = 1024*1024*1024 };
    struct kdp_core_out_vars *vars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    do
    {
        if (length <= kMaxZLibChunk) chunk = (typeof(chunk)) length;
        else chunk = kMaxZLibChunk;
        err = kdp_core_stream_output_chunk(vars, chunk, data);

        length -= chunk;
        if (data) data = (void *) (((uintptr_t) data) + chunk);
    }
    while (length && (kIOReturnSuccess == err));

    return (err);
}

#if defined(__arm__) || defined(__arm64__)
extern pmap_paddr_t avail_start, avail_end;
extern struct vm_object pmap_object_store;
#endif
extern vm_offset_t c_buffers;
extern vm_size_t c_buffers_size;

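/*
 * Returns the physical page number backing 'vaddr' if it is mapped, or 0 if
 * it is not (or if it falls in a region intentionally excluded from the
 * dump, such as the compressor buffers, the zlib scratch memory or the
 * ramdisk). '*pvincr' is set to the number of bytes the caller should skip
 * forward, and '*pvphysaddr' (if non-NULL) receives a kernel virtual alias
 * usable to read the page.
 */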
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
    ppnum_t ppn = 0;
    uint64_t vincr = PAGE_SIZE_64;

    assert(!(vaddr & PAGE_MASK_64));

    /* VA ranges to exclude */
    if (vaddr == c_buffers)
    {
        /* compressor data */
        ppn = 0;
        vincr = c_buffers_size;
    }
    else if (vaddr == kdp_core_zmem)
    {
        /* zlib working memory */
        ppn = 0;
        vincr = kdp_core_zsize;
    }
    else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr))
    {
        ppn = 0;
        vincr = kdp_core_ramdisk_size;
    }
    else
#if defined(__arm64__)
    if (vaddr == _COMM_HIGH_PAGE64_BASE_ADDRESS)
    {
        /* not readable */
        ppn = 0;
        vincr = _COMM_PAGE_AREA_LENGTH;
    }
    else
#endif /* defined(__arm64__) */
#if defined(__arm__) || defined(__arm64__)
    if (vaddr == phystokv(avail_start))
    {
        /* physical memory map */
        ppn = 0;
        vincr = (avail_end - avail_start);
    }
    else
#endif /* defined(__arm__) || defined(__arm64__) */
    ppn = pmap_find_phys(kernel_pmap, vaddr);

    *pvincr = round_page_64(vincr);

    if (ppn && pvphysaddr)
    {
        uint64_t phys = ptoa_64(ppn);
#if defined(__arm__) || defined(__arm64__)
        if (isphysmem(phys)) *pvphysaddr = phystokv(phys);
#else
        if (physmap_enclosed(phys)) *pvphysaddr = (uintptr_t)PHYSMAP_PTOV(phys);
#endif
        else ppn = 0;
    }

    return (ppn);
}

int
pmap_traverse_present_mappings(pmap_t __unused pmap,
                               vm_map_offset_t start,
                               vm_map_offset_t end,
                               pmap_traverse_callback callback,
                               void *context)
{
    IOReturn ret;
    vm_map_offset_t vcurstart, vcur;
    uint64_t vincr = 0;
    vm_map_offset_t debug_start;
    vm_map_offset_t debug_end;
    boolean_t lastvavalid;
#if defined(__arm__) || defined(__arm64__)
    vm_page_t m = VM_PAGE_NULL;
#endif

    debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
    debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));

#if defined(__x86_64__)
    assert(!is_ept_pmap(pmap));
#endif

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) return (KERN_INVALID_ARGUMENT);

    ret = KERN_SUCCESS;
    lastvavalid = FALSE;
    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end); ) {
        ppnum_t ppn = 0;

#if defined(__arm__) || defined(__arm64__)
        /* We're at the start of the physmap, so pull out the pagetable pages that
         * are accessed through that region.*/
        if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store))
            m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);

        if (m != VM_PAGE_NULL)
        {
            vm_map_offset_t vprev = vcur;
            ppn = (ppnum_t)atop(avail_end);
            while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m))
            {
                /* Ignore pages that come from the static region and have already been dumped.*/
                if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start))
                {
                    ppn = VM_PAGE_GET_PHYS_PAGE(m);
                    break;
                }
                m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
            }
            vincr = PAGE_SIZE_64;
            if (ppn == atop(avail_end))
            {
                vm_object_unlock(&pmap_object_store);
                m = VM_PAGE_NULL;
                // avail_end is not a valid physical address,
                // so phystokv(avail_end) may not produce the expected result.
                vcur = phystokv(avail_start) + (avail_end - avail_start);
            } else {
                m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
                vcur = phystokv(ptoa(ppn));
            }
            if (vcur != vprev)
            {
                ret = callback(vcurstart, vprev, context);
                lastvavalid = FALSE;
            }
        }
        if (m == VM_PAGE_NULL)
            ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#else /* defined(__arm__) || defined(__arm64__) */
        ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif
        if (ppn != 0)
        {
            if (((vcur < debug_start) || (vcur >= debug_end))
                && !(EFI_VALID_PAGE(ppn) ||
                     pmap_valid_page(ppn)))
            {
                /* not something we want */
                ppn = 0;
            }
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }

#if defined(__x86_64__)
            /* Try to skip by 2MB if possible */
            if (((vcur & PDMASK) == 0) && cpu_64bit) {
                pd_entry_t *pde;
                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vincr = NBPD;
                    }
                }
            }
#endif /* defined(__x86_64__) */
        }
        vcur += vincr;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }

#if KASAN
    if (ret == KERN_SUCCESS) {
        ret = kasan_traverse_mappings(callback, context);
    }
#endif

    return (ret);
}

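/*
 * The preflight traversal only counts regions and dumpable bytes; the totals
 * are used to size the core file headers before any segment data is written.
 */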
struct kern_dump_preflight_context
{
    uint32_t region_count;
    uint64_t dumpable_bytes;
};

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
                                           vm_map_offset_t end,
                                           void *context)
{
    struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
    IOReturn ret = KERN_SUCCESS;

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    return (ret);
}


struct kern_dump_send_seg_desc_context
{
    core_save_segment_descriptions_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context)
{
    struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
    uint64_t seg_start = (uint64_t) start;
    uint64_t seg_end = (uint64_t) end;

    return kds_context->callback(seg_start, seg_end, kds_context->context);
}

struct kern_dump_send_segdata_context
{
    core_save_segment_data_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
                                              vm_map_offset_t end,
                                              void *context)
{
    struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;

    return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
}

static int
kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
{
    struct kern_dump_preflight_context kdc_preflight = { };
    uint64_t thread_state_size = 0, thread_count = 0;
    kern_return_t ret;

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS,
            kern_dump_pmap_traverse_preflight_callback,
            &kdc_preflight);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
        return ret;
    }

    kern_collectth_state_size(&thread_count, &thread_state_size);

    ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
            thread_count, thread_state_size, 0, context);
    return ret;
}

static int
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
    kern_return_t ret;
    struct kern_dump_send_seg_desc_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS,
            kern_dump_pmap_traverse_send_segdesc_callback,
            &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
    kern_return_t ret;
    uint64_t thread_state_size = 0, thread_count = 0;

    kern_collectth_state_size(&thread_count, &thread_state_size);

    if (thread_state_size > 0) {
        void * iter = NULL;
        do {
            kern_collectth_state (current_thread(), buf, thread_state_size, &iter);

            ret = callback(buf, context);
            if (ret != KERN_SUCCESS) {
                return ret;
            }
        } while (iter);
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_sw_vers(__unused void *refcon, core_save_sw_vers_cb callback, void *context)
{
    return callback(&kdp_kernelversion_string, sizeof(kdp_kernelversion_string), context);
}

static int
kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
{
    kern_return_t ret;
    struct kern_dump_send_segdata_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
            VM_MIN_KERNEL_AND_KEXT_ADDRESS,
            VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

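/*
 * Resets the output/compression state before each file written into the
 * corefile (the kernel core, any co-processor cores, the panic stackshot),
 * so each one is produced as an independent compressed stream.
 */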
kern_return_t
kdp_reset_output_vars(void *kdp_core_out_vars, uint64_t totalbytes)
{
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    /* Re-initialize kdp_outvars */
    outvars->zipped = 0;
    outvars->totalbytes = totalbytes;
    outvars->lastpercent = 0;
    outvars->error = kIOReturnSuccess;
    outvars->outremain = 0;
    outvars->outlen = 0;
    outvars->writes = 0;
    outvars->outbuf = NULL;

    if (outvars->outproc == &kdp_send_crashdump_data) {
        /* KERN_DUMP_NET */
        outvars->outbuf = (Bytef *) (kdp_core_zmem + kdp_core_zoffset);
        outvars->outremain = outvars->outlen = kdp_crashdump_pkt_size;
    }

    kdp_core_total_size = totalbytes;

    /* Re-initialize zstream variables */
    kdp_core_zs.avail_in = 0;
    kdp_core_zs.next_in = NULL;
    kdp_core_zs.avail_out = 0;
    kdp_core_zs.next_out = NULL;
    kdp_core_zs.opaque = outvars;

    deflateResetWithIO(&kdp_core_zs, kdp_core_zinput, outvars->zoutput);

    return KERN_SUCCESS;
}

static int
kern_dump_update_header(struct kdp_core_out_vars *outvars)
{
    uint64_t foffset;
    int ret;

    /* Write the file header -- first seek to the beginning of the file */
    foffset = 0;
    if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                sizeof(foffset), &foffset, foffset, ret);
        return ret;
    }

    if ((ret = (outvars->outproc)(KDP_DATA, NULL, sizeof(kdp_core_header), &kdp_core_header)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                sizeof(kdp_core_header), &kdp_core_header, ret);
        return ret;
    }

    if ((ret = (outvars->outproc)(KDP_DATA, NULL, 0, NULL)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
        return ret;
    }

#if CONFIG_EMBEDDED
    if ((ret = (outvars->outproc)(KDP_FLUSH, NULL, 0, NULL)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
        return ret;
    }
#endif

    return KERN_SUCCESS;
}

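/*
 * Records the location and compressed length of the file that was just
 * written into the corefile header's file table, then rewrites the on-disk
 * header so the corefile remains self-describing.
 */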
int
kern_dump_record_file(void *kdp_core_out_vars, const char *filename, uint64_t file_offset, uint64_t *out_file_length)
{
    int ret = 0;
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;

    assert(kdp_core_header.num_files < KERN_COREDUMP_MAX_CORES);
    assert(out_file_length != NULL);
    *out_file_length = 0;

    kdp_core_header.files[kdp_core_header.num_files].gzip_offset = file_offset;
    kdp_core_header.files[kdp_core_header.num_files].gzip_length = outvars->zipped;
    strncpy((char *)&kdp_core_header.files[kdp_core_header.num_files].core_name, filename,
            MACH_CORE_FILEHEADER_NAMELEN);
    kdp_core_header.files[kdp_core_header.num_files].core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';
    kdp_core_header.num_files++;
    kdp_core_header.signature = MACH_CORE_FILEHEADER_SIGNATURE;

    ret = kern_dump_update_header(outvars);
    if (ret == KERN_SUCCESS) {
        *out_file_length = outvars->zipped;
    }

    return ret;
}

int
kern_dump_seek_to_next_file(void *kdp_core_out_vars, uint64_t next_file_offset)
{
    struct kdp_core_out_vars *outvars = (struct kdp_core_out_vars *)kdp_core_out_vars;
    int ret;

    if ((ret = (outvars->outproc)(KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
    }

    return ret;
}

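/*
 * Drives a complete dump for the chosen output variant. For disk dumps the
 * file is laid out as: corefile header, panic/coredump log area, then the
 * (aligned) compressed core data; the logs and header are written last,
 * once their final sizes are known.
 */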
static int
do_kern_dump(kern_dump_output_proc outproc, enum kern_dump_type kd_variant)
{
    struct kdp_core_out_vars outvars = { };

    char *log_start = NULL, *buf = NULL;
    size_t existing_log_size = 0, new_log_len = 0;
    uint64_t foffset = 0;
    int ret = 0;
    boolean_t output_opened = FALSE, dump_succeeded = TRUE;

    /*
     * Record the initial panic log buffer length so we can dump the coredump log
     * and panic log to disk
     */
    log_start = debug_buf_ptr;
#if CONFIG_EMBEDDED
    assert(panic_info->eph_other_log_offset != 0);
    assert(panic_info->eph_panic_log_len != 0);
    /* Include any data from before the panic log as well */
    existing_log_size = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
            panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
    if (panic_info->mph_panic_log_offset != 0) {
        existing_log_size = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
                panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
    }
#endif /* CONFIG_EMBEDDED */

    assert (existing_log_size <= debug_buf_size);

    if (kd_variant == KERN_DUMP_DISK) {
        /* Open the file for output */
        if ((ret = (*outproc)(KDP_WRQ, NULL, 0, NULL)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    }
    output_opened = true;

    /* Initialize gzip, output context */
    bzero(&outvars, sizeof(outvars));
    outvars.outproc = outproc;

    if (kd_variant == KERN_DUMP_DISK) {
        outvars.zoutput = kdp_core_zoutput;
        /* Space for file header, panic log, core log */
        foffset = (KERN_COREDUMP_HEADERSIZE + existing_log_size + KERN_COREDUMP_MAXDEBUGLOGSIZE +
                KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1) & ~(KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN - 1);
        kdp_core_header.log_offset = KERN_COREDUMP_HEADERSIZE;

        /* Seek the calculated offset (we'll scrollback later to flush the logs and header) */
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                    sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    } else if (kd_variant == KERN_DUMP_NET) {
        assert((kdp_core_zoffset + kdp_crashdump_pkt_size) <= kdp_core_zsize);
        outvars.zoutput = kdp_core_zoutputbuf;
#if CONFIG_EMBEDDED
    } else { /* KERN_DUMP_HW_SHMEM_DBG */
        outvars.zoutput = kdp_core_zoutput;
        kern_dump_hw_shmem_dbg_reset();
#endif
    }

#if defined(__arm__) || defined(__arm64__)
    flush_mmu_tlb();
#endif

    kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores..." :
            "Transmitting kernel state, please wait:\n");

    if (kd_variant == KERN_DUMP_DISK) {
        /*
         * Dump co-processors as well, foffset will be overwritten with the
         * offset of the next location in the file to be written to.
         */
        if (kern_do_coredump(&outvars, FALSE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    } else {
        /* Only the kernel */
        if (kern_do_coredump(&outvars, TRUE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    }

    if (kd_variant == KERN_DUMP_DISK) {
#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
        /* Write the macOS panic stackshot on its own to a separate 'corefile' */
        if (panic_stackshot_buf && panic_stackshot_len) {
            uint64_t compressed_stackshot_len = 0;

            /* Seek to the offset of the next 'file' (foffset provided/updated from kern_do_coredump) */
            if ((ret = kern_dump_seek_to_next_file(&outvars, foffset)) != kIOReturnSuccess) {
                kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kdp_reset_output_vars(&outvars, panic_stackshot_len)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to reset outvars for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kdp_core_output(&outvars, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outvars, %lu, %p) returned 0x%x\n",
                        panic_stackshot_len, (void *) panic_stackshot_buf, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kdp_core_output(&outvars, 0, NULL)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outvars, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kern_dump_record_file(&outvars, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
                dump_succeeded = FALSE;
            } else {
                kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
            }
        }
#endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */

        /* Write the debug log -- first seek to the end of the corefile header */
        foffset = KERN_COREDUMP_HEADERSIZE;
        if ((ret = (*outproc)(KDP_SEEK, NULL, sizeof(foffset), &foffset)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                    sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        new_log_len = debug_buf_ptr - log_start;
        if (new_log_len > KERN_COREDUMP_MAXDEBUGLOGSIZE) {
            new_log_len = KERN_COREDUMP_MAXDEBUGLOGSIZE;
        }

        /* This data is after the panic stackshot, we need to write it separately */
#if CONFIG_EMBEDDED
        existing_log_size -= panic_info->eph_other_log_len;
#else
        if (existing_log_size) {
            existing_log_size -= panic_info->mph_other_log_len;
        }
#endif

        /*
         * Write out the paniclog (from the beginning of the debug
         * buffer until the start of the stackshot)
         */
        buf = debug_buf_base;
        if ((ret = (*outproc)(KDP_DATA, NULL, existing_log_size, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                    existing_log_size, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        /*
         * The next part of the log we're interested in is the beginning of the 'other' log.
         * Include any data after the panic stackshot but before we started the coredump log
         * (see above)
         */
#if CONFIG_EMBEDDED
        buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
        new_log_len += panic_info->eph_other_log_len;
#else /* CONFIG_EMBEDDED */
        buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
        new_log_len += panic_info->mph_other_log_len;
#endif /* CONFIG_EMBEDDED */

        /* Write the coredump log */
        if ((ret = (*outproc)(KDP_DATA, NULL, new_log_len, buf)) != kIOReturnSuccess) {
            kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                    new_log_len, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        kdp_core_header.log_length = existing_log_size + new_log_len;
        kern_dump_update_header(&outvars);
    }

exit:
    /* close / last packet */
    if (output_opened && (ret = (*outproc)(KDP_EOF, NULL, 0, ((void *) 0))) != kIOReturnSuccess) {
        kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
        dump_succeeded = FALSE;
    }

    /* If applicable, update the panic header and flush it so we update the CRC */
#if CONFIG_EMBEDDED
    panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
            EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
    paniclog_flush();
#else
    if (panic_info->mph_panic_log_offset != 0) {
        panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
                MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
        paniclog_flush();
    }
#endif

    return (dump_succeeded ? 0 : -1);
}

boolean_t
dumped_kernel_core()
{
    return kern_dump_successful;
}

int
kern_dump(enum kern_dump_type kd_variant)
{
    static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
    int ret = -1;
#if KASAN
    kasan_disable();
#endif
    if (kd_variant == KERN_DUMP_DISK) {
        if (dumped_local) return (0);
        if (local_dump_in_progress) return (-1);
        local_dump_in_progress = TRUE;
#if CONFIG_EMBEDDED
        hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_BUSY;
#endif
        ret = do_kern_dump(&kern_dump_disk_proc, KERN_DUMP_DISK);
        if (ret == 0) {
            dumped_local = TRUE;
            kern_dump_successful = TRUE;
            local_dump_in_progress = FALSE;
        }

        return ret;
#if CONFIG_EMBEDDED
    } else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
        ret = do_kern_dump(&kern_dump_hw_shmem_dbg_buffer_proc, KERN_DUMP_HW_SHMEM_DBG);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
#endif
    } else {
        ret = do_kern_dump(&kdp_send_crashdump_data, KERN_DUMP_NET);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
    }
}

#if CONFIG_EMBEDDED
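/*
 * Spins forever after a panic, advertising the handshake structure to the
 * hardware shared memory debugger and starting a coredump whenever the
 * remote side requests one.
 */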
void
panic_spin_shmcon()
{
    if (hwsd_info == NULL) {
        kern_coredump_log(NULL, "handshake structure not initialized\n");
        return;
    }

    kern_coredump_log(NULL, "\nPlease go to https://panic.apple.com to report this panic\n");
    kern_coredump_log(NULL, "Waiting for hardware shared memory debugger, handshake structure is at virt: %p, phys %p\n",
            hwsd_info, (void *)kvtophys((vm_offset_t)hwsd_info));

    hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
    hwsd_info->xhsdci_seq_no = 0;
    FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));

    for (;;) {
        FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        if (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_BEGIN) {
            kern_dump(KERN_DUMP_HW_SHMEM_DBG);
        }

        if ((hwsd_info->xhsdci_status == XHSDCI_COREDUMP_REMOTE_DONE) ||
                (hwsd_info->xhsdci_status == XHSDCI_COREDUMP_ERROR)) {
            hwsd_info->xhsdci_status = XHSDCI_STATUS_KERNEL_READY;
            hwsd_info->xhsdci_seq_no = 0;
            FlushPoC_DcacheRegion((vm_offset_t) hwsd_info, sizeof(*hwsd_info));
        }
    }
}
#endif /* CONFIG_EMBEDDED */

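/*
 * zlib allocation hooks: requests are satisfied by a simple bump allocator
 * over the preallocated kdp_core_zmem region (32-byte aligned for the
 * vectorized CRC code) and frees are no-ops. The intent is that zlib's
 * allocations happen up front at deflateInit2() time, so nothing needs to
 * be allocated while handling a panic.
 */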
static void *
kdp_core_zalloc(void * __unused ref, u_int items, u_int size)
{
    void * result;

    result = (void *) (kdp_core_zmem + kdp_core_zoffset);
    kdp_core_zoffset += ~31L & (31 + (items * size));    // 32b align for vector crc
    assert(kdp_core_zoffset <= kdp_core_zsize);

    return (result);
}

static void
kdp_core_zfree(void * __unused ref, void * __unused ptr) {}


#if CONFIG_EMBEDDED
#define LEVEL Z_BEST_SPEED
#define NETBUF 0
#else
#define LEVEL Z_BEST_SPEED
#define NETBUF 1440
#endif

void
kdp_core_init(void)
{
    int wbits = 12;
    int memlevel = 3;
    kern_return_t kr;
#if CONFIG_EMBEDDED
    int i = 0;
    vm_offset_t kdp_core_hw_shmem_buf = 0;
    struct kdp_hw_shmem_dbg_buf_elm *cur_elm = NULL;
    cache_info_t *cpuid_cache_info = NULL;
#endif
    kern_coredump_callback_config core_config = { };

    if (kdp_core_zs.zalloc) return;
    kdp_core_zsize = round_page(NETBUF + zlib_deflate_memory_size(wbits, memlevel));
    printf("kdp_core zlib memory 0x%lx\n", kdp_core_zsize);
    kr = kmem_alloc(kernel_map, &kdp_core_zmem, kdp_core_zsize, VM_KERN_MEMORY_DIAG);
    assert (KERN_SUCCESS == kr);

    kdp_core_zoffset = 0;
    kdp_core_zs.zalloc = kdp_core_zalloc;
    kdp_core_zs.zfree = kdp_core_zfree;

    if (deflateInit2(&kdp_core_zs, LEVEL, Z_DEFLATED,
            wbits + 16 /*gzip mode*/, memlevel, Z_DEFAULT_STRATEGY)) {
        /* Allocation failed */
        bzero(&kdp_core_zs, sizeof(kdp_core_zs));
        kdp_core_zoffset = 0;
    }

    bzero(&kdp_core_header, sizeof(kdp_core_header));

    core_config.kcc_coredump_init = NULL; /* TODO: consider doing mmu flush from an init function */
    core_config.kcc_coredump_get_summary = kern_dump_save_summary;
    core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
    core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
    core_config.kcc_coredump_save_sw_vers = kern_dump_save_sw_vers;
    core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
    core_config.kcc_coredump_save_misc_data = NULL;

    kr = kern_register_xnu_coredump_helper(&core_config);
    assert(KERN_SUCCESS == kr);

#if CONFIG_EMBEDDED
    if (!PE_consistent_debug_enabled()) {
        return;
    }

    /*
     * We need to allocate physically contiguous memory since astris isn't capable
     * of doing address translations while the CPUs are running.
     */
    kdp_hw_shmem_dbg_bufsize = KDP_CORE_HW_SHMEM_DBG_TOTAL_BUF_SIZE;
    kr = kmem_alloc_contig(kernel_map, &kdp_core_hw_shmem_buf, kdp_hw_shmem_dbg_bufsize, VM_MAP_PAGE_MASK(kernel_map),
            0, 0, KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
    assert(KERN_SUCCESS == kr);

    /*
     * Put the connection info structure at the beginning of this buffer and adjust
     * the buffer size accordingly.
     */
    hwsd_info = (struct xnu_hw_shmem_dbg_command_info *) kdp_core_hw_shmem_buf;
    hwsd_info->xhsdci_status = XHSDCI_STATUS_NONE;
    hwsd_info->xhsdci_seq_no = 0;
    hwsd_info->xhsdci_buf_phys_addr = 0;
    hwsd_info->xhsdci_buf_data_length = 0;
    hwsd_info->xhsdci_coredump_total_size_uncomp = 0;
    hwsd_info->xhsdci_coredump_total_size_sent_uncomp = 0;
    hwsd_info->xhsdci_page_size = PAGE_SIZE;

    cpuid_cache_info = cache_info();
    assert(cpuid_cache_info != NULL);

    kdp_core_hw_shmem_buf += sizeof(*hwsd_info);
    /* Leave the handshake structure on its own cache line so buffer writes don't cause flushes of old handshake data */
    kdp_core_hw_shmem_buf = ROUNDUP(kdp_core_hw_shmem_buf, (uint64_t) cpuid_cache_info->c_linesz);
    kdp_hw_shmem_dbg_bufsize -= (uint32_t) (kdp_core_hw_shmem_buf - (vm_offset_t) hwsd_info);
    kdp_hw_shmem_dbg_bufsize /= KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS;
    /* The buffer size should be a cache-line length multiple */
    kdp_hw_shmem_dbg_bufsize -= (kdp_hw_shmem_dbg_bufsize % ROUNDDOWN(OPTIMAL_ASTRIS_READSIZE, cpuid_cache_info->c_linesz));

    STAILQ_INIT(&free_hw_shmem_dbg_bufs);
    STAILQ_INIT(&hw_shmem_dbg_bufs_to_flush);

    for (i = 0; i < KDP_CORE_HW_SHMEM_DBG_NUM_BUFFERS; i++) {
        cur_elm = kalloc(sizeof(*cur_elm));
        assert(cur_elm != NULL);

        cur_elm->khsd_buf = kdp_core_hw_shmem_buf;
        cur_elm->khsd_data_length = 0;

        kdp_core_hw_shmem_buf += kdp_hw_shmem_dbg_bufsize;

        STAILQ_INSERT_HEAD(&free_hw_shmem_dbg_bufs, cur_elm, khsd_elms);
    }

    nanoseconds_to_absolutetime(KDP_HW_SHMEM_DBG_TIMEOUT_DEADLINE_SECS * NSEC_PER_SEC,
            &kdp_hw_shmem_dbg_contact_deadline_interval);

    PE_consistent_debug_register(kDbgIdAstrisConnection, kvtophys((vm_offset_t) hwsd_info), sizeof(pmap_paddr_t));
    PE_consistent_debug_register(kDbgIdAstrisConnectionVers, CUR_XNU_HWSDCI_STRUCT_VERS, sizeof(uint32_t));
#endif /* CONFIG_EMBEDDED */

#if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
    /* Allocate space in the kernel map for the panic stackshot */
    kr = kmem_alloc(kernel_map, &panic_stackshot_buf, PANIC_STACKSHOT_BUFSIZE, VM_KERN_MEMORY_DIAG);
    assert (KERN_SUCCESS == kr);
#endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
