/*
 * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1991 NeXT Computer, Inc. All rights reserved.
 *
 * File: bsd/kern/kern_core.c
 *
 * This file contains machine independent code for performing core dumps.
 *
 */
#if CONFIG_COREDUMP

#include <mach/vm_param.h>
#include <mach/thread_status.h>
#include <sys/content_protection.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <sys/namei.h>
#include <sys/vnode_internal.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/timeb.h>
#include <sys/times.h>
#include <sys/acct.h>
#include <sys/file_internal.h>
#include <sys/uio.h>
#include <sys/kernel.h>
#include <sys/stat.h>

#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <IOKit/IOBSD.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h> /* last */
#include <vm/vm_map.h> /* current_map() */
#include <vm/pmap.h> /* pmap_user_va_bits() */
#include <mach/mach_vm.h> /* mach_vm_region_recurse() */
#include <mach/task.h> /* task_suspend() */
#include <kern/task.h> /* get_task_numacts() */

#include <security/audit/audit.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif /* CONFIG_MACF */

#include <kdp/core_notes.h>

#define COREDUMP_CUSTOM_LOCATION_ENTITLEMENT "com.apple.private.custom-coredump-location"

typedef struct {
	int flavor;                     /* the number for this flavor */
	mach_msg_type_number_t count;   /* count of ints in this flavor */
} mythread_state_flavor_t;
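/*
 * Each LC_THREAD command's payload is a sequence of per-flavor records:
 * the (flavor, count) pair above, followed immediately by `count' 32-bit
 * words of raw thread state obtained from thread_getstatus(). The arrays
 * below list which flavors are captured for each architecture.
 */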

#if defined (__i386__) || defined (__x86_64__)
mythread_state_flavor_t thread_flavor_array[] = {
	{x86_THREAD_STATE, x86_THREAD_STATE_COUNT},
	{x86_FLOAT_STATE, x86_FLOAT_STATE_COUNT},
	{x86_EXCEPTION_STATE, x86_EXCEPTION_STATE_COUNT},
};
int mynum_flavors = 3;
#elif defined (__arm64__)
mythread_state_flavor_t thread_flavor_array[] = {
	{ARM_THREAD_STATE64, ARM_THREAD_STATE64_COUNT},
	/* ARM64_TODO: VFP */
	{ARM_EXCEPTION_STATE64, ARM_EXCEPTION_STATE64_COUNT}
};
int mynum_flavors = 2;
#else
#error architecture not supported
#endif

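/*
 * Cursor handed to collectth_state() once per thread: it carries the
 * shared header buffer, the running offset within it, and the flavor
 * table, so each thread appends its LC_THREAD data in sequence.
 */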
typedef struct {
	vm_offset_t header;
	size_t hoffset;
	mythread_state_flavor_t *flavors;
	size_t tstate_size;
	size_t flavor_count;
} tir_t;

extern int freespace_mb(vnode_t vp);
extern void task_lock(task_t);
extern void task_unlock(task_t);

/* XXX not in a Mach header anywhere */
kern_return_t thread_getstatus(thread_t act, int flavor,
    thread_state_t tstate, mach_msg_type_number_t *count);
void task_act_iterate_wth_args_locked(task_t, void (*)(thread_t, void *), void *);

#ifdef SECURE_KERNEL
__XNU_PRIVATE_EXTERN int do_coredump = 0;       /* default: don't dump cores */
#else
__XNU_PRIVATE_EXTERN int do_coredump = 1;       /* default: dump cores */
#endif /* SECURE_KERNEL */
__XNU_PRIVATE_EXTERN int sugid_coredump = 0;    /* default: don't dump SUID/SGID binaries */


/*
 * cpu_type() returns only the most generic indication of the current CPU;
 * in a core file we want to record the specific kind of process.
 */
cpu_type_t
process_cpu_type(proc_t core_proc)
{
	cpu_type_t what_we_think;
#if defined (__i386__) || defined (__x86_64__)
	if (IS_64BIT_PROCESS(core_proc)) {
		what_we_think = CPU_TYPE_X86_64;
	} else {
		what_we_think = CPU_TYPE_I386;
	}
#elif defined(__arm64__)
	if (IS_64BIT_PROCESS(core_proc)) {
		what_we_think = CPU_TYPE_ARM64;
	} else {
		what_we_think = CPU_TYPE_ARM;
	}
#endif

	return what_we_think;
}

cpu_type_t
process_cpu_subtype(proc_t core_proc)
{
	cpu_type_t what_we_think;
#if defined (__i386__) || defined (__x86_64__)
	if (IS_64BIT_PROCESS(core_proc)) {
		what_we_think = CPU_SUBTYPE_X86_64_ALL;
	} else {
		what_we_think = CPU_SUBTYPE_I386_ALL;
	}
#elif defined(__arm64__)
	if (IS_64BIT_PROCESS(core_proc)) {
		what_we_think = CPU_SUBTYPE_ARM64_ALL;
	} else {
		what_we_think = CPU_SUBTYPE_ARM_ALL;
	}
#endif
	return what_we_think;
}

static void
collectth_state(thread_t th_act, void *tirp)
{
	vm_offset_t header;
	size_t hoffset, i;
	mythread_state_flavor_t *flavors;
	struct thread_command *tc;
	tir_t *t = (tir_t *)tirp;

	/*
	 * Fill in thread command structure.
	 */
	header = t->header;
	hoffset = t->hoffset;
	flavors = t->flavors;

	tc = (struct thread_command *) (header + hoffset);
	tc->cmd = LC_THREAD;
	tc->cmdsize = (uint32_t)(sizeof(struct thread_command)
	    + t->tstate_size);
	hoffset += sizeof(struct thread_command);
	/*
	 * Follow with a struct thread_state_flavor and
	 * the appropriate thread state struct for each
	 * thread state flavor.
	 */
	for (i = 0; i < t->flavor_count; i++) {
		*(mythread_state_flavor_t *)(header + hoffset) =
		    flavors[i];
		hoffset += sizeof(mythread_state_flavor_t);
		thread_getstatus(th_act, flavors[i].flavor,
		    (thread_state_t)(header + hoffset),
		    &flavors[i].count);
		hoffset += flavors[i].count * sizeof(int);
	}

	t->hoffset = hoffset;
}

#if DEVELOPMENT || DEBUG
#define COREDUMPLOG(fmt, args...) printf("coredump (%s, pid %d): " fmt "\n", core_proc->p_comm, proc_getpid(core_proc), ## args)
#else
#define COREDUMPLOG(fmt, args...)
#endif

/*
 * LC_NOTE support for userspace coredumps.
 */

typedef int (write_note_cb_t)(struct vnode *vp, off_t foffset);
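/*
 * A note callback writes its note's payload (cn_size bytes) to the core
 * file at the given offset and returns 0 on success; the matching LC_NOTE
 * command itself is generated by dump_notes() below.
 */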

static int
note_addrable_bits(struct vnode *vp, off_t foffset)
{
	task_t t = current_task();
	vfs_context_t ctx = vfs_context_current();
	kauth_cred_t cred = vfs_context_ucred(ctx);

	addrable_bits_note_t note = {
		.version = ADDRABLE_BITS_VER,
		.addressing_bits = pmap_user_va_bits(get_task_pmap(t)),
		.unused = 0
	};

	return vn_rdwr_64(UIO_WRITE, vp, (vm_offset_t)&note, sizeof(note), foffset, UIO_SYSSPACE,
	           IO_NODELOCKED | IO_UNIT, cred, 0, current_proc());
}

/*
 * note handling
 */

struct core_note {
	size_t cn_size;
	const char *cn_owner;
	write_note_cb_t *cn_write_cb;
} const core_notes[] = {
	{
		.cn_size = sizeof(addrable_bits_note_t),
		.cn_owner = ADDRABLE_BITS_DATA_OWNER,
		.cn_write_cb = note_addrable_bits,
	}
};

const size_t notes_count = sizeof(core_notes) / sizeof(struct core_note);
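/*
 * To add a new note, append an entry above with its payload size, data
 * owner string, and writer callback; notes_count and the sizing math in
 * coredump() pick it up automatically.
 */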

/*
 * LC_NOTE commands are allocated as part of the Mach-O header and are
 * written to disk at the end of the coredump. Each LC_NOTE's payload is
 * written by its callback here.
 */
static int
dump_notes(proc_t __unused core_proc, vm_offset_t header, size_t hoffset, struct vnode *vp, off_t foffset)
{
	for (size_t i = 0; i < notes_count; i++) {
		int error = 0;

		if (core_notes[i].cn_write_cb == NULL) {
			continue;
		}

		/* Generate LC_NOTE command. */
		struct note_command *nc = (struct note_command *)(header + hoffset);

		nc->cmd = LC_NOTE;
		nc->cmdsize = sizeof(struct note_command);
		nc->offset = foffset;
		nc->size = core_notes[i].cn_size;
		strlcpy(nc->data_owner, core_notes[i].cn_owner, sizeof(nc->data_owner));

		hoffset += sizeof(struct note_command);

		/* Add note's payload. */
		error = core_notes[i].cn_write_cb(vp, foffset);
		if (error != 0) {
			COREDUMPLOG("failed to write LC_NOTE %s: error %d", core_notes[i].cn_owner, error);
			return error;
		}

		foffset += core_notes[i].cn_size;
	}

	return 0;
}

/*
 * coredump
 *
 * Description:	Create a core image on the file "core" for the process
 *		indicated
 *
 * Parameters:	core_proc			Process to dump core [*]
 *		reserve_mb			If non-zero, leave filesystem with
 *						at least this much free space.
 *		coredump_flags			Extra options (ignore rlimit, run fsync)
 *
 * Returns:	0				Success
 *		!0				Failure errno
 *
 * IMPORTANT:	This function can only be called on the current process, due
 *		to assumptions below; see variable declaration section for
 *		details.
 */
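/*
 * Resulting core file layout: the Mach-O header and all load commands
 * (LC_SEGMENT/LC_SEGMENT_64, LC_NOTE, LC_THREAD) occupy the start of the
 * file; segment payloads begin at the first page boundary past the header,
 * followed by the LC_NOTE payloads.
 */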
#define MAX_TSTATE_FLAVORS 10
int
coredump(proc_t core_proc, uint32_t reserve_mb, int coredump_flags)
{
/* Begin assumptions that limit us to only the current process */
	vfs_context_t ctx = vfs_context_current();
	vm_map_t map = current_map();
	task_t task = current_task();
/* End assumptions */
	kauth_cred_t cred = vfs_context_ucred(ctx);
	int error = 0;
	struct vnode_attr *vap = NULL;
	size_t thread_count, segment_count;
	size_t command_size, header_size, tstate_size;
	size_t hoffset;
	off_t foffset;
	mach_vm_offset_t vmoffset;
	vm_offset_t header;
	mach_vm_size_t vmsize;
	vm_prot_t prot;
	vm_prot_t maxprot;
	int error1 = 0;
	char stack_name[MAXCOMLEN + 6];
	char *alloced_name = NULL;
	char *name = NULL;
	mythread_state_flavor_t flavors[MAX_TSTATE_FLAVORS];
	vm_size_t mapsize;
	size_t i;
	uint32_t nesting_depth = 0;
	kern_return_t kret;
	struct vm_region_submap_info_64 vbr;
	mach_msg_type_number_t vbrcount = 0;
	tir_t tir1;
	struct vnode * vp;
	struct mach_header *mh = NULL;          /* protected by is_64 */
	struct mach_header_64 *mh64 = NULL;     /* protected by is_64 */
	int is_64 = 0;
	size_t mach_header_sz = sizeof(struct mach_header);
	size_t segment_command_sz = sizeof(struct segment_command);
	size_t notes_size = 0;
	const char *format = NULL;
	char *custom_location_entitlement = NULL;
	size_t custom_location_entitlement_len = 0;
	char *alloced_format = NULL;
	size_t alloced_format_len = 0;
	bool include_iokit_memory = task_is_driver(task);
	bool coredump_attempted = false;
	bool task_locked = false;

	if (current_proc() != core_proc) {
		COREDUMPLOG("Skipping coredump (called against proc that is not current_proc: %p)", core_proc);
		error = EFAULT;
		goto out2;
	}

	if (do_coredump == 0 ||         /* Not dumping at all */
	    ((sugid_coredump == 0) &&   /* Not dumping SUID/SGID binaries */
	    ((kauth_cred_getsvuid(cred) != kauth_cred_getruid(cred)) ||
	    (kauth_cred_getsvgid(cred) != kauth_cred_getrgid(cred))))) {
		error = EFAULT;
		goto out2;
	}

#if CONFIG_MACF
	error = mac_proc_check_dump_core(core_proc);
	if (error != 0) {
		goto out2;
	}
#endif

	if (IS_64BIT_PROCESS(core_proc)) {
		is_64 = 1;
		mach_header_sz = sizeof(struct mach_header_64);
		segment_command_sz = sizeof(struct segment_command_64);
	}

	mapsize = get_vmmap_size(map);

	custom_location_entitlement = IOCurrentTaskGetEntitlement(COREDUMP_CUSTOM_LOCATION_ENTITLEMENT);
	if (custom_location_entitlement != NULL) {
		custom_location_entitlement_len = strlen(custom_location_entitlement);
		const char * dirname;
		if (proc_is_driver(core_proc)) {
			dirname = defaultdrivercorefiledir;
		} else {
			dirname = defaultcorefiledir;
		}
		size_t dirname_len = strlen(dirname);
		size_t printed_len;

		/* new format is dirname + "/" + string from entitlement */
		alloced_format_len = dirname_len + 1 + custom_location_entitlement_len;
		alloced_format = kalloc_data(alloced_format_len + 1, Z_ZERO | Z_WAITOK | Z_NOFAIL);
		printed_len = snprintf(alloced_format, alloced_format_len + 1, "%s/%s", dirname, custom_location_entitlement);
		assert(printed_len == alloced_format_len);

		format = alloced_format;
		coredump_flags |= COREDUMP_IGNORE_ULIMIT;
	} else {
		if (proc_is_driver(core_proc)) {
			format = drivercorefilename;
		} else {
			format = corefilename;
		}
	}

	if (((coredump_flags & COREDUMP_IGNORE_ULIMIT) == 0) &&
	    (mapsize >= proc_limitgetcur(core_proc, RLIMIT_CORE))) {
		error = EFAULT;
		goto out2;
	}

	/* log coredump failures from here */
	coredump_attempted = true;

	task_lock(task);
	task_locked = true;
	(void) task_suspend_internal_locked(task);

	alloced_name = zalloc_flags(ZV_NAMEI, Z_NOWAIT | Z_ZERO);

	/* create name according to sysctl'able format string */
	/* if name creation fails, fall back to historical behaviour... */
	if (alloced_name == NULL ||
	    proc_core_name(format, core_proc->p_comm, kauth_cred_getuid(cred),
	    proc_getpid(core_proc), alloced_name, MAXPATHLEN)) {
		snprintf(stack_name, sizeof(stack_name),
		    "/cores/core.%d", proc_getpid(core_proc));
		name = stack_name;
	} else {
		name = alloced_name;
	}

	COREDUMPLOG("writing core to %s", name);
	if ((error = vnode_open(name, (O_CREAT | FWRITE | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, ctx))) {
		COREDUMPLOG("failed to open core dump file %s: error %d", name, error);
		goto out2;
	}

	vap = kalloc_type(struct vnode_attr, Z_WAITOK | Z_ZERO);
	VATTR_INIT(vap);
	VATTR_WANTED(vap, va_nlink);
	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    vnode_getattr(vp, vap, ctx) || vap->va_nlink != 1) {
		COREDUMPLOG("failed to write core to non-regular file");
		error = EFAULT;
		goto out;
	}

	VATTR_INIT(vap);        /* better to do it here than waste more stack in vnode_setsize */
	VATTR_SET(vap, va_data_size, 0);
	if (core_proc == initproc) {
		VATTR_SET(vap, va_dataprotect_class, PROTECTION_CLASS_D);
	}
	vnode_setattr(vp, vap, ctx);
	core_proc->p_acflag |= ACORE;

	COREDUMPLOG("map size: %lu", mapsize);
	if ((reserve_mb > 0) &&
	    ((freespace_mb(vp) - (mapsize >> 20)) < reserve_mb)) {
		COREDUMPLOG("insufficient free space (free=%d MB, needed=%lu MB, reserve=%d MB)", freespace_mb(vp), (mapsize >> 20), reserve_mb);
		error = ENOSPC;
		goto out;
	}

	thread_count = get_task_numacts(task);
	segment_count = get_vmmap_entries(map); /* XXX */
	tir1.flavor_count = sizeof(thread_flavor_array) / sizeof(mythread_state_flavor_t);
	bcopy(thread_flavor_array, flavors, sizeof(thread_flavor_array));
	tstate_size = 0;
	for (i = 0; i < tir1.flavor_count; i++) {
		tstate_size += sizeof(mythread_state_flavor_t) +
		    (flavors[i].count * sizeof(int));
	}

	{
		size_t lhs;
		size_t rhs;

		/* lhs = segment_count * segment_command_sz */
		if (os_mul_overflow(segment_count, segment_command_sz, &lhs)) {
			COREDUMPLOG("error: segment size overflow: segment_count=%lu, segment_command_sz=%lu", segment_count, segment_command_sz);
			error = ENOMEM;
			goto out;
		}

		/* rhs = (tstate_size + sizeof(struct thread_command)) * thread_count */
		if (os_add_and_mul_overflow(tstate_size, sizeof(struct thread_command), thread_count, &rhs)) {
			COREDUMPLOG("error: thread state size overflow: tstate_size=%lu, thread_count=%lu", tstate_size, thread_count);
			error = ENOMEM;
			goto out;
		}

		/* command_size = lhs + rhs */
		if (os_add_overflow(lhs, rhs, &command_size)) {
			COREDUMPLOG("error: command size overflow: lhs=%lu, rhs=%lu", lhs, rhs);
			error = ENOMEM;
			goto out;
		}

		/* Add notes payload. */
		if (os_mul_overflow(notes_count, sizeof(struct note_command), &notes_size)) {
			COREDUMPLOG("error: note command size overflow: notes_count=%lu", notes_count);
			error = ENOMEM;
			goto out;
		}

		if (os_add_overflow(command_size, notes_size, &command_size)) {
			COREDUMPLOG("error: notes overflow: notes_size=%lu", notes_size);
			error = ENOMEM;
			goto out;
		}
	}

	if (os_add_overflow(command_size, mach_header_sz, &header_size)) {
		COREDUMPLOG("error: header size overflow: command_size=%lu, mach_header_sz=%lu", command_size, mach_header_sz);
		error = ENOMEM;
		goto out;
	}
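
	/*
	 * header_size now covers the mach header plus every load command:
	 * one LC_SEGMENT(_64) per VM region, one LC_NOTE per core_notes
	 * entry, and one LC_THREAD (with full thread state) per thread.
	 */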

	if (kmem_alloc(kernel_map, &header, (vm_size_t)header_size,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
		COREDUMPLOG("error: failed to allocate memory for header (size=%lu)", header_size);
		error = ENOMEM;
		goto out;
	}

	/*
	 * Set up Mach-O header.
	 */
	if (is_64) {
		mh64 = (struct mach_header_64 *)header;
		mh64->magic = MH_MAGIC_64;
		mh64->cputype = process_cpu_type(core_proc);
		mh64->cpusubtype = process_cpu_subtype(core_proc);
		mh64->filetype = MH_CORE;
		mh64->ncmds = (uint32_t)(segment_count + notes_count + thread_count);
		mh64->sizeofcmds = (uint32_t)command_size;
	} else {
		mh = (struct mach_header *)header;
		mh->magic = MH_MAGIC;
		mh->cputype = process_cpu_type(core_proc);
		mh->cpusubtype = process_cpu_subtype(core_proc);
		mh->filetype = MH_CORE;
		mh->ncmds = (uint32_t)(segment_count + notes_count + thread_count);
		mh->sizeofcmds = (uint32_t)command_size;
	}

	hoffset = mach_header_sz;               /* offset into header */
	foffset = round_page(header_size);      /* offset into file */
	vmoffset = MACH_VM_MIN_ADDRESS;         /* offset into VM */
	COREDUMPLOG("mach header size: %zu", header_size);
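
	/*
	 * Three cursors advance together from here: hoffset walks the load
	 * commands inside the in-kernel header buffer, foffset walks the
	 * core file (payloads start at the first page boundary past the
	 * header), and vmoffset walks the task's address space.
	 */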

	/*
	 * We used to check for an error here; now we try to get
	 * as much as we can.
	 */
	COREDUMPLOG("dumping %zu segments", segment_count);
	while (segment_count > 0) {
		struct segment_command *sc;
		struct segment_command_64 *sc64;

		/*
		 * Get region information for next region.
		 */

		while (1) {
			vbrcount = VM_REGION_SUBMAP_INFO_COUNT_64;
			if ((kret = mach_vm_region_recurse(map,
			    &vmoffset, &vmsize, &nesting_depth,
			    (vm_region_recurse_info_t)&vbr,
			    &vbrcount)) != KERN_SUCCESS) {
				break;
			}
			/*
			 * If we get a valid mapping back, but we're dumping
			 * a 32 bit process, and it's over the allowable
			 * address space of a 32 bit process, it's the same
			 * as if mach_vm_region_recurse() failed.
			 */
			if (!(is_64) &&
			    (vmoffset + vmsize > VM_MAX_ADDRESS)) {
				kret = KERN_INVALID_ADDRESS;
				COREDUMPLOG("exceeded allowable region for 32-bit process");
				break;
			}
			if (vbr.is_submap) {
				nesting_depth++;
				continue;
			} else {
				break;
			}
		}
		if (kret != KERN_SUCCESS) {
			COREDUMPLOG("ending segment dump, kret=%d", kret);
			break;
		}

		prot = vbr.protection;
		maxprot = vbr.max_protection;

		if ((prot | maxprot) == VM_PROT_NONE) {
			/*
			 * Elide unreadable (likely reserved) segments
			 */
			COREDUMPLOG("eliding unreadable segment %llx->%llx", vmoffset, vmoffset + vmsize);
			vmoffset += vmsize;
			continue;
		}

		/*
		 * Try as hard as possible to get read access to the data.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			mach_vm_protect(map, vmoffset, vmsize, FALSE,
			    prot | VM_PROT_READ);
		}
		/*
		 * But only try and perform the write if we can read it.
		 */
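		/*
		 * Three gates must pass for the payload to be written: the
		 * region's max protection permits reads, the region is not
		 * IOKit-tagged memory (unless this task is a driver), and
		 * coredumpok() says the pages are safe to touch. Otherwise
		 * the segment is recorded with a zero file size.
		 */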
		int64_t fsize = ((maxprot & VM_PROT_READ) == VM_PROT_READ
		    && (include_iokit_memory || vbr.user_tag != VM_MEMORY_IOKIT)
		    && coredumpok(map, vmoffset)) ? vmsize : 0;

		if (fsize) {
			int64_t resid = 0;
			const enum uio_seg sflg = IS_64BIT_PROCESS(core_proc) ?
			    UIO_USERSPACE64 : UIO_USERSPACE32;

			error = vn_rdwr_64(UIO_WRITE, vp, vmoffset, fsize,
			    foffset, sflg, IO_NODELOCKED | IO_UNIT,
			    cred, &resid, core_proc);

			if (error) {
				/*
				 * Mark segment as empty
				 */
				fsize = 0;
				COREDUMPLOG("failed to write segment %llx->%llx: error %d", vmoffset, vmoffset + vmsize, error);
			} else if (resid) {
				/*
				 * Partial write. Extend the file size so
				 * that the segment command contains a valid
				 * range of offsets, possibly creating a hole.
				 */
				VATTR_INIT(vap);
				VATTR_SET(vap, va_data_size, foffset + fsize);
				vnode_setattr(vp, vap, ctx);
				COREDUMPLOG("partially wrote segment %llx->%llx, resid %lld", vmoffset, vmoffset + vmsize, resid);
			}
		} else {
			COREDUMPLOG("skipping unreadable segment %llx->%llx", vmoffset, vmoffset + vmsize);
		}

		/*
		 * Fill in segment command structure.
		 */

		if (is_64) {
			sc64 = (struct segment_command_64 *)(header + hoffset);
			sc64->cmd = LC_SEGMENT_64;
			sc64->cmdsize = sizeof(struct segment_command_64);
			/* segment name is zeroed by kmem_alloc */
			sc64->segname[0] = 0;
			sc64->vmaddr = vmoffset;
			sc64->vmsize = vmsize;
			sc64->fileoff = foffset;
			sc64->filesize = fsize;
			sc64->maxprot = maxprot;
			sc64->initprot = prot;
			sc64->nsects = 0;
			sc64->flags = 0;
		} else {
			sc = (struct segment_command *) (header + hoffset);
			sc->cmd = LC_SEGMENT;
			sc->cmdsize = sizeof(struct segment_command);
			/* segment name is zeroed by kmem_alloc */
			sc->segname[0] = 0;
			sc->vmaddr = CAST_DOWN_EXPLICIT(uint32_t, vmoffset);
			sc->vmsize = CAST_DOWN_EXPLICIT(uint32_t, vmsize);
			sc->fileoff = CAST_DOWN_EXPLICIT(uint32_t, foffset);    /* will never truncate */
			sc->filesize = CAST_DOWN_EXPLICIT(uint32_t, fsize);     /* will never truncate */
			sc->maxprot = maxprot;
			sc->initprot = prot;
			sc->nsects = 0;
			sc->flags = 0;
		}

		hoffset += segment_command_sz;
		foffset += fsize;
		vmoffset += vmsize;
		segment_count--;
	}
	COREDUMPLOG("max file offset: %lld", foffset);

	/*
	 * If segments remain that were not written out because the loop
	 * above broke out early, they exceed the real address space of
	 * the executable type: remove them from the header's command
	 * counts. This is OK, since we are allowed to have a sparse area
	 * following the segments.
	 */
	if (is_64) {
		mh64->ncmds -= segment_count;
		mh64->sizeofcmds -= segment_count * segment_command_sz;
	} else {
		mh->ncmds -= segment_count;
		mh->sizeofcmds -= segment_count * segment_command_sz;
	}

	/* Add LC_NOTES */
	COREDUMPLOG("dumping %zu notes", notes_count);
	if (dump_notes(core_proc, header, hoffset, vp, foffset) != 0) {
		error = EFAULT;
		goto out;
	}

	tir1.header = header;
	tir1.hoffset = hoffset + notes_size;
	tir1.flavors = flavors;
	tir1.tstate_size = tstate_size;
	COREDUMPLOG("dumping %zu threads", thread_count);
	task_act_iterate_wth_args_locked(task, collectth_state, &tir1);

	/*
	 * Write out the Mach header at the beginning of the
	 * file. OK to use a 32 bit write for this.
	 */
	error = vn_rdwr(UIO_WRITE, vp, (caddr_t)header, (int)MIN(header_size, INT_MAX), (off_t)0,
	    UIO_SYSSPACE, IO_NODELOCKED | IO_UNIT, cred, (int *) 0, core_proc);
	if (error != 0) {
		COREDUMPLOG("failed to write mach header: error %d", error);
	}
	kmem_free(kernel_map, header, header_size);

	if ((coredump_flags & COREDUMP_FULLFSYNC) && error == 0) {
		error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, ctx);
		if (error != 0) {
			COREDUMPLOG("failed to FULLFSYNC core: error %d", error);
		}
	}
out:
	if (vap) {
		kfree_type(struct vnode_attr, vap);
	}
	error1 = vnode_close(vp, FWRITE, ctx);
	if (error1 != 0) {
		COREDUMPLOG("failed to close core file: error %d", error1);
	}
out2:
#if CONFIG_AUDIT
	audit_proc_coredump(core_proc, name, error);
#endif
	if (alloced_name != NULL) {
		zfree(ZV_NAMEI, alloced_name);
	}
	if (alloced_format != NULL) {
		kfree_data(alloced_format, alloced_format_len + 1);
	}
	if (custom_location_entitlement != NULL) {
		kfree_data(custom_location_entitlement, custom_location_entitlement_len + 1);
	}
	if (error == 0) {
		error = error1;
	}

	if (coredump_attempted) {
		if (error != 0) {
			COREDUMPLOG("core dump failed: error %d", error);
		} else {
			COREDUMPLOG("core dump succeeded");
		}
	}

	if (task_locked) {
		task_unlock(task);
	}

	return error;
}

#else /* CONFIG_COREDUMP */

/* When core dumps aren't needed, no need to compile this file at all */

#error assertion failed: this section is not compiled

#endif /* CONFIG_COREDUMP */
