1/*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (C) 1988, 1989, NeXT, Inc.
30 *
31 * File: kern/mach_loader.c
32 * Author: Avadis Tevanian, Jr.
33 *
34 * Mach object file loader (kernel version, for now).
35 *
36 * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT
37 * Started.
38 */
39
40#include <sys/param.h>
41#include <sys/vnode_internal.h>
42#include <sys/uio.h>
43#include <sys/namei.h>
44#include <sys/proc_internal.h>
45#include <sys/kauth.h>
46#include <sys/stat.h>
47#include <sys/malloc.h>
48#include <sys/mount_internal.h>
49#include <sys/fcntl.h>
50#include <sys/ubc_internal.h>
51#include <sys/imgact.h>
52#include <sys/codesign.h>
53#include <sys/proc_uuid_policy.h>
54#include <sys/reason.h>
55#include <sys/kdebug.h>
56
57#include <mach/mach_types.h>
58#include <mach/vm_map.h> /* vm_allocate() */
59#include <mach/mach_vm.h> /* mach_vm_allocate() */
60#include <mach/vm_statistics.h>
61#include <mach/task.h>
62#include <mach/thread_act.h>
63
64#include <machine/vmparam.h>
65#include <machine/exec.h>
66#include <machine/pal_routines.h>
67
68#include <kern/ast.h>
69#include <kern/kern_types.h>
70#include <kern/cpu_number.h>
71#include <kern/mach_loader.h>
72#include <kern/mach_fat.h>
73#include <kern/kalloc.h>
74#include <kern/task.h>
75#include <kern/thread.h>
76#include <kern/page_decrypt.h>
77
78#include <mach-o/fat.h>
79#include <mach-o/loader.h>
80
81#include <vm/pmap.h>
82#include <vm/vm_map.h>
83#include <vm/vm_kern.h>
84#include <vm/vm_pager.h>
85#include <vm/vnode_pager.h>
86#include <vm/vm_protos.h>
87#include <IOKit/IOReturn.h> /* for kIOReturnNotPrivileged */
88
89#include <os/overflow.h>
90
91#if __x86_64__
92extern int bootarg_no32exec; /* bsd_init.c */
93#endif
94
95/*
96 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
97 * when KERNEL is defined.
98 */
99extern pmap_t pmap_create(ledger_t ledger, vm_map_size_t size,
100 boolean_t is_64bit);
101
102/* XXX should have prototypes in a shared header file */
103extern int get_map_nentries(vm_map_t);
104
105extern kern_return_t memory_object_signed(memory_object_control_t control,
106 boolean_t is_signed);
107
108/* An empty load_result_t */
109static const load_result_t load_result_null = {
110 .mach_header = MACH_VM_MIN_ADDRESS,
111 .entry_point = MACH_VM_MIN_ADDRESS,
112 .user_stack = MACH_VM_MIN_ADDRESS,
113 .user_stack_size = 0,
114 .user_stack_alloc = MACH_VM_MIN_ADDRESS,
115 .user_stack_alloc_size = 0,
116 .all_image_info_addr = MACH_VM_MIN_ADDRESS,
117 .all_image_info_size = 0,
118 .thread_count = 0,
119 .unixproc = 0,
120 .dynlinker = 0,
121 .needs_dynlinker = 0,
122 .validentry = 0,
123 .using_lcmain = 0,
124 .is_64bit_addr = 0,
125 .is_64bit_data = 0,
126 .csflags = 0,
127 .has_pagezero = 0,
128 .uuid = { 0 },
129 .min_vm_addr = MACH_VM_MAX_ADDRESS,
130 .max_vm_addr = MACH_VM_MIN_ADDRESS,
131 .cs_end_offset = 0,
132 .threadstate = NULL,
133 .threadstate_sz = 0
134};
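/*
 * Note: "unset" address fields use MACH_VM_MIN_ADDRESS as a sentinel
 * (see e.g. the entry_point checks in load_main()/load_unixthread()),
 * while min_vm_addr starts at MACH_VM_MAX_ADDRESS and max_vm_addr at
 * MACH_VM_MIN_ADDRESS so that load_segment() can simply take the
 * min/max of each segment's range as it maps them.
 */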
135
136/*
137 * Prototypes of static functions.
138 */
139static load_return_t
140parse_machfile(
141 struct vnode *vp,
142 vm_map_t map,
143 thread_t thread,
144 struct mach_header *header,
145 off_t file_offset,
146 off_t macho_size,
147 int depth,
148 int64_t slide,
149 int64_t dyld_slide,
150 load_result_t *result,
151 load_result_t *binresult,
152 struct image_params *imgp
153);
154
155static load_return_t
156load_segment(
157 struct load_command *lcp,
158 uint32_t filetype,
159 void *control,
160 off_t pager_offset,
161 off_t macho_size,
162 struct vnode *vp,
163 vm_map_t map,
164 int64_t slide,
165 load_result_t *result
166);
167
168static load_return_t
169load_uuid(
170 struct uuid_command *uulp,
171 char *command_end,
172 load_result_t *result
173);
174
175static load_return_t
176load_code_signature(
177 struct linkedit_data_command *lcp,
178 struct vnode *vp,
179 off_t macho_offset,
180 off_t macho_size,
181 cpu_type_t cputype,
182 load_result_t *result,
183 struct image_params *imgp);
184
185#if CONFIG_CODE_DECRYPTION
186static load_return_t
187set_code_unprotect(
188 struct encryption_info_command *lcp,
189 caddr_t addr,
190 vm_map_t map,
191 int64_t slide,
192 struct vnode *vp,
193 off_t macho_offset,
194 cpu_type_t cputype,
195 cpu_subtype_t cpusubtype);
196#endif
197
198static
199load_return_t
200load_main(
201 struct entry_point_command *epc,
202 thread_t thread,
203 int64_t slide,
204 load_result_t *result
205);
206
207static load_return_t
208load_unixthread(
209 struct thread_command *tcp,
210 thread_t thread,
211 int64_t slide,
212 load_result_t *result
213);
214
215static load_return_t
216load_threadstate(
217 thread_t thread,
218 uint32_t *ts,
219 uint32_t total_size,
 load_result_t *result
 load_result_t *result
221);
222
223static load_return_t
224load_threadstack(
225 thread_t thread,
226 uint32_t *ts,
227 uint32_t total_size,
228 mach_vm_offset_t *user_stack,
229 int *customstack,
230 load_result_t *result
231);
232
233static load_return_t
234load_threadentry(
235 thread_t thread,
236 uint32_t *ts,
237 uint32_t total_size,
238 mach_vm_offset_t *entry_point
239);
240
241static load_return_t
242load_dylinker(
243 struct dylinker_command *lcp,
244 integer_t archbits,
245 vm_map_t map,
246 thread_t thread,
247 int depth,
248 int64_t slide,
249 load_result_t *result,
250 struct image_params *imgp
251);
252
253struct macho_data;
254
255static load_return_t
256get_macho_vnode(
257 const char *path,
258 integer_t archbits,
259 struct mach_header *mach_header,
260 off_t *file_offset,
261 off_t *macho_size,
262 struct macho_data *macho_data,
263 struct vnode **vpp
264);
265
266static inline void
267widen_segment_command(const struct segment_command *scp32,
268 struct segment_command_64 *scp)
269{
270 scp->cmd = scp32->cmd;
271 scp->cmdsize = scp32->cmdsize;
272 bcopy(scp32->segname, scp->segname, sizeof(scp->segname));
273 scp->vmaddr = scp32->vmaddr;
274 scp->vmsize = scp32->vmsize;
275 scp->fileoff = scp32->fileoff;
276 scp->filesize = scp32->filesize;
277 scp->maxprot = scp32->maxprot;
278 scp->initprot = scp32->initprot;
279 scp->nsects = scp32->nsects;
280 scp->flags = scp32->flags;
281}
282
283static void
284note_all_image_info_section(const struct segment_command_64 *scp,
285 boolean_t is64, size_t section_size, const void *sections,
286 int64_t slide, load_result_t *result)
287{
288 const union {
289 struct section s32;
290 struct section_64 s64;
291 } *sectionp;
292 unsigned int i;
293
294
295 if (strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0)
296 return;
297 for (i = 0; i < scp->nsects; ++i) {
298 sectionp = (const void *)
299 ((const char *)sections + section_size * i);
300 if (0 == strncmp(sectionp->s64.sectname, "__all_image_info",
301 sizeof(sectionp->s64.sectname))) {
302 result->all_image_info_addr =
303 is64 ? sectionp->s64.addr : sectionp->s32.addr;
304 result->all_image_info_addr += slide;
305 result->all_image_info_size =
306 is64 ? sectionp->s64.size : sectionp->s32.size;
307 return;
308 }
309 }
310}
311
312#if __arm64__
313/*
314 * Allow bypassing some security rules (hard pagezero, no write+execute)
315 * in exchange for better binary compatibility for legacy apps built
316 * before 16KB-alignment was enforced.
317 */
318const int fourk_binary_compatibility_unsafe = TRUE;
319const int fourk_binary_compatibility_allow_wx = FALSE;
320#endif /* __arm64__ */
321
322load_return_t
323load_machfile(
324 struct image_params *imgp,
325 struct mach_header *header,
326 thread_t thread,
327 vm_map_t *mapp,
328 load_result_t *result
329)
330{
331 struct vnode *vp = imgp->ip_vp;
332 off_t file_offset = imgp->ip_arch_offset;
333 off_t macho_size = imgp->ip_arch_size;
334 off_t file_size = imgp->ip_vattr->va_data_size;
335 pmap_t pmap = 0; /* protected by create_map */
336 vm_map_t map;
337 load_result_t myresult;
338 load_return_t lret;
339 boolean_t enforce_hard_pagezero = TRUE;
340 int in_exec = (imgp->ip_flags & IMGPF_EXEC);
341 task_t task = current_task();
342 proc_t p = current_proc();
343 int64_t aslr_page_offset = 0;
344 int64_t dyld_aslr_page_offset = 0;
345 int64_t aslr_section_size = 0;
346 int64_t aslr_section_offset = 0;
347 kern_return_t kret;
348
349 if (macho_size > file_size) {
350 return(LOAD_BADMACHO);
351 }
352
353 result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
354 result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
355
356 task_t ledger_task;
357 if (imgp->ip_new_thread) {
358 ledger_task = get_threadtask(imgp->ip_new_thread);
359 } else {
360 ledger_task = task;
361 }
362 pmap = pmap_create(get_task_ledger(ledger_task),
363 (vm_map_size_t) 0,
364 result->is_64bit_addr);
365 map = vm_map_create(pmap,
366 0,
367 vm_compute_max_offset(result->is_64bit_addr),
368 TRUE);
369
370#if defined(__arm64__)
371 if (result->is_64bit_addr) {
372 /* enforce 16KB alignment of VM map entries */
373 vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
374 } else {
375 vm_map_set_page_shift(map, page_shift_user32);
376 }
377#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
378 /* enforce 16KB alignment for watch targets with new ABI */
379 vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
380#endif /* __arm64__ */
381
382#ifndef CONFIG_ENFORCE_SIGNED_CODE
 /* This turns off faulting for executable pages, which makes it
 * possible to circumvent Code Signing Enforcement. The per-process
 * flag (CS_ENFORCEMENT) is not set yet, but we can use the
 * global flag.
 */
388 if ( !cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION) ) {
389 vm_map_disable_NX(map);
390 // TODO: Message Trace or log that this is happening
391 }
392#endif
393
 /* Forcibly disallow execution from data pages even if the arch
 * normally permits it. */
396 if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC))
397 vm_map_disallow_data_exec(map);
398
399 /*
400 * Compute a random offset for ASLR, and an independent random offset for dyld.
401 */
402 if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
403 vm_map_get_max_aslr_slide_section(map, &aslr_section_offset, &aslr_section_size);
404 aslr_section_offset = (random() % aslr_section_offset) * aslr_section_size;
405
406 aslr_page_offset = random();
407 aslr_page_offset %= vm_map_get_max_aslr_slide_pages(map);
408 aslr_page_offset <<= vm_map_page_shift(map);
409
410 dyld_aslr_page_offset = random();
411 dyld_aslr_page_offset %= vm_map_get_max_loader_aslr_slide_pages(map);
412 dyld_aslr_page_offset <<= vm_map_page_shift(map);
413
414 aslr_page_offset += aslr_section_offset;
415 }
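 /*
 * At this point "aslr_page_offset" combines the coarse section offset
 * with a page-granular random slide for the main binary, while
 * "dyld_aslr_page_offset" is drawn independently so that dyld's slide
 * is not correlated with the main binary's; see the load_dylinker()
 * call below, which applies the dyld offset.
 */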
416
417 if (!result)
418 result = &myresult;
419
420 *result = load_result_null;
421
422 /*
423 * re-set the bitness on the load result since we cleared the load result above.
424 */
425 result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
426 result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
427
428 lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
429 0, aslr_page_offset, dyld_aslr_page_offset, result,
430 NULL, imgp);
431
432 if (lret != LOAD_SUCCESS) {
433 vm_map_deallocate(map); /* will lose pmap reference too */
434 return(lret);
435 }
436
437#if __x86_64__
438 /*
439 * On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
440 */
441 if (!result->is_64bit_addr) {
442 enforce_hard_pagezero = FALSE;
443 }
444
445 /*
446 * For processes with IMGPF_HIGH_BITS_ASLR, add a few random high bits
447 * to the start address for "anywhere" memory allocations.
448 */
449#define VM_MAP_HIGH_START_BITS_COUNT 8
450#define VM_MAP_HIGH_START_BITS_SHIFT 27
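 /*
 * With VM_MAP_HIGH_START_BITS_COUNT random bits shifted left by
 * VM_MAP_HIGH_START_BITS_SHIFT, the high start below lands on a
 * 128MB (1 << 27) boundary, at most roughly 32GB (255 << 27) above
 * the bottom of the map; only "anywhere" allocations are pushed
 * above it.
 */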
451 if (result->is_64bit_addr &&
452 (imgp->ip_flags & IMGPF_HIGH_BITS_ASLR)) {
453 int random_bits;
454 vm_map_offset_t high_start;
455
456 random_bits = random();
457 random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT)-1;
458 high_start = (((vm_map_offset_t)random_bits)
459 << VM_MAP_HIGH_START_BITS_SHIFT);
460 vm_map_set_high_start(map, high_start);
461 }
462#endif /* __x86_64__ */
463
464 /*
465 * Check to see if the page zero is enforced by the map->min_offset.
466 */
467 if (enforce_hard_pagezero &&
468 (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
469#if __arm64__
470 if (!result->is_64bit_addr && /* not 64-bit address space */
471 !(header->flags & MH_PIE) && /* not PIE */
472 (vm_map_page_shift(map) != FOURK_PAGE_SHIFT ||
473 PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */
474 result->has_pagezero && /* has a "soft" page zero */
475 fourk_binary_compatibility_unsafe) {
476 /*
477 * For backwards compatibility of "4K" apps on
478 * a 16K system, do not enforce a hard page zero...
479 */
480 } else
481#endif /* __arm64__ */
482 {
483 vm_map_deallocate(map); /* will lose pmap reference too */
484 return (LOAD_BADMACHO);
485 }
486 }
487
488 vm_commit_pagezero_status(map);
489
490 /*
 * If this is an exec, then we are going to destroy the old
 * task, and it's correct to halt it; if it's a spawn, the
 * task is not yet running, so halting makes no sense.
494 */
495 if (in_exec) {
496 /*
497 * Mark the task as halting and start the other
498 * threads towards terminating themselves. Then
499 * make sure any threads waiting for a process
500 * transition get informed that we are committed to
501 * this transition, and then finally complete the
502 * task halting (wait for threads and then cleanup
503 * task resources).
504 *
505 * NOTE: task_start_halt() makes sure that no new
506 * threads are created in the task during the transition.
507 * We need to mark the workqueue as exiting before we
508 * wait for threads to terminate (at the end of which
509 * we no longer have a prohibition on thread creation).
510 *
511 * Finally, clean up any lingering workqueue data structures
512 * that may have been left behind by the workqueue threads
513 * as they exited (and then clean up the work queue itself).
514 */
515 kret = task_start_halt(task);
516 if (kret != KERN_SUCCESS) {
517 vm_map_deallocate(map); /* will lose pmap reference too */
518 return (LOAD_FAILURE);
519 }
520 proc_transcommit(p, 0);
521 workq_mark_exiting(p);
522 task_complete_halt(task);
523 workq_exit(p);
524
525 /*
526 * Roll up accounting info to new task. The roll up is done after
527 * task_complete_halt to make sure the thread accounting info is
528 * rolled up to current_task.
529 */
530 task_rollup_accounting_info(get_threadtask(thread), task);
531 }
532 *mapp = map;
533
534#ifdef CONFIG_32BIT_TELEMETRY
535 if (!result->is_64bit_data) {
536 /*
537 * This may not need to be an AST; we merely need to ensure that
538 * we gather telemetry at the point where all of the information
539 * that we want has been added to the process.
540 */
541 task_set_32bit_log_flag(get_threadtask(thread));
542 act_set_astbsd(thread);
543 }
544#endif /* CONFIG_32BIT_TELEMETRY */
545
546 return(LOAD_SUCCESS);
547}
548
549int macho_printf = 0;
550#define MACHO_PRINTF(args) \
551 do { \
552 if (macho_printf) { \
553 printf args; \
554 } \
555 } while (0)
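/*
 * Setting macho_printf to a non-zero value (e.g. from a kernel
 * debugger) enables the verbose load diagnostics emitted via
 * MACHO_PRINTF() in parse_machfile() and load_segment().
 */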
556
/*
 * The file size of a mach-o file is limited to 32 bits; this is the
 * limit on the kalloc() of enough bytes for a mach_header plus the
 * contents of its sizeofcmds, which is itself constrained to 32 bits
 * in the file format. We read the load-command region into a kernel
 * buffer and then parse it, acting on the load_command entries the
 * kernel cares about (only a subset of the possible commands). If
 * "map"==VM_MAP_NULL or "thread"==THREAD_NULL, do not make permanent
 * VM modifications, just preflight the parse.
 */
568static
569load_return_t
570parse_machfile(
571 struct vnode *vp,
572 vm_map_t map,
573 thread_t thread,
574 struct mach_header *header,
575 off_t file_offset,
576 off_t macho_size,
577 int depth,
578 int64_t aslr_offset,
579 int64_t dyld_aslr_offset,
580 load_result_t *result,
581 load_result_t *binresult,
582 struct image_params *imgp
583)
584{
585 uint32_t ncmds;
586 struct load_command *lcp;
587 struct dylinker_command *dlp = 0;
588 integer_t dlarchbits = 0;
589 void * control;
590 load_return_t ret = LOAD_SUCCESS;
591 void * addr;
592 vm_size_t alloc_size, cmds_size;
593 size_t offset;
594 size_t oldoffset; /* for overflow check */
595 int pass;
596 proc_t p = current_proc(); /* XXXX */
597 int error;
598 int resid = 0;
599 size_t mach_header_sz = sizeof(struct mach_header);
600 boolean_t abi64;
601 boolean_t got_code_signatures = FALSE;
602 boolean_t found_header_segment = FALSE;
603 boolean_t found_xhdr = FALSE;
604 int64_t slide = 0;
605 boolean_t dyld_no_load_addr = FALSE;
606 boolean_t is_dyld = FALSE;
607 vm_map_offset_t effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
608#if __arm64__
609 uint32_t pagezero_end = 0;
610 uint32_t executable_end = 0;
611 uint32_t writable_start = 0;
612 vm_map_size_t effective_page_size;
613
614 effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
615#endif /* __arm64__ */
616
617 if (header->magic == MH_MAGIC_64 ||
618 header->magic == MH_CIGAM_64) {
619 mach_header_sz = sizeof(struct mach_header_64);
620 }
621
622 /*
623 * Break infinite recursion
624 */
625 if (depth > 1) {
626 return(LOAD_FAILURE);
627 }
628
629 depth++;
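 /*
 * After the increment, depth is 1 for the main executable and 2 for
 * dyld, which is parsed through the recursive parse_machfile() call
 * made by load_dylinker(); the MH_EXECUTE/MH_DYLINKER checks below
 * rely on these values.
 */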
630
 /*
 * Check to see if this is the right machine type.
 */
634 if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) ||
635 !grade_binary(header->cputype,
636 header->cpusubtype & ~CPU_SUBTYPE_MASK))
637 return(LOAD_BADARCH);
638
639#if __x86_64__
640 if (bootarg_no32exec && (header->cputype == CPU_TYPE_X86)) {
641 return(LOAD_BADARCH_X86);
642 }
643#endif
644
645 abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);
646
647 switch (header->filetype) {
648
649 case MH_EXECUTE:
650 if (depth != 1) {
651 return (LOAD_FAILURE);
652 }
653#if CONFIG_EMBEDDED
654 if (header->flags & MH_DYLDLINK) {
655 /* Check properties of dynamic executables */
656 if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) {
657 return (LOAD_FAILURE);
658 }
659 result->needs_dynlinker = TRUE;
660 } else {
661 /* Check properties of static executables (disallowed except for development) */
662#if !(DEVELOPMENT || DEBUG)
663 return (LOAD_FAILURE);
664#endif
665 }
666#endif /* CONFIG_EMBEDDED */
667
668 break;
669 case MH_DYLINKER:
670 if (depth != 2) {
671 return (LOAD_FAILURE);
672 }
673 is_dyld = TRUE;
674 break;
675
676 default:
677 return (LOAD_FAILURE);
678 }
679
680 /*
681 * Get the pager for the file.
682 */
683 control = ubc_getobject(vp, UBC_FLAGS_NONE);
684
685 /* ensure header + sizeofcmds falls within the file */
686 if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) ||
687 (off_t)cmds_size > macho_size ||
688 round_page_overflow(cmds_size, &alloc_size)) {
689 return LOAD_BADMACHO;
690 }
691
692 /*
693 * Map the load commands into kernel memory.
694 */
695 addr = kalloc(alloc_size);
696 if (addr == NULL) {
697 return LOAD_NOSPACE;
698 }
699
700 error = vn_rdwr(UIO_READ, vp, addr, alloc_size, file_offset,
701 UIO_SYSSPACE, 0, kauth_cred_get(), &resid, p);
702 if (error) {
703 kfree(addr, alloc_size);
704 return LOAD_IOERROR;
705 }
706
707 if (resid) {
708 /* We must be able to read in as much as the mach_header indicated */
709 kfree(addr, alloc_size);
710 return LOAD_BADMACHO;
711 }
712
713 /*
714 * For PIE and dyld, slide everything by the ASLR offset.
715 */
716 if ((header->flags & MH_PIE) || is_dyld) {
717 slide = aslr_offset;
718 }
719
 /*
 * Scan through the commands, processing each one as necessary.
 * We parse in four passes through the headers:
 * 0: determine if TEXT and DATA boundary can be page-aligned
 * 1: thread state, uuid, code signature
 * 2: segments
 * 3: dyld, encryption, check entry point
 */
728
729 boolean_t slide_realign = FALSE;
730#if __arm64__
731 if (!abi64) {
732 slide_realign = TRUE;
733 }
734#endif
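 /*
 * Only 32-bit images may need the slide re-aligned: on a 16KB-page
 * arm64 system, a 4KB-aligned binary can declare a __PAGEZERO smaller
 * than a system page or leave its TEXT/DATA boundary off a system
 * page boundary, which the pass-1 adjustments below correct by
 * bumping the slide (for PIE images).
 */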
735
736 for (pass = 0; pass <= 3; pass++) {
737
738 if (pass == 0 && !slide_realign && !is_dyld) {
 /* if we don't need to realign the slide or determine dyld's load
 * address, pass 0 can be skipped */
741 continue;
742 } else if (pass == 1) {
743#if __arm64__
744 boolean_t is_pie;
745 int64_t adjust;
746
747 is_pie = ((header->flags & MH_PIE) != 0);
748 if (pagezero_end != 0 &&
749 pagezero_end < effective_page_size) {
750 /* need at least 1 page for PAGEZERO */
751 adjust = effective_page_size;
752 MACHO_PRINTF(("pagezero boundary at "
753 "0x%llx; adjust slide from "
754 "0x%llx to 0x%llx%s\n",
755 (uint64_t) pagezero_end,
756 slide,
757 slide + adjust,
758 (is_pie
759 ? ""
760 : " BUT NO PIE ****** :-(")));
761 if (is_pie) {
762 slide += adjust;
763 pagezero_end += adjust;
764 executable_end += adjust;
765 writable_start += adjust;
766 }
767 }
768 if (pagezero_end != 0) {
769 result->has_pagezero = TRUE;
770 }
771 if (executable_end == writable_start &&
772 (executable_end & effective_page_mask) != 0 &&
773 (executable_end & FOURK_PAGE_MASK) == 0) {
774
775 /*
776 * The TEXT/DATA boundary is 4K-aligned but
777 * not page-aligned. Adjust the slide to make
778 * it page-aligned and avoid having a page
779 * with both write and execute permissions.
780 */
781 adjust =
782 (effective_page_size -
783 (executable_end & effective_page_mask));
784 MACHO_PRINTF(("page-unaligned X-W boundary at "
785 "0x%llx; adjust slide from "
786 "0x%llx to 0x%llx%s\n",
787 (uint64_t) executable_end,
788 slide,
789 slide + adjust,
790 (is_pie
791 ? ""
792 : " BUT NO PIE ****** :-(")));
793 if (is_pie)
794 slide += adjust;
795 }
796#endif /* __arm64__ */
797
798 if (dyld_no_load_addr && binresult) {
799 /*
800 * The dyld Mach-O does not specify a load address. Try to locate
801 * it right after the main binary. If binresult == NULL, load
802 * directly to the given slide.
803 */
804 slide = vm_map_round_page(slide + binresult->max_vm_addr, effective_page_mask);
805 }
806 }
807
 /*
 * Check that the entry point is contained in an executable segment
 */
811 if ((pass == 3) && (!result->using_lcmain && result->validentry == 0)) {
812 thread_state_initialize(thread);
813 ret = LOAD_FAILURE;
814 break;
815 }
816
817 /*
818 * Check that some segment maps the start of the mach-o file, which is
819 * needed by the dynamic loader to read the mach headers, etc.
820 */
821 if ((pass == 3) && (found_header_segment == FALSE)) {
822 ret = LOAD_BADMACHO;
823 break;
824 }
825
826 /*
827 * Loop through each of the load_commands indicated by the
828 * Mach-O header; if an absurd value is provided, we just
829 * run off the end of the reserved section by incrementing
830 * the offset too far, so we are implicitly fail-safe.
831 */
832 offset = mach_header_sz;
833 ncmds = header->ncmds;
834
835 while (ncmds--) {
836
837 /* ensure enough space for a minimal load command */
838 if (offset + sizeof(struct load_command) > cmds_size) {
839 ret = LOAD_BADMACHO;
840 break;
841 }
842
843 /*
844 * Get a pointer to the command.
845 */
846 lcp = (struct load_command *)(addr + offset);
847 oldoffset = offset;
848
849 /*
850 * Perform prevalidation of the struct load_command
851 * before we attempt to use its contents. Invalid
852 * values are ones which result in an overflow, or
 * which cannot possibly be valid commands, or which
854 * straddle or exist past the reserved section at the
855 * start of the image.
856 */
857 if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
858 lcp->cmdsize < sizeof(struct load_command) ||
859 offset > cmds_size) {
860 ret = LOAD_BADMACHO;
861 break;
862 }
863
864 /*
 * Act on the struct load_command entries for which kernel
 * intervention is required.
867 */
868 switch(lcp->cmd) {
869 case LC_SEGMENT: {
870 struct segment_command *scp = (struct segment_command *) lcp;
871 if (pass == 0) {
872 if (is_dyld && scp->vmaddr == 0 && scp->fileoff == 0) {
873 dyld_no_load_addr = TRUE;
874 if (!slide_realign) {
875 /* got what we need, bail early on pass 0 */
876 continue;
877 }
878 }
879
880#if __arm64__
881 assert(!abi64);
882
883 if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) {
884 /* PAGEZERO */
885 if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end)) {
886 ret = LOAD_BADMACHO;
887 break;
888 }
889 }
890 if (scp->initprot & VM_PROT_EXECUTE) {
891 /* TEXT */
892 if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end)) {
893 ret = LOAD_BADMACHO;
894 break;
895 }
896 }
897 if (scp->initprot & VM_PROT_WRITE) {
898 /* DATA */
899 if (os_add_overflow(scp->vmaddr, slide, &writable_start)) {
900 ret = LOAD_BADMACHO;
901 break;
902 }
903 }
904#endif /* __arm64__ */
905 break;
906 }
907
908 if (pass == 1 && !strncmp(scp->segname, "__XHDR", sizeof(scp->segname))) {
909 found_xhdr = TRUE;
910 }
911
912 if (pass != 2)
913 break;
914
915 if (abi64) {
916 /*
917 * Having an LC_SEGMENT command for the
918 * wrong ABI is invalid <rdar://problem/11021230>
919 */
920 ret = LOAD_BADMACHO;
921 break;
922 }
923
924 ret = load_segment(lcp,
925 header->filetype,
926 control,
927 file_offset,
928 macho_size,
929 vp,
930 map,
931 slide,
932 result);
933 if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) {
 /* Enforce a single segment mapping file offset zero, with R+X
 * protection. */
936 if (found_header_segment ||
937 ((scp->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) != (VM_PROT_READ|VM_PROT_EXECUTE))) {
938 ret = LOAD_BADMACHO;
939 break;
940 }
941 found_header_segment = TRUE;
942 }
943
944 break;
945 }
946 case LC_SEGMENT_64: {
947 struct segment_command_64 *scp64 = (struct segment_command_64 *) lcp;
948
949 if (pass == 0) {
950 if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) {
951 dyld_no_load_addr = TRUE;
952 if (!slide_realign) {
953 /* got what we need, bail early on pass 0 */
954 continue;
955 }
956 }
957 }
958
959 if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) {
960 found_xhdr = TRUE;
961 }
962
963 if (pass != 2)
964 break;
965
966 if (!abi64) {
967 /*
968 * Having an LC_SEGMENT_64 command for the
969 * wrong ABI is invalid <rdar://problem/11021230>
970 */
971 ret = LOAD_BADMACHO;
972 break;
973 }
974
975 ret = load_segment(lcp,
976 header->filetype,
977 control,
978 file_offset,
979 macho_size,
980 vp,
981 map,
982 slide,
983 result);
984
985 if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) {
 /* Enforce a single segment mapping file offset zero, with R+X
 * protection. */
988 if (found_header_segment ||
989 ((scp64->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) != (VM_PROT_READ|VM_PROT_EXECUTE))) {
990 ret = LOAD_BADMACHO;
991 break;
992 }
993 found_header_segment = TRUE;
994 }
995
996 break;
997 }
998 case LC_UNIXTHREAD:
999 if (pass != 1)
1000 break;
1001 ret = load_unixthread(
1002 (struct thread_command *) lcp,
1003 thread,
1004 slide,
1005 result);
1006 break;
1007 case LC_MAIN:
1008 if (pass != 1)
1009 break;
1010 if (depth != 1)
1011 break;
1012 ret = load_main(
1013 (struct entry_point_command *) lcp,
1014 thread,
1015 slide,
1016 result);
1017 break;
1018 case LC_LOAD_DYLINKER:
1019 if (pass != 3)
1020 break;
1021 if ((depth == 1) && (dlp == 0)) {
1022 dlp = (struct dylinker_command *)lcp;
1023 dlarchbits = (header->cputype & CPU_ARCH_MASK);
1024 } else {
1025 ret = LOAD_FAILURE;
1026 }
1027 break;
1028 case LC_UUID:
1029 if (pass == 1 && depth == 1) {
1030 ret = load_uuid((struct uuid_command *) lcp,
1031 (char *)addr + cmds_size,
1032 result);
1033 }
1034 break;
1035 case LC_CODE_SIGNATURE:
1036 /* CODE SIGNING */
1037 if (pass != 1)
1038 break;
1039 /* pager -> uip ->
1040 load signatures & store in uip
1041 set VM object "signed_pages"
1042 */
1043 ret = load_code_signature(
1044 (struct linkedit_data_command *) lcp,
1045 vp,
1046 file_offset,
1047 macho_size,
1048 header->cputype,
1049 result,
1050 imgp);
1051 if (ret != LOAD_SUCCESS) {
1052 printf("proc %d: load code signature error %d "
1053 "for file \"%s\"\n",
1054 p->p_pid, ret, vp->v_name);
1055 /*
1056 * Allow injections to be ignored on devices w/o enforcement enabled
1057 */
1058 if (!cs_process_global_enforcement())
1059 ret = LOAD_SUCCESS; /* ignore error */
1060
1061 } else {
1062 got_code_signatures = TRUE;
1063 }
1064
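 /*
 * The load commands were read into a kernel buffer with vn_rdwr(),
 * which bypasses the signature validation normally done when pages
 * are faulted in through the vnode pager, so check the pages
 * covering them explicitly against the signature we just loaded.
 */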
1065 if (got_code_signatures) {
1066 unsigned tainted = CS_VALIDATE_TAINTED;
1067 boolean_t valid = FALSE;
1068 vm_size_t off = 0;
1069
1070
1071 if (cs_debug > 10)
1072 printf("validating initial pages of %s\n", vp->v_name);
1073
1074 while (off < alloc_size && ret == LOAD_SUCCESS) {
1075 tainted = CS_VALIDATE_TAINTED;
1076
1077 valid = cs_validate_range(vp,
1078 NULL,
1079 file_offset + off,
1080 addr + off,
1081 PAGE_SIZE,
1082 &tainted);
1083 if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
1084 if (cs_debug)
1085 printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
1086 vp->v_name, p->p_pid, (long long)(file_offset + off), valid, tainted, result->csflags);
1087 if (cs_process_global_enforcement() ||
1088 (result->csflags & (CS_HARD|CS_KILL|CS_ENFORCEMENT))) {
1089 ret = LOAD_FAILURE;
1090 }
1091 result->csflags &= ~CS_VALID;
1092 }
1093 off += PAGE_SIZE;
1094 }
1095 }
1096
1097 break;
1098#if CONFIG_CODE_DECRYPTION
1099 case LC_ENCRYPTION_INFO:
1100 case LC_ENCRYPTION_INFO_64:
1101 if (pass != 3)
1102 break;
1103 ret = set_code_unprotect(
1104 (struct encryption_info_command *) lcp,
1105 addr, map, slide, vp, file_offset,
1106 header->cputype, header->cpusubtype);
1107 if (ret != LOAD_SUCCESS) {
1108 os_reason_t load_failure_reason = OS_REASON_NULL;
1109 printf("proc %d: set_code_unprotect() error %d "
1110 "for file \"%s\"\n",
1111 p->p_pid, ret, vp->v_name);
1112 /*
1113 * Don't let the app run if it's
1114 * encrypted but we failed to set up the
1115 * decrypter. If the keys are missing it will
1116 * return LOAD_DECRYPTFAIL.
1117 */
1118 if (ret == LOAD_DECRYPTFAIL) {
1119 /* failed to load due to missing FP keys */
1120 proc_lock(p);
1121 p->p_lflag |= P_LTERM_DECRYPTFAIL;
1122 proc_unlock(p);
1123
1124 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1125 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0);
1126 load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT);
1127 } else {
1128
1129 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
1130 p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0);
1131 load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT);
1132 }
1133
1134 assert(load_failure_reason != OS_REASON_NULL);
1135 psignal_with_reason(p, SIGKILL, load_failure_reason);
1136 }
1137 break;
1138#endif
1139#if __arm64__
1140 case LC_VERSION_MIN_IPHONEOS: {
1141 struct version_min_command *vmc;
1142
1143 if (pass != 1) {
1144 break;
1145 }
1146 vmc = (struct version_min_command *) lcp;
1147 if (vmc->sdk < (12 << 16)) {
1148 /* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */
1149 result->legacy_footprint = TRUE;
1150 }
1151// printf("FBDP %s:%d vp %p (%s) sdk %d.%d.%d -> legacy_footprint=%d\n", __FUNCTION__, __LINE__, vp, vp->v_name, (vmc->sdk >> 16), ((vmc->sdk & 0xFF00) >> 8), (vmc->sdk & 0xFF), result->legacy_footprint);
1152 break;
1153 }
1154#endif /* __arm64__ */
1155 default:
1156 /* Other commands are ignored by the kernel */
1157 ret = LOAD_SUCCESS;
1158 break;
1159 }
1160 if (ret != LOAD_SUCCESS)
1161 break;
1162 }
1163 if (ret != LOAD_SUCCESS)
1164 break;
1165 }
1166
1167 if (ret == LOAD_SUCCESS) {
1168 if(!got_code_signatures && cs_process_global_enforcement()) {
1169 ret = LOAD_FAILURE;
1170 }
1171
1172 /* Make sure if we need dyld, we got it */
1173 if (result->needs_dynlinker && !dlp) {
1174 ret = LOAD_FAILURE;
1175 }
1176
1177 if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
1178 /*
1179 * load the dylinker, and slide it by the independent DYLD ASLR
1180 * offset regardless of the PIE-ness of the main binary.
1181 */
1182 ret = load_dylinker(dlp, dlarchbits, map, thread, depth,
1183 dyld_aslr_offset, result, imgp);
1184 }
1185
1186 if ((ret == LOAD_SUCCESS) && (depth == 1)) {
1187 if (result->thread_count == 0) {
1188 ret = LOAD_FAILURE;
1189 }
1190#if CONFIG_ENFORCE_SIGNED_CODE
1191 if (result->needs_dynlinker && !(result->csflags & CS_DYLD_PLATFORM)) {
1192 ret = LOAD_FAILURE;
1193 }
1194#endif
1195 }
1196 }
1197
1198 if (ret == LOAD_BADMACHO && found_xhdr) {
1199 ret = LOAD_BADMACHO_UPX;
1200 }
1201
1202 kfree(addr, alloc_size);
1203
1204 return ret;
1205}
1206
1207#if CONFIG_CODE_DECRYPTION
1208
1209#define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096)
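/*
 * The first three 4KB pages (APPLE_UNPROTECTED_HEADER_SIZE bytes) of a
 * protected (SG_PROTECTED_VERSION_1) slice are left untransformed,
 * which typically leaves the mach header and load commands readable in
 * the clear; unprotect_dsmos_segment() below skips this region when
 * setting up the decryption transform.
 */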
1210
1211static load_return_t
1212unprotect_dsmos_segment(
1213 uint64_t file_off,
1214 uint64_t file_size,
1215 struct vnode *vp,
1216 off_t macho_offset,
1217 vm_map_t map,
1218 vm_map_offset_t map_addr,
1219 vm_map_size_t map_size)
1220{
1221 kern_return_t kr;
1222
1223 /*
1224 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
1225 * this part of a Universal binary) are not protected...
1226 * The rest needs to be "transformed".
1227 */
1228 if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
1229 file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
1230 /* it's all unprotected, nothing to do... */
1231 kr = KERN_SUCCESS;
1232 } else {
1233 if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
1234 /*
1235 * We start mapping in the unprotected area.
1236 * Skip the unprotected part...
1237 */
1238 vm_map_offset_t delta;
1239
1240 delta = APPLE_UNPROTECTED_HEADER_SIZE;
1241 delta -= file_off;
1242 map_addr += delta;
1243 map_size -= delta;
1244 }
1245 /* ... transform the rest of the mapping. */
1246 struct pager_crypt_info crypt_info;
1247 crypt_info.page_decrypt = dsmos_page_transform;
1248 crypt_info.crypt_ops = NULL;
1249 crypt_info.crypt_end = NULL;
1250#pragma unused(vp, macho_offset)
1251 crypt_info.crypt_ops = (void *)0x2e69cf40;
1252 vm_map_offset_t crypto_backing_offset;
1253 crypto_backing_offset = -1; /* i.e. use map entry's offset */
1254#if VM_MAP_DEBUG_APPLE_PROTECT
1255 if (vm_map_debug_apple_protect) {
1256 struct proc *p;
1257 p = current_proc();
1258 printf("APPLE_PROTECT: %d[%s] map %p "
1259 "[0x%llx:0x%llx] %s(%s)\n",
1260 p->p_pid, p->p_comm, map,
1261 (uint64_t) map_addr,
1262 (uint64_t) (map_addr + map_size),
1263 __FUNCTION__, vp->v_name);
1264 }
1265#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
1266
1267 /* The DSMOS pager can only be used by apple signed code */
1268 struct cs_blob * blob = csvnode_get_blob(vp, file_off);
1269 if( blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path)
1270 {
1271 return LOAD_FAILURE;
1272 }
1273
1274 kr = vm_map_apple_protected(map,
1275 map_addr,
1276 map_addr + map_size,
1277 crypto_backing_offset,
1278 &crypt_info);
1279 }
1280
1281 if (kr != KERN_SUCCESS) {
1282 return LOAD_FAILURE;
1283 }
1284 return LOAD_SUCCESS;
1285}
1286#else /* CONFIG_CODE_DECRYPTION */
1287static load_return_t
1288unprotect_dsmos_segment(
1289 __unused uint64_t file_off,
1290 __unused uint64_t file_size,
1291 __unused struct vnode *vp,
1292 __unused off_t macho_offset,
1293 __unused vm_map_t map,
1294 __unused vm_map_offset_t map_addr,
1295 __unused vm_map_size_t map_size)
1296{
1297 return LOAD_SUCCESS;
1298}
1299#endif /* CONFIG_CODE_DECRYPTION */
1300
1301
1302/*
1303 * map_segment:
1304 * Maps a Mach-O segment, taking care of mis-alignment (wrt the system
1305 * page size) issues.
1306 *
1307 * The mapping might result in 1, 2 or 3 map entries:
 * 1. for the first page, which could overlap with the previous
 * mapping,
1310 * 2. for the center (if applicable),
1311 * 3. for the last page, which could overlap with the next mapping.
1312 *
1313 * For each of those map entries, we might have to interpose a
1314 * "fourk_pager" to deal with mis-alignment wrt the system page size,
1315 * either in the mapping address and/or size or the file offset and/or
1316 * size.
1317 * The "fourk_pager" itself would be mapped with proper alignment
1318 * wrt the system page size and would then be populated with the
1319 * information about the intended mapping, with a "4KB" granularity.
1320 */
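/*
 * For example (a sketch, assuming a 16KB-page map and a 4KB-aligned
 * 32-bit segment): mapping vm [0x7000, 0x1d000) would produce a
 * fourk_pager-backed entry for [0x7000, 0x8000), a regular entry for
 * the aligned middle [0x8000, 0x1c000), and another fourk_pager-backed
 * entry for the trailing [0x1c000, 0x1d000).
 */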
1321static kern_return_t
1322map_segment(
1323 vm_map_t map,
1324 vm_map_offset_t vm_start,
1325 vm_map_offset_t vm_end,
1326 memory_object_control_t control,
1327 vm_map_offset_t file_start,
1328 vm_map_offset_t file_end,
1329 vm_prot_t initprot,
1330 vm_prot_t maxprot,
1331 load_result_t *result)
1332{
1333 vm_map_offset_t cur_offset, cur_start, cur_end;
1334 kern_return_t ret;
1335 vm_map_offset_t effective_page_mask;
1336 vm_map_kernel_flags_t vmk_flags, cur_vmk_flags;
1337
1338 if (vm_end < vm_start ||
1339 file_end < file_start) {
1340 return LOAD_BADMACHO;
1341 }
1342 if (vm_end == vm_start ||
1343 file_end == file_start) {
1344 /* nothing to map... */
1345 return LOAD_SUCCESS;
1346 }
1347
1348 effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
1349
1350 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1351 if (vm_map_page_aligned(vm_start, effective_page_mask) &&
1352 vm_map_page_aligned(vm_end, effective_page_mask) &&
1353 vm_map_page_aligned(file_start, effective_page_mask) &&
1354 vm_map_page_aligned(file_end, effective_page_mask)) {
1355 /* all page-aligned and map-aligned: proceed */
1356 } else {
1357#if __arm64__
1358 /* use an intermediate "4K" pager */
1359 vmk_flags.vmkf_fourk = TRUE;
1360#else /* __arm64__ */
1361 panic("map_segment: unexpected mis-alignment "
1362 "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n",
1363 (uint64_t) vm_start,
1364 (uint64_t) vm_end,
1365 (uint64_t) file_start,
1366 (uint64_t) file_end);
1367#endif /* __arm64__ */
1368 }
1369
1370 cur_offset = 0;
1371 cur_start = vm_start;
1372 cur_end = vm_start;
1373#if __arm64__
1374 if (!vm_map_page_aligned(vm_start, effective_page_mask)) {
1375 /* one 4K pager for the 1st page */
1376 cur_end = vm_map_round_page(cur_start, effective_page_mask);
1377 if (cur_end > vm_end) {
1378 cur_end = vm_start + (file_end - file_start);
1379 }
1380 if (control != MEMORY_OBJECT_CONTROL_NULL) {
1381 ret = vm_map_enter_mem_object_control(
1382 map,
1383 &cur_start,
1384 cur_end - cur_start,
1385 (mach_vm_offset_t)0,
1386 VM_FLAGS_FIXED,
1387 vmk_flags,
1388 VM_KERN_MEMORY_NONE,
1389 control,
1390 file_start + cur_offset,
1391 TRUE, /* copy */
1392 initprot, maxprot,
1393 VM_INHERIT_DEFAULT);
1394 } else {
1395 ret = vm_map_enter_mem_object(
1396 map,
1397 &cur_start,
1398 cur_end - cur_start,
1399 (mach_vm_offset_t)0,
1400 VM_FLAGS_FIXED,
1401 vmk_flags,
1402 VM_KERN_MEMORY_NONE,
1403 IPC_PORT_NULL,
1404 0, /* offset */
1405 TRUE, /* copy */
1406 initprot, maxprot,
1407 VM_INHERIT_DEFAULT);
1408 }
1409 if (ret != KERN_SUCCESS) {
1410 return (LOAD_NOSPACE);
1411 }
1412 cur_offset += cur_end - cur_start;
1413 }
1414#endif /* __arm64__ */
1415 if (cur_end >= vm_start + (file_end - file_start)) {
1416 /* all mapped: done */
1417 goto done;
1418 }
1419 if (vm_map_round_page(cur_end, effective_page_mask) >=
1420 vm_map_trunc_page(vm_start + (file_end - file_start),
1421 effective_page_mask)) {
1422 /* no middle */
1423 } else {
1424 cur_start = cur_end;
1425 if ((vm_start & effective_page_mask) !=
1426 (file_start & effective_page_mask)) {
1427 /* one 4K pager for the middle */
1428 cur_vmk_flags = vmk_flags;
1429 } else {
1430 /* regular mapping for the middle */
1431 cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1432 }
1433
1434#if CONFIG_EMBEDDED
1435 (void) result;
1436#else /* CONFIG_EMBEDDED */
1437 /*
1438 * This process doesn't have its new csflags (from
1439 * the image being loaded) yet, so tell VM to override the
1440 * current process's CS_ENFORCEMENT for this mapping.
1441 */
1442 if (result->csflags & CS_ENFORCEMENT) {
1443 cur_vmk_flags.vmkf_cs_enforcement = TRUE;
1444 } else {
1445 cur_vmk_flags.vmkf_cs_enforcement = FALSE;
1446 }
1447 cur_vmk_flags.vmkf_cs_enforcement_override = TRUE;
1448#endif /* CONFIG_EMBEDDED */
1449
1450 cur_end = vm_map_trunc_page(vm_start + (file_end -
1451 file_start),
1452 effective_page_mask);
1453 if (control != MEMORY_OBJECT_CONTROL_NULL) {
1454 ret = vm_map_enter_mem_object_control(
1455 map,
1456 &cur_start,
1457 cur_end - cur_start,
1458 (mach_vm_offset_t)0,
1459 VM_FLAGS_FIXED,
1460 cur_vmk_flags,
1461 VM_KERN_MEMORY_NONE,
1462 control,
1463 file_start + cur_offset,
1464 TRUE, /* copy */
1465 initprot, maxprot,
1466 VM_INHERIT_DEFAULT);
1467 } else {
1468 ret = vm_map_enter_mem_object(
1469 map,
1470 &cur_start,
1471 cur_end - cur_start,
1472 (mach_vm_offset_t)0,
1473 VM_FLAGS_FIXED,
1474 cur_vmk_flags,
1475 VM_KERN_MEMORY_NONE,
1476 IPC_PORT_NULL,
1477 0, /* offset */
1478 TRUE, /* copy */
1479 initprot, maxprot,
1480 VM_INHERIT_DEFAULT);
1481 }
1482 if (ret != KERN_SUCCESS) {
1483 return (LOAD_NOSPACE);
1484 }
1485 cur_offset += cur_end - cur_start;
1486 }
1487 if (cur_end >= vm_start + (file_end - file_start)) {
1488 /* all mapped: done */
1489 goto done;
1490 }
1491 cur_start = cur_end;
1492#if __arm64__
1493 if (!vm_map_page_aligned(vm_start + (file_end - file_start),
1494 effective_page_mask)) {
1495 /* one 4K pager for the last page */
1496 cur_end = vm_start + (file_end - file_start);
1497 if (control != MEMORY_OBJECT_CONTROL_NULL) {
1498 ret = vm_map_enter_mem_object_control(
1499 map,
1500 &cur_start,
1501 cur_end - cur_start,
1502 (mach_vm_offset_t)0,
1503 VM_FLAGS_FIXED,
1504 vmk_flags,
1505 VM_KERN_MEMORY_NONE,
1506 control,
1507 file_start + cur_offset,
1508 TRUE, /* copy */
1509 initprot, maxprot,
1510 VM_INHERIT_DEFAULT);
1511 } else {
1512 ret = vm_map_enter_mem_object(
1513 map,
1514 &cur_start,
1515 cur_end - cur_start,
1516 (mach_vm_offset_t)0,
1517 VM_FLAGS_FIXED,
1518 vmk_flags,
1519 VM_KERN_MEMORY_NONE,
1520 IPC_PORT_NULL,
1521 0, /* offset */
1522 TRUE, /* copy */
1523 initprot, maxprot,
1524 VM_INHERIT_DEFAULT);
1525 }
1526 if (ret != KERN_SUCCESS) {
1527 return (LOAD_NOSPACE);
1528 }
1529 cur_offset += cur_end - cur_start;
1530 }
1531#endif /* __arm64__ */
1532done:
1533 assert(cur_end >= vm_start + (file_end - file_start));
1534 return LOAD_SUCCESS;
1535}
1536
1537static
1538load_return_t
1539load_segment(
1540 struct load_command *lcp,
1541 uint32_t filetype,
1542 void * control,
1543 off_t pager_offset,
1544 off_t macho_size,
1545 struct vnode *vp,
1546 vm_map_t map,
1547 int64_t slide,
1548 load_result_t *result)
1549{
1550 struct segment_command_64 segment_command, *scp;
1551 kern_return_t ret;
1552 vm_map_size_t delta_size;
1553 vm_prot_t initprot;
1554 vm_prot_t maxprot;
1555 size_t segment_command_size, total_section_size,
1556 single_section_size;
1557 vm_map_offset_t file_offset, file_size;
1558 vm_map_offset_t vm_offset, vm_size;
1559 vm_map_offset_t vm_start, vm_end, vm_end_aligned;
1560 vm_map_offset_t file_start, file_end;
1561 kern_return_t kr;
1562 boolean_t verbose;
1563 vm_map_size_t effective_page_size;
1564 vm_map_offset_t effective_page_mask;
1565#if __arm64__
1566 vm_map_kernel_flags_t vmk_flags;
1567 boolean_t fourk_align;
1568#endif /* __arm64__ */
1569
1570 effective_page_size = MAX(PAGE_SIZE, vm_map_page_size(map));
1571 effective_page_mask = MAX(PAGE_MASK, vm_map_page_mask(map));
1572
1573 verbose = FALSE;
1574 if (LC_SEGMENT_64 == lcp->cmd) {
1575 segment_command_size = sizeof(struct segment_command_64);
1576 single_section_size = sizeof(struct section_64);
1577#if __arm64__
1578 /* 64-bit binary: should already be 16K-aligned */
1579 fourk_align = FALSE;
1580#endif /* __arm64__ */
1581 } else {
1582 segment_command_size = sizeof(struct segment_command);
1583 single_section_size = sizeof(struct section);
1584#if __arm64__
1585 /* 32-bit binary: might need 4K-alignment */
1586 if (effective_page_size != FOURK_PAGE_SIZE) {
1587 /* not using 4K page size: need fourk_pager */
1588 fourk_align = TRUE;
1589 verbose = TRUE;
1590 } else {
1591 /* using 4K page size: no need for re-alignment */
1592 fourk_align = FALSE;
1593 }
1594#endif /* __arm64__ */
1595 }
1596 if (lcp->cmdsize < segment_command_size)
1597 return (LOAD_BADMACHO);
1598 total_section_size = lcp->cmdsize - segment_command_size;
1599
1600 if (LC_SEGMENT_64 == lcp->cmd) {
1601 scp = (struct segment_command_64 *)lcp;
1602 } else {
1603 scp = &segment_command;
1604 widen_segment_command((struct segment_command *)lcp, scp);
1605 }
1606
1607 if (verbose) {
1608 MACHO_PRINTF(("+++ load_segment %s "
1609 "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] "
1610 "prot %d/%d flags 0x%x\n",
1611 scp->segname,
1612 (uint64_t)(slide + scp->vmaddr),
1613 (uint64_t)(slide + scp->vmaddr + scp->vmsize),
1614 pager_offset + scp->fileoff,
1615 pager_offset + scp->fileoff + scp->filesize,
1616 scp->initprot,
1617 scp->maxprot,
1618 scp->flags));
1619 }
1620
1621 /*
1622 * Make sure what we get from the file is really ours (as specified
1623 * by macho_size).
1624 */
1625 if (scp->fileoff + scp->filesize < scp->fileoff ||
1626 scp->fileoff + scp->filesize > (uint64_t)macho_size) {
1627 return (LOAD_BADMACHO);
1628 }
1629 /*
1630 * Ensure that the number of sections specified would fit
1631 * within the load command size.
1632 */
1633 if (total_section_size / single_section_size < scp->nsects) {
1634 return (LOAD_BADMACHO);
1635 }
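 /*
 * For instance, a 32-bit LC_SEGMENT has a 56-byte fixed part and
 * 68-byte struct section entries, so a cmdsize of 260 leaves room
 * for at most (260 - 56) / 68 = 3 sections.
 */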
1636 /*
1637 * Make sure the segment is page-aligned in the file.
1638 */
1639 file_offset = pager_offset + scp->fileoff; /* limited to 32 bits */
1640 file_size = scp->filesize;
1641#if __arm64__
1642 if (fourk_align) {
1643 if ((file_offset & FOURK_PAGE_MASK) != 0) {
1644 /*
1645 * we can't mmap() it if it's not at least 4KB-aligned
1646 * in the file
1647 */
1648 return LOAD_BADMACHO;
1649 }
1650 } else
1651#endif /* __arm64__ */
1652 if ((file_offset & PAGE_MASK_64) != 0 ||
1653 /* we can't mmap() it if it's not page-aligned in the file */
1654 (file_offset & vm_map_page_mask(map)) != 0) {
 /*
 * The 1st test would have failed if the system's page size
 * were what this process believes the page size to be, so
 * let's fail here too for the sake of consistency.
 */
1660 return (LOAD_BADMACHO);
1661 }
1662
1663 /*
 * If we have a code signature attached for this slice,
 * require that the segments are within the signed part
1666 * of the file.
1667 */
1668 if (result->cs_end_offset &&
1669 result->cs_end_offset < (off_t)scp->fileoff &&
1670 result->cs_end_offset - scp->fileoff < scp->filesize)
1671 {
1672 if (cs_debug)
1673 printf("section outside code signature\n");
1674 return LOAD_BADMACHO;
1675 }
1676
1677 vm_offset = scp->vmaddr + slide;
1678 vm_size = scp->vmsize;
1679
1680 if (vm_size == 0)
1681 return (LOAD_SUCCESS);
1682 if (scp->vmaddr == 0 &&
1683 file_size == 0 &&
1684 vm_size != 0 &&
1685 (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
1686 (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
1687 /*
1688 * For PIE, extend page zero rather than moving it. Extending
1689 * page zero keeps early allocations from falling predictably
1690 * between the end of page zero and the beginning of the first
1691 * slid segment.
1692 */
1693 /*
1694 * This is a "page zero" segment: it starts at address 0,
1695 * is not mapped from the binary file and is not accessible.
1696 * User-space should never be able to access that memory, so
1697 * make it completely off limits by raising the VM map's
1698 * minimum offset.
1699 */
1700 vm_end = vm_offset + vm_size;
1701 if (vm_end < vm_offset) {
1702 return (LOAD_BADMACHO);
1703 }
1704 if (verbose) {
1705 MACHO_PRINTF(("++++++ load_segment: "
1706 "page_zero up to 0x%llx\n",
1707 (uint64_t) vm_end));
1708 }
1709#if __arm64__
1710 if (fourk_align) {
1711 /* raise min_offset as much as page-alignment allows */
1712 vm_end_aligned = vm_map_trunc_page(vm_end,
1713 effective_page_mask);
1714 } else
1715#endif /* __arm64__ */
1716 {
1717 vm_end = vm_map_round_page(vm_end,
1718 PAGE_MASK_64);
1719 vm_end_aligned = vm_end;
1720 }
1721 ret = vm_map_raise_min_offset(map,
1722 vm_end_aligned);
1723#if __arm64__
1724 if (ret == 0 &&
1725 vm_end > vm_end_aligned) {
1726 /* use fourk_pager to map the rest of pagezero */
1727 assert(fourk_align);
1728 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1729 vmk_flags.vmkf_fourk = TRUE;
1730 ret = vm_map_enter_mem_object(
1731 map,
1732 &vm_end_aligned,
1733 vm_end - vm_end_aligned,
1734 (mach_vm_offset_t) 0, /* mask */
1735 VM_FLAGS_FIXED,
1736 vmk_flags,
1737 VM_KERN_MEMORY_NONE,
1738 IPC_PORT_NULL,
1739 0,
1740 FALSE, /* copy */
1741 (scp->initprot & VM_PROT_ALL),
1742 (scp->maxprot & VM_PROT_ALL),
1743 VM_INHERIT_DEFAULT);
1744 }
1745#endif /* __arm64__ */
1746
1747 if (ret != KERN_SUCCESS) {
1748 return (LOAD_FAILURE);
1749 }
1750 return (LOAD_SUCCESS);
1751 } else {
1752#if CONFIG_EMBEDDED
1753 /* not PAGEZERO: should not be mapped at address 0 */
1754 if (filetype != MH_DYLINKER && scp->vmaddr == 0) {
1755 return LOAD_BADMACHO;
1756 }
1757#endif /* CONFIG_EMBEDDED */
1758 }
1759
1760#if __arm64__
1761 if (fourk_align) {
1762 /* 4K-align */
1763 file_start = vm_map_trunc_page(file_offset,
1764 FOURK_PAGE_MASK);
1765 file_end = vm_map_round_page(file_offset + file_size,
1766 FOURK_PAGE_MASK);
1767 vm_start = vm_map_trunc_page(vm_offset,
1768 FOURK_PAGE_MASK);
1769 vm_end = vm_map_round_page(vm_offset + vm_size,
1770 FOURK_PAGE_MASK);
1771 if (!strncmp(scp->segname, "__LINKEDIT", 11) &&
1772 page_aligned(file_start) &&
1773 vm_map_page_aligned(file_start, vm_map_page_mask(map)) &&
1774 page_aligned(vm_start) &&
1775 vm_map_page_aligned(vm_start, vm_map_page_mask(map))) {
1776 /* XXX last segment: ignore mis-aligned tail */
1777 file_end = vm_map_round_page(file_end,
1778 effective_page_mask);
1779 vm_end = vm_map_round_page(vm_end,
1780 effective_page_mask);
1781 }
1782 } else
1783#endif /* __arm64__ */
1784 {
1785 file_start = vm_map_trunc_page(file_offset,
1786 effective_page_mask);
1787 file_end = vm_map_round_page(file_offset + file_size,
1788 effective_page_mask);
1789 vm_start = vm_map_trunc_page(vm_offset,
1790 effective_page_mask);
1791 vm_end = vm_map_round_page(vm_offset + vm_size,
1792 effective_page_mask);
1793 }
1794
1795 if (vm_start < result->min_vm_addr)
1796 result->min_vm_addr = vm_start;
1797 if (vm_end > result->max_vm_addr)
1798 result->max_vm_addr = vm_end;
1799
1800 if (map == VM_MAP_NULL)
1801 return (LOAD_SUCCESS);
1802
1803 if (vm_size > 0) {
1804 initprot = (scp->initprot) & VM_PROT_ALL;
1805 maxprot = (scp->maxprot) & VM_PROT_ALL;
1806 /*
1807 * Map a copy of the file into the address space.
1808 */
1809 if (verbose) {
1810 MACHO_PRINTF(("++++++ load_segment: "
1811 "mapping at vm [0x%llx:0x%llx] of "
1812 "file [0x%llx:0x%llx]\n",
1813 (uint64_t) vm_start,
1814 (uint64_t) vm_end,
1815 (uint64_t) file_start,
1816 (uint64_t) file_end));
1817 }
1818 ret = map_segment(map,
1819 vm_start,
1820 vm_end,
1821 control,
1822 file_start,
1823 file_end,
1824 initprot,
1825 maxprot,
1826 result);
1827 if (ret) {
1828 return LOAD_NOSPACE;
1829 }
1830
1831#if FIXME
1832 /*
1833 * If the file didn't end on a page boundary,
1834 * we need to zero the leftover.
1835 */
1836 delta_size = map_size - scp->filesize;
1837 if (delta_size > 0) {
1838 mach_vm_offset_t tmp;
1839
1840 ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD);
1841 if (ret != KERN_SUCCESS) {
1842 return(LOAD_RESOURCE);
1843 }
1844
1845 if (copyout(tmp, map_addr + scp->filesize,
1846 delta_size)) {
1847 (void) mach_vm_deallocate(
1848 kernel_map, tmp, delta_size);
1849 return (LOAD_FAILURE);
1850 }
1851
1852 (void) mach_vm_deallocate(kernel_map, tmp, delta_size);
1853 }
1854#endif /* FIXME */
1855 }
1856
1857 /*
1858 * If the virtual size of the segment is greater
1859 * than the size from the file, we need to allocate
1860 * zero fill memory for the rest.
1861 */
1862 if ((vm_end - vm_start) > (file_end - file_start)) {
1863 delta_size = (vm_end - vm_start) - (file_end - file_start);
1864 } else {
1865 delta_size = 0;
1866 }
1867 if (delta_size > 0) {
1868 mach_vm_offset_t tmp;
1869
1870 tmp = vm_start + (file_end - file_start);
1871 if (verbose) {
1872 MACHO_PRINTF(("++++++ load_segment: "
1873 "delta mapping vm [0x%llx:0x%llx]\n",
1874 (uint64_t) tmp,
1875 (uint64_t) (tmp + delta_size)));
1876 }
1877 kr = map_segment(map,
1878 tmp,
1879 tmp + delta_size,
1880 MEMORY_OBJECT_CONTROL_NULL,
1881 0,
1882 delta_size,
1883 scp->initprot,
1884 scp->maxprot,
1885 result);
1886 if (kr != KERN_SUCCESS) {
1887 return(LOAD_NOSPACE);
1888 }
1889 }
1890
1891 if ( (scp->fileoff == 0) && (scp->filesize != 0) )
1892 result->mach_header = vm_offset;
1893
1894 if (scp->flags & SG_PROTECTED_VERSION_1) {
1895 ret = unprotect_dsmos_segment(file_start,
1896 file_end - file_start,
1897 vp,
1898 pager_offset,
1899 map,
1900 vm_start,
1901 vm_end - vm_start);
1902 if (ret != LOAD_SUCCESS) {
1903 return ret;
1904 }
1905 } else {
1906 ret = LOAD_SUCCESS;
1907 }
1908
1909 if (LOAD_SUCCESS == ret &&
1910 filetype == MH_DYLINKER &&
1911 result->all_image_info_addr == MACH_VM_MIN_ADDRESS) {
1912 note_all_image_info_section(scp,
1913 LC_SEGMENT_64 == lcp->cmd,
1914 single_section_size,
1915 ((const char *)lcp +
1916 segment_command_size),
1917 slide,
1918 result);
1919 }
1920
1921 if (result->entry_point != MACH_VM_MIN_ADDRESS) {
1922 if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) {
1923 if ((scp->initprot & (VM_PROT_READ|VM_PROT_EXECUTE)) == (VM_PROT_READ|VM_PROT_EXECUTE)) {
1924 result->validentry = 1;
1925 } else {
1926 /* right range but wrong protections, unset if previously validated */
1927 result->validentry = 0;
1928 }
1929 }
1930 }
1931
1932 return ret;
1933}
1934
1935static
1936load_return_t
1937load_uuid(
1938 struct uuid_command *uulp,
1939 char *command_end,
1940 load_result_t *result
1941)
1942{
1943 /*
1944 * We need to check the following for this command:
 * - The command size should be at least the size of struct uuid_command
1946 * - The UUID part of the command should be completely within the mach-o header
1947 */
1948
1949 if ((uulp->cmdsize < sizeof(struct uuid_command)) ||
1950 (((char *)uulp + sizeof(struct uuid_command)) > command_end)) {
1951 return (LOAD_BADMACHO);
1952 }
1953
1954 memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid));
1955 return (LOAD_SUCCESS);
1956}
1957
1958static
1959load_return_t
1960load_main(
1961 struct entry_point_command *epc,
1962 thread_t thread,
1963 int64_t slide,
1964 load_result_t *result
1965)
1966{
1967 mach_vm_offset_t addr;
1968 kern_return_t ret;
1969
1970 if (epc->cmdsize < sizeof(*epc))
1971 return (LOAD_BADMACHO);
1972 if (result->thread_count != 0) {
1973 return (LOAD_FAILURE);
1974 }
1975
1976 if (thread == THREAD_NULL)
1977 return (LOAD_SUCCESS);
1978
1979 /*
1980 * LC_MAIN specifies stack size but not location.
1981 * Add guard page to allocation size (MAXSSIZ includes guard page).
1982 */
1983 if (epc->stacksize) {
1984 if (os_add_overflow(epc->stacksize, 4*PAGE_SIZE, &result->user_stack_size)) {
 /*
 * We are going to immediately throw away this result, but we
 * want to make sure we aren't loading a value dangerously close
 * to overflowing, since it will have a guard page added to it
 * and be rounded up to page boundaries.
 */
1991 return LOAD_BADMACHO;
1992 }
1993 result->user_stack_size = epc->stacksize;
1994 if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) {
1995 return LOAD_BADMACHO;
1996 }
1997 } else {
1998 result->user_stack_alloc_size = MAXSSIZ;
1999 }
2000
2001 /* use default location for stack */
2002 ret = thread_userstackdefault(&addr, result->is_64bit_addr);
2003 if (ret != KERN_SUCCESS)
2004 return(LOAD_FAILURE);
2005
2006 /* The stack slides down from the default location */
2007 result->user_stack = addr;
2008 result->user_stack -= slide;
2009
2010 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2011 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2012 return (LOAD_FAILURE);
2013 }
2014
2015 /* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. */
2016 result->needs_dynlinker = TRUE;
2017 result->using_lcmain = TRUE;
2018
2019 ret = thread_state_initialize( thread );
2020 if (ret != KERN_SUCCESS) {
2021 return(LOAD_FAILURE);
2022 }
2023
2024 result->unixproc = TRUE;
2025 result->thread_count++;
2026
2027 return(LOAD_SUCCESS);
2028}
2029
2030
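/*
 * load_unixthread: process an LC_UNIXTHREAD load command.  Extracts the
 * initial user stack (custom or default), the entry point, and the register
 * state for the initial thread.
 */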
2031static
2032load_return_t
2033load_unixthread(
2034 struct thread_command *tcp,
2035 thread_t thread,
2036 int64_t slide,
2037 load_result_t *result
2038)
2039{
2040 load_return_t ret;
2041	int customstack = 0;
2042 mach_vm_offset_t addr;
2043 if (tcp->cmdsize < sizeof(*tcp))
2044 return (LOAD_BADMACHO);
2045 if (result->thread_count != 0) {
2046 return (LOAD_FAILURE);
2047 }
2048
2049 if (thread == THREAD_NULL)
2050 return (LOAD_SUCCESS);
2051
2052 ret = load_threadstack(thread,
2053 (uint32_t *)(((vm_offset_t)tcp) +
2054 sizeof(struct thread_command)),
2055 tcp->cmdsize - sizeof(struct thread_command),
2056 &addr, &customstack, result);
2057 if (ret != LOAD_SUCCESS)
2058 return(ret);
2059
2060 /* LC_UNIXTHREAD optionally specifies stack size and location */
2061
2062 if (!customstack) {
2063 result->user_stack_alloc_size = MAXSSIZ;
2064 }
2065
2066 /* The stack slides down from the default location */
2067 result->user_stack = addr;
2068 result->user_stack -= slide;
2069
2070 ret = load_threadentry(thread,
2071 (uint32_t *)(((vm_offset_t)tcp) +
2072 sizeof(struct thread_command)),
2073 tcp->cmdsize - sizeof(struct thread_command),
2074 &addr);
2075 if (ret != LOAD_SUCCESS)
2076 return(ret);
2077
2078 if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) {
2079 /* Already processed LC_MAIN or LC_UNIXTHREAD */
2080 return (LOAD_FAILURE);
2081 }
2082
2083 result->entry_point = addr;
2084 result->entry_point += slide;
2085
2086 ret = load_threadstate(thread,
2087 (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)),
2088 tcp->cmdsize - sizeof(struct thread_command),
2089 result);
2090 if (ret != LOAD_SUCCESS)
2091 return (ret);
2092
2093 result->unixproc = TRUE;
2094 result->thread_count++;
2095
2096 return(LOAD_SUCCESS);
2097}
2098
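/*
 * load_threadstate: copy the thread state flavors out of the load command
 * and validate their sizes.  The copied state is saved in the load result
 * for later use when the thread is activated.
 */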
2099static
2100load_return_t
2101load_threadstate(
2102 thread_t thread,
2103 uint32_t *ts,
2104 uint32_t total_size,
2105 load_result_t *result
2106)
2107{
2108 uint32_t size;
2109 int flavor;
2110 uint32_t thread_size;
2111 uint32_t *local_ts = NULL;
2112 uint32_t local_ts_size = 0;
2113 int ret;
2114
2115 (void)thread;
2116
2117 if (total_size > 0) {
2118 local_ts_size = total_size;
2119 local_ts = kalloc(local_ts_size);
2120 if (local_ts == NULL) {
2121 return LOAD_FAILURE;
2122 }
2123 memcpy(local_ts, ts, local_ts_size);
2124 ts = local_ts;
2125 }
2126
2127 /*
2128 * Validate the new thread state; iterate through the state flavors in
2129 * the Mach-O file.
2130 * XXX: we should validate the machine state here, to avoid failing at
2131 * activation time where we can't bail out cleanly.
2132 */
2133 while (total_size > 0) {
2134 flavor = *ts++;
2135 size = *ts++;
2136
2137 if (os_add_and_mul_overflow(size, 2, sizeof(uint32_t), &thread_size) ||
2138 os_sub_overflow(total_size, thread_size, &total_size)) {
2139 ret = LOAD_BADMACHO;
2140 goto bad;
2141 }
2142
2143 ts += size; /* ts is a (uint32_t *) */
2144 }
2145
2146 result->threadstate = local_ts;
2147 result->threadstate_sz = local_ts_size;
2148 return LOAD_SUCCESS;
2149
2150bad:
2151 if (local_ts) {
2152 kfree(local_ts, local_ts_size);
2153 }
2154 return ret;
2155}
2156
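/*
 * load_threadstack: walk the thread state flavors and let thread_userstack()
 * derive the initial user stack pointer, noting whether the command supplied
 * a custom stack.
 */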
2157static
2158load_return_t
2159load_threadstack(
2160 thread_t thread,
2161 uint32_t *ts,
2162 uint32_t total_size,
2163 mach_vm_offset_t *user_stack,
2164 int *customstack,
2165 load_result_t *result
2166)
2167{
2168 kern_return_t ret;
2169 uint32_t size;
2170 int flavor;
2171 uint32_t stack_size;
2172
2173 while (total_size > 0) {
2174 flavor = *ts++;
2175 size = *ts++;
2176 if (UINT32_MAX-2 < size ||
2177 UINT32_MAX/sizeof(uint32_t) < size+2)
2178 return (LOAD_BADMACHO);
2179 stack_size = (size+2)*sizeof(uint32_t);
2180 if (stack_size > total_size)
2181 return(LOAD_BADMACHO);
2182 total_size -= stack_size;
2183
2184 /*
2185 * Third argument is a kernel space pointer; it gets cast
2186 * to the appropriate type in thread_userstack() based on
2187 * the value of flavor.
2188 */
2189 ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data);
2190 if (ret != KERN_SUCCESS) {
2191 return(LOAD_FAILURE);
2192 }
2193 ts += size; /* ts is a (uint32_t *) */
2194 }
2195 return(LOAD_SUCCESS);
2196}
2197
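/*
 * load_threadentry: walk the thread state flavors and let thread_entrypoint()
 * extract the initial program counter.
 */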
2198static
2199load_return_t
2200load_threadentry(
2201 thread_t thread,
2202 uint32_t *ts,
2203 uint32_t total_size,
2204 mach_vm_offset_t *entry_point
2205)
2206{
2207 kern_return_t ret;
2208 uint32_t size;
2209 int flavor;
2210 uint32_t entry_size;
2211
2212 /*
2213	 * Pick the entry point out of the thread state flavors.
2214 */
2215 *entry_point = MACH_VM_MIN_ADDRESS;
2216 while (total_size > 0) {
2217 flavor = *ts++;
2218 size = *ts++;
2219 if (UINT32_MAX-2 < size ||
2220 UINT32_MAX/sizeof(uint32_t) < size+2)
2221 return (LOAD_BADMACHO);
2222 entry_size = (size+2)*sizeof(uint32_t);
2223 if (entry_size > total_size)
2224 return(LOAD_BADMACHO);
2225 total_size -= entry_size;
2226 /*
2227 * Third argument is a kernel space pointer; it gets cast
2228 * to the appropriate type in thread_entrypoint() based on
2229 * the value of flavor.
2230 */
2231 ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
2232 if (ret != KERN_SUCCESS) {
2233 return(LOAD_FAILURE);
2234 }
2235 ts += size; /* ts is a (uint32_t *) */
2236 }
2237 return(LOAD_SUCCESS);
2238}
2239
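/*
 * Scratch data for get_macho_vnode(): a nameidata plus a buffer large enough
 * to read either a Mach-O or a fat header.  Kept in one struct so callers can
 * heap-allocate it rather than growing the kernel stack.
 */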
2240struct macho_data {
2241 struct nameidata __nid;
2242 union macho_vnode_header {
2243 struct mach_header mach_header;
2244 struct fat_header fat_header;
2245 char __pad[512];
2246 } __header;
2247};
2248
2249#define DEFAULT_DYLD_PATH "/usr/lib/dyld"
2250
2251#if (DEVELOPMENT || DEBUG)
2252extern char dyld_alt_path[];
2253extern int use_alt_dyld;
2254#endif
2255
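/*
 * get_va_fsid: return the filesystem ID from the vnode attributes, preferring
 * the 64-bit form when the filesystem supports it.
 */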
2256static uint64_t get_va_fsid(struct vnode_attr *vap)
2257{
2258 if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
2259 return *(uint64_t *)&vap->va_fsid64;
2260 } else {
2261 return vap->va_fsid;
2262 }
2263}
2264
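/*
 * load_dylinker: load the dynamic linker named by the dylinker load command
 * into the target map and propagate its entry point, thread state, and image
 * info into the main load result.
 */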
2265static load_return_t
2266load_dylinker(
2267 struct dylinker_command *lcp,
2268 integer_t archbits,
2269 vm_map_t map,
2270 thread_t thread,
2271 int depth,
2272 int64_t slide,
2273 load_result_t *result,
2274 struct image_params *imgp
2275)
2276{
2277 const char *name;
2278 struct vnode *vp = NULLVP; /* set by get_macho_vnode() */
2279 struct mach_header *header;
2280 off_t file_offset = 0; /* set by get_macho_vnode() */
2281 off_t macho_size = 0; /* set by get_macho_vnode() */
2282 load_result_t *myresult;
2283 kern_return_t ret;
2284 struct macho_data *macho_data;
2285 struct {
2286 struct mach_header __header;
2287 load_result_t __myresult;
2288 struct macho_data __macho_data;
2289 } *dyld_data;
2290
2291 if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize)
2292 return LOAD_BADMACHO;
2293
2294 name = (const char *)lcp + lcp->name.offset;
2295
2296	/* Check for a properly null-terminated string. */
2297 size_t maxsz = lcp->cmdsize - lcp->name.offset;
2298 size_t namelen = strnlen(name, maxsz);
2299 if (namelen >= maxsz) {
2300 return LOAD_BADMACHO;
2301 }
2302
2303#if (DEVELOPMENT || DEBUG)
2304
2305 /*
2306 * rdar://23680808
2307 * If an alternate dyld has been specified via boot args, check
2308 * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this
2309 * executable and redirect the kernel to load that linker.
2310 */
2311
2312 if (use_alt_dyld) {
2313 int policy_error;
2314 uint32_t policy_flags = 0;
2315 int32_t policy_gencount = 0;
2316
2317 policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount);
2318 if (policy_error == 0) {
2319 if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) {
2320 name = dyld_alt_path;
2321 }
2322 }
2323 }
2324#endif
2325
2326#if !(DEVELOPMENT || DEBUG)
2327 if (0 != strcmp(name, DEFAULT_DYLD_PATH)) {
2328 return (LOAD_BADMACHO);
2329 }
2330#endif
2331
2332	/* Allocate this working data from the heap to avoid excessively deep kernel stacks */
2333
2334 MALLOC(dyld_data, void *, sizeof (*dyld_data), M_TEMP, M_WAITOK);
2335 header = &dyld_data->__header;
2336 myresult = &dyld_data->__myresult;
2337 macho_data = &dyld_data->__macho_data;
2338
2339 ret = get_macho_vnode(name, archbits, header,
2340 &file_offset, &macho_size, macho_data, &vp);
2341 if (ret)
2342 goto novp_out;
2343
2344 *myresult = load_result_null;
2345 myresult->is_64bit_addr = result->is_64bit_addr;
2346 myresult->is_64bit_data = result->is_64bit_data;
2347
2348 ret = parse_machfile(vp, map, thread, header, file_offset,
2349 macho_size, depth, slide, 0, myresult, result, imgp);
2350
2351 if (ret == LOAD_SUCCESS) {
2352 if (result->threadstate) {
2353 /* don't use the app's threadstate if we have a dyld */
2354 kfree(result->threadstate, result->threadstate_sz);
2355 }
2356 result->threadstate = myresult->threadstate;
2357 result->threadstate_sz = myresult->threadstate_sz;
2358
2359 result->dynlinker = TRUE;
2360 result->entry_point = myresult->entry_point;
2361 result->validentry = myresult->validentry;
2362 result->all_image_info_addr = myresult->all_image_info_addr;
2363 result->all_image_info_size = myresult->all_image_info_size;
2364 if (myresult->platform_binary) {
2365 result->csflags |= CS_DYLD_PLATFORM;
2366 }
2367 }
2368
2369 struct vnode_attr va;
2370 VATTR_INIT(&va);
2371 VATTR_WANTED(&va, va_fsid64);
2372 VATTR_WANTED(&va, va_fsid);
2373 VATTR_WANTED(&va, va_fileid);
2374 int error = vnode_getattr(vp, &va, imgp->ip_vfs_context);
2375 if (error == 0) {
2376 imgp->ip_dyld_fsid = get_va_fsid(&va);
2377 imgp->ip_dyld_fsobjid = va.va_fileid;
2378 }
2379
2380 vnode_put(vp);
2381novp_out:
2382 FREE(dyld_data, M_TEMP);
2383 return (ret);
2384
2385}
2386
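/*
 * load_code_signature: read the code signature blob referenced by an
 * LC_CODE_SIGNATURE command, register it with UBC, and record the resulting
 * code-signing flags in the load result.
 */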
2387static load_return_t
2388load_code_signature(
2389 struct linkedit_data_command *lcp,
2390 struct vnode *vp,
2391 off_t macho_offset,
2392 off_t macho_size,
2393 cpu_type_t cputype,
2394 load_result_t *result,
2395 struct image_params *imgp)
2396{
2397 int ret;
2398 kern_return_t kr;
2399 vm_offset_t addr;
2400 int resid;
2401 struct cs_blob *blob;
2402 int error;
2403 vm_size_t blob_size;
2404
2405 addr = 0;
2406 blob = NULL;
2407
2408 if (lcp->cmdsize != sizeof (struct linkedit_data_command) ||
2409 lcp->dataoff + lcp->datasize > macho_size) {
2410 ret = LOAD_BADMACHO;
2411 goto out;
2412 }
2413
2414 blob = ubc_cs_blob_get(vp, cputype, macho_offset);
2415
2416 if (blob != NULL) {
2417 /* we already have a blob for this vnode and cputype */
2418 if (blob->csb_cpu_type != cputype ||
2419 blob->csb_base_offset != macho_offset) {
2420			/* the blob has changed for this vnode: fail! */
2421 ret = LOAD_BADMACHO;
2422 goto out;
2423 }
2424
2425 /* It matches the blob we want here, let's verify the version */
2426 if (ubc_cs_generation_check(vp) == 0) {
2427 /* No need to revalidate, we're good! */
2428 ret = LOAD_SUCCESS;
2429 goto out;
2430 }
2431
2432 /* That blob may be stale, let's revalidate. */
2433 error = ubc_cs_blob_revalidate(vp, blob, imgp, 0);
2434 if (error == 0) {
2435 /* Revalidation succeeded, we're good! */
2436 ret = LOAD_SUCCESS;
2437 goto out;
2438 }
2439
2440 if (error != EAGAIN) {
2441 printf("load_code_signature: revalidation failed: %d\n", error);
2442 ret = LOAD_FAILURE;
2443 goto out;
2444 }
2445
2446 assert(error == EAGAIN);
2447
2448 /*
2449		 * Revalidation was not possible for this blob. Continue as if there were
2450		 * no blob: re-read the signature and let ubc_cs_blob_add() do the right thing.
2451 */
2452 blob = NULL;
2453 }
2454
2455 blob_size = lcp->datasize;
2456 kr = ubc_cs_blob_allocate(&addr, &blob_size);
2457 if (kr != KERN_SUCCESS) {
2458 ret = LOAD_NOSPACE;
2459 goto out;
2460 }
2461
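	/*
	 * Read the raw signature blob from the file into the buffer just
	 * allocated; a short or failed read is treated as an I/O error.
	 */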
2462 resid = 0;
2463 error = vn_rdwr(UIO_READ,
2464 vp,
2465 (caddr_t) addr,
2466 lcp->datasize,
2467 macho_offset + lcp->dataoff,
2468 UIO_SYSSPACE,
2469 0,
2470 kauth_cred_get(),
2471 &resid,
2472 current_proc());
2473 if (error || resid != 0) {
2474 ret = LOAD_IOERROR;
2475 goto out;
2476 }
2477
2478 if (ubc_cs_blob_add(vp,
2479 cputype,
2480 macho_offset,
2481 &addr,
2482 lcp->datasize,
2483 imgp,
2484 0,
2485 &blob)) {
2486 if (addr) {
2487 ubc_cs_blob_deallocate(addr, blob_size);
2488 }
2489 ret = LOAD_FAILURE;
2490 goto out;
2491 } else {
2492 /* ubc_cs_blob_add() has consumed "addr" */
2493 addr = 0;
2494 }
2495
2496#if CHECK_CS_VALIDATION_BITMAP
2497 ubc_cs_validation_bitmap_allocate( vp );
2498#endif
2499
2500 ret = LOAD_SUCCESS;
2501out:
2502 if (ret == LOAD_SUCCESS) {
2503 if (blob == NULL)
2504 panic("success, but no blob!");
2505
2506 result->csflags |= blob->csb_flags;
2507 result->platform_binary = blob->csb_platform_binary;
2508 result->cs_end_offset = blob->csb_end_offset;
2509 }
2510 if (addr != 0) {
2511 ubc_cs_blob_deallocate(addr, blob_size);
2512 addr = 0;
2513 }
2514
2515 return ret;
2516}
2517
2518
2519#if CONFIG_CODE_DECRYPTION
2520
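/*
 * set_code_unprotect: handle an encryption info load command.  Creates the
 * decrypter named by the cryptid and remaps the encrypted byte range of the
 * mapped binary through it via vm_map_apple_protected().
 */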
2521static load_return_t
2522set_code_unprotect(
2523 struct encryption_info_command *eip,
2524 caddr_t addr,
2525 vm_map_t map,
2526 int64_t slide,
2527 struct vnode *vp,
2528 off_t macho_offset,
2529 cpu_type_t cputype,
2530 cpu_subtype_t cpusubtype)
2531{
2532 int error, len;
2533 pager_crypt_info_t crypt_info;
2534 const char * cryptname = 0;
2535 char *vpath;
2536
2537 size_t offset;
2538 struct segment_command_64 *seg64;
2539 struct segment_command *seg32;
2540 vm_map_offset_t map_offset, map_size;
2541 vm_object_offset_t crypto_backing_offset;
2542 kern_return_t kr;
2543
2544 if (eip->cmdsize < sizeof(*eip)) return LOAD_BADMACHO;
2545
2546 switch(eip->cryptid) {
2547 case 0:
2548 /* not encrypted, just an empty load command */
2549 return LOAD_SUCCESS;
2550 case 1:
2551 cryptname="com.apple.unfree";
2552 break;
2553 case 0x10:
2554		/* a cryptid that can be set manually in a binary to select
2555		 * the com.apple.null decrypter */
2556 cryptname="com.apple.null";
2557 break;
2558 default:
2559 return LOAD_BADMACHO;
2560 }
2561
2562 if (map == VM_MAP_NULL) return (LOAD_SUCCESS);
2563 if (NULL == text_crypter_create) return LOAD_FAILURE;
2564
2565 MALLOC_ZONE(vpath, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
2566 if(vpath == NULL) return LOAD_FAILURE;
2567
2568 len = MAXPATHLEN;
2569 error = vn_getpath(vp, vpath, &len);
2570 if (error) {
2571 FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
2572 return LOAD_FAILURE;
2573 }
2574
2575 /* set up decrypter first */
2576 crypt_file_data_t crypt_data = {
2577 .filename = vpath,
2578 .cputype = cputype,
2579 .cpusubtype = cpusubtype};
2580 kr=text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data);
2581#if VM_MAP_DEBUG_APPLE_PROTECT
2582 if (vm_map_debug_apple_protect) {
2583 struct proc *p;
2584 p = current_proc();
2585 printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n",
2586 p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr);
2587 }
2588#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
2589 FREE_ZONE(vpath, MAXPATHLEN, M_NAMEI);
2590
2591	if (kr) {
2592 printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n",
2593 cryptname, kr);
2594 if (kr == kIOReturnNotPrivileged) {
2595 /* text encryption returned decryption failure */
2596 return(LOAD_DECRYPTFAIL);
2597		} else
2598			return LOAD_RESOURCE;
2599 }
2600
2601	/* This is unfortunate, but we have to rescan the load commands to find
2602	 * the segment that contains the encrypted range, so that we can compute
2603	 * the virtual address to remap. */
2604 struct mach_header *header = (struct mach_header *)addr;
2605 size_t mach_header_sz = sizeof(struct mach_header);
2606 if (header->magic == MH_MAGIC_64 ||
2607 header->magic == MH_CIGAM_64) {
2608 mach_header_sz = sizeof(struct mach_header_64);
2609 }
2610 offset = mach_header_sz;
2611 uint32_t ncmds = header->ncmds;
2612 while (ncmds--) {
2613 /*
2614 * Get a pointer to the command.
2615 */
2616 struct load_command *lcp = (struct load_command *)(addr + offset);
2617 offset += lcp->cmdsize;
2618
2619 switch(lcp->cmd) {
2620 case LC_SEGMENT_64:
2621 seg64 = (struct segment_command_64 *)lcp;
2622 if ((seg64->fileoff <= eip->cryptoff) &&
2623 (seg64->fileoff+seg64->filesize >=
2624 eip->cryptoff+eip->cryptsize)) {
2625 map_offset = seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide;
2626 map_size = eip->cryptsize;
2627 crypto_backing_offset = macho_offset + eip->cryptoff;
2628 goto remap_now;
2629 }
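		/*
		 * Note: control falls through from LC_SEGMENT_64 when that
		 * segment does not cover the encrypted range.
		 */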
2630 case LC_SEGMENT:
2631 seg32 = (struct segment_command *)lcp;
2632 if ((seg32->fileoff <= eip->cryptoff) &&
2633 (seg32->fileoff+seg32->filesize >=
2634 eip->cryptoff+eip->cryptsize)) {
2635 map_offset = seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide;
2636 map_size = eip->cryptsize;
2637 crypto_backing_offset = macho_offset + eip->cryptoff;
2638 goto remap_now;
2639 }
2640 }
2641 }
2642
2643	/* if we get here, we did not find a segment covering the encrypted range */
2644 return LOAD_BADMACHO;
2645
2646remap_now:
2647 /* now remap using the decrypter */
2648 MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n",
2649 (uint64_t) map_offset,
2650 (uint64_t) (map_offset+map_size)));
2651 kr = vm_map_apple_protected(map,
2652 map_offset,
2653 map_offset+map_size,
2654 crypto_backing_offset,
2655 &crypt_info);
2656 if (kr) {
2657 printf("set_code_unprotect(): mapping failed with %x\n", kr);
2658 return LOAD_PROTECT;
2659 }
2660
2661 return LOAD_SUCCESS;
2662}
2663
2664#endif
2665
2666/*
2667 * This routine exists to support load_dylinker().
2668 *
2669 * This routine has its own, separate understanding of the fat file format,
2670 * which is unfortunate.
2671 */
2672static
2673load_return_t
2674get_macho_vnode(
2675 const char *path,
2676 integer_t archbits,
2677 struct mach_header *mach_header,
2678 off_t *file_offset,
2679 off_t *macho_size,
2680 struct macho_data *data,
2681 struct vnode **vpp
2682)
2683{
2684 struct vnode *vp;
2685 vfs_context_t ctx = vfs_context_current();
2686 proc_t p = vfs_context_proc(ctx);
2687 kauth_cred_t kerncred;
2688 struct nameidata *ndp = &data->__nid;
2689 boolean_t is_fat;
2690 struct fat_arch fat_arch;
2691 int error;
2692 int resid;
2693 union macho_vnode_header *header = &data->__header;
2694 off_t fsize = (off_t)0;
2695
2696 /*
2697 * Capture the kernel credential for use in the actual read of the
2698 * file, since the user doing the execution may have execute rights
2699 * but not read rights, but to exec something, we have to either map
2700 * or read it into the new process address space, which requires
2701 * read rights. This is to deal with lack of common credential
2702 * serialization code which would treat NOCRED as "serialize 'root'".
2703 */
2704 kerncred = vfs_context_ucred(vfs_context_kernel());
2705
2706	/* init the namei data to point at the requested file path */
2707 NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx);
2708
2709 if ((error = namei(ndp)) != 0) {
2710 if (error == ENOENT) {
2711 error = LOAD_ENOENT;
2712 } else {
2713 error = LOAD_FAILURE;
2714 }
2715 return(error);
2716 }
2717 nameidone(ndp);
2718 vp = ndp->ni_vp;
2719
2720 /* check for regular file */
2721 if (vp->v_type != VREG) {
2722 error = LOAD_PROTECT;
2723 goto bad1;
2724 }
2725
2726 /* get size */
2727 if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
2728 error = LOAD_FAILURE;
2729 goto bad1;
2730 }
2731
2732 /* Check mount point */
2733 if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
2734 error = LOAD_PROTECT;
2735 goto bad1;
2736 }
2737
2738 /* check access */
2739 if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) {
2740 error = LOAD_PROTECT;
2741 goto bad1;
2742 }
2743
2744 /* try to open it */
2745 if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
2746 error = LOAD_PROTECT;
2747 goto bad1;
2748 }
2749
2750 if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof (*header), 0,
2751 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
2752 error = LOAD_IOERROR;
2753 goto bad2;
2754 }
2755
2756 if (resid) {
2757 error = LOAD_BADMACHO;
2758 goto bad2;
2759 }
2760
2761 if (header->mach_header.magic == MH_MAGIC ||
2762 header->mach_header.magic == MH_MAGIC_64) {
2763 is_fat = FALSE;
2764 } else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) {
2765 is_fat = TRUE;
2766 } else {
2767 error = LOAD_BADMACHO;
2768 goto bad2;
2769 }
2770
2771 if (is_fat) {
2772
2773 error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header),
2774 sizeof(*header));
2775 if (error != LOAD_SUCCESS) {
2776 goto bad2;
2777 }
2778
2779 /* Look up our architecture in the fat file. */
2780 error = fatfile_getarch_with_bits(archbits,
2781 (vm_offset_t)(&header->fat_header), sizeof(*header), &fat_arch);
2782 if (error != LOAD_SUCCESS)
2783 goto bad2;
2784
2785 /* Read the Mach-O header out of it */
2786 error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header,
2787 sizeof (header->mach_header), fat_arch.offset,
2788 UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p);
2789 if (error) {
2790 error = LOAD_IOERROR;
2791 goto bad2;
2792 }
2793
2794 if (resid) {
2795 error = LOAD_BADMACHO;
2796 goto bad2;
2797 }
2798
2799 /* Is this really a Mach-O? */
2800 if (header->mach_header.magic != MH_MAGIC &&
2801 header->mach_header.magic != MH_MAGIC_64) {
2802 error = LOAD_BADMACHO;
2803 goto bad2;
2804 }
2805
2806 *file_offset = fat_arch.offset;
2807 *macho_size = fat_arch.size;
2808 } else {
2809 /*
2810 * Force get_macho_vnode() to fail if the architecture bits
2811 * do not match the expected architecture bits. This in
2812 * turn causes load_dylinker() to fail for the same reason,
2813 * so it ensures the dynamic linker and the binary are in
2814 * lock-step. This is potentially bad, if we ever add to
2815 * the CPU_ARCH_* bits any bits that are desirable but not
2816 * required, since the dynamic linker might work, but we will
2817 * refuse to load it because of this check.
2818 */
2819 if ((cpu_type_t)(header->mach_header.cputype & CPU_ARCH_MASK) != archbits) {
2820 error = LOAD_BADARCH;
2821 goto bad2;
2822 }
2823
2824 *file_offset = 0;
2825 *macho_size = fsize;
2826 }
2827
2828 *mach_header = header->mach_header;
2829 *vpp = vp;
2830
2831 ubc_setsize(vp, fsize);
2832 return (error);
2833
2834bad2:
2835 (void) VNOP_CLOSE(vp, FREAD, ctx);
2836bad1:
2837 vnode_put(vp);
2838 return(error);
2839}
2840