/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * This file contains static dyld helper functions for
 * exclusive use in platform startup code.
 */

#include <mach-o/fixup-chains.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>	/* struct nlist_64, N_STAB */

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif /* defined(HAS_APPLE_PAC) */

#ifndef dyldLogFunc
#define dyldLogFunc(msg, ...) kprintf(msg, ## __VA_ARGS__)
#endif

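/*
 * Debug aid: enabling the block below replaces logging with a spin loop
 * that a debugger can release by setting the local `_wait` to nonzero.
 */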
#if 0
#define dyldLogFunc(msg, ...) ({int _wait = 0; do { asm volatile ("yield" : "+r"(_wait) : ); } while(!_wait); })
#endif
#define LogFixups 0

// cannot safely call out to functions like strcmp before initial fixup
static inline int
strings_are_equal(const char* a, const char* b)
{
	while (*a && *b) {
		if (*a != *b) {
			return 0;
		}
		++a;
		++b;
	}
	return *a == *b;
}

/*
 * Functions from dyld to rebase, fixup and sign the contents of MH_FILESET
 * kernel collections.
 */

union ChainedFixupPointerOnDisk {
	uint64_t raw64;
	struct dyld_chained_ptr_64_kernel_cache_rebase fixup64;
};
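
/*
 * For reference, the kernel-cache rebase fixup declared in
 * <mach-o/fixup-chains.h> packs its metadata into a single 64-bit word:
 *
 *	uint64_t target     : 30,   // offset from the target cache's base
 *	         cacheLevel :  2,   // index into basePointers[]
 *	         diversity  : 16,   // PAC discriminator
 *	         addrDiv    :  1,   // blend discriminator with the address?
 *	         key        :  2,   // PAC key (IA, IB, DA, DB)
 *	         next       : 12,   // stride to the next fixup in the chain
 *	         isAuth     :  1;   // signed pointer?
 */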

static uint64_t __unused
sign_pointer(struct dyld_chained_ptr_64_kernel_cache_rebase pointer __unused,
    void *loc __unused,
    uint64_t target __unused)
{
#if HAS_APPLE_PAC
	uint64_t discriminator = pointer.diversity;
	if (pointer.addrDiv) {
		if (discriminator) {
			discriminator = __builtin_ptrauth_blend_discriminator(loc, discriminator);
		} else {
			discriminator = (uint64_t)(uintptr_t)loc;
		}
	}
	switch (pointer.key) {
	case 0: // IA
		return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 0, discriminator);
	case 1: // IB
		return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 1, discriminator);
	case 2: // DA
		return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 2, discriminator);
	case 3: // DB
		return (uint64_t)__builtin_ptrauth_sign_unauthenticated((void*)target, 3, discriminator);
	}
#endif
	return target;
}

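/*
 * Resolve a single chained fixup in place: look up the base pointer for the
 * fixup's cache level, add the target offset, and (on PAC hardware) re-sign
 * the result before storing it back.
 */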
static inline __attribute__((__always_inline__)) void
fixup_value(union ChainedFixupPointerOnDisk* fixupLoc __unused,
    const struct dyld_chained_starts_in_segment* segInfo,
    uintptr_t slide __unused,
    const void* basePointers[KCNumKinds] __unused,
    int* stop)
{
	if (LogFixups) {
		dyldLogFunc("[LOG] kernel-fixups: fixup_value %p\n", fixupLoc);
	}
	switch (segInfo->pointer_format) {
#if __LP64__
	case DYLD_CHAINED_PTR_64_KERNEL_CACHE:
	case DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE: {
		const void* baseAddress = basePointers[fixupLoc->fixup64.cacheLevel];
		if (baseAddress == 0) {
			dyldLogFunc("Invalid cache level: %d\n", fixupLoc->fixup64.cacheLevel);
			*stop = 1;
			return;
		}
		uintptr_t slidValue = (uintptr_t)baseAddress + fixupLoc->fixup64.target;
		if (LogFixups) {
			dyldLogFunc("[LOG] kernel-fixups: slidValue %p (base=%p, target=%p)\n", (void*)slidValue,
			    (const void *)baseAddress, (void *)(uintptr_t)fixupLoc->fixup64.target);
		}
#if HAS_APPLE_PAC
		if (fixupLoc->fixup64.isAuth) {
			slidValue = sign_pointer(fixupLoc->fixup64, fixupLoc, slidValue);
		}
#else
		if (fixupLoc->fixup64.isAuth) {
			dyldLogFunc("Unexpected authenticated fixup\n");
			*stop = 1;
			return;
		}
#endif // HAS_APPLE_PAC
		fixupLoc->raw64 = slidValue;
		break;
	}
#endif // __LP64__
	default:
		dyldLogFunc("unsupported pointer chain format: 0x%04X", segInfo->pointer_format);
		*stop = 1;
		break;
	}
}

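/*
 * Walk one fixup chain within a page. Each fixup's `next` field encodes the
 * distance to the following fixup: in 4-byte strides for
 * DYLD_CHAINED_PTR_64_KERNEL_CACHE and in bytes for
 * DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE; a `next` of 0 ends the chain.
 */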
static inline __attribute__((__always_inline__)) int
walk_chain(const struct mach_header_64* mh,
    const struct dyld_chained_starts_in_segment* segInfo,
    uint32_t pageIndex,
    uint16_t offsetInPage,
    uintptr_t slide __unused,
    const void* basePointers[KCNumKinds])
{
	if (LogFixups) {
		dyldLogFunc("[LOG] kernel-fixups: walk_chain page[%d]\n", pageIndex);
	}
	int stop = 0;
	uintptr_t pageContentStart = (uintptr_t)mh + (uintptr_t)segInfo->segment_offset
	    + (pageIndex * segInfo->page_size);
	union ChainedFixupPointerOnDisk* chain = (union ChainedFixupPointerOnDisk*)(pageContentStart + offsetInPage);
	int chainEnd = 0;
	if (LogFixups) {
		dyldLogFunc("[LOG] kernel-fixups: segInfo->segment_offset 0x%llx\n", segInfo->segment_offset);
		dyldLogFunc("[LOG] kernel-fixups: segInfo->page_size %d\n", segInfo->page_size);
		dyldLogFunc("[LOG] kernel-fixups: segInfo->pointer_format %d\n", segInfo->pointer_format);
	}
	while (!stop && !chainEnd) {
		// copy chain content, in case the handler modifies the location to its final value
		if (LogFixups) {
			dyldLogFunc("[LOG] kernel-fixups: value of chain %p\n", chain);
		}
		union ChainedFixupPointerOnDisk chainContent __unused = *chain;
		fixup_value(chain, segInfo, slide, basePointers, &stop);
		if (!stop) {
			switch (segInfo->pointer_format) {
#if __LP64__
			case DYLD_CHAINED_PTR_64_KERNEL_CACHE:
				if (chainContent.fixup64.next == 0) {
					chainEnd = 1;
				} else {
					if (LogFixups) {
						dyldLogFunc("[LOG] kernel-fixups: chainContent fixup64.next %d\n", chainContent.fixup64.next);
					}
					chain = (union ChainedFixupPointerOnDisk*)((uintptr_t)chain + chainContent.fixup64.next * 4);
				}
				break;
			case DYLD_CHAINED_PTR_X86_64_KERNEL_CACHE:
				if (chainContent.fixup64.next == 0) {
					chainEnd = 1;
				} else {
					if (LogFixups) {
						dyldLogFunc("[LOG] kernel-fixups: chainContent fixup x86 64.next %d\n", chainContent.fixup64.next);
					}
					chain = (union ChainedFixupPointerOnDisk*)((uintptr_t)chain + chainContent.fixup64.next);
				}
				break;
#endif // __LP64__
			default:
				dyldLogFunc("unknown pointer format 0x%04X", segInfo->pointer_format);
				stop = 1;
			}
		}
	}
	return stop;
}

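/*
 * Apply every chained-fixup rebase in a kernel collection. The slide is the
 * difference between the mach header's runtime address and __TEXT's linked
 * vmaddr; the fixup metadata lives in __LINKEDIT at the file offset recorded
 * by LC_DYLD_CHAINED_FIXUPS.
 */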
static inline __attribute__((__always_inline__)) int
kernel_collection_slide(const struct mach_header_64* mh, const void* basePointers[KCNumKinds])
{
	// First find the slide and chained fixups load command
	uint64_t textVMAddr = 0;
	const struct linkedit_data_command* chainedFixups = 0;
	uint64_t linkeditVMAddr = 0;
	uint64_t linkeditFileOffset = 0;

	if (LogFixups) {
		dyldLogFunc("[LOG] kernel-fixups: parsing load commands\n");
	}

	const struct load_command* startCmds = 0;
	if (mh->magic == MH_MAGIC_64) {
		startCmds = (struct load_command*)((uintptr_t)mh + sizeof(struct mach_header_64));
	} else if (mh->magic == MH_MAGIC) {
		startCmds = (struct load_command*)((uintptr_t)mh + sizeof(struct mach_header));
	} else {
		//const uint32_t* h = (uint32_t*)mh;
		//diag.error("file does not start with MH_MAGIC[_64]: 0x%08X 0x%08X", h[0], h[1]);
		return 1; // not a mach-o file
	}
	const struct load_command* const cmdsEnd = (struct load_command*)((uintptr_t)startCmds + mh->sizeofcmds);
	const struct load_command* cmd = startCmds;
	for (uint32_t i = 0; i < mh->ncmds; ++i) {
		if (LogFixups) {
			dyldLogFunc("[LOG] kernel-fixups: parsing load command %d with cmd=0x%x\n", i, cmd->cmd);
		}
		const struct load_command* nextCmd = (struct load_command*)((uintptr_t)cmd + cmd->cmdsize);
		if (cmd->cmdsize < 8) {
			//diag.error("malformed load command #%d of %d at %p with mh=%p, size (0x%X) too small", i, this->ncmds, cmd, this, cmd->cmdsize);
			return 1;
		}
		if ((nextCmd > cmdsEnd) || (nextCmd < startCmds)) {
			//diag.error("malformed load command #%d of %d at %p with mh=%p, size (0x%X) is too large, load commands end at %p", i, this->ncmds, cmd, this, cmd->cmdsize, cmdsEnd);
			return 1;
		}
		if (cmd->cmd == LC_DYLD_CHAINED_FIXUPS) {
			chainedFixups = (const struct linkedit_data_command*)cmd;
		} else if (cmd->cmd == LC_SEGMENT_64) {
			const struct segment_command_64* seg = (const struct segment_command_64*)(uintptr_t)cmd;

			if (LogFixups) {
				dyldLogFunc("[LOG] kernel-fixups: segment name, vm start and size: %s 0x%llx 0x%llx\n",
				    seg->segname, seg->vmaddr, seg->vmsize);
			}
			if (strings_are_equal(seg->segname, "__TEXT")) {
				textVMAddr = seg->vmaddr;
			} else if (strings_are_equal(seg->segname, "__LINKEDIT")) {
				linkeditVMAddr = seg->vmaddr;
				linkeditFileOffset = seg->fileoff;
			}
		}
		cmd = nextCmd;
	}

	uintptr_t slide = (uintptr_t)mh - (uintptr_t)textVMAddr;

	if (LogFixups) {
		dyldLogFunc("[LOG] kernel-fixups: slide %lx\n", slide);
	}

	if (chainedFixups == 0) {
		return 0;
	}

	if (LogFixups) {
		dyldLogFunc("[LOG] kernel-fixups: found chained fixups %p\n", chainedFixups);
		dyldLogFunc("[LOG] kernel-fixups: found linkeditVMAddr %p\n", (void*)linkeditVMAddr);
		dyldLogFunc("[LOG] kernel-fixups: found linkeditFileOffset %p\n", (void*)linkeditFileOffset);
	}

	// Now we have the chained fixups, walk them to apply all the rebases
	uint64_t offsetInLinkedit = chainedFixups->dataoff - linkeditFileOffset;
	uintptr_t linkeditStartAddr = (uintptr_t)linkeditVMAddr + slide;
	if (LogFixups) {
		dyldLogFunc("[LOG] kernel-fixups: offsetInLinkedit %llx\n", offsetInLinkedit);
		dyldLogFunc("[LOG] kernel-fixups: linkeditStartAddr %p\n", (void*)linkeditStartAddr);
	}

	const struct dyld_chained_fixups_header* fixupsHeader = (const struct dyld_chained_fixups_header*)(linkeditStartAddr + offsetInLinkedit);
	const struct dyld_chained_starts_in_image* fixupStarts = (const struct dyld_chained_starts_in_image*)((uintptr_t)fixupsHeader + fixupsHeader->starts_offset);
	if (LogFixups) {
		dyldLogFunc("[LOG] kernel-fixups: fixupsHeader %p\n", fixupsHeader);
		dyldLogFunc("[LOG] kernel-fixups: fixupStarts %p\n", fixupStarts);
	}

	int stopped = 0;
	for (uint32_t segIndex = 0; segIndex < fixupStarts->seg_count && !stopped; ++segIndex) {
		if (LogFixups) {
			dyldLogFunc("[LOG] kernel-fixups: segment %d\n", segIndex);
		}
		if (fixupStarts->seg_info_offset[segIndex] == 0) {
			continue;
		}
		const struct dyld_chained_starts_in_segment* segInfo = (const struct dyld_chained_starts_in_segment*)((uintptr_t)fixupStarts + fixupStarts->seg_info_offset[segIndex]);
		for (uint32_t pageIndex = 0; pageIndex < segInfo->page_count && !stopped; ++pageIndex) {
			uint16_t offsetInPage = segInfo->page_start[pageIndex];
			if (offsetInPage == DYLD_CHAINED_PTR_START_NONE) {
				continue;
			}
			if (offsetInPage & DYLD_CHAINED_PTR_START_MULTI) {
				// FIXME: Implement multiple chains per page
				return 1;
			} else {
				// one chain per page
				if (walk_chain(mh, segInfo, pageIndex, offsetInPage, slide, basePointers)) {
					stopped = 1;
				}
			}
		}
	}

	return stopped;
}

/*
 * Utility functions to adjust the load command vmaddrs in the constituent
 * Mach-Os of an MH_FILESET kernel collection.
 */

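/*
 * Slide every LC_SEGMENT_64 (and its sections) of one fileset entry, then
 * adjust the entry's symbol-table n_value fields (skipping debug/N_STAB
 * entries) so they match the slid addresses.
 */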
MARK_AS_FIXUP_TEXT static void
kernel_collection_adjust_fileset_entry_addrs(struct mach_header_64 *mh, uintptr_t adj)
{
	struct load_command *lc;
	struct segment_command_64 *seg, *linkedit_cmd = NULL;
	struct symtab_command *symtab_cmd = NULL;
	struct section_64 *sec;
	uint32_t i, j;

	lc = (struct load_command *)((uintptr_t)mh + sizeof(*mh));
	for (i = 0; i < mh->ncmds; i++,
	    lc = (struct load_command *)((uintptr_t)lc + lc->cmdsize)) {
		if (lc->cmd == LC_SYMTAB) {
			symtab_cmd = (struct symtab_command *)lc;
			continue;
		}
		if (lc->cmd != LC_SEGMENT_64) {
			continue;
		}
		if (strings_are_equal(((struct segment_command_64 *)(uintptr_t)lc)->segname, SEG_LINKEDIT)) {
			linkedit_cmd = ((struct segment_command_64 *)(uintptr_t)lc);
		}

		seg = (struct segment_command_64 *)(uintptr_t)lc;
		seg->vmaddr += adj;
		/* slide/adjust every section in the segment */
		sec = (struct section_64 *)((uintptr_t)seg + sizeof(*seg));
		for (j = 0; j < seg->nsects; j++, sec++) {
			sec->addr += adj;
		}
	}

	if (symtab_cmd != NULL && linkedit_cmd != NULL) {
		struct nlist_64 *sym;
		uint32_t cnt = 0;

		if (LogFixups) {
			dyldLogFunc("[LOG] Symbols:\n");
			dyldLogFunc("[LOG] nsyms: %d, symoff: 0x%x\n", symtab_cmd->nsyms, symtab_cmd->symoff);
		}

		if (symtab_cmd->nsyms == 0) {
			dyldLogFunc("[LOG] No symbols to relocate\n");
		}

		sym = (struct nlist_64 *)(uintptr_t)(linkedit_cmd->vmaddr + symtab_cmd->symoff - linkedit_cmd->fileoff);

		for (i = 0; i < symtab_cmd->nsyms; i++) {
			if (sym[i].n_type & N_STAB) {
				continue;
			}
			sym[i].n_value += adj;
			cnt++;
		}
		if (LogFixups) {
			dyldLogFunc("[LOG] KASLR: Relocated %d symbols\n", cnt);
		}
	}
}

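/*
 * Slide the top-level fileset mach header: adjust every LC_FILESET_ENTRY and
 * LC_SEGMENT_64 it contains, and report the vmaddr ranges spanned by all
 * segments, by read-only segments (excluding __LINKEDIT), by executable
 * segments, and by everything other than __LINKEDIT.
 */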
MARK_AS_FIXUP_TEXT static void
kernel_collection_adjust_mh_addrs(struct mach_header_64 *kc_mh, uintptr_t adj,
    bool pageable, uintptr_t *kc_lowest_vmaddr, uintptr_t *kc_highest_vmaddr,
    uintptr_t *kc_lowest_ro_vmaddr, uintptr_t *kc_highest_ro_vmaddr,
    uintptr_t *kc_lowest_rx_vmaddr, uintptr_t *kc_highest_rx_vmaddr,
    uintptr_t *kc_highest_nle_vmaddr)
{
	assert(kc_mh->filetype == MH_FILESET);

	struct load_command *lc;
	struct fileset_entry_command *fse;
	struct segment_command_64 *seg;
	struct section_64 *sec;
	struct mach_header_64 *mh;
	uintptr_t lowest_vmaddr = UINTPTR_MAX, highest_vmaddr = 0, highest_nle_vmaddr = 0;
	uintptr_t lowest_ro_vmaddr = UINTPTR_MAX, highest_ro_vmaddr = 0;
	uintptr_t lowest_rx_vmaddr = UINTPTR_MAX, highest_rx_vmaddr = 0;
	uint32_t i, j;
	int is_linkedit = 0;

	/*
	 * Slide (offset/adjust) every segment/section of every kext contained
	 * in this MH_FILESET mach-o.
	 */
	lc = (struct load_command *)((uintptr_t)kc_mh + sizeof(*kc_mh));
	for (i = 0; i < kc_mh->ncmds; i++,
	    lc = (struct load_command *)((uintptr_t)lc + lc->cmdsize)) {
		if (lc->cmd == LC_FILESET_ENTRY) {
			fse = (struct fileset_entry_command *)(uintptr_t)lc;
			/*
			 * The fileset_entry contains a pointer to the mach-o
			 * of a kext (or the kernel). Slide/adjust this command, and
			 * then slide/adjust all the sub-commands in the mach-o.
			 */
			if (LogFixups) {
				dyldLogFunc("[MH] sliding %s", (char *)((uintptr_t)fse +
				    (uintptr_t)(fse->entry_id.offset)));
			}
			mh = (struct mach_header_64 *)((uintptr_t)fse->vmaddr + adj);
			if (!pageable) {
				/*
				 * Do not adjust mach headers of entries in the pageable KC,
				 * as that would pull those pages in prematurely
				 */
				kernel_collection_adjust_fileset_entry_addrs(mh, adj);
			}
			fse->vmaddr += adj;
		} else if (lc->cmd == LC_SEGMENT_64) {
			/*
			 * Slide/adjust all LC_SEGMENT_64 commands in the fileset
			 * (and any sections in those segments)
			 */
			seg = (struct segment_command_64 *)(uintptr_t)lc;
			seg->vmaddr += adj;
			sec = (struct section_64 *)((uintptr_t)seg + sizeof(*seg));
			for (j = 0; j < seg->nsects; j++, sec++) {
				sec->addr += adj;
			}
			if (seg->vmsize == 0) {
				continue;
			}
			/*
			 * Record the vmaddr range covered by all non-empty segments in the
			 * kernel collection.
			 */
			if (seg->vmaddr < lowest_vmaddr) {
				lowest_vmaddr = (uintptr_t)seg->vmaddr;
			}

			is_linkedit = strings_are_equal(seg->segname, "__LINKEDIT");

			if (seg->vmaddr + seg->vmsize > highest_vmaddr) {
				highest_vmaddr = (uintptr_t)seg->vmaddr + (uintptr_t)seg->vmsize;
				if (!is_linkedit) {
					highest_nle_vmaddr = highest_vmaddr;
				}
			}

			if ((seg->maxprot & VM_PROT_WRITE) || is_linkedit) {
				continue;
			}
			/*
			 * Record the vmaddr range covered by non-empty read-only segments
			 * in the kernel collection (excluding LINKEDIT).
			 */
			if (seg->vmaddr < lowest_ro_vmaddr) {
				lowest_ro_vmaddr = (uintptr_t)seg->vmaddr;
			}
			if (seg->vmaddr + seg->vmsize > highest_ro_vmaddr) {
				highest_ro_vmaddr = (uintptr_t)seg->vmaddr + (uintptr_t)seg->vmsize;
			}

			if (!(seg->maxprot & VM_PROT_EXECUTE)) {
				continue;
			}
			/*
			 * Record the vmaddr range covered by contiguous executable segments
			 * in the kernel collection.
			 */
			if (seg->vmaddr < lowest_rx_vmaddr && (lowest_rx_vmaddr <= seg->vmaddr + seg->vmsize || lowest_rx_vmaddr == UINTPTR_MAX)) {
				lowest_rx_vmaddr = (uintptr_t)seg->vmaddr;
			}
			if (seg->vmaddr + seg->vmsize > highest_rx_vmaddr && (highest_rx_vmaddr >= seg->vmaddr || highest_rx_vmaddr == 0)) {
				highest_rx_vmaddr = (uintptr_t)seg->vmaddr + (uintptr_t)seg->vmsize;
			}
		}
	}
	if (kc_lowest_vmaddr) {
		*kc_lowest_vmaddr = lowest_vmaddr;
	}
	if (kc_highest_vmaddr) {
		*kc_highest_vmaddr = highest_vmaddr;
	}
	if (kc_lowest_ro_vmaddr) {
		*kc_lowest_ro_vmaddr = lowest_ro_vmaddr;
	}
	if (kc_highest_ro_vmaddr) {
		*kc_highest_ro_vmaddr = highest_ro_vmaddr;
	}
	if (kc_lowest_rx_vmaddr) {
		*kc_lowest_rx_vmaddr = lowest_rx_vmaddr;
	}
	if (kc_highest_rx_vmaddr) {
		*kc_highest_rx_vmaddr = highest_rx_vmaddr;
	}
	if (kc_highest_nle_vmaddr) {
		*kc_highest_nle_vmaddr = highest_nle_vmaddr;
	}
}

/*
 * Rebaser functions for the traditional arm64e static kernelcache with
 * threaded rebase.
 */

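/*
 * Threaded rebase packs its metadata into each 64-bit slot:
 *   bit  63       authenticated (PAC) pointer
 *   bit  62       0 = rebase, 1 = bind (binds do not occur here)
 *   bits 51..61   delta to the next fixup, in stepMultiplier units
 *   authenticated rebase: bits 0..31 target, bits 32..47 diversity,
 *                         bit 48 address diversity, bits 49..50 key
 *   plain rebase: top-8-bit / low-43-bit pointer encoding (see below)
 */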
static void
rebase_chain(uintptr_t chainStartAddress, uint64_t stepMultiplier, uintptr_t baseAddress __unused, uint64_t slide)
{
	uint64_t delta = 0;
	uintptr_t address = chainStartAddress;
	do {
		uint64_t value = *(uint64_t*)address;

#if HAS_APPLE_PAC
		uint16_t diversity = (uint16_t)(value >> 32);
		bool hasAddressDiversity = (value & (1ULL << 48)) != 0;
		ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
#endif
		bool isAuthenticated = (value & (1ULL << 63)) != 0;
		bool isRebase = (value & (1ULL << 62)) == 0;
		if (isRebase) {
			if (isAuthenticated) {
				// The new value for a rebase is the low 32-bits of the threaded value plus the slide.
				uint64_t newValue = (value & 0xFFFFFFFF) + slide;
				// Add in the offset from the mach_header
				newValue += baseAddress;
#if HAS_APPLE_PAC
				// We have bits to merge in to the discriminator
				uintptr_t discriminator = diversity;
				if (hasAddressDiversity) {
					// First calculate a new discriminator using the address of where we are trying to store the value
					// Only blend if we have a discriminator
					if (discriminator) {
						discriminator = __builtin_ptrauth_blend_discriminator((void*)address, discriminator);
					} else {
						discriminator = address;
					}
				}
				switch (key) {
				case ptrauth_key_asia:
					newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asia, discriminator);
					break;
				case ptrauth_key_asib:
					newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asib, discriminator);
					break;
				case ptrauth_key_asda:
					newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asda, discriminator);
					break;
				case ptrauth_key_asdb:
					newValue = (uintptr_t)__builtin_ptrauth_sign_unauthenticated((void*)newValue, ptrauth_key_asdb, discriminator);
					break;
				}
#endif
				*(uint64_t*)address = newValue;
			} else {
				// Regular pointer which needs to fit in 51 bits of value.
				// C++ RTTI uses the top bit, so we'll allow the whole top byte
				// and the bottom 43 bits to fit in to 51 bits.
				uint64_t top8Bits = value & 0x0007F80000000000ULL;
				uint64_t bottom43Bits = value & 0x000007FFFFFFFFFFULL;
				uint64_t targetValue = (top8Bits << 13) | (((intptr_t)(bottom43Bits << 21) >> 21) & 0x00FFFFFFFFFFFFFF);
				targetValue = targetValue + slide;
				*(uint64_t*)address = targetValue;
			}
		}

		// The delta is bits [51..61]
		// And bit 62 tells us if we are a rebase (0) or bind (1)
		value &= ~(1ULL << 62);
		delta = (value & 0x3FF8000000000000) >> 51;
		address += delta * stepMultiplier;
	} while (delta != 0);
}

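/*
 * The thread-starts payload begins with a header word whose low bit selects
 * the chain stride (8 bytes if set, otherwise 4), followed by 32-bit offsets
 * of chain heads relative to the mach header; 0xFFFFFFFF ends the list early.
 */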
static bool __unused
rebase_threaded_starts(uint32_t *threadArrayStart, uint32_t *threadArrayEnd,
    uintptr_t macho_header_addr, uintptr_t macho_header_vmaddr, size_t slide)
{
	uint32_t threadStartsHeader = *threadArrayStart;
	uint64_t stepMultiplier = (threadStartsHeader & 1) == 1 ? 8 : 4;
	for (uint32_t* threadOffset = threadArrayStart + 1; threadOffset != threadArrayEnd; ++threadOffset) {
		if (*threadOffset == 0xFFFFFFFF) {
			break;
		}
		rebase_chain(macho_header_addr + *threadOffset, stepMultiplier, macho_header_vmaddr, slide);
	}
	return true;
}