/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef ARM_CPU_DATA
#define ARM_CPU_DATA

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>
#include <kern/assert.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <pexpert/pexpert.h>
#include <arm/thread.h>
#include <arm64/proc_reg.h>

#include <mach/mach_types.h>
#include <machine/thread.h>

__ASSUME_PTR_ABI_SINGLE_BEGIN

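/*
 * Fast flavor of current_thread(): the active thread pointer is kept in
 * TPIDR_EL1 on arm64 and in TPIDRPRW on arm32, so fetching it is a single
 * system register read.  The "const" attribute lets the compiler coalesce
 * repeated reads within one thread; that is safe everywhere except across
 * the context switch window, which is what current_thread_volatile() below
 * is for.
 */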
static inline __attribute__((const)) thread_t
current_thread_fast(void)
{
#if defined(__arm64__)
	/*
	 * rdar://73762648: clang nowadays insists that
	 *
	 *     __builtin_arm_rsr64("TPIDR_EL1")
	 *
	 * is not constant and ignores the "const" attribute, so do it the
	 * "dumb" way with explicit inline asm.
	 */
	unsigned long result;
	__asm__ ("mrs %0, TPIDR_EL1" : "=r" (result));
	return __unsafe_forge_single(thread_t, result);
#else
	// TPIDRPRW (mrc p15, 0, <Rt>, c13, c0, 4), the PL1-only software thread ID register
	return __unsafe_forge_single(thread_t, __builtin_arm_mrc(15, 0, 13, 0, 4));
#endif
}

/*
 * The "volatile" flavor of current_thread() is intended for use by
 * scheduler code which may need to update the thread pointer in the
 * course of a context switch. Any call to current_thread() made
 * prior to the thread pointer update should be safe to optimize away
 * as it should be consistent with that thread's state to the extent
 * the compiler can reason about it. Likewise, the context switch
 * path will eventually result in an arbitrary branch to the new
 * thread's pc, about which the compiler won't be able to reason.
 * Thus any compile-time optimization of current_thread() calls made
 * within the new thread should be safely encapsulated in its
 * register/stack state. The volatile form therefore exists to cover
 * the window between the thread pointer update and the branch to
 * the new pc.
 */
static inline thread_t
current_thread_volatile(void)
{
	/*
	 * The compiler might decide to treat rsr64 as const (this behavior
	 * comes and goes between compiler releases), which can allow it to
	 * eliminate redundant calls, which we don't want here.  Thus we use
	 * volatile asm, which gives us control over the semantics.
	 *
	 * The mrc builtin used for arm32 should be treated as volatile,
	 * however.
	 */
#if defined(__arm64__)
	unsigned long result;
	__asm__ volatile ("mrs %0, TPIDR_EL1" : "=r" (result));
	return __unsafe_forge_single(thread_t, result);
#else
	// TPIDRPRW (mrc p15, 0, <Rt>, c13, c0, 4), the PL1-only software thread ID register
	return __unsafe_forge_single(thread_t, __builtin_arm_mrc(15, 0, 13, 0, 4));
#endif
}
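
/*
 * Illustrative sketch only (the names below are placeholders, not
 * declarations from this header): the window current_thread_volatile()
 * exists for looks roughly like this in a context switch path.
 *
 *	thread_t old = current_thread_volatile();   // still the outgoing thread
 *	write_thread_pointer(new);                  // updates TPIDR_EL1 / TPIDRPRW
 *	thread_t cur = current_thread_volatile();   // re-reads the register
 *	assert(cur == new);
 *
 * With the "const" flavor, the compiler could legally fold the second read
 * into the first and hand back the stale thread pointer inside this window.
 */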

#if defined(__arm64__)

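/*
 * Return the exception stack pointer, i.e. SP_EL1.  Setting SPSel to 1 makes
 * "sp" name SP_EL1 while executing at EL1; the value is captured and SPSel is
 * then set back to 0, the configuration the kernel normally runs with
 * (on SP_EL0).
 */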
static inline vm_offset_t
exception_stack_pointer(void)
{
	vm_offset_t result = 0;
	__asm__ volatile (
		"msr SPSel, #1 \n"
		"mov %0, sp \n"
		"msr SPSel, #0 \n"
		: "=r" (result));

	return result;
}

#endif /* defined(__arm64__) */

#define getCpuDatap()           current_thread()->machine.CpuDatap
#define current_cpu_datap()     getCpuDatap()

extern int                      get_preemption_level(void);
extern unsigned int             get_preemption_level_for_thread(thread_t);

#define mp_disable_preemption() _disable_preemption()
#define mp_enable_preemption()  _enable_preemption()
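
/*
 * Illustrative sketch only (the cpu_number field is assumed here for
 * illustration; see the definition of cpu_data_t for the real layout):
 * per-CPU data is normally accessed with preemption disabled so the thread
 * cannot migrate to another CPU while the pointer is in use.
 *
 *	mp_disable_preemption();
 *	cpu_data_t *cdp = getCpuDatap();
 *	... use cdp, e.g. cdp->cpu_number ...
 *	mp_enable_preemption();
 */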

__ASSUME_PTR_ABI_SINGLE_END

#endif /* MACH_KERNEL_PRIVATE */

#endif /* ARM_CPU_DATA */