/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
32
33#ifndef ARM_CPU_DATA
34#define ARM_CPU_DATA
35
36#ifdef MACH_KERNEL_PRIVATE
37
38#include <mach_assert.h>
39#include <kern/assert.h>
40#include <kern/kern_types.h>
41#include <kern/processor.h>
42#include <pexpert/pexpert.h>
43#include <arm/thread.h>
44#include <arm/proc_reg.h>
45
46#include <mach/mach_types.h>
47#include <machine/thread.h>
48
49#define current_thread() current_thread_fast()
50
51static inline __pure2 thread_t current_thread_fast(void)
52{
53#if defined(__arm64__)
54 return (thread_t)(__builtin_arm_rsr64("TPIDR_EL1"));
55#else
56 return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4)); // TPIDRPRW
57#endif
58}
59
/*
 * The "volatile" flavor of current_thread() is intended for use by
 * scheduler code which may need to update the thread pointer in the
 * course of a context switch.  Any call to current_thread() made
 * prior to the thread pointer update should be safe to optimize away
 * as it should be consistent with that thread's state to the extent
 * the compiler can reason about it.  Likewise, the context switch
 * path will eventually result in an arbitrary branch to the new
 * thread's pc, about which the compiler won't be able to reason.
 * Thus any compile-time optimization of current_thread() calls made
 * within the new thread should be safely encapsulated in its
 * register/stack state.  The volatile form therefore exists to cover
 * the window between the thread pointer update and the branch to
 * the new pc.
 */
static inline thread_t current_thread_volatile(void)
{
	/*
	 * The compiler treats the rsr64 builtin as const, which can allow
	 * it to eliminate "redundant" reads of TPIDR_EL1 -- exactly what
	 * we must avoid across a context switch.  Hence the volatile asm
	 * here instead of the builtin.  The mrc builtin used for arm32 is
	 * expected to already be treated as volatile by the compiler, so
	 * no asm wrapper is needed on that path.
	 */
#if defined(__arm64__)
	thread_t result;
	__asm__ volatile("mrs %0, TPIDR_EL1" : "=r" (result));
	return result;
#else
	return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4)); // TPIDRPRW
#endif
}
89
#if defined(__arm64__)

/*
 * Return the value of SP_EL1, the exception stack pointer, by briefly
 * selecting it via the SPSel register and copying sp out.  SPSel is
 * restored to 0 (SP_EL0) before returning.
 *
 * NOTE(review): an exception taken between the two SPSel writes would
 * observe the temporarily-switched stack selection; presumably callers
 * run with exceptions masked or on the exception path -- confirm
 * caller constraints.
 */
static inline vm_offset_t exception_stack_pointer(void)
{
	vm_offset_t result = 0;
	__asm__ volatile(
		"msr SPSel, #1 \n"   /* select SP_EL1 */
		"mov %0, sp \n"      /* capture the exception stack pointer */
		"msr SPSel, #0 \n"   /* restore SP_EL0 selection */
		: "=r" (result));

	return result;
}

#endif /* defined(__arm64__) */
105
/*
 * Per-CPU data on this architecture is reached through the current
 * thread's machine-dependent state rather than a dedicated register.
 */
#define getCpuDatap() current_thread()->machine.CpuDatap
#define current_cpu_datap() getCpuDatap()

/* Preemption primitives; defined elsewhere in the kernel. */
extern int get_preemption_level(void);
extern void _enable_preemption_no_check(void);

/*
 * Aliases mapping the generic/mp-prefixed preemption-control names
 * onto the underscore-prefixed implementations.
 */
#define enable_preemption_no_check() _enable_preemption_no_check()
#define mp_disable_preemption() _disable_preemption()
#define mp_enable_preemption() _enable_preemption()
#define mp_enable_preemption_no_check() _enable_preemption_no_check()
116
117#endif /* MACH_KERNEL_PRIVATE */
118
119#endif /* ARM_CPU_DATA */
120