/*
 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#ifndef _ARM_MISC_PROTOS_H_
#define _ARM_MISC_PROTOS_H_

#include <kern/kern_types.h>

typedef struct boot_args boot_args;
/* The address of the end of the kernelcache. */
extern vm_offset_t end_kern;
/* The lowest address in the kernelcache. */
extern vm_offset_t segLOWEST;


extern void machine_startup(__unused boot_args *args) __attribute__((noinline));


extern void arm_auxkc_init(void *mh, void *base);

extern void arm_vm_init(uint64_t memory_size, boot_args *args);
extern void arm_vm_prot_init(boot_args *args);
extern void arm_vm_prot_finalize(boot_args *args);

#if __arm64__
extern void arm_set_kernel_tbi(void);

void __attribute__((__noreturn__)) _was_in_userspace(void);

#endif /* __arm64__ */

extern kern_return_t DebuggerXCallEnter(boolean_t, bool);
extern void DebuggerXCallReturn(void);

#if __arm64__ && DEBUG
extern void dump_kva_space(void);
#endif /* __arm64__ && DEBUG */

extern void Load_context(thread_t);
extern void Idle_load_context(void) __attribute__((noreturn));
extern thread_t Switch_context(thread_t, thread_continue_t, thread_t);
extern thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t processor);
extern void __dead2 Call_continuation(thread_continue_t, void *, wait_result_t, boolean_t enable_interrupts);



/**
 * Indicate during a context-switch event that we have updated some CPU
 * state which requires a later context-sync event.
 *
 * On ARMv8.5 and later CPUs, this function sets a flag that will trigger an
 * explicit isb instruction sometime before the upcoming eret instruction.
 *
 * Prior to ARMv8.5, the eret instruction itself is always synchronizing, and
 * this function is an empty stub which serves only as documentation.
 */
#if __ARM_ARCH_8_5__
extern void arm_context_switch_requires_sync(void);
#else
static inline void
arm_context_switch_requires_sync(void)
{
}
#endif /* __ARM_ARCH_8_5__ */

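/*
 * A minimal usage sketch for arm_context_switch_requires_sync(); the caller
 * and the state update shown are hypothetical, for illustration only:
 *
 *     // In the context-switch path, after updating CPU state (e.g. a
 *     // per-thread system register) that must be visible before returning
 *     // to the new thread:
 *     update_per_thread_cpu_state(new_thread);   // hypothetical helper
 *     arm_context_switch_requires_sync();
 *
 * On ARMv8.5 and later this arranges for an isb before the upcoming eret;
 * on earlier CPUs the call compiles away because eret itself synchronizes.
 */
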
#if __has_feature(ptrauth_calls)
extern boolean_t arm_user_jop_disabled(void);
#endif /* __has_feature(ptrauth_calls) */

extern void DebuggerCall(unsigned int reason, void *ctx);
extern void DebuggerXCall(void *ctx);

extern int copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes);
extern int copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes);

extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t nbytes);

extern void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
extern void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);

#if defined(__arm64__)
extern void copy_legacy_debug_state(arm_legacy_debug_state_t * src, arm_legacy_debug_state_t *target, __unused boolean_t all);
extern void copy_debug_state32(arm_debug_state32_t * src, arm_debug_state32_t *target, __unused boolean_t all);
extern void copy_debug_state64(arm_debug_state64_t * src, arm_debug_state64_t *target, __unused boolean_t all);

extern boolean_t debug_legacy_state_is_valid(arm_legacy_debug_state_t *ds);
extern boolean_t debug_state_is_valid32(arm_debug_state32_t *ds);
extern boolean_t debug_state_is_valid64(arm_debug_state64_t *ds);

extern int copyio_check_user_addr(user_addr_t user_addr, vm_size_t nbytes);

/*
 * Get a quick virtual mapping of a physical page and run a callback on that
 * page's virtual address.
 */
extern int apply_func_phys(addr64_t src64, vm_size_t bytes, int (*func)(void * buffer, vm_size_t bytes, void * arg), void * arg);

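/*
 * A minimal usage sketch for apply_func_phys(); the callback and the call
 * site below are hypothetical, for illustration only. The callback receives
 * a temporary virtual mapping of the physical range and its length:
 *
 *     static int
 *     zero_window_cb(void *buffer, vm_size_t bytes, __unused void *arg)
 *     {
 *             bzero(buffer, bytes);
 *             return 0;
 *     }
 *
 *     // Zero one physical page through a temporary mapping:
 *     // apply_func_phys(page_pa, PAGE_SIZE, zero_window_cb, NULL);
 */
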
#else /* !defined(__arm64__) */
#error Unknown architecture.
#endif /* defined(__arm64__) */

#endif /* _ARM_MISC_PROTOS_H_ */