/*
 * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _ARM_LOCKS_H_
#define _ARM_LOCKS_H_

#ifdef MACH_KERNEL_PRIVATE
#ifndef LCK_SPIN_IS_TICKET_LOCK
#define LCK_SPIN_IS_TICKET_LOCK 0
#endif
#endif /* MACH_KERNEL_PRIVATE */

#include <kern/lock_types.h>
#ifdef MACH_KERNEL_PRIVATE
#include <kern/sched_hygiene.h>
#include <kern/startup.h>
#if LCK_SPIN_IS_TICKET_LOCK
#include <kern/ticket_lock.h>
#endif /* LCK_SPIN_IS_TICKET_LOCK */
#endif /* MACH_KERNEL_PRIVATE */

#ifdef MACH_KERNEL_PRIVATE
#if LCK_SPIN_IS_TICKET_LOCK
typedef lck_ticket_t lck_spin_t;
#else
typedef struct {
	struct hslock hwlock;
	unsigned long type;
} lck_spin_t;

#define lck_spin_data hwlock.lock_data

#define LCK_SPIN_TAG_DESTROYED 0xdead /* lock marked as Destroyed */

#define LCK_SPIN_TYPE 0x00000011
#define LCK_SPIN_TYPE_DESTROYED 0x000000ee
#endif /* LCK_SPIN_IS_TICKET_LOCK */

#elif KERNEL_PRIVATE

typedef struct {
	uintptr_t opaque[2] __kernel_data_semantics;
} lck_spin_t;

typedef struct {
	uintptr_t opaque[2] __kernel_data_semantics;
} lck_mtx_t;

typedef struct {
	uintptr_t opaque[16];
} lck_mtx_ext_t;

#else

typedef struct __lck_spin_t__ lck_spin_t;
typedef struct __lck_mtx_t__ lck_mtx_t;
typedef struct __lck_mtx_ext_t__ lck_mtx_ext_t;

#endif /* !KERNEL_PRIVATE */
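
/*
 * Note: whichever definition of lck_spin_t is visible above, clients are
 * expected to go through the lock KPIs declared elsewhere (kern/locks.h),
 * which is why the layout can differ per audience. A minimal illustrative
 * usage sketch, assuming the standard lck_* KPIs rather than anything
 * defined in this header:
 *
 *	lck_grp_t *grp = lck_grp_alloc_init("my-grp", LCK_GRP_ATTR_NULL);
 *	lck_spin_t lock;
 *
 *	lck_spin_init(&lock, grp, LCK_ATTR_NULL);
 *	lck_spin_lock(&lock);
 *	//  ... critical section ...
 *	lck_spin_unlock(&lock);
 */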
#ifdef MACH_KERNEL_PRIVATE

/*
 * static panic deadline, in timebase units, for
 * hw_lock_{bit,lock}{,_nopreempt} and hw_wait_while_equals()
 */
extern uint64_t _Atomic lock_panic_timeout;
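
/*
 * Illustrative sketch, not the actual implementation: a deadline in
 * timebase units is typically consumed by recording a start timestamp
 * and panicking once the elapsed spin exceeds it. read_timebase() and
 * try_acquire() below are hypothetical placeholders.
 *
 *	uint64_t start = read_timebase();
 *	while (!try_acquire(lock)) {
 *		uint64_t timeout = os_atomic_load(&lock_panic_timeout, relaxed);
 *		if (timeout != 0 && (read_timebase() - start) > timeout) {
 *			panic("spinlock timeout, lock: %p", lock);
 *		}
 *	}
 */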

/* Adaptive spin before blocking */
extern machine_timeout_t MutexSpin;
extern uint64_t low_MutexSpin;
extern int64_t high_MutexSpin;
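
/*
 * Illustrative sketch, under assumptions: an adaptive mutex spins for a
 * bounded window (on the order of MutexSpin, bounded by low_MutexSpin
 * and high_MutexSpin) on the theory that short critical sections release
 * quickly, and blocks only after the window expires. owner_is_running()
 * and block_on_mutex() are hypothetical placeholders.
 *
 *	uint64_t deadline = read_timebase() + MutexSpin;
 *	while (!try_acquire(mutex)) {
 *		if (!owner_is_running(mutex) || read_timebase() > deadline) {
 *			block_on_mutex(mutex);	// stop spinning, sleep instead
 *			break;
 *		}
 *	}
 */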

#if CONFIG_PV_TICKET
extern bool has_lock_pv;
#endif /* CONFIG_PV_TICKET */

#ifdef LOCK_PRIVATE

#define LOCK_SNOOP_SPINS 100
#define LOCK_PRETEST 0

/* park the core in WFE until an event or exclusive-monitor clear wakes it */
#define wait_for_event() __builtin_arm_wfe()

#if SCHED_HYGIENE_DEBUG
/*
 * Bump the per-thread preemption count. Compiler-only ordering
 * (compiler_acq_rel) keeps the update ordered against surrounding code
 * without a hardware fence. On the 0 -> 1 transition, arm a
 * preemption-disable measurement when hygiene debug mode is enabled.
 */
#define lock_disable_preemption_for_thread(t) ({ \
	thread_t __dpft_thread = (t); \
	uint32_t *__dpft_countp = &__dpft_thread->machine.preemption_count; \
	uint32_t __dpft_count; \
	\
	__dpft_count = *__dpft_countp; \
	os_atomic_store(__dpft_countp, __dpft_count + 1, compiler_acq_rel); \
	\
	if (__dpft_count == 0 && sched_preemption_disable_debug_mode) { \
		_prepare_preemption_disable_measurement(); \
	} \
})
#else /* SCHED_HYGIENE_DEBUG */
#define lock_disable_preemption_for_thread(t) ({ \
	uint32_t *__dpft_countp = &(t)->machine.preemption_count; \
	\
	os_atomic_store(__dpft_countp, *__dpft_countp + 1, compiler_acq_rel); \
})
#endif /* SCHED_HYGIENE_DEBUG */
#define lock_enable_preemption() enable_preemption()
#define lock_preemption_level_for_thread(t) get_preemption_level_for_thread(t)
#define lock_preemption_disabled_for_thread(t) (get_preemption_level_for_thread(t) != 0)
#define current_thread() current_thread_fast()
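
/*
 * Illustrative pairing, not kernel code: lock paths wrap the critical
 * section in the preemption macros above so the holder cannot be
 * preempted while waiters spin. do_acquire()/do_release() are
 * hypothetical placeholders.
 *
 *	lock_disable_preemption_for_thread(current_thread());
 *	do_acquire(lock);
 *	//  ... critical section ...
 *	do_release(lock);
 *	lock_enable_preemption();
 */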

/*
 * Load *ptr with a load-exclusive access and evaluate cond_expr on the
 * result. If the condition already holds, drop the exclusive monitor and
 * fall through; otherwise wait in WFE until an event (such as a store to
 * *ptr clearing the monitor) wakes the core to reevaluate.
 */
#define __hw_spin_wait_load(ptr, load_var, cond_result, cond_expr) ({ \
	load_var = os_atomic_load_exclusive(ptr, relaxed); \
	cond_result = (cond_expr); \
	if (__probable(cond_result)) { \
		os_atomic_clear_exclusive(); \
	} else { \
		wait_for_event(); \
	} \
})
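
/*
 * Illustrative usage sketch, under assumptions: a WFE-based acquire loop
 * can poll a lock word with __hw_spin_wait_load() until it reads as
 * unlocked, then race for ownership with a compare-exchange. lock_word,
 * UNLOCKED and LOCKED are hypothetical.
 *
 *	uint32_t v;
 *	bool is_unlocked;
 *
 *	for (;;) {
 *		__hw_spin_wait_load(&lock_word, v, is_unlocked, v == UNLOCKED);
 *		if (is_unlocked &&
 *		    os_atomic_cmpxchg(&lock_word, UNLOCKED, LOCKED, acquire)) {
 *			break;	// acquired
 *		}
 *	}
 */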

#endif /* LOCK_PRIVATE */
#endif /* MACH_KERNEL_PRIVATE */
#endif /* _ARM_LOCKS_H_ */