/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _KERN_LOCKSTAT_H
#define _KERN_LOCKSTAT_H

#include <machine/locks.h>
#include <machine/atomic.h>
#include <kern/lock_group.h>
#include <kern/lock_mtx.h>

__BEGIN_DECLS
#pragma GCC visibility push(hidden)

#if XNU_KERNEL_PRIVATE

/*
 * DTrace lockstat probe definitions
 */

enum lockstat_probe_id {
        LS_NO_PROBE,

        /* Spinlocks */
        LS_LCK_SPIN_LOCK_ACQUIRE,
        LS_LCK_SPIN_LOCK_SPIN,
        LS_LCK_SPIN_UNLOCK_RELEASE,

        /*
         * Mutexes can also have interlock-spin events, which are
         * unique to our lock implementation.
         */
        LS_LCK_MTX_LOCK_ACQUIRE,
        LS_LCK_MTX_LOCK_SPIN_ACQUIRE,
        LS_LCK_MTX_TRY_LOCK_ACQUIRE,
        LS_LCK_MTX_TRY_LOCK_SPIN_ACQUIRE,
        LS_LCK_MTX_UNLOCK_RELEASE,

        LS_LCK_MTX_LOCK_BLOCK,
        LS_LCK_MTX_LOCK_ADAPTIVE_SPIN,
        LS_LCK_MTX_LOCK_SPIN_SPIN,

        /*
         * Reader-writer locks support a blocking upgrade primitive, as
         * well as the possibility of spinning on the interlock.
         */
        LS_LCK_RW_LOCK_SHARED_ACQUIRE,
        LS_LCK_RW_LOCK_SHARED_BLOCK,
        LS_LCK_RW_LOCK_SHARED_SPIN,

        LS_LCK_RW_LOCK_EXCL_ACQUIRE,
        LS_LCK_RW_LOCK_EXCL_BLOCK,
        LS_LCK_RW_LOCK_EXCL_SPIN,

        LS_LCK_RW_DONE_RELEASE,

        LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE,
        LS_LCK_RW_TRY_LOCK_SHARED_SPIN,

        LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE,
        LS_LCK_RW_TRY_LOCK_EXCL_ILK_SPIN,

        LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE,
        LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN,
        LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK,

        LS_LCK_RW_LOCK_EXCL_TO_SHARED_DOWNGRADE,
        LS_LCK_RW_LOCK_EXCL_TO_SHARED_ILK_SPIN,

        /* Ticket lock */
        LS_LCK_TICKET_LOCK_ACQUIRE,
        LS_LCK_TICKET_LOCK_RELEASE,
        LS_LCK_TICKET_LOCK_SPIN,

        LS_NPROBES
};
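
/*
 * Each probe id above indexes lockstat_probemap[]: while a probe is
 * enabled, the DTrace lockstat provider stores its non-zero DTrace
 * probe id in the corresponding slot (see lockstat_probe() below) and
 * clears it again on disable.  As a purely illustrative sketch, a
 * consumer could aggregate mutex block times with a one-liner such as:
 *
 *    dtrace -n 'lockstat:::adaptive-block { @[stack()] = sum(arg1); }'
 *
 * where the probe name and argument layout are those of the DTrace
 * lockstat provider and may vary by release.
 */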

#if CONFIG_DTRACE
/*
 * Time threshold before DTrace lockstat spin
 * probes are triggered.
 */
extern machine_timeout_t dtrace_spin_threshold;
extern uint32_t lockstat_probemap[LS_NPROBES];

extern void lck_grp_stat_enable(lck_grp_stat_t *stat);

extern void lck_grp_stat_disable(lck_grp_stat_t *stat);

extern bool lck_grp_stat_enabled(lck_grp_stat_t *stat);

extern void lck_grp_stat_inc(lck_grp_t *grp, lck_grp_stat_t *stat, bool always);
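
/*
 * Usage sketch (illustrative, based on the call sites later in this
 * header): implementations resolve the group from its attribute id and
 * bump the matching per-group counter, e.g.
 *
 *    lck_grp_t *grp = lck_grp_resolve(grp_attr_id);
 *    lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_mtx_miss, true);
 *
 * The `always` flag is passed as true on these paths; whether a stat
 * actually counts can additionally depend on lck_grp_stat_enabled().
 */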

#endif /* CONFIG_DTRACE */
#endif /* XNU_KERNEL_PRIVATE */
#if MACH_KERNEL_PRIVATE
#if CONFIG_DTRACE

extern void dtrace_probe(uint32_t, uint64_t, uint64_t, uint64_t, uint64_t, uint64_t);

static inline void
lockprof_probe(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t val)
{
        dtrace_probe(stat->lgs_probeid, (uintptr_t)grp, val, 0, 0, 0);
}

__attribute__((always_inline))
static inline void
lockstat_probe(
        enum lockstat_probe_id pid,
        const void *lock,
        uint64_t arg0,
        uint64_t arg1,
        uint64_t arg2,
        uint64_t arg3)
{
        uint32_t id = lockstat_probemap[pid];

        if (__improbable(id)) {
                dtrace_probe(id, (uintptr_t)lock, arg0, arg1, arg2, arg3);
        }
}

__pure2
static inline uint32_t
lockstat_enabled(void)
{
        return lck_debug_state.lds_value;
}

/*
 * Macros to record lockstat probes.
 */
#define LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3, ...) \
        lockstat_probe(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD__(probe, lp, arg0, arg1, arg2, arg3, ...) \
        LOCKSTAT_RECORD_(probe, lp, arg0, arg1, arg2, arg3)
#define LOCKSTAT_RECORD(probe, lp, ...) \
        LOCKSTAT_RECORD__(probe, lp, ##__VA_ARGS__, 0, 0, 0, 0)
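
/*
 * LOCKSTAT_RECORD pads the variadic argument list out to four probe
 * arguments; the extra trailing zeros are swallowed by the `...` of the
 * two helper macros above.  Illustrative expansion, assuming a
 * hypothetical lock `lck` and spin duration `t`:
 *
 *    LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_SPIN, lck, t)
 *    // => lockstat_probe(LS_LCK_SPIN_LOCK_SPIN, lck, t, 0, 0, 0)
 */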

__attribute__((always_inline, overloadable))
static inline bool
__lck_time_stat_enabled(enum lockstat_probe_id lspid, uint32_t grp_attr_id)
{
        if (__improbable(grp_attr_id & LCK_GRP_ATTR_TIME_STAT)) {
                return true;
        }
        if (__improbable(lspid && lockstat_probemap[lspid])) {
                return true;
        }
        return false;
}

__attribute__((always_inline, overloadable))
static inline bool
__lck_time_stat_enabled(enum lockstat_probe_id lspid, lck_grp_t *grp)
{
        uint32_t grp_attr_id = grp ? grp->lck_grp_attr_id : 0;

        return __lck_time_stat_enabled(lspid, grp_attr_id);
}
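
/*
 * Timed statistics are enabled from either of two sources: the lock
 * group opted in via LCK_GRP_ATTR_TIME_STAT, or a DTrace probe for the
 * event is currently live in lockstat_probemap[].  The lck_grp_t
 * overload tolerates a NULL group.  Illustrative check (assumed caller):
 *
 *    if (__lck_time_stat_enabled(LS_LCK_TICKET_LOCK_SPIN, LCK_GRP_NULL)) {
 *            // with no group, only a live DTrace probe gets here
 *    }
 */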

#if LOCK_STATS
extern void __lck_grp_spin_update_held(lck_grp_t *grp);
extern void __lck_grp_spin_update_miss(lck_grp_t *grp);
extern void __lck_grp_spin_update_spin(lck_grp_t *grp, uint64_t time);
extern void __lck_grp_ticket_update_held(lck_grp_t *grp);
extern void __lck_grp_ticket_update_miss(lck_grp_t *grp);
extern void __lck_grp_ticket_update_spin(lck_grp_t *grp, uint64_t time);
#define LOCK_STATS_CALL(fn, ...) fn(__VA_ARGS__)
#else
#define LOCK_STATS_CALL(fn, ...) ((void)0)
#endif /* LOCK_STATS */

static inline enum lockstat_probe_id
lck_mtx_acquire_probe(bool spin, bool try_lock)
{
        if (spin) {
                if (try_lock) {
                        return LS_LCK_MTX_TRY_LOCK_SPIN_ACQUIRE;
                }
                return LS_LCK_MTX_LOCK_SPIN_ACQUIRE;
        } else {
                if (try_lock) {
                        return LS_LCK_MTX_TRY_LOCK_ACQUIRE;
                }
                return LS_LCK_MTX_LOCK_ACQUIRE;
        }
}

__attribute__((cold))
__header_always_inline void
lck_mtx_prof_probe(
        enum lockstat_probe_id id,
        lck_mtx_t *mtx,
        uint32_t grp_attr_id,
        bool profile)
{
#pragma unused(mtx)
        if (profile) {
                lck_grp_t *grp = LCK_GRP_NULL;

                switch (id) {
                case LS_LCK_MTX_LOCK_ACQUIRE:
                case LS_LCK_MTX_LOCK_SPIN_ACQUIRE:
                case LS_LCK_MTX_TRY_LOCK_ACQUIRE:
                case LS_LCK_MTX_TRY_LOCK_SPIN_ACQUIRE:
                        grp = lck_grp_resolve(grp_attr_id);
                        __builtin_assume(grp != NULL);
                        lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_mtx_held, true);
                        break;
                default:
                        break;
                }
        }
        LOCKSTAT_RECORD(id, mtx, (uintptr_t)lck_grp_resolve(grp_attr_id));
}

#define lck_mtx_time_stat_begin(id) ({ \
        uint64_t __start = 0; \
        if (__lck_time_stat_enabled(id, LCK_GRP_NULL)) { \
                __start = ml_get_timebase(); \
                __builtin_assume(__start != 0); \
        } \
        __start; \
})

extern void lck_mtx_time_stat_record(
        enum lockstat_probe_id id,
        lck_mtx_t *mtx,
        uint32_t grp_attr_id,
        uint64_t start);
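
/*
 * Begin/record pairing (sketch of the intended usage; the
 * LCK_MTX_*_BEGIN/_END macros below are the real call sites):
 *
 *    uint64_t start = lck_mtx_time_stat_begin(LS_LCK_MTX_LOCK_SPIN_SPIN);
 *    // ... contended spin ...
 *    lck_mtx_time_stat_record(LS_LCK_MTX_LOCK_SPIN_SPIN, mtx,
 *        grp_attr_id, start);
 *
 * A zero `start` means timing was disabled when the begin macro ran,
 * and the record side is expected to treat it as "nothing to record".
 */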

/*
 * Enable this preprocessor define to record only the first miss.
 * By default we count every miss, so multiple misses may be recorded
 * for a single lock acquire attempt via lck_mtx_lock().
 */
#define LCK_MTX_LOCK_FIRST_MISS_ONLY 0

static inline void
LCK_MTX_PROF_MISS(lck_mtx_t *mtx, uint32_t grp_attr_id, int *first_miss)
{
        lck_grp_t *grp = lck_grp_resolve(grp_attr_id);

#pragma unused(mtx, grp, first_miss)
#if LCK_MTX_LOCK_FIRST_MISS_ONLY
        if (*first_miss & 1) {
                return;
        }
        *first_miss |= 1;
#endif /* LCK_MTX_LOCK_FIRST_MISS_ONLY */
        lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_mtx_miss, true);
}

static inline void
LCK_MTX_PROF_WAIT(
        lck_mtx_t *mtx,
        uint32_t grp_attr_id,
        bool direct_wait,
        int *first_miss)
{
        lck_grp_t *grp = lck_grp_resolve(grp_attr_id);

#pragma unused(mtx, first_miss)
#if LCK_MTX_LOCK_FIRST_MISS_ONLY
        if (*first_miss & 2) {
                return;
        }
        *first_miss |= 2;
#endif /* LCK_MTX_LOCK_FIRST_MISS_ONLY */
        if (direct_wait) {
                lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_mtx_direct_wait, true);
        } else {
                lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_mtx_wait, true);
        }
}
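
/*
 * When LCK_MTX_LOCK_FIRST_MISS_ONLY is enabled, *first_miss carries two
 * independent "already counted" bits across one acquisition attempt:
 * bit 0 for the miss counter, bit 1 for the wait counters.  Sketch of
 * an assumed caller:
 *
 *    int first_miss = 0;
 *    LCK_MTX_PROF_MISS(mtx, grp_attr_id, &first_miss);  // counted once
 *    LCK_MTX_PROF_MISS(mtx, grp_attr_id, &first_miss);  // suppressed
 *    LCK_MTX_PROF_WAIT(mtx, grp_attr_id, false, &first_miss);
 */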

#else /* !CONFIG_DTRACE */

#define lockstat_enabled() 0u
#define LOCKSTAT_RECORD(probe, lock, ...) ((void)0)

#define __lck_time_stat_enabled(lspid, grp) false
#define lck_mtx_prof_probe(id, mtx, grp, profile) ((void)0)
#define lck_mtx_time_stat_begin(id) 0ull
#define lck_mtx_time_stat_record(id, lck, grp, start) ((void)(start))

#endif /* !CONFIG_DTRACE */

static inline void
lck_grp_spin_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
        LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
        LOCK_STATS_CALL(__lck_grp_spin_update_held, grp);
#endif /* CONFIG_DTRACE */
}

static inline void
lck_grp_spin_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
        LOCK_STATS_CALL(__lck_grp_spin_update_miss, grp);
#endif /* CONFIG_DTRACE */
}

static inline void
lck_grp_spin_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
        if (time > os_atomic_load(&dtrace_spin_threshold, relaxed)) {
                LOCKSTAT_RECORD(LS_LCK_SPIN_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
        }
        LOCK_STATS_CALL(__lck_grp_spin_update_spin, grp, time);
#endif /* CONFIG_DTRACE */
}

static inline bool
lck_grp_spin_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
        bool enabled = __lck_time_stat_enabled(LS_LCK_SPIN_LOCK_SPIN, LCK_GRP_PROBEARG(grp));
#if CONFIG_DTRACE && LOCK_STATS
        enabled |= (grp && lck_grp_stat_enabled(&grp->lck_grp_stats.lgss_spin_spin));
#endif /* CONFIG_DTRACE && LOCK_STATS */
        return enabled;
}
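
/*
 * Putting the spin helpers together (illustrative sketch of an assumed
 * spinlock slow path; `grp` is only present when LCK_GRP_ARG compiles
 * group arguments in):
 *
 *    uint64_t begin = 0;
 *    if (lck_grp_spin_spin_enabled(lock LCK_GRP_ARG(grp))) {
 *            begin = ml_get_timebase();
 *    }
 *    // ... spin until acquired ...
 *    if (begin != 0) {
 *            lck_grp_spin_update_spin(lock LCK_GRP_ARG(grp),
 *                ml_get_timebase() - begin);
 *    }
 *    lck_grp_spin_update_held(lock LCK_GRP_ARG(grp));
 */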

static inline void
lck_grp_ticket_update_held(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
        LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_ACQUIRE, lock, (uintptr_t)LCK_GRP_PROBEARG(grp));
        LOCK_STATS_CALL(__lck_grp_ticket_update_held, grp);
#endif /* CONFIG_DTRACE */
}

static inline void
lck_grp_ticket_update_miss(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
#if CONFIG_DTRACE
        LOCK_STATS_CALL(__lck_grp_ticket_update_miss, grp);
#endif /* CONFIG_DTRACE */
}

static inline bool
lck_grp_ticket_spin_enabled(void *lock LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(lock)
        bool enabled = __lck_time_stat_enabled(LS_LCK_TICKET_LOCK_SPIN, LCK_GRP_PROBEARG(grp));
#if CONFIG_DTRACE && LOCK_STATS
        enabled |= (grp && lck_grp_stat_enabled(&grp->lck_grp_stats.lgss_ticket_spin));
#endif /* CONFIG_DTRACE && LOCK_STATS */
        return enabled;
}

static inline void
lck_grp_ticket_update_spin(void *lock LCK_GRP_ARG(lck_grp_t *grp), uint64_t time)
{
#pragma unused(lock, time)
#if CONFIG_DTRACE
        if (time > os_atomic_load(&dtrace_spin_threshold, relaxed)) {
                LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_SPIN, lock, time LCK_GRP_ARG((uintptr_t)grp));
        }
        LOCK_STATS_CALL(__lck_grp_ticket_update_spin, grp, time);
#endif /* CONFIG_DTRACE */
}

/*
 * Mutexes
 */
#define LCK_MTX_ACQUIRED(mtx, grp, spin, profile) \
        lck_mtx_prof_probe(lck_mtx_acquire_probe(spin, false), mtx, grp, profile)

#define LCK_MTX_TRY_ACQUIRED(mtx, grp, spin, profile) \
        lck_mtx_prof_probe(lck_mtx_acquire_probe(spin, true), mtx, grp, profile)

#define LCK_MTX_RELEASED(mtx, grp, profile) \
        lck_mtx_prof_probe(LS_LCK_MTX_UNLOCK_RELEASE, mtx, grp, profile)

#define LCK_MTX_BLOCK_BEGIN() \
        lck_mtx_time_stat_begin(LS_LCK_MTX_LOCK_BLOCK)

#define LCK_MTX_BLOCK_END(mtx, grp, start) \
        lck_mtx_time_stat_record(LS_LCK_MTX_LOCK_BLOCK, mtx, grp, start)

#define LCK_MTX_ADAPTIVE_SPIN_BEGIN() \
        lck_mtx_time_stat_begin(LS_LCK_MTX_LOCK_ADAPTIVE_SPIN)

#define LCK_MTX_ADAPTIVE_SPIN_END(mtx, grp, start) \
        lck_mtx_time_stat_record(LS_LCK_MTX_LOCK_ADAPTIVE_SPIN, mtx, grp, start)

#define LCK_MTX_SPIN_SPIN_BEGIN() \
        lck_mtx_time_stat_begin(LS_LCK_MTX_LOCK_SPIN_SPIN)

#define LCK_MTX_SPIN_SPIN_END(mtx, grp, start) \
        lck_mtx_time_stat_record(LS_LCK_MTX_LOCK_SPIN_SPIN, mtx, grp, start)
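
/*
 * Sketch of an assumed mutex slow path using the BEGIN/END pairs above
 * (illustrative only; the real call sites live in the lck_mtx
 * implementation):
 *
 *    uint64_t start = LCK_MTX_ADAPTIVE_SPIN_BEGIN();
 *    // ... adaptively spin while the owner stays on core ...
 *    LCK_MTX_ADAPTIVE_SPIN_END(mtx, grp_attr_id, start);
 *    LCK_MTX_ACQUIRED(mtx, grp_attr_id, false, profile);
 */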

#endif /* MACH_KERNEL_PRIVATE */

#pragma GCC visibility pop
__END_DECLS

#endif /* _KERN_LOCKSTAT_H */