1 | /* |
2 | * Copyright (c) 2000-2021 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | /* |
29 | * @OSF_COPYRIGHT@ |
30 | */ |
31 | /* |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University |
34 | * All Rights Reserved. |
35 | * |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright |
38 | * notice and this permission notice appear in all copies of the |
39 | * software, derivative works or modified versions, and any portions |
40 | * thereof, and that both notices appear in supporting documentation. |
41 | * |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. |
45 | * |
46 | * Carnegie Mellon requests users of this software to return to |
47 | * |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science |
50 | * Carnegie Mellon University |
51 | * Pittsburgh PA 15213-3890 |
52 | * |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. |
55 | */ |
56 | |
57 | #define LOCK_PRIVATE 1 |
58 | |
59 | #include <mach_ldebug.h> |
60 | #include <debug.h> |
61 | |
62 | #include <mach/mach_host_server.h> |
63 | #include <mach_debug/lockgroup_info.h> |
64 | |
65 | #if __x86_64__ |
66 | #include <i386/tsc.h> |
67 | #endif |
68 | |
69 | #include <kern/compact_id.h> |
70 | #include <kern/kalloc.h> |
71 | #include <kern/lock_stat.h> |
72 | #include <kern/locks.h> |
73 | |
74 | #include <os/atomic_private.h> |
75 | |
76 | static KALLOC_TYPE_DEFINE(KT_LCK_GRP_ATTR, lck_grp_attr_t, KT_PRIV_ACCT); |
77 | static KALLOC_TYPE_DEFINE(KT_LCK_GRP, lck_grp_t, KT_PRIV_ACCT); |
78 | static KALLOC_TYPE_DEFINE(KT_LCK_ATTR, lck_attr_t, KT_PRIV_ACCT); |
79 | |
80 | SECURITY_READ_ONLY_LATE(lck_attr_t) lck_attr_default; |
81 | static SECURITY_READ_ONLY_LATE(lck_grp_attr_t) lck_grp_attr_default; |
82 | static lck_grp_t lck_grp_compat_grp; |
83 | COMPACT_ID_TABLE_DEFINE(static, lck_grp_table); |
84 | struct lck_debug_state lck_debug_state; |
85 | |
86 | #pragma mark lock group attributes |
87 | |
88 | lck_grp_attr_t * |
89 | lck_grp_attr_alloc_init(void) |
90 | { |
91 | lck_grp_attr_t *attr; |
92 | |
	attr = zalloc(KT_LCK_GRP_ATTR);
94 | lck_grp_attr_setdefault(attr); |
95 | return attr; |
96 | } |
97 | |
98 | void |
99 | lck_grp_attr_setdefault(lck_grp_attr_t *attr) |
100 | { |
101 | attr->grp_attr_val = lck_grp_attr_default.grp_attr_val; |
102 | } |
103 | |
104 | void |
lck_grp_attr_setstat(lck_grp_attr_t *attr)
106 | { |
107 | attr->grp_attr_val |= LCK_GRP_ATTR_STAT; |
108 | } |
109 | |
110 | |
111 | void |
112 | lck_grp_attr_free(lck_grp_attr_t *attr) |
113 | { |
114 | zfree(KT_LCK_GRP_ATTR, attr); |
115 | } |
116 | |
117 | #pragma mark lock groups |
118 | |
119 | __startup_func |
120 | static void |
121 | lck_group_init(void) |
122 | { |
123 | if (LcksOpts & LCK_OPTION_ENABLE_STAT) { |
124 | lck_grp_attr_default.grp_attr_val |= LCK_GRP_ATTR_STAT; |
125 | } |
126 | if (LcksOpts & LCK_OPTION_ENABLE_TIME_STAT) { |
127 | lck_grp_attr_default.grp_attr_val |= LCK_GRP_ATTR_TIME_STAT; |
128 | } |
129 | if (LcksOpts & LCK_OPTION_ENABLE_DEBUG) { |
130 | lck_grp_attr_default.grp_attr_val |= LCK_GRP_ATTR_DEBUG; |
131 | } |
132 | |
133 | if (LcksOpts & LCK_OPTION_ENABLE_DEBUG) { |
134 | lck_attr_default.lck_attr_val = LCK_ATTR_DEBUG; |
135 | } else { |
136 | lck_attr_default.lck_attr_val = LCK_ATTR_NONE; |
137 | } |
138 | |
139 | /* |
140 | * This is a little gross, this allows us to use the table before |
141 | * compact_table_init() is called on it, but we have a chicken |
142 | * and egg problem otherwise. |
143 | * |
144 | * compact_table_init() really only inits the ticket lock |
145 | * with the proper lock group |
146 | */ |
	lck_grp_init(&lck_grp_compat_grp, "Compatibility APIs",
	    &lck_grp_attr_default);
	*compact_id_resolve(&lck_grp_table, 0) = LCK_GRP_NULL;
150 | } |
151 | STARTUP(LOCKS, STARTUP_RANK_FIRST, lck_group_init); |
152 | |
153 | __startup_func |
154 | void |
155 | lck_grp_startup_init(struct lck_grp_spec *sp) |
156 | { |
	lck_grp_init_flags(sp->grp, sp->grp_name, sp->grp_flags |
	    lck_grp_attr_default.grp_attr_val);
159 | } |
160 | |
161 | bool |
162 | lck_grp_has_stats(lck_grp_t *grp) |
163 | { |
164 | return grp->lck_grp_attr_id & LCK_GRP_ATTR_STAT; |
165 | } |
166 | |
167 | lck_grp_t * |
168 | lck_grp_alloc_init(const char *grp_name, lck_grp_attr_t *attr) |
169 | { |
170 | lck_grp_t *grp; |
171 | |
172 | if (attr == LCK_GRP_ATTR_NULL) { |
173 | attr = &lck_grp_attr_default; |
174 | } |
	grp = zalloc(KT_LCK_GRP);
	lck_grp_init_flags(grp, grp_name,
	    attr->grp_attr_val | LCK_GRP_ATTR_ALLOCATED);
178 | return grp; |
179 | } |
180 | |
181 | void |
182 | lck_grp_init(lck_grp_t *grp, const char *grp_name, lck_grp_attr_t *attr) |
183 | { |
184 | if (attr == LCK_GRP_ATTR_NULL) { |
185 | attr = &lck_grp_attr_default; |
186 | } |
	lck_grp_init_flags(grp, grp_name, attr->grp_attr_val);
188 | } |
189 | |
190 | lck_grp_t * |
191 | lck_grp_init_flags(lck_grp_t *grp, const char *grp_name, lck_grp_options_t flags) |
192 | { |
	bzero(grp, sizeof(lck_grp_t));
	os_ref_init_raw(&grp->lck_grp_refcnt, NULL);
	(void)strlcpy(grp->lck_grp_name, grp_name, LCK_GRP_MAX_NAME);
196 | |
197 | #if CONFIG_DTRACE |
198 | lck_grp_stats_t *stats = &grp->lck_grp_stats; |
199 | |
200 | if (flags & LCK_GRP_ATTR_STAT) { |
		lck_grp_stat_enable(&stats->lgss_spin_held);
		lck_grp_stat_enable(&stats->lgss_spin_miss);

		lck_grp_stat_enable(&stats->lgss_ticket_held);
		lck_grp_stat_enable(&stats->lgss_ticket_miss);

		lck_grp_stat_enable(&stats->lgss_mtx_held);
		lck_grp_stat_enable(&stats->lgss_mtx_direct_wait);
		lck_grp_stat_enable(&stats->lgss_mtx_miss);
		lck_grp_stat_enable(&stats->lgss_mtx_wait);
	}
	if (flags & LCK_GRP_ATTR_TIME_STAT) {
		lck_grp_stat_enable(&stats->lgss_spin_spin);
		lck_grp_stat_enable(&stats->lgss_ticket_spin);
215 | } |
216 | #endif /* CONFIG_DTRACE */ |
217 | |
218 | /* must be last as it publishes the group */ |
219 | if (startup_phase > STARTUP_SUB_LOCKS) { |
		compact_id_table_lock(&lck_grp_table);
	}
	flags |= compact_id_get_locked(&lck_grp_table, LCK_GRP_ATTR_ID_MASK, grp);
	grp->lck_grp_attr_id = flags;
	if (startup_phase > STARTUP_SUB_LOCKS) {
		compact_id_table_unlock(&lck_grp_table);
226 | } |
227 | |
228 | return grp; |
229 | } |
230 | |
231 | lck_grp_t * |
232 | lck_grp_resolve(uint32_t grp_attr_id) |
233 | { |
234 | grp_attr_id &= LCK_GRP_ATTR_ID_MASK; |
	return *compact_id_resolve(&lck_grp_table, grp_attr_id);
236 | } |
237 | |
238 | __abortlike |
239 | static void |
240 | __lck_grp_assert_id_panic(lck_grp_t *grp, uint32_t grp_attr_id) |
241 | { |
242 | panic("lck_grp_t %p has ID %d, but %d was expected" , grp, |
243 | grp->lck_grp_attr_id & LCK_GRP_ATTR_ID_MASK, |
244 | grp_attr_id & LCK_GRP_ATTR_ID_MASK); |
245 | } |
246 | |
247 | __attribute__((always_inline)) |
248 | void |
249 | lck_grp_assert_id(lck_grp_t *grp, uint32_t grp_attr_id) |
250 | { |
251 | if ((grp->lck_grp_attr_id ^ grp_attr_id) & LCK_GRP_ATTR_ID_MASK) { |
252 | __lck_grp_assert_id_panic(grp, grp_attr_id); |
253 | } |
254 | } |
255 | |
256 | static void |
257 | lck_grp_destroy(lck_grp_t *grp) |
258 | { |
	compact_id_put(&lck_grp_table,
	    grp->lck_grp_attr_id & LCK_GRP_ATTR_ID_MASK);
261 | zfree(KT_LCK_GRP, grp); |
262 | } |
263 | |
264 | void |
265 | lck_grp_free(lck_grp_t *grp) |
266 | { |
267 | lck_grp_deallocate(grp, NULL); |
268 | } |
269 | |
270 | void |
271 | lck_grp_reference(lck_grp_t *grp, uint32_t *cnt) |
272 | { |
273 | if (cnt) { |
274 | os_atomic_inc(cnt, relaxed); |
275 | } |
276 | if (grp->lck_grp_attr_id & LCK_GRP_ATTR_ALLOCATED) { |
277 | os_ref_retain_raw(&grp->lck_grp_refcnt, NULL); |
278 | } |
279 | } |
280 | |
281 | void |
282 | lck_grp_deallocate(lck_grp_t *grp, uint32_t *cnt) |
283 | { |
284 | if (cnt) { |
285 | os_atomic_dec(cnt, relaxed); |
286 | } |
287 | if ((grp->lck_grp_attr_id & LCK_GRP_ATTR_ALLOCATED) && |
288 | os_ref_release_raw(&grp->lck_grp_refcnt, 0) == 0) { |
289 | lck_grp_destroy(grp); |
290 | } |
291 | } |
292 | |
293 | void |
294 | lck_grp_foreach(bool (^block)(lck_grp_t *)) |
295 | { |
	compact_id_for_each(&lck_grp_table, 64, (bool (^)(void *))block);
297 | } |
298 | |
299 | void |
300 | lck_grp_enable_feature(lck_debug_feature_t feat) |
301 | { |
302 | uint32_t bit = 1u << feat; |
303 | |
	compact_id_table_lock(&lck_grp_table);
	if (lck_debug_state.lds_counts[feat]++ == 0) {
		os_atomic_or(&lck_debug_state.lds_value, bit, relaxed);
	}
	compact_id_table_unlock(&lck_grp_table);
309 | } |
310 | |
311 | void |
312 | lck_grp_disable_feature(lck_debug_feature_t feat) |
313 | { |
314 | uint32_t bit = 1u << feat; |
315 | long v; |
316 | |
	compact_id_table_lock(&lck_grp_table);
	v = --lck_debug_state.lds_counts[feat];
	if (v < 0) {
		panic("lck_debug_state: feature %d imbalance", feat);
	}
	if (v == 0) {
		os_atomic_andnot(&lck_debug_state.lds_value, bit, relaxed);
	}
	compact_id_table_unlock(&lck_grp_table);
326 | } |
327 | |
328 | kern_return_t |
329 | host_lockgroup_info( |
330 | host_t host, |
331 | lockgroup_info_array_t *lockgroup_infop, |
332 | mach_msg_type_number_t *lockgroup_infoCntp) |
333 | { |
334 | lockgroup_info_t *info; |
335 | vm_offset_t addr; |
336 | vm_size_t size, used; |
337 | vm_size_t vmsize, vmused; |
338 | uint32_t needed; |
339 | __block uint32_t count = 0; |
340 | vm_map_copy_t copy; |
341 | kern_return_t kr; |
342 | |
343 | if (host == HOST_NULL) { |
344 | return KERN_INVALID_HOST; |
345 | } |
346 | |
347 | /* |
348 | * Give about 10% of slop here, lock groups are mostly allocated |
349 | * during boot or kext loads, and is extremely unlikely to grow |
350 | * rapidly. |
351 | */ |
352 | needed = os_atomic_load(&lck_grp_table.cidt_count, relaxed); |
353 | needed += needed / 8; |
354 | size = needed * sizeof(lockgroup_info_t); |
355 | vmsize = vm_map_round_page(size, VM_MAP_PAGE_MASK(ipc_kernel_map)); |
	kr = kmem_alloc(ipc_kernel_map, &addr, vmsize,
	    KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_IPC);
358 | if (kr != KERN_SUCCESS) { |
359 | return kr; |
360 | } |
361 | |
362 | info = (lockgroup_info_t *)addr; |
363 | |
	lck_grp_foreach(^bool (lck_grp_t *grp) {
365 | info[count].lock_spin_cnt = grp->lck_grp_spincnt; |
366 | info[count].lock_rw_cnt = grp->lck_grp_rwcnt; |
367 | info[count].lock_mtx_cnt = grp->lck_grp_mtxcnt; |
368 | |
369 | #if CONFIG_DTRACE |
370 | info[count].lock_spin_held_cnt = grp->lck_grp_stats.lgss_spin_held.lgs_count; |
371 | info[count].lock_spin_miss_cnt = grp->lck_grp_stats.lgss_spin_miss.lgs_count; |
372 | |
373 | // Historically on x86, held was used for "direct wait" and util for "held" |
374 | info[count].lock_mtx_util_cnt = grp->lck_grp_stats.lgss_mtx_held.lgs_count; |
375 | info[count].lock_mtx_held_cnt = grp->lck_grp_stats.lgss_mtx_direct_wait.lgs_count; |
376 | info[count].lock_mtx_miss_cnt = grp->lck_grp_stats.lgss_mtx_miss.lgs_count; |
377 | info[count].lock_mtx_wait_cnt = grp->lck_grp_stats.lgss_mtx_wait.lgs_count; |
378 | #endif /* CONFIG_DTRACE */ |
379 | |
		memcpy(info[count].lockgroup_name, grp->lck_grp_name, LOCKGROUP_MAX_NAME);
381 | |
		return ++count < needed;
383 | }); |
384 | |
385 | /* |
386 | * We might have found less groups than `needed` |
387 | * get rid of the excess now: |
388 | * - [0, used) is what we want to return |
389 | * - [0, size) is what we allocated |
390 | */ |
391 | used = count * sizeof(lockgroup_info_t); |
392 | vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map)); |
393 | |
394 | if (vmused < vmsize) { |
		kmem_free(ipc_kernel_map, addr + vmused, vmsize - vmused);
396 | } |
397 | |
	kr = vm_map_unwire(ipc_kernel_map, addr, addr + vmused, FALSE);
399 | assert(kr == KERN_SUCCESS); |
400 | |
	kr = vm_map_copyin(ipc_kernel_map, addr, used, TRUE, &copy);
402 | assert(kr == KERN_SUCCESS); |
403 | |
404 | *lockgroup_infop = (lockgroup_info_t *)copy; |
405 | *lockgroup_infoCntp = count; |
406 | |
407 | return KERN_SUCCESS; |
408 | } |
409 | |
410 | #pragma mark lock attributes |
411 | |
412 | __startup_func |
413 | void |
414 | lck_attr_startup_init(struct lck_attr_startup_spec *sp) |
415 | { |
416 | lck_attr_t *attr = sp->lck_attr; |
417 | lck_attr_setdefault(attr); |
418 | attr->lck_attr_val |= sp->lck_attr_set_flags; |
419 | attr->lck_attr_val &= ~sp->lck_attr_clear_flags; |
420 | } |
421 | |
422 | lck_attr_t * |
423 | lck_attr_alloc_init(void) |
424 | { |
425 | lck_attr_t *attr; |
426 | |
	attr = zalloc(KT_LCK_ATTR);
428 | lck_attr_setdefault(attr); |
429 | return attr; |
430 | } |
431 | |
432 | |
433 | void |
434 | lck_attr_setdefault(lck_attr_t *attr) |
435 | { |
436 | attr->lck_attr_val = lck_attr_default.lck_attr_val; |
437 | } |
438 | |
439 | |
440 | void |
441 | lck_attr_setdebug(lck_attr_t *attr) |
442 | { |
443 | os_atomic_or(&attr->lck_attr_val, LCK_ATTR_DEBUG, relaxed); |
444 | } |
445 | |
446 | void |
447 | lck_attr_cleardebug(lck_attr_t *attr) |
448 | { |
449 | os_atomic_andnot(&attr->lck_attr_val, LCK_ATTR_DEBUG, relaxed); |
450 | } |
451 | |
452 | void |
453 | lck_attr_rw_shared_priority(lck_attr_t *attr) |
454 | { |
455 | os_atomic_or(&attr->lck_attr_val, LCK_ATTR_RW_SHARED_PRIORITY, relaxed); |
456 | } |
457 | |
458 | |
459 | void |
460 | lck_attr_free(lck_attr_t *attr) |
461 | { |
462 | zfree(KT_LCK_ATTR, attr); |
463 | } |
464 | |
465 | #pragma mark lock stat |
466 | #if CONFIG_DTRACE |
467 | |
468 | void |
469 | lck_grp_stat_enable(lck_grp_stat_t *stat) |
470 | { |
471 | /* callers ensure this is properly synchronized */ |
472 | stat->lgs_enablings++; |
473 | } |
474 | |
475 | void |
476 | lck_grp_stat_disable(lck_grp_stat_t *stat) |
477 | { |
478 | stat->lgs_enablings--; |
479 | } |
480 | |
481 | bool |
482 | lck_grp_stat_enabled(lck_grp_stat_t *stat) |
483 | { |
484 | return stat->lgs_enablings != 0; |
485 | } |
486 | |
487 | |
488 | __attribute__((always_inline)) |
489 | void |
490 | lck_grp_stat_inc(lck_grp_t *grp, lck_grp_stat_t *stat, bool always) |
491 | { |
	if (always || lck_grp_stat_enabled(stat)) {
		uint64_t val = os_atomic_inc_orig(&stat->lgs_count, relaxed);
495 | if (__improbable(stat->lgs_limit && (val % (stat->lgs_limit)) == 0)) { |
496 | lockprof_probe(grp, stat, val); |
497 | } |
498 | } |
499 | } |
500 | |
501 | #if LOCK_STATS |
502 | |
503 | static inline void |
504 | lck_grp_inc_time_stats(lck_grp_t *grp, lck_grp_stat_t *stat, uint64_t time) |
505 | { |
506 | if (lck_grp_stat_enabled(stat)) { |
		uint64_t val = os_atomic_add_orig(&stat->lgs_count, time, relaxed);
508 | if (__improbable(stat->lgs_limit)) { |
509 | while (__improbable(time > stat->lgs_limit)) { |
510 | time -= stat->lgs_limit; |
511 | lockprof_probe(grp, stat, val); |
512 | } |
513 | if (__improbable(((val % stat->lgs_limit) + time) > stat->lgs_limit)) { |
514 | lockprof_probe(grp, stat, val); |
515 | } |
516 | } |
517 | } |
518 | } |
519 | |
520 | void |
521 | __lck_grp_spin_update_held(lck_grp_t *grp) |
522 | { |
523 | if (grp) { |
524 | lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_spin_held, false); |
525 | } |
526 | } |
527 | |
528 | void |
529 | __lck_grp_spin_update_miss(lck_grp_t *grp) |
530 | { |
531 | if (grp) { |
532 | lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_spin_miss, false); |
533 | } |
534 | } |
535 | |
536 | void |
537 | __lck_grp_spin_update_spin(lck_grp_t *grp, uint64_t time) |
538 | { |
539 | if (grp) { |
540 | lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_spin_spin; |
541 | lck_grp_inc_time_stats(grp, stat, time); |
542 | } |
543 | } |
544 | |
545 | void |
546 | __lck_grp_ticket_update_held(lck_grp_t *grp) |
547 | { |
548 | if (grp) { |
549 | lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_ticket_held, false); |
550 | } |
551 | } |
552 | |
553 | void |
554 | __lck_grp_ticket_update_miss(lck_grp_t *grp) |
555 | { |
556 | if (grp) { |
557 | lck_grp_stat_inc(grp, &grp->lck_grp_stats.lgss_ticket_miss, false); |
558 | } |
559 | } |
560 | |
561 | void |
562 | __lck_grp_ticket_update_spin(lck_grp_t *grp, uint64_t time) |
563 | { |
564 | if (grp) { |
565 | lck_grp_stat_t *stat = &grp->lck_grp_stats.lgss_ticket_spin; |
566 | lck_grp_inc_time_stats(grp, stat, time); |
567 | } |
568 | } |
569 | |
570 | #endif /* LOCK_STATS */ |
571 | |
572 | void |
573 | lck_mtx_time_stat_record( |
574 | enum lockstat_probe_id pid, |
575 | lck_mtx_t *mtx, |
576 | uint32_t grp_attr_id, |
577 | uint64_t start) |
578 | { |
579 | uint32_t id = lockstat_probemap[pid]; |
580 | |
581 | if (__improbable(start && id)) { |
582 | uint64_t delta = ml_get_timebase() - start; |
583 | lck_grp_t *grp = lck_grp_resolve(grp_attr_id); |
584 | |
585 | #if __x86_64__ |
586 | delta = tmrCvt(delta, tscFCvtt2n); |
587 | #endif |
588 | dtrace_probe(id, (uintptr_t)mtx, delta, (uintptr_t)grp, 0, 0); |
589 | } |
590 | } |
591 | |
592 | #endif /* CONFIG_DTRACE */ |
593 | |