1 | /* |
2 | * Copyright (c) 2012-2013 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | #include <mach/host_priv.h> |
29 | #include <mach/host_special_ports.h> |
30 | #include <mach/mach_types.h> |
31 | #include <mach/telemetry_notification_server.h> |
32 | |
33 | #include <kern/assert.h> |
34 | #include <kern/clock.h> |
35 | #include <kern/debug.h> |
36 | #include <kern/host.h> |
37 | #include <kern/kalloc.h> |
38 | #include <kern/kern_types.h> |
39 | #include <kern/locks.h> |
40 | #include <kern/misc_protos.h> |
41 | #include <kern/sched.h> |
42 | #include <kern/sched_prim.h> |
43 | #include <kern/telemetry.h> |
44 | #include <kern/timer_call.h> |
45 | #include <kern/policy_internal.h> |
46 | #include <kern/kcdata.h> |
47 | |
48 | #include <pexpert/pexpert.h> |
49 | |
50 | #include <vm/vm_kern.h> |
51 | #include <vm/vm_shared_region.h> |
52 | |
53 | #include <kperf/callstack.h> |
54 | #include <kern/backtrace.h> |
55 | #include <kern/monotonic.h> |
56 | |
57 | #include <sys/kdebug.h> |
58 | #include <uuid/uuid.h> |
59 | #include <kdp/kdp_dyld.h> |
60 | |
61 | #define TELEMETRY_DEBUG 0 |
62 | |
63 | extern int proc_pid(void *); |
64 | extern char *proc_name_address(void *p); |
65 | extern uint64_t proc_uniqueid(void *p); |
66 | extern uint64_t proc_was_throttled(void *p); |
67 | extern uint64_t proc_did_throttle(void *p); |
68 | extern int proc_selfpid(void); |
69 | extern boolean_t task_did_exec(task_t task); |
70 | extern boolean_t task_is_exec_copy(task_t task); |
71 | |
struct micro_snapshot_buffer {
	vm_offset_t buffer;		/* kernel VA of the backing store */
	uint32_t size;			/* size of the backing store, in bytes */
	uint32_t current_position;	/* cursor: offset where the next record is written */
	uint32_t end_point;		/* last offset containing valid data, for wrap-around */
};
78 | |
79 | void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct micro_snapshot_buffer * current_buffer); |
80 | int telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, boolean_t mark, struct micro_snapshot_buffer * current_buffer); |
81 | |
82 | #define TELEMETRY_DEFAULT_SAMPLE_RATE (1) /* 1 sample every 1 second */ |
83 | #define TELEMETRY_DEFAULT_BUFFER_SIZE (16*1024) |
84 | #define TELEMETRY_MAX_BUFFER_SIZE (64*1024) |
85 | |
86 | #define TELEMETRY_DEFAULT_NOTIFY_LEEWAY (4*1024) // Userland gets 4k of leeway to collect data after notification |
87 | #define TELEMETRY_MAX_UUID_COUNT (128) // Max of 128 non-shared-cache UUIDs to log for symbolication |
88 | |
89 | uint32_t telemetry_sample_rate = 0; |
90 | volatile boolean_t telemetry_needs_record = FALSE; |
91 | volatile boolean_t telemetry_needs_timer_arming_record = FALSE; |
92 | |
93 | /* |
94 | * If TRUE, record micro-stackshot samples for all tasks. |
95 | * If FALSE, only sample tasks which are marked for telemetry. |
96 | */ |
97 | boolean_t telemetry_sample_all_tasks = FALSE; |
98 | boolean_t telemetry_sample_pmis = FALSE; |
99 | uint32_t telemetry_active_tasks = 0; // Number of tasks opted into telemetry |
100 | |
uint32_t telemetry_timestamp = 0;	// Incremented by compute_telemetry(); drives the sample rate
102 | |
103 | /* |
104 | * The telemetry_buffer is responsible |
105 | * for timer samples and interrupt samples that are driven by |
106 | * compute_averages(). It will notify its client (if one |
107 | * exists) when it has enough data to be worth flushing. |
108 | */ |
109 | struct micro_snapshot_buffer telemetry_buffer = {0, 0, 0, 0}; |
110 | |
int telemetry_bytes_since_last_mark = -1; // How much data since buf was last marked?
int telemetry_buffer_notify_at = 0;	// Notify userland once this many bytes accumulate since the last mark
113 | |
114 | lck_grp_t telemetry_lck_grp; |
115 | lck_mtx_t telemetry_mtx; |
116 | lck_mtx_t telemetry_pmi_mtx; |
117 | |
118 | #define TELEMETRY_LOCK() do { lck_mtx_lock(&telemetry_mtx); } while (0) |
119 | #define TELEMETRY_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&telemetry_mtx) |
120 | #define TELEMETRY_UNLOCK() do { lck_mtx_unlock(&telemetry_mtx); } while (0) |
121 | |
122 | #define TELEMETRY_PMI_LOCK() do { lck_mtx_lock(&telemetry_pmi_mtx); } while (0) |
123 | #define TELEMETRY_PMI_UNLOCK() do { lck_mtx_unlock(&telemetry_pmi_mtx); } while (0) |
124 | |
125 | void telemetry_init(void) |
126 | { |
127 | kern_return_t ret; |
128 | uint32_t telemetry_notification_leeway; |
129 | |
	lck_grp_init(&telemetry_lck_grp, "telemetry group", LCK_GRP_ATTR_NULL);
131 | lck_mtx_init(&telemetry_mtx, &telemetry_lck_grp, LCK_ATTR_NULL); |
132 | lck_mtx_init(&telemetry_pmi_mtx, &telemetry_lck_grp, LCK_ATTR_NULL); |
133 | |
134 | if (!PE_parse_boot_argn("telemetry_buffer_size" , &telemetry_buffer.size, sizeof(telemetry_buffer.size))) { |
135 | telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE; |
136 | } |
137 | |
138 | if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE) |
139 | telemetry_buffer.size = TELEMETRY_MAX_BUFFER_SIZE; |
140 | |
141 | ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size, VM_KERN_MEMORY_DIAG); |
142 | if (ret != KERN_SUCCESS) { |
143 | kprintf("Telemetry: Allocation failed: %d\n" , ret); |
144 | return; |
145 | } |
146 | bzero((void *) telemetry_buffer.buffer, telemetry_buffer.size); |
147 | |
148 | if (!PE_parse_boot_argn("telemetry_notification_leeway" , &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) { |
149 | /* |
150 | * By default, notify the user to collect the buffer when there is this much space left in the buffer. |
151 | */ |
152 | telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY; |
153 | } |
154 | if (telemetry_notification_leeway >= telemetry_buffer.size) { |
155 | printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n" , |
156 | telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY); |
157 | telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY; |
158 | } |
159 | telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway; |
160 | |
161 | if (!PE_parse_boot_argn("telemetry_sample_rate" , &telemetry_sample_rate, sizeof(telemetry_sample_rate))) { |
162 | telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE; |
163 | } |
164 | |
165 | /* |
166 | * To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args. |
167 | */ |
168 | if (!PE_parse_boot_argn("telemetry_sample_all_tasks" , &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) { |
169 | |
170 | #if CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) |
171 | telemetry_sample_all_tasks = FALSE; |
172 | #else |
173 | telemetry_sample_all_tasks = TRUE; |
174 | #endif /* CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) */ |
175 | |
176 | } |
177 | |
178 | kprintf("Telemetry: Sampling %stasks once per %u second%s\n" , |
179 | (telemetry_sample_all_tasks) ? "all " : "" , |
180 | telemetry_sample_rate, telemetry_sample_rate == 1 ? "" : "s" ); |
181 | } |
182 | |
183 | /* |
 * Enable or disable global microstackshots (i.e., telemetry_sample_all_tasks).
185 | * |
186 | * enable_disable == 1: turn it on |
187 | * enable_disable == 0: turn it off |
188 | */ |
189 | void |
190 | telemetry_global_ctl(int enable_disable) |
191 | { |
192 | if (enable_disable == 1) { |
193 | telemetry_sample_all_tasks = TRUE; |
194 | } else { |
195 | telemetry_sample_all_tasks = FALSE; |
196 | } |
197 | } |
198 | |
199 | /* |
200 | * Opt the given task into or out of the telemetry stream. |
201 | * |
202 | * Supported reasons (callers may use any or all of): |
203 | * TF_CPUMON_WARNING |
204 | * TF_WAKEMON_WARNING |
205 | * |
206 | * enable_disable == 1: turn it on |
207 | * enable_disable == 0: turn it off |
208 | */ |
209 | void |
210 | telemetry_task_ctl(task_t task, uint32_t reasons, int enable_disable) |
211 | { |
212 | task_lock(task); |
213 | telemetry_task_ctl_locked(task, reasons, enable_disable); |
214 | task_unlock(task); |
215 | } |
216 | |
217 | void |
218 | telemetry_task_ctl_locked(task_t task, uint32_t reasons, int enable_disable) |
219 | { |
220 | uint32_t origflags; |
221 | |
222 | assert((reasons != 0) && ((reasons | TF_TELEMETRY) == TF_TELEMETRY)); |
223 | |
224 | task_lock_assert_owned(task); |
225 | |
226 | origflags = task->t_flags; |
227 | |
228 | if (enable_disable == 1) { |
229 | task->t_flags |= reasons; |
230 | if ((origflags & TF_TELEMETRY) == 0) { |
231 | OSIncrementAtomic(&telemetry_active_tasks); |
232 | #if TELEMETRY_DEBUG |
233 | printf("%s: telemetry OFF -> ON (%d active)\n" , proc_name_address(task->bsd_info), telemetry_active_tasks); |
234 | #endif |
235 | } |
236 | } else { |
237 | task->t_flags &= ~reasons; |
238 | if (((origflags & TF_TELEMETRY) != 0) && ((task->t_flags & TF_TELEMETRY) == 0)) { |
239 | /* |
240 | * If this task went from having at least one telemetry bit to having none, |
241 | * the net change was to disable telemetry for the task. |
242 | */ |
243 | OSDecrementAtomic(&telemetry_active_tasks); |
244 | #if TELEMETRY_DEBUG |
245 | printf("%s: telemetry ON -> OFF (%d active)\n" , proc_name_address(task->bsd_info), telemetry_active_tasks); |
246 | #endif |
247 | } |
248 | } |
249 | } |
250 | |
251 | /* |
252 | * Determine if the current thread is eligible for telemetry: |
253 | * |
254 | * telemetry_sample_all_tasks: All threads are eligible. This takes precedence. |
255 | * telemetry_active_tasks: Count of tasks opted in. |
256 | * task->t_flags & TF_TELEMETRY: This task is opted in. |
257 | */ |
258 | static boolean_t |
259 | telemetry_is_active(thread_t thread) |
260 | { |
261 | task_t task = thread->task; |
262 | |
263 | if (task == kernel_task) { |
264 | /* Kernel threads never return to an AST boundary, and are ineligible */ |
265 | return FALSE; |
266 | } |
267 | |
268 | if (telemetry_sample_all_tasks || telemetry_sample_pmis) { |
269 | return TRUE; |
270 | } |
271 | |
272 | if ((telemetry_active_tasks > 0) && ((thread->task->t_flags & TF_TELEMETRY) != 0)) { |
273 | return TRUE; |
274 | } |
275 | |
276 | return FALSE; |
277 | } |
278 | |
279 | /* |
280 | * Userland is arming a timer. If we are eligible for such a record, |
281 | * sample now. No need to do this one at the AST because we're already at |
282 | * a safe place in this system call. |
283 | */ |
284 | int telemetry_timer_event(__unused uint64_t deadline, __unused uint64_t interval, __unused uint64_t leeway) |
285 | { |
286 | if (telemetry_needs_timer_arming_record == TRUE) { |
287 | telemetry_needs_timer_arming_record = FALSE; |
288 | telemetry_take_sample(current_thread(), kTimerArmingRecord | kUserMode, &telemetry_buffer); |
289 | } |
290 | |
291 | return (0); |
292 | } |
293 | |
294 | #if defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) |
295 | static void |
296 | telemetry_pmi_handler(bool user_mode, __unused void *ctx) |
297 | { |
298 | telemetry_mark_curthread(user_mode, TRUE); |
299 | } |
300 | #endif /* defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) */ |
301 | |
302 | int telemetry_pmi_setup(enum telemetry_pmi pmi_ctr, uint64_t period) |
303 | { |
304 | #if defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) |
305 | static boolean_t sample_all_tasks_aside = FALSE; |
	static uint32_t active_tasks_aside = 0;
307 | int error = 0; |
308 | const char *name = "?" ; |
309 | |
310 | unsigned int ctr = 0; |
311 | |
312 | TELEMETRY_PMI_LOCK(); |
313 | |
314 | switch (pmi_ctr) { |
315 | case TELEMETRY_PMI_NONE: |
316 | if (!telemetry_sample_pmis) { |
317 | error = 1; |
318 | goto out; |
319 | } |
320 | |
321 | telemetry_sample_pmis = FALSE; |
322 | telemetry_sample_all_tasks = sample_all_tasks_aside; |
323 | telemetry_active_tasks = active_tasks_aside; |
324 | error = mt_microstackshot_stop(); |
325 | if (!error) { |
326 | printf("telemetry: disabling ustackshot on PMI\n" ); |
327 | } |
328 | goto out; |
329 | |
330 | case TELEMETRY_PMI_INSTRS: |
331 | ctr = MT_CORE_INSTRS; |
332 | name = "instructions" ; |
333 | break; |
334 | |
335 | case TELEMETRY_PMI_CYCLES: |
336 | ctr = MT_CORE_CYCLES; |
337 | name = "cycles" ; |
338 | break; |
339 | |
340 | default: |
341 | error = 1; |
342 | goto out; |
343 | } |
344 | |
345 | telemetry_sample_pmis = TRUE; |
346 | sample_all_tasks_aside = telemetry_sample_all_tasks; |
347 | active_tasks_aside = telemetry_active_tasks; |
348 | telemetry_sample_all_tasks = FALSE; |
349 | telemetry_active_tasks = 0; |
350 | |
351 | error = mt_microstackshot_start(ctr, period, telemetry_pmi_handler, NULL); |
352 | if (!error) { |
353 | printf("telemetry: ustackshot every %llu %s\n" , period, name); |
354 | } |
355 | |
356 | out: |
357 | TELEMETRY_PMI_UNLOCK(); |
358 | return error; |
359 | #else /* defined(MT_CORE_INSTRS) && defined(MT_CORE_CYCLES) */ |
360 | #pragma unused(pmi_ctr, period) |
361 | return 1; |
362 | #endif /* !defined(MT_CORE_INSTRS) || !defined(MT_CORE_CYCLES) */ |
363 | } |
364 | |
365 | /* |
366 | * Mark the current thread for an interrupt-based |
367 | * telemetry record, to be sampled at the next AST boundary. |
368 | */ |
369 | void telemetry_mark_curthread(boolean_t interrupted_userspace, boolean_t pmi) |
370 | { |
371 | uint32_t ast_bits = 0; |
372 | thread_t thread = current_thread(); |
373 | |
374 | /* |
375 | * If telemetry isn't active for this thread, return and try |
376 | * again next time. |
377 | */ |
378 | if (telemetry_is_active(thread) == FALSE) { |
379 | return; |
380 | } |
381 | |
382 | ast_bits |= (interrupted_userspace ? AST_TELEMETRY_USER : AST_TELEMETRY_KERNEL); |
383 | if (pmi) { |
384 | ast_bits |= AST_TELEMETRY_PMI; |
385 | } |
386 | |
387 | telemetry_needs_record = FALSE; |
388 | thread_ast_set(thread, ast_bits); |
389 | ast_propagate(thread); |
390 | } |
391 | |
392 | void compute_telemetry(void *arg __unused) |
393 | { |
394 | if (telemetry_sample_all_tasks || (telemetry_active_tasks > 0)) { |
395 | if ((++telemetry_timestamp) % telemetry_sample_rate == 0) { |
396 | telemetry_needs_record = TRUE; |
397 | telemetry_needs_timer_arming_record = TRUE; |
398 | } |
399 | } |
400 | } |
401 | |
402 | /* |
403 | * If userland has registered a port for telemetry notifications, send one now. |
404 | */ |
405 | static void |
406 | telemetry_notify_user(void) |
407 | { |
408 | mach_port_t user_port = MACH_PORT_NULL; |
409 | |
410 | kern_return_t kr = host_get_telemetry_port(host_priv_self(), &user_port); |
411 | if ((kr != KERN_SUCCESS) || !IPC_PORT_VALID(user_port)) { |
412 | return; |
413 | } |
414 | |
415 | telemetry_notification(user_port, 0); |
416 | ipc_port_release_send(user_port); |
417 | } |
418 | |
419 | void telemetry_ast(thread_t thread, ast_t reasons) |
420 | { |
421 | assert((reasons & AST_TELEMETRY_ALL) != 0); |
422 | |
423 | uint8_t record_type = 0; |
424 | if (reasons & AST_TELEMETRY_IO) { |
425 | record_type |= kIORecord; |
426 | } |
427 | if (reasons & (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL)) { |
428 | record_type |= (reasons & AST_TELEMETRY_PMI) ? kPMIRecord : |
429 | kInterruptRecord; |
430 | } |
431 | |
432 | uint8_t user_telemetry = (reasons & AST_TELEMETRY_USER) ? kUserMode : 0; |
433 | |
434 | uint8_t microsnapshot_flags = record_type | user_telemetry; |
435 | |
436 | telemetry_take_sample(thread, microsnapshot_flags, &telemetry_buffer); |
437 | } |
438 | |
439 | void telemetry_take_sample(thread_t thread, uint8_t microsnapshot_flags, struct micro_snapshot_buffer * current_buffer) |
440 | { |
441 | task_t task; |
442 | void *p; |
443 | uint32_t btcount = 0, bti; |
444 | struct micro_snapshot *msnap; |
445 | struct task_snapshot *tsnap; |
446 | struct thread_snapshot *thsnap; |
447 | clock_sec_t secs; |
448 | clock_usec_t usecs; |
449 | vm_size_t framesize; |
450 | uint32_t current_record_start; |
451 | uint32_t tmp = 0; |
452 | boolean_t notify = FALSE; |
453 | |
454 | if (thread == THREAD_NULL) |
455 | return; |
456 | |
457 | task = thread->task; |
458 | if ((task == TASK_NULL) || (task == kernel_task) || task_did_exec(task) || task_is_exec_copy(task)) |
459 | return; |
460 | |
461 | /* telemetry_XXX accessed outside of lock for instrumentation only */ |
462 | KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_START, |
463 | microsnapshot_flags, telemetry_bytes_since_last_mark, 0, |
464 | (&telemetry_buffer != current_buffer)); |
465 | |
466 | p = get_bsdtask_info(task); |
467 | |
468 | /* |
469 | * Gather up the data we'll need for this sample. The sample is written into the kernel |
470 | * buffer with the global telemetry lock held -- so we must do our (possibly faulting) |
471 | * copies from userland here, before taking the lock. |
472 | */ |
473 | uintptr_t frames[MAX_CALLSTACK_FRAMES] = {}; |
474 | bool user64; |
475 | int backtrace_error = backtrace_user(frames, MAX_CALLSTACK_FRAMES, &btcount, &user64); |
476 | if (backtrace_error) { |
477 | return; |
478 | } |
479 | |
480 | /* |
481 | * Find the actual [slid] address of the shared cache's UUID, and copy it in from userland. |
482 | */ |
	int shared_cache_uuid_valid = 0;
	uint64_t shared_cache_base_address;
	struct _dyld_cache_header shared_cache_header;
	uint64_t shared_cache_slide;
487 | |
488 | /* |
489 | * Don't copy in the entire shared cache header; we only need the UUID. Calculate the |
490 | * offset of that one field. |
491 | */ |
492 | int = (char *)&shared_cache_header.uuid - (char *)&shared_cache_header; |
493 | vm_shared_region_t sr = vm_shared_region_get(task); |
494 | if (sr != NULL) { |
495 | if ((vm_shared_region_start_address(sr, &shared_cache_base_address) == KERN_SUCCESS) && |
496 | (copyin(shared_cache_base_address + sc_header_uuid_offset, (char *)&shared_cache_header.uuid, |
497 | sizeof (shared_cache_header.uuid)) == 0)) { |
498 | shared_cache_uuid_valid = 1; |
499 | shared_cache_slide = vm_shared_region_get_slide(sr); |
500 | } |
501 | // vm_shared_region_get() gave us a reference on the shared region. |
502 | vm_shared_region_deallocate(sr); |
503 | } |
504 | |
505 | /* |
506 | * Retrieve the array of UUID's for binaries used by this task. |
507 | * We reach down into DYLD's data structures to find the array. |
508 | * |
509 | * XXX - make this common with kdp? |
510 | */ |
511 | uint32_t uuid_info_count = 0; |
512 | mach_vm_address_t uuid_info_addr = 0; |
513 | if (task_has_64Bit_addr(task)) { |
514 | struct user64_dyld_all_image_infos task_image_infos; |
515 | if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) { |
516 | uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount; |
517 | uuid_info_addr = task_image_infos.uuidArray; |
518 | } |
519 | } else { |
520 | struct user32_dyld_all_image_infos task_image_infos; |
521 | if (copyin(task->all_image_info_addr, (char *)&task_image_infos, sizeof(task_image_infos)) == 0) { |
522 | uuid_info_count = task_image_infos.uuidArrayCount; |
523 | uuid_info_addr = task_image_infos.uuidArray; |
524 | } |
525 | } |
526 | |
527 | /* |
528 | * If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating |
529 | * this data structure), we zero the uuid_info_count so that we won't even try to save load info |
530 | * for this task. |
531 | */ |
532 | if (!uuid_info_addr) { |
533 | uuid_info_count = 0; |
534 | } |
535 | |
536 | /* |
537 | * Don't copy in an unbounded amount of memory. The main binary and interesting |
538 | * non-shared-cache libraries should be in the first few images. |
539 | */ |
540 | if (uuid_info_count > TELEMETRY_MAX_UUID_COUNT) { |
541 | uuid_info_count = TELEMETRY_MAX_UUID_COUNT; |
542 | } |
543 | |
544 | uint32_t uuid_info_size = (uint32_t)(task_has_64Bit_addr(thread->task) ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info)); |
545 | uint32_t uuid_info_array_size = uuid_info_count * uuid_info_size; |
546 | char *uuid_info_array = NULL; |
547 | |
548 | if (uuid_info_count > 0) { |
549 | if ((uuid_info_array = (char *)kalloc(uuid_info_array_size)) == NULL) { |
550 | return; |
551 | } |
552 | |
553 | /* |
554 | * Copy in the UUID info array. |
555 | * It may be nonresident, in which case just fix up nloadinfos to 0 in the task snapshot. |
556 | */ |
557 | if (copyin(uuid_info_addr, uuid_info_array, uuid_info_array_size) != 0) { |
558 | kfree(uuid_info_array, uuid_info_array_size); |
559 | uuid_info_array = NULL; |
560 | uuid_info_array_size = 0; |
561 | } |
562 | } |
563 | |
564 | /* |
565 | * Look for a dispatch queue serial number, and copy it in from userland if present. |
566 | */ |
567 | uint64_t dqserialnum = 0; |
568 | int dqserialnum_valid = 0; |
569 | |
570 | uint64_t dqkeyaddr = thread_dispatchqaddr(thread); |
571 | if (dqkeyaddr != 0) { |
572 | uint64_t dqaddr = 0; |
573 | uint64_t dq_serialno_offset = get_task_dispatchqueue_serialno_offset(task); |
574 | if ((copyin(dqkeyaddr, (char *)&dqaddr, (task_has_64Bit_addr(task) ? 8 : 4)) == 0) && |
575 | (dqaddr != 0) && (dq_serialno_offset != 0)) { |
576 | uint64_t dqserialnumaddr = dqaddr + dq_serialno_offset; |
577 | if (copyin(dqserialnumaddr, (char *)&dqserialnum, (task_has_64Bit_addr(task) ? 8 : 4)) == 0) { |
578 | dqserialnum_valid = 1; |
579 | } |
580 | } |
581 | } |
582 | |
583 | clock_get_calendar_microtime(&secs, &usecs); |
584 | |
585 | TELEMETRY_LOCK(); |
586 | |
587 | /* |
588 | * If our buffer is not backed by anything, |
589 | * then we cannot take the sample. Meant to allow us to deallocate the window |
590 | * buffer if it is disabled. |
591 | */ |
592 | if (!current_buffer->buffer) |
593 | goto cancel_sample; |
594 | |
595 | /* |
596 | * We do the bulk of the operation under the telemetry lock, on assumption that |
597 | * any page faults during execution will not cause another AST_TELEMETRY_ALL |
598 | * to deadlock; they will just block until we finish. This makes it easier |
599 | * to copy into the buffer directly. As soon as we unlock, userspace can copy |
600 | * out of our buffer. |
601 | */ |
602 | |
603 | copytobuffer: |
604 | |
605 | current_record_start = current_buffer->current_position; |
606 | |
607 | if ((current_buffer->size - current_buffer->current_position) < sizeof(struct micro_snapshot)) { |
608 | /* |
609 | * We can't fit a record in the space available, so wrap around to the beginning. |
610 | * Save the current position as the known end point of valid data. |
611 | */ |
612 | current_buffer->end_point = current_record_start; |
613 | current_buffer->current_position = 0; |
614 | if (current_record_start == 0) { |
615 | /* This sample is too large to fit in the buffer even when we started at 0, so skip it */ |
616 | goto cancel_sample; |
617 | } |
618 | goto copytobuffer; |
619 | } |
620 | |
621 | msnap = (struct micro_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position); |
622 | msnap->snapshot_magic = STACKSHOT_MICRO_SNAPSHOT_MAGIC; |
623 | msnap->ms_flags = microsnapshot_flags; |
624 | msnap->ms_opaque_flags = 0; /* namespace managed by userspace */ |
625 | msnap->ms_cpu = cpu_number(); |
626 | msnap->ms_time = secs; |
627 | msnap->ms_time_microsecs = usecs; |
628 | |
629 | current_buffer->current_position += sizeof(struct micro_snapshot); |
630 | |
631 | if ((current_buffer->size - current_buffer->current_position) < sizeof(struct task_snapshot)) { |
632 | current_buffer->end_point = current_record_start; |
633 | current_buffer->current_position = 0; |
634 | if (current_record_start == 0) { |
635 | /* This sample is too large to fit in the buffer even when we started at 0, so skip it */ |
636 | goto cancel_sample; |
637 | } |
638 | goto copytobuffer; |
639 | } |
640 | |
641 | tsnap = (struct task_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position); |
642 | bzero(tsnap, sizeof(*tsnap)); |
643 | tsnap->snapshot_magic = STACKSHOT_TASK_SNAPSHOT_MAGIC; |
644 | tsnap->pid = proc_pid(p); |
645 | tsnap->uniqueid = proc_uniqueid(p); |
646 | tsnap->user_time_in_terminated_threads = task->total_user_time; |
647 | tsnap->system_time_in_terminated_threads = task->total_system_time; |
648 | tsnap->suspend_count = task->suspend_count; |
649 | tsnap->task_size = (typeof(tsnap->task_size)) (get_task_phys_footprint(task) / PAGE_SIZE); |
650 | tsnap->faults = task->faults; |
651 | tsnap->pageins = task->pageins; |
652 | tsnap->cow_faults = task->cow_faults; |
653 | /* |
654 | * The throttling counters are maintained as 64-bit counters in the proc |
655 | * structure. However, we reserve 32-bits (each) for them in the task_snapshot |
656 | * struct to save space and since we do not expect them to overflow 32-bits. If we |
657 | * find these values overflowing in the future, the fix would be to simply |
658 | * upgrade these counters to 64-bit in the task_snapshot struct |
659 | */ |
660 | tsnap->was_throttled = (uint32_t) proc_was_throttled(p); |
661 | tsnap->did_throttle = (uint32_t) proc_did_throttle(p); |
662 | |
663 | if (task->t_flags & TF_TELEMETRY) { |
664 | tsnap->ss_flags |= kTaskRsrcFlagged; |
665 | } |
666 | |
667 | if (proc_get_effective_task_policy(task, TASK_POLICY_DARWIN_BG)) { |
668 | tsnap->ss_flags |= kTaskDarwinBG; |
669 | } |
670 | |
671 | proc_get_darwinbgstate(task, &tmp); |
672 | |
673 | if (proc_get_effective_task_policy(task, TASK_POLICY_ROLE) == TASK_FOREGROUND_APPLICATION) { |
674 | tsnap->ss_flags |= kTaskIsForeground; |
675 | } |
676 | |
677 | if (tmp & PROC_FLAG_ADAPTIVE_IMPORTANT) { |
678 | tsnap->ss_flags |= kTaskIsBoosted; |
679 | } |
680 | |
681 | if (tmp & PROC_FLAG_SUPPRESSED) { |
682 | tsnap->ss_flags |= kTaskIsSuppressed; |
683 | } |
684 | |
685 | tsnap->latency_qos = task_grab_latency_qos(task); |
686 | |
687 | strlcpy(tsnap->p_comm, proc_name_address(p), sizeof(tsnap->p_comm)); |
688 | if (task_has_64Bit_addr(thread->task)) { |
689 | tsnap->ss_flags |= kUser64_p; |
690 | } |
691 | |
692 | if (shared_cache_uuid_valid) { |
693 | tsnap->shared_cache_slide = shared_cache_slide; |
694 | bcopy(shared_cache_header.uuid, tsnap->shared_cache_identifier, sizeof (shared_cache_header.uuid)); |
695 | } |
696 | |
697 | current_buffer->current_position += sizeof(struct task_snapshot); |
698 | |
699 | /* |
700 | * Directly after the task snapshot, place the array of UUID's corresponding to the binaries |
701 | * used by this task. |
702 | */ |
703 | if ((current_buffer->size - current_buffer->current_position) < uuid_info_array_size) { |
704 | current_buffer->end_point = current_record_start; |
705 | current_buffer->current_position = 0; |
706 | if (current_record_start == 0) { |
707 | /* This sample is too large to fit in the buffer even when we started at 0, so skip it */ |
708 | goto cancel_sample; |
709 | } |
710 | goto copytobuffer; |
711 | } |
712 | |
713 | /* |
714 | * Copy the UUID info array into our sample. |
715 | */ |
716 | if (uuid_info_array_size > 0) { |
717 | bcopy(uuid_info_array, (char *)(current_buffer->buffer + current_buffer->current_position), uuid_info_array_size); |
718 | tsnap->nloadinfos = uuid_info_count; |
719 | } |
720 | |
721 | current_buffer->current_position += uuid_info_array_size; |
722 | |
723 | /* |
724 | * After the task snapshot & list of binary UUIDs, we place a thread snapshot. |
725 | */ |
726 | |
727 | if ((current_buffer->size - current_buffer->current_position) < sizeof(struct thread_snapshot)) { |
728 | /* wrap and overwrite */ |
729 | current_buffer->end_point = current_record_start; |
730 | current_buffer->current_position = 0; |
731 | if (current_record_start == 0) { |
732 | /* This sample is too large to fit in the buffer even when we started at 0, so skip it */ |
733 | goto cancel_sample; |
734 | } |
735 | goto copytobuffer; |
736 | } |
737 | |
738 | thsnap = (struct thread_snapshot *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position); |
739 | bzero(thsnap, sizeof(*thsnap)); |
740 | |
741 | thsnap->snapshot_magic = STACKSHOT_THREAD_SNAPSHOT_MAGIC; |
742 | thsnap->thread_id = thread_tid(thread); |
743 | thsnap->state = thread->state; |
744 | thsnap->priority = thread->base_pri; |
745 | thsnap->sched_pri = thread->sched_pri; |
746 | thsnap->sched_flags = thread->sched_flags; |
747 | thsnap->ss_flags |= kStacksPCOnly; |
748 | thsnap->ts_qos = thread->effective_policy.thep_qos; |
749 | thsnap->ts_rqos = thread->requested_policy.thrp_qos; |
750 | thsnap->ts_rqos_override = MAX(thread->requested_policy.thrp_qos_override, |
751 | thread->requested_policy.thrp_qos_workq_override); |
752 | |
753 | if (proc_get_effective_thread_policy(thread, TASK_POLICY_DARWIN_BG)) { |
754 | thsnap->ss_flags |= kThreadDarwinBG; |
755 | } |
756 | |
757 | thsnap->user_time = timer_grab(&thread->user_timer); |
758 | |
759 | uint64_t tval = timer_grab(&thread->system_timer); |
760 | |
761 | if (thread->precise_user_kernel_time) { |
762 | thsnap->system_time = tval; |
763 | } else { |
764 | thsnap->user_time += tval; |
765 | thsnap->system_time = 0; |
766 | } |
767 | |
768 | current_buffer->current_position += sizeof(struct thread_snapshot); |
769 | |
770 | /* |
771 | * If this thread has a dispatch queue serial number, include it here. |
772 | */ |
773 | if (dqserialnum_valid) { |
774 | if ((current_buffer->size - current_buffer->current_position) < sizeof(dqserialnum)) { |
775 | /* wrap and overwrite */ |
776 | current_buffer->end_point = current_record_start; |
777 | current_buffer->current_position = 0; |
778 | if (current_record_start == 0) { |
779 | /* This sample is too large to fit in the buffer even when we started at 0, so skip it */ |
780 | goto cancel_sample; |
781 | } |
782 | goto copytobuffer; |
783 | } |
784 | |
785 | thsnap->ss_flags |= kHasDispatchSerial; |
786 | bcopy(&dqserialnum, (char *)current_buffer->buffer + current_buffer->current_position, sizeof (dqserialnum)); |
787 | current_buffer->current_position += sizeof (dqserialnum); |
788 | } |
789 | |
790 | if (user64) { |
791 | framesize = 8; |
792 | thsnap->ss_flags |= kUser64_p; |
793 | } else { |
794 | framesize = 4; |
795 | } |
796 | |
797 | /* |
798 | * If we can't fit this entire stacktrace then cancel this record, wrap to the beginning, |
799 | * and start again there so that we always store a full record. |
800 | */ |
801 | if ((current_buffer->size - current_buffer->current_position)/framesize < btcount) { |
802 | current_buffer->end_point = current_record_start; |
803 | current_buffer->current_position = 0; |
804 | if (current_record_start == 0) { |
805 | /* This sample is too large to fit in the buffer even when we started at 0, so skip it */ |
806 | goto cancel_sample; |
807 | } |
808 | goto copytobuffer; |
809 | } |
810 | |
811 | for (bti=0; bti < btcount; bti++, current_buffer->current_position += framesize) { |
812 | if (framesize == 8) { |
813 | *(uint64_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = frames[bti]; |
814 | } else { |
815 | *(uint32_t *)(uintptr_t)(current_buffer->buffer + current_buffer->current_position) = (uint32_t)frames[bti]; |
816 | } |
817 | } |
818 | |
819 | if (current_buffer->end_point < current_buffer->current_position) { |
820 | /* |
821 | * Each time the cursor wraps around to the beginning, we leave a |
822 | * differing amount of unused space at the end of the buffer. Make |
823 | * sure the cursor pushes the end point in case we're making use of |
824 | * more of the buffer than we did the last time we wrapped. |
825 | */ |
826 | current_buffer->end_point = current_buffer->current_position; |
827 | } |
828 | |
829 | thsnap->nuser_frames = btcount; |
830 | |
831 | /* |
832 | * Now THIS is a hack. |
833 | */ |
834 | if (current_buffer == &telemetry_buffer) { |
835 | telemetry_bytes_since_last_mark += (current_buffer->current_position - current_record_start); |
836 | if (telemetry_bytes_since_last_mark > telemetry_buffer_notify_at) { |
837 | notify = TRUE; |
838 | } |
839 | } |
840 | |
841 | cancel_sample: |
842 | TELEMETRY_UNLOCK(); |
843 | |
844 | KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_RECORD) | DBG_FUNC_END, |
845 | notify, telemetry_bytes_since_last_mark, |
846 | current_buffer->current_position, current_buffer->end_point); |
847 | |
848 | if (notify) { |
849 | telemetry_notify_user(); |
850 | } |
851 | |
852 | if (uuid_info_array != NULL) { |
853 | kfree(uuid_info_array, uuid_info_array_size); |
854 | } |
855 | } |
856 | |
857 | #if TELEMETRY_DEBUG |
858 | static void |
859 | log_telemetry_output(vm_offset_t buf, uint32_t pos, uint32_t sz) |
860 | { |
861 | struct micro_snapshot *p; |
862 | uint32_t offset; |
863 | |
864 | printf("Copying out %d bytes of telemetry at offset %d\n" , sz, pos); |
865 | |
866 | buf += pos; |
867 | |
868 | /* |
869 | * Find and log each timestamp in this chunk of buffer. |
870 | */ |
871 | for (offset = 0; offset < sz; offset++) { |
872 | p = (struct micro_snapshot *)(buf + offset); |
873 | if (p->snapshot_magic == STACKSHOT_MICRO_SNAPSHOT_MAGIC) { |
874 | printf("telemetry timestamp: %lld\n" , p->ms_time); |
875 | } |
876 | } |
877 | } |
878 | #endif |
879 | |
880 | int telemetry_gather(user_addr_t buffer, uint32_t *length, boolean_t mark) |
881 | { |
882 | return telemetry_buffer_gather(buffer, length, mark, &telemetry_buffer); |
883 | } |
884 | |
885 | int telemetry_buffer_gather(user_addr_t buffer, uint32_t *length, boolean_t mark, struct micro_snapshot_buffer * current_buffer) |
886 | { |
887 | int result = 0; |
888 | uint32_t oldest_record_offset; |
889 | |
890 | KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_START, |
891 | mark, telemetry_bytes_since_last_mark, 0, |
892 | (&telemetry_buffer != current_buffer)); |
893 | |
894 | TELEMETRY_LOCK(); |
895 | |
896 | if (current_buffer->buffer == 0) { |
897 | *length = 0; |
898 | goto out; |
899 | } |
900 | |
901 | if (*length < current_buffer->size) { |
902 | result = KERN_NO_SPACE; |
903 | goto out; |
904 | } |
905 | |
906 | /* |
907 | * Copy the ring buffer out to userland in order sorted by time: least recent to most recent. |
908 | * First, we need to search forward from the cursor to find the oldest record in our buffer. |
909 | */ |
910 | oldest_record_offset = current_buffer->current_position; |
911 | do { |
912 | if (((oldest_record_offset + sizeof(uint32_t)) > current_buffer->size) || |
913 | ((oldest_record_offset + sizeof(uint32_t)) > current_buffer->end_point)) { |
914 | |
915 | if (*(uint32_t *)(uintptr_t)(current_buffer->buffer) == 0) { |
916 | /* |
917 | * There is no magic number at the start of the buffer, which means |
918 | * it's empty; nothing to see here yet. |
919 | */ |
920 | *length = 0; |
921 | goto out; |
922 | } |
923 | /* |
924 | * We've looked through the end of the active buffer without finding a valid |
925 | * record; that means all valid records are in a single chunk, beginning at |
926 | * the very start of the buffer. |
927 | */ |
928 | |
929 | oldest_record_offset = 0; |
930 | assert(*(uint32_t *)(uintptr_t)(current_buffer->buffer) == STACKSHOT_MICRO_SNAPSHOT_MAGIC); |
931 | break; |
932 | } |
933 | |
934 | if (*(uint32_t *)(uintptr_t)(current_buffer->buffer + oldest_record_offset) == STACKSHOT_MICRO_SNAPSHOT_MAGIC) |
935 | break; |
936 | |
937 | /* |
938 | * There are no alignment guarantees for micro-stackshot records, so we must search at each |
939 | * byte offset. |
940 | */ |
941 | oldest_record_offset++; |
942 | } while (oldest_record_offset != current_buffer->current_position); |
943 | |
944 | /* |
945 | * If needed, copyout in two chunks: from the oldest record to the end of the buffer, and then |
946 | * from the beginning of the buffer up to the current position. |
947 | */ |
948 | if (oldest_record_offset != 0) { |
949 | #if TELEMETRY_DEBUG |
950 | log_telemetry_output(current_buffer->buffer, oldest_record_offset, |
951 | current_buffer->end_point - oldest_record_offset); |
952 | #endif |
953 | if ((result = copyout((void *)(current_buffer->buffer + oldest_record_offset), buffer, |
954 | current_buffer->end_point - oldest_record_offset)) != 0) { |
955 | *length = 0; |
956 | goto out; |
957 | } |
958 | *length = current_buffer->end_point - oldest_record_offset; |
959 | } else { |
960 | *length = 0; |
961 | } |
962 | |
963 | #if TELEMETRY_DEBUG |
964 | log_telemetry_output(current_buffer->buffer, 0, current_buffer->current_position); |
965 | #endif |
966 | if ((result = copyout((void *)current_buffer->buffer, buffer + *length, |
967 | current_buffer->current_position)) != 0) { |
968 | *length = 0; |
969 | goto out; |
970 | } |
971 | *length += (uint32_t)current_buffer->current_position; |
972 | |
973 | out: |
974 | |
975 | if (mark && (*length > 0)) { |
976 | telemetry_bytes_since_last_mark = 0; |
977 | } |
978 | |
979 | TELEMETRY_UNLOCK(); |
980 | |
981 | KDBG(MACHDBG_CODE(DBG_MACH_STACKSHOT, MICROSTACKSHOT_GATHER) | DBG_FUNC_END, |
982 | current_buffer->current_position, *length, |
983 | current_buffer->end_point, (&telemetry_buffer != current_buffer)); |
984 | |
985 | return (result); |
986 | } |
987 | |
988 | /************************/ |
989 | /* BOOT PROFILE SUPPORT */ |
990 | /************************/ |
991 | /* |
992 | * Boot Profiling |
993 | * |
994 | * The boot-profiling support is a mechanism to sample activity happening on the |
995 | * system during boot. This mechanism sets up a periodic timer and on every timer fire, |
996 | * captures a full backtrace into the boot profiling buffer. This buffer can be pulled |
997 | * out and analyzed from user-space. It is turned on using the following boot-args: |
998 | * "bootprofile_buffer_size" specifies the size of the boot profile buffer |
999 | * "bootprofile_interval_ms" specifies the interval for the profiling timer |
1000 | * |
1001 | * Process Specific Boot Profiling |
1002 | * |
1003 | * The boot-arg "bootprofile_proc_name" can be used to specify a certain |
1004 | * process that needs to profiled during boot. Setting this boot-arg changes |
1005 | * the way stackshots are captured. At every timer fire, the code looks at the |
1006 | * currently running process and takes a stackshot only if the requested process |
1007 | * is on-core (which makes it unsuitable for MP systems). |
1008 | * |
1009 | * Trigger Events |
1010 | * |
1011 | * The boot-arg "bootprofile_type=boot" starts the timer during early boot. Using |
1012 | * "wake" starts the timer at AP wake from suspend-to-RAM. |
1013 | */ |
1014 | |
1015 | #define BOOTPROFILE_MAX_BUFFER_SIZE (64*1024*1024) /* see also COPYSIZELIMIT_PANIC */ |
1016 | |
1017 | vm_offset_t bootprofile_buffer = 0; |
1018 | uint32_t bootprofile_buffer_size = 0; |
1019 | uint32_t bootprofile_buffer_current_position = 0; |
1020 | uint32_t bootprofile_interval_ms = 0; |
1021 | uint32_t bootprofile_stackshot_flags = 0; |
1022 | uint64_t bootprofile_interval_abs = 0; |
1023 | uint64_t bootprofile_next_deadline = 0; |
1024 | uint32_t bootprofile_all_procs = 0; |
1025 | char bootprofile_proc_name[17]; |
1026 | uint64_t bootprofile_delta_since_timestamp = 0; |
1027 | lck_grp_t bootprofile_lck_grp; |
1028 | lck_mtx_t bootprofile_mtx; |
1029 | |
1030 | |
1031 | enum { |
1032 | kBootProfileDisabled = 0, |
1033 | kBootProfileStartTimerAtBoot, |
1034 | kBootProfileStartTimerAtWake |
1035 | } bootprofile_type = kBootProfileDisabled; |
1036 | |
1037 | |
1038 | static timer_call_data_t bootprofile_timer_call_entry; |
1039 | |
1040 | #define BOOTPROFILE_LOCK() do { lck_mtx_lock(&bootprofile_mtx); } while(0) |
1041 | #define BOOTPROFILE_TRY_SPIN_LOCK() lck_mtx_try_lock_spin(&bootprofile_mtx) |
1042 | #define BOOTPROFILE_UNLOCK() do { lck_mtx_unlock(&bootprofile_mtx); } while(0) |
1043 | |
1044 | static void bootprofile_timer_call( |
1045 | timer_call_param_t param0, |
1046 | timer_call_param_t param1); |
1047 | |
1048 | void bootprofile_init(void) |
1049 | { |
1050 | kern_return_t ret; |
1051 | char type[32]; |
1052 | |
	lck_grp_init(&bootprofile_lck_grp, "bootprofile group", LCK_GRP_ATTR_NULL);
1054 | lck_mtx_init(&bootprofile_mtx, &bootprofile_lck_grp, LCK_ATTR_NULL); |
1055 | |
1056 | if (!PE_parse_boot_argn("bootprofile_buffer_size" , &bootprofile_buffer_size, sizeof(bootprofile_buffer_size))) { |
1057 | bootprofile_buffer_size = 0; |
1058 | } |
1059 | |
1060 | if (bootprofile_buffer_size > BOOTPROFILE_MAX_BUFFER_SIZE) |
1061 | bootprofile_buffer_size = BOOTPROFILE_MAX_BUFFER_SIZE; |
1062 | |
1063 | if (!PE_parse_boot_argn("bootprofile_interval_ms" , &bootprofile_interval_ms, sizeof(bootprofile_interval_ms))) { |
1064 | bootprofile_interval_ms = 0; |
1065 | } |
1066 | |
1067 | if (!PE_parse_boot_argn("bootprofile_stackshot_flags" , &bootprofile_stackshot_flags, sizeof(bootprofile_stackshot_flags))) { |
1068 | bootprofile_stackshot_flags = 0; |
1069 | } |
1070 | |
1071 | if (!PE_parse_boot_argn("bootprofile_proc_name" , &bootprofile_proc_name, sizeof(bootprofile_proc_name))) { |
1072 | bootprofile_all_procs = 1; |
1073 | bootprofile_proc_name[0] = '\0'; |
1074 | } |
1075 | |
1076 | if (PE_parse_boot_argn("bootprofile_type" , type, sizeof(type))) { |
1077 | if (0 == strcmp(type, "boot" )) { |
1078 | bootprofile_type = kBootProfileStartTimerAtBoot; |
1079 | } else if (0 == strcmp(type, "wake" )) { |
1080 | bootprofile_type = kBootProfileStartTimerAtWake; |
1081 | } else { |
1082 | bootprofile_type = kBootProfileDisabled; |
1083 | } |
1084 | } else { |
1085 | bootprofile_type = kBootProfileDisabled; |
1086 | } |
1087 | |
1088 | clock_interval_to_absolutetime_interval(bootprofile_interval_ms, NSEC_PER_MSEC, &bootprofile_interval_abs); |
1089 | |
1090 | /* Both boot args must be set to enable */ |
1091 | if ((bootprofile_type == kBootProfileDisabled) || (bootprofile_buffer_size == 0) || (bootprofile_interval_abs == 0)) { |
1092 | return; |
1093 | } |
1094 | |
1095 | ret = kmem_alloc(kernel_map, &bootprofile_buffer, bootprofile_buffer_size, VM_KERN_MEMORY_DIAG); |
1096 | if (ret != KERN_SUCCESS) { |
1097 | kprintf("Boot profile: Allocation failed: %d\n" , ret); |
1098 | return; |
1099 | } |
1100 | bzero((void *) bootprofile_buffer, bootprofile_buffer_size); |
1101 | |
1102 | kprintf("Boot profile: Sampling %s once per %u ms at %s\n" , bootprofile_all_procs ? "all procs" : bootprofile_proc_name, bootprofile_interval_ms, |
1103 | bootprofile_type == kBootProfileStartTimerAtBoot ? "boot" : (bootprofile_type == kBootProfileStartTimerAtWake ? "wake" : "unknown" )); |
1104 | |
1105 | timer_call_setup(&bootprofile_timer_call_entry, |
1106 | bootprofile_timer_call, |
1107 | NULL); |
1108 | |
1109 | if (bootprofile_type == kBootProfileStartTimerAtBoot) { |
1110 | bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs; |
1111 | timer_call_enter_with_leeway(&bootprofile_timer_call_entry, |
1112 | NULL, |
1113 | bootprofile_next_deadline, |
1114 | 0, |
1115 | TIMER_CALL_SYS_NORMAL, |
1116 | FALSE); |
1117 | } |
1118 | } |
1119 | |
1120 | void |
1121 | bootprofile_wake_from_sleep(void) |
1122 | { |
1123 | if (bootprofile_type == kBootProfileStartTimerAtWake) { |
1124 | bootprofile_next_deadline = mach_absolute_time() + bootprofile_interval_abs; |
1125 | timer_call_enter_with_leeway(&bootprofile_timer_call_entry, |
1126 | NULL, |
1127 | bootprofile_next_deadline, |
1128 | 0, |
1129 | TIMER_CALL_SYS_NORMAL, |
1130 | FALSE); |
1131 | } |
1132 | } |
1133 | |
1134 | |
1135 | static void |
1136 | bootprofile_timer_call( |
1137 | timer_call_param_t param0 __unused, |
1138 | timer_call_param_t param1 __unused) |
1139 | { |
1140 | unsigned retbytes = 0; |
1141 | int pid_to_profile = -1; |
1142 | |
1143 | if (!BOOTPROFILE_TRY_SPIN_LOCK()) { |
1144 | goto reprogram; |
1145 | } |
1146 | |
1147 | /* Check if process-specific boot profiling is turned on */ |
1148 | if (!bootprofile_all_procs) { |
1149 | /* |
1150 | * Since boot profiling initializes really early in boot, it is |
1151 | * possible that at this point, the task/proc is not initialized. |
1152 | * Nothing to do in that case. |
1153 | */ |
1154 | |
		if ((current_task() != NULL) && (current_task()->bsd_info != NULL) &&
		    (0 == strncmp(bootprofile_proc_name, proc_name_address(current_task()->bsd_info), 17))) {
			pid_to_profile = proc_selfpid();
		} else {
1160 | /* |
1161 | * Process-specific boot profiling requested but the on-core process is |
1162 | * something else. Nothing to do here. |
1163 | */ |
1164 | BOOTPROFILE_UNLOCK(); |
1165 | goto reprogram; |
1166 | } |
1167 | } |
1168 | |
1169 | /* initiate a stackshot with whatever portion of the buffer is left */ |
1170 | if (bootprofile_buffer_current_position < bootprofile_buffer_size) { |
1171 | |
1172 | uint32_t flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK | STACKSHOT_SAVE_LOADINFO |
1173 | | STACKSHOT_GET_GLOBAL_MEM_STATS; |
1174 | #if __x86_64__ |
1175 | flags |= STACKSHOT_SAVE_KEXT_LOADINFO; |
1176 | #endif /* __x86_64__ */ |
1177 | |
1178 | |
1179 | /* OR on flags specified in boot-args */ |
1180 | flags |= bootprofile_stackshot_flags; |
1181 | if ((flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) && (bootprofile_delta_since_timestamp == 0)) { |
1182 | /* Can't take deltas until the first one */ |
1183 | flags &= ~ STACKSHOT_COLLECT_DELTA_SNAPSHOT; |
1184 | } |
1185 | |
1186 | uint64_t timestamp = 0; |
1187 | if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) { |
1188 | timestamp = mach_absolute_time(); |
1189 | } |
1190 | |
1191 | kern_return_t r = stack_snapshot_from_kernel( |
1192 | pid_to_profile, (void *)(bootprofile_buffer + bootprofile_buffer_current_position), |
1193 | bootprofile_buffer_size - bootprofile_buffer_current_position, |
1194 | flags, bootprofile_delta_since_timestamp, &retbytes); |
1195 | |
1196 | /* |
1197 | * We call with STACKSHOT_TRYLOCK because the stackshot lock is coarser |
1198 | * than the bootprofile lock. If someone else has the lock we'll just |
1199 | * try again later. |
1200 | */ |
1201 | |
1202 | if (r == KERN_LOCK_OWNED) { |
1203 | BOOTPROFILE_UNLOCK(); |
1204 | goto reprogram; |
1205 | } |
1206 | |
1207 | if (bootprofile_stackshot_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT && |
1208 | r == KERN_SUCCESS) { |
1209 | bootprofile_delta_since_timestamp = timestamp; |
1210 | } |
1211 | |
1212 | bootprofile_buffer_current_position += retbytes; |
1213 | } |
1214 | |
1215 | BOOTPROFILE_UNLOCK(); |
1216 | |
1217 | /* If we didn't get any data or have run out of buffer space, stop profiling */ |
1218 | if ((retbytes == 0) || (bootprofile_buffer_current_position == bootprofile_buffer_size)) { |
1219 | return; |
1220 | } |
1221 | |
1222 | |
1223 | reprogram: |
1224 | /* If the user gathered the buffer, no need to keep profiling */ |
1225 | if (bootprofile_interval_abs == 0) { |
1226 | return; |
1227 | } |
1228 | |
1229 | clock_deadline_for_periodic_event(bootprofile_interval_abs, |
1230 | mach_absolute_time(), |
1231 | &bootprofile_next_deadline); |
1232 | timer_call_enter_with_leeway(&bootprofile_timer_call_entry, |
1233 | NULL, |
1234 | bootprofile_next_deadline, |
1235 | 0, |
1236 | TIMER_CALL_SYS_NORMAL, |
1237 | FALSE); |
1238 | } |
1239 | |
1240 | void bootprofile_get(void **buffer, uint32_t *length) |
1241 | { |
1242 | BOOTPROFILE_LOCK(); |
1243 | *buffer = (void*) bootprofile_buffer; |
1244 | *length = bootprofile_buffer_current_position; |
1245 | BOOTPROFILE_UNLOCK(); |
1246 | } |
1247 | |
1248 | int bootprofile_gather(user_addr_t buffer, uint32_t *length) |
1249 | { |
1250 | int result = 0; |
1251 | |
1252 | BOOTPROFILE_LOCK(); |
1253 | |
1254 | if (bootprofile_buffer == 0) { |
1255 | *length = 0; |
1256 | goto out; |
1257 | } |
1258 | |
1259 | if (*length < bootprofile_buffer_current_position) { |
1260 | result = KERN_NO_SPACE; |
1261 | goto out; |
1262 | } |
1263 | |
1264 | if ((result = copyout((void *)bootprofile_buffer, buffer, |
1265 | bootprofile_buffer_current_position)) != 0) { |
1266 | *length = 0; |
1267 | goto out; |
1268 | } |
1269 | *length = bootprofile_buffer_current_position; |
1270 | |
1271 | /* cancel future timers */ |
1272 | bootprofile_interval_abs = 0; |
1273 | |
1274 | out: |
1275 | |
1276 | BOOTPROFILE_UNLOCK(); |
1277 | |
1278 | return (result); |
1279 | } |
1280 | |