/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <libkern/libkern.h>
#include <libkern/coreanalytics/coreanalytics.h>
#include <mach/mach_types.h>
#include <sys/errno.h>
#include <sys/kauth.h>
#include <sys/proc_internal.h>
#include <sys/stackshot.h>
#include <sys/sysproto.h>
#include <sys/sysctl.h>
#include <pexpert/device_tree.h>
#include <pexpert/pexpert.h>
#include <os/log.h>
#include <IOKit/IOBSD.h>

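/*
 * Percentage adjustment applied to stackshot size estimates, exposed as the
 * experiment factor kern.stackshot_estimate_adj (valid range 0-100).
 */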
extern uint32_t stackshot_estimate_adj;
EXPERIMENT_FACTOR_UINT(_kern, stackshot_estimate_adj, &stackshot_estimate_adj, 0, 100,
    "adjust stackshot estimates up by this percentage");

#define SSHOT_ANALYTICS_PERIOD_HOURS 1

enum stackshot_report_setting {
	STACKSHOT_REPORT_NONE = 0,
	STACKSHOT_REPORT_NO_ENT = 1, /* report if missing entitlement */
	STACKSHOT_REPORT_ALL = 2, /* always report */
};

#define STACKSHOT_ENTITLEMENT_REPORT STACKSHOT_REPORT_ALL
#define STACKSHOT_ENTITLEMENT_REFUSE false
/*
 * Controls for the stackshot entitlement; changeable with boot-args:
 * sshot-entitlement-report=0, 1, or 2 (send CoreAnalytics when called without
 * the entitlement (1), or always (2))
 * sshot-entitlement-refuse=0 or 1 (fail calls made without the entitlement)
 * This only affects requests from userspace.
 *
 * For reporting, we only report a given command once.
 */
SECURITY_READ_ONLY_LATE(uint8_t) stackshot_entitlement_report = STACKSHOT_ENTITLEMENT_REPORT;
SECURITY_READ_ONLY_LATE(bool) stackshot_entitlement_refuse = STACKSHOT_ENTITLEMENT_REFUSE;

#define STACKSHOT_ENTITLEMENT "com.apple.private.stackshot"
#define STACKSHOT_STATS_ENTITLEMENT "com.apple.private.stackshot.stats"
#define SSHOT_ENTITLEMENT_BOOTARG_REPORT "sshot-entitlement-report"
#define SSHOT_ENTITLEMENT_BOOTARG_FAIL "sshot-entitlement-refuse"
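/*
 * For example, booting with boot-args="sshot-entitlement-report=2
 * sshot-entitlement-refuse=1" reports every userspace stackshot request to
 * CoreAnalytics and refuses requests from processes without the entitlement.
 */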

/* use single printable characters; these are in order of the stackshot syscall's checks */
enum stackshot_progress {
	STACKSHOT_NOT_ROOT = 'R',
	STACKSHOT_NOT_ENTITLED = 'E',
	STACKSHOT_PERMITTED = 'P',
	STACKSHOT_ATTEMPTED = 'A',
	STACKSHOT_SUCCEEDED = 'S',
};
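/* The progress character is reported as the one-character sshot_progress
 * string in the CoreAnalytics event below. */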

CA_EVENT(stackshot_entitlement_report,
    CA_INT, sshot_count,
    CA_BOOL, sshot_refused,
    CA_BOOL, sshot_have_entitlement,
    CA_BOOL, sshot_fromtest,
    CA_STATIC_STRING(2), sshot_progress,
    CA_STATIC_STRING(CA_PROCNAME_LEN), sshot_pcomm,
    CA_STATIC_STRING(33), sshot_pname);

static thread_call_t sshot_entitlement_thread_call;

#define SSHOT_ENTITLEMENT_RECENT 16 /* track 16 recent violators */
struct stackshot_entitlement_report {
	uint64_t ser_lastev;
	uint32_t ser_count;
	command_t ser_pcomm;
	proc_name_t ser_pname;
	bool ser_have_entitlement;
	char ser_progress; /* from enum stackshot_progress */
#if DEVELOPMENT || DEBUG
	bool ser_test;
#endif
};
static LCK_GRP_DECLARE(sshot_report_lck_grp, "stackshot_entitlement_report");
static LCK_MTX_DECLARE(sshot_report_lck, &sshot_report_lck_grp);
static struct stackshot_entitlement_report *sshot_report_recent[SSHOT_ENTITLEMENT_RECENT];
static bool sshot_report_batch_scheduled = false;
#if DEVELOPMENT || DEBUG
static uint32_t sshot_report_test_events = 0;
static uint64_t sshot_report_test_counts = 0;
#endif
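/*
 * The table above deduplicates reports per (pcomm, pname, entitlement,
 * progress) tuple, so a given command is reported at most once per batch;
 * repeat occurrences just bump ser_count until the batch is sent.
 */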

static void
stackshot_entitlement_send_report(const struct stackshot_entitlement_report *ser)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(stackshot_entitlement_report);
	CA_EVENT_TYPE(stackshot_entitlement_report) *ser_event = ca_event->data;
	ser_event->sshot_count = ser->ser_count;
	ser_event->sshot_refused = stackshot_entitlement_refuse;
#if DEVELOPMENT || DEBUG
	ser_event->sshot_fromtest = ser->ser_test;
#else
	ser_event->sshot_fromtest = false;
#endif
	ser_event->sshot_have_entitlement = ser->ser_have_entitlement;
	ser_event->sshot_progress[0] = ser->ser_progress;
	ser_event->sshot_progress[1] = '\0';
	static_assert(sizeof(ser_event->sshot_pcomm) == sizeof(ser->ser_pcomm), "correct sshot_pcomm/ser_pcomm sizing");
	strlcpy(ser_event->sshot_pcomm, ser->ser_pcomm, sizeof(ser->ser_pcomm));
	static_assert(sizeof(ser_event->sshot_pname) == sizeof(ser->ser_pname), "correct sshot_pname/ser_pname sizing");
	strlcpy(ser_event->sshot_pname, ser->ser_pname, sizeof(ser->ser_pname));
	CA_EVENT_SEND(ca_event);
}

static void
sshot_entitlement_schedule_batch(void)
{
	static const uint64_t analytics_period_ns = SSHOT_ANALYTICS_PERIOD_HOURS * 60 * 60 * NSEC_PER_SEC;
	uint64_t analytics_period_absolutetime;
	nanoseconds_to_absolutetime(analytics_period_ns, &analytics_period_absolutetime);

	thread_call_enter_delayed(sshot_entitlement_thread_call, analytics_period_absolutetime + mach_absolute_time());
}

__attribute__((always_inline))
static void
sshot_entitlement_copy_for_send(const struct stackshot_entitlement_report *src,
    struct stackshot_entitlement_report *dst)
{
	bcopy(src, dst, sizeof(*src));
#if DEVELOPMENT || DEBUG
	if (src->ser_test) {
		sshot_report_test_events++;
		sshot_report_test_counts += src->ser_count;
	}
#endif
}

#define SSHOT_ENTITLEMENT_REPORT_NORMAL 0
#define SSHOT_ENTITLEMENT_REPORT_TEST(x) ((int)((x) ?: 1)) // always non-zero
#define SSHOT_ENTITLEMENT_REPORT_TEST_OVERFLOW SSHOT_ENTITLEMENT_REPORT_TEST(-1)
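/*
 * testval semantics: SSHOT_ENTITLEMENT_REPORT_NORMAL (0) marks a real event;
 * any non-zero value marks a test event.  SSHOT_ENTITLEMENT_REPORT_TEST(x)
 * yields x, or 1 when x is 0, so test values are always non-zero; the
 * overflow sentinel (-1) makes do_report add UINT32_MAX to the count to
 * exercise the saturation path.
 */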

static void
stackshot_entitlement_do_report(bool have_entitlement, enum stackshot_progress progress, int testval)
{
#pragma unused(testval)
#if DEVELOPMENT || DEBUG
	const bool from_test = (testval != SSHOT_ENTITLEMENT_REPORT_NORMAL);
#endif
	const struct proc *p = current_proc();
	struct stackshot_entitlement_report *ser = kalloc_data(sizeof(*ser), Z_WAITOK | Z_NOFAIL);
	struct stackshot_entitlement_report *tofree = NULL;
	struct stackshot_entitlement_report myser = {0};
	struct stackshot_entitlement_report oldser = {0};
	bool send_myser = false;
	bool send_oldser = false;

	myser.ser_count = 0;
	myser.ser_have_entitlement = have_entitlement;
	myser.ser_progress = (char)progress;
	static_assert(sizeof(p->p_comm) == sizeof(myser.ser_pcomm), "correct p_comm/ser_pcomm sizing");
	strlcpy(myser.ser_pcomm, p->p_comm, sizeof(myser.ser_pcomm));
	static_assert(sizeof(p->p_name) == sizeof(myser.ser_pname), "correct p_name/ser_pname sizing");
	strlcpy(myser.ser_pname, p->p_name, sizeof(myser.ser_pname));
#if DEVELOPMENT || DEBUG
	myser.ser_test = from_test;
	if (testval && (myser.ser_pcomm[0] != 0)) {
		myser.ser_pcomm[0] += (testval - 1);
	}
#endif
	lck_mtx_lock(&sshot_report_lck);
	// Search the table, looking for a match or a NULL slot. While we search, track
	// the slot with the oldest use time as an eviction candidate, for LRU behavior.

	struct stackshot_entitlement_report **tslot = NULL;
	bool match = false;
	for (int i = 0; i < SSHOT_ENTITLEMENT_RECENT; i++) {
		struct stackshot_entitlement_report **curp = &sshot_report_recent[i];
		struct stackshot_entitlement_report *cur = *curp;

		if (cur == NULL) {
			tslot = curp;
			break;
		}
		if (cur->ser_have_entitlement == myser.ser_have_entitlement &&
		    cur->ser_progress == myser.ser_progress &&
		    strncmp(cur->ser_pcomm, myser.ser_pcomm, sizeof(cur->ser_pcomm)) == 0 &&
		    strncmp(cur->ser_pname, myser.ser_pname, sizeof(cur->ser_pname)) == 0) {
			match = true;
			tslot = curp;
			break;
		}
		// not a match; track the slot with the oldest event to evict
		if (tslot == NULL ||
		    ((*tslot)->ser_lastev > cur->ser_lastev)) {
			tslot = curp;
		}
	}
	// Either we have:
	//   a match,
	//   no match and an empty (NULL) slot, or
	//   no match, a full table, and tslot points at the entry with the oldest last-event time
	struct stackshot_entitlement_report *cur = NULL; // the entry to bump the count of
	if (match) {
		cur = *tslot;
		tofree = ser;
	} else {
		struct stackshot_entitlement_report *old = *tslot;
		if (old != NULL && old->ser_count > 0) {
			sshot_entitlement_copy_for_send(old, &oldser);
			send_oldser = true;
		}
		// fill it in and install it
		bcopy(&myser, ser, sizeof(*cur));
		cur = *tslot = ser;
		tofree = old; // if there's an old one, free it after we drop the lock
	}
	// Now that we have an installed structure, bump the count
	uint32_t ncount;
	uint32_t toadd = 1;
#if DEVELOPMENT || DEBUG
	if (testval == SSHOT_ENTITLEMENT_REPORT_TEST_OVERFLOW) {
		toadd = UINT32_MAX;
	}
#endif
	if (os_add_overflow(cur->ser_count, toadd, &ncount)) {
		// overflow; send the existing structure
		sshot_entitlement_copy_for_send(cur, &myser);
		send_myser = true;
		ncount = toadd;
	}
	cur->ser_lastev = mach_absolute_time();
	cur->ser_count = ncount;
#if DEVELOPMENT || DEBUG
	cur->ser_test = from_test;
#endif
	// see if we need to schedule the background task
	const bool batch_is_scheduled = sshot_report_batch_scheduled;
	if (!batch_is_scheduled) {
		sshot_report_batch_scheduled = true;
	}
	lck_mtx_unlock(&sshot_report_lck);
	//
	// We just bumped a counter in the structure, so schedule an analytics
	// dump in an hour if one isn't already scheduled.
	//
	// The flag gets cleared when the batch clears out the data, making the
	// next event reschedule immediately.
	//
	if (!batch_is_scheduled) {
		sshot_entitlement_schedule_batch();
	}

	if (tofree != NULL) {
		kfree_data(tofree, sizeof(*tofree));
	}
	if (send_myser) {
		stackshot_entitlement_send_report(&myser);
	}
	if (send_oldser) {
		stackshot_entitlement_send_report(&oldser);
	}
}

static void
sshot_entitlement_send_batch(void *arg0, void *arg1)
{
#pragma unused(arg0, arg1)
	struct stackshot_entitlement_report *ser = kalloc_data(sizeof(*ser) * SSHOT_ENTITLEMENT_RECENT, Z_WAITOK | Z_NOFAIL);
	size_t count = 0;
	// Walk through the array, find the entries with non-zero counts, and:
	//   * copy them into our local array for reporting, and
	//   * zero the counts.
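	// Copies are taken and counts reset under the lock; the events are
	// sent only after the lock is dropped.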
	lck_mtx_lock(&sshot_report_lck);
	for (size_t i = 0; i < SSHOT_ENTITLEMENT_RECENT; i++) {
		struct stackshot_entitlement_report *cur = sshot_report_recent[i];
		if (cur == NULL || cur->ser_count == 0) {
			continue;
		}
		sshot_entitlement_copy_for_send(cur, &ser[count]);
		count++;
		cur->ser_count = 0;
	}
	sshot_report_batch_scheduled = false;
	lck_mtx_unlock(&sshot_report_lck);
	for (size_t i = 0; i < count; i++) {
		stackshot_entitlement_send_report(&ser[i]);
	}
	kfree_data(ser, sizeof(*ser) * SSHOT_ENTITLEMENT_RECENT);
}

#if DEVELOPMENT || DEBUG
/*
 * Manual trigger of a set of entitlement reports and the associated batch
 * processing for testing on dev/debug kernels.
 */
static int
sysctl_stackshot_entitlement_test SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr) {
		return error;
	}
	static LCK_MTX_DECLARE(sshot_report_test_lck, &sshot_report_lck_grp);
	static bool sshot_report_test_active;
	// avoid multiple active tests
	lck_mtx_lock(&sshot_report_test_lck);
	if (sshot_report_test_active) {
		lck_mtx_unlock(&sshot_report_test_lck);
		return EBUSY;
	}
	sshot_report_test_active = true;
	lck_mtx_unlock(&sshot_report_test_lck);

	sshot_entitlement_send_batch(NULL, NULL); // flush out existing data
	sshot_report_test_events = 0;
	sshot_report_test_counts = 0;

	// fill with test events
	for (int idx = 0; idx < SSHOT_ENTITLEMENT_RECENT; idx++) {
		stackshot_entitlement_do_report(false, STACKSHOT_NOT_ENTITLED, SSHOT_ENTITLEMENT_REPORT_TEST(idx + 1));
	}
	sshot_entitlement_send_batch(NULL, NULL);
	const uint32_t post_batch = sshot_report_test_events;
	const uint64_t post_batch_counts = sshot_report_test_counts;

	// overflow test
	stackshot_entitlement_do_report(false, STACKSHOT_NOT_ENTITLED, SSHOT_ENTITLEMENT_REPORT_TEST_OVERFLOW);
	stackshot_entitlement_do_report(false, STACKSHOT_NOT_ENTITLED, SSHOT_ENTITLEMENT_REPORT_TEST_OVERFLOW);
	sshot_entitlement_send_batch(NULL, NULL);
	const uint32_t post_overflow = sshot_report_test_events - post_batch;
	const uint64_t post_overflow_counts = sshot_report_test_counts - post_batch_counts;

	os_log_error(OS_LOG_DEFAULT, "sysctl_stackshot_entitlement_test: made %d events, %d events sent, %d counts (both should == events)",
	    SSHOT_ENTITLEMENT_RECENT, post_batch, (int)post_batch_counts);
	os_log_error(OS_LOG_DEFAULT, "sysctl_stackshot_entitlement_test: overflow, %d events sent (expect 2), %llx counts (expect %llx)",
	    post_overflow, (long long)post_overflow_counts, 2 * (long long)UINT32_MAX);

	lck_mtx_lock(&sshot_report_test_lck);
	sshot_report_test_active = false;
	lck_mtx_unlock(&sshot_report_test_lck);

	if (post_batch != SSHOT_ENTITLEMENT_RECENT ||
	    post_batch_counts != SSHOT_ENTITLEMENT_RECENT ||
	    post_overflow != 2 ||
	    post_overflow_counts != 2 * (long long)UINT32_MAX) {
		os_log_error(OS_LOG_DEFAULT, "sysctl_stackshot_entitlement_test: failed");
		return EDEVERR;
	}

	os_log_error(OS_LOG_DEFAULT, "sysctl_stackshot_entitlement_test: success");
	return 0;
}
SYSCTL_PROC(_debug, OID_AUTO, stackshot_entitlement_send_batch,
    CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0,
    &sysctl_stackshot_entitlement_test, "I", "");
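/*
 * Example (root shell on a DEVELOPMENT/DEBUG kernel):
 *   sysctl debug.stackshot_entitlement_send_batch=1
 * triggers the test above; results are logged via os_log and the sysctl
 * returns 0 on success or EDEVERR on failure.
 */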

/* Return current entitlement enforcement state. */
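/* Encoding: the low nibble holds stackshot_entitlement_report (0-2); bit 0x10
 * is set when stackshot_entitlement_refuse is enabled. */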
static int
sysctl_stackshot_entitlement_status SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	int return_value = ((stackshot_entitlement_report & 0xf) | (stackshot_entitlement_refuse ? 0x10 : 0));
	return SYSCTL_OUT(req, &return_value, sizeof(return_value));
}
SYSCTL_PROC(_kern, OID_AUTO, stackshot_entitlement_status,
    CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, 0, 0,
    &sysctl_stackshot_entitlement_status, "I", "");

#endif /* DEVELOPMENT || DEBUG */

__startup_func
static void
atboot_stackshot_entitlement(void)
{
	uint32_t boot_arg;
	if (PE_parse_boot_argn(SSHOT_ENTITLEMENT_BOOTARG_REPORT, &boot_arg, sizeof(boot_arg))) {
		/* clamp to valid values */
		boot_arg = (boot_arg <= STACKSHOT_REPORT_ALL ? boot_arg : STACKSHOT_REPORT_ALL);
		stackshot_entitlement_report = (uint8_t)boot_arg;
	}
	if (PE_parse_boot_argn(SSHOT_ENTITLEMENT_BOOTARG_FAIL, &boot_arg, sizeof(boot_arg))) {
		stackshot_entitlement_refuse = (boot_arg != 0);
	}
	sshot_entitlement_thread_call = thread_call_allocate_with_options(
		sshot_entitlement_send_batch, NULL, THREAD_CALL_PRIORITY_LOW, THREAD_CALL_OPTIONS_ONCE);
}
STARTUP(SYSCTL, STARTUP_RANK_MIDDLE, atboot_stackshot_entitlement);


static int
sysctl_stackshot_stats SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
	stackshot_stats_t stats;
	proc_t self = current_proc();

	/* root processes and non-root processes with the STATS entitlement can read this */
	if (suser(kauth_cred_get(), &self->p_acflag) != 0 &&
	    !IOCurrentTaskHasEntitlement(STACKSHOT_STATS_ENTITLEMENT)) {
		return EPERM;
	}

	if (req->newptr != USER_ADDR_NULL) {
		return EPERM;
	}
	if (req->oldptr == USER_ADDR_NULL) {
		req->oldidx = sizeof(stats);
		return 0;
	}
	extern void stackshot_get_timing(uint64_t *last_abs_start, uint64_t *last_abs_end, uint64_t *count, uint64_t *total_duration);
	stackshot_get_timing(&stats.ss_last_start, &stats.ss_last_end, &stats.ss_count, &stats.ss_duration);

	return SYSCTL_OUT(req, &stats, MIN(sizeof(stats), req->oldlen));
}

SYSCTL_PROC(_kern, OID_AUTO, stackshot_stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED |
    CTLFLAG_KERN,
    NULL, 0, sysctl_stackshot_stats, "S,stackshot_stats",
    "Get stackshot statistics");

/*
 * Stackshot system calls
 */

#if CONFIG_TELEMETRY
extern kern_return_t stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
#endif /* CONFIG_TELEMETRY */
extern kern_return_t kern_stack_snapshot_with_reason(char* reason);
extern kern_return_t kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, size_t stackshot_config_size, boolean_t stackshot_from_user);

static int
stackshot_kern_return_to_bsd_error(kern_return_t kr)
{
	switch (kr) {
	case KERN_SUCCESS:
		return 0;
	case KERN_RESOURCE_SHORTAGE:
		/* could not allocate memory, or stackshot is actually bigger than
		 * SANE_TRACEBUF_SIZE */
		return ENOMEM;
	case KERN_INSUFFICIENT_BUFFER_SIZE:
	case KERN_NO_SPACE:
		/* ran out of buffer to write the stackshot.  Normally this error
		 * causes a larger buffer to be allocated in-kernel, rather than
		 * being returned to the user. */
		return ENOSPC;
	case KERN_NO_ACCESS:
		return EPERM;
	case KERN_MEMORY_PRESENT:
		return EEXIST;
	case KERN_NOT_SUPPORTED:
		return ENOTSUP;
	case KERN_NOT_IN_SET:
		/* requested an existing buffer, but there isn't one. */
		return ENOENT;
	case KERN_ABORTED:
		/* kdp did not report an error, but also did not produce any data */
		return EINTR;
	case KERN_FAILURE:
		/* stackshot came across inconsistent data and needed to bail out */
		return EBUSY;
	case KERN_OPERATION_TIMED_OUT:
		/* debugger synchronization timed out */
		return ETIMEDOUT;
	default:
		return EINVAL;
	}
}

/*
 * stack_snapshot_with_config: Obtains a coherent set of stack traces for specified threads on the system,
 *                             tracing both kernel and user stacks where available. Allocates a buffer from the
 *                             kernel and maps the buffer into the calling task's address space.
 *
 * Inputs:                     uap->stackshot_config_version - version of the stackshot config that is being passed
 *                             uap->stackshot_config - pointer to the stackshot config
 *                             uap->stackshot_config_size - size of the stackshot config being passed
 * Outputs:                    EINVAL if there is a problem with the arguments
 *                             EFAULT if we failed to copy in the arguments successfully
 *                             EPERM if the caller is not privileged
 *                             ENOTSUP if the caller is passing a version of arguments that is not supported by the kernel
 *                             (indicates libsyscall:kernel mismatch) or if the caller is requesting unsupported flags
 *                             ENOENT if the caller is requesting an existing buffer that doesn't exist or if the
 *                             requested PID isn't found
 *                             ENOMEM if the kernel is unable to allocate enough memory to serve the request
 *                             ENOSPC if there isn't enough space in the caller's address space to remap the buffer
 *                             ESRCH if the target PID isn't found
 *                             returns 0 on success
 */
int
stack_snapshot_with_config(struct proc *p, struct stack_snapshot_with_config_args *uap, __unused int *retval)
{
	int error = 0;
	kern_return_t kr;
	const uint8_t report = stackshot_entitlement_report;
	const bool refuse = stackshot_entitlement_refuse;
	enum stackshot_progress progress = STACKSHOT_NOT_ROOT;
	bool has_entitlement = true;

	if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
		goto err;
	}
	progress = STACKSHOT_NOT_ENTITLED;

	if ((report != STACKSHOT_REPORT_NONE || refuse) &&
	    !IOCurrentTaskHasEntitlement(STACKSHOT_ENTITLEMENT)) {
		has_entitlement = false;
		if (refuse) {
			error = EPERM;
			goto err;
		}
	}
	progress = STACKSHOT_PERMITTED;

	if ((void*)uap->stackshot_config == NULL) {
		error = EINVAL;
		goto err;
	}

	switch (uap->stackshot_config_version) {
	case STACKSHOT_CONFIG_TYPE:
		if (uap->stackshot_config_size != sizeof(stackshot_config_t)) {
			error = EINVAL;
			break;
		}
		stackshot_config_t config;
		error = copyin(uap->stackshot_config, &config, sizeof(stackshot_config_t));
		if (error != KERN_SUCCESS) {
			error = EFAULT;
			break;
		}
		kr = kern_stack_snapshot_internal(uap->stackshot_config_version, &config, sizeof(stackshot_config_t), TRUE);
		error = stackshot_kern_return_to_bsd_error(kr);
		progress = (error == 0) ? STACKSHOT_SUCCEEDED : STACKSHOT_ATTEMPTED;
		break;
	default:
		error = ENOTSUP;
		break;
	}
err:
	if (report == STACKSHOT_REPORT_ALL || (report == STACKSHOT_REPORT_NO_ENT && !has_entitlement)) {
		stackshot_entitlement_do_report(has_entitlement, progress, SSHOT_ENTITLEMENT_REPORT_NORMAL);
	}
	return error;
}

#if CONFIG_TELEMETRY
/*
 * microstackshot: Catch-all system call for microstackshot-related operations, including
 *                 enabling/disabling both global and windowed microstackshots as well
 *                 as retrieving windowed or global stackshots and the boot profile.
 * Inputs:         uap->tracebuf - address of the user space destination
 *                 buffer
 *                 uap->tracebuf_size - size of the user space trace buffer
 *                 uap->flags - various flags
 * Outputs:        EPERM if the caller is not privileged
 *                 EINVAL if the supplied mss_args is NULL, mss_args.tracebuf is NULL or mss_args.tracebuf_size is not sane
 *                 ENOMEM if we don't have enough memory to satisfy the request
 *                 *retval contains the number of bytes traced, if successful
 *                 and -1 otherwise
 */
int
microstackshot(struct proc *p, struct microstackshot_args *uap, int32_t *retval)
{
	int error = 0;
	kern_return_t kr;

	if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
		return error;
	}

	kr = stack_microstackshot(uap->tracebuf, uap->tracebuf_size, uap->flags, retval);
	return stackshot_kern_return_to_bsd_error(kr);
}
#endif /* CONFIG_TELEMETRY */

/*
 * kern_stack_snapshot_with_reason: Obtains a coherent set of stack traces for specified threads on the system,
 *                                  tracing both kernel and user stacks where available. Allocates a buffer from the
 *                                  kernel and stores the address of this buffer.
 *
 * Inputs:                          reason - the reason for triggering a stackshot (unused at the moment, but in the
 *                                  future will be saved in the stackshot)
 * Outputs:                         EINVAL/ENOTSUP if there is a problem with the arguments
 *                                  EPERM if the caller doesn't pass at least one KERNEL stackshot flag
 *                                  ENOMEM if the kernel is unable to allocate enough memory to serve the request
 *                                  ESRCH if the target PID isn't found
 *                                  returns 0 on success
 */
int
kern_stack_snapshot_with_reason(__unused char *reason)
{
	stackshot_config_t config;
	kern_return_t kr;

	config.sc_pid = -1;
	config.sc_flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS | STACKSHOT_SAVE_IN_KERNEL_BUFFER |
	    STACKSHOT_KCDATA_FORMAT | STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_THREAD_WAITINFO |
	    STACKSHOT_NO_IO_STATS | STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT);
	config.sc_delta_timestamp = 0;
	config.sc_out_buffer_addr = 0;
	config.sc_out_size_addr = 0;

	kr = kern_stack_snapshot_internal(STACKSHOT_CONFIG_TYPE, &config, sizeof(stackshot_config_t), FALSE);
	return stackshot_kern_return_to_bsd_error(kr);
}