/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/time.h>

#include <sys/codesign.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kauth.h>
#include <sys/utfconv.h>

#include <sys/fasttrap.h>
#include <sys/fasttrap_impl.h>
#include <sys/fasttrap_isa.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <sys/proc.h>

#include <security/mac_framework.h>

#include <miscfs/devfs/devfs.h>
#include <sys/proc_internal.h>
#include <sys/dtrace_glue.h>
#include <sys/dtrace_ptss.h>

#include <kern/cs_blobs.h>
#include <kern/thread.h>
#include <kern/zalloc.h>

#include <mach/thread_act.h>

extern kern_return_t kernel_thread_start_priority(thread_continue_t continuation, void *parameter, integer_t priority, thread_t *new_thread);

/* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
#define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */

__private_extern__
void
qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));

/*
 * User-Land Trap-Based Tracing
 * ----------------------------
 *
 * The fasttrap provider allows DTrace consumers to instrument any user-level
 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
 * function. While the specific techniques used are very ISA specific, the
 * methodology is generalizable to any architecture.
 *
 *
 * The General Methodology
 * -----------------------
 *
 * With the primary goal of tracing every user-land instruction and the
 * limitation that we can't trust user space so don't want to rely on much
 * information there, we begin by replacing the instructions we want to trace
 * with trap instructions. Each instruction we overwrite is saved into a hash
 * table keyed by process ID and pc address. When we enter the kernel due to
 * this trap instruction, we need the effects of the replaced instruction to
 * appear to have occurred before we proceed with the user thread's
 * execution.
 *
 * Each user level thread is represented by a ulwp_t structure which is
 * always easily accessible through a register. The most basic way to produce
 * the effects of the instruction we replaced is to copy that instruction out
 * to a bit of scratch space reserved in the user thread's ulwp_t structure
 * (a sort of kernel-private thread local storage), set the PC to that
 * scratch space and single step. When we reenter the kernel after single
 * stepping the instruction we must then adjust the PC to point to what would
 * normally be the next instruction. Of course, special care must be taken
 * for branches and jumps, but these represent such a small fraction of any
 * instruction set that writing the code to emulate these in the kernel is
 * not too difficult.
 *
 * Return probes may require several tracepoints to trace every return site,
 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
 * tracepoints contain lists of probes to activate and probes contain lists
 * of tracepoints to enable. If a probe is activated, it adds its ID to
 * existing tracepoints or creates new ones as necessary.
 *
 * Most probes are activated _before_ the instruction is executed, but return
 * probes are activated _after_ the effects of the last instruction of the
 * function are visible. Return probes must be fired _after_ we have
 * single-stepped the instruction whereas all other probes are fired
 * beforehand.
 *
 *
 * Lock Ordering
 * -------------
 *
 * The lock ordering below -- both internally and with respect to the DTrace
 * framework -- is a little tricky and bears some explanation. Each provider
 * has a lock (ftp_mtx) that protects its members including reference counts
 * for enabled probes (ftp_rcount), consumers actively creating probes
 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
 * from being freed. A provider is looked up by taking the bucket lock for the
 * provider hash table, and is returned with its lock held. The provider lock
 * may be taken in functions invoked by the DTrace framework, but may not be
 * held while calling functions in the DTrace framework.
 *
 * To ensure consistency over multiple calls to the DTrace framework, the
 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
 * not be taken when holding the provider lock as that would create a cyclic
 * lock ordering. In situations where one would naturally take the provider
 * lock and then the creation lock, we instead bump a reference count to
 * prevent the provider from disappearing, drop the provider lock, and
 * acquire the creation lock.
 *
 * Briefly:
 *	bucket lock before provider lock
 *	DTrace before provider lock
 *	creation lock before DTrace
 *	never hold the provider lock and creation lock simultaneously
 */
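
/*
 * As a rough sketch of the pattern described above (these are the real lock
 * names, but this is not any one literal call sequence from this file --
 * see fasttrap_add_probe() below for an actual instance):
 *
 *	lck_mtx_lock(&bucket->ftb_mtx);		// bucket lock first
 *	lck_mtx_lock(&fp->ftp_mtx);		// then the provider lock
 *	fp->ftp_ccount++;			// pin the provider
 *	lck_mtx_unlock(&fp->ftp_mtx);		// drop before calling DTrace
 *	lck_mtx_lock(&fp->ftp_cmtx);		// creation lock is now safe
 *	... dtrace_probe_lookup() / dtrace_probe_create() ...
 *	lck_mtx_unlock(&fp->ftp_cmtx);
 */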

static dtrace_meta_provider_id_t fasttrap_meta_id;

static thread_t fasttrap_cleanup_thread;

static LCK_GRP_DECLARE(fasttrap_lck_grp, "fasttrap");
static LCK_ATTR_DECLARE(fasttrap_lck_attr, 0, 0);
static LCK_MTX_DECLARE_ATTR(fasttrap_cleanup_mtx,
    &fasttrap_lck_grp, &fasttrap_lck_attr);


#define FASTTRAP_CLEANUP_PROVIDER 0x1
#define FASTTRAP_CLEANUP_TRACEPOINT 0x2

static uint32_t fasttrap_cleanup_work = 0;

/*
 * Generation count on modifications to the global tracepoint lookup table.
 */
static volatile uint64_t fasttrap_mod_gen;

/*
 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
 * based on system memory. Each time a probe is created, fasttrap_total is
 * incremented by the number of tracepoints that may be associated with that
 * probe; fasttrap_total is capped at fasttrap_max.
 */

static uint32_t fasttrap_max;
static uint32_t fasttrap_retired;
static uint32_t fasttrap_total;


#define FASTTRAP_TPOINTS_DEFAULT_SIZE	0x4000
#define FASTTRAP_PROVIDERS_DEFAULT_SIZE	0x100
#define FASTTRAP_PROCS_DEFAULT_SIZE	0x100

fasttrap_hash_t fasttrap_tpoints;
static fasttrap_hash_t fasttrap_provs;
static fasttrap_hash_t fasttrap_procs;

static uint64_t fasttrap_pid_count;	/* pid ref count */
static LCK_MTX_DECLARE_ATTR(fasttrap_count_mtx,	/* lock on ref count */
    &fasttrap_lck_grp, &fasttrap_lck_attr);

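/*
 * Return codes for fasttrap_tracepoint_enable(): FASTTRAP_ENABLE_FAIL means
 * the tracepoint could not be set up at all, while FASTTRAP_ENABLE_PARTIAL
 * means it was created but installing the trap instruction failed, so the
 * caller must still call fasttrap_tracepoint_disable() for it.
 */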
#define FASTTRAP_ENABLE_FAIL	1
#define FASTTRAP_ENABLE_PARTIAL	2

static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);

static fasttrap_provider_t *fasttrap_provider_lookup(proc_t*, fasttrap_provider_type_t, const char *,
    const dtrace_pattr_t *);
static void fasttrap_provider_retire(proc_t*, const char *, int);
static void fasttrap_provider_free(fasttrap_provider_t *);

static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
static void fasttrap_proc_release(fasttrap_proc_t *);

#define FASTTRAP_PROVS_INDEX(pid, name) \
	((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)

#define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)

/*
 * APPLE NOTE: To save memory, some common memory allocations are given
 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
 * which means it would fall into the kalloc.128 bucket. With
 * 20k elements allocated, the space saved is substantial.
 */

ZONE_DEFINE(fasttrap_tracepoint_t_zone, "dtrace.fasttrap_tracepoint_t",
    sizeof(fasttrap_tracepoint_t), ZC_NONE);

/*
 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
 * that the sweet spot for reducing memory footprint is covering the first
 * three sizes. Everything larger goes into the common pool.
 */
#define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4

struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];

static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
	"",
	"dtrace.fasttrap_probe_t[1]",
	"dtrace.fasttrap_probe_t[2]",
	"dtrace.fasttrap_probe_t[3]"
};

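/*
 * Returns the 1-based position of the highest bit set in i (i.e.
 * floor(log2(i)) + 1), or 0 if no bits are set.
 */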
static int
fasttrap_highbit(ulong_t i)
{
	int h = 1;

	if (i == 0)
		return (0);
#ifdef _LP64
	if (i & 0xffffffff00000000ul) {
		h += 32; i >>= 32;
	}
#endif
	if (i & 0xffff0000) {
		h += 16; i >>= 16;
	}
	if (i & 0xff00) {
		h += 8; i >>= 8;
	}
	if (i & 0xf0) {
		h += 4; i >>= 4;
	}
	if (i & 0xc) {
		h += 2; i >>= 2;
	}
	if (i & 0x2) {
		h += 1;
	}
	return (h);
}

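/*
 * Simple ELF-style string hash; the FASTTRAP_PROVS_INDEX() macro above
 * masks the result with fth_mask to select a provider hash bucket.
 */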
static uint_t
fasttrap_hash_str(const char *p)
{
	unsigned int g;
	uint_t hval = 0;

	while (*p) {
		hval = (hval << 4) + *p++;
		if ((g = (hval & 0xf0000000)) != 0)
			hval ^= g >> 24;
		hval &= ~g;
	}
	return (hval);
}

/*
 * APPLE NOTE: fasttrap_sigtrap not implemented
 */
void
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)

#if !defined(__APPLE__)
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	mutex_enter(&p->p_lock);
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);

	if (t != NULL)
		aston(t);
#endif /* __APPLE__ */

	printf("fasttrap_sigtrap called with no implementation.\n");
}

/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
	unsigned int i;

	if (gen < fasttrap_mod_gen)
		return;

	fasttrap_mod_gen++;

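	/*
	 * Cycling each CPU's cpuc_pid_lock guarantees that any probe-context
	 * code that held the lock -- and so may still have been referencing
	 * probes from the prior generation -- has finished with it.
	 */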
	for (i = 0; i < NCPU; i++) {
		lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
		lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
	}
}

static void fasttrap_pid_cleanup(uint32_t);

static unsigned int
fasttrap_pid_cleanup_providers(void)
{
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	unsigned int later = 0, i;

	/*
	 * Iterate over all the providers trying to remove the marked
	 * ones. If a provider is marked but not retired, we just
	 * have to take a crack at removing it -- it's no big deal if
	 * we can't.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		bucket = &fasttrap_provs.fth_table[i];
		lck_mtx_lock(&bucket->ftb_mtx);
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;

		while ((fp = *fpp) != NULL) {
			if (!fp->ftp_marked) {
				fpp = &fp->ftp_next;
				continue;
			}

			lck_mtx_lock(&fp->ftp_mtx);

			/*
			 * If this provider has consumers actively
			 * creating probes (ftp_ccount) or is a USDT
			 * provider (ftp_mcount), we can't unregister
			 * or even condense.
			 */
			if (fp->ftp_ccount != 0 ||
			    fp->ftp_mcount != 0) {
				fp->ftp_marked = 0;
				lck_mtx_unlock(&fp->ftp_mtx);
				continue;
			}

			if (!fp->ftp_retired || fp->ftp_rcount != 0)
				fp->ftp_marked = 0;

			lck_mtx_unlock(&fp->ftp_mtx);

			/*
			 * If we successfully unregister this
			 * provider we can remove it from the hash
			 * chain and free the memory. If our attempt
			 * to unregister fails and this is a retired
			 * provider, increment our flag to try again
			 * pretty soon. If we've consumed more than
			 * half of our total permitted number of
			 * probes call dtrace_condense() to try to
			 * clean out the unenabled probes.
			 */
			provid = fp->ftp_provid;
			if (dtrace_unregister(provid) != 0) {
				if (fasttrap_total > fasttrap_max / 2)
					(void) dtrace_condense(provid);
				later += fp->ftp_marked;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	return later;
}

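/*
 * Queue of (pid, pc) pairs identifying retired tracepoints whose original
 * instructions still need to be restored; entries are appended by
 * fasttrap_tracepoint_retire() and drained by fasttrap_tracepoint_cleanup().
 */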
typedef struct fasttrap_tracepoint_spec {
	pid_t fttps_pid;
	user_addr_t fttps_pc;
} fasttrap_tracepoint_spec_t;

static fasttrap_tracepoint_spec_t *fasttrap_retired_spec;
static size_t fasttrap_cur_retired = 0, fasttrap_retired_size;
static LCK_MTX_DECLARE_ATTR(fasttrap_retired_mtx,
    &fasttrap_lck_grp, &fasttrap_lck_attr);

#define DEFAULT_RETIRED_SIZE 256

static void
fasttrap_tracepoint_cleanup(void)
{
	size_t i;
	pid_t pid = 0;
	user_addr_t pc;
	proc_t *p = PROC_NULL;
	fasttrap_tracepoint_t *tp = NULL;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_bucket_t *bucket;
	for (i = 0; i < fasttrap_cur_retired; i++) {
		pc = fasttrap_retired_spec[i].fttps_pc;
		if (fasttrap_retired_spec[i].fttps_pid != pid) {
			pid = fasttrap_retired_spec[i].fttps_pid;
			if (p != PROC_NULL) {
				sprunlock(p);
			}
			if ((p = sprlock(pid)) == PROC_NULL) {
				pid = 0;
				continue;
			}
		}
		bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;
		}
		/*
		 * Check that the tracepoint is not gone or has not been
		 * re-activated for another probe
		 */
		if (tp == NULL || tp->ftt_retired == 0) {
			lck_mtx_unlock(&bucket->ftb_mtx);
			continue;
		}
		fasttrap_tracepoint_remove(p, tp);
		lck_mtx_unlock(&bucket->ftb_mtx);
	}
	if (p != PROC_NULL) {
		sprunlock(p);
	}

	fasttrap_cur_retired = 0;

	lck_mtx_unlock(&fasttrap_retired_mtx);
}

void
fasttrap_tracepoint_retire(proc_t *p, fasttrap_tracepoint_t *tp)
{
	if (tp->ftt_retired)
		return;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_tracepoint_spec_t *s = &fasttrap_retired_spec[fasttrap_cur_retired++];
	s->fttps_pid = proc_getpid(p);
	s->fttps_pc = tp->ftt_pc;

	if (fasttrap_cur_retired == fasttrap_retired_size) {
		fasttrap_tracepoint_spec_t *new_retired = kmem_zalloc(
		    fasttrap_retired_size * 2 *
		    sizeof(*fasttrap_retired_spec),
		    KM_SLEEP);
		memcpy(new_retired, fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
		kmem_free(fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
		fasttrap_retired_size *= 2;
		fasttrap_retired_spec = new_retired;
	}

	lck_mtx_unlock(&fasttrap_retired_mtx);

	tp->ftt_retired = 1;

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT);
}

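/*
 * Raise the cleanup thread's scheduling precedence when tracepoint usage is
 * near the limit (or a large backlog of retired tracepoints has built up),
 * and lower it again otherwise.
 */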
static void
fasttrap_pid_cleanup_compute_priority(void)
{
	if (fasttrap_total > (fasttrap_max / 100 * 90) || fasttrap_retired > fasttrap_max / 2) {
		thread_precedence_policy_data_t precedence = {12 /* BASEPRI_PREEMPT_HIGH */};
		thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
	}
	else {
		thread_precedence_policy_data_t precedence = {-39 /* BASEPRI_USER_INITIATED */};
		thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);

	}
}

/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 */
/*ARGSUSED*/
__attribute__((noreturn))
static void
fasttrap_pid_cleanup_cb(void)
{
	uint32_t work = 0;
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
	while (1) {
		unsigned int later = 0;

		work = os_atomic_xchg(&fasttrap_cleanup_work, 0, relaxed);
		lck_mtx_unlock(&fasttrap_cleanup_mtx);
		if (work & FASTTRAP_CLEANUP_PROVIDER) {
			later = fasttrap_pid_cleanup_providers();
		}
		if (work & FASTTRAP_CLEANUP_TRACEPOINT) {
			fasttrap_tracepoint_cleanup();
		}
		lck_mtx_lock(&fasttrap_cleanup_mtx);

		fasttrap_pid_cleanup_compute_priority();
		if (!fasttrap_cleanup_work) {
			/*
			 * If we were unable to remove a retired provider, try again after
			 * a second. This situation can occur in certain circumstances where
			 * providers cannot be unregistered even though they have no probes
			 * enabled because of an execution of dtrace -l or something similar.
			 * If the timeout has been disabled (set to 1 because we're trying
			 * to detach), we set fasttrap_cleanup_work to ensure that we'll
			 * get a chance to do that work if and when the timeout is reenabled
			 * (if detach fails).
			 */
			if (later > 0) {
				struct timespec t = {.tv_sec = 1, .tv_nsec = 0};
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", &t);
			}
			else
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
		}
	}

}

/*
 * Activates the asynchronous cleanup mechanism.
 */
static void
fasttrap_pid_cleanup(uint32_t work)
{
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	os_atomic_or(&fasttrap_cleanup_work, work, relaxed);
	fasttrap_pid_cleanup_compute_priority();
	wakeup(&fasttrap_pid_cleanup_cb);
	lck_mtx_unlock(&fasttrap_cleanup_mtx);
}

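/*
 * Called with both the proc_lock and the sprlock held. If code signing
 * enforcement (CS_KILL|CS_HARD) would kill the process when its text pages
 * are modified, register our intent to allow invalid pages first. Both
 * locks may be dropped and re-taken along the way; ESRCH is returned if
 * the process cannot be re-acquired.
 */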
static int
fasttrap_setdebug(proc_t *p)
{
	LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);

	/*
	 * CS_KILL and CS_HARD will cause code-signing to kill the process
	 * when the process text is modified, so register the intent
	 * to allow invalid access beforehand.
	 */
	if ((proc_getcsflags(p) & (CS_KILL|CS_HARD))) {
		proc_unlock(p);
		for (int i = 0; i < DTRACE_NCLIENTS; i++) {
			dtrace_state_t *state = dtrace_state_get(i);
			if (state == NULL)
				continue;
			if (state->dts_cred.dcr_cred == NULL)
				continue;
			/*
			 * The get_task check determines whether the process
			 * should be flagged so that the cs_allow_invalid
			 * call succeeds. We want the best credential that
			 * any dtrace client has, so try all of them.
			 */

			/*
			 * mac_proc_check_get_task() can trigger upcalls. It's
			 * not safe to hold proc references across upcalls, so
			 * just drop the reference. Given the context, it
			 * should not be possible for the process to actually
			 * disappear.
			 */
			struct proc_ident pident = proc_ident(p);
			sprunlock(p);
			p = PROC_NULL;

			(void) mac_proc_check_get_task(state->dts_cred.dcr_cred, &pident, TASK_FLAVOR_CONTROL);

			p = sprlock(pident.p_pid);
			if (p == PROC_NULL) {
				return (ESRCH);
			}
		}
		int rc = cs_allow_invalid(p);
		proc_lock(p);
		if (rc == 0) {
			return (EACCES);
		}
	}
	return (0);
}

/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is (roughly) a copy of the parent process's so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
	pid_t ppid = proc_getpid(p);
	unsigned int i;

	ASSERT(current_proc() == p);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
	ASSERT(p->p_dtrace_count > 0);
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints. It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection. In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
	if (cp != sprlock(proc_getpid(cp))) {
		printf("fasttrap_fork: sprlock(%d) returned a different proc\n", proc_getpid(cp));
		return;
	}

	proc_lock(cp);
	if (fasttrap_setdebug(cp) == ESRCH) {
		printf("fasttrap_fork: failed to re-acquire proc\n");
		return;
	}
	proc_unlock(cp);

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    tp->ftt_proc->ftpc_acount != 0) {
				fasttrap_tracepoint_remove(cp, tp);

				/*
				 * The count of active providers can only be
				 * decremented (i.e. to zero) during exec,
				 * exit, and removal of a meta provider so it
				 * should be impossible to drop the count
				 * mid-fork.
				 */
				ASSERT(tp->ftt_proc->ftpc_acount != 0);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	/*
	 * Free any ptss pages/entries in the child.
	 */
	dtrace_ptss_fork(p, cp);

	sprunlock(cp);
}


/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
	ASSERT(p == current_proc());
	LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);


	/* APPLE NOTE: Okay, the locking here is really odd and needs some
	 * explaining. This method is always called with the proc_lock held.
	 * We must drop the proc_lock before calling fasttrap_provider_retire
	 * to avoid a deadlock when it takes the bucket lock.
	 *
	 * Next, the dtrace_ptss_exec_exit function requires the sprlock
	 * be held, but not the proc_lock.
	 *
	 * Finally, we must re-acquire the proc_lock.
	 */
	proc_unlock(p);

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);

	/*
	 * APPLE NOTE: We also need to remove any aliased providers.
	 * XXX optimization: track which provider types are instantiated
	 * and only retire as needed.
	 */
	fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
	fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);

	/*
	 * This should be called after it is no longer possible for a user
	 * thread to execute (potentially dtrace instrumented) instructions.
	 */
	lck_mtx_lock(&p->p_dtrace_sprlock);
	dtrace_ptss_exec_exit(p);
	lck_mtx_unlock(&p->p_dtrace_sprlock);

	proc_lock(p);
}


/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
	/*
	 * There are no "default" pid probes.
	 */
}

static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	pid_t pid;
	user_addr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes. This may be our second time through
	 * this path in which case we'll have constructed the tracepoint we'd
	 * like to install. If we can't find a match, and have an allocated
	 * tracepoint ready to go, enable that one now.
	 *
	 * A tracepoint whose process is defunct is also considered defunct.
	 */
again:
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		int rc = 0;
		/*
		 * Note that it's safe to access the active count on the
		 * associated proc structure because we know that at least one
		 * provider (this one) will still be around throughout this
		 * operation.
		 */
		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
		    tp->ftt_proc->ftpc_acount == 0)
			continue;

		/*
		 * Now that we've found a matching tracepoint, it would be
		 * a decent idea to confirm that the tracepoint is still
		 * enabled and the trap instruction hasn't been overwritten.
		 * Since this is a little hairy, we'll punt for now.
		 */
		if (!tp->ftt_installed) {
			if (fasttrap_tracepoint_install(p, tp) != 0)
				rc = FASTTRAP_ENABLE_PARTIAL;
		}
		/*
		 * This can't be the first interested probe. We don't have
		 * to worry about another thread being in the midst of
		 * deleting this tracepoint (which would be the only valid
		 * reason for a tracepoint to have no interested probes)
		 * since we're holding P_PR_LOCK for this process.
		 */
		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

		switch (id->fti_ptype) {
		case DTFTP_ENTRY:
		case DTFTP_OFFSETS:
		case DTFTP_IS_ENABLED:
			id->fti_next = tp->ftt_ids;
			dtrace_membar_producer();
			tp->ftt_ids = id;
			dtrace_membar_producer();
			break;

		case DTFTP_RETURN:
		case DTFTP_POST_OFFSETS:
			id->fti_next = tp->ftt_retids;
			dtrace_membar_producer();
			tp->ftt_retids = id;
			dtrace_membar_producer();
			break;

		default:
			ASSERT(0);
		}

		tp->ftt_retired = 0;

		lck_mtx_unlock(&bucket->ftb_mtx);

		if (new_tp != NULL) {
			new_tp->ftt_ids = NULL;
			new_tp->ftt_retids = NULL;
		}

		return rc;
	}

	/*
	 * If we have a good tracepoint ready to go, install it now while
	 * we have the lock held and no one can screw with us.
	 */
	if (new_tp != NULL) {
		int rc = 0;

		new_tp->ftt_next = bucket->ftb_data;
		dtrace_membar_producer();
		bucket->ftb_data = new_tp;
		dtrace_membar_producer();
		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Activate the tracepoint in the ISA-specific manner.
		 * If this fails, we need to report the failure, but
		 * indicate that this tracepoint must still be disabled
		 * by calling fasttrap_tracepoint_disable().
		 */
		if (fasttrap_tracepoint_install(p, new_tp) != 0)
			rc = FASTTRAP_ENABLE_PARTIAL;
		/*
		 * Increment the count of the number of tracepoints active in
		 * the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count++;


		return (rc);
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Initialize the tracepoint that's been preallocated with the probe.
	 */
	new_tp = probe->ftp_tps[index].fit_tp;
	new_tp->ftt_retired = 0;

	ASSERT(new_tp->ftt_pid == pid);
	ASSERT(new_tp->ftt_pc == pc);
	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
	ASSERT(new_tp->ftt_ids == NULL);
	ASSERT(new_tp->ftt_retids == NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		id->fti_next = NULL;
		new_tp->ftt_ids = id;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		id->fti_next = NULL;
		new_tp->ftt_retids = id;
		break;

	default:
		ASSERT(0);
	}

	/*
	 * If the ISA-dependent initialization goes to plan, go back to the
	 * beginning and try to install this freshly made tracepoint.
	 */
	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
		goto again;

	new_tp->ftt_ids = NULL;
	new_tp->ftt_retids = NULL;

	return (FASTTRAP_ENABLE_FAIL);
}

static void
fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_bucket_t *bucket;
	fasttrap_provider_t *provider = probe->ftp_prov;
	fasttrap_tracepoint_t **pp, *tp;
	fasttrap_id_t *id, **idp;
	pid_t pid;
	user_addr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Find the tracepoint and make sure that our id is one of the
	 * ones registered with it.
	 */
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
		    tp->ftt_proc == provider->ftp_proc)
			break;
	}

	/*
	 * If we somehow lost this tracepoint, we're in a world of hurt.
	 */
	ASSERT(tp != NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		ASSERT(tp->ftt_ids != NULL);
		idp = &tp->ftt_ids;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		ASSERT(tp->ftt_retids != NULL);
		idp = &tp->ftt_retids;
		break;

	default:
		/* Fix compiler warning... */
		idp = NULL;
		ASSERT(0);
	}

	while ((*idp)->fti_probe != probe) {
		idp = &(*idp)->fti_next;
		ASSERT(*idp != NULL);
	}

	id = *idp;
	*idp = id->fti_next;
	dtrace_membar_producer();

	ASSERT(id->fti_probe == probe);

	/*
	 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
	 */
	if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {

		/*
		 * If the current probe's tracepoint is in use, swap it
		 * for an unused tracepoint.
		 */
		if (tp == probe->ftp_tps[index].fit_tp) {
			fasttrap_probe_t *tmp_probe;
			fasttrap_tracepoint_t **tmp_tp;
			uint_t tmp_index;

			if (tp->ftt_ids != NULL) {
				tmp_probe = tp->ftt_ids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			} else {
				tmp_probe = tp->ftt_retids->fti_probe;
				/* LINTED - alignment */
				tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
				tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
			}

			ASSERT(*tmp_tp != NULL);
			ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
			ASSERT((*tmp_tp)->ftt_ids == NULL);
			ASSERT((*tmp_tp)->ftt_retids == NULL);

			probe->ftp_tps[index].fit_tp = *tmp_tp;
			*tmp_tp = tp;

		}

		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Tag the modified probe with the generation in which it was
		 * changed.
		 */
		probe->ftp_gen = fasttrap_mod_gen;
		return;
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * We can't safely remove the tracepoint from the set of active
	 * tracepoints until we've actually removed the fasttrap instruction
	 * from the process's text. We can, however, operate on this
	 * tracepoint secure in the knowledge that no other thread is going to
	 * be looking at it since we hold P_PR_LOCK on the process if it's
	 * live or we hold the provider lock on the process if it's dead and
	 * gone.
	 */

	/*
	 * We only need to remove the actual instruction if we're looking
	 * at an existing process
	 */
	if (p != NULL) {
		/*
		 * If we fail to restore the instruction we need to kill
		 * this process since it's in a completely unrecoverable
		 * state.
		 */
		if (fasttrap_tracepoint_remove(p, tp) != 0)
			fasttrap_sigtrap(p, NULL, pc);

		/*
		 * Decrement the count of the number of tracepoints active
		 * in the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count--;
	}

	/*
	 * Remove the probe from the hash table of active tracepoints.
	 */
	lck_mtx_lock(&bucket->ftb_mtx);
	pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
	ASSERT(*pp != NULL);
	while (*pp != tp) {
		pp = &(*pp)->ftt_next;
		ASSERT(*pp != NULL);
	}

	*pp = tp->ftt_next;
	dtrace_membar_producer();

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Tag the modified probe with the generation in which it was changed.
	 */
	probe->ftp_gen = fasttrap_mod_gen;
}


static void
fasttrap_enable_callbacks(void)
{
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	lck_mtx_lock(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;
	}
	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	lck_mtx_unlock(&fasttrap_count_mtx);
}

static void
fasttrap_disable_callbacks(void)
{
	//ASSERT(MUTEX_HELD(&cpu_lock));

	lck_mtx_lock(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
		dtrace_cpu_t *cur, *cpu = CPU;

		/*
		 * APPLE NOTE: This loop seems broken, it touches every CPU
		 * but the one we're actually running on. Need to ask Sun folks
		 * if that is safe. Scenario is this: We're running on CPU A,
		 * and lock all but A. Then we get preempted, and start running
		 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
		 */
		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_lock_exclusive(&cur->cpu_ft_lock);
			// rw_enter(&cur->cpu_ft_lock, RW_WRITER);
		}

		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;

		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
			// rw_exit(&cur->cpu_ft_lock);
		}
	}
	lck_mtx_unlock(&fasttrap_count_mtx);
}

/*ARGSUSED*/
static int
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	proc_t *p;
	int i, rc;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
	// ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)
		return(0);

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
	if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
		/*
		 * APPLE NOTE: We should never end up here. The Solaris sprlock()
		 * does not return processes with SIDL set, but we always return
		 * the child process.
		 */
		return(0);
	}

	proc_lock(p);
	int p_pid = proc_pid(p);

	rc = fasttrap_setdebug(p);
	switch (rc) {
	case EACCES:
		proc_unlock(p);
		sprunlock(p);
		cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: "
		    "Process does not allow invalid code pages\n", p_pid);
		return (0);
	case ESRCH:
		cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: "
		    "Failed to re-acquire process\n", p_pid);
		return (0);
	default:
		assert(rc == 0);
		break;
	}

	/*
	 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
	 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
	 * To mimic this, we allocate on demand scratch space. If this is the first
	 * time a probe has been enabled in this process, we need to allocate scratch
	 * space for each already existing thread. Now is a good time to do this, as
	 * the target process is suspended and the proc_lock is held.
	 */
	if (p->p_dtrace_ptss_pages == NULL) {
		dtrace_ptss_enable(p);
	}

	proc_unlock(p);

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
	fasttrap_enable_callbacks();

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < (int)probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)
				i--;
			else
				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
			while (i >= 0) {
				fasttrap_tracepoint_disable(p, probe, i);
				i--;
			}

			sprunlock(p);

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();
			return(0);
		}
	}

	sprunlock(p);

	probe->ftp_enabled = 1;
	return (0);
}

/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;
	proc_t *p;
	int i, whack = 0;

	ASSERT(id == probe->ftp_id);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	p = sprlock(probe->ftp_pid);

	lck_mtx_lock(&provider->ftp_mtx);

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < (int)probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);
		}
	}

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

	if (p != NULL) {
		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);

		sprunlock(p);
	} else {
		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);
	}

	if (whack) {
		fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
	}

	if (!probe->ftp_enabled)
		return;

	probe->ftp_enabled = 0;

	// ASSERT(MUTEX_HELD(&cpu_lock));
	fasttrap_disable_callbacks();
}

/*ARGSUSED*/
static void
fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
    dtrace_argdesc_t *desc)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	char *str;
	int i, ndx;

	desc->dtargd_native[0] = '\0';
	desc->dtargd_xlate[0] = '\0';

	if (probe->ftp_prov->ftp_retired != 0 ||
	    desc->dtargd_ndx < 0 ||
	    desc->dtargd_ndx >= probe->ftp_nargs) {
		desc->dtargd_ndx = DTRACE_ARGNONE;
		return;
	}

	ndx = (probe->ftp_argmap != NULL) ?
	    probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;

	if (probe->ftp_ntypes == NULL)
		return;

	str = probe->ftp_ntypes;
	for (i = 0; i < ndx; i++) {
		str += strlen(str) + 1;
	}

	(void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));

	if (probe->ftp_xtypes == NULL)
		return;

	str = probe->ftp_xtypes;
	for (i = 0; i < desc->dtargd_ndx; i++) {
		str += strlen(str) + 1;
	}

	(void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
}

/*ARGSUSED*/
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	unsigned int i;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(fasttrap_total >= probe->ftp_ntps);

	os_atomic_sub(&fasttrap_total, probe->ftp_ntps, relaxed);
	os_atomic_sub(&fasttrap_retired, probe->ftp_ntps, relaxed);

	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);

	for (i = 0; i < probe->ftp_ntps; i++) {
		zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
	}

	if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
	} else {
		size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
		kmem_free(probe, size);
	}
}


static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};

static dtrace_pops_t pid_pops = {
	.dtps_provide = fasttrap_pid_provide,
	.dtps_provide_module = NULL,
	.dtps_enable = fasttrap_pid_enable,
	.dtps_disable = fasttrap_pid_disable,
	.dtps_suspend = NULL,
	.dtps_resume = NULL,
	.dtps_getargdesc = fasttrap_pid_getargdesc,
	.dtps_getargval = fasttrap_pid_getarg,
	.dtps_usermode = NULL,
	.dtps_destroy = fasttrap_pid_destroy
};

static dtrace_pops_t usdt_pops = {
	.dtps_provide = fasttrap_pid_provide,
	.dtps_provide_module = NULL,
	.dtps_enable = fasttrap_pid_enable,
	.dtps_disable = fasttrap_pid_disable,
	.dtps_suspend = NULL,
	.dtps_resume = NULL,
	.dtps_getargdesc = fasttrap_pid_getargdesc,
	.dtps_getargval = fasttrap_usdt_getarg,
	.dtps_usermode = NULL,
	.dtps_destroy = fasttrap_pid_destroy
};
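
/*
 * The pid and usdt provider ops above are identical except for
 * dtps_getargval; argument fetching differs between pid and USDT probes and
 * is handled by the ISA-specific fasttrap_pid_getarg() and
 * fasttrap_usdt_getarg().
 */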
1462
1463static fasttrap_proc_t *
1464fasttrap_proc_lookup(pid_t pid)
1465{
1466 fasttrap_bucket_t *bucket;
1467 fasttrap_proc_t *fprc, *new_fprc;
1468
1469 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1470 lck_mtx_lock(lck: &bucket->ftb_mtx);
1471
1472 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1473 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1474 lck_mtx_lock(lck: &fprc->ftpc_mtx);
1475 lck_mtx_unlock(lck: &bucket->ftb_mtx);
1476 fprc->ftpc_rcount++;
1477 os_atomic_inc(&fprc->ftpc_acount, relaxed);
1478 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1479 lck_mtx_unlock(lck: &fprc->ftpc_mtx);
1480
1481 return (fprc);
1482 }
1483 }
1484
1485 /*
1486 * Drop the bucket lock so we don't try to perform a sleeping
1487 * allocation under it.
1488 */
1489 lck_mtx_unlock(lck: &bucket->ftb_mtx);
1490
1491 new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
1492 ASSERT(new_fprc != NULL);
1493 new_fprc->ftpc_pid = pid;
1494 new_fprc->ftpc_rcount = 1;
1495 new_fprc->ftpc_acount = 1;
1496
1497 lck_mtx_lock(lck: &bucket->ftb_mtx);
1498
1499 /*
1500 * Take another lap through the list to make sure a proc hasn't
1501 * been created for this pid while we weren't under the bucket lock.
1502 */
1503 for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
1504 if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
1505 lck_mtx_lock(lck: &fprc->ftpc_mtx);
1506 lck_mtx_unlock(lck: &bucket->ftb_mtx);
1507 fprc->ftpc_rcount++;
1508 os_atomic_inc(&fprc->ftpc_acount, relaxed);
1509 ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
1510 lck_mtx_unlock(lck: &fprc->ftpc_mtx);
1511
1512 kmem_free(new_fprc, sizeof (fasttrap_proc_t));
1513
1514 return (fprc);
1515 }
1516 }
1517
1518 /*
1519 * APPLE NOTE: We have to initialize all locks explicitly
1520 */
1521 lck_mtx_init(lck: &new_fprc->ftpc_mtx, grp: &fasttrap_lck_grp, attr: &fasttrap_lck_attr);
1522
1523 new_fprc->ftpc_next = bucket->ftb_data;
1524 bucket->ftb_data = new_fprc;
1525
1526 lck_mtx_unlock(lck: &bucket->ftb_mtx);
1527
1528 return (new_fprc);
1529}
1530
1531static void
1532fasttrap_proc_release(fasttrap_proc_t *proc)
1533{
1534 fasttrap_bucket_t *bucket;
1535 fasttrap_proc_t *fprc, **fprcp;
1536 pid_t pid = proc->ftpc_pid;
1537
1538 lck_mtx_lock(lck: &proc->ftpc_mtx);
1539
1540 ASSERT(proc->ftpc_rcount != 0);
1541 ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);
1542
1543 if (--proc->ftpc_rcount != 0) {
1544 lck_mtx_unlock(lck: &proc->ftpc_mtx);
1545 return;
1546 }
1547
1548 lck_mtx_unlock(lck: &proc->ftpc_mtx);
1549
1550 /*
1551 * There should definitely be no live providers associated with this
1552 * process at this point.
1553 */
1554 ASSERT(proc->ftpc_acount == 0);
1555
1556 bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
1557 lck_mtx_lock(lck: &bucket->ftb_mtx);
1558
1559 fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
1560 while ((fprc = *fprcp) != NULL) {
1561 if (fprc == proc)
1562 break;
1563
1564 fprcp = &fprc->ftpc_next;
1565 }
1566
1567 /*
1568 * Something strange has happened if we can't find the proc.
1569 */
1570 ASSERT(fprc != NULL);
1571
1572 *fprcp = fprc->ftpc_next;
1573
1574 lck_mtx_unlock(lck: &bucket->ftb_mtx);
1575
1576 /*
1577 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
1578 * memory is freed even without the destroy. Maybe accounting cleanup?
1579 */
1580 lck_mtx_destroy(lck: &fprc->ftpc_mtx, grp: &fasttrap_lck_grp);
1581
1582 kmem_free(fprc, sizeof (fasttrap_proc_t));
1583}
1584
1585/*
1586 * Lookup a fasttrap-managed provider based on its name and associated proc.
1587 * A reference to the proc must be held for the duration of the call.
1588 * If the pattr argument is non-NULL, this function instantiates the provider
1589 * if it doesn't exist otherwise it returns NULL. The provider is returned
1590 * with its lock held.
1591 */
1592static fasttrap_provider_t *
1593fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
1594 const dtrace_pattr_t *pattr)
1595{
1596 pid_t pid = proc_getpid(p);
1597 fasttrap_provider_t *fp, *new_fp = NULL;
1598 fasttrap_bucket_t *bucket;
1599 char provname[DTRACE_PROVNAMELEN];
1600 cred_t *cred;
1601
1602 ASSERT(strlen(name) < sizeof (fp->ftp_name));
1603 ASSERT(pattr != NULL);
1604
1605 bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
1606 lck_mtx_lock(lck: &bucket->ftb_mtx);
1607
1608 /*
1609 * Take a lap through the list and return the match if we find it.
1610 */
1611 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1612 if (fp->ftp_pid == pid &&
1613 fp->ftp_provider_type == provider_type &&
1614 strncmp(s1: fp->ftp_name, s2: name, n: sizeof(fp->ftp_name)) == 0 &&
1615 !fp->ftp_retired) {
1616 lck_mtx_lock(lck: &fp->ftp_mtx);
1617 lck_mtx_unlock(lck: &bucket->ftb_mtx);
1618 return (fp);
1619 }
1620 }
1621
1622 /*
1623 * Drop the bucket lock so we don't try to perform a sleeping
1624 * allocation under it.
1625 */
1626 lck_mtx_unlock(lck: &bucket->ftb_mtx);
1627
1628 /*
1629 * Make sure the process isn't a child
1630 * isn't a zombie (but may be in fork).
1631 */
1632 proc_lock(p);
1633 if (p->p_lflag & P_LEXIT) {
1634 proc_unlock(p);
1635 return (NULL);
1636 }
1637
1638 /*
1639 * Increment p_dtrace_probes so that the process knows to inform us
1640 * when it exits or execs. fasttrap_provider_free() decrements this
1641 * when we're done with this provider.
1642 */
1643 p->p_dtrace_probes++;
1644
1645 /*
1646 * Grab the credentials for this process so we have
1647 * something to pass to dtrace_register().
1648 * APPLE NOTE: We have no equivalent to crhold,
1649 * even though there is a cr_ref filed in ucred.
1650 */
1651 cred = kauth_cred_proc_ref(procp: p);
1652 proc_unlock(p);
1653
1654 new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
1655 ASSERT(new_fp != NULL);
1656 new_fp->ftp_pid = proc_getpid(p);
1657 new_fp->ftp_proc = fasttrap_proc_lookup(pid);
1658 new_fp->ftp_provider_type = provider_type;
1659
1660 /*
1661 * APPLE NOTE: locks require explicit init
1662 */
1663 lck_mtx_init(lck: &new_fp->ftp_mtx, grp: &fasttrap_lck_grp, attr: &fasttrap_lck_attr);
1664 lck_mtx_init(lck: &new_fp->ftp_cmtx, grp: &fasttrap_lck_grp, attr: &fasttrap_lck_attr);
1665
1666 ASSERT(new_fp->ftp_proc != NULL);
1667
1668 lck_mtx_lock(lck: &bucket->ftb_mtx);
1669
1670 /*
1671 * Take another lap through the list to make sure a provider hasn't
1672 * been created for this pid while we weren't under the bucket lock.
1673 */
1674 for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
1675 if (fp->ftp_pid == pid && strncmp(s1: fp->ftp_name, s2: name, n: sizeof(fp->ftp_name)) == 0 &&
1676 !fp->ftp_retired) {
1677 lck_mtx_lock(lck: &fp->ftp_mtx);
1678 lck_mtx_unlock(lck: &bucket->ftb_mtx);
1679 fasttrap_provider_free(new_fp);
1680 kauth_cred_unref(&cred);
1681 return (fp);
1682 }
1683 }
1684
1685 (void) strlcpy(dst: new_fp->ftp_name, src: name, n: sizeof(new_fp->ftp_name));
1686
1687 /*
1688 * Fail and return NULL if either the provider name is too long
1689 * or we fail to register this new provider with the DTrace
1690 * framework. Note that this is the only place we ever construct
1691 * the full provider name -- we keep it in pieces in the provider
1692 * structure.
1693 */
1694 if (snprintf(provname, count: sizeof (provname), "%s%u", name, (uint_t)pid) >=
1695 (int)sizeof (provname) ||
1696 dtrace_register(provname, pattr,
1697 DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
1698 pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
1699 &new_fp->ftp_provid) != 0) {
1700 lck_mtx_unlock(lck: &bucket->ftb_mtx);
1701 fasttrap_provider_free(new_fp);
1702 kauth_cred_unref(&cred);
1703 return (NULL);
1704 }
1705
1706 new_fp->ftp_next = bucket->ftb_data;
1707 bucket->ftb_data = new_fp;
1708
1709 lck_mtx_lock(lck: &new_fp->ftp_mtx);
1710 lck_mtx_unlock(lck: &bucket->ftb_mtx);
1711
1712 kauth_cred_unref(&cred);
1713
1714 return (new_fp);
1715}
1716
1717static void
1718fasttrap_provider_free(fasttrap_provider_t *provider)
1719{
1720 pid_t pid = provider->ftp_pid;
1721 proc_t *p;
1722
1723 /*
1724 * There need to be no associated enabled probes, no consumers
1725 * creating probes, and no meta providers referencing this provider.
1726 */
1727 ASSERT(provider->ftp_rcount == 0);
1728 ASSERT(provider->ftp_ccount == 0);
1729 ASSERT(provider->ftp_mcount == 0);
1730
1731 /*
1732 * If this provider hasn't been retired, we need to explicitly drop the
1733 * count of active providers on the associated process structure.
1734 */
1735 if (!provider->ftp_retired) {
1736 os_atomic_dec(&provider->ftp_proc->ftpc_acount, relaxed);
1737 ASSERT(provider->ftp_proc->ftpc_acount <
1738 provider->ftp_proc->ftpc_rcount);
1739 }
1740
1741 fasttrap_proc_release(proc: provider->ftp_proc);
1742
1743 /*
1744 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
1745 * memory is freed even without the destroy. Maybe accounting cleanup?
1746 */
1747 lck_mtx_destroy(lck: &provider->ftp_mtx, grp: &fasttrap_lck_grp);
1748 lck_mtx_destroy(lck: &provider->ftp_cmtx, grp: &fasttrap_lck_grp);
1749
1750 kmem_free(provider, sizeof (fasttrap_provider_t));
1751
1752 /*
1753 * Decrement p_dtrace_probes on the process whose provider we're
1754 * freeing. We don't have to worry about clobbering somone else's
1755 * modifications to it because we have locked the bucket that
1756 * corresponds to this process's hash chain in the provider hash
1757 * table. Don't sweat it if we can't find the process.
1758 */
1759 if ((p = proc_find(pid)) == NULL) {
1760 return;
1761 }
1762
1763 proc_lock(p);
1764 p->p_dtrace_probes--;
1765 proc_unlock(p);
1766
1767 proc_rele(p);
1768}

static void
fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(proc_getpid(p), name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == proc_getpid(p) && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	lck_mtx_lock(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0) {
		lck_mtx_unlock(&fp->ftp_mtx);
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active count on the
	 * proc releases our hold, and when this reaches zero (as it will during
	 * exit or exec) the proc and associated providers become defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table. (A skeleton of this lock-ordering discipline appears in the
	 * sketch after this function.)
	 */
	os_atomic_dec(&fp->ftp_proc->ftpc_acount, relaxed);
	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

	/*
	 * Add this provider's probes to the retired count and
	 * make sure we don't add them twice.
	 */
	os_atomic_add(&fasttrap_retired, fp->ftp_pcount, relaxed);
	fp->ftp_pcount = 0;

	fp->ftp_retired = 1;
	fp->ftp_marked = 1;
	provid = fp->ftp_provid;
	lck_mtx_unlock(&fp->ftp_mtx);

	/*
	 * We don't have to worry about invalidating the same provider twice
	 * since fasttrap_provider_lookup() will ignore providers that have
	 * been marked as retired.
	 */
	dtrace_invalidate(provid);

	lck_mtx_unlock(&bucket->ftb_mtx);

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
}
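
/*
 * Illustrative sketch (not compiled; guarded with #if 0 like the example
 * later in this file): the lock-ordering discipline from
 * fasttrap_provider_retire() reduced to its skeleton. Names mirror the
 * function above; this is a sketch, not additional logic.
 */
#if 0
	lck_mtx_lock(&bucket->ftb_mtx);		/* 1. bucket lock stabilizes the hash chain */
	lck_mtx_lock(&fp->ftp_mtx);		/* 2. provider lock guards provider state */
	/* ... mark retired, transfer probe counts ... */
	lck_mtx_unlock(&fp->ftp_mtx);		/* 3. drop the provider lock BEFORE DTrace calls, */
	dtrace_invalidate(provid);		/*    since DTrace callbacks re-acquire it */
	lck_mtx_unlock(&bucket->ftb_mtx);
#endif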

static int
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
	const uint32_t a = *(const uint32_t *)ap;
	const uint32_t b = *(const uint32_t *)bp;

	/* Compare explicitly: returning a - b can wrap and misreport order. */
	return ((a > b) - (a < b));
}

static int
fasttrap_uint64_cmp(const void *ap, const void *bp)
{
	const uint64_t a = *(const uint64_t *)ap;
	const uint64_t b = *(const uint64_t *)bp;

	/* Compare explicitly: truncating a - b to int can misreport order. */
	return ((a > b) - (a < b));
}

static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
	proc_t *p;
	fasttrap_provider_t *provider;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	const char *name;
	unsigned int i, aframes, whack;

	/*
	 * There needs to be at least one desired trace point.
	 */
	if (pdata->ftps_noffs == 0)
		return (EINVAL);

	switch (pdata->ftps_probe_type) {
	case DTFTP_ENTRY:
		name = "entry";
		aframes = FASTTRAP_ENTRY_AFRAMES;
		break;
	case DTFTP_RETURN:
		name = "return";
		aframes = FASTTRAP_RETURN_AFRAMES;
		break;
	case DTFTP_OFFSETS:
		aframes = 0;
		name = NULL;
		break;
	default:
		return (EINVAL);
	}

	const char* provider_name;
	switch (pdata->ftps_provider_type) {
	case DTFTP_PROVIDER_PID:
		provider_name = FASTTRAP_PID_NAME;
		break;
	case DTFTP_PROVIDER_OBJC:
		provider_name = FASTTRAP_OBJC_NAME;
		break;
	case DTFTP_PROVIDER_ONESHOT:
		provider_name = FASTTRAP_ONESHOT_NAME;
		break;
	default:
		return (EINVAL);
	}

	p = proc_find(pdata->ftps_pid);
	if (p == PROC_NULL)
		return (ESRCH);

	if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
	    provider_name, &pid_attr)) == NULL) {
		proc_rele(p);
		return (ESRCH);
	}

	proc_rele(p);

	/*
	 * Increment this reference count to indicate that a consumer is
	 * actively adding a new probe associated with this provider. This
	 * prevents the provider from being deleted -- we'll need to check
	 * for pending deletions when we drop this reference count.
	 */
	provider->ftp_ccount++;
	lck_mtx_unlock(&provider->ftp_mtx);

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes. We must drop the provider lock
	 * before taking this lock to avoid a three-way deadlock with the
	 * DTrace framework.
	 */
	lck_mtx_lock(&provider->ftp_cmtx);

	if (name == NULL) {
		for (i = 0; i < pdata->ftps_noffs; i++) {
			char name_str[17];

			(void) snprintf(name_str, sizeof(name_str), "%llx",
			    (uint64_t)pdata->ftps_offs[i]);

			if (dtrace_probe_lookup(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
				continue;

			os_atomic_inc(&fasttrap_total, relaxed);
			if (fasttrap_total > fasttrap_max) {
				os_atomic_dec(&fasttrap_total, relaxed);
				goto no_mem;
			}
			provider->ftp_pcount++;

			pp = zalloc_flags(fasttrap_probe_t_zones[1], Z_WAITOK | Z_ZERO);

			pp->ftp_prov = provider;
			pp->ftp_faddr = pdata->ftps_pc;
			pp->ftp_fsize = pdata->ftps_size;
			pp->ftp_pid = pdata->ftps_pid;
			pp->ftp_ntps = 1;

			tp = zalloc_flags(fasttrap_tracepoint_t_zone, Z_WAITOK | Z_ZERO);

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

#if defined(__arm64__)
			/*
			 * On arm the subinfo is used to distinguish between arm
			 * and thumb modes. On arm64 there is no thumb mode, so
			 * this field is simply initialized to 0 on its way
			 * into the kernel.
			 */
			tp->ftt_fntype = pdata->ftps_arch_subinfo;
#endif

			pp->ftp_tps[0].fit_tp = tp;
			pp->ftp_tps[0].fit_id.fti_probe = pp;
			pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
			pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str,
			    FASTTRAP_OFFSET_AFRAMES, pp);
		}

	} else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
	    pdata->ftps_func, name) == 0) {
		os_atomic_add(&fasttrap_total, pdata->ftps_noffs, relaxed);

		if (fasttrap_total > fasttrap_max) {
			os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
			goto no_mem;
		}

		/*
		 * Make sure all tracepoint program counter values are unique.
		 * We later assume that each probe has exactly one tracepoint
		 * for a given pc. (The sort-then-scan idiom used here is
		 * sketched after this function.)
		 */
		qsort(pdata->ftps_offs, pdata->ftps_noffs,
		    sizeof (uint64_t), fasttrap_uint64_cmp);
		for (i = 1; i < pdata->ftps_noffs; i++) {
			if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
				continue;

			os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
			goto no_mem;
		}
		provider->ftp_pcount += pdata->ftps_noffs;
		ASSERT(pdata->ftps_noffs > 0);
		if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
			pp = zalloc_flags(fasttrap_probe_t_zones[pdata->ftps_noffs],
			    Z_WAITOK | Z_ZERO);
		} else {
			pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
		}

		pp->ftp_prov = provider;
		pp->ftp_faddr = pdata->ftps_pc;
		pp->ftp_fsize = pdata->ftps_size;
		pp->ftp_pid = pdata->ftps_pid;
		pp->ftp_ntps = pdata->ftps_noffs;

		for (i = 0; i < pdata->ftps_noffs; i++) {
			tp = zalloc_flags(fasttrap_tracepoint_t_zone, Z_WAITOK | Z_ZERO);
			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

#if defined(__arm64__)
			/*
			 * On arm the subinfo is used to distinguish between arm
			 * and thumb modes. On arm64 there is no thumb mode, so
			 * this field is simply initialized to 0 on its way
			 * into the kernel.
			 */
			tp->ftt_fntype = pdata->ftps_arch_subinfo;
#endif
			pp->ftp_tps[i].fit_tp = tp;
			pp->ftp_tps[i].fit_id.fti_probe = pp;
			pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
		}

		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
		    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
	}

	lck_mtx_unlock(&provider->ftp_cmtx);

	/*
	 * We know that the provider is still valid since we incremented the
	 * creation reference count. If someone tried to clean up this provider
	 * while we were using it (e.g. because the process called exec(2) or
	 * exit(2)), take note of that and try to clean it up now.
	 */
	lck_mtx_lock(&provider->ftp_mtx);
	provider->ftp_ccount--;
	whack = provider->ftp_retired;
	lck_mtx_unlock(&provider->ftp_mtx);

	if (whack)
		fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);

	return (0);

no_mem:
	/*
	 * If we've exhausted the allowable resources, we'll try to remove
	 * this provider to free some up. This is to cover the case where
	 * the user has accidentally created many more probes than was
	 * intended (e.g. pid123:::).
	 */
	lck_mtx_unlock(&provider->ftp_cmtx);
	lck_mtx_lock(&provider->ftp_mtx);
	provider->ftp_ccount--;
	provider->ftp_marked = 1;
	lck_mtx_unlock(&provider->ftp_mtx);

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);

	return (ENOMEM);
}
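
/*
 * Illustrative sketch (not compiled): the sort-then-scan uniqueness idiom
 * used in fasttrap_add_probe() and fasttrap_meta_create_probe(). Sorting
 * first makes any duplicates adjacent, so one linear pass detects them.
 * The function and parameter names here are hypothetical.
 */
#if 0
static boolean_t
fasttrap_offsets_unique(uint64_t *offs, uint64_t noffs)
{
	uint64_t i;

	qsort(offs, noffs, sizeof (uint64_t), fasttrap_uint64_cmp);
	for (i = 1; i < noffs; i++) {
		if (offs[i] <= offs[i - 1])	/* equal neighbors: duplicate */
			return (B_FALSE);
	}
	return (B_TRUE);
}
#endif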

/*ARGSUSED*/
static void *
fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
	fasttrap_provider_t *provider;

	/*
	 * A 32-bit unsigned integer (like a pid for example) can be
	 * expressed in 10 or fewer decimal digits. Make sure that we'll
	 * have enough space for the provider name.
	 */
	if (strlen(dhpv->dthpv_provname) + 10 >=
	    sizeof (provider->ftp_name)) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "name too long to accommodate pid", dhpv->dthpv_provname);
		return (NULL);
	}
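
	/*
	 * Illustrative sketch (not compiled): the "+ 10" above reserves room
	 * for the decimal pid (UINT32_MAX has 10 digits) appended when the
	 * per-process provider name is materialized, roughly as below. The
	 * local buffer name is hypothetical.
	 */
#if 0
	char provname[sizeof (provider->ftp_name)];
	(void) snprintf(provname, sizeof (provname), "%s%u",
	    dhpv->dthpv_provname, (uint_t)proc_getpid(p));
#endif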

	/*
	 * Don't let folks spoof the true pid provider.
	 */
	if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_PID_NAME);
		return (NULL);
	}

	/*
	 * APPLE NOTE: We also need to check the objc and oneshot pid provider types.
	 */
	if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_OBJC_NAME);
		return (NULL);
	}
	if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
		cmn_err(CE_WARN, "failed to instantiate provider %s: "
		    "%s is an invalid name", dhpv->dthpv_provname,
		    FASTTRAP_ONESHOT_NAME);
		return (NULL);
	}

	/*
	 * The highest stability class that fasttrap supports is ISA; cap
	 * the stability of the new provider accordingly.
	 */
	if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
	if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
		dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;

	if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
	    &dhpv->dthpv_pattr)) == NULL) {
		cmn_err(CE_WARN, "failed to instantiate provider %s for "
		    "process %u", dhpv->dthpv_provname, (uint_t)proc_getpid(p));
		return (NULL);
	}

	/*
	 * APPLE NOTE!
	 *
	 * USDT probes (fasttrap meta probes) are very expensive to create.
	 * Profiling has shown that the largest single cost is verifying that
	 * dtrace hasn't already created a given meta_probe. The reason for
	 * this is dtrace_match() often has to strcmp ~100 hashed entries for
	 * each static probe being created. We want to get rid of that check.
	 * The simplest way of eliminating it is to deny the ability to add
	 * probes to an existing provider. If the provider already exists, BZZT!
	 * This still leaves the possibility of intentionally malformed DOF
	 * having duplicate probes. However, duplicate probes are not fatal,
	 * and there is no way to get that by accident, so we will not check
	 * for that case.
	 *
	 * UPDATE: It turns out there are several use cases that require adding
	 * probes to existing providers. Disabling the dtrace_probe_lookup()
	 * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
	 */

	/*
	 * Up the meta provider count so this provider isn't removed until
	 * the meta provider has been told to remove it.
	 */
	provider->ftp_mcount++;

	lck_mtx_unlock(&provider->ftp_mtx);

	return (provider);
}

/*ARGSUSED*/
static void
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
#pragma unused(arg)
	fasttrap_provider_t *provider = parg;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	unsigned int i, j;
	uint32_t ntps;

	/*
	 * Since the meta provider count is non-zero we don't have to worry
	 * about this provider disappearing.
	 */
	ASSERT(provider->ftp_mcount > 0);

	/*
	 * The offsets must be unique.
	 */
	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_noffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
			return;
	}

	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
			return;
	}

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes.
	 */
	lck_mtx_lock(&provider->ftp_cmtx);

#if 0
	/*
	 * APPLE NOTE: This is hideously expensive. See note in
	 * fasttrap_meta_provide() for why we can get away without
	 * checking here.
	 */
	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
		lck_mtx_unlock(&provider->ftp_cmtx);
		return;
	}
#endif

	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
	ASSERT(ntps > 0);

	os_atomic_add(&fasttrap_total, ntps, relaxed);

	if (fasttrap_total > fasttrap_max) {
		os_atomic_sub(&fasttrap_total, ntps, relaxed);
		lck_mtx_unlock(&provider->ftp_cmtx);
		return;
	}

	provider->ftp_pcount += ntps;

	if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		pp = zalloc_flags(fasttrap_probe_t_zones[ntps], Z_WAITOK | Z_ZERO);
	} else {
		pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
	}

	pp->ftp_prov = provider;
	pp->ftp_pid = provider->ftp_pid;
	pp->ftp_ntps = ntps;
	pp->ftp_nargs = dhpb->dthpb_xargc;
	pp->ftp_xtypes = dhpb->dthpb_xtypes;
	pp->ftp_ntypes = dhpb->dthpb_ntypes;

	/*
	 * First create a tracepoint for each actual point of interest.
	 */
	for (i = 0; i < dhpb->dthpb_noffs; i++) {
		tp = zalloc_flags(fasttrap_tracepoint_t_zone, Z_WAITOK | Z_ZERO);

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go back one byte, to point at the first NOP.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
#elif defined(__arm64__)
		/*
		 * All ARM and ARM64 probes are zero offset. We need to zero out the
		 * thumb bit because we still support 32-bit user processes.
		 * On 64-bit user processes, bit zero won't be set anyway.
		 */
		tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i]) & ~0x1UL;
		tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
	}

	/*
	 * Then create a tracepoint for each is-enabled point.
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = zalloc_flags(fasttrap_tracepoint_t_zone, Z_WAITOK | Z_ZERO);

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
#elif defined(__arm64__)
		/*
		 * All ARM and ARM64 probes are zero offset. We need to zero out the
		 * thumb bit because we still support 32-bit user processes.
		 * On 64-bit user processes, bit zero won't be set anyway.
		 */
		tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j]) & ~0x1UL;
		tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL (see the sketch below).
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}
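
	/*
	 * Illustrative sketch (not compiled): how a remapping table like this
	 * is typically consulted at probe-fire time -- an identity mapping
	 * needs no table at all. `argno' is a hypothetical name for the
	 * argument index the consumer asked for.
	 */
#if 0
	if (pp->ftp_argmap != NULL)
		argno = pp->ftp_argmap[argno];	/* translate to the raw argument slot */
#endif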

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	lck_mtx_unlock(&provider->ftp_cmtx);
}

/*ARGSUSED*/
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes; no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
}

static char*
fasttrap_meta_provider_name(void *arg)
{
	fasttrap_provider_t *fprovider = arg;
	dtrace_provider_t *provider = (dtrace_provider_t*)(fprovider->ftp_provid);
	return provider->dtpv_name;
}

static dtrace_mops_t fasttrap_mops = {
	.dtms_create_probe = fasttrap_meta_create_probe,
	.dtms_provide_proc = fasttrap_meta_provide,
	.dtms_remove_proc = fasttrap_meta_remove,
	.dtms_provider_name = fasttrap_meta_provider_name
};

/*
 * Validate a null-terminated string. If str is not null-terminated,
 * or is not a valid UTF-8 string, the function returns -1. Otherwise, 0 is
 * returned.
 *
 * str: string to validate.
 * maxlen: maximum length of the string, terminating NUL byte included.
 */
static int
fasttrap_validatestr(char const* str, size_t maxlen) {
	size_t len;

	assert(str);
	assert(maxlen != 0);

	/* Check if the string is null-terminated. */
	len = strnlen(str, maxlen);
	if (len >= maxlen)
		return -1;

	/* Finally, check for UTF-8 validity. */
	return utf8_validatestr((unsigned const char*) str, len);
}
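
/*
 * Illustrative sketch (not compiled): typical use, as in fasttrap_ioctl()
 * below. Note that strnlen() returns maxlen when no NUL terminator appears
 * within maxlen bytes, which is why `len >= maxlen' above means
 * "unterminated".
 */
#if 0
	if (fasttrap_validatestr(probe->ftps_func, sizeof (probe->ftps_func)) != 0)
		return (EINVAL);
#endif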

/*
 * Checks that the provided credentials are allowed to debug the target
 * process.
 */
static int
fasttrap_check_cred_priv(cred_t *cr, proc_t *p)
{
	int err = 0;

	/* Only root can use DTrace. */
	if (!kauth_cred_issuser(cr)) {
		err = EPERM;
		goto out;
	}

	/* Process is marked as no attach. */
	if (ISSET(p->p_lflag, P_LNOATTACH)) {
		err = EBUSY;
		goto out;
	}

#if CONFIG_MACF
	/* Check with the MAC framework when it is enabled. */
	struct proc_ident cur_ident = proc_ident(current_proc());
	struct proc_ident p_ident = proc_ident(p);

	/* Do not hold a ref to the proc here, to avoid deadlock. */
	proc_rele(p);
	err = mac_proc_check_debug(&cur_ident, cr, &p_ident);

	if (proc_find_ident(&p_ident) == PROC_NULL) {
		err = ESRCH;
		goto out_no_proc;
	}
#endif /* CONFIG_MACF */

out:
	proc_rele(p);

out_no_proc:
	return err;
}

/*ARGSUSED*/
static int
fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
{
#pragma unused(dev, md, rv)
	if (!dtrace_attached())
		return (EAGAIN);

	if (cmd == FASTTRAPIOC_MAKEPROBE) {
		fasttrap_probe_spec_t *probe;
		uint64_t noffs;
		size_t size;
		int ret;

		if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
		    sizeof (probe->ftps_noffs)))
			return (EFAULT);

		/*
		 * Probes must have at least one tracepoint.
		 */
		if (noffs == 0)
			return (EINVAL);

		/*
		 * We want to check the number of noffs before doing
		 * sizing math, to prevent potential buffer overflows.
		 */
		if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
			return (ENOMEM);

		size = sizeof (fasttrap_probe_spec_t) +
		    sizeof (probe->ftps_offs[0]) * (noffs - 1);
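
		/*
		 * Illustrative note (worked example): the guard above caps the
		 * allocation at 1 MiB. Since fasttrap_probe_spec_t already
		 * embeds one ftps_offs element, the size computation uses
		 * (noffs - 1) additional 8-byte offsets; checking noffs
		 * against (1 MiB - sizeof (spec)) / 8 first keeps that
		 * multiplication from overflowing.
		 */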

		probe = kmem_alloc(size, KM_SLEEP);

		if (copyin(arg, probe, size) != 0 ||
		    probe->ftps_noffs != noffs) {
			kmem_free(probe, size);
			return (EFAULT);
		}

		/*
		 * Verify that the function and module strings contain no
		 * funny characters.
		 */
		if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
			ret = EINVAL;
			goto err;
		}

		if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
			ret = EINVAL;
			goto err;
		}

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = probe->ftps_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				ret = ESRCH;
				goto err;
			}

			ret = fasttrap_check_cred_priv(cr, p);
			if (ret != 0) {
				goto err;
			}
		}

		ret = fasttrap_add_probe(probe);

err:
		kmem_free(probe, size);

		return (ret);

	} else if (cmd == FASTTRAPIOC_GETINSTR) {
		fasttrap_instr_query_t instr;
		fasttrap_tracepoint_t *tp;
		uint_t index;
		int ret;

		if (copyin(arg, &instr, sizeof (instr)) != 0)
			return (EFAULT);

		if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
			proc_t *p;
			pid_t pid = instr.ftiq_pid;

			/*
			 * Report an error if the process doesn't exist
			 * or is actively being birthed.
			 */
			if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
				if (p != PROC_NULL)
					proc_rele(p);
				return (ESRCH);
			}

			ret = fasttrap_check_cred_priv(cr, p);
			if (ret != 0) {
				return (ret);
			}
		}

		index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);

		lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
		tp = fasttrap_tpoints.fth_table[index].ftb_data;
		while (tp != NULL) {
			if (instr.ftiq_pid == tp->ftt_pid &&
			    instr.ftiq_pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;

			tp = tp->ftt_next;
		}

		if (tp == NULL) {
			lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
			return (ENOENT);
		}

		bcopy(&tp->ftt_instr, &instr.ftiq_instr,
		    sizeof (instr.ftiq_instr));
		lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);

		if (copyout(&instr, arg, sizeof (instr)) != 0)
			return (EFAULT);

		return (0);
	}

	return (EINVAL);
}
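
/*
 * Illustrative sketch (not compiled, user-space view): how a consumer might
 * drive FASTTRAPIOC_MAKEPROBE. The struct and ioctl names come from the
 * kernel code above; the device path, the open/ioctl flow, and the field
 * values are assumptions for illustration, not a documented API.
 */
#if 0
	fasttrap_probe_spec_t spec = { 0 };	/* holds one inline offset */

	spec.ftps_pid = target_pid;		/* hypothetical target */
	spec.ftps_probe_type = DTFTP_ENTRY;
	spec.ftps_provider_type = DTFTP_PROVIDER_PID;
	spec.ftps_pc = function_base;		/* hypothetical address */
	spec.ftps_noffs = 1;
	spec.ftps_offs[0] = 0;			/* trace the first instruction */

	int fd = open("/dev/fasttrap", O_RDWR);
	if (ioctl(fd, FASTTRAPIOC_MAKEPROBE, &spec) == -1) {
		/* errno carries the packed result; see _fasttrap_ioctl() below */
	}
#endif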

static void
fasttrap_attach(void)
{
	ulong_t nent;
	unsigned int i;

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 */
	dtrace_fasttrap_fork_ptr = &fasttrap_fork;
	dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
	dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;

	/*
	 * APPLE NOTE: We size the maximum number of fasttrap probes
	 * based on system memory: 100k probes per 256M of system memory.
	 * Yes, this is a WAG.
	 */
	fasttrap_max = (sane_size >> 28) * 100000;

	if (fasttrap_max == 0)
		fasttrap_max = 50000;
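
	/*
	 * Illustrative note (worked example): sane_size >> 28 counts 256 MiB
	 * units of physical memory, so an 8 GiB machine gets
	 * 32 * 100000 = 3,200,000 probes, while machines under 256 MiB fall
	 * back to the 50000 floor above.
	 */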

	fasttrap_total = 0;
	fasttrap_retired = 0;

	/*
	 * Conjure up the tracepoints hashtable...
	 */
#ifdef illumos
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
#else
	nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
#endif

	if (nent <= 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_tpoints.fth_table != NULL);

	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
		    &fasttrap_lck_attr);
	}
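
	/*
	 * Illustrative sketch (not compiled): the table size is rounded to a
	 * power of two so that `nent & (nent - 1)' is zero and bucket
	 * selection can be a mask instead of a modulo:
	 */
#if 0
	bucket = &fasttrap_tpoints.fth_table[hash & fasttrap_tpoints.fth_mask];
#endif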

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_provs.fth_table != NULL);

	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
		    &fasttrap_lck_attr);
	}

	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_procs.fth_table != NULL);

#ifndef illumos
	for (i = 0; i < fasttrap_procs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
		    &fasttrap_lck_attr);
	}
#endif

	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);
}

static int
_fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev, flags, devtype, p)
	return 0;
}

static int
_fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
{
	int err, rv = 0;
	user_addr_t uaddrp;

	if (proc_is64bit(p)) {
		uaddrp = *(user_addr_t *)data;
	} else {
		uaddrp = (user_addr_t) *(uint32_t *)data;
	}

	err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);

	/* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
	if (err != 0) {
		ASSERT( (err & 0xfffff000) == 0 );
		return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
	} else if (rv != 0) {
		ASSERT( (rv & 0xfff00000) == 0 );
		return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
	} else
		return 0;
}
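
/*
 * Illustrative sketch (not compiled, user-space view): undoing the packing
 * above. An errno below 4096 is a genuine error code; anything larger
 * carries the Solaris-style return value shifted up by 12 bits.
 */
#if 0
	if (ioctl(fd, cmd, arg) == -1) {
		if (errno < 4096)
			err = errno;		/* real error code, 12 bits */
		else
			rv = errno >> 12;	/* overloaded return value, 20 bits */
	}
#endif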

static int fasttrap_inited = 0;

#define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */

static const struct cdevsw fasttrap_cdevsw =
{
	.d_open = _fasttrap_open,
	.d_close = eno_opcl,
	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_ioctl = _fasttrap_ioctl,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
};

void fasttrap_init(void);

void
fasttrap_init(void)
{
	/*
	 * This function is now invoked from multiple places: any open of
	 * /dev/dtrace, and also dtrace_init if dtrace_dof_mode is
	 * DTRACE_DOF_MODE_NON_LAZY.
	 *
	 * The reason is to delay allocating the (rather large) resources as
	 * late as possible.
	 */
	if (!fasttrap_inited) {
		int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);

		if (majdevno < 0) {
			// FIX ME! What kind of error reporting to do here?
			printf("fasttrap_init: failed to allocate a major number!\n");
			return;
		}

		dev_t device = makedev((uint32_t)majdevno, 0);
		if (NULL == devfs_make_node(device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap")) {
			return;
		}

		/*
		 * fasttrap_probe_t's are variable in size. We use an array of zones to
		 * cover the most common sizes.
		 */
		int i;
		for (i = 1; i < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
			fasttrap_probe_t_zones[i] =
			    zone_create(fasttrap_probe_t_zone_names[i],
			    offsetof(fasttrap_probe_t, ftp_tps[i]), ZC_NONE);
		}

		fasttrap_attach();

		/*
		 * Start the fasttrap cleanup thread.
		 */
		kern_return_t res = kernel_thread_start_priority((thread_continue_t)fasttrap_pid_cleanup_cb, NULL, 46 /* BASEPRI_BACKGROUND */, &fasttrap_cleanup_thread);
		if (res != KERN_SUCCESS) {
			panic("Could not create fasttrap_cleanup_thread");
		}
		thread_set_thread_name(fasttrap_cleanup_thread, "dtrace_fasttrap_cleanup_thread");

		fasttrap_retired_size = DEFAULT_RETIRED_SIZE;
		fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(*fasttrap_retired_spec),
		    KM_SLEEP);

		fasttrap_inited = 1;
	}
}

#undef FASTTRAP_MAJOR