1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26#include <kern/cpu_data.h>
27#include <kern/thread.h>
28#include <kern/assert.h>
29#include <mach/thread_status.h>
30
31#include <sys/param.h>
32#include <sys/systm.h>
33#include <sys/errno.h>
34#include <sys/stat.h>
35#include <sys/ioctl.h>
36#include <sys/conf.h>
37#include <sys/fcntl.h>
38#include <miscfs/devfs/devfs.h>
39
40#include <sys/dtrace.h>
41#include <sys/dtrace_impl.h>
42
43#include <sys/dtrace_glue.h>
44
45#include <machine/pal_routines.h>
46
47#if defined(__x86_64__)
48extern x86_saved_state_t *find_kern_regs(thread_t);
49#elif defined(__arm64__)
50extern struct arm_saved_state *find_kern_regs(thread_t);
51#else
52#error Unknown architecture
53#endif
54
55extern void profile_init(void);
56
57static dtrace_provider_id_t profile_id;
58
59/*
60 * Regardless of platform, the stack frames look like this in the case of the
61 * profile provider:
62 *
63 * profile_fire
64 * cyclic_expire
65 * cyclic_fire
66 * [ cbe ]
67 * [ interrupt code ]
68 *
69 * On x86, there are five frames from the generic interrupt code; further, the
70 * interrupted instruction appears as its own stack frame, giving us a total of
71 * 10.
72 *
73 * On SPARC, the picture is further complicated because the compiler
74 * optimizes away tail-calls -- so the following frames are optimized away:
75 *
76 * profile_fire
77 * cyclic_expire
78 *
79 * This gives three frames. However, on DEBUG kernels, the cyclic_expire
80 * frame cannot be tail-call eliminated, yielding four frames in this case.
81 *
82 * All of the above constraints lead to the mess below. Yes, the profile
83 * provider should ideally figure this out on-the-fly by hitting one of its own
84 * probes and then walking its own stack trace. This is complicated, however,
85 * and the static definition doesn't seem to be overly brittle. Still, we
86 * allow for a manual override in case we get it completely wrong.
87 */
88
/*
 * Number of artificial stack frames (interrupt and cyclic plumbing) to
 * skip in stack traces taken when a profile probe fires; may be
 * overridden at runtime via the profile_aframes tunable below.
 */
#if defined(__x86_64__)
#define PROF_ARTIFICIAL_FRAMES 9
#elif defined(__arm64__)
#define PROF_ARTIFICIAL_FRAMES 8
#else
#error Unknown architecture
#endif

#define PROF_NAMELEN 15			/* max probe-name length, incl. NUL */

#define PROF_PROFILE 0			/* prof_kind: "profile-" (all-CPU) probe */
#define PROF_TICK 1			/* prof_kind: "tick-" (single-firing) probe */
#define PROF_PREFIX_PROFILE "profile-"
#define PROF_PREFIX_TICK "tick-"
103
/*
 * One instance per created probe (e.g. "profile-997" or "tick-100").
 * For PROF_PROFILE probes an NCPU-sized array of profile_probe_percpu_t
 * is allocated immediately after this struct (see profile_create()).
 */
typedef struct profile_probe {
	char prof_name[PROF_NAMELEN];	/* probe name as published to DTrace */
	dtrace_id_t prof_id;		/* id from dtrace_probe_create() */
	int prof_kind;			/* PROF_PROFILE or PROF_TICK */
	hrtime_t prof_interval;		/* firing interval, nanoseconds */
	cyclic_id_t prof_cyclic;	/* cyclic handle; CYCLIC_NONE when disabled */
} profile_probe_t;
111
/*
 * Per-CPU firing state for PROF_PROFILE probes, used by profile_fire()
 * to compute how late the cyclic fired (probe arg2).
 */
typedef struct profile_probe_percpu {
	hrtime_t profc_expected;	/* next expected firing time */
	hrtime_t profc_interval;	/* this CPU's firing interval */
	profile_probe_t *profc_probe;	/* back-pointer to owning probe */
} profile_probe_percpu_t;
117
/* Shortest interval (highest rate) a user may request: 5000 hz. */
hrtime_t profile_interval_min = NANOSEC / 5000; /* 5000 hz */
/* Manual override for PROF_ARTIFICIAL_FRAMES; 0 means "use the default". */
int profile_aframes = 0; /* override */

/*
 * Default probe rates published when profile_provide() is called with a
 * NULL description.  Zero slots are placeholders for future rates.
 * The prime-number hz rates avoid synchronizing with periodic activity.
 */
static int profile_rates[] = {
	97, 199, 499, 997, 1999,
	4001, 4999, 0, 0, 0,
	0, 0, 0, 0, 0,
	0, 0, 0, 0, 0
};

static int profile_ticks[] = {
	1, 10, 100, 500, 1000,
	5000, 0, 0, 0, 0,
	0, 0, 0, 0, 0
};
133
134/*
135 * profile_max defines the upper bound on the number of profile probes that
136 * can exist (this is to prevent malicious or clumsy users from exhausing
137 * system resources by creating a slew of profile probes). At mod load time,
138 * this gets its value from PROFILE_MAX_DEFAULT or profile-max-probes if it's
139 * present in the profile.conf file.
140 */
141#define PROFILE_MAX_DEFAULT 1000 /* default max. number of probes */
142static uint32_t profile_max; /* maximum number of profile probes */
143static uint32_t profile_total; /* current number of profile probes */
144
145static void
146profile_fire(void *arg)
147{
148 profile_probe_percpu_t *pcpu = arg;
149 profile_probe_t *prof = pcpu->profc_probe;
150 hrtime_t late;
151
152 late = dtrace_gethrtime() - pcpu->profc_expected;
153 pcpu->profc_expected += pcpu->profc_interval;
154
155#if defined(__x86_64__)
156 x86_saved_state_t *kern_regs = find_kern_regs(current_thread());
157
158 if (NULL != kern_regs) {
159 /* Kernel was interrupted. */
160 dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, late, 0, 0);
161 } else {
162 pal_register_cache_state(current_thread(), VALID);
163 /* Possibly a user interrupt */
164 x86_saved_state_t *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());
165
166 if (NULL == tagged_regs) {
167 /* Too bad, so sad, no useful interrupt state. */
168 dtrace_probe(prof->prof_id, 0xcafebabe,
169 0x0, late, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
170 } else if (is_saved_state64(tagged_regs)) {
171 x86_saved_state64_t *regs = saved_state64(tagged_regs);
172
173 dtrace_probe(prof->prof_id, 0x0, regs->isf.rip, late, 0, 0);
174 } else {
175 x86_saved_state32_t *regs = saved_state32(tagged_regs);
176
177 dtrace_probe(prof->prof_id, 0x0, regs->eip, late, 0, 0);
178 }
179 }
180#elif defined(__arm64__)
181 {
182 arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());
183
184 // We should only come in here from interrupt context, so we should always have valid kernel regs
185 assert(NULL != arm_kern_regs);
186
187 if (saved_state64(iss: arm_kern_regs)->cpsr & 0xF) {
188 const uint64_t pc = ml_get_backtrace_pc(state: arm_kern_regs);
189
190 /* Kernel was interrupted. */
191 dtrace_probe(prof->prof_id, arg0: pc, arg1: 0x0, arg2: late, arg3: 0, arg4: 0);
192 } else {
193 /* Possibly a user interrupt */
194 arm_saved_state_t *arm_user_regs = (arm_saved_state_t *)find_user_regs(thread: current_thread());
195
196 if (NULL == arm_user_regs) {
197 /* Too bad, so sad, no useful interrupt state. */
198 dtrace_probe(prof->prof_id, arg0: 0xcafebabe, arg1: 0x0, arg2: late, arg3: 0, arg4: 0); /* XXX_BOGUS also see profile_usermode() below. */
199 } else {
200 dtrace_probe(prof->prof_id, arg0: 0x0, arg1: get_saved_state_pc(iss: arm_user_regs), arg2: late, arg3: 0, arg4: 0);
201 }
202 }
203 }
204#else
205#error Unknown architecture
206#endif
207}
208
209static void
210profile_tick(void *arg)
211{
212 profile_probe_t *prof = arg;
213
214#if defined(__x86_64__)
215 x86_saved_state_t *kern_regs = find_kern_regs(current_thread());
216
217 if (NULL != kern_regs) {
218 /* Kernel was interrupted. */
219 dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, 0, 0, 0);
220 } else {
221 pal_register_cache_state(current_thread(), VALID);
222 /* Possibly a user interrupt */
223 x86_saved_state_t *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());
224
225 if (NULL == tagged_regs) {
226 /* Too bad, so sad, no useful interrupt state. */
227 dtrace_probe(prof->prof_id, 0xcafebabe,
228 0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
229 } else if (is_saved_state64(tagged_regs)) {
230 x86_saved_state64_t *regs = saved_state64(tagged_regs);
231
232 dtrace_probe(prof->prof_id, 0x0, regs->isf.rip, 0, 0, 0);
233 } else {
234 x86_saved_state32_t *regs = saved_state32(tagged_regs);
235
236 dtrace_probe(prof->prof_id, 0x0, regs->eip, 0, 0, 0);
237 }
238 }
239#elif defined(__arm64__)
240 {
241 arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());
242
243 if (NULL != arm_kern_regs) {
244 const uint64_t pc = ml_get_backtrace_pc(state: arm_kern_regs);
245
246 /* Kernel was interrupted. */
247 dtrace_probe(prof->prof_id, arg0: pc, arg1: 0x0, arg2: 0, arg3: 0, arg4: 0);
248 } else {
249 /* Possibly a user interrupt */
250 arm_saved_state_t *arm_user_regs = (arm_saved_state_t *)find_user_regs(thread: current_thread());
251
252 if (NULL == arm_user_regs) {
253 /* Too bad, so sad, no useful interrupt state. */
254 dtrace_probe(prof->prof_id, arg0: 0xcafebabe, arg1: 0x0, arg2: 0, arg3: 0, arg4: 0); /* XXX_BOGUS also see profile_usermode() below. */
255 } else {
256 dtrace_probe(prof->prof_id, arg0: 0x0, arg1: get_saved_state_pc(iss: arm_user_regs), arg2: 0, arg3: 0, arg4: 0);
257 }
258 }
259 }
260
261#else
262#error Unknown architecture
263#endif
264}
265
266static void
267profile_create(hrtime_t interval, const char *name, int kind)
268{
269 profile_probe_t *prof;
270
271 if (interval < profile_interval_min) {
272 return;
273 }
274
275 if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0) {
276 return;
277 }
278
279 os_atomic_inc(&profile_total, relaxed);
280 if (profile_total > profile_max) {
281 os_atomic_dec(&profile_total, relaxed);
282 return;
283 }
284
285 if (PROF_TICK == kind) {
286 prof = kmem_zalloc(sizeof(profile_probe_t), KM_SLEEP);
287 } else {
288 prof = kmem_zalloc(sizeof(profile_probe_t) + NCPU * sizeof(profile_probe_percpu_t), KM_SLEEP);
289 }
290
291 (void) strlcpy(dst: prof->prof_name, src: name, n: sizeof(prof->prof_name));
292 prof->prof_interval = interval;
293 prof->prof_cyclic = CYCLIC_NONE;
294 prof->prof_kind = kind;
295 prof->prof_id = dtrace_probe_create(profile_id,
296 NULL, NULL, name,
297 profile_aframes ? profile_aframes : PROF_ARTIFICIAL_FRAMES, prof);
298}
299
300/*ARGSUSED*/
301static void
302profile_provide(void *arg, const dtrace_probedesc_t *desc)
303{
304#pragma unused(arg) /* __APPLE__ */
305 int i, j, rate, kind;
306 hrtime_t val = 0, mult = 1, len;
307 const char *name, *suffix = NULL;
308
309 const struct {
310 const char *prefix;
311 int kind;
312 } types[] = {
313 { PROF_PREFIX_PROFILE, PROF_PROFILE },
314 { PROF_PREFIX_TICK, PROF_TICK },
315 { NULL, .kind: 0 }
316 };
317
318 const struct {
319 const char *name;
320 hrtime_t mult;
321 } suffixes[] = {
322 { "ns", NANOSEC / NANOSEC },
323 { .name: "nsec", NANOSEC / NANOSEC },
324 { .name: "us", NANOSEC / MICROSEC },
325 { .name: "usec", NANOSEC / MICROSEC },
326 { .name: "ms", NANOSEC / MILLISEC },
327 { .name: "msec", NANOSEC / MILLISEC },
328 { .name: "s", NANOSEC / SEC },
329 { .name: "sec", NANOSEC / SEC },
330 { .name: "m", NANOSEC * (hrtime_t)60 },
331 { .name: "min", NANOSEC * (hrtime_t)60 },
332 { .name: "h", NANOSEC * (hrtime_t)(60 * 60) },
333 { .name: "hour", NANOSEC * (hrtime_t)(60 * 60) },
334 { .name: "d", NANOSEC * (hrtime_t)(24 * 60 * 60) },
335 { .name: "day", NANOSEC * (hrtime_t)(24 * 60 * 60) },
336 { .name: "hz", .mult: 0 },
337 { NULL, .mult: 0 }
338 };
339
340 if (desc == NULL) {
341 char n[PROF_NAMELEN];
342
343 /*
344 * If no description was provided, provide all of our probes.
345 */
346 for (i = 0; i < (int)(sizeof(profile_rates) / sizeof(int)); i++) {
347 if ((rate = profile_rates[i]) == 0) {
348 continue;
349 }
350
351 (void) snprintf(n, PROF_NAMELEN, "%s%d",
352 PROF_PREFIX_PROFILE, rate);
353 profile_create(NANOSEC / rate, name: n, PROF_PROFILE);
354 }
355
356 for (i = 0; i < (int)(sizeof(profile_ticks) / sizeof(int)); i++) {
357 if ((rate = profile_ticks[i]) == 0) {
358 continue;
359 }
360
361 (void) snprintf(n, PROF_NAMELEN, "%s%d",
362 PROF_PREFIX_TICK, rate);
363 profile_create(NANOSEC / rate, name: n, PROF_TICK);
364 }
365
366 return;
367 }
368
369 name = desc->dtpd_name;
370
371 for (i = 0; types[i].prefix != NULL; i++) {
372 len = strlen(s: types[i].prefix);
373
374 if (strncmp(s1: name, s2: types[i].prefix, n: len) != 0) {
375 continue;
376 }
377 break;
378 }
379
380 if (types[i].prefix == NULL) {
381 return;
382 }
383
384 kind = types[i].kind;
385 j = strlen(s: name) - len;
386
387 /*
388 * We need to start before any time suffix.
389 */
390 for (j = strlen(s: name); j >= len; j--) {
391 if (name[j] >= '0' && name[j] <= '9') {
392 break;
393 }
394 suffix = &name[j];
395 }
396
397 if (!suffix) {
398 suffix = &name[strlen(s: name)];
399 }
400
401 /*
402 * Now determine the numerical value present in the probe name.
403 */
404 for (; j >= len; j--) {
405 if (name[j] < '0' || name[j] > '9') {
406 return;
407 }
408
409 val += (name[j] - '0') * mult;
410 mult *= (hrtime_t)10;
411 }
412
413 if (val == 0) {
414 return;
415 }
416
417 /*
418 * Look-up the suffix to determine the multiplier.
419 */
420 for (i = 0, mult = 0; suffixes[i].name != NULL; i++) {
421 /* APPLE NOTE: Darwin employs size bounded string operations */
422 if (strncasecmp(s1: suffixes[i].name, s2: suffix, n: strlen(s: suffixes[i].name) + 1) == 0) {
423 mult = suffixes[i].mult;
424 break;
425 }
426 }
427
428 if (suffixes[i].name == NULL && *suffix != '\0') {
429 return;
430 }
431
432 if (mult == 0) {
433 /*
434 * The default is frequency-per-second.
435 */
436 val = NANOSEC / val;
437 } else {
438 val *= mult;
439 }
440
441 profile_create(interval: val, name, kind);
442}
443
444/*ARGSUSED*/
445static void
446profile_destroy(void *arg, dtrace_id_t id, void *parg)
447{
448#pragma unused(arg,id) /* __APPLE__ */
449 profile_probe_t *prof = parg;
450
451 ASSERT(prof->prof_cyclic == CYCLIC_NONE);
452
453 if (prof->prof_kind == PROF_TICK) {
454 kmem_free(prof, sizeof(profile_probe_t));
455 } else {
456 kmem_free(prof, sizeof(profile_probe_t) + NCPU * sizeof(profile_probe_percpu_t));
457 }
458
459 ASSERT(profile_total >= 1);
460 os_atomic_dec(&profile_total, relaxed);
461}
462
463/*ARGSUSED*/
464static void
465profile_online(void *arg, dtrace_cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
466{
467#pragma unused(cpu) /* __APPLE__ */
468 profile_probe_t *prof = arg;
469 profile_probe_percpu_t *pcpu;
470
471 pcpu = ((profile_probe_percpu_t *)(&(prof[1]))) + cpu_number();
472 pcpu->profc_probe = prof;
473
474 hdlr->cyh_func = profile_fire;
475 hdlr->cyh_arg = pcpu;
476 hdlr->cyh_level = CY_HIGH_LEVEL;
477
478 when->cyt_interval = prof->prof_interval;
479 when->cyt_when = dtrace_gethrtime() + when->cyt_interval;
480
481 pcpu->profc_expected = when->cyt_when;
482 pcpu->profc_interval = when->cyt_interval;
483}
484
/*ARGSUSED*/
/*
 * Omni-cyclic offline callback.  Nothing to free here — the per-CPU
 * state is released with the probe in profile_destroy() — so only
 * sanity-check that this record belongs to the expected probe.
 */
static void
profile_offline(void *arg, dtrace_cpu_t *cpu, void *oarg)
{
	profile_probe_percpu_t *pcpu = oarg;

	ASSERT(pcpu->profc_probe == arg);
#pragma unused(pcpu,arg,cpu) /* __APPLE__ */
}
494
495/*ARGSUSED*/
496static int
497profile_enable(void *arg, dtrace_id_t id, void *parg)
498{
499#pragma unused(arg,id) /* __APPLE__ */
500 profile_probe_t *prof = parg;
501 cyc_omni_handler_t omni;
502 cyc_handler_t hdlr;
503 cyc_time_t when;
504
505 ASSERT(prof->prof_interval != 0);
506 ASSERT(MUTEX_HELD(&cpu_lock));
507
508 if (prof->prof_kind == PROF_TICK) {
509 hdlr.cyh_func = profile_tick;
510 hdlr.cyh_arg = prof;
511 hdlr.cyh_level = CY_HIGH_LEVEL;
512
513 when.cyt_interval = prof->prof_interval;
514#if !defined(__APPLE__)
515 when.cyt_when = dtrace_gethrtime() + when.cyt_interval;
516#else
517 when.cyt_when = 0;
518#endif /* __APPLE__ */
519 } else {
520 ASSERT(prof->prof_kind == PROF_PROFILE);
521 omni.cyo_online = profile_online;
522 omni.cyo_offline = profile_offline;
523 omni.cyo_arg = prof;
524 }
525
526 if (prof->prof_kind == PROF_TICK) {
527 prof->prof_cyclic = cyclic_timer_add(&hdlr, &when);
528 } else {
529 prof->prof_cyclic = (cyclic_id_t)cyclic_add_omni(&omni); /* cast puns cyclic_id_list_t with cyclic_id_t */
530 }
531
532 return 0;
533}
534
535/*ARGSUSED*/
536static void
537profile_disable(void *arg, dtrace_id_t id, void *parg)
538{
539 profile_probe_t *prof = parg;
540
541 ASSERT(prof->prof_cyclic != CYCLIC_NONE);
542 ASSERT(MUTEX_HELD(&cpu_lock));
543
544#pragma unused(arg,id)
545 if (prof->prof_kind == PROF_TICK) {
546 cyclic_timer_remove(prof->prof_cyclic);
547 } else {
548 cyclic_remove_omni((cyclic_id_list_t)prof->prof_cyclic); /* cast puns cyclic_id_list_t with cyclic_id_t */
549 }
550 prof->prof_cyclic = CYCLIC_NONE;
551}
552
/*
 * DTrace getargval entry point.  Always returns 0: probe arguments are
 * supplied directly in the dtrace_probe() calls above, so there is
 * nothing to fetch here.
 */
static uint64_t
profile_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
{
#pragma unused(arg, id, parg, argno, aframes)
	/*
	 * All the required arguments for the profile probe are passed directly
	 * to dtrace_probe, and we do not go through dtrace_getarg which doesn't
	 * know how to hop to the kernel stack from the interrupt stack like
	 * dtrace_getpcstack
	 */
	return 0;
}
565
566static void
567profile_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc)
568{
569#pragma unused(arg, id)
570 profile_probe_t *prof = parg;
571 const char *argdesc = NULL;
572 switch (desc->dtargd_ndx) {
573 case 0:
574 argdesc = "void*";
575 break;
576 case 1:
577 argdesc = "user_addr_t";
578 break;
579 case 2:
580 if (prof->prof_kind == PROF_PROFILE) {
581 argdesc = "hrtime_t";
582 }
583 break;
584 }
585 if (argdesc) {
586 strlcpy(dst: desc->dtargd_native, src: argdesc, DTRACE_ARGTYPELEN);
587 } else {
588 desc->dtargd_ndx = DTRACE_ARGNONE;
589 }
590}
591
592/*
593 * APPLE NOTE: profile_usermode call not supported.
594 */
595static int
596profile_usermode(void *arg, dtrace_id_t id, void *parg)
597{
598#pragma unused(arg,id,parg)
599 return 1; /* XXX_BOGUS */
600}
601
/*
 * Stability attributes advertised for this provider's probe
 * description components, passed to dtrace_register() below.
 */
static dtrace_pattr_t profile_attr = {
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
	{ DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};
609
/*
 * Provider operations vector handed to dtrace_register().  NULL slots
 * are operations this provider does not implement.
 */
static dtrace_pops_t profile_pops = {
	.dtps_provide = profile_provide,
	.dtps_provide_module = NULL,
	.dtps_enable = profile_enable,
	.dtps_disable = profile_disable,
	.dtps_suspend = NULL,
	.dtps_resume = NULL,
	.dtps_getargdesc = profile_getargdesc,
	.dtps_getargval = profile_getarg,
	.dtps_usermode = profile_usermode,
	.dtps_destroy = profile_destroy
};
622
/*
 * Create the "profile" device minor node and register the provider
 * with the DTrace framework.  On failure of either step, remove the
 * minor node (a no-op if it was never created) and report failure;
 * on success, initialize the probe-count cap.
 */
static int
profile_attach(dev_info_t *devi)
{
	if (ddi_create_minor_node(devi, "profile", S_IFCHR, 0,
	    DDI_PSEUDO, 0) == DDI_FAILURE ||
	    dtrace_register("profile", &profile_attr,
	    DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER, NULL,
	    &profile_pops, NULL, &profile_id) != 0) {
		ddi_remove_minor_node(devi, NULL);
		return DDI_FAILURE;
	}

	profile_max = PROFILE_MAX_DEFAULT;

	return DDI_SUCCESS;
}
639
640/*
641 * APPLE NOTE: profile_detach not implemented
642 */
#if !defined(__APPLE__)
/*
 * Upstream (non-Darwin) detach path: only DDI_DETACH proceeds;
 * DDI_SUSPEND is a successful no-op, anything else fails.  Detach is
 * refused if dtrace_unregister() reports the provider still in use.
 */
static int
profile_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		return DDI_SUCCESS;
	default:
		return DDI_FAILURE;
	}

	if (dtrace_unregister(profile_id) != 0) {
		return DDI_FAILURE;
	}

	ddi_remove_minor_node(devi, NULL);
	return DDI_SUCCESS;
}
#endif /* !__APPLE__ */
664
d_open_t _profile_open;

/*
 * Character-device open entry point: no per-open state, so opening the
 * device always succeeds.
 */
int
_profile_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev,flags,devtype,p)
	return 0;
}
673
674#define PROFILE_MAJOR -24 /* let the kernel pick the device number */
675
/*
 * Character-device switch entry for /dev/profile.  Only open is
 * meaningful; every other operation is stubbed with the matching
 * eno_* error routine.
 */
static const struct cdevsw profile_cdevsw =
{
	.d_open = _profile_open,
	.d_close = eno_opcl,
	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_ioctl = eno_ioctl,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
};
691
692void
693profile_init( void )
694{
695 int majdevno = cdevsw_add(PROFILE_MAJOR, &profile_cdevsw);
696
697 if (majdevno < 0) {
698 printf("profile_init: failed to allocate a major number!\n");
699 return;
700 }
701
702 profile_attach(devi: (dev_info_t*)(uintptr_t)majdevno);
703}
704#undef PROFILE_MAJOR
705