/*
 * Copyright (c) 2005-2018 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/caches_internal.h>
#include <kern/thread.h>

#if __has_include(<ptrauth.h>)
#include <ptrauth.h>
#endif
#include <stdarg.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>
#include <machine/atomic.h>
#include <kern/cambria_layout.h>
#include <kern/simple_lock.h>
#include <kern/sched_prim.h>            /* for thread_wakeup() */
#include <kern/thread_call.h>
#include <kern/task.h>
#include <machine/atomic.h>
#include <machine/machine_routines.h>

extern struct arm_saved_state *find_kern_regs(thread_t);

extern dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
typedef arm_saved_state_t savearea_t;

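/*
 * A stack frame as laid out by the AArch64 frame record: the saved frame
 * pointer (x29) followed by the saved return address (x30).
 */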
struct frame {
	struct frame *backchain;
	uintptr_t retaddr;
};

/*
 * Atomicity and synchronization
 */
inline void
dtrace_membar_producer(void)
{
	__builtin_arm_dmb(DMB_ISH);
}

inline void
dtrace_membar_consumer(void)
{
	__builtin_arm_dmb(DMB_ISH);
}

/*
 * Interrupt manipulation
 * XXX dtrace_getipl() can be called from probe context.
 */
int
dtrace_getipl(void)
{
	/*
	 * XXX Drat, get_interrupt_level is MACH_KERNEL_PRIVATE
	 * in osfmk/kern/cpu_data.h
	 */
	/* return get_interrupt_level(); */
	return ml_at_interrupt_context() ? 1 : 0;
}

/*
 * MP coordination
 */

static LCK_MTX_DECLARE_ATTR(dt_xc_lock, &dtrace_lck_grp, &dtrace_lck_attr);
static uint32_t dt_xc_sync;

typedef struct xcArg {
	processorid_t cpu;
	dtrace_xcall_t f;
	void *arg;
} xcArg_t;

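/*
 * Cross-call trampoline run on each CPU by cpu_broadcast_xcall(): invokes
 * the requested function when this CPU is the target (or all CPUs are),
 * then wakes the initiator once the last CPU has checked in.
 */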
static void
xcRemote(void *foo)
{
	xcArg_t *pArg = (xcArg_t *) foo;

	if (pArg->cpu == CPU->cpu_id || pArg->cpu == DTRACE_CPUALL) {
		(pArg->f)(pArg->arg);
	}

	if (os_atomic_dec(&dt_xc_sync, relaxed) == 0) {
		thread_wakeup((event_t) &dt_xc_sync);
	}
}

/*
 * dtrace_xcall() is not called from probe context.
 */
void
dtrace_xcall(processorid_t cpu, dtrace_xcall_t f, void *arg)
{
	/* Only one dtrace_xcall in flight allowed */
	lck_mtx_lock(&dt_xc_lock);

	xcArg_t xcArg;

	xcArg.cpu = cpu;
	xcArg.f = f;
	xcArg.arg = arg;

	cpu_broadcast_xcall(&dt_xc_sync, TRUE, xcRemote, (void*) &xcArg);

	lck_mtx_unlock(&dt_xc_lock);
	return;
}


/**
 * Register definitions
 */
#define ARM64_FP 29
#define ARM64_LR 30
#define ARM64_SP 31
#define ARM64_PC 32
#define ARM64_CPSR 33

/*
 * Runtime and ABI
 */
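/*
 * Return the value of saved register `reg` from the given save area.  A
 * NULL save area or an out-of-range register index is flagged as an
 * illegal operation and yields 0.
 */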
uint64_t
dtrace_getreg(struct regs * savearea, uint_t reg)
{
	struct arm_saved_state *regs = (struct arm_saved_state *) savearea;

	if (regs == NULL) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return 0;
	}

	if (!check_saved_state_reglimit(regs, reg)) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return 0;
	}

	return (uint64_t)get_saved_state_reg(regs, reg);
}

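/* Guest (VM) register state is not supported here; any access is flagged as an illegal operation. */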
uint64_t
dtrace_getvmreg(uint_t ndx)
{
#pragma unused(ndx)
	DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
	return 0;
}

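/* Live kernel core dumps are not implemented for this architecture. */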
void
dtrace_livedump(char *filename, size_t len)
{
#pragma unused(filename)
#pragma unused(len)
	DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
}

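/*
 * Byte offset of the saved return address within a 64-bit frame record
 * (the saved frame pointer sits at offset 0, the saved lr at offset 8).
 */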
#define RETURN_OFFSET64 8

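/*
 * Walk a user frame-pointer chain starting at (pc, sp), storing up to
 * pcstack_limit return addresses into pcstack when it is non-NULL, and
 * return the number of frames visited.  The walk stops at a NULL frame,
 * when the buffer is full, or when a fault is taken while reading user
 * memory.
 */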
static int
dtrace_getustack_common(uint64_t * pcstack, int pcstack_limit, user_addr_t pc,
    user_addr_t sp)
{
	volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t) pc;
			pcstack_limit--;
			if (pcstack_limit <= 0) {
				break;
			}
		}

		if (sp == 0) {
			break;
		}

		pc = dtrace_fuword64((sp + RETURN_OFFSET64));
		sp = dtrace_fuword64(sp);

		/* Truncate ustack if the iterator causes fault. */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

	return ret;
}

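/*
 * Capture the current thread's user call stack into pcstack.  The first
 * entry is the pid of the current process; any unused slots are zeroed.
 */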
void
dtrace_getupcstack(uint64_t * pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	savearea_t *regs;
	user_addr_t pc, sp, fp;
	volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT) {
		return;
	}

	if (pcstack_limit <= 0) {
		return;
	}

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL) {
		goto zero;
	}

	regs = (savearea_t *) find_user_regs(thread);
	if (regs == NULL) {
		goto zero;
	}

	*pcstack++ = (uint64_t)dtrace_proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0) {
		return;
	}

	pc = get_saved_state_pc(regs);
	sp = get_saved_state_sp(regs);

	{
		fp = get_saved_state_fp(regs);
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t) pc;
		pcstack_limit--;
		if (pcstack_limit <= 0) {
			return;
		}

		pc = get_saved_state_lr(regs);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);

	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0) {
		*pcstack++ = 0ULL;
	}
}

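/*
 * Return the depth of the current thread's user call stack, or -1 if a
 * fault has already been taken in probe context.
 */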
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	savearea_t *regs;
	user_addr_t pc, sp, fp;
	int n = 0;

	if (thread == NULL) {
		return 0;
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) {
		return -1;
	}

	regs = (savearea_t *) find_user_regs(thread);
	if (regs == NULL) {
		return 0;
	}

	pc = get_saved_state_pc(regs);
	sp = get_saved_state_sp(regs);
	fp = get_saved_state_fp(regs);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;
		pc = get_saved_state_lr(regs);
	}

	/*
	 * Note that unlike ppc, the arm code does not use
	 * CPU_DTRACE_USTACK_FP. This is because arm always
	 * traces from the sp, even in syscall/profile/fbt
	 * providers.
	 */

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return n;
}

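/*
 * Like dtrace_getupcstack(), but also records the frame pointer of each
 * user frame into fpstack.
 */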
void
dtrace_getufpstack(uint64_t * pcstack, uint64_t * fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	boolean_t is64bit = proc_is64bit_data(current_proc());
	savearea_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags = (volatile uint16_t *) &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;


	if (*flags & CPU_DTRACE_FAULT) {
		return;
	}

	if (pcstack_limit <= 0) {
		return;
	}

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL) {
		goto zero;
	}

	regs = (savearea_t *) find_user_regs(thread);
	if (regs == NULL) {
		goto zero;
	}

	*pcstack++ = (uint64_t)dtrace_proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0) {
		return;
	}

	pc = get_saved_state_pc(regs);
	sp = get_saved_state_lr(regs);

#if 0 /* XXX signal stack crawl */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof(struct frame) + 2 * sizeof(long);
		s2 = s1 + sizeof(siginfo_t);
	} else {
		s1 = sizeof(struct frame32) + 3 * sizeof(int);
		s2 = s1 + sizeof(siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t) pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0) {
			return;
		}

		if (is64bit) {
			pc = dtrace_fuword64(sp);
		} else {
			pc = dtrace_fuword32(sp);
		}
	}
	while (pc != 0 && sp != 0) {
		*pcstack++ = (uint64_t) pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0) {
			break;
		}

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *) oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *) oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else
#endif
		{
			pc = dtrace_fuword64((sp + RETURN_OFFSET64));
			sp = dtrace_fuword64(sp);
		}

		/* Truncate ustack if the iterator causes fault. */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0) {
		*pcstack++ = 0ULL;
	}
}

/**
 * Return whether a frame is located within the current thread's kernel stack.
 *
 * @param fp The frame to check.
 */
static inline bool
dtrace_frame_in_kernel_stack(struct frame * fp)
{
	const uintptr_t bottom = dtrace_get_kernel_stack(current_thread());

	/* Return early if there is no kernel stack. */
	if (bottom == 0) {
		return false;
	}

	const uintptr_t top = bottom + kernel_stack_size;
	return ((uintptr_t)fp >= bottom) && ((uintptr_t)fp < top);
}

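/*
 * Capture a kernel backtrace by walking frame records from the current
 * frame, skipping `aframes` artificial frames (substituting
 * cpu_dtrace_caller when set) and stopping at invalid stack transitions.
 * Unused slots are zeroed.
 */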
void
dtrace_getpcstack(pc_t * pcstack, int pcstack_limit, int aframes,
    uint32_t * intrpc)
{
	struct frame *fp = (struct frame *) __builtin_frame_address(0);
	struct frame *nextfp;
	int depth = 0;
	int on_intr = CPU_ON_INTR(CPU);
	int last = 0;
	uintptr_t pc;
	uintptr_t caller = CPU->cpu_dtrace_caller;

	aframes++;

	if (intrpc != NULL && depth < pcstack_limit) {
		pcstack[depth++] = (pc_t) intrpc;
	}

	while (depth < pcstack_limit) {
		nextfp = fp->backchain;
		pc = fp->retaddr;

		/*
		 * Stacks grow down; backtracing should always be moving to higher
		 * addresses except when the backtrace spans multiple different stacks.
		 */
		if (nextfp <= fp) {
			if (on_intr) {
				/*
				 * Let's check whether we're moving from the interrupt stack to
				 * either a kernel stack or a non-XNU stack.
				 */
				arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());
				if (arm_kern_regs) {
					/*
					 * If this frame is not stitching from the interrupt stack
					 * to either the kernel stack or a known non-XNU stack, then
					 * stop the backtrace.
					 */
					if (!dtrace_frame_in_kernel_stack(nextfp) &&
					    !ml_addr_in_non_xnu_stack((uintptr_t)nextfp)) {
						last = 1;
					}

					/* Not on the interrupt stack anymore. */
					on_intr = 0;
				} else {
					/*
					 * If this thread was on the interrupt stack, but did not
					 * take an interrupt (i.e, the idle thread), there is no
					 * explicit saved state for us to use.
					 */
					last = 1;
				}
			} else if (!ml_addr_in_non_xnu_stack((uintptr_t)fp) &&
			    !ml_addr_in_non_xnu_stack((uintptr_t)nextfp)) {
				/*
				 * This is the last frame we can process; indicate that we
				 * should return after processing this frame.
				 *
				 * This could be for a few reasons. If the nextfp is NULL, then
				 * this logic will be triggered. Beyond that, the only valid
				 * stack switches are either going from kernel stack to non-xnu
				 * stack, non-xnu stack to kernel stack, or between one non-xnu
				 * stack and another. So if none of those transitions are
				 * happening, then stop the backtrace.
				 */
				last = 1;
			}
		}
		if (aframes > 0) {
			if (--aframes == 0 && caller != (uintptr_t)NULL) {
				/*
				 * We've just run out of artificial frames,
				 * and we have a valid caller -- fill it in
				 * now.
				 */
				ASSERT(depth < pcstack_limit);
				pcstack[depth++] = (pc_t) caller;
				caller = (uintptr_t)NULL;
			}
		} else {
			if (depth < pcstack_limit) {
				pcstack[depth++] = (pc_t) pc;
			}
		}

		if (last) {
			while (depth < pcstack_limit) {
				pcstack[depth++] = (pc_t) NULL;
			}
			return;
		}
		fp = nextfp;
	}
}

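/*
 * Fetch probe argument `arg` for the current probe firing.  If the probe
 * fired through a trap into dtrace_invop() (e.g. fbt), the arguments are
 * recovered from the saved trap state; otherwise they are read from the
 * frame of the provider's direct call to dtrace_probe().
 */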
uint64_t
dtrace_getarg(int arg, int aframes, dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
#pragma unused(arg, aframes)
	uint64_t val = 0;
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	uintptr_t *stack;
	uintptr_t pc;
	int i;

	/*
	 * A total of 8 arguments are passed via registers; any argument with
	 * index of 7 or lower is therefore in a register.
	 */
	int inreg = 7;

	for (i = 1; i <= aframes; ++i) {
#if __has_feature(ptrauth_frames)
		fp = ptrauth_strip(fp->backchain, ptrauth_key_frame_pointer);
#else
		fp = fp->backchain;
#endif

#if __has_feature(ptrauth_returns)
		pc = (uintptr_t)ptrauth_strip((void*)fp->retaddr, ptrauth_key_return_address);
#else
		pc = fp->retaddr;
#endif

		if (dtrace_invop_callsite_pre != NULL
		    && pc > (uintptr_t) dtrace_invop_callsite_pre
		    && pc <= (uintptr_t) dtrace_invop_callsite_post) {
			/* fp points to frame of dtrace_invop() activation */
			fp = fp->backchain; /* to fbt_perfCallback activation */
			fp = fp->backchain; /* to sleh_synchronous activation */
			fp = fp->backchain; /* to fleh_synchronous activation */

			arm_saved_state_t *tagged_regs = (arm_saved_state_t*) ((void*) &fp[1]);
			arm_saved_state64_t *saved_state = saved_state64(tagged_regs);

			if (arg <= inreg) {
				/* the argument will be found in a register */
				stack = (uintptr_t*) &saved_state->x[0];
			} else {
				/* the argument will be found in the stack */
				fp = (struct frame*) saved_state->sp;
				stack = (uintptr_t*) &fp[1];
				arg -= (inreg + 1);
			}

			goto load;
		}
	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- We arrive here when the provider has
	 * called dtrace_probe() directly.
	 * The probe ID is the first argument to dtrace_probe().
	 * We must advance beyond that to get the argX.
	 */
	arg++; /* Advance past probeID */

	if (arg <= inreg) {
		/*
		 * This shouldn't happen. If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return 0;
	}

	arg -= (inreg + 1);
	stack = (uintptr_t*) &fp[1]; /* Find marshalled arguments */

load:
	if (dtrace_canload((uint64_t)(stack + arg), sizeof(uint64_t),
	    mstate, vstate)) {
		/* dtrace_probe arguments arg0 ... arg4 are 64bits wide */
		val = dtrace_load64((uint64_t)(stack + arg));
	}

	return val;
}

void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fltoffs, int fault, uint64_t illval)
{
	/* XXX ARMTODO */
	/*
	 * For the case of the error probe firing lets
	 * stash away "illval" here, and special-case retrieving it in DIF_VARIABLE_ARG.
	 */
	state->dts_arg_error_illval = illval;
	dtrace_probe( dtrace_probeid_error, (uint64_t)(uintptr_t)state, epid, which, fltoffs, fault );
}

void
dtrace_toxic_ranges(void (*func)(uintptr_t base, uintptr_t limit))
{
	/* XXX ARMTODO check copied from ppc/x86*/
	/*
	 * "base" is the smallest toxic address in the range, "limit" is the first
	 * VALID address greater than "base".
	 */
	func(0x0, VM_MIN_KERNEL_ADDRESS);
	if (VM_MAX_KERNEL_ADDRESS < ~(uintptr_t)0) {
		func(VM_MAX_KERNEL_ADDRESS + 1, ~(uintptr_t)0);
	}
}

void
dtrace_flush_caches(void)
{
	/* TODO There were some problems with flushing just the cache line that had been modified.
	 * For now, we'll flush the entire cache, until we figure out how to flush just the patched block.
	 */
	FlushPoU_Dcache();
	InvalidatePoU_Icache();
}