/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kalloc.h>
#include <kern/kpc.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

#define USER_SS_ZONE_ALLOC_SIZE (0x4000)

extern int debug_task;

zone_t ads_zone;     /* zone for debug_state area */
zone_t user_ss_zone; /* zone for user arm_context_t allocations */

/*
 * Routine: consider_machine_collect
 *
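 * Let the pmap layer garbage-collect unused page table memory (pmap_gc()).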
 */
void
consider_machine_collect(void)
{
    pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
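 * Nothing to adjust on this architecture.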
 */
void
consider_machine_adjust(void)
{
}

/*
 * Routine: machine_switch_context
 *
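 * Machine-dependent half of a context switch from 'old' to 'new':
 * switch address spaces if the pmaps differ, bind 'new' to this CPU's
 * cpu_data, and complete the switch in assembly via Switch_context().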
 */
thread_t
machine_switch_context(
    thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
    thread_t retval;
    pmap_t new_pmap;
    cpu_data_t *cpu_data_ptr;

#define machine_switch_context_kprintf(x...) /* kprintf("machine_switch_context: " x) */

    cpu_data_ptr = getCpuDatap();
    if (old == new)
        panic("machine_switch_context");

    kpc_off_cpu(old);

    new_pmap = new->map->pmap;
    if (old->map->pmap != new_pmap)
        pmap_switch(new_pmap);

    new->machine.CpuDatap = cpu_data_ptr;

    machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);

    retval = Switch_context(old, continuation, new);
    assert(retval != NULL);

    return retval;
}

/*
 * Routine: machine_thread_create
 *
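 * Set up the machine-dependent thread state. Threads in user tasks get an
 * arm_context_t from user_ss_zone whose saved-state and NEON headers are
 * flavored for 32- or 64-bit data according to the owning task; kernel
 * threads carry no user save state.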
 */
kern_return_t
machine_thread_create(
    thread_t thread,
    task_t task)
{
    arm_context_t *thread_user_ss = NULL;
    kern_return_t result = KERN_SUCCESS;

#define machine_thread_create_kprintf(x...) /* kprintf("machine_thread_create: " x) */

    machine_thread_create_kprintf("thread = %x\n", thread);

    if (current_thread() != thread) {
        thread->machine.CpuDatap = (cpu_data_t *)0;
    }
    thread->machine.preemption_count = 0;
    thread->machine.cthread_self = 0;
    thread->machine.cthread_data = 0;

    if (task != kernel_task) {
        /* If this isn't a kernel thread, we'll have userspace state. */
        thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);

        if (!thread->machine.contextData) {
            return KERN_FAILURE;
        }

        thread->machine.upcb = &thread->machine.contextData->ss;
        thread->machine.uNeon = &thread->machine.contextData->ns;

        if (task_has_64Bit_data(task)) {
            thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
            thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
            thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
            thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
        } else {
            thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
            thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
            thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
            thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
        }
    } else {
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;
    }

    bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));

    result = machine_thread_state_initialize(thread);

    if (result != KERN_SUCCESS) {
        thread_user_ss = thread->machine.contextData;
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;
        zfree(user_ss_zone, thread_user_ss);
    }

    return result;
}

/*
 * Routine: machine_thread_destroy
 *
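 * Tear down the machine-dependent thread state: detach and free the user
 * save state, and free any debug state, first clearing this CPU's
 * cpu_user_debug pointer if it still refers to it.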
 */
void
machine_thread_destroy(
    thread_t thread)
{
    arm_context_t *thread_user_ss;

    if (thread->machine.contextData) {
        /* Disassociate the user save state from the thread before we free it. */
        thread_user_ss = thread->machine.contextData;
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;
        zfree(user_ss_zone, thread_user_ss);
    }

    if (thread->machine.DebugData != NULL) {
        if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
            arm_debug_set(NULL);
        }

        zfree(ads_zone, thread->machine.DebugData);
    }
}

/*
 * Routine: machine_thread_init
 *
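 * Create the zones backing the per-thread debug state (ads_zone) and user
 * save state (user_ss_zone) allocations.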
 */
void
machine_thread_init(void)
{
    ads_zone = zinit(sizeof(arm_debug_state_t),
                     THREAD_CHUNK * (sizeof(arm_debug_state_t)),
                     THREAD_CHUNK * (sizeof(arm_debug_state_t)),
                     "arm debug state");

    /*
     * Create a zone for the user save state. At the time this zone was created,
     * the user save state was 848 bytes, and the matching kalloc zone was 1024
     * bytes, which would result in significant amounts of wasted space if we
     * simply used kalloc to allocate the user saved state.
     *
     * 0x4000 has been chosen as the allocation size, as it results in 272 bytes
     * of wasted space per chunk, which should correspond to 19 allocations.
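     * (Assuming the 848-byte size noted above: 0x4000 / 848 = 19 contexts per
     * chunk, leaving 0x4000 - 19 * 848 = 272 bytes unused.)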
     */
    user_ss_zone = zinit(sizeof(arm_context_t),
                         CONFIG_THREAD_MAX * (sizeof(arm_context_t)),
                         USER_SS_ZONE_ALLOC_SIZE,
                         "user save state");
}

/*
 * Routine: get_useraddr
 *
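 * Return the user-mode PC from the current thread's saved state.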
 */
user_addr_t
get_useraddr()
{
    return (get_saved_state_pc(current_thread()->machine.upcb));
}

/*
 * Routine: machine_stack_detach
 *
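 * Detach the kernel stack from a thread and return it to the caller.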
 */
vm_offset_t
machine_stack_detach(
    thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
                 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;
    thread->machine.kstackptr = 0;

    return (stack);
}

/*
 * Routine: machine_stack_attach
 *
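 * Attach a kernel stack to a thread and prime its kernel saved state so
 * that the thread resumes in thread_continue() with the default kernel CPSR.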
 */
void
machine_stack_attach(
    thread_t thread,
    vm_offset_t stack)
{
    struct arm_context *context;
    struct arm_saved_state64 *savestate;

#define machine_stack_attach_kprintf(x...) /* kprintf("machine_stack_attach: " x) */

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
                 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

    thread->kernel_stack = stack;
    thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
    thread_initialize_kernel_state(thread);

    machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

    context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
    savestate = saved_state64(&context->ss);
    savestate->fp = 0;
    savestate->lr = (uintptr_t)thread_continue;
    savestate->sp = thread->machine.kstackptr;
    savestate->cpsr = PSR64_KERNEL_DEFAULT;
    machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}

/*
 * Routine: machine_stack_handoff
 *
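 * Hand the kernel stack of 'old' directly to 'new', switching address
 * spaces if necessary, without performing a full register save/restore.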
 */
void
machine_stack_handoff(
    thread_t old,
    thread_t new)
{
    vm_offset_t stack;
    pmap_t new_pmap;
    cpu_data_t *cpu_data_ptr;

    kpc_off_cpu(old);

    stack = machine_stack_detach(old);
    cpu_data_ptr = getCpuDatap();
    new->kernel_stack = stack;
    new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }

    new_pmap = new->map->pmap;
    if (old->map->pmap != new_pmap)
        pmap_switch(new_pmap);

    new->machine.CpuDatap = cpu_data_ptr;
    machine_set_current_thread(new);
    thread_initialize_kernel_state(new);

    return;
}

/*
 * Routine: call_continuation
 *
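 * Invoke a thread continuation on the current kernel stack via the
 * assembly Call_continuation trampoline.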
 */
void
call_continuation(
    thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) /* kprintf("call_continuation_kprintf:" x) */

    call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
    Call_continuation(continuation, parameter, wresult, enable_interrupts);
}

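/*
 * Helpers for programming the self-hosted debug registers. SET_DBGBVRn/
 * SET_DBGWVRn write a breakpoint/watchpoint value register; SET_DBGBCRn/
 * SET_DBGWCRn write the matching control register and also OR the control
 * value into 'accum' so the caller can inspect the union of all control
 * bits that were programmed.
 */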
#define SET_DBGBCRn(n, value, accum) \
    __asm__ volatile( \
        "msr DBGBCR" #n "_EL1, %[val]\n" \
        "orr %[result], %[result], %[val]\n" \
        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
    __asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
    __asm__ volatile( \
        "msr DBGWCR" #n "_EL1, %[val]\n" \
        "orr %[result], %[result], %[val]\n" \
        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
    __asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))

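/*
 * Load a 32-bit flavor debug state into the debug registers. The switch
 * statements below intentionally fall through so that only the
 * breakpoint/watchpoint pairs the hardware implements are programmed;
 * passing NULL loads a zeroed state, disabling user debug on this CPU.
 */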
void arm_debug_set32(arm_debug_state_t *debug_state)
{
    struct cpu_data *cpu_data_ptr;
    arm_debug_info_t *debug_info = arm_debug_info();
    boolean_t intr, set_mde = 0;
    arm_debug_state_t off_state;
    uint32_t i;
    uint64_t all_ctrls = 0;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    // Set current user debug
    cpu_data_ptr->cpu_user_debug = debug_state;

    if (NULL == debug_state) {
        bzero(&off_state, sizeof(off_state));
        debug_state = &off_state;
    }

    switch (debug_info->num_breakpoint_pairs) {
    case 16:
        SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
        SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
    case 15:
        SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
        SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
    case 14:
        SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
        SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
    case 13:
        SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
        SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
    case 12:
        SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
        SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
    case 11:
        SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
        SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
    case 10:
        SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
        SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
    case 9:
        SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
        SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
    case 8:
        SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
        SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
    case 7:
        SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
        SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
    case 6:
        SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
        SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
    case 5:
        SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
        SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
    case 4:
        SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
        SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
    case 3:
        SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
        SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
    case 2:
        SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
        SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
    case 1:
        SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
        SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
    default:
        break;
    }

    switch (debug_info->num_watchpoint_pairs) {
    case 16:
        SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
        SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
    case 15:
        SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
        SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
    case 14:
        SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
        SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
    case 13:
        SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
        SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
    case 12:
        SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
        SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
    case 11:
        SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
        SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
    case 10:
        SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
        SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
    case 9:
        SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
        SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
    case 8:
        SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
        SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
    case 7:
        SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
        SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
    case 6:
        SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
        SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
    case 5:
        SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
        SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
    case 4:
        SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
        SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
    case 3:
        SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
        SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
    case 2:
        SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
        SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
    case 1:
        SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
        SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
    default:
        break;
    }

#if defined(CONFIG_KERNEL_INTEGRITY)
    if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
        panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
    }
#endif

    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->uds.ds32.bcr[i]) {
            set_mde = 1;
            break;
        }
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->uds.ds32.wcr[i]) {
            set_mde = 1;
            break;
        }
    }

    /*
     * Breakpoint/Watchpoint Enable
     */
    if (set_mde) {
        update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
    } else {
        update_mdscr(0x8000, 0);
    }

    /*
     * Software debug single step enable
     */
    if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
        update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

        set_saved_state_cpsr((current_thread()->machine.upcb),
                             get_saved_state_cpsr((current_thread()->machine.upcb)) | PSR64_SS);
    } else {
        update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
        // Workaround for radar 20619637
        __builtin_arm_isb(ISB_SY);
#endif
    }

    (void) ml_set_interrupts_enabled(intr);

    return;
}

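/*
 * 64-bit counterpart of arm_debug_set32 above; the structure is identical,
 * operating on the ds64 fields.
 */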
void arm_debug_set64(arm_debug_state_t *debug_state)
{
    struct cpu_data *cpu_data_ptr;
    arm_debug_info_t *debug_info = arm_debug_info();
    boolean_t intr, set_mde = 0;
    arm_debug_state_t off_state;
    uint32_t i;
    uint64_t all_ctrls = 0;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    // Set current user debug
    cpu_data_ptr->cpu_user_debug = debug_state;

    if (NULL == debug_state) {
        bzero(&off_state, sizeof(off_state));
        debug_state = &off_state;
    }

    switch (debug_info->num_breakpoint_pairs) {
    case 16:
        SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
        SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
    case 15:
        SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
        SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
    case 14:
        SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
        SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
    case 13:
        SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
        SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
    case 12:
        SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
        SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
    case 11:
        SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
        SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
    case 10:
        SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
        SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
    case 9:
        SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
        SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
    case 8:
        SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
        SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
    case 7:
        SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
        SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
    case 6:
        SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
        SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
    case 5:
        SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
        SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
    case 4:
        SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
        SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
    case 3:
        SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
        SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
    case 2:
        SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
        SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
    case 1:
        SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
        SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
    default:
        break;
    }

    switch (debug_info->num_watchpoint_pairs) {
    case 16:
        SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
        SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
    case 15:
        SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
        SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
    case 14:
        SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
        SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
    case 13:
        SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
        SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
    case 12:
        SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
        SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
    case 11:
        SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
        SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
    case 10:
        SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
        SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
    case 9:
        SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
        SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
    case 8:
        SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
        SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
    case 7:
        SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
        SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
    case 6:
        SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
        SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
    case 5:
        SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
        SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
    case 4:
        SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
        SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
    case 3:
        SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
        SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
    case 2:
        SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
        SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
    case 1:
        SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
        SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
    default:
        break;
    }

#if defined(CONFIG_KERNEL_INTEGRITY)
    if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
        panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
    }
#endif

    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->uds.ds64.bcr[i]) {
            set_mde = 1;
            break;
        }
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->uds.ds64.wcr[i]) {
            set_mde = 1;
            break;
        }
    }

    /*
     * Breakpoint/Watchpoint Enable
     */
    if (set_mde) {
        update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
    }

    /*
     * Software debug single step enable
     */
    if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
        update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

        set_saved_state_cpsr((current_thread()->machine.upcb),
                             get_saved_state_cpsr((current_thread()->machine.upcb)) | PSR64_SS);
    } else {
        update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
        // Workaround for radar 20619637
        __builtin_arm_isb(ISB_SY);
#endif
    }

    (void) ml_set_interrupts_enabled(intr);

    return;
}

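/*
 * Dispatch on the debug state flavor; a NULL argument disables debug using
 * whichever variant matches the current thread's register width.
 */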
void arm_debug_set(arm_debug_state_t *debug_state)
{
    if (debug_state) {
        switch (debug_state->dsh.flavor) {
        case ARM_DEBUG_STATE32:
            arm_debug_set32(debug_state);
            break;
        case ARM_DEBUG_STATE64:
            arm_debug_set64(debug_state);
            break;
        default:
            panic("arm_debug_set");
            break;
        }
    } else {
        if (thread_is_64bit_data(current_thread()))
            arm_debug_set64(debug_state);
        else
            arm_debug_set32(debug_state);
    }
}

#define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
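/*
 * A debug state is considered valid only if every enabled breakpoint or
 * watchpoint address falls within the address range reachable by a state
 * of that width.
 */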
boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
            return FALSE;
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
            return FALSE;
    }
    return TRUE;
}

boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
            return FALSE;
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
            return FALSE;
    }
    return TRUE;
}

boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i])
            return FALSE;
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i])
            return FALSE;
    }
    return TRUE;
}

/*
 * Duplicate one arm_debug_state_t to another. "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 */
void
copy_legacy_debug_state(
    arm_legacy_debug_state_t *src,
    arm_legacy_debug_state_t *target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}

void
copy_debug_state32(
    arm_debug_state32_t *src,
    arm_debug_state32_t *target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_debug_state32_t));
}

void
copy_debug_state64(
    arm_debug_state64_t *src,
    arm_debug_state64_t *target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_debug_state64_t));
}

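/*
 * Set the user TLS base for a thread. The value is stored in
 * machine.cthread_self and, for the current thread, loaded into TPIDRRO_EL0
 * with the CPU-number bits left intact, since the kernel keeps the CPU
 * number in the low bits of that register (MACHDEP_CPUNUM_MASK).
 */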
kern_return_t
machine_thread_set_tsd_base(
    thread_t thread,
    mach_vm_offset_t tsd_base)
{
    if (thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    if (tsd_base & MACHDEP_CPUNUM_MASK) {
        return KERN_INVALID_ARGUMENT;
    }

    if (thread_is_64bit_addr(thread)) {
        if (tsd_base > vm_map_max(thread->map))
            tsd_base = 0ULL;
    } else {
        if (tsd_base > UINT32_MAX)
            tsd_base = 0ULL;
    }

    thread->machine.cthread_self = tsd_base;

    /* For current thread, make the TSD base active immediately */
    if (thread == current_thread()) {
        uint64_t cpunum, tpidrro_el0;

        mp_disable_preemption();
        tpidrro_el0 = get_tpidrro();
        cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
        set_tpidrro(tsd_base | cpunum);
        mp_enable_preemption();
    }

    return KERN_SUCCESS;
}