/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>

#include <vm/pmap.h>

#include <mach/thread_status.h>
#include <mach-o/loader.h>
#include <mach/vm_region.h>
#include <mach/vm_statistics.h>

#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_udp.h>
#include <kdp/kdp_internal.h>
#include <arm/misc_protos.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>

pmap_t kdp_pmap = 0;
boolean_t kdp_trans_off;
boolean_t kdp_read_io = 0;

pmap_paddr_t kdp_vtophys(pmap_t pmap, vm_offset_t va);

/*
 * kdp_vtophys
 *
 * Translate va within the given pmap to a physical address, returning 0
 * if no valid translation exists.
 */
pmap_paddr_t
kdp_vtophys(
	pmap_t pmap,
	vm_offset_t va)
{
	pmap_paddr_t pa;

	/* Ensure that the provided va resides within the provided pmap range. */
	if (!pmap || ((pmap != kernel_pmap) && ((va < pmap->min) || (va >= pmap->max)))) {
#ifdef KDP_VTOPHYS_DEBUG
		printf("kdp_vtophys(%08x, %016lx) not in range %08x .. %08x\n", (unsigned int) pmap,
		    (unsigned long) va,
		    (unsigned int) (pmap ? pmap->min : 0),
		    (unsigned int) (pmap ? pmap->max : 0));
#endif
		return 0;       /* Just return if no translation */
	}

	pa = pmap_find_pa(pmap, va);    /* Get the physical address */
	return pa;
}
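
/*
 * Illustrative sketch (hypothetical, not part of this file's interface):
 * because kdp_vtophys() reports failure by returning 0, callers must check
 * the result before using it. Only kdp_vtophys() and kernel_pmap below are
 * taken from this file; the wrapper name is made up.
 */
#if 0
static boolean_t
kdp_example_translate(vm_offset_t va, pmap_paddr_t *pa_out)
{
	pmap_paddr_t pa = kdp_vtophys(kernel_pmap, va);

	if (pa == 0) {
		return FALSE;   /* va has no valid translation */
	}
	*pa_out = pa;
	return TRUE;
}
#endif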

/*
 * kdp_machine_vm_read
 *
 * Verify that src is valid, and physically copy len bytes from src to
 * dst, translating if necessary. If translation is enabled
 * (kdp_trans_off is 0), a non-zero kdp_pmap specifies the pmap to use
 * when translating src.
 */

mach_vm_size_t
kdp_machine_vm_read( mach_vm_address_t src, caddr_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	mach_vm_size_t resid, cnt;
	pmap_t pmap;

#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_machine_vm_read1: src %x dst %x len %x - %08X %08X\n", src, dst, len, ((unsigned long *) src)[0], ((unsigned long *) src)[1]);
#endif

	cur_virt_src = (addr64_t) src;
	cur_virt_dst = (addr64_t) dst;

	if (kdp_trans_off) {
		kdp_readphysmem64_req_t rq;
		mach_vm_size_t ret;

		rq.address = src;
		rq.nbytes = (uint32_t)len;
		ret = kdp_machine_phys_read(&rq, dst, 0 /* unused */);
		return ret;
	} else {
		resid = len;

		if (kdp_pmap) {
			pmap = kdp_pmap;        /* If special pmap, use it */
		} else {
			pmap = kernel_pmap;     /* otherwise, use kernel's */
		}
		while (resid != 0) {
			/*
			 * Always translate the destination using the
			 * kernel_pmap.
			 */
			if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) {
				goto exit;
			}

			if ((cur_phys_src = kdp_vtophys(pmap, cur_virt_src)) == 0) {
				goto exit;
			}

			/* Attempt to ensure that there are valid translations for src and dst. */
			if (!kdp_read_io && ((!pmap_valid_address(cur_phys_dst)) || (!pmap_valid_address(cur_phys_src)))) {
				goto exit;
			}

			cnt = ARM_PGBYTES - (cur_virt_src & PAGE_MASK); /* Get length left on page */
			if (cnt > (ARM_PGBYTES - (cur_virt_dst & PAGE_MASK))) {
				cnt = ARM_PGBYTES - (cur_virt_dst & PAGE_MASK);
			}

			if (cnt > resid) {
				cnt = resid;
			}

#ifdef KDP_VM_READ_DEBUG
			kprintf("kdp_machine_vm_read2: pmap %08X, virt %016LLX, phys %016LLX\n",
			    pmap, cur_virt_src, cur_phys_src);
#endif
			bcopy_phys(cur_phys_src, cur_phys_dst, cnt);

			cur_virt_src += cnt;
			cur_virt_dst += cnt;
			resid -= cnt;
		}
	}
exit:
#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_machine_vm_read: ret %08X\n", len - resid);
#endif
	return len - resid;
}
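
/*
 * Illustrative sketch (hypothetical): reading another address space through
 * this routine amounts to pointing kdp_pmap at that pmap around the call,
 * per the comment above. Only kdp_pmap and kdp_machine_vm_read() come from
 * this file; the wrapper name and parameters are made up, and the sketch
 * assumes the debugger already holds the machine quiesced.
 */
#if 0
static mach_vm_size_t
kdp_example_read_task(pmap_t task_pmap, mach_vm_address_t uaddr,
    caddr_t buf, mach_vm_size_t len)
{
	mach_vm_size_t nread;

	kdp_pmap = task_pmap;   /* translate src in the task's pmap */
	nread = kdp_machine_vm_read(uaddr, buf, len);
	kdp_pmap = 0;           /* back to the default (kernel_pmap) */

	return nread;           /* bytes copied; may be short on a fault */
}
#endif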

mach_vm_size_t
kdp_machine_phys_read(kdp_readphysmem64_req_t *rq, caddr_t dst, uint16_t lcpu __unused)
{
	mach_vm_address_t src = rq->address;
	mach_vm_size_t len = rq->nbytes;

	addr64_t cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	mach_vm_size_t resid = len;
	mach_vm_size_t cnt = 0, cnt_src, cnt_dst;

#ifdef KDP_VM_READ_DEBUG
	kprintf("kdp_phys_read src %x dst %p len %x\n", src, dst, len);
#endif

	cur_virt_dst = (addr64_t) dst;
	cur_phys_src = (addr64_t) src;

	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) {
			goto exit;
		}

		/* Get length left on page */
		cnt_src = ARM_PGBYTES - (cur_phys_src & PAGE_MASK);
		cnt_dst = ARM_PGBYTES - (cur_phys_dst & PAGE_MASK);
		if (cnt_src > cnt_dst) {
			cnt = cnt_dst;
		} else {
			cnt = cnt_src;
		}
		if (cnt > resid) {
			cnt = resid;
		}

		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);    /* Copy stuff over */
		cur_phys_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}

exit:
	return len - resid;
}
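
/*
 * Worked example of the chunking above: with 16 KiB pages
 * (ARM_PGBYTES == 0x4000) and source and destination at the same page
 * offset, a 0x5000-byte read starting at physical 0x80003f00 proceeds as
 * 0x100 bytes (up to the page boundary), then 0x4000, then the remaining
 * 0xf00, so no single bcopy_phys() crosses a page on either side.
 */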

/*
 * kdp_machine_vm_write
 *
 * Copy len bytes from src to dst through their physical addresses, then
 * flush the data cache and invalidate the instruction cache for the
 * destination range.
 */
mach_vm_size_t
kdp_machine_vm_write( caddr_t src, mach_vm_address_t dst, mach_vm_size_t len)
{
	addr64_t cur_virt_src, cur_virt_dst;
	addr64_t cur_phys_src, cur_phys_dst;
	mach_vm_size_t resid, cnt, cnt_src, cnt_dst;

#ifdef KDP_VM_WRITE_DEBUG
	printf("kdp_machine_vm_write: src %p dst %llx len %llx - %08X %08X\n", src, dst, len, ((unsigned long *) src)[0], ((unsigned long *) src)[1]);
#endif

	cur_virt_src = (addr64_t) src;
	cur_virt_dst = (addr64_t) dst;

	resid = len;

	while (resid != 0) {
		if ((cur_phys_dst = kdp_vtophys(kernel_pmap, cur_virt_dst)) == 0) {
			goto exit;
		}

		if ((cur_phys_src = kdp_vtophys(kernel_pmap, cur_virt_src)) == 0) {
			goto exit;
		}

		/* Attempt to ensure that there are valid translations for src and dst. */
		/* No support for enabling writes for an invalid translation at the moment. */
		if ((!pmap_valid_address(cur_phys_dst)) || (!pmap_valid_address(cur_phys_src))) {
			goto exit;
		}

		cnt_src = ((cur_phys_src + ARM_PGBYTES) & (-ARM_PGBYTES)) - cur_phys_src;
		cnt_dst = ((cur_phys_dst + ARM_PGBYTES) & (-ARM_PGBYTES)) - cur_phys_dst;

		if (cnt_src > cnt_dst) {
			cnt = cnt_dst;
		} else {
			cnt = cnt_src;
		}
		if (cnt > resid) {
			cnt = resid;
		}

#ifdef KDP_VM_WRITE_DEBUG
		printf("kdp_machine_vm_write: cur_phys_src %llx cur_phys_dst %llx cnt %llx\n", cur_phys_src, cur_phys_dst, cnt);
#endif
		bcopy_phys(cur_phys_src, cur_phys_dst, cnt);    /* Copy stuff over */
		flush_dcache64(cur_phys_dst, (unsigned int)cnt, TRUE);
		invalidate_icache64(cur_phys_dst, (unsigned int)cnt, TRUE);

		cur_virt_src += cnt;
		cur_virt_dst += cnt;
		resid -= cnt;
	}
exit:
	return len - resid;
}
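
/*
 * Illustrative sketch (hypothetical): the cache maintenance above is what
 * makes this routine safe for patching instructions, e.g. planting a
 * debugger breakpoint. Only kdp_machine_vm_write() is from this file; the
 * helper name and the idea of passing a pre-encoded instruction word are
 * made up for illustration.
 */
#if 0
static boolean_t
kdp_example_patch_insn(mach_vm_address_t kva, uint32_t insn)
{
	/* A short write means some part of the range failed to translate. */
	return kdp_machine_vm_write((caddr_t)&insn, kva, sizeof(insn)) == sizeof(insn);
}
#endif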

mach_vm_size_t
kdp_machine_phys_write(kdp_writephysmem64_req_t *rq __unused, caddr_t src __unused,
    uint16_t lcpu __unused)
{
	return 0; /* unimplemented */
}

void
kern_collectth_state_size(uint64_t * tstate_count, uint64_t * tstate_size)
{
	uint64_t count = ml_get_max_cpu_number() + 1;

	*tstate_count = count;
	*tstate_size = sizeof(struct thread_command)
	    + (sizeof(arm_state_hdr_t)
#if defined(__arm64__)
	    + ARM_THREAD_STATE64_COUNT * sizeof(uint32_t));
#else
	    + ARM_THREAD_STATE32_COUNT * sizeof(uint32_t));
#endif
}
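
/*
 * Worked example of the sizing above, assuming the usual Mach-O and ARM
 * thread_status definitions (not restated in this file): with an 8-byte
 * struct thread_command, an 8-byte arm_state_hdr_t, and
 * ARM_THREAD_STATE64_COUNT == 68 32-bit words, each per-CPU LC_THREAD
 * entry occupies 8 + 8 + 68 * 4 = 288 bytes.
 */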

void
kern_collectth_state(thread_t thread __unused, void *buffer, uint64_t size, void ** iter)
{
	cpu_data_entry_t *cpuentryp = *iter;
	if (cpuentryp == NULL) {
		cpuentryp = &CpuDataEntries[0];
	}

	if (cpuentryp == &CpuDataEntries[ml_get_max_cpu_number()]) {
		*iter = NULL;
	} else {
		*iter = cpuentryp + 1;
	}

	struct cpu_data *cpudatap = cpuentryp->cpu_data_vaddr;

	struct thread_command *tc = (struct thread_command *)buffer;
	arm_state_hdr_t *hdr = (arm_state_hdr_t *)(void *)(tc + 1);
#if defined(__arm64__)
	hdr->flavor = ARM_THREAD_STATE64;
	hdr->count = ARM_THREAD_STATE64_COUNT;
	arm_thread_state64_t *state = (arm_thread_state64_t *)(void *)(hdr + 1);
#else
	hdr->flavor = ARM_THREAD_STATE;
	hdr->count = ARM_THREAD_STATE_COUNT;
	arm_thread_state_t *state = (arm_thread_state_t *)(void *)(hdr + 1);
#endif

	tc->cmd = LC_THREAD;
	tc->cmdsize = (uint32_t) size;

	if ((cpudatap != NULL) && (cpudatap->halt_status == CPU_HALTED_WITH_STATE)) {
		*state = cpudatap->halt_state;
		return;
	}

	processor_t processor = PERCPU_GET_RELATIVE(processor, cpu_data, cpudatap);
	if ((cpudatap == NULL) || (processor->active_thread == NULL)) {
		bzero(state, hdr->count * sizeof(uint32_t));
		return;
	}

#if defined(__arm64__)
	void *kpcb = processor->active_thread->machine.kpcb;
	if (kpcb != NULL) {
		arm_saved_state_t *saved_state = (arm_saved_state_t *)kpcb;

		state->fp = saved_state->ss_64.fp;
		state->lr = saved_state->ss_64.lr;
		state->sp = saved_state->ss_64.sp;
		state->pc = saved_state->ss_64.pc;
		state->cpsr = saved_state->ss_64.cpsr;
		bcopy(&saved_state->ss_64.x[0], &state->x[0], sizeof(state->x));
	} else {
		vm_offset_t kstackptr = (vm_offset_t) processor->active_thread->machine.kstackptr;
		arm_kernel_saved_state_t *saved_state = (arm_kernel_saved_state_t *) kstackptr;

		state->fp = saved_state->fp;
		state->lr = saved_state->lr;
		state->sp = saved_state->sp;
		state->pc = saved_state->pc_was_in_userspace ? (register_t)ptrauth_strip((void *)&_was_in_userspace, ptrauth_key_function_pointer) : 0;
		state->cpsr = PSR64_KERNEL_DEFAULT;
	}

#else /* __arm64__ */
	vm_offset_t kstackptr = (vm_offset_t) processor->active_thread->machine.kstackptr;
	arm_saved_state_t *saved_state = (arm_saved_state_t *) kstackptr;

	state->lr = saved_state->lr;
	state->sp = saved_state->sp;
	state->pc = saved_state->pc;
	state->cpsr = saved_state->cpsr;
	bcopy(&saved_state->r[0], &state->r[0], sizeof(state->r));

#endif /* !__arm64__ */
}
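
/*
 * Illustrative sketch (hypothetical) of the iterator contract above: the
 * caller starts with iter == NULL, which selects CpuDataEntries[0], and the
 * callee stores NULL back after emitting the last CPU. Only
 * kern_collectth_state() and its iterator behavior come from this file; the
 * driver loop and buffer handling are made up.
 */
#if 0
static void
kdp_example_collect_all_cpus(void *buf, uint64_t size)
{
	void *iter = NULL;      /* NULL means "start at CPU 0" */

	do {
		kern_collectth_state(THREAD_NULL, buf, size, &iter);
		/* ...emit buf (one LC_THREAD command) into the core file... */
	} while (iter != NULL);
}
#endif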

static const arm_state_hdr_t user32_thread_flavor_array[] = {
	{ ARM_THREAD_STATE, ARM_UNIFIED_THREAD_STATE_COUNT },
};

#if defined(__arm64__)
static const arm_state_hdr_t user64_thread_flavor_array[] = {
	{ ARM_THREAD_STATE64, ARM_THREAD_STATE64_COUNT },
	{ ARM_VFP_STATE, ARM_VFP_STATE_COUNT },
	{ ARM_EXCEPTION_STATE64, ARM_EXCEPTION_STATE64_COUNT },
};
#endif

void
kern_collect_userth_state_size(task_t task, uint64_t * tstate_count, uint64_t * tstate_size)
{
	uint64_t per_thread_size = 0;
	uint64_t num_flavors = 0;
	const arm_state_hdr_t * flavors;
#if defined(__arm64__)
	bool is64bit = task_has_64Bit_addr(task);

	if (is64bit) {
		flavors = user64_thread_flavor_array;
		num_flavors = sizeof(user64_thread_flavor_array) / sizeof(user64_thread_flavor_array[0]);
	} else {
		flavors = user32_thread_flavor_array;
		num_flavors = sizeof(user32_thread_flavor_array) / sizeof(user32_thread_flavor_array[0]);
	}
#else
	flavors = user32_thread_flavor_array;
	num_flavors = sizeof(user32_thread_flavor_array) / sizeof(user32_thread_flavor_array[0]);
#endif

	for (size_t i = 0; i < num_flavors; i++) {
		per_thread_size += sizeof(arm_state_hdr_t) + (flavors[i].count * sizeof(natural_t));
	}

	*tstate_count = task->thread_count;
	*tstate_size = sizeof(struct thread_command) + per_thread_size;
}
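
/*
 * Worked example of the sizing loop above: for the three 64-bit flavors the
 * per-thread payload is 3 * sizeof(arm_state_hdr_t) plus the sum of the
 * three flavor counts times sizeof(natural_t), and the LC_THREAD command
 * adds one sizeof(struct thread_command) on top. The concrete counts come
 * from the thread_status headers, not from this file.
 */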

void
kern_collect_userth_state(task_t task, thread_t thread, void *buffer, uint64_t size)
{
	kern_return_t ret;
	uint64_t num_flavors = 0;
	const arm_state_hdr_t * flavors;
#if defined(__arm64__)
	bool is64bit = task_has_64Bit_addr(task);

	if (is64bit) {
		flavors = user64_thread_flavor_array;
		num_flavors = sizeof(user64_thread_flavor_array) / sizeof(user64_thread_flavor_array[0]);
	} else {
		flavors = user32_thread_flavor_array;
		num_flavors = sizeof(user32_thread_flavor_array) / sizeof(user32_thread_flavor_array[0]);
	}
#else
	(void)task;
	flavors = user32_thread_flavor_array;
	num_flavors = sizeof(user32_thread_flavor_array) / sizeof(user32_thread_flavor_array[0]);
#endif

	struct thread_command *tc = buffer;
	tc->cmd = LC_THREAD;
	tc->cmdsize = (uint32_t)size;

	arm_state_hdr_t *hdr = (arm_state_hdr_t *)(tc + 1);

	for (size_t i = 0; i < num_flavors; i++) {
		hdr->flavor = flavors[i].flavor;
		hdr->count = flavors[i].count;
		/* Ensure we can't write past the end of the buffer */
		assert(hdr->count * sizeof(natural_t) + sizeof(arm_state_hdr_t) + ((uintptr_t)hdr - (uintptr_t)buffer) <= size);
		ret = machine_thread_get_state(thread, hdr->flavor, (thread_state_t)(hdr + 1), &hdr->count);
		assert(ret == KERN_SUCCESS);

		hdr = (arm_state_hdr_t *)((uintptr_t)(hdr + 1) + hdr->count * sizeof(natural_t));
	}
}

/*
 * kdp_core_start_addr
 *
 * Return the address at which the kernel core file starts.
 *
 * The kernel start address is VM_MIN_KERNEL_AND_KEXT_ADDRESS unless the
 * physical aperture has been relocated below
 * VM_MIN_KERNEL_AND_KEXT_ADDRESS, as on ARM_LARGE_MEMORY systems.
 */
vm_map_offset_t
kdp_core_start_addr(void)
{
#if defined(__arm64__)
	extern const vm_map_address_t physmap_base;
	return MIN(physmap_base, VM_MIN_KERNEL_AND_KEXT_ADDRESS);
#else /* !defined(__arm64__) */
	return VM_MIN_KERNEL_AND_KEXT_ADDRESS;
#endif /* !defined(__arm64__) */
}