/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <kern/iotrace.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#if !MACH_KDP
#include <kdp/kdp_callout.h>
#endif /* !MACH_KDP */
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/caches_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>

#include <sys/errno.h>

#include <libkern/section_keywords.h>
#include <libkern/OSDebug.h>
#define INT_SIZE (BYTE_SIZE * sizeof (int))

#define BCOPY_PHYS_SRC_IS_PHYS(flags) (((flags) & cppvPsrc) != 0)
#define BCOPY_PHYS_DST_IS_PHYS(flags) (((flags) & cppvPsnk) != 0)
#define BCOPY_PHYS_SRC_IS_USER(flags) (((flags) & (cppvPsrc | cppvKmap)) == 0)
#define BCOPY_PHYS_DST_IS_USER(flags) (((flags) & (cppvPsnk | cppvKmap)) == 0)
static kern_return_t
bcopy_phys_internal(addr64_t src, addr64_t dst, vm_size_t bytes, int flags)
{
	unsigned int src_index;
	unsigned int dst_index;
	vm_offset_t src_offset;
	vm_offset_t dst_offset;
	unsigned int wimg_bits_src, wimg_bits_dst;
	unsigned int cpu_num = 0;
	ppnum_t pn_src;
	ppnum_t pn_dst;
	addr64_t end __assert_only;
	kern_return_t res = KERN_SUCCESS;

	if (!BCOPY_PHYS_SRC_IS_USER(flags)) {
		assert(!__improbable(os_add_overflow(src, bytes, &end)));
	}
	if (!BCOPY_PHYS_DST_IS_USER(flags)) {
		assert(!__improbable(os_add_overflow(dst, bytes, &end)));
	}

	while ((bytes > 0) && (res == KERN_SUCCESS)) {
		src_offset = src & PAGE_MASK;
		dst_offset = dst & PAGE_MASK;
		boolean_t use_copy_window_src = FALSE;
		boolean_t use_copy_window_dst = FALSE;
		vm_size_t count = bytes;
		vm_size_t count2 = bytes;
		if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			use_copy_window_src = !pmap_valid_address(src);
			pn_src = (ppnum_t)(src >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count = PAGE_SIZE - src_offset;
			wimg_bits_src = pmap_cache_attributes(pn_src);
			if ((wimg_bits_src & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_src = TRUE;
			}
#else
			if (use_copy_window_src) {
				wimg_bits_src = pmap_cache_attributes(pn_src);
				count = PAGE_SIZE - src_offset;
			}
#endif
		}
		if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			// write preflighting needed for things like dtrace which may write static read-only mappings
			use_copy_window_dst = (!pmap_valid_address(dst) || !mmu_kvtop_wpreflight(phystokv((pmap_paddr_t)dst)));
			pn_dst = (ppnum_t)(dst >> PAGE_SHIFT);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
			count2 = PAGE_SIZE - dst_offset;
			wimg_bits_dst = pmap_cache_attributes(pn_dst);
			if ((wimg_bits_dst & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
				use_copy_window_dst = TRUE;
			}
#else
			if (use_copy_window_dst) {
				wimg_bits_dst = pmap_cache_attributes(pn_dst);
				count2 = PAGE_SIZE - dst_offset;
			}
#endif
		}

		char *tmp_src;
		char *tmp_dst;

		if (use_copy_window_src || use_copy_window_dst) {
			mp_disable_preemption();
			cpu_num = cpu_number();
		}

		if (use_copy_window_src) {
			src_index = pmap_map_cpu_windows_copy(pn_src, VM_PROT_READ, wimg_bits_src);
			tmp_src = (char*)(pmap_cpu_windows_copy_addr(cpu_num, src_index) + src_offset);
		} else if (BCOPY_PHYS_SRC_IS_PHYS(flags)) {
			tmp_src = (char*)phystokv_range((pmap_paddr_t)src, &count);
		} else {
			tmp_src = (char*)src;
		}
		if (use_copy_window_dst) {
			dst_index = pmap_map_cpu_windows_copy(pn_dst, VM_PROT_READ | VM_PROT_WRITE, wimg_bits_dst);
			tmp_dst = (char*)(pmap_cpu_windows_copy_addr(cpu_num, dst_index) + dst_offset);
		} else if (BCOPY_PHYS_DST_IS_PHYS(flags)) {
			tmp_dst = (char*)phystokv_range((pmap_paddr_t)dst, &count2);
		} else {
			tmp_dst = (char*)dst;
		}

		if (count > count2) {
			count = count2;
		}
		if (count > bytes) {
			count = bytes;
		}

		if (BCOPY_PHYS_SRC_IS_USER(flags)) {
			res = copyin((user_addr_t)src, tmp_dst, count);
		} else if (BCOPY_PHYS_DST_IS_USER(flags)) {
			res = copyout(tmp_src, (user_addr_t)dst, count);
		} else {
			bcopy(tmp_src, tmp_dst, count);
		}

		if (use_copy_window_src) {
			pmap_unmap_cpu_windows_copy(src_index);
		}
		if (use_copy_window_dst) {
			pmap_unmap_cpu_windows_copy(dst_index);
		}
		if (use_copy_window_src || use_copy_window_dst) {
			mp_enable_preemption();
		}

		src += count;
		dst += count;
		bytes -= count;
	}
	return res;
}

void
bcopy_phys(addr64_t src, addr64_t dst, vm_size_t bytes)
{
	bcopy_phys_internal(src, dst, bytes, cppvPsrc | cppvPsnk);
}
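
/*
 * Illustrative sketch only (hypothetical physical addresses): bcopy_phys()
 * is the physical-to-physical case of bcopy_phys_internal():
 *
 *	addr64_t src_pa = 0x80000000ULL;
 *	addr64_t dst_pa = 0x80004000ULL;
 *	bcopy_phys(src_pa, dst_pa, PAGE_SIZE);
 */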

void
bzero_phys_nc(addr64_t src64, vm_size_t bytes)
{
	bzero_phys(src64, bytes);
}

extern void *secure_memset(void *, int, size_t);

/* Zero bytes starting at a physical address */
void
bzero_phys(addr64_t src, vm_size_t bytes)
{
	unsigned int wimg_bits;
	unsigned int cpu_num = cpu_number();
	ppnum_t pn;
	addr64_t end __assert_only;

	assert(!__improbable(os_add_overflow(src, bytes, &end)));

	vm_offset_t offset = src & PAGE_MASK;
	while (bytes > 0) {
		vm_size_t count = bytes;

		boolean_t use_copy_window = !pmap_valid_address(src);
		pn = (ppnum_t)(src >> PAGE_SHIFT);
		wimg_bits = pmap_cache_attributes(pn);
#if !defined(__ARM_COHERENT_IO__) && !__ARM_PTE_PHYSMAP__
		count = PAGE_SIZE - offset;
		if ((wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
			use_copy_window = TRUE;
		}
#else
		if (use_copy_window) {
			count = PAGE_SIZE - offset;
		}
#endif
		char *buf;
		unsigned int index;
		if (use_copy_window) {
			mp_disable_preemption();
			cpu_num = cpu_number();
			index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
			buf = (char *)(pmap_cpu_windows_copy_addr(cpu_num, index) + offset);
		} else {
			buf = (char *)phystokv_range((pmap_paddr_t)src, &count);
		}

		if (count > bytes) {
			count = bytes;
		}

		switch (wimg_bits & VM_WIMG_MASK) {
		case VM_WIMG_DEFAULT:
		case VM_WIMG_WCOMB:
		case VM_WIMG_INNERWBACK:
		case VM_WIMG_WTHRU:
#if HAS_UCNORMAL_MEM
		case VM_WIMG_RT:
#endif
			bzero(buf, count);
			break;
		default:
			/* 'dc zva' performed by bzero is not safe for device memory */
			secure_memset((void*)buf, 0, count);
		}

		if (use_copy_window) {
			pmap_unmap_cpu_windows_copy(index);
			mp_enable_preemption();
		}

		src += count;
		bytes -= count;
		offset = 0;
	}
}
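
/*
 * Illustrative sketch only (hypothetical physical address): bzero_phys()
 * accepts spans that cross page boundaries and picks the zeroing routine
 * appropriate to each page's cache attributes:
 *
 *	bzero_phys(0x80000800ULL, 2 * PAGE_SIZE); // need not be page-aligned
 */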

/*
 * Read data from a physical address.
 */

#if BUILD_QUAD_WORD_FUNCS
static inline uint128_t
__read128(vm_address_t addr)
{
	uint64_t hi, lo;

	asm volatile (
	    "ldp %[lo], %[hi], [%[addr]]" "\n"
	    : [lo] "=r"(lo), [hi] "=r"(hi)
	    : [addr] "r"(addr)
	    : "memory"
	);

	return (((uint128_t)hi) << 64) + lo;
}
#endif /* BUILD_QUAD_WORD_FUNCS */

static uint128_t
ml_phys_read_data(pmap_paddr_t paddr, int size)
{
	vm_address_t addr;
	ppnum_t pn = atop_kernel(paddr);
	ppnum_t pn_end = atop_kernel(paddr + size - 1);
	uint128_t result = 0;
	uint8_t s1;
	uint16_t s2;
	uint32_t s4;
	uint64_t s8;
	unsigned int index;
	bool use_copy_window = true;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	bool istate, timeread = false;
	uint64_t sabs, eabs;

	uint32_t report_phy_read_delay = os_atomic_load(&report_phy_read_delay_to, relaxed);
	uint32_t const trace_phy_read_delay = os_atomic_load(&trace_phy_read_delay_to, relaxed);

	if (__improbable(report_phy_read_delay != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = ml_get_timebase();
		timeread = true;
	}
#ifdef ML_IO_SIMULATE_STRETCHED_ENABLED
	if (__improbable(timeread && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */
#endif /* ML_IO_TIMEOUTS_ENABLED */

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	if (pmap_valid_address(paddr)) {
		addr = phystokv(paddr);
		use_copy_window = false;
	}
#endif /* defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__ */

	if (use_copy_window) {
		mp_disable_preemption();
		unsigned int wimg_bits = pmap_cache_attributes(pn);
		index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
		addr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);
	}

	switch (size) {
	case 1:
		s1 = *(volatile uint8_t *)addr;
		result = s1;
		break;
	case 2:
		s2 = *(volatile uint16_t *)addr;
		result = s2;
		break;
	case 4:
		s4 = *(volatile uint32_t *)addr;
		result = s4;
		break;
	case 8:
		s8 = *(volatile uint64_t *)addr;
		result = s8;
		break;
#if BUILD_QUAD_WORD_FUNCS
	case 16:
		result = __read128(addr);
		break;
#endif /* BUILD_QUAD_WORD_FUNCS */
	default:
		panic("Invalid size %d for ml_phys_read_data", size);
		break;
	}

	if (use_copy_window) {
		pmap_unmap_cpu_windows_copy(index);
		mp_enable_preemption();
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	if (__improbable(timeread)) {
		eabs = ml_get_timebase();

		/* Report the physical address; the copy-window VA is already unmapped here. */
		iotrace(IOTRACE_PHYS_READ, 0, paddr, size, result, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			DTRACE_PHYSLAT4(physread, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, result);

			uint64_t override = 0;
			override_io_timeouts(0, paddr, &override, NULL);

			if (override != 0) {
#if SCHED_HYGIENE_DEBUG
				/*
				 * The IO timeout was overridden. As interrupts are disabled in
				 * order to accurately measure IO time this can cause the
				 * interrupt masked timeout threshold to be exceeded. If the
				 * interrupt masked debug mode is set to panic, abandon the
				 * measurement. If in trace mode leave it as-is for
				 * observability.
				 */
				if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_PANIC) {
					ml_spin_debug_clear(current_thread());
					ml_irq_debug_abandon();
				}
#endif
				report_phy_read_delay = override;
			}
		}

		if (__improbable((eabs - sabs) > report_phy_read_delay)) {
			if (phy_read_panic && (machine_timeout_suspended() == FALSE)) {
				const uint64_t hi = (uint64_t)(result >> 64);
				const uint64_t lo = (uint64_t)(result);
				panic("Read from physical addr 0x%llx took %llu ns, "
				    "result: 0x%016llx%016llx (start: %llu, end: %llu), ceiling: %llu",
				    (unsigned long long)paddr, (eabs - sabs), hi, lo, sabs, eabs,
				    (uint64_t)report_phy_read_delay);
			}
		}

		if (__improbable(trace_phy_read_delay > 0 && (eabs - sabs) > trace_phy_read_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_READ),
			    (eabs - sabs), sabs, paddr, result);
		}

		ml_set_interrupts_enabled(istate);
	}
#endif /* ML_IO_TIMEOUTS_ENABLED */

	return result;
}

unsigned int
ml_phys_read(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_word(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 4);
}

unsigned int
ml_phys_read_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_word_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 4);
}

unsigned int
ml_phys_read_half(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 2);
}

unsigned int
ml_phys_read_half_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 2);
}

unsigned int
ml_phys_read_byte(vm_offset_t paddr)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr, 1);
}

unsigned int
ml_phys_read_byte_64(addr64_t paddr64)
{
	return (unsigned int)ml_phys_read_data((pmap_paddr_t)paddr64, 1);
}

unsigned long long
ml_phys_read_double(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 8);
}

unsigned long long
ml_phys_read_double_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 8);
}

#if BUILD_QUAD_WORD_FUNCS
uint128_t
ml_phys_read_quad(vm_offset_t paddr)
{
	return ml_phys_read_data((pmap_paddr_t)paddr, 16);
}

uint128_t
ml_phys_read_quad_64(addr64_t paddr64)
{
	return ml_phys_read_data((pmap_paddr_t)paddr64, 16);
}
#endif /* BUILD_QUAD_WORD_FUNCS */
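
/*
 * The wrappers above forward to ml_phys_read_data() with a fixed access
 * width. Note the "_64" suffix refers to the width of the physical address
 * argument, not the access size: ml_phys_read_64() performs a 4-byte read
 * at a 64-bit address. A single call must not span a page boundary.
 * Illustrative sketch only (hypothetical physical addresses):
 *
 *	unsigned int       w = ml_phys_read(0x80001000);               // 4-byte read
 *	unsigned int       b = ml_phys_read_byte(0x80001004);          // 1-byte read
 *	unsigned long long d = ml_phys_read_double_64(0x80001008ULL);  // 8-byte read
 */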

/*
 * Write data to a physical address.
 */

#if BUILD_QUAD_WORD_FUNCS
static inline void
__write128(vm_address_t addr, uint128_t data)
{
	const uint64_t hi = (uint64_t)(data >> 64);
	const uint64_t lo = (uint64_t)(data);

	asm volatile (
	    "stp %[lo], %[hi], [%[addr]]" "\n"
	    : /**/
	    : [lo] "r"(lo), [hi] "r"(hi), [addr] "r"(addr)
	    : "memory"
	);
}
#endif /* BUILD_QUAD_WORD_FUNCS */

static void
ml_phys_write_data(pmap_paddr_t paddr, uint128_t data, int size)
{
	vm_address_t addr;
	ppnum_t pn = atop_kernel(paddr);
	ppnum_t pn_end = atop_kernel(paddr + size - 1);
	unsigned int index;
	bool use_copy_window = true;

	if (__improbable(pn_end != pn)) {
		panic("%s: paddr 0x%llx spans a page boundary", __func__, (uint64_t)paddr);
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	bool istate, timewrite = false;
	uint64_t sabs, eabs;

	uint32_t report_phy_write_delay = os_atomic_load(&report_phy_write_delay_to, relaxed);
	uint32_t const trace_phy_write_delay = os_atomic_load(&trace_phy_write_delay_to, relaxed);

	if (__improbable(report_phy_write_delay != 0)) {
		istate = ml_set_interrupts_enabled(FALSE);
		sabs = ml_get_timebase();
		timewrite = true;
	}
#ifdef ML_IO_SIMULATE_STRETCHED_ENABLED
	if (__improbable(timewrite && simulate_stretched_io)) {
		sabs -= simulate_stretched_io;
	}
#endif /* ML_IO_SIMULATE_STRETCHED_ENABLED */
#endif /* ML_IO_TIMEOUTS_ENABLED */

#if defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__
	if (pmap_valid_address(paddr)) {
		addr = phystokv(paddr);
		use_copy_window = false;
	}
#endif /* defined(__ARM_COHERENT_IO__) || __ARM_PTE_PHYSMAP__ */

	if (use_copy_window) {
		mp_disable_preemption();
		unsigned int wimg_bits = pmap_cache_attributes(pn);
		index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
		addr = pmap_cpu_windows_copy_addr(cpu_number(), index) | ((uint32_t)paddr & PAGE_MASK);
	}

	switch (size) {
	case 1:
		*(volatile uint8_t *)addr = (uint8_t)data;
		break;
	case 2:
		*(volatile uint16_t *)addr = (uint16_t)data;
		break;
	case 4:
		*(volatile uint32_t *)addr = (uint32_t)data;
		break;
	case 8:
		*(volatile uint64_t *)addr = (uint64_t)data;
		break;
#if BUILD_QUAD_WORD_FUNCS
	case 16:
		__write128(addr, data);
		break;
#endif /* BUILD_QUAD_WORD_FUNCS */
	default:
		panic("Invalid size %d for ml_phys_write_data", size);
	}

	if (use_copy_window) {
		pmap_unmap_cpu_windows_copy(index);
		mp_enable_preemption();
	}

#ifdef ML_IO_TIMEOUTS_ENABLED
	if (__improbable(timewrite)) {
		eabs = ml_get_timebase();

		iotrace(IOTRACE_PHYS_WRITE, 0, paddr, size, data, sabs, eabs - sabs);

		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
			DTRACE_PHYSLAT4(physwrite, uint64_t, (eabs - sabs),
			    uint64_t, paddr, uint32_t, size, uint64_t, data);

			uint64_t override = 0;
			override_io_timeouts(0, paddr, NULL, &override);
			if (override != 0) {
#if SCHED_HYGIENE_DEBUG
				/*
				 * The IO timeout was overridden. As interrupts are disabled in
				 * order to accurately measure IO time this can cause the
				 * interrupt masked timeout threshold to be exceeded. If the
				 * interrupt masked debug mode is set to panic, abandon the
				 * measurement. If in trace mode leave it as-is for
				 * observability.
				 */
				if (interrupt_masked_debug_mode == SCHED_HYGIENE_MODE_PANIC) {
					ml_spin_debug_clear(current_thread());
					ml_irq_debug_abandon();
				}
#endif
				report_phy_write_delay = override;
			}
		}

		if (__improbable((eabs - sabs) > report_phy_write_delay)) {
			if (phy_write_panic && (machine_timeout_suspended() == FALSE)) {
				const uint64_t hi = (uint64_t)(data >> 64);
				const uint64_t lo = (uint64_t)(data);
				panic("Write to physical addr 0x%llx took %llu ns, "
				    "data: 0x%016llx%016llx (start: %llu, end: %llu), ceiling: %llu",
				    (unsigned long long)paddr, (eabs - sabs), hi, lo, sabs, eabs,
				    (uint64_t)report_phy_write_delay);
			}
		}

		if (__improbable(trace_phy_write_delay > 0 && (eabs - sabs) > trace_phy_write_delay)) {
			KDBG(MACHDBG_CODE(DBG_MACH_IO, DBC_MACH_IO_PHYS_WRITE),
			    (eabs - sabs), sabs, paddr, data);
		}

		ml_set_interrupts_enabled(istate);
	}
#endif /* ML_IO_TIMEOUTS_ENABLED */
}

void
ml_phys_write_byte(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 1);
}

void
ml_phys_write_byte_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 1);
}

void
ml_phys_write_half(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 2);
}

void
ml_phys_write_half_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 2);
}

void
ml_phys_write(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_word(vm_offset_t paddr, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 4);
}

void
ml_phys_write_word_64(addr64_t paddr64, unsigned int data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 4);
}

void
ml_phys_write_double(vm_offset_t paddr, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 8);
}

void
ml_phys_write_double_64(addr64_t paddr64, unsigned long long data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 8);
}

#if BUILD_QUAD_WORD_FUNCS
void
ml_phys_write_quad(vm_offset_t paddr, uint128_t data)
{
	ml_phys_write_data((pmap_paddr_t)paddr, data, 16);
}

void
ml_phys_write_quad_64(addr64_t paddr64, uint128_t data)
{
	ml_phys_write_data((pmap_paddr_t)paddr64, data, 16);
}
#endif /* BUILD_QUAD_WORD_FUNCS */
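
/*
 * Illustrative sketch only (hypothetical physical addresses), mirroring the
 * read accessors above:
 *
 *	ml_phys_write_byte(0x80001000, 0xFF);           // 1-byte write
 *	ml_phys_write(0x80001004, 0xDEADBEEF);          // 4-byte write
 *	ml_phys_write_double_64(0x80001008ULL, ~0ULL);  // 8-byte write
 */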

/*
 * Set indicated bit in bit string.
 */
void
setbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] |= 1U << (bitno % INT_SIZE);
}

/*
 * Clear indicated bit in bit string.
 */
void
clrbit(int bitno, int *s)
{
	s[bitno / INT_SIZE] &= ~(1U << (bitno % INT_SIZE));
}

/*
 * Test if indicated bit is set in bit string.
 */
int
testbit(int bitno, int *s)
{
	return s[bitno / INT_SIZE] & (1U << (bitno % INT_SIZE));
}

/*
 * Find first bit set in bit string.
 */
int
ffsbit(int *s)
{
	int offset;

	for (offset = 0; !*s; offset += INT_SIZE, ++s) {
		;
	}
	return offset + __builtin_ctz(*s);
}
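
/*
 * Illustrative sketch only: these helpers treat an array of ints as a flat
 * bit vector, INT_SIZE (32 on arm64) bits per element:
 *
 *	int bits[2] = { 0, 0 };      // 2 * 32 = 64 usable bits
 *	setbit(33, bits);            // sets bit 1 of bits[1]
 *	assert(testbit(33, bits));
 *	assert(ffsbit(bits) == 33);  // bits must not be all-zero here
 *	clrbit(33, bits);
 */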

int
ffs(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffs because it generates a call to
	 * 'ffs'
	 */
	return 1 + __builtin_ctz(mask);
}

int
ffsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	/*
	 * NOTE: cannot use __builtin_ffsll because it generates a call to
	 * 'ffsll'
	 */
	return 1 + __builtin_ctzll(mask);
}

/*
 * Find last bit set in bit string.
 */
int
fls(unsigned int mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clz(mask);
}

int
flsll(unsigned long long mask)
{
	if (mask == 0) {
		return 0;
	}

	return (sizeof(mask) << 3) - __builtin_clzll(mask);
}
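
/*
 * Illustrative values: ffs()/ffsll() return the 1-based index of the least
 * significant set bit, fls()/flsll() the 1-based index of the most
 * significant set bit, and all four return 0 for a zero mask:
 *
 *	ffs(0x90) == 5, fls(0x90) == 8, ffs(0) == 0, fls(0) == 0
 */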

kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	if ((which & (cppvPsrc | cppvPsnk)) == 0) { /* Make sure that at most one side is virtual */
		panic("%s: no more than 1 parameter may be virtual", __func__);
	}

	kern_return_t res = bcopy_phys_internal(source, sink, size, which);

#ifndef __ARM_COHERENT_IO__
	if (which & cppvFsrc) {
		flush_dcache64(source, size, ((which & cppvPsrc) == cppvPsrc));
	}

	if (which & cppvFsnk) {
		flush_dcache64(sink, size, ((which & cppvPsnk) == cppvPsnk));
	}
#endif

	return res;
}
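
/*
 * Illustrative sketch only (hypothetical `kva` buffer and physical address):
 * copy a kernel virtual buffer to a physical page and flush the destination
 * from the data cache:
 *
 *	kern_return_t kr = copypv((addr64_t)(uintptr_t)kva,  // kernel VA source
 *	    0x80000000ULL,                                   // physical sink
 *	    PAGE_SIZE, cppvPsnk | cppvKmap | cppvFsnk);
 */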

int
clr_be_bit(void)
{
	panic("clr_be_bit");
	return 0;
}

boolean_t
ml_probe_read(
	__unused vm_offset_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read() unimplemented");
	return 1;
}

boolean_t
ml_probe_read_64(
	__unused addr64_t paddr,
	__unused unsigned int *val)
{
	panic("ml_probe_read_64() unimplemented");
	return 1;
}


void
ml_thread_policy(
	__unused thread_t thread,
	__unused unsigned policy_id,
	__unused unsigned policy_info)
{
	// <rdar://problem/7141284>: Reduce print noise
	// kprintf("ml_thread_policy() unimplemented\n");
}

__dead2
void
panic_unimplemented(void)
{
	panic("Not yet implemented.");
}

/* ARM64_TODO <rdar://problem/9198953> */
void abort(void) __dead2;

void
abort(void)
{
	panic("Abort.");
}


#if !MACH_KDP
void
kdp_register_callout(kdp_callout_fn_t fn, void *arg)
{
#pragma unused(fn,arg)
}
#endif

/*
 * Get a quick virtual mapping of a physical page and run a callback on that
 * page's virtual address.
 *
 * @param dst64 Physical address to access (doesn't need to be page-aligned).
 * @param bytes Number of bytes to be accessed. This cannot cross page boundaries.
 * @param func Callback function to call with the page's virtual address.
 * @param arg Argument passed directly to `func`.
 *
 * @return The return value from `func`.
 */
int
apply_func_phys(
	addr64_t dst64,
	vm_size_t bytes,
	int (*func)(void * buffer, vm_size_t bytes, void * arg),
	void * arg)
{
	/* The physical aperture is only guaranteed to work with kernel-managed addresses. */
	if (!pmap_valid_address(dst64)) {
		panic("%s address error: passed in address (%#llx) not a kernel managed address",
		    __FUNCTION__, dst64);
	}

	/* Ensure we stay within a single page */
	if ((((uint32_t)dst64 & (ARM_PGBYTES - 1)) + bytes) > ARM_PGBYTES) {
		panic("%s alignment error: tried accessing addresses spanning more than one page %#llx %#lx",
		    __FUNCTION__, dst64, bytes);
	}

	return func((void*)phystokv(dst64), bytes, arg);
}
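
/*
 * Illustrative sketch only (hypothetical callback and physical address):
 *
 *	static int
 *	sum_bytes(void *buffer, vm_size_t bytes, void *arg)
 *	{
 *		uint64_t *sum = arg;
 *		for (vm_size_t i = 0; i < bytes; i++) {
 *			*sum += ((uint8_t *)buffer)[i];
 *		}
 *		return 0;
 *	}
 *
 *	uint64_t sum = 0;
 *	int err = apply_func_phys(0x80000000ULL, 64, sum_bytes, &sum);
 */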