/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_assert.h>
#include <mach/vm_types.h>
#include <mach/mach_time.h>
#include <kern/timer.h>
#include <kern/clock.h>
#include <kern/machine.h>
#include <mach/machine.h>
#include <mach/machine/vm_param.h>
#include <mach_kdp.h>
#include <kdp/kdp_udp.h>
#include <arm/caches_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>

#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>

#include <arm/misc_protos.h>

/*
 * Result values reported through *res by dcache_incoherent_io_flush64()
 * and dcache_incoherent_io_store64().
 */
#define LWOpDone 1
#define BWOpDone 3
#ifndef __ARM_COHERENT_IO__

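/*
 * Boot-arg tunable: on a single-CPU configuration, allow the cheaper
 * clean-to-PoU idle path in platform_cache_idle_enter() instead of a
 * full flush.
 */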
TUNABLE(bool, up_style_idle_exit, "up_style_idle_exit", false);

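/*
 * flush_dcache: write back and invalidate the data cache to the Point
 * of Coherency for [addr, addr + length). If 'phys' is TRUE, 'addr' is
 * a physical address reached through the physical aperture; otherwise
 * it is a kernel virtual address walked page by page. An external
 * cache controller, when registered via cpu_cache_dispatch, is asked
 * to clean/flush the same physical range.
 *
 * Hypothetical usage (illustrative only), e.g. for a buffer shared
 * with a non-coherent device:
 *     flush_dcache((vm_offset_t)buf, len, FALSE);
 */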
void
flush_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	vm_offset_t vaddr;
	addr64_t paddr;
	vm_size_t count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		FlushPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}

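/*
 * clean_dcache: write back (without invalidating) the data cache to
 * the Point of Coherency for [addr, addr + length). Address handling
 * is the same as in flush_dcache() above.
 */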
void
clean_dcache(
	vm_offset_t addr,
	unsigned length,
	boolean_t phys)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();
	vm_offset_t vaddr;
	addr64_t paddr;
	vm_size_t count;

	while (length > 0) {
		if (phys) {
			count = length;
			paddr = CAST_DOWN(pmap_paddr_t, addr);
			vaddr = phystokv_range(paddr, &count);
		} else {
			paddr = kvtophys(addr);
			vaddr = addr;
			count = PAGE_SIZE - (addr & PAGE_MASK);
			if (count > length) {
				count = length;
			}
		}
		CleanPoC_DcacheRegion(vaddr, (unsigned)count);
		if (paddr && (cpu_data_ptr->cpu_cache_dispatch != NULL)) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
		}
		addr += count;
		length -= count;
	}
	return;
}

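/*
 * flush_dcache_syscall: flush a user-requested range. Requests of at
 * least c_bulksize_op bytes are handled as a full data-cache flush
 * (plus an external-cache flush when a controller is present), on the
 * assumption that this is cheaper than walking a large range line by
 * line.
 */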
void
flush_dcache_syscall(
	vm_offset_t va,
	unsigned length)
{
	if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) {
		FlushPoC_Dcache();
		if (getCpuDatap()->cpu_cache_dispatch != NULL) {
			getCpuDatap()->cpu_cache_dispatch(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
	} else {
		FlushPoC_DcacheRegion((vm_offset_t) va, length);
	}
	return;
}

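/*
 * dcache_incoherent_io_flush64: clean and invalidate a physical range
 * on behalf of non-coherent I/O. When at least c_bulksize_op bytes
 * remain, the whole data cache is flushed instead and *res is set to
 * BWOpDone so the caller can skip the rest of the transfer. Otherwise
 * the range is walked page by page: pages in managed physical memory
 * are reached through the physical aperture, while any other page is
 * temporarily mapped through a per-CPU copy window with preemption
 * disabled.
 */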
void
dcache_incoherent_io_flush64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
		FlushPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
		}
		*res = BWOpDone;
	} else {
		vm_offset_t vaddr;
		pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
		vm_size_t count;
		unsigned int wimg_bits, index;

		while (size > 0) {
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}

				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			FlushPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			paddr += count;
			size -= count;
		}
	}

	return;
}

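/*
 * dcache_incoherent_io_store64: write-back-only counterpart of
 * dcache_incoherent_io_flush64(). Returns immediately when the page at
 * 'pa' is mapped IO, write-combined, or real-time, since there is
 * nothing cached to write back for those attributes.
 */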
void
dcache_incoherent_io_store64(
	addr64_t pa,
	unsigned int size,
	unsigned int remaining,
	unsigned int *res)
{
	pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa);
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	if (isphysmem(paddr)) {
		unsigned int wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
		if ((wimg_bits == VM_WIMG_IO) || (wimg_bits == VM_WIMG_WCOMB) || (wimg_bits == VM_WIMG_RT)) {
			return;
		}
	}

	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
		CleanPoC_Dcache();
		if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
		}
		*res = BWOpDone;
	} else {
		vm_offset_t vaddr;
		vm_size_t count;
		unsigned int wimg_bits, index;

		while (size > 0) {
			if (isphysmem(paddr)) {
				count = size;
				vaddr = phystokv_range(paddr, &count);
			} else {
				count = PAGE_SIZE - (paddr & PAGE_MASK);
				if (count > size) {
					count = size;
				}
				wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT));
				mp_disable_preemption();
				index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ | VM_PROT_WRITE, wimg_bits);
				vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK);
			}
			CleanPoC_DcacheRegion(vaddr, (unsigned)count);
			if (isphysmem(paddr)) {
				if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
					cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, (unsigned)count);
				}
			} else {
				pmap_unmap_cpu_windows_copy(index);
				mp_enable_preemption();
			}
			paddr += count;
			size -= count;
		}
	}

	return;
}

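/*
 * cache_sync_page: make instruction fetches coherent with prior data
 * writes to a page, e.g. after code has been paged in or modified. For
 * managed physical memory only the page's I-cache region is
 * invalidated; anything else falls back to a full D-cache flush plus a
 * full I-cache invalidate.
 */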
void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	} else {
		FlushPoC_Dcache();
		InvalidatePoU_Icache();
	}
}

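/*
 * platform_cache_init: enable the external cache controller, if one is
 * registered, and query it for the L2 size when cpuid-derived cache
 * info did not supply one.
 */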
void
platform_cache_init(
	void)
{
	cache_info_t *cpuid_cache_info;
	unsigned int cache_size = 0x0UL;
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	cpuid_cache_info = cache_info();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL);

		if (cpuid_cache_info->c_l2size == 0x0) {
			cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size);
			cpuid_cache_info->c_l2size = cache_size;
		}
	}
}

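/*
 * platform_cache_flush: clean and invalidate the entire data cache to
 * the Point of Coherency, including the external cache when present.
 */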
void
platform_cache_flush(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	FlushPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
}

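/*
 * platform_cache_clean: write back the entire data cache without
 * invalidating it, including the external cache when present.
 */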
void
platform_cache_clean(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
	}
}

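/*
 * platform_cache_shutdown: write back the data cache and notify the
 * external cache controller, if any, that it is being shut down.
 */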
void
platform_cache_shutdown(
	void)
{
	cpu_data_t *cpu_data_ptr = getCpuDatap();

	CleanPoC_Dcache();

	if (cpu_data_ptr->cpu_cache_dispatch != NULL) {
		cpu_data_ptr->cpu_cache_dispatch(cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL);
	}
}

void
platform_cache_disable(void)
{
}

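/*
 * platform_cache_idle_enter: prepare the caches before the CPU enters
 * an idle/power-down state.
 */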
void
platform_cache_idle_enter(
	void)
{
	platform_cache_disable();

	/*
	 * If we're only using a single CPU, just write back any
	 * dirty cachelines. We can avoid doing housekeeping
	 * on CPU data that would normally be modified by other
	 * CPUs.
	 */
	if (up_style_idle_exit && (real_ncpus == 1)) {
		CleanPoU_Dcache();
	} else {
		FlushPoU_Dcache();
	}
}

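/*
 * platform_cache_batch_wimg: report whether a WIMG change covering
 * 'size' bytes should be handled as a single bulk cache operation
 * (TRUE once the size reaches c_bulksize_op) rather than per page.
 */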
boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
	)
{
	boolean_t do_cache_op = FALSE;

	if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op))) {
		do_cache_op = TRUE;
	}

	return do_cache_op;
}

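/*
 * platform_cache_flush_wimg: perform the bulk operation promised by
 * platform_cache_batch_wimg(): a full data-cache flush, including the
 * external cache when present.
 */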
void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg
	)
{
	FlushPoC_Dcache();
	if (getCpuDatap()->cpu_cache_dispatch != NULL) {
		getCpuDatap()->cpu_cache_dispatch(getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
	}
}

#else /* __ARM_COHERENT_IO__ */

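/*
 * On platforms with coherent I/O, devices observe the CPU caches, so
 * the cache-maintenance entry points reduce to a data synchronization
 * barrier or to empty stubs.
 */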
void
flush_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}

void
clean_dcache(
	__unused vm_offset_t addr,
	__unused unsigned length,
	__unused boolean_t phys)
{
	__builtin_arm_dsb(DSB_SY);
}

void
flush_dcache_syscall(
	__unused vm_offset_t va,
	__unused unsigned length)
{
	__builtin_arm_dsb(DSB_SY);
}

void
dcache_incoherent_io_flush64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	*res = LWOpDone;
	return;
}

void
dcache_incoherent_io_store64(
	__unused addr64_t pa,
	__unused unsigned int size,
	__unused unsigned int remaining,
	__unused unsigned int *res)
{
	__builtin_arm_dsb(DSB_SY);
	*res = LWOpDone;
	return;
}

void
cache_sync_page(
	ppnum_t pp
	)
{
	pmap_paddr_t paddr = ptoa(pp);

	if (isphysmem(paddr)) {
		vm_offset_t vaddr = phystokv(paddr);
		InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE);
	}
}

void
platform_cache_init(
	void)
{
}

void
platform_cache_flush(
	void)
{
}

void
platform_cache_clean(
	void)
{
}

void
platform_cache_shutdown(
	void)
{
}

void
platform_cache_idle_enter(
	void)
{
}

boolean_t
platform_cache_batch_wimg(
	__unused unsigned int new_wimg,
	__unused unsigned int size
	)
{
	return TRUE;
}

void
platform_cache_flush_wimg(
	__unused unsigned int new_wimg)
{
}

#endif /* __ARM_COHERENT_IO__ */