1 | /* |
2 | * Copyright (c) 2010 Apple Inc. All rights reserved. |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
5 | * |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License |
8 | * Version 2.0 (the 'License'). You may not use this file except in |
9 | * compliance with the License. The rights granted to you under the License |
10 | * may not be used to create, or enable the creation or redistribution of, |
11 | * unlawful or unlicensed copies of an Apple operating system, or to |
12 | * circumvent, violate, or enable the circumvention or violation of, any |
13 | * terms of an Apple operating system software license agreement. |
14 | * |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. |
17 | * |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and |
24 | * limitations under the License. |
25 | * |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ |
28 | #include <mach_assert.h> |
29 | #include <mach/vm_types.h> |
30 | #include <mach/mach_time.h> |
31 | #include <kern/timer.h> |
32 | #include <kern/clock.h> |
33 | #include <kern/machine.h> |
34 | #include <mach/machine.h> |
35 | #include <mach/machine/vm_param.h> |
36 | #include <mach_kdp.h> |
37 | #include <kdp/kdp_udp.h> |
38 | #include <arm/caches_internal.h> |
39 | #include <arm/cpuid.h> |
40 | #include <arm/cpu_data.h> |
41 | #include <arm/cpu_data_internal.h> |
42 | #include <arm/cpu_internal.h> |
43 | |
44 | #include <vm/vm_kern.h> |
45 | #include <vm/vm_map.h> |
46 | #include <vm/pmap.h> |
47 | |
48 | #include <arm/misc_protos.h> |
49 | |
50 | /* |
51 | * dcache_incoherent_io_flush64() dcache_incoherent_io_store64() result info |
52 | */ |
53 | #define LWOpDone 1 |
54 | #define BWOpDone 3 |
55 | |
56 | #ifndef __ARM_COHERENT_IO__ |
57 | |
58 | extern boolean_t up_style_idle_exit; |
59 | |
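/*
 * flush_dcache: clean and invalidate (flush) a range of the data cache to
 * the point of coherency.  'addr' is a physical address when 'phys' is
 * TRUE, otherwise a kernel virtual address.  If an outer cache controller
 * is registered via cpu_cache_dispatch, the corresponding physical range
 * is flushed there as well; virtual ranges are handled page by page since
 * the underlying physical pages need not be contiguous.
 */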
60 | void |
61 | flush_dcache( |
62 | vm_offset_t addr, |
63 | unsigned length, |
64 | boolean_t phys) |
65 | { |
66 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
67 | |
68 | if (phys) { |
69 | pmap_paddr_t paddr; |
70 | vm_offset_t vaddr; |
71 | |
72 | paddr = CAST_DOWN(pmap_paddr_t, addr); |
73 | if (!isphysmem(paddr)) |
74 | return; |
75 | vaddr = phystokv(paddr); |
76 | FlushPoC_DcacheRegion( (vm_offset_t) vaddr, length); |
77 | |
78 | if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) |
79 | ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( |
80 | cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, length); |
81 | return; |
82 | } |
83 | if (cpu_data_ptr->cpu_cache_dispatch == (cache_dispatch_t) NULL) { |
84 | FlushPoC_DcacheRegion( (vm_offset_t) addr, length); |
85 | } else { |
86 | addr64_t paddr; |
87 | uint32_t count; |
88 | |
89 | while (length > 0) { |
90 | count = PAGE_SIZE - (addr & PAGE_MASK); |
91 | if (count > length) |
92 | count = length; |
93 | FlushPoC_DcacheRegion( (vm_offset_t) addr, count); |
94 | paddr = kvtophys(addr); |
95 | if (paddr) |
96 | ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( |
97 | cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) paddr, count); |
98 | addr += count; |
99 | length -= count; |
100 | } |
101 | } |
102 | return; |
103 | } |
104 | |
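/*
 * clean_dcache: write back (clean) a range of the data cache to the point
 * of coherency without invalidating it.  The interpretation of 'addr' and
 * the outer-cache handling mirror flush_dcache() above.
 */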
105 | void |
106 | clean_dcache( |
107 | vm_offset_t addr, |
108 | unsigned length, |
109 | boolean_t phys) |
110 | { |
111 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
112 | |
113 | if (phys) { |
114 | pmap_paddr_t paddr; |
115 | vm_offset_t vaddr; |
116 | |
117 | paddr = CAST_DOWN(pmap_paddr_t, addr); |
118 | if (!isphysmem(paddr)) |
119 | return; |
120 | |
121 | vaddr = phystokv(paddr); |
122 | CleanPoC_DcacheRegion( (vm_offset_t) vaddr, length); |
123 | |
124 | if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) |
125 | ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( |
126 | cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, length); |
127 | return; |
128 | } |
129 | |
130 | if (cpu_data_ptr->cpu_cache_dispatch == (cache_dispatch_t) NULL) { |
131 | CleanPoC_DcacheRegion( (vm_offset_t) addr, length); |
132 | } else { |
133 | addr64_t paddr; |
134 | uint32_t count; |
135 | |
136 | while (length > 0) { |
137 | count = PAGE_SIZE - (addr & PAGE_MASK); |
138 | if (count > length) |
139 | count = length; |
140 | CleanPoC_DcacheRegion( (vm_offset_t) addr, count); |
141 | paddr = kvtophys(addr); |
142 | if (paddr) |
143 | ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( |
144 | cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) paddr, count); |
145 | addr += count; |
146 | length -= count; |
147 | } |
148 | } |
149 | return; |
150 | } |
151 | |
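/*
 * flush_dcache_syscall: flush a virtual range on behalf of a syscall.
 * Ranges at least as large as the bulk-operation threshold
 * (c_bulksize_op) are promoted to a whole-cache flush, cross-called to
 * all CPUs on ARMv7 SMP configurations.
 */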
152 | void |
153 | flush_dcache_syscall( |
154 | vm_offset_t va, |
155 | unsigned length) |
156 | { |
	if ((cache_info()->c_bulksize_op != 0) && (length >= (cache_info()->c_bulksize_op))) {
158 | #if __ARM_SMP__ && defined(ARMA7) |
159 | cache_xcall(LWFlush); |
160 | #else |
161 | FlushPoC_Dcache(); |
162 | if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) |
			((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch) (getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
164 | #endif |
165 | } else { |
166 | FlushPoC_DcacheRegion( (vm_offset_t) va, length); |
167 | } |
168 | return; |
169 | } |
170 | |
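/*
 * dcache_incoherent_io_flush64: flush a physical range in preparation for
 * non-coherent device I/O.  When 'remaining' reaches the bulk threshold,
 * the whole cache is flushed and BWOpDone is reported through 'res';
 * otherwise the range is flushed through a virtual alias, using per-CPU
 * copy windows when the physical address lies outside the physmap.
 */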
171 | void |
172 | dcache_incoherent_io_flush64( |
173 | addr64_t pa, |
174 | unsigned int size, |
175 | unsigned int remaining, |
176 | unsigned int *res) |
177 | { |
178 | vm_offset_t vaddr; |
179 | pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa); |
180 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
181 | |
	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
183 | #if __ARM_SMP__ && defined (ARMA7) |
184 | cache_xcall(LWFlush); |
185 | #else |
186 | FlushPoC_Dcache(); |
187 | if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) |
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
189 | #endif |
190 | *res = BWOpDone; |
191 | } else { |
		if (isphysmem(paddr)) {
			vaddr = phystokv(pa);
			FlushPoC_DcacheRegion( (vm_offset_t) vaddr, size);

			if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
				((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (cpu_data_ptr->cpu_id, CacheCleanFlushRegion, (unsigned int) pa, size);
200 | } else { |
201 | /* slow path - pa isn't in the vtop region. Flush one page at a time via cpu_copywindows */ |
202 | unsigned int wimg_bits, index; |
203 | uint32_t count; |
204 | |
205 | mp_disable_preemption(); |
206 | |
207 | while (size > 0) { |
208 | count = PAGE_SIZE - (paddr & PAGE_MASK); |
209 | if (count > size) |
210 | count = size; |
211 | |
212 | wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT)); |
213 | index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ|VM_PROT_WRITE, wimg_bits); |
214 | vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK); |
215 | |
				/* Flush (clean + invalidate), matching the fast path above */
				FlushPoC_DcacheRegion( (vm_offset_t) vaddr, count);
217 | |
218 | pmap_unmap_cpu_windows_copy(index); |
219 | |
220 | paddr += count; |
221 | size -= count; |
222 | } |
223 | |
224 | mp_enable_preemption(); |
225 | } |
226 | } |
227 | |
228 | return; |
229 | } |
230 | |
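/*
 * dcache_incoherent_io_store64: clean (write back) a physical range so
 * that a non-coherent device can observe CPU-written data.  Ranges mapped
 * as uncached I/O or write-combined require no maintenance and return
 * immediately.
 */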
231 | void |
232 | dcache_incoherent_io_store64( |
233 | addr64_t pa, |
234 | unsigned int size, |
235 | unsigned int remaining, |
236 | unsigned int *res) |
237 | { |
238 | vm_offset_t vaddr; |
239 | pmap_paddr_t paddr = CAST_DOWN(pmap_paddr_t, pa); |
240 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
241 | |
242 | if (isphysmem(paddr)) { |
243 | unsigned int wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT)); |
244 | if ((wimg_bits == VM_WIMG_IO) || (wimg_bits == VM_WIMG_WCOMB)) { |
245 | return; |
246 | } |
247 | } |
248 | |
	if ((cache_info()->c_bulksize_op != 0) && (remaining >= (cache_info()->c_bulksize_op))) {
250 | #if __ARM_SMP__ && defined (ARMA7) |
251 | cache_xcall(LWClean); |
252 | if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) |
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
254 | #else |
255 | CleanPoC_Dcache(); |
256 | if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) |
			((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
258 | #endif |
259 | *res = BWOpDone; |
260 | } else { |
		if (isphysmem(paddr)) {
			vaddr = phystokv(pa);
			CleanPoC_DcacheRegion( (vm_offset_t) vaddr, size);

			if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL)
				((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) (cpu_data_ptr->cpu_id, CacheCleanRegion, (unsigned int) pa, size);
269 | } else { |
			/* slow path - pa isn't in the vtop region. Clean one page at a time via cpu_copywindows */
271 | unsigned int wimg_bits, index; |
272 | uint32_t count; |
273 | |
274 | mp_disable_preemption(); |
275 | |
276 | while (size > 0) { |
277 | count = PAGE_SIZE - (paddr & PAGE_MASK); |
278 | if (count > size) |
279 | count = size; |
280 | |
281 | wimg_bits = pmap_cache_attributes((ppnum_t) (paddr >> PAGE_SHIFT)); |
282 | index = pmap_map_cpu_windows_copy((ppnum_t) (paddr >> PAGE_SHIFT), VM_PROT_READ|VM_PROT_WRITE, wimg_bits); |
283 | vaddr = pmap_cpu_windows_copy_addr(cpu_number(), index) | (paddr & PAGE_MASK); |
284 | |
285 | CleanPoC_DcacheRegion( (vm_offset_t) vaddr, count); |
286 | |
287 | pmap_unmap_cpu_windows_copy(index); |
288 | |
289 | paddr += count; |
290 | size -= count; |
291 | } |
292 | |
293 | mp_enable_preemption(); |
294 | } |
295 | } |
296 | |
297 | return; |
298 | } |
299 | |
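/*
 * cache_sync_page: make instruction fetches coherent with prior data
 * writes to a page, e.g. after code has been paged in or modified.
 */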
300 | void |
301 | cache_sync_page( |
302 | ppnum_t pp |
303 | ) |
304 | { |
305 | pmap_paddr_t paddr = ptoa(pp); |
306 | |
307 | if (isphysmem(paddr)) { |
308 | vm_offset_t vaddr = phystokv(paddr); |
309 | |
310 | CleanPoU_DcacheRegion(vaddr, PAGE_SIZE); |
311 | #ifdef __ARM_IC_NOALIAS_ICACHE__ |
312 | InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE); |
313 | #else |
314 | InvalidatePoU_Icache(); |
315 | #endif |
316 | } else { |
317 | FlushPoC_Dcache(); |
318 | InvalidatePoU_Icache(); |
319 | }; |
320 | } |
321 | |
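/*
 * platform_cache_init: enable any external cache controller and record
 * its size in the cpuid cache info if an L2 size was not already known.
 */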
322 | void |
323 | platform_cache_init( |
324 | void) |
325 | { |
326 | cache_info_t *cpuid_cache_info; |
327 | unsigned int cache_size = 0x0UL; |
328 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
329 | |
330 | cpuid_cache_info = cache_info(); |
331 | |
332 | if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) { |
333 | ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( |
334 | cpu_data_ptr->cpu_id, CacheControl, CacheControlEnable, 0x0UL); |
335 | |
		if (cpuid_cache_info->c_l2size == 0x0) {
337 | ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( |
				cpu_data_ptr->cpu_id, CacheConfig, CacheConfigSize, (unsigned int)&cache_size);
339 | cpuid_cache_info->c_l2size = cache_size; |
340 | } |
341 | } |
342 | |
343 | } |
344 | |
345 | void |
346 | platform_cache_flush( |
347 | void) |
348 | { |
349 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
350 | |
351 | FlushPoC_Dcache(); |
352 | |
353 | if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) |
354 | ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( |
			cpu_data_ptr->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
356 | } |
357 | |
358 | void |
359 | platform_cache_clean( |
360 | void) |
361 | { |
362 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
363 | |
364 | CleanPoC_Dcache(); |
365 | |
366 | if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) |
367 | ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( |
			cpu_data_ptr->cpu_id, CacheClean, 0x0UL, 0x0UL);
369 | } |
370 | |
371 | void |
372 | platform_cache_shutdown( |
373 | void) |
374 | { |
375 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
376 | |
377 | CleanPoC_Dcache(); |
378 | |
379 | if (cpu_data_ptr->cpu_cache_dispatch != (cache_dispatch_t) NULL) |
380 | ((cache_dispatch_t) cpu_data_ptr->cpu_cache_dispatch) ( |
			cpu_data_ptr->cpu_id, CacheShutdown, 0x0UL, 0x0UL);
382 | } |
383 | |
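/*
 * platform_cache_disable: turn off D-cache allocation by clearing the
 * cache-enable bit in SCTLR.  A no-op on ARMv8 and later.
 */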
384 | void |
385 | platform_cache_disable(void) |
386 | { |
387 | #if (__ARM_ARCH__ < 8) |
388 | uint32_t sctlr_value = 0; |
389 | |
390 | /* Disable dcache allocation. */ |
391 | __asm__ volatile("mrc p15, 0, %0, c1, c0, 0" |
392 | : "=r" (sctlr_value)); |
393 | |
394 | sctlr_value &= ~SCTLR_DCACHE; |
395 | |
396 | __asm__ volatile("mcr p15, 0, %0, c1, c0, 0\n" |
397 | "isb" |
398 | :: "r" (sctlr_value)); |
399 | #endif /* (__ARM_ARCH__ < 8) */ |
400 | } |
401 | |
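/*
 * platform_cache_idle_enter: prepare this CPU's caches for entry into a
 * low-power state.  On ARMv7 SMP parts this also takes the core out of
 * the coherency domain by clearing the ACTLR.SMP bit (bit 6).
 */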
402 | void |
403 | platform_cache_idle_enter( |
404 | void) |
405 | { |
406 | #if __ARM_SMP__ |
407 | platform_cache_disable(); |
408 | |
409 | /* |
410 | * If we're only using a single CPU, just write back any |
411 | * dirty cachelines. We can avoid doing housekeeping |
412 | * on CPU data that would normally be modified by other |
413 | * CPUs. |
414 | */ |
415 | if (up_style_idle_exit && (real_ncpus == 1)) |
416 | CleanPoU_Dcache(); |
417 | else { |
418 | FlushPoU_Dcache(); |
419 | |
420 | #if (__ARM_ARCH__ < 8) |
421 | cpu_data_t *cpu_data_ptr = getCpuDatap(); |
422 | cpu_data_ptr->cpu_CLW_active = 0; |
423 | __asm__ volatile("dmb ish" ); |
424 | cpu_data_ptr->cpu_CLWFlush_req = 0; |
425 | cpu_data_ptr->cpu_CLWClean_req = 0; |
426 | CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t)); |
427 | #endif /* (__ARM_ARCH__ < 8) */ |
428 | } |
429 | #else |
430 | CleanPoU_Dcache(); |
431 | #endif |
432 | |
433 | #if defined (__ARM_SMP__) && defined (ARMA7) |
434 | uint32_t actlr_value = 0; |
435 | |
436 | /* Leave the coherency domain */ |
437 | __asm__ volatile("clrex\n" |
438 | "mrc p15, 0, %0, c1, c0, 1\n" |
439 | : "=r" (actlr_value)); |
440 | |
441 | actlr_value &= ~0x40; |
442 | |
443 | __asm__ volatile("mcr p15, 0, %0, c1, c0, 1\n" |
444 | /* Ensures any pending fwd request gets serviced and ends up */ |
445 | "dsb\n" |
446 | /* Forces the processor to re-fetch, so any pending fwd request gets into the core */ |
447 | "isb\n" |
448 | /* Ensures the second possible pending fwd request ends up. */ |
449 | "dsb\n" |
450 | :: "r" (actlr_value)); |
451 | #endif |
452 | } |
453 | |
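/*
 * platform_cache_idle_exit: undo platform_cache_idle_enter(): flush stale
 * local cache state, rejoin the coherency domain by setting ACTLR.SMP,
 * and re-enable D-cache allocation.
 */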
454 | void |
455 | platform_cache_idle_exit( |
456 | void) |
457 | { |
458 | #if defined (ARMA7) |
459 | uint32_t actlr_value = 0; |
460 | |
461 | /* Flush L1 caches and TLB before rejoining the coherency domain */ |
462 | FlushPoU_Dcache(); |
463 | /* |
464 | * If we're only using a single CPU, we can avoid flushing the |
465 | * I-cache or the TLB, as neither program text nor pagetables |
466 | * should have been changed during the idle period. We still |
467 | * want to flush the D-cache to PoU (above), as memory contents |
468 | * may have been changed by DMA. |
469 | */ |
470 | if (!up_style_idle_exit || (real_ncpus > 1)) { |
471 | InvalidatePoU_Icache(); |
472 | flush_core_tlb(); |
473 | } |
474 | |
475 | /* Rejoin the coherency domain */ |
476 | __asm__ volatile("mrc p15, 0, %0, c1, c0, 1\n" |
477 | : "=r" (actlr_value)); |
478 | |
479 | actlr_value |= 0x40; |
480 | |
481 | __asm__ volatile("mcr p15, 0, %0, c1, c0, 1\n" |
482 | "isb\n" |
483 | :: "r" (actlr_value)); |
484 | |
485 | #if __ARM_SMP__ |
486 | uint32_t sctlr_value = 0; |
487 | |
488 | /* Enable dcache allocation. */ |
489 | __asm__ volatile("mrc p15, 0, %0, c1, c0, 0\n" |
490 | : "=r" (sctlr_value)); |
491 | |
492 | sctlr_value |= SCTLR_DCACHE; |
493 | |
494 | __asm__ volatile("mcr p15, 0, %0, c1, c0, 0\n" |
495 | "isb" |
496 | :: "r" (sctlr_value)); |
497 | getCpuDatap()->cpu_CLW_active = 1; |
498 | #endif |
499 | #endif |
500 | } |
501 | |
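/*
 * platform_cache_batch_wimg: report whether a WIMG change covering 'size'
 * bytes is large enough to be handled as a single bulk cache operation.
 */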
502 | boolean_t |
503 | platform_cache_batch_wimg( |
504 | __unused unsigned int new_wimg, |
505 | __unused unsigned int size |
506 | ) |
507 | { |
508 | boolean_t do_cache_op = FALSE; |
509 | |
	if ((cache_info()->c_bulksize_op != 0) && (size >= (cache_info()->c_bulksize_op)))
		do_cache_op = TRUE;
511 | |
512 | return do_cache_op; |
513 | } |
514 | |
515 | void |
516 | platform_cache_flush_wimg( |
517 | __unused unsigned int new_wimg |
518 | ) |
519 | { |
520 | #if __ARM_SMP__ && defined (ARMA7) |
521 | cache_xcall(LWFlush); |
522 | #else |
523 | FlushPoC_Dcache(); |
524 | if (getCpuDatap()->cpu_cache_dispatch != (cache_dispatch_t) NULL) |
		((cache_dispatch_t) getCpuDatap()->cpu_cache_dispatch) (getCpuDatap()->cpu_id, CacheCleanFlush, 0x0UL, 0x0UL);
526 | #endif |
527 | } |
528 | |
529 | #if __ARM_SMP__ && defined(ARMA7) |
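/*
 * cache_xcall_handler: per-CPU handler for LWFlush/LWClean cross-calls.
 * Request and completion fields hold timebase values, so a request is
 * serviced only if it is newer than the last completed operation.
 */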
530 | void |
531 | cache_xcall_handler(unsigned int op) |
532 | { |
533 | cpu_data_t *cdp; |
534 | uint64_t abstime; |
535 | |
536 | cdp = getCpuDatap(); |
537 | |
538 | if ((op == LWFlush) && (cdp->cpu_CLWFlush_req > cdp->cpu_CLWFlush_last)) { |
539 | FlushPoU_Dcache(); |
540 | abstime = ml_get_timebase(); |
541 | cdp->cpu_CLWFlush_last = abstime; |
542 | cdp->cpu_CLWClean_last = abstime; |
543 | } else if ((op == LWClean) && (cdp->cpu_CLWClean_req > cdp->cpu_CLWClean_last)) { |
544 | CleanPoU_Dcache(); |
545 | abstime = ml_get_timebase(); |
546 | cdp->cpu_CLWClean_last = abstime; |
547 | } |
548 | } |
549 | |
550 | |
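/*
 * cache_xcall: request that every participating CPU (cpu_CLW_active set)
 * clean or flush its caches to the point of unification.  The current
 * timebase is posted on each target as a request token, the target is
 * signalled, and the caller spins until each target's completion
 * timestamp catches up before finishing with a whole-cache operation to
 * the point of coherency.
 */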
551 | void |
552 | cache_xcall(unsigned int op) |
553 | { |
554 | boolean_t intr; |
555 | cpu_data_t *cdp; |
556 | cpu_data_t *target_cdp; |
557 | unsigned int cpu; |
558 | unsigned int signal; |
559 | uint64_t abstime; |
560 | |
561 | intr = ml_set_interrupts_enabled(FALSE); |
562 | cdp = getCpuDatap(); |
563 | abstime = ml_get_timebase(); |
564 | if (op == LWClean) |
565 | signal = SIGPLWClean; |
566 | else |
567 | signal = SIGPLWFlush; |
568 | |
569 | for (cpu=0; cpu < MAX_CPUS; cpu++) { |
570 | |
571 | target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; |
572 | if(target_cdp == (cpu_data_t *)NULL) |
573 | break; |
574 | |
575 | if (target_cdp->cpu_CLW_active == 0) |
576 | continue; |
577 | |
578 | if (op == LWFlush) |
579 | target_cdp->cpu_CLWFlush_req = abstime; |
580 | else if (op == LWClean) |
581 | target_cdp->cpu_CLWClean_req = abstime; |
582 | __asm__ volatile("dmb ish" ); |
583 | if (target_cdp->cpu_CLW_active == 0) { |
584 | if (op == LWFlush) |
585 | target_cdp->cpu_CLWFlush_req = 0x0ULL; |
586 | else if (op == LWClean) |
587 | target_cdp->cpu_CLWClean_req = 0x0ULL; |
588 | continue; |
589 | } |
590 | |
591 | if (target_cdp == cdp) |
592 | continue; |
593 | |
		if (KERN_SUCCESS != cpu_signal(target_cdp, signal, (void *)NULL, NULL)) {
595 | if (op == LWFlush) |
596 | target_cdp->cpu_CLWFlush_req = 0x0ULL; |
597 | else if (op == LWClean) |
598 | target_cdp->cpu_CLWClean_req = 0x0ULL; |
599 | } |
600 | if (cpu == real_ncpus) |
601 | break; |
602 | } |
603 | |
	cache_xcall_handler(op);
605 | |
606 | (void) ml_set_interrupts_enabled(intr); |
607 | |
608 | for (cpu=0; cpu < MAX_CPUS; cpu++) { |
609 | |
610 | target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr; |
611 | if(target_cdp == (cpu_data_t *)NULL) |
612 | break; |
613 | |
614 | if (target_cdp == cdp) |
615 | continue; |
616 | |
		if (op == LWFlush)
			while ((target_cdp->cpu_CLWFlush_req != 0x0ULL) && (target_cdp->cpu_CLWFlush_last < abstime))
				;	/* spin until the target completes the flush */
		else if (op == LWClean)
			while ((target_cdp->cpu_CLWClean_req != 0x0ULL) && (target_cdp->cpu_CLWClean_last < abstime))
				;	/* spin until the target completes the clean */
621 | |
622 | if (cpu == real_ncpus) |
623 | break; |
624 | } |
625 | |
626 | if (op == LWFlush) |
627 | FlushPoC_Dcache(); |
628 | else if (op == LWClean) |
629 | CleanPoC_Dcache(); |
630 | } |
631 | #endif |
632 | |
633 | |
634 | #else /* __ARM_COHERENT_IO__ */ |
635 | |
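/*
 * With coherent I/O, DMA does not require cache maintenance: the entry
 * points below reduce to a data synchronization barrier, or to nothing
 * at all for the platform_cache_* hooks.
 */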
636 | void |
637 | flush_dcache( |
638 | __unused vm_offset_t addr, |
639 | __unused unsigned length, |
640 | __unused boolean_t phys) |
641 | { |
642 | __asm__ volatile ("dsb sy" ); |
643 | } |
644 | |
645 | void |
646 | clean_dcache( |
647 | __unused vm_offset_t addr, |
648 | __unused unsigned length, |
649 | __unused boolean_t phys) |
650 | { |
651 | __asm__ volatile ("dsb sy" ); |
652 | } |
653 | |
654 | void |
655 | flush_dcache_syscall( |
656 | __unused vm_offset_t va, |
657 | __unused unsigned length) |
658 | { |
659 | __asm__ volatile ("dsb sy" ); |
660 | } |
661 | |
662 | void |
663 | dcache_incoherent_io_flush64( |
664 | __unused addr64_t pa, |
665 | __unused unsigned int size, |
666 | __unused unsigned int remaining, |
	unsigned int *res)
668 | { |
669 | __asm__ volatile ("dsb sy" ); |
670 | *res = LWOpDone; |
671 | return; |
672 | } |
673 | |
674 | void |
675 | dcache_incoherent_io_store64( |
676 | __unused addr64_t pa, |
677 | __unused unsigned int size, |
678 | __unused unsigned int remaining, |
	unsigned int *res)
680 | { |
681 | __asm__ volatile ("dsb sy" ); |
682 | *res = LWOpDone; |
683 | return; |
684 | } |
685 | |
686 | void |
687 | cache_sync_page( |
688 | ppnum_t pp |
689 | ) |
690 | { |
691 | pmap_paddr_t paddr = ptoa(pp); |
692 | |
693 | if (isphysmem(paddr)) { |
694 | vm_offset_t vaddr = phystokv(paddr); |
695 | |
696 | #ifdef __ARM_IC_NOALIAS_ICACHE__ |
697 | InvalidatePoU_IcacheRegion(vaddr, PAGE_SIZE); |
698 | #else |
699 | InvalidatePoU_Icache(); |
700 | #endif |
701 | } |
702 | } |
703 | |
704 | void |
705 | platform_cache_init( |
706 | void) |
707 | { |
708 | } |
709 | |
710 | void |
711 | platform_cache_flush( |
712 | void) |
713 | { |
714 | } |
715 | |
716 | void |
717 | platform_cache_clean( |
718 | void) |
719 | { |
720 | } |
721 | |
722 | void |
723 | platform_cache_shutdown( |
724 | void) |
725 | { |
726 | } |
727 | |
728 | void |
729 | platform_cache_idle_enter( |
730 | void) |
731 | { |
732 | } |
733 | |
734 | void |
735 | platform_cache_idle_exit( |
736 | void) |
737 | { |
738 | } |
739 | |
740 | boolean_t |
741 | platform_cache_batch_wimg( |
742 | __unused unsigned int new_wimg, |
743 | __unused unsigned int size |
744 | ) |
745 | { |
746 | return TRUE; |
747 | } |
748 | |
749 | void |
750 | platform_cache_flush_wimg( |
751 | __unused unsigned int new_wimg) |
752 | { |
753 | } |
754 | |
755 | #endif /* __ARM_COHERENT_IO__ */ |
756 | |