TOMOYO Linux Cross Reference
Linux/arch/arm/include/asm/cacheflush.h

/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>

#define CACHE_COLOUR(vaddr)     ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

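/*
 * Worked example (editorial addition, not part of the upstream header):
 * with 4KiB pages (PAGE_SHIFT == 12) and SHMLBA == 4 * PAGE_SIZE, as ARM
 * defines it for aliasing VIPT D-caches, there are four cache colours:
 *
 *      CACHE_COLOUR(0x2000) == (0x2000 & 0x3fff) >> 12 == 2
 *
 * Two virtual mappings of the same physical page index the same D-cache
 * lines only if their colours match; differing colours create aliases.
 */
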
/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

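/*
 * Illustrative sketch, not part of the upstream header: the deferred
 * flush driven by this bit lives in arch/arm/mm (see flush_dcache_page
 * and update_mmu_cache), roughly of the form:
 *
 *      if (!test_and_set_bit(PG_dcache_clean, &page->flags))
 *              __flush_dcache_page(mapping, page);
 */
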
/*
 *      MM Cache Management
 *      ===================
 *
 *      The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *      implement these methods.
 *
 *      Start addresses are inclusive and end addresses are exclusive;
 *      start addresses should be rounded down, end addresses up.
 *
 *      See Documentation/cachetlb.txt for more information.
 *      Please note that the implementation of these, and the required
 *      effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *      flush_icache_all()
 *
 *              Unconditionally clean and invalidate the entire icache.
 *              Currently only needed for cache-v6.S and cache-v7.S, see
 *              __flush_icache_all for the generic implementation.
 *
 *      flush_kern_all()
 *
 *              Unconditionally clean and invalidate the entire cache.
 *
 *      flush_kern_louis()
 *
 *              Flush data cache levels up to the level of unification
 *              inner shareable and invalidate the I-cache.
 *              Only needed from v7 onwards, falls back to flush_cache_all()
 *              for all other processor versions.
 *
 *      flush_user_all()
 *
 *              Clean and invalidate all user space cache entries
 *              before a change of page tables.
 *
 *      flush_user_range(start, end, flags)
 *
 *              Clean and invalidate a range of cache entries in the
 *              specified address space before a change of page tables.
 *              - start - user start address (inclusive, page aligned)
 *              - end   - user end address   (exclusive, page aligned)
 *              - flags - vma->vm_flags field
 *
 *      coherent_kern_range(start, end)
 *
 *              Ensure coherency between the Icache and the Dcache in the
 *              region described by start, end.  If you have non-snooping
 *              Harvard caches, you need to implement this function.
 *              - start  - virtual start address
 *              - end    - virtual end address
 *
 *      coherent_user_range(start, end)
 *
 *              Ensure coherency between the Icache and the Dcache in the
 *              region described by start, end.  If you have non-snooping
 *              Harvard caches, you need to implement this function.
 *              - start  - virtual start address
 *              - end    - virtual end address
 *
 *      flush_kern_dcache_area(kaddr, size)
 *
 *              Ensure that the data held in the page is written back.
 *              - kaddr  - page address
 *              - size   - region size
 *
 *      DMA Cache Coherency
 *      ===================
 *
 *      dma_flush_range(start, end)
 *
 *              Clean and invalidate the specified virtual address range.
 *              - start  - virtual start address
 *              - end    - virtual end address
 */

struct cpu_cache_fns {
        void (*flush_icache_all)(void);
        void (*flush_kern_all)(void);
        void (*flush_kern_louis)(void);
        void (*flush_user_all)(void);
        void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

        void (*coherent_kern_range)(unsigned long, unsigned long);
        int  (*coherent_user_range)(unsigned long, unsigned long);
        void (*flush_kern_dcache_area)(void *, size_t);

        void (*dma_map_area)(const void *, size_t, int);
        void (*dma_unmap_area)(const void *, size_t, int);

        void (*dma_flush_range)(const void *, const void *);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all         cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all           cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis         cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all           cpu_cache.flush_user_all
#define __cpuc_flush_user_range         cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range      cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range      cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area        cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area                   cpu_cache.dma_map_area
#define dmac_unmap_area                 cpu_cache.dma_unmap_area
#define dmac_flush_range                cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif

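/*
 * Illustrative sketch, not part of the upstream header: whichever
 * variant is selected above, callers use the __cpuc_* names, e.g. a
 * hypothetical helper that writes a buffer back through the kernel
 * linear mapping:
 *
 *      static void example_clean_buffer(void *buf, size_t len)
 *      {
 *              __cpuc_flush_dcache_area(buf, len);
 *      }
 */
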
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
        unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        do {                                                    \
                memcpy(dst, src, len);                          \
        } while (0)
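
/*
 * Hedged usage sketch, not part of the upstream header: the
 * access_process_vm()/ptrace write path uses this roughly as
 *
 *      void *maddr = kmap(page);
 *      copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *      kunmap(page);
 *
 * so that the target's I/D-caches see the modified data.
 */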

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()                                    \
        asm("mcr        p15, 0, %0, c7, c5, 0"                          \
            : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()                                     \
        asm("mcr        p15, 0, %0, c7, c1, 0"                          \
            : : "r" (0));

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) && \
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \
        defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred        __cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred        __flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred        __cpuc_flush_icache_all
#else
#define __flush_icache_preferred        __flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
        __flush_icache_preferred();
        dsb();
}

/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()             __cpuc_flush_kern_louis()

#define flush_cache_all()               __cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                __cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                        vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
        struct mm_struct *mm = vma->vm_mm;

        if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
                unsigned long addr = user_addr & PAGE_MASK;
                __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
        }
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
                vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
                vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
                vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(start,end) \
        __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
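
/*
 * Userspace-side illustration, not part of the upstream header: a JIT
 * that has just emitted instructions reaches this path via the private
 * cacheflush syscall, most portably through the compiler builtin:
 *
 *      __builtin___clear_cache(code, code + size);
 */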

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)         __cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)   cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

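/*
 * Illustrative pattern, not part of the upstream header: a driver that
 * fills a page cache page through the kernel mapping flushes it before
 * userspace may observe the data, e.g.
 *
 *      void *kaddr = kmap(page);
 *      memcpy(kaddr, data, len);
 *      kunmap(page);
 *      flush_dcache_page(page);
 */
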
static inline void flush_kernel_vmap_range(void *addr, int size)
{
        if (cache_is_vivt() || cache_is_vipt_aliasing())
                __cpuc_flush_dcache_area(addr, (size_t)size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
        if (cache_is_vivt() || cache_is_vipt_aliasing())
                __cpuc_flush_dcache_area(addr, (size_t)size);
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
                         struct page *page, unsigned long vmaddr)
{
        extern void __flush_anon_page(struct vm_area_struct *vma,
                                struct page *, unsigned long);
        if (PageAnon(page))
                __flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

#define flush_dcache_mmap_lock(mapping) \
        spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
        spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
        flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)     do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (e.g., via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
        if (!cache_is_vipt_nonaliasing())
                flush_cache_all();
        else
                /*
                 * set_pte_at() called from vmap_pte_range() does not
                 * have a DSB after cleaning the cache line.
                 */
                dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
        if (!cache_is_vipt_nonaliasing())
                flush_cache_all();
}

/*
 * Memory synchronization helpers for mixed cached vs non cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush, i.e. an atomic clean+invalidate
 * operation, is needed to avoid discarding possible concurrent writes to
 * the accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located in
 * a separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)

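/*
 * Illustrative sketch, not part of the upstream header: a state
 * variable shared between cached and non-cached observers is given
 * its own writeback granule, e.g.
 *
 *      static int example_state __aligned(__CACHE_WRITEBACK_GRANULE);
 */
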
/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * clarity of intent, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area

/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
        char *_p = (char *)p;

        __cpuc_clean_dcache_area(_p, size);
        outer_clean_range(__pa(_p), __pa(_p + size));
}

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
        char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
        if (outer_cache.flush_range) {
                /*
                 * Ensure dirty data migrated from other CPUs into our cache
                 * are cleaned out safely before the outer cache is cleaned:
                 */
                __cpuc_clean_dcache_area(_p, size);

                /* Clean and invalidate stale data for *p from outer ... */
                outer_flush_range(__pa(_p), __pa(_p + size));
        }
#endif

        /* ... and inner cache: */
        __cpuc_flush_dcache_area(_p, size);
}

#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
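
/*
 * Hedged usage sketch, not part of the upstream header, modelled on the
 * SMP boot "holding pen" pattern: a CPU running with its cache enabled
 * publishes a value with sync_cache_w(); a CPU whose cache may be off
 * or stale re-reads it after sync_cache_r():
 *
 *      example_release = cpu;             // cached writer
 *      sync_cache_w(&example_release);    // clean out to main memory
 *
 *      sync_cache_r(&example_release);    // flush any stale copy ...
 *      if (example_release == cpu)        // ... then read fresh value
 *              ...
 *
 * example_release is a hypothetical variable, assumed to sit on its own
 * __CACHE_WRITEBACK_GRANULE-aligned cache line as described above.
 */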

#endif
