TOMOYO Linux Cross Reference
Linux/include/asm-arm/cacheflush.h


/*
 *  linux/include/asm-arm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/mman.h>
#include <asm/glue.h>

/*
 *      Cache Model
 *      ===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_ARM720T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_SA110) || defined(CONFIG_CPU_SA1100)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
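
/*
 * Note: each block above defines _CACHE on the first matching CPU type
 * and promotes to MULTI_CACHE on any subsequent match.  For example, a
 * kernel configured with both CONFIG_CPU_SA1100 and CONFIG_CPU_XSCALE
 * first sets _CACHE to v4wb, then reaches the XScale block with _CACHE
 * already defined, so MULTI_CACHE is defined and the indirect
 * cpu_cache function-pointer convention below is selected instead of
 * direct calls.
 */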

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 *      MM Cache Management
 *      ===================
 *
 *      The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *      implement these methods.
 *
 *      Start addresses are inclusive and end addresses are exclusive;
 *      start addresses should be rounded down, end addresses up.
 *
 *      See linux/Documentation/cachetlb.txt for more information.
 *      Please note that the implementation of these, and the required
 *      effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *      flush_cache_kern_all()
 *
 *              Unconditionally clean and invalidate the entire cache.
 *
 *      flush_cache_user_mm(mm)
 *
 *              Clean and invalidate all user space cache entries
 *              before a change of page tables.
 *
 *      flush_cache_user_range(start, end, flags)
 *
 *              Clean and invalidate a range of cache entries in the
 *              specified address space before a change of page tables.
 *              - start - user start address (inclusive, page aligned)
 *              - end   - user end address   (exclusive, page aligned)
 *              - flags - vma->vm_flags field
 *
 *      coherent_kern_range(start, end)
 *
 *              Ensure coherency between the Icache and the Dcache in the
 *              region described by start, end.  If you have non-snooping
 *              Harvard caches, you need to implement this function.
 *              - start  - virtual start address
 *              - end    - virtual end address
 *
 *      DMA Cache Coherency
 *      ===================
 *
 *      dma_inv_range(start, end)
 *
 *              Invalidate (discard) the specified virtual address range.
 *              May not write back any entries.  If 'start' or 'end'
 *              are not cache line aligned, those lines must be written
 *              back.
 *              - start  - virtual start address
 *              - end    - virtual end address
 *
 *      dma_clean_range(start, end)
 *
 *              Clean (write back) the specified virtual address range.
 *              - start  - virtual start address
 *              - end    - virtual end address
 *
 *      dma_flush_range(start, end)
 *
 *              Clean and invalidate the specified virtual address range.
 *              - start  - virtual start address
 *              - end    - virtual end address
 */

struct cpu_cache_fns {
        void (*flush_kern_all)(void);
        void (*flush_user_all)(void);
        void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

        void (*coherent_kern_range)(unsigned long, unsigned long);
        void (*flush_kern_dcache_page)(void *);

        void (*dma_inv_range)(unsigned long, unsigned long);
        void (*dma_clean_range)(unsigned long, unsigned long);
        void (*dma_flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;
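
/*
 * On MULTI_CACHE kernels, cpu_cache is filled in once at boot from the
 * matched processor description; a rough sketch of the assignment made
 * in arch/arm/kernel/setup.c:
 *
 *      cpu_cache = *list->cache;
 *
 * after which every __cpuc_* call below costs one extra pointer
 * indirection.
 */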

#define __cpuc_flush_kern_all           cpu_cache.flush_kern_all
#define __cpuc_flush_user_all           cpu_cache.flush_user_all
#define __cpuc_flush_user_range         cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range      cpu_cache.coherent_kern_range
#define __cpuc_flush_dcache_page        cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range                  cpu_cache.dma_inv_range
#define dmac_clean_range                cpu_cache.dma_clean_range
#define dmac_flush_range                cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all           __glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all           __glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range         __glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range      __glue(_CACHE,_coherent_kern_range)
#define __cpuc_flush_dcache_page        __glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);
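
/*
 * Expansion example (illustrative; assumes the token-pasting __glue()
 * macro from <asm/glue.h>): on a single-cache kernel built for
 * CONFIG_CPU_SA110, _CACHE is v4wb, so a call to
 *
 *      __cpuc_flush_kern_all()
 *
 * compiles directly into a call to the assembly entry point
 *
 *      v4wb_flush_kern_cache_all()
 *
 * with no function-pointer indirection.
 */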

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range                  __glue(_CACHE,_dma_inv_range)
#define dmac_clean_range                __glue(_CACHE,_dma_clean_range)
#define dmac_flush_range                __glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);

#endif
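
/*
 * Illustrative sketch only (not in the original header): the intended
 * callers of the dmac_*_range() helpers live in the dma-mapping layer,
 * which picks the cache operation from the DMA transfer direction.
 * The function below is a hypothetical consistent_sync()-style
 * dispatcher; it assumes the enum dma_data_direction constants from
 * <linux/dma-mapping.h>.
 */
static inline void example_dma_sync(void *vaddr, size_t size, int direction)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end = start + size;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* device writes, CPU reads */
                dmac_inv_range(start, end);     /* discard stale lines */
                break;
        case DMA_TO_DEVICE:             /* CPU writes, device reads */
                dmac_clean_range(start, end);   /* write back dirty lines */
                break;
        case DMA_BIDIRECTIONAL:         /* transfers both ways */
                dmac_flush_range(start, end);   /* clean + invalidate */
                break;
        }
}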

#define flush_cache_vmap(start, end)            flush_cache_all()
#define flush_cache_vunmap(start, end)          flush_cache_all()
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { memcpy(dst, src, len); \
     flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
        memcpy(dst, src, len)
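
/*
 * copy_to_user_page() is used by access_process_vm() (e.g. ptrace
 * poking a breakpoint into another task's text): after the memcpy(),
 * flush_icache_user_range() makes the new bytes visible to the
 * target's instruction stream.
 */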

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()               __cpuc_flush_kern_all()

static inline void flush_cache_mm(struct mm_struct *mm)
{
        if (current->active_mm == mm)
                __cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (current->active_mm == vma->vm_mm)
                __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
                                        vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr)
{
        if (current->active_mm == vma->vm_mm) {
                unsigned long addr = user_addr & PAGE_MASK;
                __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
        }
}

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)         __cpuc_coherent_kern_range(s,e)
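
/*
 * Typical use (illustrative; code, image and size are hypothetical
 * loader variables): after copying new instructions into a buffer,
 * make them visible to the instruction stream before jumping to them:
 *
 *      memcpy(code, image, size);
 *      flush_icache_range((unsigned long)code,
 *                         (unsigned long)code + size);
 */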

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)   cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (i.e., page->mapping == NULL), or it has
 * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
 * then we _must_ always clean + invalidate the dcache entries associated
 * with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define mapping_mapped(map)     (!list_empty(&(map)->i_mmap) || \
                                 !list_empty(&(map)->i_mmap_shared))

extern void __flush_dcache_page(struct page *);

static inline void flush_dcache_page(struct page *page)
{
        if (page->mapping && !mapping_mapped(page->mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else
                __flush_dcache_page(page);
}
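
/*
 * The deferred half of this scheme lives in update_mmu_cache()
 * (arch/arm/mm/fault-armv.c); roughly, when the page is next mapped
 * into user space it performs the postponed clean:
 *
 *      if (page->mapping &&
 *          test_and_clear_bit(PG_dcache_dirty, &page->flags))
 *              __flush_dcache_page(page);
 */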

#define flush_icache_user_range(vma,page,addr,len) \
        flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)     do { } while (0)

#endif
