TOMOYO Linux Cross Reference
Linux/arch/arm/mm/flush.c

/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <linux/hugetlb.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

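/*
 * Background sketch (added context, assuming the usual ARM definitions
 * of SHMLBA and CACHE_COLOUR): on an aliasing VIPT D-cache a physical
 * page may be cached at one of SHMLBA/PAGE_SIZE "colours", selected by
 * the low virtual address bits above PAGE_SHIFT:
 *
 *      CACHE_COLOUR(vaddr) == (vaddr & (SHMLBA - 1)) >> PAGE_SHIFT
 *
 * With 4K pages and four colours, user address 0x4000b000 has colour
 * 3, so flushing a kernel alias mapped at FLUSH_ALIAS_START plus
 * (3 << PAGE_SHIFT) hits the same cache lines as the user mapping.
 * That is the trick flush_pfn_alias() below relies on.
 */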
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
        unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;

        set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

        asm(    "mcrr   p15, 0, %1, %0, c14\n"
        "       mcr     p15, 0, %2, c7, c10, 4"
            :
            : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
            : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
        unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        unsigned long offset = vaddr & (PAGE_SIZE - 1);
        unsigned long to;

        set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
        to = va + offset;
        flush_icache_range(to, to + len);
}

void flush_cache_mm(struct mm_struct *mm)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_mm(mm);
                return;
        }

        if (cache_is_vipt_aliasing()) {
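                /* clean+invalidate entire D-cache, then drain write buffer */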
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_range(vma, start, end);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
        }

        if (vma->vm_flags & VM_EXEC)
                __flush_icache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_page(vma, user_addr, pfn);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(pfn, user_addr);
                __flush_icache_all();
        }

        if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
                __flush_icache_all();
}

#else
#define flush_pfn_alias(pfn,vaddr)              do { } while (0)
#define flush_icache_alias(pfn,vaddr,len)       do { } while (0)
#endif

#define FLAG_PA_IS_EXEC 1
#define FLAG_PA_CORE_IN_MM 2

static void flush_ptrace_access_other(void *args)
{
        __flush_icache_all();
}

static inline
void __flush_ptrace_access(struct page *page, unsigned long uaddr, void *kaddr,
                           unsigned long len, unsigned int flags)
{
        if (cache_is_vivt()) {
                if (flags & FLAG_PA_CORE_IN_MM) {
                        unsigned long addr = (unsigned long)kaddr;
                        __cpuc_coherent_kern_range(addr, addr + len);
                }
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(page_to_pfn(page), uaddr);
                __flush_icache_all();
                return;
        }

        /* VIPT non-aliasing D-cache */
        if (flags & FLAG_PA_IS_EXEC) {
                unsigned long addr = (unsigned long)kaddr;
                if (icache_is_vipt_aliasing())
                        flush_icache_alias(page_to_pfn(page), uaddr, len);
                else
                        __cpuc_coherent_kern_range(addr, addr + len);
                if (cache_ops_need_broadcast())
                        smp_call_function(flush_ptrace_access_other,
                                          NULL, 1);
        }
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long uaddr, void *kaddr, unsigned long len)
{
        unsigned int flags = 0;
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                flags |= FLAG_PA_CORE_IN_MM;
        if (vma->vm_flags & VM_EXEC)
                flags |= FLAG_PA_IS_EXEC;
        __flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
                             void *kaddr, unsigned long len)
{
        unsigned int flags = FLAG_PA_CORE_IN_MM|FLAG_PA_IS_EXEC;

        __flush_ptrace_access(page, uaddr, kaddr, len, flags);
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
{
#ifdef CONFIG_SMP
        preempt_disable();
#endif
        memcpy(dst, src, len);
        flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
        preempt_enable();
#endif
}
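
#if 0
/*
 * Usage sketch (not part of this file): a condensed version of the
 * generic __access_remote_vm() pattern from mm/memory.c of this era,
 * with locking and error handling omitted and a single page assumed.
 * Ptrace-style code writes through the kernel mapping, then lets
 * copy_to_user_page() make the target process's view coherent.
 */
static int example_poke(struct task_struct *tsk, struct mm_struct *mm,
                        unsigned long addr, void *buf, int len)
{
        struct vm_area_struct *vma;
        struct page *page;
        void *maddr;

        if (get_user_pages(tsk, mm, addr, 1, 1, 1, &page, &vma) < 1)
                return -EFAULT;

        maddr = kmap(page);
        copy_to_user_page(vma, page, addr,
                          maddr + (addr & ~PAGE_MASK), buf, len);
        set_page_dirty_lock(page);
        kunmap(page);
        page_cache_release(page);
        return len;
}
#endif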

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
        if (!PageHighMem(page)) {
                size_t page_size = PAGE_SIZE << compound_order(page);
                __cpuc_flush_dcache_area(page_address(page), page_size);
        } else {
                unsigned long i;
                if (cache_is_vipt_nonaliasing()) {
                        for (i = 0; i < (1 << compound_order(page)); i++) {
                                void *addr = kmap_atomic(page + i);
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                kunmap_atomic(addr);
                        }
                } else {
                        for (i = 0; i < (1 << compound_order(page)); i++) {
                                void *addr = kmap_high_get(page + i);
                                if (addr) {
                                        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                                        kunmap_high(page + i);
                                }
                        }
                }
        }

        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
         * we only need to do one flush - which would be at the relevant
         * userspace colour, which is congruent with page->index.
         */
        if (mapping && cache_is_vipt_aliasing())
                flush_pfn_alias(page_to_pfn(page),
                                page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *mpnt;
        pgoff_t pgoff;

        /*
         * There are possible user space mappings of this page:
         * - VIVT cache: we need to also write back and invalidate all user
         *   data in the current VM view associated with this page.
         * - aliasing VIPT: we only need to find one mapping of this page.
         */
        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long offset;

                /*
                 * If this VMA is not in our MM, we can ignore it.
                 */
                if (mpnt->vm_mm != mm)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
        }
        flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
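/*
 * Note (added context): in kernels of this vintage,
 * __sync_icache_dcache() is invoked from set_pte_at() in
 * arch/arm/include/asm/pgtable.h when a valid user PTE is installed,
 * so I/D coherency is resolved as mappings are created.
 */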
void __sync_icache_dcache(pte_t pteval)
{
        unsigned long pfn;
        struct page *page;
        struct address_space *mapping;

        if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
                /* only flush non-aliasing VIPT caches for exec mappings */
                return;
        pfn = pte_pfn(pteval);
        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (cache_is_vipt_aliasing())
                mapping = page_mapping(page);
        else
                mapping = NULL;

        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                __flush_dcache_page(mapping, page);

        if (pte_exec(pteval))
                __flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *          current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP configurations where
 * the cache maintenance operations are not automatically broadcast.
 */
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping(page);

        if (!cache_ops_need_broadcast() &&
            mapping && !page_mapped(page))
                clear_bit(PG_dcache_clean, &page->flags);
        else {
                __flush_dcache_page(mapping, page);
                if (mapping && cache_is_vivt())
                        __flush_dcache_aliases(mapping, page);
                else if (mapping)
                        __flush_icache_all();
                set_bit(PG_dcache_clean, &page->flags);
        }
}
EXPORT_SYMBOL(flush_dcache_page);
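
#if 0
/*
 * Usage sketch (not part of this file; a hypothetical helper): the
 * classic flush_dcache_page() pattern.  A driver or filesystem that
 * fills a page cache page through the kernel mapping must flush it so
 * a user mapping at a different cache colour sees the new data.
 */
static void example_fill_page(struct page *page, const void *src, size_t len)
{
        void *dst = kmap(page);

        memcpy(dst, src, len);          /* dirty the kernel alias */
        flush_dcache_page(page);        /* push it out for user aliases */
        kunmap(page);
}
#endif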

/*
 * Ensure cache coherency for the kernel mapping of this page. We can
 * assume that the page is pinned via kmap.
 *
 * If the page only exists in the page cache and there are no user
 * space mappings, this is a no-op since the page was already marked
 * dirty at creation.  Otherwise, we need to flush the dirty kernel
 * cache lines directly.
 */
void flush_kernel_dcache_page(struct page *page)
{
        if (cache_is_vivt() || cache_is_vipt_aliasing()) {
                struct address_space *mapping;

                mapping = page_mapping(page);

                if (!mapping || mapping_mapped(mapping)) {
                        void *addr;

                        addr = page_address(page);
                        /*
                         * kmap_atomic() doesn't set the page virtual
                         * address for highmem pages, and
                         * kunmap_atomic() takes care of cache
                         * flushing already.
                         */
                        if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
                }
        }
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
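
#if 0
/*
 * Usage sketch (not part of this file; a hypothetical helper following
 * Documentation/cachetlb.txt): a PIO driver modifying a page that user
 * space may already have mapped flushes the dirty kernel lines before
 * dropping the kmap.
 */
static void example_pio_write(struct page *page, const void *data, size_t len)
{
        void *addr = kmap(page);

        memcpy(addr, data, len);
        flush_kernel_dcache_page(page); /* write back dirty kernel lines */
        kunmap(page);
}
#endif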

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        unsigned long pfn;

        /* VIPT non-aliasing caches need do nothing */
        if (cache_is_vipt_nonaliasing())
                return;

        /*
         * Write back and invalidate userspace mapping.
         */
        pfn = page_to_pfn(page);
        if (cache_is_vivt()) {
                flush_cache_page(vma, vmaddr, pfn);
        } else {
                /*
                 * For aliasing VIPT, we can flush an alias of the
                 * userspace address only.
                 */
                flush_pfn_alias(pfn, vmaddr);
                __flush_icache_all();
        }

        /*
         * Invalidate kernel mapping.  No data should be contained
         * in this mapping of the page.  FIXME: this is overkill
         * since we actually ask for a write-back and invalidate.
         */
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
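
#if 0
/*
 * Usage sketch (not part of this file): the sequence documented above
 * the function, roughly as the generic get_user_pages() implementation
 * performs it for each page it returns.  Names are illustrative only.
 */
static void example_gup_read(struct vm_area_struct *vma, struct page *page,
                             unsigned long uaddr, void *buf, size_t len)
{
        flush_anon_page(vma, page, uaddr);      /* user alias -> RAM */
        flush_dcache_page(page);                /* kernel view coherent */
        memcpy(buf, page_address(page), len);   /* assumes a lowmem page */
        /* if we had written into the page: flush_dcache_page(page); */
}
#endif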

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        pmd_t pmd = pmd_mksplitting(*pmdp);
        VM_BUG_ON(address & ~PMD_MASK);
        set_pmd_at(vma->vm_mm, address, pmdp, pmd);

        /* dummy IPI to serialise against fast_gup */
        kick_all_cpus_sync();
}
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

