~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/metag/include/asm/cacheflush.h

Version: ~ [ linux-5.3 ] ~ [ linux-5.2.14 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.72 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.143 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.192 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.192 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.19.8 ] ~ [ linux-3.18.140 ] ~ [ linux-3.17.8 ] ~ [ linux-3.16.73 ] ~ [ linux-3.15.10 ] ~ [ linux-3.14.79 ] ~ [ linux-3.13.11 ] ~ [ linux-3.12.74 ] ~ [ linux-3.11.10 ] ~ [ linux-3.10.108 ] ~ [ linux-3.9.11 ] ~ [ linux-3.8.13 ] ~ [ linux-3.7.10 ] ~ [ linux-3.6.11 ] ~ [ linux-3.5.7 ] ~ [ linux-3.4.113 ] ~ [ linux-3.3.8 ] ~ [ linux-3.2.102 ] ~ [ linux-3.1.10 ] ~ [ linux-3.0.101 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 #ifndef _METAG_CACHEFLUSH_H
  2 #define _METAG_CACHEFLUSH_H
  3 
  4 #include <linux/mm.h>
  5 #include <linux/sched.h>
  6 #include <linux/io.h>
  7 
  8 #include <asm/l2cache.h>
  9 #include <asm/metag_isa.h>
 10 #include <asm/metag_mem.h>
 11 
/* Probe the cache configuration (implemented elsewhere in the arch code). */
void metag_cache_probe(void);

/*
 * Flush the entire data/code cache. Per the comment below, the address
 * space operated on is selected by the top bit of the start address.
 */
void metag_data_cache_flush_all(const void *start);
void metag_code_cache_flush_all(const void *start);

/*
 * Routines to flush physical cache lines that may be used to cache data or code
 * normally accessed via the linear address range supplied. The region flushed
 * must either lie in local or global address space determined by the top bit of
 * the pStart address. If Bytes is >= 4K then the whole of the related cache
 * state will be flushed rather than a limited range.
 */
void metag_data_cache_flush(const void *start, int bytes);
void metag_code_cache_flush(const void *start, int bytes);
 26 
 27 #ifdef CONFIG_METAG_META12
 28 
 29 /* Write through, virtually tagged, split I/D cache. */
 30 
/*
 * Flush the entire code and data caches. PAGE_OFFSET is a kernel linear
 * address; per the comment on metag_*_cache_flush() above, its top bit
 * selects which address space's cache state is flushed.
 */
static inline void __flush_cache_all(void)
{
	metag_code_cache_flush_all((void *) PAGE_OFFSET);
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

#define flush_cache_all() __flush_cache_all()
 38 
 39 /* flush the entire user address space referenced in this mm structure */
 40 static inline void flush_cache_mm(struct mm_struct *mm)
 41 {
 42         if (mm == current->mm)
 43                 __flush_cache_all();
 44 }
 45 
 46 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
 47 
/*
 * Flush a range of addresses from this mm. The start/end arguments are
 * ignored here; the whole mm is flushed via flush_cache_mm() instead.
 */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	flush_cache_mm(vma->vm_mm);
}
 54 
/*
 * Flush a single page. vmaddr/pfn are ignored; this falls back to
 * flushing the whole mm via flush_cache_mm().
 */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
{
	flush_cache_mm(vma->vm_mm);
}
 60 
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
/*
 * No per-page data cache flush is attempted here: the page argument is
 * unused and the entire data cache is flushed instead.
 */
static inline void flush_dcache_page(struct page *page)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}

/* No locking is needed around dcache/mmap state on this arch. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 69 
/* Flush the code cache lines covering one page's kernel mapping. */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
	metag_code_cache_flush(page_to_virt(page), PAGE_SIZE);
}
 75 
/* vmap hook: the range is ignored and the whole data cache is flushed. */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}
 80 
/* vunmap hook: the range is ignored and the whole data cache is flushed. */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	metag_data_cache_flush_all((void *) PAGE_OFFSET);
}
 85 
#else

/* Write through, physically tagged, split I/D cache. */

/*
 * With a physically tagged, write-through cache the virtual-address
 * based flush hooks have nothing to do, so they are all no-ops.
 */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
/*
 * Make the instruction cache coherent after a page's contents change.
 * The page argument is unused; the icache is flushed in full.
 */
static inline void flush_dcache_page(struct page *page)
{
	/* FIXME: We can do better than this. All we are trying to do is
	 * make the i-cache coherent, we should use the PG_arch_1 bit like
	 * e.g. powerpc.
	 */
#ifdef CONFIG_SMP
	/* NOTE(review): presumably a system-wide icache flush - confirm
	 * against the SYSC_ICACHE_FLUSH register documentation. */
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush_all((void *) PAGE_OFFSET);
#endif
}
114 
115 #endif
116 
/* Push n pages at kernel virtual address and clear the icache */
static inline void flush_icache_range(unsigned long address,
				      unsigned long endaddr)
{
#ifdef CONFIG_SMP
	/* NOTE(review): the range is ignored on SMP; the system register
	 * write appears to flush the icache in full - confirm. */
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush((void *) address, endaddr - address);
#endif
}
127 
/* Make a freshly written signal trampoline at @addr fetchable as code. */
static inline void flush_cache_sigtramp(unsigned long addr, int size)
{
	/*
	 * Flush the icache in case there was previously some code
	 * fetched from this address, perhaps a previous sigtramp.
	 *
	 * We don't need to flush the dcache, it's write through and
	 * we just wrote the sigtramp code through it.
	 */
#ifdef CONFIG_SMP
	metag_out32(1, SYSC_ICACHE_FLUSH);
#else
	metag_code_cache_flush((void *) addr, size);
#endif
}
143 
144 #ifdef CONFIG_METAG_L2C
145 
/*
 * Issue a single CACHEWD operation for the cache line containing @addr.
 * The low six bits of the address are cleared first so the operation is
 * applied to the line base.
 */
static inline void cachewd_line(void *addr, unsigned int data)
{
	unsigned long line = (unsigned long)addr & ~0x3fUL;

	__builtin_meta2_cachewd((void *)line, data);
}
155 
/*
 * Perform a certain CACHEWD op on each 64-byte cache line in a range.
 *
 * @start: first byte of the region (need not be line aligned)
 * @size:  length of the region in bytes
 * @op:    CACHEWD data word selecting the operation
 *
 * Fix: the original computed ((size - 1) >> 6) in unsigned arithmetic,
 * so a zero @size wrapped around and the do/while walked a huge number
 * of lines. Guard against the empty region explicitly.
 */
static inline void cachew_region_op(void *start, unsigned long size,
				    unsigned int op)
{
	/* Align down to the base of the first line touched by the region. */
	unsigned long addr = (unsigned long)start & ~0x3fUL;
	unsigned long end = (unsigned long)start + size;

	if (!size)
		return;

	/* One CACHEWD per 64-byte line, including the line of the last byte. */
	while (addr < end) {
		__builtin_meta2_cachewd((void *)addr, op);
		addr += 0x40;
	}
}
172 
173 /* prevent write fence and flushbacks being reordered in L2 */
174 static inline void l2c_fence_flush(void *addr)
175 {
176         /*
177          * Synchronise by reading back and re-flushing.
178          * It is assumed this access will miss, as the caller should have just
179          * flushed the cache line.
180          */
181         (void)(volatile u8 *)addr;
182         cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
183 }
184 
/*
 * prevent write fence and writebacks being reordered in L2
 * @addr: an address within the last cache line written back (callers in
 *        this file pass start + size - 1).
 */
static inline void l2c_fence(void *addr)
{
	/*
	 * A write back has occurred, but not necessarily an invalidate, so the
	 * readback in l2c_fence_flush() would hit in the cache and have no
	 * effect. Therefore fully flush the line first.
	 */
	cachewd_line(addr, CACHEW_FLUSH_L1D_L2);
	l2c_fence_flush(addr);
}
196 
/* Used to keep memory consistent when doing DMA. */
static inline void flush_dcache_region(void *start, unsigned long size)
{
	/* metag_data_cache_flush won't flush L2 cache lines if size >= 4096 */
	if (meta_l2c_is_enabled()) {
		cachew_region_op(start, size, CACHEW_FLUSH_L1D_L2);
		/*
		 * On a writeback L2, fence so the flushback is not
		 * reordered in L2 (see l2c_fence_flush()).
		 */
		if (meta_l2c_is_writeback())
			l2c_fence_flush(start + size - 1);
	} else {
		metag_data_cache_flush(start, size);
	}
}
209 
/* Write back dirty lines to memory (or do nothing if no writeback caches) */
static inline void writeback_dcache_region(void *start, unsigned long size)
{
	/*
	 * Only a writeback-enabled L2 can hold dirty data (the L1 data
	 * cache is write through - see comments above), so there is
	 * nothing to do otherwise.
	 */
	if (meta_l2c_is_enabled() && meta_l2c_is_writeback()) {
		cachew_region_op(start, size, CACHEW_WRITEBACK_L1D_L2);
		l2c_fence(start + size - 1);
	}
}
218 
219 /* Invalidate (may also write back if necessary) */
220 static inline void invalidate_dcache_region(void *start, unsigned long size)
221 {
222         if (meta_l2c_is_enabled())
223                 cachew_region_op(start, size, CACHEW_INVALIDATE_L1D_L2);
224         else
225                 metag_data_cache_flush(start, size);
226 }
#else
/*
 * No L2 cache configured: a plain data cache flush suffices, and with
 * no cache that can hold dirty lines, writeback is a no-op.
 */
#define flush_dcache_region(s, l)	metag_data_cache_flush((s), (l))
#define writeback_dcache_region(s, l)	do {} while (0)
#define invalidate_dcache_region(s, l)	flush_dcache_region((s), (l))
#endif
232 
/*
 * Copy data into a user page and make the icache coherent in case the
 * bytes are code. The dcache is write through (see the comment in
 * flush_cache_sigtramp()), so the memcpy alone keeps memory up to date.
 */
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, const void *src,
				     unsigned long len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}
241 
/* Copy data out of a user page; no cache maintenance is performed. */
static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, const void *src,
				       unsigned long len)
{
	memcpy(dst, src, len);
}
249 
250 #endif /* _METAG_CACHEFLUSH_H */
251 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp