
TOMOYO Linux Cross Reference
Linux/arch/sparc/mm/tlb.c

/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

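/* One flush batch per CPU.  The batch records the mm it belongs to,
 * whether lazy-MMU batching is currently active, and up to
 * TLB_BATCH_NR virtual addresses awaiting a TLB flush.
 */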
static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

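/* Flush every address queued in this CPU's batch.  The TSB entries
 * are flushed first so a stale translation cannot be reloaded from
 * the TSB afterwards; a single-entry batch then uses the per-page
 * flush, larger batches the SMP cross-call or local pending flush.
 */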
void flush_tlb_pending(void)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        struct mm_struct *mm = tb->mm;

        if (!tb->tlb_nr)
                goto out;

        flush_tsb_user(tb);

        if (CTX_VALID(mm->context)) {
                if (tb->tlb_nr == 1) {
                        global_flush_tlb_page(mm, tb->vaddrs[0]);
                } else {
#ifdef CONFIG_SMP
                        smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
                                              &tb->vaddrs[0]);
#else
                        __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
                                            tb->tlb_nr, &tb->vaddrs[0]);
#endif
                }
        }

        tb->tlb_nr = 0;

out:
        put_cpu_var(tlb_batch);
}

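/* Lazy MMU mode brackets a run of page table updates: while
 * tb->active is set, tlb_batch_add_one() queues addresses instead of
 * flushing them one at a time, and anything still pending is drained
 * when the mode is left.
 */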
void arch_enter_lazy_mmu_mode(void)
{
        struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

        tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
        struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

        if (tb->tlb_nr)
                flush_tlb_pending();
        tb->active = 0;
}

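/* Queue one (mm, vaddr) pair for a deferred flush.  Bit 0 of the
 * stored address records whether the mapping was executable, so the
 * low-level flush can demap the I-TLB as well.  The batch is flushed
 * immediately if batching is inactive, if it already belongs to a
 * different mm, or once it reaches TLB_BATCH_NR entries.
 */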
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                              bool exec)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        unsigned long nr;

        vaddr &= PAGE_MASK;
        if (exec)
                vaddr |= 0x1UL;

        nr = tb->tlb_nr;

        if (unlikely(nr != 0 && mm != tb->mm)) {
                flush_tlb_pending();
                nr = 0;
        }

        if (!tb->active) {
                flush_tsb_user_page(mm, vaddr);
                global_flush_tlb_page(mm, vaddr);
                goto out;
        }

        if (nr == 0)
                tb->mm = mm;

        tb->vaddrs[nr] = vaddr;
        tb->tlb_nr = ++nr;
        if (nr >= TLB_BATCH_NR)
                flush_tlb_pending();

out:
        put_cpu_var(tlb_batch);
}

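/* Handle the cache and TLB side effects of replacing an established
 * user PTE.  A dirty, file-backed page whose kernel and user
 * addresses can alias in the virtually-indexed D-cache (the addresses
 * differ in bit 13) has its D-cache lines flushed first; the old
 * translation is then queued for a TLB flush unless the whole mm is
 * being torn down (fullmm).
 */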
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig, int fullmm)
{
        if (tlb_type != hypervisor &&
            pte_dirty(orig)) {
                unsigned long paddr, pfn = pte_pfn(orig);
                struct address_space *mapping;
                struct page *page;

                if (!pfn_valid(pfn))
                        goto no_cache_flush;

                page = pfn_to_page(pfn);
                if (PageReserved(page))
                        goto no_cache_flush;

                /* A real file page? */
                mapping = page_mapping(page);
                if (!mapping)
                        goto no_cache_flush;

                paddr = (unsigned long) page_address(page);
                if ((paddr ^ vaddr) & (1 << 13))
                        flush_dcache_page_all(mm, page);
        }

no_cache_flush:
        if (!fullmm)
                tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
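/* The old PMD pointed at a regular page table.  Walk the PTEs backing
 * the huge-page-sized region and queue a TLB flush for each one that
 * is still valid.
 */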
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                               pmd_t pmd)
{
        unsigned long end;
        pte_t *pte;

        pte = pte_offset_map(&pmd, vaddr);
        end = vaddr + HPAGE_SIZE;
        while (vaddr < end) {
                if (pte_val(*pte) & _PAGE_VALID) {
                        bool exec = pte_exec(*pte);

                        tlb_batch_add_one(mm, vaddr, exec);
                }
                pte++;
                vaddr += PAGE_SIZE;
        }
        pte_unmap(pte);
}

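/* Install a new PMD value and keep mm->context.huge_pte_count in sync
 * so the huge-page TSB can be sized on a later TLB miss.  Whatever
 * the old PMD mapped is flushed: a huge PMD as two REAL_HPAGE_SIZE
 * halves, a regular page table via tlb_batch_pmd_scan().
 */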
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
        pmd_t orig = *pmdp;

        *pmdp = pmd;

        if (mm == &init_mm)
                return;

        if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
                if (pmd_val(pmd) & _PAGE_PMD_HUGE)
                        mm->context.huge_pte_count++;
                else
                        mm->context.huge_pte_count--;

                /* Do not try to allocate the TSB hash table if we
                 * don't have one already.  We have various locks held
                 * and thus we'll end up doing a GFP_KERNEL allocation
                 * in an atomic context.
                 *
                 * Instead, we let the first TLB miss on a hugepage
                 * take care of this.
                 */
        }

        if (!pmd_none(orig)) {
                addr &= HPAGE_MASK;
                if (pmd_trans_huge(orig)) {
                        pte_t orig_pte = __pte(pmd_val(orig));
                        bool exec = pte_exec(orig_pte);

                        tlb_batch_add_one(mm, addr, exec);
                        tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
                } else {
                        tlb_batch_pmd_scan(mm, addr, orig);
                }
        }
}

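/* Clear _PAGE_VALID in a huge PMD so no new TLB entries can be
 * created from it while it is being modified, then flush the range it
 * covered.
 */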
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_t entry = *pmdp;

        pmd_val(entry) &= ~_PAGE_VALID;

        set_pmd_at(vma->vm_mm, address, pmdp, entry);
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}

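/* Deposit a pre-allocated page table for a later huge page split.
 * The list is threaded through the deposited page tables themselves:
 * the list_head occupies the first two PTE slots, and
 * mm->page_table_lock serialises the list.
 */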
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

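/* Withdraw a deposited page table and clear the two PTE slots the
 * embedded list_head clobbered before handing it back.
 */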
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        struct list_head *lh;
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        pte_val(pgtable[0]) = 0;
        pte_val(pgtable[1]) = 0;

        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
