
TOMOYO Linux Cross Reference
Linux/arch/x86/mm/tlb.c


#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

/*
 *      Smarter SMP flushing macros.
 *              c/o Linus Torvalds.
 *
 *      These mean you can really definitely utterly forget about
 *      writing to user space from interrupts. (It's not allowed anyway.)
 *
 *      Optimizations by Manfred Spraul <manfred@colorfullife.com>
 *
 *      More scalable flush, from Andi Kleen
 *
 *      Flush IPI implemented via CALL_FUNCTION_VECTOR, by Alex Shi
 */

struct flush_tlb_info {
        struct mm_struct *flush_mm;     /* mm whose TLB entries are flushed */
        unsigned long flush_start;      /* start of the virtual address range */
        unsigned long flush_end;        /* end of range; TLB_FLUSH_ALL for a
                                         * full flush, 0 for a single page */
};

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
        struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                BUG();
        if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
                cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
                load_cr3(swapper_pg_dir);
                /*
                 * This gets called in the idle path where RCU
                 * functions differently.  Tracing normally
                 * uses RCU, so we have to call the tracepoint
                 * specially here.
                 */
                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *      Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *      if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *      Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *      Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *      Stop ipi delivery for the old mm. This is not synchronized with
 *      the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *      mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *      cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *      Atomically set the bit [other cpus will start sending flush ipis],
 *      and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, so there are
 * no write/read ordering problems.
 */

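/*
 * Illustrative sketch (not part of this file): the 1a) path above, as a
 * switch_mm() implementation would perform it.  Step numbers refer to the
 * comment above; the actual code lives in asm/mmu_context.h, and the exact
 * identifiers here are assumptions for illustration only.
 *
 *      this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);        // 1a1
 *      this_cpu_write(cpu_tlbstate.active_mm, next);           // 1a2
 *      cpumask_set_cpu(cpu, mm_cpumask(next));                 // 1a3
 *      load_cr3(next->pgd);                                    // 1a4
 *      cpumask_clear_cpu(cpu, mm_cpumask(prev));               // 1a5
 */
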
/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
        struct flush_tlb_info *f = info;

        inc_irq_stat(irq_tlb_count);

        if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
                return;
        /* A zero flush_end denotes a single-page flush at flush_start. */
        if (!f->flush_end)
                f->flush_end = f->flush_start + PAGE_SIZE;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
                if (f->flush_end == TLB_FLUSH_ALL) {
                        local_flush_tlb();
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
                } else {
                        unsigned long addr;
                        unsigned long nr_pages =
                                (f->flush_end - f->flush_start) / PAGE_SIZE;
                        addr = f->flush_start;
                        while (addr < f->flush_end) {
                                __flush_tlb_single(addr);
                                addr += PAGE_SIZE;
                        }
                        trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
                }
        } else {
                leave_mm(smp_processor_id());
        }
}

void native_flush_tlb_others(const struct cpumask *cpumask,
                             struct mm_struct *mm, unsigned long start,
                             unsigned long end)
{
        struct flush_tlb_info info;

        info.flush_mm = mm;
        info.flush_start = start;
        info.flush_end = end;

        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        if (is_uv_system()) {
                unsigned int cpu;

                cpu = smp_processor_id();
                cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
                if (cpumask)
                        smp_call_function_many(cpumask, flush_tlb_func,
                                               &info, 1);
                return;
        }
        smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

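/*
 * Note (for illustration): callers do not invoke native_flush_tlb_others()
 * directly; they go through the flush_tlb_others() hook, which, assuming
 * !CONFIG_PARAVIRT, reduces to something like:
 *
 *      #define flush_tlb_others(mask, mm, start, end)  \
 *              native_flush_tlb_others(mask, mm, start, end)
 *
 * Under paravirt, a hypervisor may replace the hook with a cheaper
 * hypercall-based flush.
 */
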
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;

        preempt_disable();

        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
        local_flush_tlb();
        trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
        preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,300 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

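/*
 * Worked example of the trade-off above: flushing N pages one at a time
 * costs about N * 100 ns, so with the default ceiling of 33 the ranged
 * path is bounded at roughly 33 * 100 ns = 3,300 ns.  Beyond that, a
 * single full flush is cheaper up front, at the price of refilling the
 * whole TLB afterwards.
 */
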
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end, unsigned long vmflag)
{
        unsigned long addr;
        /* do a global flush by default */
        unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

        preempt_disable();
        if (current->active_mm != mm)
                goto out;

        if (!current->mm) {
                leave_mm(smp_processor_id());
                goto out;
        }

        if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
                base_pages_to_flush = (end - start) >> PAGE_SHIFT;

        if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
                base_pages_to_flush = TLB_FLUSH_ALL;
                count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
                local_flush_tlb();
        } else {
                /* flush the range one page at a time with 'invlpg' */
                for (addr = start; addr < end; addr += PAGE_SIZE) {
                        count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
                        __flush_tlb_single(addr);
                }
        }
        trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
        if (base_pages_to_flush == TLB_FLUSH_ALL) {
                start = 0UL;
                end = TLB_FLUSH_ALL;
        }
        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, end);
        preempt_enable();
}

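/*
 * For reference, generic code reaches this function through
 * flush_tlb_range(); on x86 in this kernel series that is believed to be:
 *
 *      #define flush_tlb_range(vma, start, end)        \
 *              flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
 *
 * so vmflag carries the VMA's flags, which is how the VM_HUGETLB check
 * above takes effect.
 */
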
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(start);
                else
                        leave_mm(smp_processor_id());
        }

        if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
                flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

        preempt_enable();
}

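/*
 * Typical caller (for illustration): the generic ptep_clear_flush() in
 * mm/pgtable-generic.c clears a PTE and then calls
 * flush_tlb_page(vma, address).  Note how the remote side encodes a
 * single-page flush as start = address, end = 0UL; flush_tlb_func()
 * expands that to exactly one page.
 */
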
static void do_flush_tlb_all(void *info)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
        __flush_tlb_all();
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
                leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
        count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
        on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
        struct flush_tlb_info *f = info;
        unsigned long addr;

        /* flush the range one page at a time with 'invlpg' */
        for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
                __flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        /* Balanced like a user space task's flush; a bit conservative. */
        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
                on_each_cpu(do_flush_tlb_all, NULL, 1);
        } else {
                struct flush_tlb_info info;

                info.flush_start = start;
                info.flush_end = end;
                on_each_cpu(do_kernel_range_flush, &info, 1);
        }
}

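/*
 * Example caller (for illustration): the vmalloc/vfree path, where
 * mm/vmalloc.c unmaps a kernel virtual range and then calls
 * flush_tlb_kernel_range(start, end) so that every CPU drops its stale
 * kernel TLB entries for that range.
 */
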
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
{
        char buf[32];
        unsigned int len;

        len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
                                   const char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        char buf[32];
        ssize_t len;
        int ceiling;

        len = min(count, sizeof(buf) - 1);
        if (copy_from_user(buf, user_buf, len))
                return -EFAULT;

        buf[len] = '\0';
        if (kstrtoint(buf, 0, &ceiling))
                return -EINVAL;

        if (ceiling < 0)
                return -EINVAL;

        tlb_single_page_flush_ceiling = ceiling;
        return count;
}

static const struct file_operations fops_tlbflush = {
        .read = tlbflush_read_file,
        .write = tlbflush_write_file,
        .llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
        debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
                            arch_debugfs_dir, NULL, &fops_tlbflush);
        return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);

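/*
 * Usage example (see Documentation/x86/tlb.txt): assuming debugfs is
 * mounted at /sys/kernel/debug, the knob created above can be read and
 * tuned at runtime from a shell:
 *
 *      # cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *      33
 *      # echo 64 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */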
