TOMOYO Linux Cross Reference
Linux/arch/powerpc/mm/mmu_context_nohash.c


  1 /*
  2  * This file contains the routines for handling the MMU on those
  3  * PowerPC implementations where the MMU is not using the hash
  4  * table, such as the 8xx, 4xx, BookE, etc.
  5  *
  6  * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
  7  *                IBM Corp.
  8  *
  9  *  Derived from previous arch/powerpc/mm/mmu_context.c
 10  *  and arch/powerpc/include/asm/mmu_context.h
 11  *
 12  *  This program is free software; you can redistribute it and/or
 13  *  modify it under the terms of the GNU General Public License
 14  *  as published by the Free Software Foundation; either version
 15  *  2 of the License, or (at your option) any later version.
 16  *
 17  * TODO:
 18  *
 19  *   - The global context lock will not scale very well
 20  *   - The maps should be dynamically allocated to allow for processors
 21  *     that support more PID bits at runtime
 22  *   - Implement flush_tlb_mm() by making the context stale and picking
 23  *     a new one
 24  *   - More aggressively clear stale map bits and maybe find some way to
 25  *     also clear mm->cpu_vm_mask bits when processes are migrated
 26  */
 27 
 28 //#define DEBUG_MAP_CONSISTENCY
 29 //#define DEBUG_CLAMP_LAST_CONTEXT   31
 30 //#define DEBUG_HARDER
 31 
 32 /* We don't use DEBUG because it tends to always be compiled in nowadays
 33  * and this would generate way too much output
 34  */
 35 #ifdef DEBUG_HARDER
 36 #define pr_hard(args...)        printk(KERN_DEBUG args)
 37 #define pr_hardcont(args...)    printk(KERN_CONT args)
 38 #else
 39 #define pr_hard(args...)        do { } while(0)
 40 #define pr_hardcont(args...)    do { } while(0)
 41 #endif
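    /* The empty do { } while (0) bodies above keep the no-op variants safe to
     * use as ordinary statements (for example in an unbraced if/else) while
     * compiling away to nothing when DEBUG_HARDER is not defined.
     */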
 42 
 43 #include <linux/kernel.h>
 44 #include <linux/mm.h>
 45 #include <linux/init.h>
 46 #include <linux/spinlock.h>
 47 #include <linux/bootmem.h>
 48 #include <linux/notifier.h>
 49 #include <linux/cpu.h>
 50 #include <linux/slab.h>
 51 
 52 #include <asm/mmu_context.h>
 53 #include <asm/tlbflush.h>
 54 
 55 static unsigned int first_context, last_context;
 56 static unsigned int next_context, nr_free_contexts;
 57 static unsigned long *context_map;
 58 static unsigned long *stale_map[NR_CPUS];
 59 static struct mm_struct **context_mm;
 60 static DEFINE_RAW_SPINLOCK(context_lock);
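    /* Roughly, as used by the code below:
     *   context_map      - one bit per context ID, set while the ID is allocated
     *   context_mm[id]   - reverse map from a context ID to the mm that owns it
     *   stale_map[cpu]   - per-CPU bitmap of IDs whose old translations may
     *                      still sit in that CPU's TLB and must be flushed
     *                      before the ID is used there again
     *   next_context     - next ID to try when allocating
     *   nr_free_contexts - number of unallocated IDs in [first_context, last_context]
     * All of the above are protected by context_lock.
     */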
 61 
 62 #define CTX_MAP_SIZE    \
 63         (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
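    /* A worked example of the sizing above: with last_context = 255 and 32-bit
     * longs, 255 / 32 + 1 = 8 longs, i.e. a 32-byte bitmap covering IDs 0..255;
     * with 64-bit longs it is 255 / 64 + 1 = 4 longs, also 32 bytes. The "+ 1"
     * rounds the division up so the bit for last_context always fits.
     */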
 64 
 65 
 66 /* Steal a context from a task that has one at the moment.
 67  *
 68  * This is used when we are running out of available PID numbers
 69  * on the processors.
 70  *
 71  * This isn't an LRU system; it just frees up each context in
 72  * turn (sort-of pseudo-random replacement :).  This would be the
 73  * place to implement an LRU scheme if anyone was motivated to do it.
 74  *  -- paulus
 75  *
 76  * For context stealing, we use a slightly different approach for
 77  * SMP and UP. Basically, the UP one is simpler and doesn't use
 78  * the stale map as we can just flush the local CPU
 79  *  -- benh
 80  */
 81 #ifdef CONFIG_SMP
 82 static unsigned int steal_context_smp(unsigned int id)
 83 {
 84         struct mm_struct *mm;
 85         unsigned int cpu, max, i;
 86 
 87         max = last_context - first_context;
 88 
 89         /* Attempt to free next_context first and then loop until we manage */
 90         while (max--) {
 91                 /* Pick up the victim mm */
 92                 mm = context_mm[id];
 93 
 94         /* We have a candidate victim; check whether it's active. On SMP
 95                  * we cannot steal active contexts
 96                  */
 97                 if (mm->context.active) {
 98                         id++;
 99                         if (id > last_context)
100                                 id = first_context;
101                         continue;
102                 }
103                 pr_hardcont(" | steal %d from 0x%p", id, mm);
104 
105         /* Mark this mm as having no context anymore */
106                 mm->context.id = MMU_NO_CONTEXT;
107 
108                 /* Mark it stale on all CPUs that used this mm. For threaded
109                  * implementations, we set it on all threads on each core
110                  * represented in the mask. A future implementation will use
111                  * a core map instead but this will do for now.
112                  */
113                 for_each_cpu(cpu, mm_cpumask(mm)) {
114                         for (i = cpu_first_thread_sibling(cpu);
115                              i <= cpu_last_thread_sibling(cpu); i++)
116                                 __set_bit(id, stale_map[i]);
117                         cpu = i - 1;
118                 }
119                 return id;
120         }
121 
122         /* This will happen if you have more CPUs than available contexts,
123          * all we can do here is wait a bit and try again
124          */
125         raw_spin_unlock(&context_lock);
126         cpu_relax();
127         raw_spin_lock(&context_lock);
128 
129         /* This will cause the caller to try again */
130         return MMU_NO_CONTEXT;
131 }
132 #endif  /* CONFIG_SMP */
133 
134 /* Note that this will also be called on SMP if all other CPUs are
135  * offlined, which means that it may be called for cpu != 0. For
136  * this to work, we somewhat assume that CPUs that are onlined
137  * come up with a fully clean TLB (or are cleaned when offlined)
138  */
139 static unsigned int steal_context_up(unsigned int id)
140 {
141         struct mm_struct *mm;
142         int cpu = smp_processor_id();
143 
144         /* Pick up the victim mm */
145         mm = context_mm[id];
146 
147         pr_hardcont(" | steal %d from 0x%p", id, mm);
148 
149         /* Flush the TLB for that context */
150         local_flush_tlb_mm(mm);
151 
152         /* Mark this mm as having no context anymore */
153         mm->context.id = MMU_NO_CONTEXT;
154 
155         /* XXX This clear should ultimately be part of local_flush_tlb_mm */
156         __clear_bit(id, stale_map[cpu]);
157 
158         return id;
159 }
160 
161 #ifdef DEBUG_MAP_CONSISTENCY
162 static void context_check_map(void)
163 {
164         unsigned int id, nrf, nact;
165 
166         nrf = nact = 0;
167         for (id = first_context; id <= last_context; id++) {
168                 int used = test_bit(id, context_map);
169                 if (!used)
170                         nrf++;
171                 if (used != (context_mm[id] != NULL))
172                         pr_err("MMU: Context %d is %s and MM is %p !\n",
173                                id, used ? "used" : "free", context_mm[id]);
174                 if (context_mm[id] != NULL)
175                         nact += context_mm[id]->context.active;
176         }
177         if (nrf != nr_free_contexts) {
178                 pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
179                        nr_free_contexts, nrf);
180                 nr_free_contexts = nrf;
181         }
182         if (nact > num_online_cpus())
183                 pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
184                        nact, num_online_cpus());
185         if (first_context > 0 && !test_bit(0, context_map))
186                 pr_err("MMU: Context 0 has been freed !!!\n");
187 }
188 #else
189 static void context_check_map(void) { }
190 #endif
191 
192 void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
193 {
194         unsigned int i, id, cpu = smp_processor_id();
195         unsigned long *map;
196 
197         /* No lockless fast path .. yet */
198         raw_spin_lock(&context_lock);
199 
200         pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
201                 cpu, next, next->context.active, next->context.id);
202 
203 #ifdef CONFIG_SMP
204         /* Mark the new context active and the previous one no longer active */
205         next->context.active++;
206         if (prev) {
207                 pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
208                 WARN_ON(prev->context.active < 1);
209                 prev->context.active--;
210         }
211 
212  again:
213 #endif /* CONFIG_SMP */
214 
215         /* If we already have a valid assigned context, skip all that */
216         id = next->context.id;
217         if (likely(id != MMU_NO_CONTEXT)) {
218 #ifdef DEBUG_MAP_CONSISTENCY
219                 if (context_mm[id] != next)
220                         pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
221                                next, id, id, context_mm[id]);
222 #endif
223                 goto ctxt_ok;
224         }
225 
226         /* We really don't have a context, let's try to acquire one */
227         id = next_context;
228         if (id > last_context)
229                 id = first_context;
230         map = context_map;
231 
232         /* No more free contexts, let's try to steal one */
233         if (nr_free_contexts == 0) {
234 #ifdef CONFIG_SMP
235                 if (num_online_cpus() > 1) {
236                         id = steal_context_smp(id);
237                         if (id == MMU_NO_CONTEXT)
238                                 goto again;
239                         goto stolen;
240                 }
241 #endif /* CONFIG_SMP */
242                 id = steal_context_up(id);
243                 goto stolen;
244         }
245         nr_free_contexts--;
246 
247         /* We know there's at least one free context, try to find it */
248         while (__test_and_set_bit(id, map)) {
249                 id = find_next_zero_bit(map, last_context+1, id);
250                 if (id > last_context)
251                         id = first_context;
252         }
253  stolen:
254         next_context = id + 1;
255         context_mm[id] = next;
256         next->context.id = id;
257         pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);
258 
259         context_check_map();
260  ctxt_ok:
261 
262         /* If that context got marked stale on this CPU, then flush the
263          * local TLB for it and unmark it before we use it
264          */
265         if (test_bit(id, stale_map[cpu])) {
266                 pr_hardcont(" | stale flush %d [%d..%d]",
267                             id, cpu_first_thread_sibling(cpu),
268                             cpu_last_thread_sibling(cpu));
269 
270                 local_flush_tlb_mm(next);
271 
272                 /* XXX This clear should ultimately be part of local_flush_tlb_mm */
273                 for (i = cpu_first_thread_sibling(cpu);
274                      i <= cpu_last_thread_sibling(cpu); i++) {
275                         __clear_bit(id, stale_map[i]);
276                 }
277         }
278 
279         /* Flick the MMU and release lock */
280         pr_hardcont(" -> %d\n", id);
281         set_context(id, next->pgd);
282         raw_spin_unlock(&context_lock);
283 }
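    /* In outline, switch_mmu_context() resolves an ID for "next" as follows:
     *   1. fast path: next already has a valid context.id, so reuse it;
     *   2. free path: nr_free_contexts > 0, so scan context_map from
     *      next_context with find_next_zero_bit() and claim the first clear bit;
     *   3. steal path: no free IDs, so steal_context_smp()/steal_context_up()
     *      evicts another mm's ID (on SMP this may loop back to "again" until
     *      an inactive victim is found).
     * In every case an ID marked stale on this CPU is flushed from the local
     * TLB before the new value is programmed into the MMU with set_context().
     */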
284 
285 /*
286  * Set up the context for a new address space.
287  */
288 int init_new_context(struct task_struct *t, struct mm_struct *mm)
289 {
290         pr_hard("initing context for mm @%p\n", mm);
291 
292         mm->context.id = MMU_NO_CONTEXT;
293         mm->context.active = 0;
294 
295 #ifdef CONFIG_PPC_MM_SLICES
296         if (slice_mm_new_context(mm))
297                 slice_set_user_psize(mm, mmu_virtual_psize);
298 #endif
299 
300         return 0;
301 }
302 
303 /*
304  * We're finished using the context for an address space.
305  */
306 void destroy_context(struct mm_struct *mm)
307 {
308         unsigned long flags;
309         unsigned int id;
310 
311         if (mm->context.id == MMU_NO_CONTEXT)
312                 return;
313 
314         WARN_ON(mm->context.active != 0);
315 
316         raw_spin_lock_irqsave(&context_lock, flags);
317         id = mm->context.id;
318         if (id != MMU_NO_CONTEXT) {
319                 __clear_bit(id, context_map);
320                 mm->context.id = MMU_NO_CONTEXT;
321 #ifdef DEBUG_MAP_CONSISTENCY
322                 mm->context.active = 0;
323 #endif
324                 context_mm[id] = NULL;
325                 nr_free_contexts++;
326         }
327         raw_spin_unlock_irqrestore(&context_lock, flags);
328 }
329 
330 #ifdef CONFIG_SMP
331 
332 static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
333                                             unsigned long action, void *hcpu)
334 {
335         unsigned int cpu = (unsigned int)(long)hcpu;
336 
337         /* We don't touch the boot CPU's map, it's allocated at boot and kept
338          * around forever
339          */
340         if (cpu == boot_cpuid)
341                 return NOTIFY_OK;
342 
343         switch (action) {
344         case CPU_UP_PREPARE:
345         case CPU_UP_PREPARE_FROZEN:
346                 pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
347                 stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
348                 break;
349 #ifdef CONFIG_HOTPLUG_CPU
350         case CPU_UP_CANCELED:
351         case CPU_UP_CANCELED_FROZEN:
352         case CPU_DEAD:
353         case CPU_DEAD_FROZEN:
354                 pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
355                 kfree(stale_map[cpu]);
356                 stale_map[cpu] = NULL;
357 
358                 /* We also clear the cpu_vm_mask bits of CPUs going away */
359                 clear_tasks_mm_cpumask(cpu);
360         break;
361 #endif /* CONFIG_HOTPLUG_CPU */
362         }
363         return NOTIFY_OK;
364 }
365 
366 static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
367         .notifier_call  = mmu_context_cpu_notify,
368 };
369 
370 #endif /* CONFIG_SMP */
371 
372 /*
373  * Initialize the context management stuff.
374  */
375 void __init mmu_context_init(void)
376 {
377         /* Mark init_mm as being active on all possible CPUs since
378          * we'll get called with prev == init_mm the first time
379          * we schedule on a given CPU
380          */
381         init_mm.context.active = NR_CPUS;
382 
383         /*
384          *   The MPC8xx has only 16 contexts.  We rotate through them on each
385          * task switch.  A better way would be to keep track of tasks that
386          * own contexts, and implement an LRU usage.  That way very active
387          * tasks don't always have to pay the TLB reload overhead.  The
388          * kernel pages are mapped shared, so the kernel can run on behalf
389          * of any task that makes a kernel entry.  Shared does not mean they
390          * are not protected, just that the ASID comparison is not performed.
391          *      -- Dan
392          *
393          * The IBM4xx has 256 contexts, so we can just rotate through these
394          * as a way of "switching" contexts.  If the TID of the TLB is zero,
395          * the PID/TID comparison is disabled, so we can use a TID of zero
396          * to represent all kernel pages as shared among all contexts.
397          *      -- Dan
398          *
399          * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
400  * should normally never have to steal, though the facility is
401          * present if needed.
402          *      -- BenH
403          */
404         if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
405                 first_context = 0;
406                 last_context = 15;
407         } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
408                 first_context = 1;
409                 last_context = 65535;
410         } else
411 #ifdef CONFIG_PPC_BOOK3E_MMU
412         if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
413                 u32 mmucfg = mfspr(SPRN_MMUCFG);
414                 u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
415                                 >> MMUCFG_PIDSIZE_SHIFT;
416                 first_context = 1;
417                 last_context = (1UL << (pid_bits + 1)) - 1;
418         } else
419 #endif
420         {
421                 first_context = 1;
422                 last_context = 255;
423         }
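    /* A worked example for the Book3E branch above (a sketch; the exact
     * MMUCFG[PIDSIZE] encoding should be confirmed against the core's manual):
     * if PIDSIZE reads back as 13, the core implements 13 + 1 = 14 PID bits,
     * so last_context = (1 << 14) - 1 = 16383, with context 0 still reserved
     * for the kernel since first_context = 1.
     */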
424 
425 #ifdef DEBUG_CLAMP_LAST_CONTEXT
426         last_context = DEBUG_CLAMP_LAST_CONTEXT;
427 #endif
428         /*
429          * Allocate the maps used by context management
430          */
431         context_map = alloc_bootmem(CTX_MAP_SIZE);
432         context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
433 #ifndef CONFIG_SMP
434         stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
435 #else
436         stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE);
437 
438         register_cpu_notifier(&mmu_context_cpu_nb);
439 #endif
440 
441         printk(KERN_INFO
442                "MMU: Allocated %zu bytes of context maps for %d contexts\n",
443                2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
444                last_context - first_context + 1);
445 
446         /*
447          * Some processors have too few contexts to reserve one for
448          * init_mm, and require using context 0 for a normal task.
449          * Other processors reserve the use of context zero for the kernel.
450          * This code assumes first_context < 32.
451          */
452         context_map[0] = (1 << first_context) - 1;
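            /* For example: with first_context = 1 this is (1 << 1) - 1 = 0x1, so
             * only bit 0 (the kernel's context) starts out reserved; with
             * first_context = 0 the mask is 0 and no context is pre-reserved.
             */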
453         next_context = first_context;
454         nr_free_contexts = last_context - first_context + 1;
455 }
456 
457 
