
TOMOYO Linux Cross Reference
Linux/arch/arm64/include/asm/mmu_context.h


/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/pgtable.h>

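/*
 * The lower MAX_ASID_BITS of mm->context.id hold the hardware ASID; the
 * upper bits hold a rollover generation counter. cpu_last_asid records the
 * most recently allocated generation/ASID value.
 */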
#define MAX_ASID_BITS   16

extern unsigned int cpu_last_asid;

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

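/*
 * When CONFIG_PID_IN_CONTEXTIDR is enabled, publish the PID of the incoming
 * task in CONTEXTIDR_EL1 so that external debug and trace tools can identify
 * the process that is currently running; otherwise this is a no-op.
 */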
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
{
        asm(
        "       msr     contextidr_el1, %0\n"
        "       isb"
        :
        : "r" (task_pid_nr(next)));
}
#else
static inline void contextidr_thread_switch(struct task_struct *next)
{
}
#endif

/*
 * Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
        unsigned long ttbr = page_to_phys(empty_zero_page);

        asm(
        "       msr     ttbr0_el1, %0                   // set TTBR0\n"
        "       isb"
        :
        : "r" (ttbr));
}

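/*
 * Allocate a new ASID for "mm" via __new_context(), which must run with
 * interrupts enabled (see check_and_switch_context()), then install the new
 * page tables with interrupts disabled.
 */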
static inline void switch_new_context(struct mm_struct *mm)
{
        unsigned long flags;

        __new_context(mm);

        local_irq_save(flags);
        cpu_switch_mm(mm->pgd, mm);
        local_irq_restore(flags);
}

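/*
 * Switch to "mm", allocating a fresh ASID first if mm's ASID belongs to an
 * older generation than cpu_last_asid. If that allocation cannot be done
 * here (interrupts disabled), it is deferred to
 * finish_arch_post_lock_switch().
 */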
static inline void check_and_switch_context(struct mm_struct *mm,
                                            struct task_struct *tsk)
{
        /*
         * Required during context switch to avoid speculative page table
         * walking with the wrong TTBR.
         */
        cpu_set_reserved_ttbr0();

        if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))
                /*
                 * The ASID is from the current generation, just switch to the
                 * new pgd. This condition is only true for calls from
                 * context_switch() and interrupts are already disabled.
                 */
                cpu_switch_mm(mm->pgd, mm);
        else if (irqs_disabled())
                /*
                 * Defer the new ASID allocation until after the context
                 * switch critical region since __new_context() cannot be
                 * called with interrupts disabled.
                 */
                set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
        else
                /*
                 * This is a direct call to switch_mm() or activate_mm() with
                 * interrupts enabled and a new context.
                 */
                switch_new_context(mm);
}

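/*
 * init_new_context() initialises the architecture-specific context of a new
 * mm and always succeeds (hence the constant 0); there is no per-mm state to
 * release, so destroy_context() is a no-op.
 */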
#define init_new_context(tsk,mm)        (__init_new_context(tsk,mm),0)
#define destroy_context(mm)             do { } while(0)

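/*
 * Called by the scheduler after the runqueue lock has been released. If
 * check_and_switch_context() had to defer the ASID allocation (TIF_SWITCH_MM
 * set), perform it now that interrupts may be enabled again.
 */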
#define finish_arch_post_lock_switch \
        finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
        if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
                struct mm_struct *mm = current->mm;
                unsigned long flags;

                __new_context(mm);

                local_irq_save(flags);
                cpu_switch_mm(mm->pgd, mm);
                local_irq_restore(flags);
        }
}

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy TLB mode
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();

#ifdef CONFIG_SMP
        /* check for possible thread migration */
        if (!cpumask_empty(mm_cpumask(next)) &&
            !cpumask_test_cpu(cpu, mm_cpumask(next)))
                __flush_icache_all();
#endif
        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
                check_and_switch_context(next, tsk);
}

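/*
 * Nothing needs to be done when an mm is deactivated; activating one
 * (e.g. on exec) goes through the normal switch_mm() path.
 */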
#define deactivate_mm(tsk,mm)   do { } while (0)
#define activate_mm(prev,next)  switch_mm(prev, next, NULL)

#endif
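The generation test in check_and_switch_context() relies on the layout
described above MAX_ASID_BITS: the rollover generation lives in the bits
above the hardware ASID. The fragment below is not part of the kernel
source; it is a minimal userspace sketch, with a hypothetical
same_generation() helper, showing why XOR-ing two ids and shifting out the
ASID bits yields zero exactly when both ids belong to the same generation.

#include <stdio.h>

#define MAX_ASID_BITS   16

/* Mirrors the check: !((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS) */
static int same_generation(unsigned long id, unsigned long last)
{
        /* XOR cancels identical generation bits; the shift discards the ASID */
        return !((id ^ last) >> MAX_ASID_BITS);
}

int main(void)
{
        unsigned long gen1 = 1UL << MAX_ASID_BITS;      /* generation 1 */
        unsigned long gen2 = 2UL << MAX_ASID_BITS;      /* generation 2 */

        printf("%d\n", same_generation(gen1 | 5, gen1 | 9)); /* 1: same generation */
        printf("%d\n", same_generation(gen1 | 5, gen2 | 5)); /* 0: stale generation */
        return 0;
}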
