
TOMOYO Linux Cross Reference
Linux/arch/x86/kvm/mmu.h


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

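/*
 * A 4 KiB page holds 512 8-byte entries with 64-bit paging (9 index bits
 * per level) and 1024 4-byte entries with legacy 32-bit paging (10 bits).
 */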
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

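/*
 * With PSE-36, bits 13..16 of a 4 MiB page directory entry supply
 * physical address bits 32..35.
 */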
#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

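/* Page-table depths: 2 for 32-bit, 3 for PAE, 4 or 5 for long mode. */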
#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

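/*
 * Return a mask with bits s..e (inclusive) set; for example,
 * rsvd_bits(52, 62) == 0x7ff0000000000000ULL.
 */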
static inline u64 rsvd_bits(int s, int e)
{
        if (e < s)
                return 0;

        return ((1ULL << (e - s + 1)) - 1) << s;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                             bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len);

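/*
 * Number of shadow pages this VM may still allocate before reaching its
 * n_max_mmu_pages limit.
 */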
static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
{
        if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
                return kvm->arch.n_max_mmu_pages -
                        kvm->arch.n_used_mmu_pages;

        return 0;
}

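/*
 * Load the MMU root if it is not already valid; a cheap no-op on the
 * common path where root_hpa is still valid.
 */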
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
                return 0;

        return kvm_mmu_load(vcpu);
}

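/*
 * When CR4.PCIDE = 1 the PCID lives in bits 0..11 of CR3; with PCIDs
 * disabled the active PCID is always 0.
 */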
static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
        BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

        return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
               ? cr3 & X86_CR3_PCID_MASK
               : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
        return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
{
        if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
                vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
                                              kvm_get_active_pcid(vcpu));
}

/*
 * Currently there are two sorts of write-protection: a) the first kind
 * write-protects guest pages to synchronize guest modifications, b) the
 * second kind is used to synchronize the dirty bitmap for KVM_GET_DIRTY_LOG.
 * The differences between the two are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid
 *    corrupting shadow page tables shared between vcpus, so it must run
 *    under the protection of mmu-lock.  The second case does not need to
 *    flush the TLB until the dirty bitmap is returned to userspace: it only
 *    write-protects pages logged in the bitmap, so no page in the dirty
 *    bitmap is missed and the TLB can be flushed outside of mmu-lock.
 *
 * This creates a problem: the first case can observe a corrupted TLB left
 * behind by the second case, which write-protects pages without flushing
 * the TLB immediately.  To make the first case aware of this, it flushes
 * the TLB whenever it write-protects an spte whose SPTE_MMU_WRITEABLE bit
 * is set; this works because the second case never touches
 * SPTE_MMU_WRITEABLE.
 *
 * In any case, whenever an spte is updated (only permission and status bits
 * are changed) we need to check whether an spte with SPTE_MMU_WRITEABLE has
 * become read-only and, if so, flush the TLB.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether a writable TLB entry may exist or whether the spte can
 *   be made writable in the MMU mapping, check SPTE_MMU_WRITEABLE; this is
 *   the common case.  Otherwise,
 * - when fixing a page fault on the spte or write-protecting for dirty
 *   logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                  unsigned pte_access, unsigned pte_pkey,
                                  unsigned pfec)
{
        int cpl = kvm_x86_ops->get_cpl(vcpu);
        unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

        /*
         * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
         *
         * If CPL = 3, SMAP applies to all supervisor-mode data accesses
         * (these are implicit supervisor accesses) regardless of the value
         * of EFLAGS.AC.
         *
         * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
         * the result in X86_EFLAGS_AC. We then insert it in place of
         * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
         * but it will be one in index if SMAP checks are being overridden.
         * It is important to keep this branchless.
         */
        unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
        int index = (pfec >> 1) +
                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
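        /*
         * Worked example: a user-mode write fault has pfec =
         * PFERR_USER_MASK | PFERR_WRITE_MASK = 6, and CPL = 3 makes
         * smap = 0, so index = 6 >> 1 = 3.
         */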
        bool fault = (mmu->permissions[index] >> pte_access) & 1;
        u32 errcode = PFERR_PRESENT_MASK;

        WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
        if (unlikely(mmu->pkru_mask)) {
                u32 pkru_bits, offset;

                /*
                 * PKRU defines 32 bits: there are 16 domains and 2
                 * attribute bits per domain in pkru.  pte_pkey is the
                 * index of the protection domain, so pte_pkey * 2 is
                 * the index of the first bit for the domain.
                 */
                pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

                /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
                offset = (pfec & ~1) +
                        ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

                pkru_bits &= mmu->pkru_mask >> offset;
                errcode |= -pkru_bits & PFERR_PK_MASK;
                fault |= (pkru_bits != 0);
        }

        return -(u32)fault & errcode;
}
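/*
 * A minimal usage sketch (hypothetical caller, not from this file): check a
 * user-mode write against a PTE's accumulated ACC_* rights and inject the
 * resulting error code on failure.
 *
 *      u32 err = permission_fault(vcpu, vcpu->arch.mmu, pte_access,
 *                                 pte_pkey, PFERR_USER_MASK | PFERR_WRITE_MASK);
 *      if (err)
 *              ... build and inject a #PF from err ...
 */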

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, u64 gfn);
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
#endif
