
TOMOYO Linux Cross Reference
Linux/arch/x86/kvm/mmu.c

  1 /*
  2  * Kernel-based Virtual Machine driver for Linux
  3  *
  4  * This module enables machines with Intel VT-x extensions to run virtual
  5  * machines without emulation or binary translation.
  6  *
  7  * MMU support
  8  *
  9  * Copyright (C) 2006 Qumranet, Inc.
 10  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 11  *
 12  * Authors:
 13  *   Yaniv Kamay  <yaniv@qumranet.com>
 14  *   Avi Kivity   <avi@qumranet.com>
 15  *
 16  * This work is licensed under the terms of the GNU GPL, version 2.  See
 17  * the COPYING file in the top-level directory.
 18  *
 19  */
 20 
 21 #include "irq.h"
 22 #include "mmu.h"
 23 #include "x86.h"
 24 #include "kvm_cache_regs.h"
 25 #include "cpuid.h"
 26 
 27 #include <linux/kvm_host.h>
 28 #include <linux/types.h>
 29 #include <linux/string.h>
 30 #include <linux/mm.h>
 31 #include <linux/highmem.h>
 32 #include <linux/moduleparam.h>
 33 #include <linux/export.h>
 34 #include <linux/swap.h>
 35 #include <linux/hugetlb.h>
 36 #include <linux/compiler.h>
 37 #include <linux/srcu.h>
 38 #include <linux/slab.h>
 39 #include <linux/sched/signal.h>
 40 #include <linux/uaccess.h>
 41 #include <linux/hash.h>
 42 #include <linux/kern_levels.h>
 43 
 44 #include <asm/page.h>
 45 #include <asm/pat.h>
 46 #include <asm/cmpxchg.h>
 47 #include <asm/io.h>
 48 #include <asm/vmx.h>
 49 #include <asm/kvm_page_track.h>
 50 #include "trace.h"
 51 
 52 /*
  53  * When this variable is set to true it enables Two-Dimensional-Paging,
  54  * where the hardware walks two sets of page tables:
  55  * 1. the guest-virtual to guest-physical translation
  56  * 2. while doing 1, the guest-physical to host-physical translation
  57  * If the hardware supports this, we do not need shadow paging.
 58  */
 59 bool tdp_enabled = false;
 60 
 61 enum {
 62         AUDIT_PRE_PAGE_FAULT,
 63         AUDIT_POST_PAGE_FAULT,
 64         AUDIT_PRE_PTE_WRITE,
 65         AUDIT_POST_PTE_WRITE,
 66         AUDIT_PRE_SYNC,
 67         AUDIT_POST_SYNC
 68 };
 69 
 70 #undef MMU_DEBUG
 71 
 72 #ifdef MMU_DEBUG
 73 static bool dbg = 0;
 74 module_param(dbg, bool, 0644);
 75 
 76 #define pgprintk(x...) do { if (dbg) printk(x); } while (0)
 77 #define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
 78 #define MMU_WARN_ON(x) WARN_ON(x)
 79 #else
 80 #define pgprintk(x...) do { } while (0)
 81 #define rmap_printk(x...) do { } while (0)
 82 #define MMU_WARN_ON(x) do { } while (0)
 83 #endif
 84 
 85 #define PTE_PREFETCH_NUM                8
 86 
 87 #define PT_FIRST_AVAIL_BITS_SHIFT 10
 88 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
 89 
 90 #define PT64_LEVEL_BITS 9
 91 
 92 #define PT64_LEVEL_SHIFT(level) \
 93                 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
 94 
 95 #define PT64_INDEX(address, level)\
 96         (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
 97 
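     /*
      * Illustration (assuming 4 KiB base pages, PAGE_SHIFT == 12): for level 2,
      * PT64_LEVEL_SHIFT(2) == 12 + 9 == 21, so PT64_INDEX(addr, 2) extracts
      * address bits 21..29, the 9-bit index into the second-level shadow page
      * table; level 1 uses bits 12..20, level 3 bits 30..38, level 4 bits 39..47.
      */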
 98 
 99 #define PT32_LEVEL_BITS 10
100 
101 #define PT32_LEVEL_SHIFT(level) \
102                 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
103 
104 #define PT32_LVL_OFFSET_MASK(level) \
105         (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
106                                                 * PT32_LEVEL_BITS))) - 1))
107 
108 #define PT32_INDEX(address, level)\
109         (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
110 
111 
112 #define PT64_BASE_ADDR_MASK __sme_clr((((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1)))
113 #define PT64_DIR_BASE_ADDR_MASK \
114         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
115 #define PT64_LVL_ADDR_MASK(level) \
116         (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
117                                                 * PT64_LEVEL_BITS))) - 1))
118 #define PT64_LVL_OFFSET_MASK(level) \
119         (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
120                                                 * PT64_LEVEL_BITS))) - 1))
121 
122 #define PT32_BASE_ADDR_MASK PAGE_MASK
123 #define PT32_DIR_BASE_ADDR_MASK \
124         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
125 #define PT32_LVL_ADDR_MASK(level) \
126         (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
127                                             * PT32_LEVEL_BITS))) - 1))
128 
129 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
130                         | shadow_x_mask | shadow_nx_mask | shadow_me_mask)
131 
132 #define ACC_EXEC_MASK    1
133 #define ACC_WRITE_MASK   PT_WRITABLE_MASK
134 #define ACC_USER_MASK    PT_USER_MASK
135 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
136 
137 /* The mask for the R/X bits in EPT PTEs */
138 #define PT64_EPT_READABLE_MASK                  0x1ull
139 #define PT64_EPT_EXECUTABLE_MASK                0x4ull
140 
141 #include <trace/events/kvm.h>
142 
143 #define CREATE_TRACE_POINTS
144 #include "mmutrace.h"
145 
146 #define SPTE_HOST_WRITEABLE     (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
147 #define SPTE_MMU_WRITEABLE      (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
148 
149 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
150 
151 /* make pte_list_desc fit well in cache line */
152 #define PTE_LIST_EXT 3
153 
154 /*
155  * Return values of handle_mmio_page_fault and mmu.page_fault:
156  * RET_PF_RETRY: let CPU fault again on the address.
157  * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
158  *
159  * For handle_mmio_page_fault only:
160  * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
161  */
162 enum {
163         RET_PF_RETRY = 0,
164         RET_PF_EMULATE = 1,
165         RET_PF_INVALID = 2,
166 };
167 
168 struct pte_list_desc {
169         u64 *sptes[PTE_LIST_EXT];
170         struct pte_list_desc *more;
171 };
172 
173 struct kvm_shadow_walk_iterator {
174         u64 addr;
175         hpa_t shadow_addr;
176         u64 *sptep;
177         int level;
178         unsigned index;
179 };
180 
181 #define for_each_shadow_entry(_vcpu, _addr, _walker)    \
182         for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
183              shadow_walk_okay(&(_walker));                      \
184              shadow_walk_next(&(_walker)))
185 
186 #define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)     \
187         for (shadow_walk_init(&(_walker), _vcpu, _addr);                \
188              shadow_walk_okay(&(_walker)) &&                            \
189                 ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });  \
190              __shadow_walk_next(&(_walker), spte))
191 
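     /*
      * Usage sketch (hypothetical caller): the walk starts at the current root
      * and descends one level per iteration until the walk ends or the caller
      * breaks out, e.g.
      *
      *        struct kvm_shadow_walk_iterator it;
      *
      *        for_each_shadow_entry(vcpu, addr, it)
      *                if (is_last_spte(*it.sptep, it.level))
      *                        break;
      *
      * The lockless variant additionally snapshots each spte into @spte and is
      * meant to run between walk_shadow_page_lockless_begin() and
      * walk_shadow_page_lockless_end().
      */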
192 static struct kmem_cache *pte_list_desc_cache;
193 static struct kmem_cache *mmu_page_header_cache;
194 static struct percpu_counter kvm_total_used_mmu_pages;
195 
196 static u64 __read_mostly shadow_nx_mask;
197 static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
198 static u64 __read_mostly shadow_user_mask;
199 static u64 __read_mostly shadow_accessed_mask;
200 static u64 __read_mostly shadow_dirty_mask;
201 static u64 __read_mostly shadow_mmio_mask;
202 static u64 __read_mostly shadow_mmio_value;
203 static u64 __read_mostly shadow_present_mask;
204 static u64 __read_mostly shadow_me_mask;
205 
206 /*
207  * SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value.
208  * Non-present SPTEs with shadow_acc_track_value set are in place for access
209  * tracking.
210  */
211 static u64 __read_mostly shadow_acc_track_mask;
212 static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK;
213 
214 /*
215  * The mask/shift to use for saving the original R/X bits when marking the PTE
216  * as not-present for access tracking purposes. We do not save the W bit as the
217  * PTEs being access tracked also need to be dirty tracked, so the W bit will be
218  * restored only when a write is attempted to the page.
219  */
220 static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
221                                                     PT64_EPT_EXECUTABLE_MASK;
222 static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
223 
224 static void mmu_spte_set(u64 *sptep, u64 spte);
225 static void mmu_free_roots(struct kvm_vcpu *vcpu);
226 
227 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value)
228 {
229         BUG_ON((mmio_mask & mmio_value) != mmio_value);
230         shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
231         shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
232 }
233 EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
234 
235 static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
236 {
237         return sp->role.ad_disabled;
238 }
239 
240 static inline bool spte_ad_enabled(u64 spte)
241 {
242         MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
243         return !(spte & shadow_acc_track_value);
244 }
245 
246 static inline u64 spte_shadow_accessed_mask(u64 spte)
247 {
248         MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
249         return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
250 }
251 
252 static inline u64 spte_shadow_dirty_mask(u64 spte)
253 {
254         MMU_WARN_ON((spte & shadow_mmio_mask) == shadow_mmio_value);
255         return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
256 }
257 
258 static inline bool is_access_track_spte(u64 spte)
259 {
260         return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
261 }
262 
263 /*
264  * the low bit of the generation number is always presumed to be zero.
265  * This disables mmio caching during memslot updates.  The concept is
266  * similar to a seqcount but instead of retrying the access we just punt
267  * and ignore the cache.
268  *
269  * spte bits 3-11 are used as bits 1-9 of the generation number,
270  * the bits 52-61 are used as bits 10-19 of the generation number.
271  */
272 #define MMIO_SPTE_GEN_LOW_SHIFT         2
273 #define MMIO_SPTE_GEN_HIGH_SHIFT        52
274 
275 #define MMIO_GEN_SHIFT                  20
276 #define MMIO_GEN_LOW_SHIFT              10
277 #define MMIO_GEN_LOW_MASK               ((1 << MMIO_GEN_LOW_SHIFT) - 2)
278 #define MMIO_GEN_MASK                   ((1 << MMIO_GEN_SHIFT) - 1)
279 
280 static u64 generation_mmio_spte_mask(unsigned int gen)
281 {
282         u64 mask;
283 
284         WARN_ON(gen & ~MMIO_GEN_MASK);
285 
286         mask = (gen & MMIO_GEN_LOW_MASK) << MMIO_SPTE_GEN_LOW_SHIFT;
287         mask |= ((u64)gen >> MMIO_GEN_LOW_SHIFT) << MMIO_SPTE_GEN_HIGH_SHIFT;
288         return mask;
289 }
290 
291 static unsigned int get_mmio_spte_generation(u64 spte)
292 {
293         unsigned int gen;
294 
295         spte &= ~shadow_mmio_mask;
296 
297         gen = (spte >> MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_GEN_LOW_MASK;
298         gen |= (spte >> MMIO_SPTE_GEN_HIGH_SHIFT) << MMIO_GEN_LOW_SHIFT;
299         return gen;
300 }
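     /*
      * Worked example of the encoding above: for gen == 0x402 (bits 1 and 10
      * set), generation_mmio_spte_mask() puts gen bit 1 at spte bit 3 and gen
      * bit 10 at spte bit 52, i.e. mask == (1ull << 3) | (1ull << 52);
      * get_mmio_spte_generation() undoes the two shifts and recovers 0x402,
      * assuming shadow_mmio_mask does not overlap the generation bits.
      */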
301 
302 static unsigned int kvm_current_mmio_generation(struct kvm_vcpu *vcpu)
303 {
304         return kvm_vcpu_memslots(vcpu)->generation & MMIO_GEN_MASK;
305 }
306 
307 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
308                            unsigned access)
309 {
310         unsigned int gen = kvm_current_mmio_generation(vcpu);
311         u64 mask = generation_mmio_spte_mask(gen);
312 
313         access &= ACC_WRITE_MASK | ACC_USER_MASK;
314         mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
315 
316         trace_mark_mmio_spte(sptep, gfn, access, gen);
317         mmu_spte_set(sptep, mask);
318 }
319 
320 static bool is_mmio_spte(u64 spte)
321 {
322         return (spte & shadow_mmio_mask) == shadow_mmio_value;
323 }
324 
325 static gfn_t get_mmio_spte_gfn(u64 spte)
326 {
327         u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
328         return (spte & ~mask) >> PAGE_SHIFT;
329 }
330 
331 static unsigned get_mmio_spte_access(u64 spte)
332 {
333         u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
334         return (spte & ~mask) & ~PAGE_MASK;
335 }
336 
337 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
338                           kvm_pfn_t pfn, unsigned access)
339 {
340         if (unlikely(is_noslot_pfn(pfn))) {
341                 mark_mmio_spte(vcpu, sptep, gfn, access);
342                 return true;
343         }
344 
345         return false;
346 }
347 
348 static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
349 {
350         unsigned int kvm_gen, spte_gen;
351 
352         kvm_gen = kvm_current_mmio_generation(vcpu);
353         spte_gen = get_mmio_spte_generation(spte);
354 
355         trace_check_mmio_spte(spte, kvm_gen, spte_gen);
356         return likely(kvm_gen == spte_gen);
357 }
358 
359 /*
360  * Sets the shadow PTE masks used by the MMU.
361  *
362  * Assumptions:
363  *  - Setting either @accessed_mask or @dirty_mask requires setting both
364  *  - At least one of @accessed_mask or @acc_track_mask must be set
365  */
366 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
367                 u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
368                 u64 acc_track_mask, u64 me_mask)
369 {
370         BUG_ON(!dirty_mask != !accessed_mask);
371         BUG_ON(!accessed_mask && !acc_track_mask);
372         BUG_ON(acc_track_mask & shadow_acc_track_value);
373 
374         shadow_user_mask = user_mask;
375         shadow_accessed_mask = accessed_mask;
376         shadow_dirty_mask = dirty_mask;
377         shadow_nx_mask = nx_mask;
378         shadow_x_mask = x_mask;
379         shadow_present_mask = p_mask;
380         shadow_acc_track_mask = acc_track_mask;
381         shadow_me_mask = me_mask;
382 }
383 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
384 
385 static void kvm_mmu_clear_all_pte_masks(void)
386 {
387         shadow_user_mask = 0;
388         shadow_accessed_mask = 0;
389         shadow_dirty_mask = 0;
390         shadow_nx_mask = 0;
391         shadow_x_mask = 0;
392         shadow_mmio_mask = 0;
393         shadow_present_mask = 0;
394         shadow_acc_track_mask = 0;
395 }
396 
397 static int is_cpuid_PSE36(void)
398 {
399         return 1;
400 }
401 
402 static int is_nx(struct kvm_vcpu *vcpu)
403 {
404         return vcpu->arch.efer & EFER_NX;
405 }
406 
407 static int is_shadow_present_pte(u64 pte)
408 {
409         return (pte != 0) && !is_mmio_spte(pte);
410 }
411 
412 static int is_large_pte(u64 pte)
413 {
414         return pte & PT_PAGE_SIZE_MASK;
415 }
416 
417 static int is_last_spte(u64 pte, int level)
418 {
419         if (level == PT_PAGE_TABLE_LEVEL)
420                 return 1;
421         if (is_large_pte(pte))
422                 return 1;
423         return 0;
424 }
425 
426 static bool is_executable_pte(u64 spte)
427 {
428         return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
429 }
430 
431 static kvm_pfn_t spte_to_pfn(u64 pte)
432 {
433         return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
434 }
435 
436 static gfn_t pse36_gfn_delta(u32 gpte)
437 {
438         int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
439 
440         return (gpte & PT32_DIR_PSE36_MASK) << shift;
441 }
442 
443 #ifdef CONFIG_X86_64
444 static void __set_spte(u64 *sptep, u64 spte)
445 {
446         WRITE_ONCE(*sptep, spte);
447 }
448 
449 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
450 {
451         WRITE_ONCE(*sptep, spte);
452 }
453 
454 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
455 {
456         return xchg(sptep, spte);
457 }
458 
459 static u64 __get_spte_lockless(u64 *sptep)
460 {
461         return READ_ONCE(*sptep);
462 }
463 #else
464 union split_spte {
465         struct {
466                 u32 spte_low;
467                 u32 spte_high;
468         };
469         u64 spte;
470 };
471 
472 static void count_spte_clear(u64 *sptep, u64 spte)
473 {
474         struct kvm_mmu_page *sp =  page_header(__pa(sptep));
475 
476         if (is_shadow_present_pte(spte))
477                 return;
478 
479         /* Ensure the spte is completely set before we increase the count */
480         smp_wmb();
481         sp->clear_spte_count++;
482 }
483 
484 static void __set_spte(u64 *sptep, u64 spte)
485 {
486         union split_spte *ssptep, sspte;
487 
488         ssptep = (union split_spte *)sptep;
489         sspte = (union split_spte)spte;
490 
491         ssptep->spte_high = sspte.spte_high;
492 
493         /*
 494          * If we map the spte from nonpresent to present, we must store
 495          * the high bits first and only then set the present bit, so the
 496          * CPU cannot fetch this spte while we are still writing it.
497          */
498         smp_wmb();
499 
500         WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
501 }
502 
503 static void __update_clear_spte_fast(u64 *sptep, u64 spte)
504 {
505         union split_spte *ssptep, sspte;
506 
507         ssptep = (union split_spte *)sptep;
508         sspte = (union split_spte)spte;
509 
510         WRITE_ONCE(ssptep->spte_low, sspte.spte_low);
511 
512         /*
 513          * If we map the spte from present to nonpresent, we must clear the
 514          * present bit first so that a vcpu cannot fetch the stale high bits.
515          */
516         smp_wmb();
517 
518         ssptep->spte_high = sspte.spte_high;
519         count_spte_clear(sptep, spte);
520 }
521 
522 static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
523 {
524         union split_spte *ssptep, sspte, orig;
525 
526         ssptep = (union split_spte *)sptep;
527         sspte = (union split_spte)spte;
528 
529         /* xchg acts as a barrier before the setting of the high bits */
530         orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
531         orig.spte_high = ssptep->spte_high;
532         ssptep->spte_high = sspte.spte_high;
533         count_spte_clear(sptep, spte);
534 
535         return orig.spte;
536 }
537 
538 /*
 539  * The idea of reading the spte in this lightweight way on x86_32 comes
 540  * from gup_get_pte(arch/x86/mm/gup.c).
 541  *
 542  * An spte tlb flush may be pending, because kvm_set_pte_rmapp
 543  * coalesces them and we are running outside of the MMU lock.  Therefore
544  * we need to protect against in-progress updates of the spte.
545  *
546  * Reading the spte while an update is in progress may get the old value
547  * for the high part of the spte.  The race is fine for a present->non-present
548  * change (because the high part of the spte is ignored for non-present spte),
549  * but for a present->present change we must reread the spte.
550  *
551  * All such changes are done in two steps (present->non-present and
552  * non-present->present), hence it is enough to count the number of
553  * present->non-present updates: if it changed while reading the spte,
554  * we might have hit the race.  This is done using clear_spte_count.
555  */
556 static u64 __get_spte_lockless(u64 *sptep)
557 {
558         struct kvm_mmu_page *sp =  page_header(__pa(sptep));
559         union split_spte spte, *orig = (union split_spte *)sptep;
560         int count;
561 
562 retry:
563         count = sp->clear_spte_count;
564         smp_rmb();
565 
566         spte.spte_low = orig->spte_low;
567         smp_rmb();
568 
569         spte.spte_high = orig->spte_high;
570         smp_rmb();
571 
572         if (unlikely(spte.spte_low != orig->spte_low ||
573               count != sp->clear_spte_count))
574                 goto retry;
575 
576         return spte.spte;
577 }
578 #endif
579 
580 static bool spte_can_locklessly_be_made_writable(u64 spte)
581 {
582         return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
583                 (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
584 }
585 
586 static bool spte_has_volatile_bits(u64 spte)
587 {
588         if (!is_shadow_present_pte(spte))
589                 return false;
590 
591         /*
 592          * Always update the spte atomically if it can be updated
 593          * outside of the mmu-lock: this ensures the dirty bit is not
 594          * lost and gives us a stable is_writable_pte(), so that a
 595          * needed tlb flush is not missed.
596          */
597         if (spte_can_locklessly_be_made_writable(spte) ||
598             is_access_track_spte(spte))
599                 return true;
600 
601         if (spte_ad_enabled(spte)) {
602                 if ((spte & shadow_accessed_mask) == 0 ||
603                     (is_writable_pte(spte) && (spte & shadow_dirty_mask) == 0))
604                         return true;
605         }
606 
607         return false;
608 }
609 
610 static bool is_accessed_spte(u64 spte)
611 {
612         u64 accessed_mask = spte_shadow_accessed_mask(spte);
613 
614         return accessed_mask ? spte & accessed_mask
615                              : !is_access_track_spte(spte);
616 }
617 
618 static bool is_dirty_spte(u64 spte)
619 {
620         u64 dirty_mask = spte_shadow_dirty_mask(spte);
621 
622         return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
623 }
624 
625 /* Rules for using mmu_spte_set:
626  * Set the sptep from nonpresent to present.
627  * Note: the sptep being assigned *must* be either not present
628  * or in a state where the hardware will not attempt to update
629  * the spte.
630  */
631 static void mmu_spte_set(u64 *sptep, u64 new_spte)
632 {
633         WARN_ON(is_shadow_present_pte(*sptep));
634         __set_spte(sptep, new_spte);
635 }
636 
637 /*
638  * Update the SPTE (excluding the PFN), but do not track changes in its
639  * accessed/dirty status.
640  */
641 static u64 mmu_spte_update_no_track(u64 *sptep, u64 new_spte)
642 {
643         u64 old_spte = *sptep;
644 
645         WARN_ON(!is_shadow_present_pte(new_spte));
646 
647         if (!is_shadow_present_pte(old_spte)) {
648                 mmu_spte_set(sptep, new_spte);
649                 return old_spte;
650         }
651 
652         if (!spte_has_volatile_bits(old_spte))
653                 __update_clear_spte_fast(sptep, new_spte);
654         else
655                 old_spte = __update_clear_spte_slow(sptep, new_spte);
656 
657         WARN_ON(spte_to_pfn(old_spte) != spte_to_pfn(new_spte));
658 
659         return old_spte;
660 }
661 
662 /* Rules for using mmu_spte_update:
 663  * Update the state bits; the mapped pfn must not change.
 664  *
 665  * Whenever we overwrite a writable spte with a read-only one we
 666  * should flush remote TLBs.  Otherwise rmap_write_protect
 667  * will find a read-only spte even though the writable spte
 668  * might still be cached in a CPU's TLB; the return value indicates
 669  * this case.
670  *
671  * Returns true if the TLB needs to be flushed
672  */
673 static bool mmu_spte_update(u64 *sptep, u64 new_spte)
674 {
675         bool flush = false;
676         u64 old_spte = mmu_spte_update_no_track(sptep, new_spte);
677 
678         if (!is_shadow_present_pte(old_spte))
679                 return false;
680 
681         /*
 682          * Updating the spte outside of the mmu-lock is safe, since
 683          * we always update it atomically; see the comments in
684          * spte_has_volatile_bits().
685          */
686         if (spte_can_locklessly_be_made_writable(old_spte) &&
687               !is_writable_pte(new_spte))
688                 flush = true;
689 
690         /*
691          * Flush TLB when accessed/dirty states are changed in the page tables,
692          * to guarantee consistency between TLB and page tables.
693          */
694 
695         if (is_accessed_spte(old_spte) && !is_accessed_spte(new_spte)) {
696                 flush = true;
697                 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
698         }
699 
700         if (is_dirty_spte(old_spte) && !is_dirty_spte(new_spte)) {
701                 flush = true;
702                 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
703         }
704 
705         return flush;
706 }
707 
708 /*
709  * Rules for using mmu_spte_clear_track_bits:
 710  * It sets the sptep from present to nonpresent while tracking the
 711  * state bits; it is used to clear a last-level sptep.
712  * Returns non-zero if the PTE was previously valid.
713  */
714 static int mmu_spte_clear_track_bits(u64 *sptep)
715 {
716         kvm_pfn_t pfn;
717         u64 old_spte = *sptep;
718 
719         if (!spte_has_volatile_bits(old_spte))
720                 __update_clear_spte_fast(sptep, 0ull);
721         else
722                 old_spte = __update_clear_spte_slow(sptep, 0ull);
723 
724         if (!is_shadow_present_pte(old_spte))
725                 return 0;
726 
727         pfn = spte_to_pfn(old_spte);
728 
729         /*
 730          * KVM does not hold a refcount on the pages used by the
 731          * kvm mmu, so before a page can be reclaimed it must be
 732          * unmapped from the mmu first.
733          */
734         WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
735 
736         if (is_accessed_spte(old_spte))
737                 kvm_set_pfn_accessed(pfn);
738 
739         if (is_dirty_spte(old_spte))
740                 kvm_set_pfn_dirty(pfn);
741 
742         return 1;
743 }
744 
745 /*
746  * Rules for using mmu_spte_clear_no_track:
 747  * Directly clear the spte without tracking the state bits of the sptep;
 748  * it is used to clear upper-level sptes.
749  */
750 static void mmu_spte_clear_no_track(u64 *sptep)
751 {
752         __update_clear_spte_fast(sptep, 0ull);
753 }
754 
755 static u64 mmu_spte_get_lockless(u64 *sptep)
756 {
757         return __get_spte_lockless(sptep);
758 }
759 
760 static u64 mark_spte_for_access_track(u64 spte)
761 {
762         if (spte_ad_enabled(spte))
763                 return spte & ~shadow_accessed_mask;
764 
765         if (is_access_track_spte(spte))
766                 return spte;
767 
768         /*
769          * Making an Access Tracking PTE will result in removal of write access
770          * from the PTE. So, verify that we will be able to restore the write
771          * access in the fast page fault path later on.
772          */
773         WARN_ONCE((spte & PT_WRITABLE_MASK) &&
774                   !spte_can_locklessly_be_made_writable(spte),
775                   "kvm: Writable SPTE is not locklessly dirty-trackable\n");
776 
777         WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask <<
778                           shadow_acc_track_saved_bits_shift),
779                   "kvm: Access Tracking saved bit locations are not zero\n");
780 
781         spte |= (spte & shadow_acc_track_saved_bits_mask) <<
782                 shadow_acc_track_saved_bits_shift;
783         spte &= ~shadow_acc_track_mask;
784 
785         return spte;
786 }
787 
788 /* Restore an acc-track PTE back to a regular PTE */
789 static u64 restore_acc_track_spte(u64 spte)
790 {
791         u64 new_spte = spte;
792         u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift)
793                          & shadow_acc_track_saved_bits_mask;
794 
795         WARN_ON_ONCE(spte_ad_enabled(spte));
796         WARN_ON_ONCE(!is_access_track_spte(spte));
797 
798         new_spte &= ~shadow_acc_track_mask;
799         new_spte &= ~(shadow_acc_track_saved_bits_mask <<
800                       shadow_acc_track_saved_bits_shift);
801         new_spte |= saved_bits;
802 
803         return new_spte;
804 }
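     /*
      * Example of the save/restore scheme above: an EPT spte with the R and X
      * permission bits set (bits 0 and 2, per shadow_acc_track_saved_bits_mask)
      * has those bits copied up to bits 52 and 54 by mark_spte_for_access_track()
      * before shadow_acc_track_mask is cleared, so is_access_track_spte()
      * recognizes the result; restore_acc_track_spte() copies the saved bits
      * back down and clears the high copies.
      */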
805 
806 /* Returns the Accessed status of the PTE and resets it at the same time. */
807 static bool mmu_spte_age(u64 *sptep)
808 {
809         u64 spte = mmu_spte_get_lockless(sptep);
810 
811         if (!is_accessed_spte(spte))
812                 return false;
813 
814         if (spte_ad_enabled(spte)) {
815                 clear_bit((ffs(shadow_accessed_mask) - 1),
816                           (unsigned long *)sptep);
817         } else {
818                 /*
819                  * Capture the dirty status of the page, so that it doesn't get
820                  * lost when the SPTE is marked for access tracking.
821                  */
822                 if (is_writable_pte(spte))
823                         kvm_set_pfn_dirty(spte_to_pfn(spte));
824 
825                 spte = mark_spte_for_access_track(spte);
826                 mmu_spte_update_no_track(sptep, spte);
827         }
828 
829         return true;
830 }
831 
832 static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
833 {
834         /*
 835          * Prevent page table teardown by making any freeing thread wait
 836          * for the kvm_flush_remote_tlbs() IPI to all active vcpus.
837          */
838         local_irq_disable();
839 
840         /*
841          * Make sure a following spte read is not reordered ahead of the write
842          * to vcpu->mode.
843          */
844         smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
845 }
846 
847 static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
848 {
849         /*
850          * Make sure the write to vcpu->mode is not reordered in front of
 851          * reads to sptes.  If it is, kvm_commit_zap_page() can see us
852          * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
853          */
854         smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
855         local_irq_enable();
856 }
857 
858 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
859                                   struct kmem_cache *base_cache, int min)
860 {
861         void *obj;
862 
863         if (cache->nobjs >= min)
864                 return 0;
865         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
866                 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
867                 if (!obj)
868                         return -ENOMEM;
869                 cache->objects[cache->nobjs++] = obj;
870         }
871         return 0;
872 }
873 
874 static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
875 {
876         return cache->nobjs;
877 }
878 
879 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
880                                   struct kmem_cache *cache)
881 {
882         while (mc->nobjs)
883                 kmem_cache_free(cache, mc->objects[--mc->nobjs]);
884 }
885 
886 static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
887                                        int min)
888 {
889         void *page;
890 
891         if (cache->nobjs >= min)
892                 return 0;
893         while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
894                 page = (void *)__get_free_page(GFP_KERNEL);
895                 if (!page)
896                         return -ENOMEM;
897                 cache->objects[cache->nobjs++] = page;
898         }
899         return 0;
900 }
901 
902 static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
903 {
904         while (mc->nobjs)
905                 free_page((unsigned long)mc->objects[--mc->nobjs]);
906 }
907 
908 static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
909 {
910         int r;
911 
912         r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
913                                    pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
914         if (r)
915                 goto out;
916         r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
917         if (r)
918                 goto out;
919         r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
920                                    mmu_page_header_cache, 4);
921 out:
922         return r;
923 }
924 
925 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
926 {
927         mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
928                                 pte_list_desc_cache);
929         mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
930         mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
931                                 mmu_page_header_cache);
932 }
933 
934 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
935 {
936         void *p;
937 
938         BUG_ON(!mc->nobjs);
939         p = mc->objects[--mc->nobjs];
940         return p;
941 }
942 
943 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
944 {
945         return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
946 }
947 
948 static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
949 {
950         kmem_cache_free(pte_list_desc_cache, pte_list_desc);
951 }
952 
953 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
954 {
955         if (!sp->role.direct)
956                 return sp->gfns[index];
957 
958         return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
959 }
960 
961 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
962 {
963         if (sp->role.direct)
964                 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
965         else
966                 sp->gfns[index] = gfn;
967 }
968 
969 /*
970  * Return the pointer to the large page information for a given gfn,
971  * handling slots that are not large page aligned.
972  */
973 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
974                                               struct kvm_memory_slot *slot,
975                                               int level)
976 {
977         unsigned long idx;
978 
979         idx = gfn_to_index(gfn, slot->base_gfn, level);
980         return &slot->arch.lpage_info[level - 2][idx];
981 }
982 
983 static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
984                                             gfn_t gfn, int count)
985 {
986         struct kvm_lpage_info *linfo;
987         int i;
988 
989         for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
990                 linfo = lpage_info_slot(gfn, slot, i);
991                 linfo->disallow_lpage += count;
992                 WARN_ON(linfo->disallow_lpage < 0);
993         }
994 }
995 
996 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
997 {
998         update_gfn_disallow_lpage_count(slot, gfn, 1);
999 }
1000 
1001 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
1002 {
1003         update_gfn_disallow_lpage_count(slot, gfn, -1);
1004 }
1005 
1006 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
1007 {
1008         struct kvm_memslots *slots;
1009         struct kvm_memory_slot *slot;
1010         gfn_t gfn;
1011 
1012         kvm->arch.indirect_shadow_pages++;
1013         gfn = sp->gfn;
1014         slots = kvm_memslots_for_spte_role(kvm, sp->role);
1015         slot = __gfn_to_memslot(slots, gfn);
1016 
1017         /* the non-leaf shadow pages are kept read-only. */
1018         if (sp->role.level > PT_PAGE_TABLE_LEVEL)
1019                 return kvm_slot_page_track_add_page(kvm, slot, gfn,
1020                                                     KVM_PAGE_TRACK_WRITE);
1021 
1022         kvm_mmu_gfn_disallow_lpage(slot, gfn);
1023 }
1024 
1025 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
1026 {
1027         struct kvm_memslots *slots;
1028         struct kvm_memory_slot *slot;
1029         gfn_t gfn;
1030 
1031         kvm->arch.indirect_shadow_pages--;
1032         gfn = sp->gfn;
1033         slots = kvm_memslots_for_spte_role(kvm, sp->role);
1034         slot = __gfn_to_memslot(slots, gfn);
1035         if (sp->role.level > PT_PAGE_TABLE_LEVEL)
1036                 return kvm_slot_page_track_remove_page(kvm, slot, gfn,
1037                                                        KVM_PAGE_TRACK_WRITE);
1038 
1039         kvm_mmu_gfn_allow_lpage(slot, gfn);
1040 }
1041 
1042 static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
1043                                           struct kvm_memory_slot *slot)
1044 {
1045         struct kvm_lpage_info *linfo;
1046 
1047         if (slot) {
1048                 linfo = lpage_info_slot(gfn, slot, level);
1049                 return !!linfo->disallow_lpage;
1050         }
1051 
1052         return true;
1053 }
1054 
1055 static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
1056                                         int level)
1057 {
1058         struct kvm_memory_slot *slot;
1059 
1060         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1061         return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
1062 }
1063 
1064 static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
1065 {
1066         unsigned long page_size;
1067         int i, ret = 0;
1068 
1069         page_size = kvm_host_page_size(kvm, gfn);
1070 
1071         for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
1072                 if (page_size >= KVM_HPAGE_SIZE(i))
1073                         ret = i;
1074                 else
1075                         break;
1076         }
1077 
1078         return ret;
1079 }
1080 
1081 static inline bool memslot_valid_for_gpte(struct kvm_memory_slot *slot,
1082                                           bool no_dirty_log)
1083 {
1084         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
1085                 return false;
1086         if (no_dirty_log && slot->dirty_bitmap)
1087                 return false;
1088 
1089         return true;
1090 }
1091 
1092 static struct kvm_memory_slot *
1093 gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
1094                             bool no_dirty_log)
1095 {
1096         struct kvm_memory_slot *slot;
1097 
1098         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1099         if (!memslot_valid_for_gpte(slot, no_dirty_log))
1100                 slot = NULL;
1101 
1102         return slot;
1103 }
1104 
1105 static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
1106                          bool *force_pt_level)
1107 {
1108         int host_level, level, max_level;
1109         struct kvm_memory_slot *slot;
1110 
1111         if (unlikely(*force_pt_level))
1112                 return PT_PAGE_TABLE_LEVEL;
1113 
1114         slot = kvm_vcpu_gfn_to_memslot(vcpu, large_gfn);
1115         *force_pt_level = !memslot_valid_for_gpte(slot, true);
1116         if (unlikely(*force_pt_level))
1117                 return PT_PAGE_TABLE_LEVEL;
1118 
1119         host_level = host_mapping_level(vcpu->kvm, large_gfn);
1120 
1121         if (host_level == PT_PAGE_TABLE_LEVEL)
1122                 return host_level;
1123 
1124         max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
1125 
1126         for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
1127                 if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
1128                         break;
1129 
1130         return level - 1;
1131 }
1132 
1133 /*
1134  * About rmap_head encoding:
1135  *
1136  * If the bit zero of rmap_head->val is clear, then it points to the only spte
1137  * in this rmap chain. Otherwise, (rmap_head->val & ~1) points to a struct
1138  * pte_list_desc containing more mappings.
1139  */
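     /*
      * For example, a gfn mapped by a single spte has rmap_head->val holding
      * that sptep directly (bit zero clear); once a second spte is added,
      * pte_list_add() below allocates a pte_list_desc containing both pointers
      * and stores the descriptor address with bit zero set in rmap_head->val.
      */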
1140 
1141 /*
1142  * Returns the number of pointers in the rmap chain, not counting the new one.
1143  */
1144 static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
1145                         struct kvm_rmap_head *rmap_head)
1146 {
1147         struct pte_list_desc *desc;
1148         int i, count = 0;
1149 
1150         if (!rmap_head->val) {
1151                 rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
1152                 rmap_head->val = (unsigned long)spte;
1153         } else if (!(rmap_head->val & 1)) {
1154                 rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
1155                 desc = mmu_alloc_pte_list_desc(vcpu);
1156                 desc->sptes[0] = (u64 *)rmap_head->val;
1157                 desc->sptes[1] = spte;
1158                 rmap_head->val = (unsigned long)desc | 1;
1159                 ++count;
1160         } else {
1161                 rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
1162                 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1163                 while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
1164                         desc = desc->more;
1165                         count += PTE_LIST_EXT;
1166                 }
1167                 if (desc->sptes[PTE_LIST_EXT-1]) {
1168                         desc->more = mmu_alloc_pte_list_desc(vcpu);
1169                         desc = desc->more;
1170                 }
1171                 for (i = 0; desc->sptes[i]; ++i)
1172                         ++count;
1173                 desc->sptes[i] = spte;
1174         }
1175         return count;
1176 }
1177 
1178 static void
1179 pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
1180                            struct pte_list_desc *desc, int i,
1181                            struct pte_list_desc *prev_desc)
1182 {
1183         int j;
1184 
1185         for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
1186                 ;
1187         desc->sptes[i] = desc->sptes[j];
1188         desc->sptes[j] = NULL;
1189         if (j != 0)
1190                 return;
1191         if (!prev_desc && !desc->more)
1192                 rmap_head->val = (unsigned long)desc->sptes[0];
1193         else
1194                 if (prev_desc)
1195                         prev_desc->more = desc->more;
1196                 else
1197                         rmap_head->val = (unsigned long)desc->more | 1;
1198         mmu_free_pte_list_desc(desc);
1199 }
1200 
1201 static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
1202 {
1203         struct pte_list_desc *desc;
1204         struct pte_list_desc *prev_desc;
1205         int i;
1206 
1207         if (!rmap_head->val) {
1208                 printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
1209                 BUG();
1210         } else if (!(rmap_head->val & 1)) {
1211                 rmap_printk("pte_list_remove:  %p 1->0\n", spte);
1212                 if ((u64 *)rmap_head->val != spte) {
1213                         printk(KERN_ERR "pte_list_remove:  %p 1->BUG\n", spte);
1214                         BUG();
1215                 }
1216                 rmap_head->val = 0;
1217         } else {
1218                 rmap_printk("pte_list_remove:  %p many->many\n", spte);
1219                 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1220                 prev_desc = NULL;
1221                 while (desc) {
1222                         for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
1223                                 if (desc->sptes[i] == spte) {
1224                                         pte_list_desc_remove_entry(rmap_head,
1225                                                         desc, i, prev_desc);
1226                                         return;
1227                                 }
1228                         }
1229                         prev_desc = desc;
1230                         desc = desc->more;
1231                 }
1232                 pr_err("pte_list_remove: %p many->many\n", spte);
1233                 BUG();
1234         }
1235 }
1236 
1237 static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
1238                                            struct kvm_memory_slot *slot)
1239 {
1240         unsigned long idx;
1241 
1242         idx = gfn_to_index(gfn, slot->base_gfn, level);
1243         return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
1244 }
1245 
1246 static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
1247                                          struct kvm_mmu_page *sp)
1248 {
1249         struct kvm_memslots *slots;
1250         struct kvm_memory_slot *slot;
1251 
1252         slots = kvm_memslots_for_spte_role(kvm, sp->role);
1253         slot = __gfn_to_memslot(slots, gfn);
1254         return __gfn_to_rmap(gfn, sp->role.level, slot);
1255 }
1256 
1257 static bool rmap_can_add(struct kvm_vcpu *vcpu)
1258 {
1259         struct kvm_mmu_memory_cache *cache;
1260 
1261         cache = &vcpu->arch.mmu_pte_list_desc_cache;
1262         return mmu_memory_cache_free_objects(cache);
1263 }
1264 
1265 static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1266 {
1267         struct kvm_mmu_page *sp;
1268         struct kvm_rmap_head *rmap_head;
1269 
1270         sp = page_header(__pa(spte));
1271         kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
1272         rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1273         return pte_list_add(vcpu, spte, rmap_head);
1274 }
1275 
1276 static void rmap_remove(struct kvm *kvm, u64 *spte)
1277 {
1278         struct kvm_mmu_page *sp;
1279         gfn_t gfn;
1280         struct kvm_rmap_head *rmap_head;
1281 
1282         sp = page_header(__pa(spte));
1283         gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
1284         rmap_head = gfn_to_rmap(kvm, gfn, sp);
1285         pte_list_remove(spte, rmap_head);
1286 }
1287 
1288 /*
1289  * Used by the following functions to iterate through the sptes linked by a
1290  * rmap.  All fields are private and not assumed to be used outside.
1291  */
1292 struct rmap_iterator {
1293         /* private fields */
1294         struct pte_list_desc *desc;     /* holds the sptep if not NULL */
1295         int pos;                        /* index of the sptep */
1296 };
1297 
1298 /*
1299  * Iteration must be started by this function.  This should also be used after
1300  * removing/dropping sptes from the rmap link because in such cases the
1301  * information in the iterator may not be valid.
1302  *
1303  * Returns sptep if found, NULL otherwise.
1304  */
1305 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head,
1306                            struct rmap_iterator *iter)
1307 {
1308         u64 *sptep;
1309 
1310         if (!rmap_head->val)
1311                 return NULL;
1312 
1313         if (!(rmap_head->val & 1)) {
1314                 iter->desc = NULL;
1315                 sptep = (u64 *)rmap_head->val;
1316                 goto out;
1317         }
1318 
1319         iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
1320         iter->pos = 0;
1321         sptep = iter->desc->sptes[iter->pos];
1322 out:
1323         BUG_ON(!is_shadow_present_pte(*sptep));
1324         return sptep;
1325 }
1326 
1327 /*
1328  * Must be used with a valid iterator: e.g. after rmap_get_first().
1329  *
1330  * Returns sptep if found, NULL otherwise.
1331  */
1332 static u64 *rmap_get_next(struct rmap_iterator *iter)
1333 {
1334         u64 *sptep;
1335 
1336         if (iter->desc) {
1337                 if (iter->pos < PTE_LIST_EXT - 1) {
1338                         ++iter->pos;
1339                         sptep = iter->desc->sptes[iter->pos];
1340                         if (sptep)
1341                                 goto out;
1342                 }
1343 
1344                 iter->desc = iter->desc->more;
1345 
1346                 if (iter->desc) {
1347                         iter->pos = 0;
1348                         /* desc->sptes[0] cannot be NULL */
1349                         sptep = iter->desc->sptes[iter->pos];
1350                         goto out;
1351                 }
1352         }
1353 
1354         return NULL;
1355 out:
1356         BUG_ON(!is_shadow_present_pte(*sptep));
1357         return sptep;
1358 }
1359 
1360 #define for_each_rmap_spte(_rmap_head_, _iter_, _spte_)                 \
1361         for (_spte_ = rmap_get_first(_rmap_head_, _iter_);              \
1362              _spte_; _spte_ = rmap_get_next(_iter_))
1363 
1364 static void drop_spte(struct kvm *kvm, u64 *sptep)
1365 {
1366         if (mmu_spte_clear_track_bits(sptep))
1367                 rmap_remove(kvm, sptep);
1368 }
1369 
1370 
1371 static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
1372 {
1373         if (is_large_pte(*sptep)) {
1374                 WARN_ON(page_header(__pa(sptep))->role.level ==
1375                         PT_PAGE_TABLE_LEVEL);
1376                 drop_spte(kvm, sptep);
1377                 --kvm->stat.lpages;
1378                 return true;
1379         }
1380 
1381         return false;
1382 }
1383 
1384 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1385 {
1386         if (__drop_large_spte(vcpu->kvm, sptep))
1387                 kvm_flush_remote_tlbs(vcpu->kvm);
1388 }
1389 
1390 /*
1391  * Write-protect the specified @sptep; @pt_protect indicates whether the
1392  * spte write-protection is caused by protecting a shadow page table.
1393  *
1394  * Note: write protection differs between dirty logging and spte
1395  * protection:
1396  * - for dirty logging, the spte can be made writable at any time if
1397  *   its dirty bitmap is properly set.
1398  * - for spte protection, the spte can be made writable only after
1399  *   unsync-ing the shadow page.
1400  *
1401  * Return true if the tlb needs to be flushed.
1402  */
1403 static bool spte_write_protect(u64 *sptep, bool pt_protect)
1404 {
1405         u64 spte = *sptep;
1406 
1407         if (!is_writable_pte(spte) &&
1408               !(pt_protect && spte_can_locklessly_be_made_writable(spte)))
1409                 return false;
1410 
1411         rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
1412 
1413         if (pt_protect)
1414                 spte &= ~SPTE_MMU_WRITEABLE;
1415         spte = spte & ~PT_WRITABLE_MASK;
1416 
1417         return mmu_spte_update(sptep, spte);
1418 }
1419 
1420 static bool __rmap_write_protect(struct kvm *kvm,
1421                                  struct kvm_rmap_head *rmap_head,
1422                                  bool pt_protect)
1423 {
1424         u64 *sptep;
1425         struct rmap_iterator iter;
1426         bool flush = false;
1427 
1428         for_each_rmap_spte(rmap_head, &iter, sptep)
1429                 flush |= spte_write_protect(sptep, pt_protect);
1430 
1431         return flush;
1432 }
1433 
1434 static bool spte_clear_dirty(u64 *sptep)
1435 {
1436         u64 spte = *sptep;
1437 
1438         rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
1439 
1440         spte &= ~shadow_dirty_mask;
1441 
1442         return mmu_spte_update(sptep, spte);
1443 }
1444 
1445 static bool wrprot_ad_disabled_spte(u64 *sptep)
1446 {
1447         bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
1448                                                (unsigned long *)sptep);
1449         if (was_writable)
1450                 kvm_set_pfn_dirty(spte_to_pfn(*sptep));
1451 
1452         return was_writable;
1453 }
1454 
1455 /*
1456  * Gets the GFN ready for another round of dirty logging by clearing the
1457  *      - D bit on ad-enabled SPTEs, and
1458  *      - W bit on ad-disabled SPTEs.
1459  * Returns true iff any D or W bits were cleared.
1460  */
1461 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1462 {
1463         u64 *sptep;
1464         struct rmap_iterator iter;
1465         bool flush = false;
1466 
1467         for_each_rmap_spte(rmap_head, &iter, sptep)
1468                 if (spte_ad_enabled(*sptep))
1469                         flush |= spte_clear_dirty(sptep);
1470                 else
1471                         flush |= wrprot_ad_disabled_spte(sptep);
1472 
1473         return flush;
1474 }
1475 
1476 static bool spte_set_dirty(u64 *sptep)
1477 {
1478         u64 spte = *sptep;
1479 
1480         rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
1481 
1482         spte |= shadow_dirty_mask;
1483 
1484         return mmu_spte_update(sptep, spte);
1485 }
1486 
1487 static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1488 {
1489         u64 *sptep;
1490         struct rmap_iterator iter;
1491         bool flush = false;
1492 
1493         for_each_rmap_spte(rmap_head, &iter, sptep)
1494                 if (spte_ad_enabled(*sptep))
1495                         flush |= spte_set_dirty(sptep);
1496 
1497         return flush;
1498 }
1499 
1500 /**
1501  * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1502  * @kvm: kvm instance
1503  * @slot: slot to protect
1504  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1505  * @mask: indicates which pages we should protect
1506  *
1507  * Used when we do not need to care about huge page mappings: e.g. during dirty
1508  * logging we do not have any such mappings.
1509  */
1510 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1511                                      struct kvm_memory_slot *slot,
1512                                      gfn_t gfn_offset, unsigned long mask)
1513 {
1514         struct kvm_rmap_head *rmap_head;
1515 
1516         while (mask) {
1517                 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1518                                           PT_PAGE_TABLE_LEVEL, slot);
1519                 __rmap_write_protect(kvm, rmap_head, false);
1520 
1521                 /* clear the first set bit */
1522                 mask &= mask - 1;
1523         }
1524 }
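     /*
      * Example of the mask walk above: for mask == 0x5, the loop write-protects
      * the 4K rmap chains at gfn_offset + 0 and gfn_offset + 2; "mask &= mask - 1"
      * clears the lowest set bit on each iteration, so exactly one rmap chain
      * is handled per set bit of the input mask.
      */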
1525 
1526 /**
1527  * kvm_mmu_clear_dirty_pt_masked - clear MMU D-bit for PT level pages, or write
1528  * protect the page if the D-bit isn't supported.
1529  * @kvm: kvm instance
1530  * @slot: slot to clear D-bit
1531  * @gfn_offset: start of the BITS_PER_LONG pages we care about
1532  * @mask: indicates which pages we should clear D-bit
1533  *
1534  * Used for PML to re-log the dirty GPAs after userspace querying dirty_bitmap.
1535  */
1536 void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1537                                      struct kvm_memory_slot *slot,
1538                                      gfn_t gfn_offset, unsigned long mask)
1539 {
1540         struct kvm_rmap_head *rmap_head;
1541 
1542         while (mask) {
1543                 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
1544                                           PT_PAGE_TABLE_LEVEL, slot);
1545                 __rmap_clear_dirty(kvm, rmap_head);
1546 
1547                 /* clear the first set bit */
1548                 mask &= mask - 1;
1549         }
1550 }
1551 EXPORT_SYMBOL_GPL(kvm_mmu_clear_dirty_pt_masked);
1552 
1553 /**
1554  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1555  * PT level pages.
1556  *
1557  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1558  * enable dirty logging for them.
1559  *
1560  * Used when we do not need to care about huge page mappings: e.g. during dirty
1561  * logging we do not have any such mappings.
1562  */
1563 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1564                                 struct kvm_memory_slot *slot,
1565                                 gfn_t gfn_offset, unsigned long mask)
1566 {
1567         if (kvm_x86_ops->enable_log_dirty_pt_masked)
1568                 kvm_x86_ops->enable_log_dirty_pt_masked(kvm, slot, gfn_offset,
1569                                 mask);
1570         else
1571                 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1572 }
1573 
1574 /**
1575  * kvm_arch_write_log_dirty - emulate dirty page logging
1576  * @vcpu: Guest mode vcpu
1577  *
1578  * Emulate arch specific page modification logging for the
1579  * nested hypervisor
1580  */
1581 int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu)
1582 {
1583         if (kvm_x86_ops->write_log_dirty)
1584                 return kvm_x86_ops->write_log_dirty(vcpu);
1585 
1586         return 0;
1587 }
1588 
1589 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
1590                                     struct kvm_memory_slot *slot, u64 gfn)
1591 {
1592         struct kvm_rmap_head *rmap_head;
1593         int i;
1594         bool write_protected = false;
1595 
1596         for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
1597                 rmap_head = __gfn_to_rmap(gfn, i, slot);
1598                 write_protected |= __rmap_write_protect(kvm, rmap_head, true);
1599         }
1600 
1601         return write_protected;
1602 }
1603 
1604 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
1605 {
1606         struct kvm_memory_slot *slot;
1607 
1608         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
1609         return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
1610 }
1611 
1612 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
1613 {
1614         u64 *sptep;
1615         struct rmap_iterator iter;
1616         bool flush = false;
1617 
1618         while ((sptep = rmap_get_first(rmap_head, &iter))) {
1619                 rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
1620 
1621                 drop_spte(kvm, sptep);
1622                 flush = true;
1623         }
1624 
1625         return flush;
1626 }
1627 
1628 static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1629                            struct kvm_memory_slot *slot, gfn_t gfn, int level,
1630                            unsigned long data)
1631 {
1632         return kvm_zap_rmapp(kvm, rmap_head);
1633 }
1634 
1635 static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1636                              struct kvm_memory_slot *slot, gfn_t gfn, int level,
1637                              unsigned long data)
1638 {
1639         u64 *sptep;
1640         struct rmap_iterator iter;
1641         int need_flush = 0;
1642         u64 new_spte;
1643         pte_t *ptep = (pte_t *)data;
1644         kvm_pfn_t new_pfn;
1645 
1646         WARN_ON(pte_huge(*ptep));
1647         new_pfn = pte_pfn(*ptep);
1648 
1649 restart:
1650         for_each_rmap_spte(rmap_head, &iter, sptep) {
1651                 rmap_printk("kvm_set_pte_rmapp: spte %p %llx gfn %llx (%d)\n",
1652                             sptep, *sptep, gfn, level);
1653 
1654                 need_flush = 1;
1655 
1656                 if (pte_write(*ptep)) {
1657                         drop_spte(kvm, sptep);
1658                         goto restart;
1659                 } else {
1660                         new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
1661                         new_spte |= (u64)new_pfn << PAGE_SHIFT;
1662 
1663                         new_spte &= ~PT_WRITABLE_MASK;
1664                         new_spte &= ~SPTE_HOST_WRITEABLE;
1665 
1666                         new_spte = mark_spte_for_access_track(new_spte);
1667 
1668                         mmu_spte_clear_track_bits(sptep);
1669                         mmu_spte_set(sptep, new_spte);
1670                 }
1671         }
1672 
1673         if (need_flush)
1674                 kvm_flush_remote_tlbs(kvm);
1675 
1676         return 0;
1677 }
1678 
1679 struct slot_rmap_walk_iterator {
1680         /* input fields. */
1681         struct kvm_memory_slot *slot;
1682         gfn_t start_gfn;
1683         gfn_t end_gfn;
1684         int start_level;
1685         int end_level;
1686 
1687         /* output fields. */
1688         gfn_t gfn;
1689         struct kvm_rmap_head *rmap;
1690         int level;
1691 
1692         /* private field. */
1693         struct kvm_rmap_head *end_rmap;
1694 };
1695 
1696 static void
1697 rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
1698 {
1699         iterator->level = level;
1700         iterator->gfn = iterator->start_gfn;
1701         iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
1702         iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
1703                                            iterator->slot);
1704 }
1705 
1706 static void
1707 slot_rmap_walk_init(struct slot_rmap_walk_iterator *iterator,
1708                     struct kvm_memory_slot *slot, int start_level,
1709                     int end_level, gfn_t start_gfn, gfn_t end_gfn)
1710 {
1711         iterator->slot = slot;
1712         iterator->start_level = start_level;
1713         iterator->end_level = end_level;
1714         iterator->start_gfn = start_gfn;
1715         iterator->end_gfn = end_gfn;
1716 
1717         rmap_walk_init_level(iterator, iterator->start_level);
1718 }
1719 
1720 static bool slot_rmap_walk_okay(struct slot_rmap_walk_iterator *iterator)
1721 {
1722         return !!iterator->rmap;
1723 }
1724 
1725 static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
1726 {
1727         if (++iterator->rmap <= iterator->end_rmap) {
1728                 iterator->gfn += (1UL << KVM_HPAGE_GFN_SHIFT(iterator->level));
1729                 return;
1730         }
1731 
1732         if (++iterator->level > iterator->end_level) {
1733                 iterator->rmap = NULL;
1734                 return;
1735         }
1736 
1737         rmap_walk_init_level(iterator, iterator->level);
1738 }
1739 
1740 #define for_each_slot_rmap_range(_slot_, _start_level_, _end_level_,    \
1741            _start_gfn, _end_gfn, _iter_)                                \
1742         for (slot_rmap_walk_init(_iter_, _slot_, _start_level_,         \
1743                                  _end_level_, _start_gfn, _end_gfn);    \
1744              slot_rmap_walk_okay(_iter_);                               \
1745              slot_rmap_walk_next(_iter_))
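
/*
 * Note (not from the original source): the walk above visits one rmap head
 * per large-page-aligned gfn at each level.  On x86, KVM_HPAGE_GFN_SHIFT()
 * evaluates to 0, 9 and 18 for the 4 KiB, 2 MiB and 1 GiB levels, so
 * slot_rmap_walk_next() advances the gfn by 1, 512 or 262144 per step; once
 * the rmap pointer passes end_rmap the iterator re-initialises itself at the
 * next level, and it clears ->rmap to terminate the loop after end_level.
 */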
1746 
1747 static int kvm_handle_hva_range(struct kvm *kvm,
1748                                 unsigned long start,
1749                                 unsigned long end,
1750                                 unsigned long data,
1751                                 int (*handler)(struct kvm *kvm,
1752                                                struct kvm_rmap_head *rmap_head,
1753                                                struct kvm_memory_slot *slot,
1754                                                gfn_t gfn,
1755                                                int level,
1756                                                unsigned long data))
1757 {
1758         struct kvm_memslots *slots;
1759         struct kvm_memory_slot *memslot;
1760         struct slot_rmap_walk_iterator iterator;
1761         int ret = 0;
1762         int i;
1763 
1764         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
1765                 slots = __kvm_memslots(kvm, i);
1766                 kvm_for_each_memslot(memslot, slots) {
1767                         unsigned long hva_start, hva_end;
1768                         gfn_t gfn_start, gfn_end;
1769 
1770                         hva_start = max(start, memslot->userspace_addr);
1771                         hva_end = min(end, memslot->userspace_addr +
1772                                       (memslot->npages << PAGE_SHIFT));
1773                         if (hva_start >= hva_end)
1774                                 continue;
1775                         /*
1776                          * {gfn(page) | page intersects with [hva_start, hva_end)} =
1777                          * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1778                          */
1779                         gfn_start = hva_to_gfn_memslot(hva_start, memslot);
1780                         gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1781 
1782                         for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
1783                                                  PT_MAX_HUGEPAGE_LEVEL,
1784                                                  gfn_start, gfn_end - 1,
1785                                                  &iterator)
1786                                 ret |= handler(kvm, iterator.rmap, memslot,
1787                                                iterator.gfn, iterator.level, data);
1788                 }
1789         }
1790 
1791         return ret;
1792 }
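
/*
 * Illustration (not from the original source): for each memslot the handler
 * range is clamped to the slot's HVA window and then converted to an
 * inclusive gfn range.  With 4 KiB pages, a slot with
 * userspace_addr = 0x7f0000000000, base_gfn = 0x100000 and npages = 512
 * turns an invalidation of [0x7f0000003000, 0x7f0000005000) into
 * gfn_start = 0x100003 and gfn_end = 0x100005, so the walk covers gfns
 * 0x100003..0x100004.  A minimal sketch of the conversion, mirroring what
 * hva_to_gfn_memslot() does (the helper name below is made up):
 */
#if 0   /* sketch only, not compiled */
static gfn_t hva_to_gfn_sketch(unsigned long hva, gfn_t base_gfn,
                               unsigned long userspace_addr)
{
        return base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT);
}
#endif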
1793 
1794 static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
1795                           unsigned long data,
1796                           int (*handler)(struct kvm *kvm,
1797                                          struct kvm_rmap_head *rmap_head,
1798                                          struct kvm_memory_slot *slot,
1799                                          gfn_t gfn, int level,
1800                                          unsigned long data))
1801 {
1802         return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
1803 }
1804 
1805 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1806 {
1807         return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
1808 }
1809 
1810 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
1811 {
1812         return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
1813 }
1814 
1815 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1816 {
1817         kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
1818 }
1819 
1820 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1821                          struct kvm_memory_slot *slot, gfn_t gfn, int level,
1822                          unsigned long data)
1823 {
1824         u64 *sptep;
1825         struct rmap_iterator uninitialized_var(iter);
1826         int young = 0;
1827 
1828         for_each_rmap_spte(rmap_head, &iter, sptep)
1829                 young |= mmu_spte_age(sptep);
1830 
1831         trace_kvm_age_page(gfn, level, slot, young);
1832         return young;
1833 }
1834 
1835 static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
1836                               struct kvm_memory_slot *slot, gfn_t gfn,
1837                               int level, unsigned long data)
1838 {
1839         u64 *sptep;
1840         struct rmap_iterator iter;
1841 
1842         for_each_rmap_spte(rmap_head, &iter, sptep)
1843                 if (is_accessed_spte(*sptep))
1844                         return 1;
1845         return 0;
1846 }
1847 
1848 #define RMAP_RECYCLE_THRESHOLD 1000
1849 
1850 static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
1851 {
1852         struct kvm_rmap_head *rmap_head;
1853         struct kvm_mmu_page *sp;
1854 
1855         sp = page_header(__pa(spte));
1856 
1857         rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
1858 
1859         kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
1860         kvm_flush_remote_tlbs(vcpu->kvm);
1861 }
1862 
1863 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1864 {
1865         return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
1866 }
1867 
1868 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1869 {
1870         return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
1871 }
1872 
1873 #ifdef MMU_DEBUG
1874 static int is_empty_shadow_page(u64 *spt)
1875 {
1876         u64 *pos;
1877         u64 *end;
1878 
1879         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
1880                 if (is_shadow_present_pte(*pos)) {
1881                         printk(KERN_ERR "%s: %p %llx\n", __func__,
1882                                pos, *pos);
1883                         return 0;
1884                 }
1885         return 1;
1886 }
1887 #endif
1888 
1889 /*
1890  * This value is the sum of all of the kvm instances'
1891  * kvm->arch.n_used_mmu_pages values.  We need a global,
1892  * aggregate version in order to make the slab shrinker
1893  * faster.
1894  */
1895 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
1896 {
1897         kvm->arch.n_used_mmu_pages += nr;
1898         percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1899 }
1900 
1901 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
1902 {
1903         MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
1904         hlist_del(&sp->hash_link);
1905         list_del(&sp->link);
1906         free_page((unsigned long)sp->spt);
1907         if (!sp->role.direct)
1908                 free_page((unsigned long)sp->gfns);
1909         kmem_cache_free(mmu_page_header_cache, sp);
1910 }
1911 
1912 static unsigned kvm_page_table_hashfn(gfn_t gfn)
1913 {
1914         return hash_64(gfn, KVM_MMU_HASH_SHIFT);
1915 }
1916 
1917 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
1918                                     struct kvm_mmu_page *sp, u64 *parent_pte)
1919 {
1920         if (!parent_pte)
1921                 return;
1922 
1923         pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
1924 }
1925 
1926 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
1927                                        u64 *parent_pte)
1928 {
1929         pte_list_remove(parent_pte, &sp->parent_ptes);
1930 }
1931 
1932 static void drop_parent_pte(struct kvm_mmu_page *sp,
1933                             u64 *parent_pte)
1934 {
1935         mmu_page_remove_parent_pte(sp, parent_pte);
1936         mmu_spte_clear_no_track(parent_pte);
1937 }
1938 
1939 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct)
1940 {
1941         struct kvm_mmu_page *sp;
1942 
1943         sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
1944         sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
1945         if (!direct)
1946                 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
1947         set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
1948 
1949         /*
1950          * The active_mmu_pages list is a FIFO list; do not move the
1951          * page until it is zapped. kvm_zap_obsolete_pages depends on
1952          * this feature. See the comments in kvm_zap_obsolete_pages().
1953          */
1954         list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
1955         kvm_mod_used_mmu_pages(vcpu->kvm, +1);
1956         return sp;
1957 }
1958 
1959 static void mark_unsync(u64 *spte);
1960 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
1961 {
1962         u64 *sptep;
1963         struct rmap_iterator iter;
1964 
1965         for_each_rmap_spte(&sp->parent_ptes, &iter, sptep) {
1966                 mark_unsync(sptep);
1967         }
1968 }
1969 
1970 static void mark_unsync(u64 *spte)
1971 {
1972         struct kvm_mmu_page *sp;
1973         unsigned int index;
1974 
1975         sp = page_header(__pa(spte));
1976         index = spte - sp->spt;
1977         if (__test_and_set_bit(index, sp->unsync_child_bitmap))
1978                 return;
1979         if (sp->unsync_children++)
1980                 return;
1981         kvm_mmu_mark_parents_unsync(sp);
1982 }
1983 
1984 static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
1985                                struct kvm_mmu_page *sp)
1986 {
1987         return 0;
1988 }
1989 
1990 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1991 {
1992 }
1993 
1994 static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
1995                                  struct kvm_mmu_page *sp, u64 *spte,
1996                                  const void *pte)
1997 {
1998         WARN_ON(1);
1999 }
2000 
2001 #define KVM_PAGE_ARRAY_NR 16
2002 
2003 struct kvm_mmu_pages {
2004         struct mmu_page_and_offset {
2005                 struct kvm_mmu_page *sp;
2006                 unsigned int idx;
2007         } page[KVM_PAGE_ARRAY_NR];
2008         unsigned int nr;
2009 };
2010 
2011 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
2012                          int idx)
2013 {
2014         int i;
2015 
2016         if (sp->unsync)
2017                 for (i = 0; i < pvec->nr; i++)
2018                         if (pvec->page[i].sp == sp)
2019                                 return 0;
2020 
2021         pvec->page[pvec->nr].sp = sp;
2022         pvec->page[pvec->nr].idx = idx;
2023         pvec->nr++;
2024         return (pvec->nr == KVM_PAGE_ARRAY_NR);
2025 }
2026 
2027 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
2028 {
2029         --sp->unsync_children;
2030         WARN_ON((int)sp->unsync_children < 0);
2031         __clear_bit(idx, sp->unsync_child_bitmap);
2032 }
2033 
2034 static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
2035                            struct kvm_mmu_pages *pvec)
2036 {
2037         int i, ret, nr_unsync_leaf = 0;
2038 
2039         for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
2040                 struct kvm_mmu_page *child;
2041                 u64 ent = sp->spt[i];
2042 
2043                 if (!is_shadow_present_pte(ent) || is_large_pte(ent)) {
2044                         clear_unsync_child_bit(sp, i);
2045                         continue;
2046                 }
2047 
2048                 child = page_header(ent & PT64_BASE_ADDR_MASK);
2049 
2050                 if (child->unsync_children) {
2051                         if (mmu_pages_add(pvec, child, i))
2052                                 return -ENOSPC;
2053 
2054                         ret = __mmu_unsync_walk(child, pvec);
2055                         if (!ret) {
2056                                 clear_unsync_child_bit(sp, i);
2057                                 continue;
2058                         } else if (ret > 0) {
2059                                 nr_unsync_leaf += ret;
2060                         } else
2061                                 return ret;
2062                 } else if (child->unsync) {
2063                         nr_unsync_leaf++;
2064                         if (mmu_pages_add(pvec, child, i))
2065                                 return -ENOSPC;
2066                 } else
2067                         clear_unsync_child_bit(sp, i);
2068         }
2069 
2070         return nr_unsync_leaf;
2071 }
2072 
2073 #define INVALID_INDEX (-1)
2074 
2075 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
2076                            struct kvm_mmu_pages *pvec)
2077 {
2078         pvec->nr = 0;
2079         if (!sp->unsync_children)
2080                 return 0;
2081 
2082         mmu_pages_add(pvec, sp, INVALID_INDEX);
2083         return __mmu_unsync_walk(sp, pvec);
2084 }
2085 
2086 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
2087 {
2088         WARN_ON(!sp->unsync);
2089         trace_kvm_mmu_sync_page(sp);
2090         sp->unsync = 0;
2091         --kvm->stat.mmu_unsync;
2092 }
2093 
2094 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2095                                     struct list_head *invalid_list);
2096 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2097                                     struct list_head *invalid_list);
2098 
2099 /*
2100  * NOTE: we should pay extra attention to zapped-obsolete pages
2101  * (is_obsolete_sp(sp) && sp->role.invalid) when doing a hash list walk,
2102  * since such a page has been deleted from active_mmu_pages but can still
2103  * be found in the hash list.
2104  *
2105  * for_each_valid_sp() skips that kind of page.
2106  */
2107 #define for_each_valid_sp(_kvm, _sp, _gfn)                              \
2108         hlist_for_each_entry(_sp,                                       \
2109           &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
2110                 if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {    \
2111                 } else
2112 
2113 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)                 \
2114         for_each_valid_sp(_kvm, _sp, _gfn)                              \
2115                 if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
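
/*
 * Note (not from the original source): in the two macros above, the loop body
 * supplied at the call site becomes the else-branch of the trailing "if".
 * Entries rejected by the filter fall into the empty then-branch and are
 * skipped, while valid (and, for the second macro, gfn-matching indirect)
 * pages run the caller's body, without an extra level of braces per call site.
 */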
2116 
2117 /* @sp->gfn should be write-protected at the call site */
2118 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2119                             struct list_head *invalid_list)
2120 {
2121         if (sp->role.cr4_pae != !!is_pae(vcpu)) {
2122                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2123                 return false;
2124         }
2125 
2126         if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
2127                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
2128                 return false;
2129         }
2130 
2131         return true;
2132 }
2133 
2134 static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
2135                                  struct list_head *invalid_list,
2136                                  bool remote_flush, bool local_flush)
2137 {
2138         if (!list_empty(invalid_list)) {
2139                 kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
2140                 return;
2141         }
2142 
2143         if (remote_flush)
2144                 kvm_flush_remote_tlbs(vcpu->kvm);
2145         else if (local_flush)
2146                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2147 }
2148 
2149 #ifdef CONFIG_KVM_MMU_AUDIT
2150 #include "mmu_audit.c"
2151 #else
2152 static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
2153 static void mmu_audit_disable(void) { }
2154 #endif
2155 
2156 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
2157 {
2158         return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
2159 }
2160 
2161 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
2162                          struct list_head *invalid_list)
2163 {
2164         kvm_unlink_unsync_page(vcpu->kvm, sp);
2165         return __kvm_sync_page(vcpu, sp, invalid_list);
2166 }
2167 
2168 /* @gfn should be write-protected at the call site */
2169 static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
2170                            struct list_head *invalid_list)
2171 {
2172         struct kvm_mmu_page *s;
2173         bool ret = false;
2174 
2175         for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
2176                 if (!s->unsync)
2177                         continue;
2178 
2179                 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
2180                 ret |= kvm_sync_page(vcpu, s, invalid_list);
2181         }
2182 
2183         return ret;
2184 }
2185 
2186 struct mmu_page_path {
2187         struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
2188         unsigned int idx[PT64_ROOT_MAX_LEVEL];
2189 };
2190 
2191 #define for_each_sp(pvec, sp, parents, i)                       \
2192                 for (i = mmu_pages_first(&pvec, &parents);      \
2193                         i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
2194                         i = mmu_pages_next(&pvec, &parents, i))
2195 
2196 static int mmu_pages_next(struct kvm_mmu_pages *pvec,
2197                           struct mmu_page_path *parents,
2198                           int i)
2199 {
2200         int n;
2201 
2202         for (n = i+1; n < pvec->nr; n++) {
2203                 struct kvm_mmu_page *sp = pvec->page[n].sp;
2204                 unsigned idx = pvec->page[n].idx;
2205                 int level = sp->role.level;
2206 
2207                 parents->idx[level-1] = idx;
2208                 if (level == PT_PAGE_TABLE_LEVEL)
2209                         break;
2210 
2211                 parents->parent[level-2] = sp;
2212         }
2213 
2214         return n;
2215 }
2216 
2217 static int mmu_pages_first(struct kvm_mmu_pages *pvec,
2218                            struct mmu_page_path *parents)
2219 {
2220         struct kvm_mmu_page *sp;
2221         int level;
2222 
2223         if (pvec->nr == 0)
2224                 return 0;
2225 
2226         WARN_ON(pvec->page[0].idx != INVALID_INDEX);
2227 
2228         sp = pvec->page[0].sp;
2229         level = sp->role.level;
2230         WARN_ON(level == PT_PAGE_TABLE_LEVEL);
2231 
2232         parents->parent[level-2] = sp;
2233 
2234         /* Also set up a sentinel.  Further entries in pvec are all
2235          * children of sp, so this element is never overwritten.
2236          */
2237         parents->parent[level-1] = NULL;
2238         return mmu_pages_next(pvec, parents, 0);
2239 }
2240 
2241 static void mmu_pages_clear_parents(struct mmu_page_path *parents)
2242 {
2243         struct kvm_mmu_page *sp;
2244         unsigned int level = 0;
2245 
2246         do {
2247                 unsigned int idx = parents->idx[level];
2248                 sp = parents->parent[level];
2249                 if (!sp)
2250                         return;
2251 
2252                 WARN_ON(idx == INVALID_INDEX);
2253                 clear_unsync_child_bit(sp, idx);
2254                 level++;
2255         } while (!sp->unsync_children);
2256 }
2257 
2258 static void mmu_sync_children(struct kvm_vcpu *vcpu,
2259                               struct kvm_mmu_page *parent)
2260 {
2261         int i;
2262         struct kvm_mmu_page *sp;
2263         struct mmu_page_path parents;
2264         struct kvm_mmu_pages pages;
2265         LIST_HEAD(invalid_list);
2266         bool flush = false;
2267 
2268         while (mmu_unsync_walk(parent, &pages)) {
2269                 bool protected = false;
2270 
2271                 for_each_sp(pages, sp, parents, i)
2272                         protected |= rmap_write_protect(vcpu, sp->gfn);
2273 
2274                 if (protected) {
2275                         kvm_flush_remote_tlbs(vcpu->kvm);
2276                         flush = false;
2277                 }
2278 
2279                 for_each_sp(pages, sp, parents, i) {
2280                         flush |= kvm_sync_page(vcpu, sp, &invalid_list);
2281                         mmu_pages_clear_parents(&parents);
2282                 }
2283                 if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
2284                         kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2285                         cond_resched_lock(&vcpu->kvm->mmu_lock);
2286                         flush = false;
2287                 }
2288         }
2289 
2290         kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2291 }
2292 
2293 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
2294 {
2295         atomic_set(&sp->write_flooding_count, 0);
2296 }
2297 
2298 static void clear_sp_write_flooding_count(u64 *spte)
2299 {
2300         struct kvm_mmu_page *sp = page_header(__pa(spte));
2301 
2302         __clear_sp_write_flooding_count(sp);
2303 }
2304 
2305 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
2306                                              gfn_t gfn,
2307                                              gva_t gaddr,
2308                                              unsigned level,
2309                                              int direct,
2310                                              unsigned access)
2311 {
2312         union kvm_mmu_page_role role;
2313         unsigned quadrant;
2314         struct kvm_mmu_page *sp;
2315         bool need_sync = false;
2316         bool flush = false;
2317         int collisions = 0;
2318         LIST_HEAD(invalid_list);
2319 
2320         role = vcpu->arch.mmu.base_role;
2321         role.level = level;
2322         role.direct = direct;
2323         if (role.direct)
2324                 role.cr4_pae = 0;
2325         role.access = access;
2326         if (!vcpu->arch.mmu.direct_map
2327             && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
2328                 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
2329                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
2330                 role.quadrant = quadrant;
2331         }
2332         for_each_valid_sp(vcpu->kvm, sp, gfn) {
2333                 if (sp->gfn != gfn) {
2334                         collisions++;
2335                         continue;
2336                 }
2337 
2338                 if (!need_sync && sp->unsync)
2339                         need_sync = true;
2340 
2341                 if (sp->role.word != role.word)
2342                         continue;
2343 
2344                 if (sp->unsync) {
2345                         /* The page is good, but __kvm_sync_page might still end
2346                          * up zapping it.  If so, break in order to rebuild it.
2347                          */
2348                         if (!__kvm_sync_page(vcpu, sp, &invalid_list))
2349                                 break;
2350 
2351                         WARN_ON(!list_empty(&invalid_list));
2352                         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2353                 }
2354 
2355                 if (sp->unsync_children)
2356                         kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
2357 
2358                 __clear_sp_write_flooding_count(sp);
2359                 trace_kvm_mmu_get_page(sp, false);
2360                 goto out;
2361         }
2362 
2363         ++vcpu->kvm->stat.mmu_cache_miss;
2364 
2365         sp = kvm_mmu_alloc_page(vcpu, direct);
2366 
2367         sp->gfn = gfn;
2368         sp->role = role;
2369         hlist_add_head(&sp->hash_link,
2370                 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
2371         if (!direct) {
2372                 /*
2373                  * We should do write protection before syncing pages;
2374                  * otherwise the content of the synced shadow page may
2375                  * be inconsistent with the guest page table.
2376                  */
2377                 account_shadowed(vcpu->kvm, sp);
2378                 if (level == PT_PAGE_TABLE_LEVEL &&
2379                       rmap_write_protect(vcpu, gfn))
2380                         kvm_flush_remote_tlbs(vcpu->kvm);
2381 
2382                 if (level > PT_PAGE_TABLE_LEVEL && need_sync)
2383                         flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
2384         }
2385         sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
2386         clear_page(sp->spt);
2387         trace_kvm_mmu_get_page(sp, true);
2388 
2389         kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
2390 out:
2391         if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
2392                 vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
2393         return sp;
2394 }
2395 
2396 static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
2397                              struct kvm_vcpu *vcpu, u64 addr)
2398 {
2399         iterator->addr = addr;
2400         iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
2401         iterator->level = vcpu->arch.mmu.shadow_root_level;
2402 
2403         if (iterator->level == PT64_ROOT_4LEVEL &&
2404             vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL &&
2405             !vcpu->arch.mmu.direct_map)
2406                 --iterator->level;
2407 
2408         if (iterator->level == PT32E_ROOT_LEVEL) {
2409                 iterator->shadow_addr
2410                         = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
2411                 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
2412                 --iterator->level;
2413                 if (!iterator->shadow_addr)
2414                         iterator->level = 0;
2415         }
2416 }
2417 
2418 static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
2419 {
2420         if (iterator->level < PT_PAGE_TABLE_LEVEL)
2421                 return false;
2422 
2423         iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
2424         iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
2425         return true;
2426 }
2427 
2428 static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
2429                                u64 spte)
2430 {
2431         if (is_last_spte(spte, iterator->level)) {
2432                 iterator->level = 0;
2433                 return;
2434         }
2435 
2436         iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
2437         --iterator->level;
2438 }
2439 
2440 static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2441 {
2442         __shadow_walk_next(iterator, *iterator->sptep);
2443 }
2444 
2445 static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
2446                              struct kvm_mmu_page *sp)
2447 {
2448         u64 spte;
2449 
2450         BUILD_BUG_ON(VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2451 
2452         spte = __pa(sp->spt) | shadow_present_mask | PT_WRITABLE_MASK |
2453                shadow_user_mask | shadow_x_mask | shadow_me_mask;
2454 
2455         if (sp_ad_disabled(sp))
2456                 spte |= shadow_acc_track_value;
2457         else
2458                 spte |= shadow_accessed_mask;
2459 
2460         mmu_spte_set(sptep, spte);
2461 
2462         mmu_page_add_parent_pte(vcpu, sp, sptep);
2463 
2464         if (sp->unsync_children || sp->unsync)
2465                 mark_unsync(sptep);
2466 }
2467 
2468 static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2469                                    unsigned direct_access)
2470 {
2471         if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
2472                 struct kvm_mmu_page *child;
2473 
2474                 /*
2475                  * For a direct sp, if the guest pte's dirty bit
2476                  * changed from clean to dirty, it would corrupt the
2477                  * sp's access by allowing writes through a read-only sp,
2478                  * so we should update the spte at this point to get
2479                  * a new sp with the correct access.
2480                  */
2481                 child = page_header(*sptep & PT64_BASE_ADDR_MASK);
2482                 if (child->role.access == direct_access)
2483                         return;
2484 
2485                 drop_parent_pte(child, sptep);
2486                 kvm_flush_remote_tlbs(vcpu->kvm);
2487         }
2488 }
2489 
2490 static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
2491                              u64 *spte)
2492 {
2493         u64 pte;
2494         struct kvm_mmu_page *child;
2495 
2496         pte = *spte;
2497         if (is_shadow_present_pte(pte)) {
2498                 if (is_last_spte(pte, sp->role.level)) {
2499                         drop_spte(kvm, spte);
2500                         if (is_large_pte(pte))
2501                                 --kvm->stat.lpages;
2502                 } else {
2503                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2504                         drop_parent_pte(child, spte);
2505                 }
2506                 return true;
2507         }
2508 
2509         if (is_mmio_spte(pte))
2510                 mmu_spte_clear_no_track(spte);
2511 
2512         return false;
2513 }
2514 
2515 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
2516                                          struct kvm_mmu_page *sp)
2517 {
2518         unsigned i;
2519 
2520         for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
2521                 mmu_page_zap_pte(kvm, sp, sp->spt + i);
2522 }
2523 
2524 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
2525 {
2526         u64 *sptep;
2527         struct rmap_iterator iter;
2528 
2529         while ((sptep = rmap_get_first(&sp->parent_ptes, &iter)))
2530                 drop_parent_pte(sp, sptep);
2531 }
2532 
2533 static int mmu_zap_unsync_children(struct kvm *kvm,
2534                                    struct kvm_mmu_page *parent,
2535                                    struct list_head *invalid_list)
2536 {
2537         int i, zapped = 0;
2538         struct mmu_page_path parents;
2539         struct kvm_mmu_pages pages;
2540 
2541         if (parent->role.level == PT_PAGE_TABLE_LEVEL)
2542                 return 0;
2543 
2544         while (mmu_unsync_walk(parent, &pages)) {
2545                 struct kvm_mmu_page *sp;
2546 
2547                 for_each_sp(pages, sp, parents, i) {
2548                         kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2549                         mmu_pages_clear_parents(&parents);
2550                         zapped++;
2551                 }
2552         }
2553 
2554         return zapped;
2555 }
2556 
2557 static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
2558                                     struct list_head *invalid_list)
2559 {
2560         int ret;
2561 
2562         trace_kvm_mmu_prepare_zap_page(sp);
2563         ++kvm->stat.mmu_shadow_zapped;
2564         ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
2565         kvm_mmu_page_unlink_children(kvm, sp);
2566         kvm_mmu_unlink_parents(kvm, sp);
2567 
2568         if (!sp->role.invalid && !sp->role.direct)
2569                 unaccount_shadowed(kvm, sp);
2570 
2571         if (sp->unsync)
2572                 kvm_unlink_unsync_page(kvm, sp);
2573         if (!sp->root_count) {
2574                 /* Count self */
2575                 ret++;
2576                 list_move(&sp->link, invalid_list);
2577                 kvm_mod_used_mmu_pages(kvm, -1);
2578         } else {
2579                 list_move(&sp->link, &kvm->arch.active_mmu_pages);
2580 
2581                 /*
2582                  * The obsolete pages cannot be used on any vcpus.
2583                  * See the comments in kvm_mmu_invalidate_zap_all_pages().
2584                  */
2585                 if (!sp->role.invalid && !is_obsolete_sp(kvm, sp))
2586                         kvm_reload_remote_mmus(kvm);
2587         }
2588 
2589         sp->role.invalid = 1;
2590         return ret;
2591 }
2592 
2593 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
2594                                     struct list_head *invalid_list)
2595 {
2596         struct kvm_mmu_page *sp, *nsp;
2597 
2598         if (list_empty(invalid_list))
2599                 return;
2600 
2601         /*
2602          * We need to make sure everyone sees our modifications to
2603          * the page tables and sees changes to vcpu->mode here. The barrier
2604          * in the kvm_flush_remote_tlbs() achieves this. This pairs
2605          * with vcpu_enter_guest and walk_shadow_page_lockless_begin/end.
2606          *
2607          * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
2608          * guest mode and/or lockless shadow page table walks.
2609          */
2610         kvm_flush_remote_tlbs(kvm);
2611 
2612         list_for_each_entry_safe(sp, nsp, invalid_list, link) {
2613                 WARN_ON(!sp->role.invalid || sp->root_count);
2614                 kvm_mmu_free_page(sp);
2615         }
2616 }
2617 
2618 static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
2619                                         struct list_head *invalid_list)
2620 {
2621         struct kvm_mmu_page *sp;
2622 
2623         if (list_empty(&kvm->arch.active_mmu_pages))
2624                 return false;
2625 
2626         sp = list_last_entry(&kvm->arch.active_mmu_pages,
2627                              struct kvm_mmu_page, link);
2628         return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
2629 }
2630 
2631 /*
2632  * Change the number of mmu pages allocated to the vm.
2633  * Note: if goal_nr_mmu_pages is too small, you will get a deadlock.
2634  */
2635 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
2636 {
2637         LIST_HEAD(invalid_list);
2638 
2639         spin_lock(&kvm->mmu_lock);
2640 
2641         if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
2642                 /* Need to free some mmu pages to achieve the goal. */
2643                 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
2644                         if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
2645                                 break;
2646 
2647                 kvm_mmu_commit_zap_page(kvm, &invalid_list);
2648                 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
2649         }
2650 
2651         kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
2652 
2653         spin_unlock(&kvm->mmu_lock);
2654 }
2655 
2656 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
2657 {
2658         struct kvm_mmu_page *sp;
2659         LIST_HEAD(invalid_list);
2660         int r;
2661 
2662         pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
2663         r = 0;
2664         spin_lock(&kvm->mmu_lock);
2665         for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
2666                 pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
2667                          sp->role.word);
2668                 r = 1;
2669                 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
2670         }
2671         kvm_mmu_commit_zap_page(kvm, &invalid_list);
2672         spin_unlock(&kvm->mmu_lock);
2673 
2674         return r;
2675 }
2676 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
2677 
2678 static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
2679 {
2680         trace_kvm_mmu_unsync_page(sp);
2681         ++vcpu->kvm->stat.mmu_unsync;
2682         sp->unsync = 1;
2683 
2684         kvm_mmu_mark_parents_unsync(sp);
2685 }
2686 
2687 static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
2688                                    bool can_unsync)
2689 {
2690         struct kvm_mmu_page *sp;
2691 
2692         if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
2693                 return true;
2694 
2695         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
2696                 if (!can_unsync)
2697                         return true;
2698 
2699                 if (sp->unsync)
2700                         continue;
2701 
2702                 WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
2703                 kvm_unsync_page(vcpu, sp);
2704         }
2705 
2706         return false;
2707 }
2708 
2709 static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
2710 {
2711         if (pfn_valid(pfn))
2712                 return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
2713                         /*
2714                          * Some reserved pages, such as those from NVDIMM
2715                          * DAX devices, are not for MMIO, and can be mapped
2716                          * with cached memory type for better performance.
2717                          * However, the above check misconceives those pages
2718                          * as MMIO, and results in KVM mapping them with UC
2719                          * memory type, which would hurt the performance.
2720                          * Therefore, we check the host memory type in addition
2721                          * and only treat UC/UC-/WC pages as MMIO.
2722                          */
2723                         (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));
2724 
2725         return true;
2726 }
2727 
2728 static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2729                     unsigned pte_access, int level,
2730                     gfn_t gfn, kvm_pfn_t pfn, bool speculative,
2731                     bool can_unsync, bool host_writable)
2732 {
2733         u64 spte = 0;
2734         int ret = 0;
2735         struct kvm_mmu_page *sp;
2736 
2737         if (set_mmio_spte(vcpu, sptep, gfn, pfn, pte_access))
2738                 return 0;
2739 
2740         sp = page_header(__pa(sptep));
2741         if (sp_ad_disabled(sp))
2742                 spte |= shadow_acc_track_value;
2743 
2744         /*
2745          * For the EPT case, shadow_present_mask is 0 if hardware
2746          * supports exec-only page table entries.  In that case,
2747          * ACC_USER_MASK and shadow_user_mask are used to represent
2748          * read access.  See FNAME(gpte_access) in paging_tmpl.h.
2749          */
2750         spte |= shadow_present_mask;
2751         if (!speculative)
2752                 spte |= spte_shadow_accessed_mask(spte);
2753 
2754         if (pte_access & ACC_EXEC_MASK)
2755                 spte |= shadow_x_mask;
2756         else
2757                 spte |= shadow_nx_mask;
2758 
2759         if (pte_access & ACC_USER_MASK)
2760                 spte |= shadow_user_mask;
2761 
2762         if (level > PT_PAGE_TABLE_LEVEL)
2763                 spte |= PT_PAGE_SIZE_MASK;
2764         if (tdp_enabled)
2765                 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2766                         kvm_is_mmio_pfn(pfn));
2767 
2768         if (host_writable)
2769                 spte |= SPTE_HOST_WRITEABLE;
2770         else
2771                 pte_access &= ~ACC_WRITE_MASK;
2772 
2773         if (!kvm_is_mmio_pfn(pfn))
2774                 spte |= shadow_me_mask;
2775 
2776         spte |= (u64)pfn << PAGE_SHIFT;
2777 
2778         if (pte_access & ACC_WRITE_MASK) {
2779 
2780                 /*
2781                  * Another vcpu may create a new sp in the window between
2782                  * mapping_level() and acquiring the mmu-lock. We can
2783                  * allow the guest to retry the access; the mapping can
2784                  * be fixed if the guest refaults.
2785                  */
2786                 if (level > PT_PAGE_TABLE_LEVEL &&
2787                     mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
2788                         goto done;
2789 
2790                 spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
2791 
2792                 /*
2793                  * Optimization: for pte sync, if spte was writable the hash
2794                  * lookup is unnecessary (and expensive). Write protection
2795                  * is the responsibility of mmu_get_page / kvm_sync_page.
2796                  * Same reasoning can be applied to dirty page accounting.
2797                  */
2798                 if (!can_unsync && is_writable_pte(*sptep))
2799                         goto set_pte;
2800 
2801                 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
2802                         pgprintk("%s: found shadow page for %llx, marking ro\n",
2803                                  __func__, gfn);
2804                         ret = 1;
2805                         pte_access &= ~ACC_WRITE_MASK;
2806                         spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
2807                 }
2808         }
2809 
2810         if (pte_access & ACC_WRITE_MASK) {
2811                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
2812                 spte |= spte_shadow_dirty_mask(spte);
2813         }
2814 
2815         if (speculative)
2816                 spte = mark_spte_for_access_track(spte);
2817 
2818 set_pte:
2819         if (mmu_spte_update(sptep, spte))
2820                 kvm_flush_remote_tlbs(vcpu->kvm);
2821 done:
2822         return ret;
2823 }
2824 
2825 static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
2826                         int write_fault, int level, gfn_t gfn, kvm_pfn_t pfn,
2827                         bool speculative, bool host_writable)
2828 {
2829         int was_rmapped = 0;
2830         int rmap_count;
2831         int ret = RET_PF_RETRY;
2832 
2833         pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
2834                  *sptep, write_fault, gfn);
2835 
2836         if (is_shadow_present_pte(*sptep)) {
2837                 /*
2838                  * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2839                  * the parent of the now unreachable PTE.
2840                  */
2841                 if (level > PT_PAGE_TABLE_LEVEL &&
2842                     !is_large_pte(*sptep)) {
2843                         struct kvm_mmu_page *child;
2844                         u64 pte = *sptep;
2845 
2846                         child = page_header(pte & PT64_BASE_ADDR_MASK);
2847                         drop_parent_pte(child, sptep);
2848                         kvm_flush_remote_tlbs(vcpu->kvm);
2849                 } else if (pfn != spte_to_pfn(*sptep)) {
2850                         pgprintk("hfn old %llx new %llx\n",
2851                                  spte_to_pfn(*sptep), pfn);
2852                         drop_spte(vcpu->kvm, sptep);
2853                         kvm_flush_remote_tlbs(vcpu->kvm);
2854                 } else
2855                         was_rmapped = 1;
2856         }
2857 
2858         if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
2859               true, host_writable)) {
2860                 if (write_fault)
2861                         ret = RET_PF_EMULATE;
2862                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
2863         }
2864 
2865         if (unlikely(is_mmio_spte(*sptep)))
2866                 ret = RET_PF_EMULATE;
2867 
2868         pgprintk("%s: setting spte %llx\n", __func__, *sptep);
2869         pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
2870                  is_large_pte(*sptep)? "2MB" : "4kB",
2871                  *sptep & PT_WRITABLE_MASK ? "RW" : "R", gfn,
2872                  *sptep, sptep);
2873         if (!was_rmapped && is_large_pte(*sptep))
2874                 ++vcpu->kvm->stat.lpages;
2875 
2876         if (is_shadow_present_pte(*sptep)) {
2877                 if (!was_rmapped) {
2878                         rmap_count = rmap_add(vcpu, sptep, gfn);
2879                         if (rmap_count > RMAP_RECYCLE_THRESHOLD)
2880                                 rmap_recycle(vcpu, sptep, gfn);
2881                 }
2882         }
2883 
2884         kvm_release_pfn_clean(pfn);
2885 
2886         return ret;
2887 }
2888 
2889 static kvm_pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2890                                      bool no_dirty_log)
2891 {
2892         struct kvm_memory_slot *slot;
2893 
2894         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
2895         if (!slot)
2896                 return KVM_PFN_ERR_FAULT;
2897 
2898         return gfn_to_pfn_memslot_atomic(slot, gfn);
2899 }
2900 
2901 static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2902                                     struct kvm_mmu_page *sp,
2903                                     u64 *start, u64 *end)
2904 {
2905         struct page *pages[PTE_PREFETCH_NUM];
2906         struct kvm_memory_slot *slot;
2907         unsigned access = sp->role.access;
2908         int i, ret;
2909         gfn_t gfn;
2910 
2911         gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
2912         slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK);
2913         if (!slot)
2914                 return -1;
2915 
2916         ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
2917         if (ret <= 0)
2918                 return -1;
2919 
2920         for (i = 0; i < ret; i++, gfn++, start++)
2921                 mmu_set_spte(vcpu, start, access, 0, sp->role.level, gfn,
2922                              page_to_pfn(pages[i]), true, true);
2923 
2924         return 0;
2925 }
2926 
2927 static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
2928                                   struct kvm_mmu_page *sp, u64 *sptep)
2929 {
2930         u64 *spte, *start = NULL;
2931         int i;
2932 
2933         WARN_ON(!sp->role.direct);
2934 
2935         i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
2936         spte = sp->spt + i;
2937 
2938         for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
2939                 if (is_shadow_present_pte(*spte) || spte == sptep) {
2940                         if (!start)
2941                                 continue;
2942                         if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
2943                                 break;
2944                         start = NULL;
2945                 } else if (!start)
2946                         start = spte;
2947         }
2948 }
2949 
2950 static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
2951 {
2952         struct kvm_mmu_page *sp;
2953 
2954         sp = page_header(__pa(sptep));
2955 
2956         /*
2957          * Without accessed bits, there's no way to distinguish between
2958          * actually accessed translations and prefetched, so disable pte
2959          * prefetch if accessed bits aren't available.
2960          */
2961         if (sp_ad_disabled(sp))
2962                 return;
2963 
2964         if (sp->role.level > PT_PAGE_TABLE_LEVEL)
2965                 return;
2966 
2967         __direct_pte_prefetch(vcpu, sp, sptep);
2968 }
2969 
2970 static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
2971                         int level, gfn_t gfn, kvm_pfn_t pfn, bool prefault)
2972 {
2973         struct kvm_shadow_walk_iterator iterator;
2974         struct kvm_mmu_page *sp;
2975         int emulate = 0;
2976         gfn_t pseudo_gfn;
2977 
2978         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2979                 return 0;
2980 
2981         for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
2982                 if (iterator.level == level) {
2983                         emulate = mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
2984                                                write, level, gfn, pfn, prefault,
2985                                                map_writable);
2986                         direct_pte_prefetch(vcpu, iterator.sptep);
2987                         ++vcpu->stat.pf_fixed;
2988                         break;
2989                 }
2990 
2991                 drop_large_spte(vcpu, iterator.sptep);
2992                 if (!is_shadow_present_pte(*iterator.sptep)) {
2993                         u64 base_addr = iterator.addr;
2994 
2995                         base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
2996                         pseudo_gfn = base_addr >> PAGE_SHIFT;
2997                         sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
2998                                               iterator.level - 1, 1, ACC_ALL);
2999 
3000                         link_shadow_page(vcpu, iterator.sptep, sp);
3001                 }
3002         }
3003         return emulate;
3004 }
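
/*
 * Worked example (not from the original source): suppose a 4 KiB mapping for
 * gfn 0x12345 must be installed and the PDE slot is still empty.  The walk
 * reaches iterator.level == 2 with iterator.addr == 0x12345000;
 * PT64_LVL_ADDR_MASK(2) clears the low 21 bits, giving base_addr 0x12200000
 * and pseudo_gfn 0x12200, so the shadow page allocated here covers the
 * 2 MiB-aligned gfn range 0x12200..0x123ff that contains the faulting gfn.
 */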
3005 
3006 static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
3007 {
3008         siginfo_t info;
3009 
3010         info.si_signo   = SIGBUS;
3011         info.si_errno   = 0;
3012         info.si_code    = BUS_MCEERR_AR;
3013         info.si_addr    = (void __user *)address;
3014         info.si_addr_lsb = PAGE_SHIFT;
3015 
3016         send_sig_info(SIGBUS, &info, tsk);
3017 }
3018 
3019 static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
3020 {
3021         /*
3022          * Do not cache the mmio info caused by writing the readonly gfn
3023          * into the spte; otherwise a read access on the readonly gfn would
3024          * also cause an mmio page fault and be treated as mmio access.
3025          */
3026         if (pfn == KVM_PFN_ERR_RO_FAULT)
3027                 return RET_PF_EMULATE;
3028 
3029         if (pfn == KVM_PFN_ERR_HWPOISON) {
3030                 kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
3031                 return RET_PF_RETRY;
3032         }
3033 
3034         return -EFAULT;
3035 }
3036 
3037 static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
3038                                         gfn_t *gfnp, kvm_pfn_t *pfnp,
3039                                         int *levelp)
3040 {
3041         kvm_pfn_t pfn = *pfnp;
3042         gfn_t gfn = *gfnp;
3043         int level = *levelp;
3044 
3045         /*
3046          * Check if it's a transparent hugepage. If this were a
3047          * hugetlbfs page, level wouldn't be set to
3048          * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
3049          * here.
3050          */
3051         if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
3052             level == PT_PAGE_TABLE_LEVEL &&
3053             PageTransCompoundMap(pfn_to_page(pfn)) &&
3054             !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
3055                 unsigned long mask;
3056                 /*
3057                  * mmu_notifier_retry was successful and we hold the
3058          * mmu_lock here, so the pmd can't be split out
3059          * from under us, and in turn
3060                  * __split_huge_page_refcount() can't run from under
3061                  * us and we can safely transfer the refcount from
3062                  * PG_tail to PG_head as we switch the pfn to tail to
3063                  * head.
3064                  */
3065                 *levelp = level = PT_DIRECTORY_LEVEL;
3066                 mask = KVM_PAGES_PER_HPAGE(level) - 1;
3067                 VM_BUG_ON((gfn & mask) != (pfn & mask));
3068                 if (pfn & mask) {
3069                         gfn &= ~mask;
3070                         *gfnp = gfn;
3071                         kvm_release_pfn_clean(pfn);
3072                         pfn &= ~mask;
3073                         kvm_get_pfn(pfn);
3074                         *pfnp = pfn;
3075                 }
3076         }
3077 }
3078 
3079 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
3080                                 kvm_pfn_t pfn, unsigned access, int *ret_val)
3081 {
3082         /* The pfn is invalid, report the error! */
3083         if (unlikely(is_error_pfn(pfn))) {
3084                 *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
3085                 return true;
3086         }
3087 
3088         if (unlikely(is_noslot_pfn(pfn)))
3089                 vcpu_cache_mmio_info(vcpu, gva, gfn, access);
3090 
3091         return false;
3092 }
3093 
3094 static bool page_fault_can_be_fast(u32 error_code)
3095 {
3096         /*
3097          * Do not fix an mmio spte whose generation number is invalid;
3098          * it needs to be updated by the slow page fault path.
3099          */
3100         if (unlikely(error_code & PFERR_RSVD_MASK))
3101                 return false;
3102 
3103         /* See if the page fault is due to an NX violation */
3104         if (unlikely(((error_code & (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))
3105                       == (PFERR_FETCH_MASK | PFERR_PRESENT_MASK))))
3106                 return false;
3107 
3108         /*
3109          * #PF can be fast if:
3110          * 1. The shadow page table entry is not present, which could mean that
3111          *    the fault is potentially caused by access tracking (if enabled).
3112          * 2. The shadow page table entry is present and the fault is
3113          *    caused by write-protect; in that case we only need to change
3114          *    the W bit of the spte, which can be done outside of mmu_lock.
3115          *
3116          * However, if access tracking is disabled we know that a non-present
3117          * page must be a genuine page fault where we have to create a new SPTE.
3118          * So, if access tracking is disabled, we return true only for write
3119          * accesses to a present page.
3120          */
3121 
3122         return shadow_acc_track_mask != 0 ||
3123                ((error_code & (PFERR_WRITE_MASK | PFERR_PRESENT_MASK))
3124                 == (PFERR_WRITE_MASK | PFERR_PRESENT_MASK));
3125 }
3126 
3127 /*
3128  * Returns true if the SPTE was fixed successfully. Otherwise,
3129  * someone else modified the SPTE from its original value.
3130  */
3131 static bool
3132 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
3133                         u64 *sptep, u64 old_spte, u64 new_spte)
3134 {
3135         gfn_t gfn;
3136 
3137         WARN_ON(!sp->role.direct);
3138 
3139         /*
3140          * Theoretically we could also set the dirty bit (and flush the TLB)
3141          * here in order to eliminate unnecessary PML logging. See the comments
3142          * in set_spte. But fast_page_fault is very unlikely to happen with PML
3143          * enabled, so we do not do this. This might result in the same GPA
3144          * being logged in the PML buffer again when the write really happens,
3145          * and mark_page_dirty eventually being called on it twice, but that is
3146          * harmless. This also avoids the TLB flush needed after setting the
3147          * dirty bit, so non-PML cases won't be impacted.
3148          *
3149          * Compare with set_spte where instead shadow_dirty_mask is set.
3150          */
3151         if (cmpxchg64(sptep, old_spte, new_spte) != old_spte)
3152                 return false;
3153 
3154         if (is_writable_pte(new_spte) && !is_writable_pte(old_spte)) {
3155                 /*
3156                  * The gfn of a direct spte is stable since it is
3157                  * calculated from sp->gfn.
3158                  */
3159                 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
3160                 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3161         }
3162 
3163         return true;
3164 }
3165 
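/*
 * Check whether the access described by the page fault error code (fetch,
 * write, or read) would be permitted by the given spte: fetches require an
 * executable spte, writes a writable spte, and reads only a present spte.
 */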
3166 static bool is_access_allowed(u32 fault_err_code, u64 spte)
3167 {
3168         if (fault_err_code & PFERR_FETCH_MASK)
3169                 return is_executable_pte(spte);
3170 
3171         if (fault_err_code & PFERR_WRITE_MASK)
3172                 return is_writable_pte(spte);
3173 
3174         /* Fault was on Read access */
3175         return spte & PT_PRESENT_MASK;
3176 }
3177 
3178 /*
3179  * Return value:
3180  * - true: let the vcpu access the same address again.
3181  * - false: let the real page fault path fix it.
3182  */
3183 static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
3184                             u32 error_code)
3185 {
3186         struct kvm_shadow_walk_iterator iterator;
3187         struct kvm_mmu_page *sp;
3188         bool fault_handled = false;
3189         u64 spte = 0ull;
3190         uint retry_count = 0;
3191 
3192         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3193                 return false;
3194 
3195         if (!page_fault_can_be_fast(error_code))
3196                 return false;
3197 
3198         walk_shadow_page_lockless_begin(vcpu);
3199 
3200         do {
3201                 u64 new_spte;
3202 
3203                 for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
3204                         if (!is_shadow_present_pte(spte) ||
3205                             iterator.level < level)
3206                                 break;
3207 
3208                 sp = page_header(__pa(iterator.sptep));
3209                 if (!is_last_spte(spte, sp->role.level))
3210                         break;
3211 
3212                 /*
3213                  * Check whether the memory access that caused the fault would
3214                  * still cause it if it were to be performed right now. If not,
3215                  * then this is a spurious fault caused by a lazily flushed TLB,
3216                  * or some other CPU has already fixed the PTE after the
3217                  * current CPU took the fault.
3218                  *
3219                  * There is no need to check the access of upper level table
3220                  * entries since they are always ACC_ALL.
3221                  */
3222                 if (is_access_allowed(error_code, spte)) {
3223                         fault_handled = true;
3224                         break;
3225                 }
3226 
3227                 new_spte = spte;
3228 
3229                 if (is_access_track_spte(spte))
3230                         new_spte = restore_acc_track_spte(new_spte);
3231 
3232                 /*
3233                  * Currently, to simplify the code, write-protection can
3234                  * be removed in the fast path only if the SPTE was
3235                  * write-protected for dirty-logging or access tracking.
3236                  */
3237                 if ((error_code & PFERR_WRITE_MASK) &&
3238                     spte_can_locklessly_be_made_writable(spte))
3239                 {
3240                         new_spte |= PT_WRITABLE_MASK;
3241 
3242                         /*
3243                          * Do not fix write-permission on a large spte.  Since
3244                          * we only mark the first page dirty in the dirty-bitmap
3245                          * in fast_pf_fix_direct_spte(), the other pages would
3246                          * be missed if the slot has dirty logging enabled.
3247                          *
3248                          * Instead, we let the slow page fault path create a
3249                          * normal spte to fix the access.
3250                          *
3251                          * See the comments in kvm_arch_commit_memory_region().
3252                          */
3253                         if (sp->role.level > PT_PAGE_TABLE_LEVEL)
3254                                 break;
3255                 }
3256 
3257                 /* Verify that the fault can be handled in the fast path */
3258                 if (new_spte == spte ||
3259                     !is_access_allowed(error_code, new_spte))
3260                         break;
3261 
3262                 /*
3263                  * Currently, fast page fault only works for direct mappings,
3264                  * since the gfn is not stable for indirect shadow pages. See
3265                  * Documentation/virtual/kvm/locking.txt for more details.
3266                  */
3267                 fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
3268                                                         iterator.sptep, spte,
3269                                                         new_spte);
3270                 if (fault_handled)
3271                         break;
3272 
3273                 if (++retry_count > 4) {
3274                         printk_once(KERN_WARNING
3275                                 "kvm: Fast #PF retrying more than 4 times.\n");
3276                         break;
3277                 }
3278 
3279         } while (true);
3280 
3281         trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
3282                               spte, fault_handled);
3283         walk_shadow_page_lockless_end(vcpu);
3284 
3285         return fault_handled;
3286 }
3287 
3288 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3289                          gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
3290 static int make_mmu_pages_available(struct kvm_vcpu *vcpu);
3291 
3292 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
3293                          gfn_t gfn, bool prefault)
3294 {
3295         int r;
3296         int level;
3297         bool force_pt_level = false;
3298         kvm_pfn_t pfn;
3299         unsigned long mmu_seq;
3300         bool map_writable, write = error_code & PFERR_WRITE_MASK;
3301 
3302         level = mapping_level(vcpu, gfn, &force_pt_level);
3303         if (likely(!force_pt_level)) {
3304                 /*
3305                  * This path builds a PAE pagetable, so we can map
3306                  * 2MB pages at most. Therefore, check whether the level
3307                  * is larger than that.
3308                  */
3309                 if (level > PT_DIRECTORY_LEVEL)
3310                         level = PT_DIRECTORY_LEVEL;
3311 
3312                 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
3313         }
3314 
3315         if (fast_page_fault(vcpu, v, level, error_code))
3316                 return RET_PF_RETRY;
3317 
3318         mmu_seq = vcpu->kvm->mmu_notifier_seq;
3319         smp_rmb();
3320 
3321         if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
3322                 return RET_PF_RETRY;
3323 
3324         if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
3325                 return r;
3326 
3327         spin_lock(&vcpu->kvm->mmu_lock);
3328         if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
3329                 goto out_unlock;
3330         if (make_mmu_pages_available(vcpu) < 0)
3331                 goto out_unlock;
3332         if (likely(!force_pt_level))
3333                 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
3334         r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
3335         spin_unlock(&vcpu->kvm->mmu_lock);
3336 
3337         return r;
3338 
3339 out_unlock:
3340         spin_unlock(&vcpu->kvm->mmu_lock);
3341         kvm_release_pfn_clean(pfn);
3342         return RET_PF_RETRY;
3343 }
3344 
3345 
3346 static void mmu_free_roots(struct kvm_vcpu *vcpu)
3347 {
3348         int i;
3349         struct kvm_mmu_page *sp;
3350         LIST_HEAD(invalid_list);
3351 
3352         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3353                 return;
3354 
3355         if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL &&
3356             (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL ||
3357              vcpu->arch.mmu.direct_map)) {
3358                 hpa_t root = vcpu->arch.mmu.root_hpa;
3359 
3360                 spin_lock(&vcpu->kvm->mmu_lock);
3361                 sp = page_header(root);
3362                 --sp->root_count;
3363                 if (!sp->root_count && sp->role.invalid) {
3364                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
3365                         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3366                 }
3367                 spin_unlock(&vcpu->kvm->mmu_lock);
3368                 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3369                 return;
3370         }
3371 
3372         spin_lock(&vcpu->kvm->mmu_lock);
3373         for (i = 0; i < 4; ++i) {
3374                 hpa_t root = vcpu->arch.mmu.pae_root[i];
3375 
3376                 if (root) {
3377                         root &= PT64_BASE_ADDR_MASK;
3378                         sp = page_header(root);
3379                         --sp->root_count;
3380                         if (!sp->root_count && sp->role.invalid)
3381                                 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
3382                                                          &invalid_list);
3383                 }
3384                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
3385         }
3386         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
3387         spin_unlock(&vcpu->kvm->mmu_lock);
3388         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
3389 }
3390 
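/*
 * A root gfn must be backed by a visible memslot; if it is not, the guest
 * has loaded a bogus root, so request a triple fault and return non-zero.
 */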
3391 static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
3392 {
3393         int ret = 0;
3394 
3395         if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
3396                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
3397                 ret = 1;
3398         }
3399 
3400         return ret;
3401 }
3402 
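/*
 * Allocate shadow page table roots for a direct-mapped MMU: a single root
 * page when the shadow page table has four or more levels, or four PAE
 * roots (one per PDPTE slot) for a PAE shadow page table.
 */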
3403 static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
3404 {
3405         struct kvm_mmu_page *sp;
3406         unsigned i;
3407 
3408         if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL) {
3409                 spin_lock(&vcpu->kvm->mmu_lock);
3410                 if (make_mmu_pages_available(vcpu) < 0) {
3411                         spin_unlock(&vcpu->kvm->mmu_lock);
3412                         return -ENOSPC;
3413                 }
3414                 sp = kvm_mmu_get_page(vcpu, 0, 0,
3415                                 vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
3416                 ++sp->root_count;
3417                 spin_unlock(&vcpu->kvm->mmu_lock);
3418                 vcpu->arch.mmu.root_hpa = __pa(sp->spt);
3419         } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
3420                 for (i = 0; i < 4; ++i) {
3421                         hpa_t root = vcpu->arch.mmu.pae_root[i];
3422 
3423                         MMU_WARN_ON(VALID_PAGE(root));
3424                         spin_lock(&vcpu->kvm->mmu_lock);
3425                         if (make_mmu_pages_available(vcpu) < 0) {
3426                                 spin_unlock(&vcpu->kvm->mmu_lock);
3427                                 return -ENOSPC;
3428                         }
3429                         sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
3430                                         i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
3431                         root = __pa(sp->spt);
3432                         ++sp->root_count;
3433                         spin_unlock(&vcpu->kvm->mmu_lock);
3434                         vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
3435                 }
3436                 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
3437         } else
3438                 BUG();
3439 
3440         return 0;
3441 }
3442 
3443 static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
3444 {
3445         struct kvm_mmu_page *sp;
3446         u64 pdptr, pm_mask;
3447         gfn_t root_gfn;
3448         int i;
3449 
3450         root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
3451 
3452         if (mmu_check_root(vcpu, root_gfn))
3453                 return 1;
3454 
3455         /*
3456          * Do we shadow a long mode page table? If so we need to
3457          * write-protect the guest's page table root.
3458          */
3459         if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
3460                 hpa_t root = vcpu->arch.mmu.root_hpa;
3461 
3462                 MMU_WARN_ON(VALID_PAGE(root));
3463 
3464                 spin_lock(&vcpu->kvm->mmu_lock);
3465                 if (make_mmu_pages_available(vcpu) < 0) {
3466                         spin_unlock(&vcpu->kvm->mmu_lock);
3467                         return -ENOSPC;
3468                 }
3469                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
3470                                 vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
3471                 root = __pa(sp->spt);
3472                 ++sp->root_count;
3473                 spin_unlock(&vcpu->kvm->mmu_lock);
3474                 vcpu->arch.mmu.root_hpa = root;
3475                 return 0;
3476         }
3477 
3478         /*
3479          * We shadow a 32 bit page table. This may be a legacy 2-level
3480          * or a PAE 3-level page table. In either case we need to be aware that
3481          * the shadow page table may be a PAE or a long mode page table.
3482          */
3483         pm_mask = PT_PRESENT_MASK;
3484         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL)
3485                 pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
3486 
3487         for (i = 0; i < 4; ++i) {
3488                 hpa_t root = vcpu->arch.mmu.pae_root[i];
3489 
3490                 MMU_WARN_ON(VALID_PAGE(root));
3491                 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
3492                         pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
3493                         if (!(pdptr & PT_PRESENT_MASK)) {
3494                                 vcpu->arch.mmu.pae_root[i] = 0;
3495                                 continue;
3496                         }
3497                         root_gfn = pdptr >> PAGE_SHIFT;
3498                         if (mmu_check_root(vcpu, root_gfn))
3499                                 return 1;
3500                 }
3501                 spin_lock(&vcpu->kvm->mmu_lock);
3502                 if (make_mmu_pages_available(vcpu) < 0) {
3503                         spin_unlock(&vcpu->kvm->mmu_lock);
3504                         return -ENOSPC;
3505                 }
3506                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
3507                                       0, ACC_ALL);
3508                 root = __pa(sp->spt);
3509                 ++sp->root_count;
3510                 spin_unlock(&vcpu->kvm->mmu_lock);
3511 
3512                 vcpu->arch.mmu.pae_root[i] = root | pm_mask;
3513         }
3514         vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
3515 
3516         /*
3517          * If we shadow a 32-bit page table with a long mode page
3518          * table, we enter this path.
3519          */
3520         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
3521                 if (vcpu->arch.mmu.lm_root == NULL) {
3522                         /*
3523                          * The additional page necessary for this is only
3524                          * allocated on demand.
3525                          */
3526 
3527                         u64 *lm_root;
3528 
3529                         lm_root = (void *)get_zeroed_page(GFP_KERNEL);
3530                         if (lm_root == NULL)
3531                                 return 1;
3532 
3533                         lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
3534 
3535                         vcpu->arch.mmu.lm_root = lm_root;
3536                 }
3537 
3538                 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
3539         }
3540 
3541         return 0;
3542 }
3543 
3544 static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
3545 {
3546         if (vcpu->arch.mmu.direct_map)
3547                 return mmu_alloc_direct_roots(vcpu);
3548         else
3549                 return mmu_alloc_shadow_roots(vcpu);
3550 }
3551 
3552 static void mmu_sync_roots(struct kvm_vcpu *vcpu)
3553 {
3554         int i;
3555         struct kvm_mmu_page *sp;
3556 
3557         if (vcpu->arch.mmu.direct_map)
3558                 return;
3559 
3560         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3561                 return;
3562 
3563         vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
3564         kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
3565         if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
3566                 hpa_t root = vcpu->arch.mmu.root_hpa;
3567                 sp = page_header(root);
3568                 mmu_sync_children(vcpu, sp);
3569                 kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3570                 return;
3571         }
3572         for (i = 0; i < 4; ++i) {
3573                 hpa_t root = vcpu->arch.mmu.pae_root[i];
3574 
3575                 if (root && VALID_PAGE(root)) {
3576                         root &= PT64_BASE_ADDR_MASK;
3577                         sp = page_header(root);
3578                         mmu_sync_children(vcpu, sp);
3579                 }
3580         }
3581         kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
3582 }
3583 
3584 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3585 {
3586         spin_lock(&vcpu->kvm->mmu_lock);
3587         mmu_sync_roots(vcpu);
3588         spin_unlock(&vcpu->kvm->mmu_lock);
3589 }
3590 EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
3591 
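/* With paging disabled in the guest, a GVA is already a GPA. */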
3592 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
3593                                   u32 access, struct x86_exception *exception)
3594 {
3595         if (exception)
3596                 exception->error_code = 0;
3597         return vaddr;
3598 }
3599 
3600 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
3601                                          u32 access,
3602                                          struct x86_exception *exception)
3603 {
3604         if (exception)
3605                 exception->error_code = 0;
3606         return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
3607 }
3608 
3609 static bool
3610 __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check, u64 pte, int level)
3611 {
3612         int bit7 = (pte >> 7) & 1, low6 = pte & 0x3f;
3613 
3614         return (pte & rsvd_check->rsvd_bits_mask[bit7][level-1]) |
3615                 ((rsvd_check->bad_mt_xwr & (1ull << low6)) != 0);
3616 }
3617 
3618 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
3619 {
3620         return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level);
3621 }
3622 
3623 static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level)
3624 {
3625         return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
3626 }
3627 
3628 static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3629 {
3630         /*
3631          * A nested guest cannot use the MMIO cache if it is using nested
3632          * page tables, because cr2 is a nGPA while the cache stores GPAs.
3633          */
3634         if (mmu_is_nested(vcpu))
3635                 return false;
3636 
3637         if (direct)
3638                 return vcpu_match_mmio_gpa(vcpu, addr);
3639 
3640         return vcpu_match_mmio_gva(vcpu, addr);
3641 }
3642 
3643 /* Return true if reserved bits are detected in the spte. */
3644 static bool
3645 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
3646 {
3647         struct kvm_shadow_walk_iterator iterator;
3648         u64 sptes[PT64_ROOT_MAX_LEVEL], spte = 0ull;
3649         int root, leaf;
3650         bool reserved = false;
3651 
3652         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3653                 goto exit;
3654 
3655         walk_shadow_page_lockless_begin(vcpu);
3656 
3657         for (shadow_walk_init(&iterator, vcpu, addr),
3658                  leaf = root = iterator.level;
3659              shadow_walk_okay(&iterator);
3660              __shadow_walk_next(&iterator, spte)) {
3661                 spte = mmu_spte_get_lockless(iterator.sptep);
3662 
3663                 sptes[leaf - 1] = spte;
3664                 leaf--;
3665 
3666                 if (!is_shadow_present_pte(spte))
3667                         break;
3668 
3669                 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
3670                                                     iterator.level);
3671         }
3672 
3673         walk_shadow_page_lockless_end(vcpu);
3674 
3675         if (reserved) {
3676                 pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
3677                        __func__, addr);
3678                 while (root > leaf) {
3679                         pr_err("------ spte 0x%llx level %d.\n",
3680                                sptes[root - 1], root);
3681                         root--;
3682                 }
3683         }
3684 exit:
3685         *sptep = spte;
3686         return reserved;
3687 }
3688 
3689 static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
3690 {
3691         u64 spte;
3692         bool reserved;
3693 
3694         if (mmio_info_in_cache(vcpu, addr, direct))
3695                 return RET_PF_EMULATE;
3696 
3697         reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
3698         if (WARN_ON(reserved))
3699                 return -EINVAL;
3700 
3701         if (is_mmio_spte(spte)) {
3702                 gfn_t gfn = get_mmio_spte_gfn(spte);
3703                 unsigned access = get_mmio_spte_access(spte);
3704 
3705                 if (!check_mmio_spte(vcpu, spte))
3706                         return RET_PF_INVALID;
3707 
3708                 if (direct)
3709                         addr = 0;
3710 
3711                 trace_handle_mmio_page_fault(addr, gfn, access);
3712                 vcpu_cache_mmio_info(vcpu, addr, gfn, access);
3713                 return RET_PF_EMULATE;
3714         }
3715 
3716         /*
3717          * If the page table is zapped by other cpus, let the CPU fault again
3718          * on the address.
3719          */
3720         return RET_PF_RETRY;
3721 }
3722 EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
3723 
3724 static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
3725                                          u32 error_code, gfn_t gfn)
3726 {
3727         if (unlikely(error_code & PFERR_RSVD_MASK))
3728                 return false;
3729 
3730         if (!(error_code & PFERR_PRESENT_MASK) ||
3731               !(error_code & PFERR_WRITE_MASK))
3732                 return false;
3733 
3734         /*
3735          * The guest is writing a page which is write-tracked, which
3736          * cannot be fixed by the page fault handler.
3737          */
3738         if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
3739                 return true;
3740 
3741         return false;
3742 }
3743 
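/*
 * Reset the write-flooding count on every shadow page along the walk for
 * this address; the guest took a fault through these pages, which suggests
 * they are still being used as page tables.
 */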
3744 static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
3745 {
3746         struct kvm_shadow_walk_iterator iterator;
3747         u64 spte;
3748 
3749         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3750                 return;
3751 
3752         walk_shadow_page_lockless_begin(vcpu);
3753         for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
3754                 clear_sp_write_flooding_count(iterator.sptep);
3755                 if (!is_shadow_present_pte(spte))
3756                         break;
3757         }
3758         walk_shadow_page_lockless_end(vcpu);
3759 }
3760 
3761 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
3762                                 u32 error_code, bool prefault)
3763 {
3764         gfn_t gfn = gva >> PAGE_SHIFT;
3765         int r;
3766 
3767         pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
3768 
3769         if (page_fault_handle_page_track(vcpu, error_code, gfn))
3770                 return RET_PF_EMULATE;
3771 
3772         r = mmu_topup_memory_caches(vcpu);
3773         if (r)
3774                 return r;
3775 
3776         MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3777 
3778 
3779         return nonpaging_map(vcpu, gva & PAGE_MASK,
3780                              error_code, gfn, prefault);
3781 }
3782 
3783 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
3784 {
3785         struct kvm_arch_async_pf arch;
3786 
3787         arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
3788         arch.gfn = gfn;
3789         arch.direct_map = vcpu->arch.mmu.direct_map;
3790         arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
3791 
3792         return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
3793 }
3794 
3795 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
3796 {
3797         if (unlikely(!lapic_in_kernel(vcpu) ||
3798                      kvm_event_needs_reinjection(vcpu) ||
3799                      vcpu->arch.exception.pending))
3800                 return false;
3801 
3802         if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
3803                 return false;
3804 
3805         return kvm_x86_ops->interrupt_allowed(vcpu);
3806 }
3807 
3808 static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
3809                          gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable)
3810 {
3811         struct kvm_memory_slot *slot;
3812         bool async;
3813 
3814         slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3815         async = false;
3816         *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
3817         if (!async)
3818                 return false; /* *pfn has correct page already */
3819 
3820         if (!prefault && kvm_can_do_async_pf(vcpu)) {
3821                 trace_kvm_try_async_get_page(gva, gfn);
3822                 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
3823                         trace_kvm_async_pf_doublefault(gva, gfn);
3824                         kvm_make_request(KVM_REQ_APF_HALT, vcpu);
3825                         return true;
3826                 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
3827                         return true;
3828         }
3829 
3830         *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL, write, writable);
3831         return false;
3832 }
3833 
3834 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3835                                 u64 fault_address, char *insn, int insn_len)
3836 {
3837         int r = 1;
3838 
3839         switch (vcpu->arch.apf.host_apf_reason) {
3840         default:
3841                 trace_kvm_page_fault(fault_address, error_code);
3842 
3843                 if (kvm_event_needs_reinjection(vcpu))
3844                         kvm_mmu_unprotect_page_virt(vcpu, fault_address);
3845                 r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
3846                                 insn_len);
3847                 break;
3848         case KVM_PV_REASON_PAGE_NOT_PRESENT:
3849                 vcpu->arch.apf.host_apf_reason = 0;
3850                 local_irq_disable();
3851                 kvm_async_pf_task_wait(fault_address, 0);
3852                 local_irq_enable();
3853                 break;
3854         case KVM_PV_REASON_PAGE_READY:
3855                 vcpu->arch.apf.host_apf_reason = 0;
3856                 local_irq_disable();
3857                 kvm_async_pf_task_wake(fault_address);
3858                 local_irq_enable();
3859                 break;
3860         }
3861         return r;
3862 }
3863 EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
3864 
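/*
 * Return true if the whole gfn range covered by a huge page at this level
 * has a consistent MTRR memory type, i.e. it is safe to map the range with
 * a single large spte.
 */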
3865 static bool
3866 check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
3867 {
3868         int page_num = KVM_PAGES_PER_HPAGE(level);
3869 
3870         gfn &= ~(page_num - 1);
3871 
3872         return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
3873 }
3874 
3875 static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
3876                           bool prefault)
3877 {
3878         kvm_pfn_t pfn;
3879         int r;
3880         int level;
3881         bool force_pt_level;
3882         gfn_t gfn = gpa >> PAGE_SHIFT;
3883         unsigned long mmu_seq;
3884         int write = error_code & PFERR_WRITE_MASK;
3885         bool map_writable;
3886 
3887         MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3888 
3889         if (page_fault_handle_page_track(vcpu, error_code, gfn))
3890                 return RET_PF_EMULATE;
3891 
3892         r = mmu_topup_memory_caches(vcpu);
3893         if (r)
3894                 return r;
3895 
3896         force_pt_level = !check_hugepage_cache_consistency(vcpu, gfn,
3897                                                            PT_DIRECTORY_LEVEL);
3898         level = mapping_level(vcpu, gfn, &force_pt_level);
3899         if (likely(!force_pt_level)) {
3900                 if (level > PT_DIRECTORY_LEVEL &&
3901                     !check_hugepage_cache_consistency(vcpu, gfn, level))
3902                         level = PT_DIRECTORY_LEVEL;
3903                 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
3904         }
3905 
3906         if (fast_page_fault(vcpu, gpa, level, error_code))
3907                 return RET_PF_RETRY;
3908 
3909         mmu_seq = vcpu->kvm->mmu_notifier_seq;
3910         smp_rmb();
3911 
3912         if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
3913                 return RET_PF_RETRY;
3914 
3915         if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
3916                 return r;
3917 
3918         spin_lock(&vcpu->kvm->mmu_lock);
3919         if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
3920                 goto out_unlock;
3921         if (make_mmu_pages_available(vcpu) < 0)
3922                 goto out_unlock;
3923         if (likely(!force_pt_level))
3924                 transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
3925         r = __direct_map(vcpu, write, map_writable, level, gfn, pfn, prefault);
3926         spin_unlock(&vcpu->kvm->mmu_lock);
3927 
3928         return r;
3929 
3930 out_unlock:
3931         spin_unlock(&vcpu->kvm->mmu_lock);
3932         kvm_release_pfn_clean(pfn);
3933         return RET_PF_RETRY;
3934 }
3935 
3936 static void nonpaging_init_context(struct kvm_vcpu *vcpu,
3937                                    struct kvm_mmu *context)
3938 {
3939         context->page_fault = nonpaging_page_fault;
3940         context->gva_to_gpa = nonpaging_gva_to_gpa;
3941         context->sync_page = nonpaging_sync_page;
3942         context->invlpg = nonpaging_invlpg;
3943         context->update_pte = nonpaging_update_pte;
3944         context->root_level = 0;
3945         context->shadow_root_level = PT32E_ROOT_LEVEL;
3946         context->root_hpa = INVALID_PAGE;
3947         context->direct_map = true;
3948         context->nx = false;
3949 }
3950 
3951 void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu)
3952 {
3953         mmu_free_roots(vcpu);
3954 }
3955 
3956 static unsigned long get_cr3(struct kvm_vcpu *vcpu)
3957 {
3958         return kvm_read_cr3(vcpu);
3959 }
3960 
3961 static void inject_page_fault(struct kvm_vcpu *vcpu,
3962                               struct x86_exception *fault)
3963 {
3964         vcpu->arch.mmu.inject_page_fault(vcpu, fault);
3965 }
3966 
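/*
 * While syncing a shadow page, drop an mmio spte whose gfn no longer
 * matches; otherwise refresh it (and its generation number) in place and
 * count it as present. Returns true if the spte was an mmio spte.
 */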
3967 static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
3968                            unsigned access, int *nr_present)
3969 {
3970         if (unlikely(is_mmio_spte(*sptep))) {
3971                 if (gfn != get_mmio_spte_gfn(*sptep)) {
3972                         mmu_spte_clear_no_track(sptep);
3973                         return true;
3974                 }
3975 
3976                 (*nr_present)++;
3977                 mark_mmio_spte(vcpu, sptep, gfn, access);
3978                 return true;
3979         }
3980 
3981         return false;
3982 }
3983 
3984 static inline bool is_last_gpte(struct kvm_mmu *mmu,
3985                                 unsigned level, unsigned gpte)
3986 {
3987         /*
3988          * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
3989          * If it is clear, there are no large pages at this level, so clear
3990          * PT_PAGE_SIZE_MASK in gpte if that is the case.
3991          */
3992         gpte &= level - mmu->last_nonleaf_level;
3993 
3994         /*
3995          * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
3996          * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
3997          * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
3998          */
3999         gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
4000 
4001         return gpte & PT_PAGE_SIZE_MASK;
4002 }
4003 
4004 #define PTTYPE_EPT 18 /* arbitrary */
4005 #define PTTYPE PTTYPE_EPT
4006 #include "paging_tmpl.h"
4007 #undef PTTYPE
4008 
4009 #define PTTYPE 64
4010 #include "paging_tmpl.h"
4011 #undef PTTYPE
4012 
4013 #define PTTYPE 32
4014 #include "paging_tmpl.h"
4015 #undef PTTYPE
4016 
4017 static void
4018 __reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4019                         struct rsvd_bits_validate *rsvd_check,
4020                         int maxphyaddr, int level, bool nx, bool gbpages,
4021                         bool pse, bool amd)
4022 {
4023         u64 exb_bit_rsvd = 0;
4024         u64 gbpages_bit_rsvd = 0;
4025         u64 nonleaf_bit8_rsvd = 0;
4026 
4027         rsvd_check->bad_mt_xwr = 0;
4028 
4029         if (!nx)
4030                 exb_bit_rsvd = rsvd_bits(63, 63);
4031         if (!gbpages)
4032                 gbpages_bit_rsvd = rsvd_bits(7, 7);
4033 
4034         /*
4035          * Non-leaf PML4Es and PDPEs reserve bit 8 (which would be the G bit for
4036          * leaf entries) on AMD CPUs only.
4037          */
4038         if (amd)
4039                 nonleaf_bit8_rsvd = rsvd_bits(8, 8);
4040 
4041         switch (level) {
4042         case PT32_ROOT_LEVEL:
4043                 /* no rsvd bits for 2 level 4K page table entries */
4044                 rsvd_check->rsvd_bits_mask[0][1] = 0;
4045                 rsvd_check->rsvd_bits_mask[0][0] = 0;
4046                 rsvd_check->rsvd_bits_mask[1][0] =
4047                         rsvd_check->rsvd_bits_mask[0][0];
4048 
4049                 if (!pse) {
4050                         rsvd_check->rsvd_bits_mask[1][1] = 0;
4051                         break;
4052                 }
4053 
4054                 if (is_cpuid_PSE36())
4055                         /* 36-bit PSE 4MB page */
4056                         rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
4057                 else
4058                         /* 32-bit PSE 4MB page */
4059                         rsvd_check->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
4060                 break;
4061         case PT32E_ROOT_LEVEL:
4062                 rsvd_check->rsvd_bits_mask[0][2] =
4063                         rsvd_bits(maxphyaddr, 63) |
4064                         rsvd_bits(5, 8) | rsvd_bits(1, 2);      /* PDPTE */
4065                 rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
4066                         rsvd_bits(maxphyaddr, 62);      /* PDE */
4067                 rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
4068                         rsvd_bits(maxphyaddr, 62);      /* PTE */
4069                 rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
4070                         rsvd_bits(maxphyaddr, 62) |
4071                         rsvd_bits(13, 20);              /* large page */
4072                 rsvd_check->rsvd_bits_mask[1][0] =
4073                         rsvd_check->rsvd_bits_mask[0][0];
4074                 break;
4075         case PT64_ROOT_5LEVEL:
4076                 rsvd_check->rsvd_bits_mask[0][4] = exb_bit_rsvd |
4077                         nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
4078                         rsvd_bits(maxphyaddr, 51);
4079                 rsvd_check->rsvd_bits_mask[1][4] =
4080                         rsvd_check->rsvd_bits_mask[0][4];
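                /* fall through */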
4081         case PT64_ROOT_4LEVEL:
4082                 rsvd_check->rsvd_bits_mask[0][3] = exb_bit_rsvd |
4083                         nonleaf_bit8_rsvd | rsvd_bits(7, 7) |
4084                         rsvd_bits(maxphyaddr, 51);
4085                 rsvd_check->rsvd_bits_mask[0][2] = exb_bit_rsvd |
4086                         nonleaf_bit8_rsvd | gbpages_bit_rsvd |
4087                         rsvd_bits(maxphyaddr, 51);
4088                 rsvd_check->rsvd_bits_mask[0][1] = exb_bit_rsvd |
4089                         rsvd_bits(maxphyaddr, 51);
4090                 rsvd_check->rsvd_bits_mask[0][0] = exb_bit_rsvd |
4091                         rsvd_bits(maxphyaddr, 51);
4092                 rsvd_check->rsvd_bits_mask[1][3] =
4093                         rsvd_check->rsvd_bits_mask[0][3];
4094                 rsvd_check->rsvd_bits_mask[1][2] = exb_bit_rsvd |
4095                         gbpages_bit_rsvd | rsvd_bits(maxphyaddr, 51) |
4096                         rsvd_bits(13, 29);
4097                 rsvd_check->rsvd_bits_mask[1][1] = exb_bit_rsvd |
4098                         rsvd_bits(maxphyaddr, 51) |
4099                         rsvd_bits(13, 20);              /* large page */
4100                 rsvd_check->rsvd_bits_mask[1][0] =
4101                         rsvd_check->rsvd_bits_mask[0][0];
4102                 break;
4103         }
4104 }
4105 
4106 static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
4107                                   struct kvm_mmu *context)
4108 {
4109         __reset_rsvds_bits_mask(vcpu, &context->guest_rsvd_check,
4110                                 cpuid_maxphyaddr(vcpu), context->root_level,
4111                                 context->nx,
4112                                 guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4113                                 is_pse(vcpu), guest_cpuid_is_amd(vcpu));
4114 }
4115 
4116 static void
4117 __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
4118                             int maxphyaddr, bool execonly)
4119 {
4120         u64 bad_mt_xwr;
4121 
4122         rsvd_check->rsvd_bits_mask[0][4] =
4123                 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
4124         rsvd_check->rsvd_bits_mask[0][3] =
4125                 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
4126         rsvd_check->rsvd_bits_mask[0][2] =
4127                 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
4128         rsvd_check->rsvd_bits_mask[0][1] =
4129                 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
4130         rsvd_check->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
4131 
4132         /* large page */
4133         rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
4134         rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
4135         rsvd_check->rsvd_bits_mask[1][2] =
4136                 rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
4137         rsvd_check->rsvd_bits_mask[1][1] =
4138                 rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
4139         rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
4140 
4141         bad_mt_xwr = 0xFFull << (2 * 8);        /* bits 3..5 must not be 2 */
4142         bad_mt_xwr |= 0xFFull << (3 * 8);       /* bits 3..5 must not be 3 */
4143         bad_mt_xwr |= 0xFFull << (7 * 8);       /* bits 3..5 must not be 7 */
4144         bad_mt_xwr |= REPEAT_BYTE(1ull << 2);   /* bits 0..2 must not be 010 */
4145         bad_mt_xwr |= REPEAT_BYTE(1ull << 6);   /* bits 0..2 must not be 110 */
4146         if (!execonly) {
4147                 /* bits 0..2 must not be 100 unless VMX capabilities allow it */
4148                 bad_mt_xwr |= REPEAT_BYTE(1ull << 4);
4149         }
4150         rsvd_check->bad_mt_xwr = bad_mt_xwr;
4151 }
4152 
4153 static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
4154                 struct kvm_mmu *context, bool execonly)
4155 {
4156         __reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
4157                                     cpuid_maxphyaddr(vcpu), execonly);
4158 }
4159 
4160 /*
4161  * The page table on the host is the shadow page table for the page
4162  * table in the guest or an AMD nested guest; its mmu features completely
4163  * follow the features in the guest.
4164  */
4165 void
4166 reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
4167 {
4168         bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
4169         struct rsvd_bits_validate *shadow_zero_check;
4170         int i;
4171 
4172         /*
4173          * Passing "true" to the last argument is okay; it adds a check
4174          * on bit 8 of the SPTEs which KVM doesn't use anyway.
4175          */
4176         shadow_zero_check = &context->shadow_zero_check;
4177         __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
4178                                 boot_cpu_data.x86_phys_bits,
4179                                 context->shadow_root_level, uses_nx,
4180                                 guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES),
4181                                 is_pse(vcpu), true);
4182 
4183         if (!shadow_me_mask)
4184                 return;
4185 
4186         for (i = context->shadow_root_level; --i >= 0;) {
4187                 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4188                 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4189         }
4190 
4191 }
4192 EXPORT_SYMBOL_GPL(reset_shadow_zero_bits_mask);
4193 
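/*
 * shadow_x_mask is non-zero only when EPT supplies an executable bit, so
 * with TDP enabled a zero mask indicates an AMD host using NPT.
 */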
4194 static inline bool boot_cpu_is_amd(void)
4195 {
4196         WARN_ON_ONCE(!tdp_enabled);
4197         return shadow_x_mask == 0;
4198 }
4199 
4200 /*
4201  * The direct page table on the host uses as many mmu features as
4202  * possible; however, kvm currently does not do execution-protection.
4203  */
4204 static void
4205 reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4206                                 struct kvm_mmu *context)
4207 {
4208         struct rsvd_bits_validate *shadow_zero_check;
4209         int i;
4210 
4211         shadow_zero_check = &context->shadow_zero_check;
4212 
4213         if (boot_cpu_is_amd())
4214                 __reset_rsvds_bits_mask(vcpu, shadow_zero_check,
4215                                         boot_cpu_data.x86_phys_bits,
4216                                         context->shadow_root_level, false,
4217                                         boot_cpu_has(X86_FEATURE_GBPAGES),
4218                                         true, true);
4219         else
4220                 __reset_rsvds_bits_mask_ept(shadow_zero_check,
4221                                             boot_cpu_data.x86_phys_bits,
4222                                             false);
4223 
4224         if (!shadow_me_mask)
4225                 return;
4226 
4227         for (i = context->shadow_root_level; --i >= 0;) {
4228                 shadow_zero_check->rsvd_bits_mask[0][i] &= ~shadow_me_mask;
4229                 shadow_zero_check->rsvd_bits_mask[1][i] &= ~shadow_me_mask;
4230         }
4231 }
4232 
4233 /*
4234  * Same as the comments in reset_shadow_zero_bits_mask(), except this
4235  * is the shadow page table for an Intel nested guest.
4236  */
4237 static void
4238 reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
4239                                 struct kvm_mmu *context, bool execonly)
4240 {
4241         __reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
4242                                     boot_cpu_data.x86_phys_bits, execonly);
4243 }
4244 
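/*
 * Expand a single ACC_* permission bit into a byte-sized bitmap: bit i of
 * the result is set iff the 3-bit UWX combination i includes that
 * permission. The result is used below to build the per-PFEC fault masks.
 */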
4245 #define BYTE_MASK(access) \
4246         ((1 & (access) ? 2 : 0) | \
4247          (2 & (access) ? 4 : 0) | \
4248          (3 & (access) ? 8 : 0) | \
4249          (4 & (access) ? 16 : 0) | \
4250          (5 & (access) ? 32 : 0) | \
4251          (6 & (access) ? 64 : 0) | \
4252          (7 & (access) ? 128 : 0))
4253 
4254 
4255 static void update_permission_bitmask(struct kvm_vcpu *vcpu,
4256                                       struct kvm_mmu *mmu, bool ept)
4257 {
4258         unsigned byte;
4259 
4260         const u8 x = BYTE_MASK(ACC_EXEC_MASK);
4261         const u8 w = BYTE_MASK(ACC_WRITE_MASK);
4262         const u8 u = BYTE_MASK(ACC_USER_MASK);
4263 
4264         bool cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP) != 0;
4265         bool cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP) != 0;
4266         bool cr0_wp = is_write_protection(vcpu);
4267 
4268         for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
4269                 unsigned pfec = byte << 1;
4270 
4271                 /*
4272                  * Each "*f" variable has a 1 bit for each UWX value
4273                  * that causes a fault with the given PFEC.
4274                  */
4275 
4276                 /* Faults from writes to non-writable pages */
4277                 u8 wf = (pfec & PFERR_WRITE_MASK) ? ~w : 0;
4278                 /* Faults from user mode accesses to supervisor pages */
4279                 u8 uf = (pfec & PFERR_USER_MASK) ? ~u : 0;
4280                 /* Faults from fetches of non-executable pages */
4281                 u8 ff = (pfec & PFERR_FETCH_MASK) ? ~x : 0;
4282                 /* Faults from kernel mode fetches of user pages */
4283                 u8 smepf = 0;
4284                 /* Faults from kernel mode accesses of user pages */
4285                 u8 smapf = 0;
4286 
4287                 if (!ept) {
4288                         /* Faults from kernel mode accesses to user pages */
4289                         u8 kf = (pfec & PFERR_USER_MASK) ? 0 : u;
4290 
4291                         /* Not really needed: !nx will cause pte.nx to fault */
4292                         if (!mmu->nx)
4293                                 ff = 0;
4294 
4295                         /* Allow supervisor writes if !cr0.wp */
4296                         if (!cr0_wp)
4297                                 wf = (pfec & PFERR_USER_MASK) ? wf : 0;
4298 
4299                         /* Disallow supervisor fetches of user code if cr4.smep */
4300                         if (cr4_smep)
4301                                 smepf = (pfec & PFERR_FETCH_MASK) ? kf : 0;
4302 
4303                         /*
4304                          * SMAP:kernel-mode data accesses from user-mode
4305                          * mappings should fault. A fault is considered
4306                          * as a SMAP violation if all of the following
4307                          * conditions are true:
4308                          *   - X86_CR4_SMAP is set in CR4
4309                          *   - A user page is accessed
4310                          *   - The access is not a fetch
4311                          *   - Page fault in kernel mode
4312                          *   - if CPL = 3 or X86_EFLAGS_AC is clear
4313                          *
4314                          * Here, we cover the first three conditions.
4315                          * The fourth is computed dynamically in permission_fault();
4316                          * PFERR_RSVD_MASK bit will be set in PFEC if the access is
4317                          * *not* subject to SMAP restrictions.
4318                          */
4319                         if (cr4_smap)
4320                                 smapf = (pfec & (PFERR_RSVD_MASK|PFERR_FETCH_MASK)) ? 0 : kf;
4321                 }
4322 
4323                 mmu->permissions[byte] = ff | uf | wf | smepf | smapf;
4324         }
4325 }
4326 
4327 /*
4328 * PKU is an additional mechanism by which paging controls access to
4329 * user-mode addresses based on the value in the PKRU register.  Protection
4330 * key violations are reported through a bit in the page fault error code.
4331 * Unlike other bits of the error code, the PK bit is not known at the
4332 * call site of e.g. gva_to_gpa; it must be computed directly in
4333 * permission_fault based on two bits of PKRU, on some machine state (CR4,
4334 * CR0, EFER, CPL), and on other bits of the error code and the page tables.
4335 *
4336 * In particular the following conditions come from the error code, the
4337 * page tables and the machine state:
4338 * - PK is always zero unless CR4.PKE=1 and EFER.LMA=1
4339 * - PK is always zero if RSVD=1 (reserved bit set) or F=1 (instruction fetch)
4340 * - PK is always zero if U=0 in the page tables
4341 * - PKRU.WD is ignored if CR0.WP=0 and the access is a supervisor access.
4342 *
4343 * The PKRU bitmask caches the result of these four conditions.  The error
4344 * code (minus the P bit) and the page table's U bit form an index into the
4345 * PKRU bitmask.  Two bits of the PKRU bitmask are then extracted and ANDed
4346 * with the two bits of the PKRU register corresponding to the protection key.
4347 * For the first three conditions above the bits will be 00, thus masking
4348 * away both AD and WD.  For all reads or if the last condition holds, WD
4349 * only will be masked away.
4350 */
4351 static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
4352                                 bool ept)
4353 {
4354         unsigned bit;
4355         bool wp;
4356 
4357         if (ept) {
4358                 mmu->pkru_mask = 0;
4359                 return;
4360         }
4361 
4362         /* PKEY is enabled only if CR4.PKE and EFER.LMA are both set. */
4363         if (!kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || !is_long_mode(vcpu)) {
4364                 mmu->pkru_mask = 0;
4365                 return;
4366         }
4367 
4368         wp = is_write_protection(vcpu);
4369 
4370         for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) {
4371                 unsigned pfec, pkey_bits;
4372                 bool check_pkey, check_write, ff, uf, wf, pte_user;
4373 
4374                 pfec = bit << 1;
4375                 ff = pfec & PFERR_FETCH_MASK;
4376                 uf = pfec & PFERR_USER_MASK;
4377                 wf = pfec & PFERR_WRITE_MASK;
4378 
4379                 /* PFEC.RSVD is replaced by ACC_USER_MASK. */
4380                 pte_user = pfec & PFERR_RSVD_MASK;
4381 
4382                 /*
4383                  * We only need to check accesses which are not an
4384                  * instruction fetch and are to a user page.
4385                  */
4386                 check_pkey = (!ff && pte_user);
4387                 /*
4388                  * Write access is controlled by PKRU if it is a
4389                  * user access or CR0.WP = 1.
4390                  */
4391                 check_write = check_pkey && wf && (uf || wp);
4392 
4393                 /* PKRU.AD stops both read and write access. */
4394                 pkey_bits = !!check_pkey;
4395                 /* PKRU.WD stops write access. */
4396                 pkey_bits |= (!!check_write) << 1;
4397 
4398                 mmu->pkru_mask |= (pkey_bits & 3) << pfec;
4399         }
4400 }
4401 
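/*
 * Cache the lowest level at which guest PTEs are always non-leaf: entries
 * below this level may map large pages, entries at or above it never do.
 * For 32-bit non-PAE paging this depends on whether PSE (4MB pages) is
 * enabled; see is_last_gpte() for how the value is consumed.
 */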
4402 static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
4403 {
4404         unsigned root_level = mmu->root_level;
4405 
4406         mmu->last_nonleaf_level = root_level;
4407         if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
4408                 mmu->last_nonleaf_level++;
4409 }
4410 
4411 static void paging64_init_context_common(struct kvm_vcpu *vcpu,
4412                                          struct kvm_mmu *context,
4413                                          int level)
4414 {
4415         context->nx = is_nx(vcpu);
4416         context->root_level = level;
4417 
4418         reset_rsvds_bits_mask(vcpu, context);
4419         update_permission_bitmask(vcpu, context, false);
4420         update_pkru_bitmask(vcpu, context, false);
4421         update_last_nonleaf_level(vcpu, context);
4422 
4423         MMU_WARN_ON(!is_pae(vcpu));
4424         context->page_fault = paging64_page_fault;
4425         context->gva_to_gpa = paging64_gva_to_gpa;
4426         context->sync_page = paging64_sync_page;
4427         context->invlpg = paging64_invlpg;
4428         context->update_pte = paging64_update_pte;
4429         context->shadow_root_level = level;
4430         context->root_hpa = INVALID_PAGE;
4431         context->direct_map = false;
4432 }
4433 
4434 static void paging64_init_context(struct kvm_vcpu *vcpu,
4435                                   struct kvm_mmu *context)
4436 {
4437         int root_level = is_la57_mode(vcpu) ?
4438                          PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4439 
4440         paging64_init_context_common(vcpu, context, root_level);
4441 }
4442 
4443 static void paging32_init_context(struct kvm_vcpu *vcpu,
4444                                   struct kvm_mmu *context)
4445 {
4446         context->nx = false;
4447         context->root_level = PT32_ROOT_LEVEL;
4448 
4449         reset_rsvds_bits_mask(vcpu, context);
4450         update_permission_bitmask(vcpu, context, false);
4451         update_pkru_bitmask(vcpu, context, false);
4452         update_last_nonleaf_level(vcpu, context);
4453 
4454         context->page_fault = paging32_page_fault;
4455         context->gva_to_gpa = paging32_gva_to_gpa;
4456         context->sync_page = paging32_sync_page;
4457         context->invlpg = paging32_invlpg;
4458         context->update_pte = paging32_update_pte;
4459         context->shadow_root_level = PT32E_ROOT_LEVEL;
4460         context->root_hpa = INVALID_PAGE;
4461         context->direct_map = false;
4462 }
4463 
4464 static void paging32E_init_context(struct kvm_vcpu *vcpu,
4465                                    struct kvm_mmu *context)
4466 {
4467         paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
4468 }
4469 
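/*
 * Set up the vcpu MMU for hardware two-dimensional paging (EPT/NPT):
 * faults are handled by tdp_page_fault on a direct map, while gva_to_gpa
 * still follows the guest's own paging mode so that emulated accesses
 * can be translated.
 */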
4470 static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
4471 {
4472         struct kvm_mmu *context = &vcpu->arch.mmu;
4473 
4474         context->base_role.word = 0;
4475         context->base_role.smm = is_smm(vcpu);
4476         context->base_role.ad_disabled = (shadow_accessed_mask == 0);
4477         context->page_fault = tdp_page_fault;
4478         context->sync_page = nonpaging_sync_page;
4479         context->invlpg = nonpaging_invlpg;
4480         context->update_pte = nonpaging_update_pte;
4481         context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
4482         context->root_hpa = INVALID_PAGE;
4483         context->direct_map = true;
4484         context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
4485         context->get_cr3 = get_cr3;
4486         context->get_pdptr = kvm_pdptr_read;
4487         context->inject_page_fault = kvm_inject_page_fault;
4488 
4489         if (!is_paging(vcpu)) {
4490                 context->nx = false;
4491                 context->gva_to_gpa = nonpaging_gva_to_gpa;
4492                 context->root_level = 0;
4493         } else if (is_long_mode(vcpu)) {
4494                 context->nx = is_nx(vcpu);
4495                 context->root_level = is_la57_mode(vcpu) ?
4496                                 PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4497                 reset_rsvds_bits_mask(vcpu, context);
4498                 context->gva_to_gpa = paging64_gva_to_gpa;
4499         } else if (is_pae(vcpu)) {
4500                 context->nx = is_nx(vcpu);
4501                 context->root_level = PT32E_ROOT_LEVEL;
4502                 reset_rsvds_bits_mask(vcpu, context);
4503                 context->gva_to_gpa = paging64_gva_to_gpa;
4504         } else {
4505                 context->nx = false;
4506                 context->root_level = PT32_ROOT_LEVEL;
4507                 reset_rsvds_bits_mask(vcpu, context);
4508                 context->gva_to_gpa = paging32_gva_to_gpa;
4509         }
4510 
4511         update_permission_bitmask(vcpu, context, false);
4512         update_pkru_bitmask(vcpu, context, false);
4513         update_last_nonleaf_level(vcpu, context);
4514         reset_tdp_shadow_zero_bits_mask(vcpu, context);
4515 }
4516 
4517 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
4518 {
4519         bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
4520         bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
4521         struct kvm_mmu *context = &vcpu->arch.mmu;
4522 
4523         MMU_WARN_ON(VALID_PAGE(context->root_hpa));
4524 
4525         if (!is_paging(vcpu))
4526                 nonpaging_init_context(vcpu, context);
4527         else if (is_long_mode(vcpu))
4528                 paging64_init_context(vcpu, context);
4529         else if (is_pae(vcpu))
4530                 paging32E_init_context(vcpu, context);
4531         else
4532                 paging32_init_context(vcpu, context);
4533 
4534         context->base_role.nxe = is_nx(vcpu);
4535         context->base_role.cr4_pae = !!is_pae(vcpu);
4536         context->base_role.cr0_wp  = is_write_protection(vcpu);
4537         context->base_role.smep_andnot_wp
4538                 = smep && !is_write_protection(vcpu);
4539         context->base_role.smap_andnot_wp
4540                 = smap && !is_write_protection(vcpu);
4541         context->base_role.smm = is_smm(vcpu);
4542         reset_shadow_zero_bits_mask(vcpu, context);
4543 }
4544 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
4545 
4546 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
4547                              bool accessed_dirty)
4548 {
4549         struct kvm_mmu *context = &vcpu->arch.mmu;
4550 
4551         MMU_WARN_ON(VALID_PAGE(context->root_hpa));
4552 
4553         context->shadow_root_level = PT64_ROOT_4LEVEL;
4554 
4555         context->nx = true;
4556         context->ept_ad = accessed_dirty;
4557         context->page_fault = ept_page_fault;
4558         context->gva_to_gpa = ept_gva_to_gpa;
4559         context->sync_page = ept_sync_page;
4560         context->invlpg = ept_invlpg;
4561         context->update_pte = ept_update_pte;
4562         context->root_level = PT64_ROOT_4LEVEL;
4563         context->root_hpa = INVALID_PAGE;
4564         context->direct_map = false;
4565         context->base_role.ad_disabled = !accessed_dirty;
4566 
4567         update_permission_bitmask(vcpu, context, true);
4568         update_pkru_bitmask(vcpu, context, true);
4569         update_last_nonleaf_level(vcpu, context);
4570         reset_rsvds_bits_mask_ept(vcpu, context, execonly);
4571         reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
4572 }
4573 EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
4574 
4575 static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
4576 {
4577         struct kvm_mmu *context = &vcpu->arch.mmu;
4578 
4579         kvm_init_shadow_mmu(vcpu);
4580         context->set_cr3           = kvm_x86_ops->set_cr3;
4581         context->get_cr3           = get_cr3;
4582         context->get_pdptr         = kvm_pdptr_read;
4583         context->inject_page_fault = kvm_inject_page_fault;
4584 }
4585 
4586 static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
4587 {
4588         struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
4589 
4590         g_context->get_cr3           = get_cr3;
4591         g_context->get_pdptr         = kvm_pdptr_read;
4592         g_context->inject_page_fault = kvm_inject_page_fault;
4593 
4594         /*
4595          * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using
4596          * L1's nested page tables (e.g. EPT12). The nested translation
4597          * of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
4598          * L2's page tables as the first level of translation and L1's
4599          * nested page tables as the second level of translation. Basically
4600          * the gva_to_gpa functions between mmu and nested_mmu are swapped.
4601          */
4602         if (!is_paging(vcpu)) {
4603                 g_context->nx = false;
4604                 g_context->root_level = 0;
4605                 g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
4606         } else if (is_long_mode(vcpu)) {
4607                 g_context->nx = is_nx(vcpu);
4608                 g_context->root_level = is_la57_mode(vcpu) ?
4609                                         PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
4610                 reset_rsvds_bits_mask(vcpu, g_context);
4611                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4612         } else if (is_pae(vcpu)) {
4613                 g_context->nx = is_nx(vcpu);
4614                 g_context->root_level = PT32E_ROOT_LEVEL;
4615                 reset_rsvds_bits_mask(vcpu, g_context);
4616                 g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
4617         } else {
4618                 g_context->nx = false;
4619                 g_context->root_level = PT32_ROOT_LEVEL;
4620                 reset_rsvds_bits_mask(vcpu, g_context);
4621                 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
4622         }
4623 
4624         update_permission_bitmask(vcpu, g_context, false);
4625         update_pkru_bitmask(vcpu, g_context, false);
4626         update_last_nonleaf_level(vcpu, g_context);
4627 }
4628 
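/*
 * Pick the MMU flavour for this vcpu: the nested walker when a nested
 * guest is running behind two levels of paging, the TDP MMU when
 * two-dimensional paging is enabled, and the software shadow MMU
 * otherwise.
 */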
4629 static void init_kvm_mmu(struct kvm_vcpu *vcpu)
4630 {
4631         if (mmu_is_nested(vcpu))
4632                 init_kvm_nested_mmu(vcpu);
4633         else if (tdp_enabled)
4634                 init_kvm_tdp_mmu(vcpu);
4635         else
4636                 init_kvm_softmmu(vcpu);
4637 }
4638 
4639 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
4640 {
4641         kvm_mmu_unload(vcpu);
4642         init_kvm_mmu(vcpu);
4643 }
4644 EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
4645 
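/*
 * Load a valid root for the vcpu: top up the per-vcpu memory caches,
 * allocate and sync the root shadow pages, then point the hardware at
 * the new root via the set_cr3 callback.
 */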
4646 int kvm_mmu_load(struct kvm_vcpu *vcpu)
4647 {
4648         int r;
4649 
4650         r = mmu_topup_memory_caches(vcpu);
4651         if (r)
4652                 goto out;
4653         r = mmu_alloc_roots(vcpu);
4654         kvm_mmu_sync_roots(vcpu);
4655         if (r)
4656                 goto out;
4657         /* set_cr3() should ensure TLB has been flushed */
4658         vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
4659 out:
4660         return r;
4661 }
4662 EXPORT_SYMBOL_GPL(kvm_mmu_load);
4663 
4664 void kvm_mmu_unload(struct kvm_vcpu *vcpu)
4665 {
4666         mmu_free_roots(vcpu);
4667         WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
4668 }
4669 EXPORT_SYMBOL_GPL(kvm_mmu_unload);
4670 
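/*
 * Prefetch the written guest pte into the spte the caller just zapped.
 * Only last-level shadow pages are updated in place; for higher levels
 * the write is merely accounted as a zapped pde.
 */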
4671 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
4672                                   struct kvm_mmu_page *sp, u64 *spte,
4673                                   const void *new)
4674 {
4675         if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
4676                 ++vcpu->kvm->stat.mmu_pde_zapped;
4677                 return;
4678         }
4679 
4680         ++vcpu->kvm->stat.mmu_pte_updated;
4681         vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
4682 }
4683 
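/*
 * Decide whether other vcpus must flush their TLBs after a spte change:
 * only when the old spte was present and the new one drops the mapping,
 * points at a different frame, or removes a permission the old one
 * granted (NX is inverted first so that setting NX counts as removing
 * execute permission).
 */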
4684 static bool need_remote_flush(u64 old, u64 new)
4685 {
4686         if (!is_shadow_present_pte(old))
4687                 return false;
4688         if (!is_shadow_present_pte(new))
4689                 return true;
4690         if ((old ^ new) & PT64_BASE_ADDR_MASK)
4691                 return true;
4692         old ^= shadow_nx_mask;
4693         new ^= shadow_nx_mask;
4694         return (old & ~new & PT64_PERM_MASK) != 0;
4695 }
4696 
4697 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
4698                                     const u8 *new, int *bytes)
4699 {
4700         u64 gentry;
4701         int r;
4702 
4703         /*
4704          * Assume that the pte write is on a page table of the same type
4705          * as the current vcpu's paging mode, since we update the sptes
4706          * only when they have the same mode.
4707          */
4708         if (is_pae(vcpu) && *bytes == 4) {
4709                 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
4710                 *gpa &= ~(gpa_t)7;
4711                 *bytes = 8;
4712                 r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
4713                 if (r)
4714                         gentry = 0;
4715                 new = (const u8 *)&gentry;
4716         }
4717 
4718         switch (*bytes) {
4719         case 4:
4720                 gentry = *(const u32 *)new;
4721                 break;
4722         case 8:
4723                 gentry = *(const u64 *)new;
4724                 break;
4725         default:
4726                 gentry = 0;
4727                 break;
4728         }
4729 
4730         return gentry;
4731 }
4732 
4733 /*
4734  * If we're seeing too many writes to a page, it may no longer be a page table,
4735  * or we may be forking, in which case it is better to unmap the page.
4736  */
4737 static bool detect_write_flooding(struct kvm_mmu_page *sp)
4738 {
4739         /*
4740          * Skip write-flooding detection for an sp whose level is 1: it can
4741          * become unsync, and then the guest page is no longer write-protected.
4742          */
4743         if (sp->role.level == PT_PAGE_TABLE_LEVEL)
4744                 return false;
4745 
4746         atomic_inc(&sp->write_flooding_count);
4747         return atomic_read(&sp->write_flooding_count) >= 3;
4748 }
4749 
4750 /*
4751  * Misaligned accesses are too much trouble to fix up; also, they usually
4752  * indicate a page is not used as a page table.
4753  */
4754 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
4755                                     int bytes)
4756 {
4757         unsigned offset, pte_size, misaligned;
4758 
4759         pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4760                  gpa, bytes, sp->role.word);
4761 
4762         offset = offset_in_page(gpa);
4763         pte_size = sp->role.cr4_pae ? 8 : 4;
4764 
4765         /*
4766          * Sometimes the OS only writes the last byte to update status
4767          * bits; for example, Linux uses the andb instruction in clear_bit().
4768          */
4769         if (!(offset & (pte_size - 1)) && bytes == 1)
4770                 return false;
4771 
4772         misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
4773         misaligned |= bytes < 4;
4774 
4775         return misaligned;
4776 }
4777 
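/*
 * Map the written gpa onto the spte(s) inside this shadow page.  With
 * 32-bit guest ptes the offset is doubled (4-byte gptes shadow 8-byte
 * sptes) and the quadrant is checked; a 32-bit pde covers two shadow
 * pdes, so two sptes are returned in that case.  Returns NULL when the
 * write does not land in this page's quadrant.
 */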
4778 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
4779 {
4780         unsigned page_offset, quadrant;
4781         u64 *spte;
4782         int level;
4783 
4784         page_offset = offset_in_page(gpa);
4785         level = sp->role.level;
4786         *nspte = 1;
4787         if (!sp->role.cr4_pae) {
4788                 page_offset <<= 1;      /* 32->64 */
4789                 /*
4790                  * A 32-bit pde maps 4MB while the shadow pdes map
4791                  * only 2MB.  So we need to double the offset again
4792                  * and zap two pdes instead of one.
4793                  */
4794                 if (level == PT32_ROOT_LEVEL) {
4795                         page_offset &= ~7; /* kill rounding error */
4796                         page_offset <<= 1;
4797                         *nspte = 2;
4798                 }
4799                 quadrant = page_offset >> PAGE_SHIFT;
4800                 page_offset &= ~PAGE_MASK;
4801                 if (quadrant != sp->role.quadrant)
4802                         return NULL;
4803         }
4804 
4805         spte = &sp->spt[page_offset / sizeof(*spte)];
4806         return spte;
4807 }
4808 
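/*
 * Write-track callback, invoked when the guest writes to a gfn that is
 * shadowed as a page table: fetch the written gpte, zap the affected
 * sptes (refilling them when the page's role is compatible), and zap
 * whole pages whose writes look misaligned or flooded.
 */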
4809 static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
4810                               const u8 *new, int bytes,
4811                               struct kvm_page_track_notifier_node *node)
4812 {
4813         gfn_t gfn = gpa >> PAGE_SHIFT;
4814         struct kvm_mmu_page *sp;
4815         LIST_HEAD(invalid_list);
4816         u64 entry, gentry, *spte;
4817         int npte;
4818         bool remote_flush, local_flush;
4819         union kvm_mmu_page_role mask = { };
4820 
4821         mask.cr0_wp = 1;
4822         mask.cr4_pae = 1;
4823         mask.nxe = 1;
4824         mask.smep_andnot_wp = 1;
4825         mask.smap_andnot_wp = 1;
4826         mask.smm = 1;
4827         mask.ad_disabled = 1;
4828 
4829         /*
4830          * If we don't have indirect shadow pages, it means no page is
4831          * write-protected, so we can simply exit.
4832          */
4833         if (!READ_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
4834                 return;
4835 
4836         remote_flush = local_flush = false;
4837 
4838         pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
4839 
4840         gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
4841 
4842         /*
4843          * No need to care whether the memory allocation is successful
4844          * or not, since pte prefetch is skipped if there are not
4845          * enough objects in the cache.
4846          */
4847         mmu_topup_memory_caches(vcpu);
4848 
4849         spin_lock(&vcpu->kvm->mmu_lock);
4850         ++vcpu->kvm->stat.mmu_pte_write;
4851         kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
4852 
4853         for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
4854                 if (detect_write_misaligned(sp, gpa, bytes) ||
4855                       detect_write_flooding(sp)) {
4856                         kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
4857                         ++vcpu->kvm->stat.mmu_flooded;
4858                         continue;
4859                 }
4860 
4861                 spte = get_written_sptes(sp, gpa, &npte);
4862                 if (!spte)
4863                         continue;
4864 
4865                 local_flush = true;
4866                 while (npte--) {
4867                         entry = *spte;
4868                         mmu_page_zap_pte(vcpu->kvm, sp, spte);
4869                         if (gentry &&
4870                               !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
4871                               & mask.word) && rmap_can_add(vcpu))
4872                                 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
4873                         if (need_remote_flush(entry, *spte))
4874                                 remote_flush = true;
4875                         ++spte;
4876                 }
4877         }
4878         kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
4879         kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
4880         spin_unlock(&vcpu->kvm->mmu_lock);
4881 }
4882 
4883 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
4884 {
4885         gpa_t gpa;
4886         int r;
4887 
4888         if (vcpu->arch.mmu.direct_map)
4889                 return 0;
4890 
4891         gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
4892 
4893         r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
4894 
4895         return r;
4896 }
4897 EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
4898 
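/*
 * Make sure a minimum number of shadow pages is available before a
 * fault is handled: if fewer than KVM_MIN_FREE_MMU_PAGES are free, zap
 * the oldest pages until KVM_REFILL_PAGES are available, and return
 * -ENOSPC if no pages could be made available.
 */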
4899 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
4900 {
4901         LIST_HEAD(invalid_list);
4902 
4903         if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
4904                 return 0;
4905 
4906         while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
4907                 if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
4908                         break;
4909 
4910                 ++vcpu->kvm->stat.mmu_recycled;
4911         }
4912         kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
4913 
4914         if (!kvm_mmu_available_pages(vcpu->kvm))
4915                 return -ENOSPC;
4916         return 0;
4917 }
4918 
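/*
 * Common page-fault entry point: reserved-bit faults are first routed
 * to the MMIO handler, everything else goes through the per-mode
 * page_fault callback, and faults the MMU cannot fix fall through to
 * the instruction emulator (or, when a nested guest took a write fault
 * on its own page tables, the page is simply unprotected and the guest
 * resumed).
 */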
4919 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
4920                        void *insn, int insn_len)
4921 {
4922         int r, emulation_type = EMULTYPE_RETRY;
4923         enum emulation_result er;
4924         bool direct = vcpu->arch.mmu.direct_map;
4925 
4926         /* With shadow page tables, fault_address contains a GVA or nGPA.  */
4927         if (vcpu->arch.mmu.direct_map) {
4928                 vcpu->arch.gpa_available = true;
4929                 vcpu->arch.gpa_val = cr2;
4930         }
4931 
4932         r = RET_PF_INVALID;
4933         if (unlikely(error_code & PFERR_RSVD_MASK)) {
4934                 r = handle_mmio_page_fault(vcpu, cr2, direct);
4935                 if (r == RET_PF_EMULATE) {
4936                         emulation_type = 0;
4937                         goto emulate;
4938                 }
4939         }
4940 
4941         if (r == RET_PF_INVALID) {
4942                 r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
4943                                               false);
4944                 WARN_ON(r == RET_PF_INVALID);
4945         }
4946 
4947         if (r == RET_PF_RETRY)
4948                 return 1;
4949         if (r < 0)
4950                 return r;
4951 
4952         /*
4953          * Before emulating the instruction, check if the error code
4954          * was due to a RO violation while translating the guest page.
4955          * This can occur when using nested virtualization with nested
4956          * paging in both guests. If true, we simply unprotect the page
4957          * and resume the guest.
4958          */
4959         if (vcpu->arch.mmu.direct_map &&
4960             (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
4961                 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
4962                 return 1;
4963         }
4964 
4965         if (mmio_info_in_cache(vcpu, cr2, direct))
4966                 emulation_type = 0;
4967 emulate:
4968         /*
4969          * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
4970          * This can happen if a guest gets a page-fault on data access but the HW
4971          * table walker is not able to read the instruction page (e.g. the
4972          * page is not present in memory). In those cases we simply restart the
4973          * guest.
4974          */
4975         if (unlikely(insn && !insn_len))
4976                 return 1;
4977 
4978         er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
4979 
4980         switch (er) {
4981         case EMULATE_DONE:
4982                 return 1;
4983         case EMULATE_USER_EXIT:
4984                 ++vcpu->stat.mmio_exits;
4985                 /* fall through */
4986         case EMULATE_FAIL:
4987                 return 0;
4988         default:
4989                 BUG();
4990         }
4991 }
4992 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
4993 
4994 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
4995 {
4996         vcpu->arch.mmu.invlpg(vcpu, gva);
4997         kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
4998         ++vcpu->stat.invlpg;
4999 }
5000 EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
5001 
5002 void kvm_enable_tdp(void)
5003 {
5004         tdp_enabled = true;
5005 }
5006 EXPORT_SYMBOL_GPL(kvm_enable_tdp);
5007 
5008 void kvm_disable_tdp(void)
5009 {
5010         tdp_enabled = false;
5011 }
5012 EXPORT_SYMBOL_GPL(kvm_disable_tdp);
5013 
5014 static void free_mmu_pages(struct kvm_vcpu *vcpu)
5015 {
5016         free_page((unsigned long)vcpu->arch.mmu.pae_root);
5017         free_page((unsigned long)vcpu->arch.mmu.lm_root);
5018 }
5019 
5020 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
5021 {
5022         struct page *page;
5023         int i;
5024 
5025         /*
5026          * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
5027          * Therefore we need to allocate shadow page tables in the first
5028          * 4GB of memory, which happens to fit the DMA32 zone.
5029          */
5030         page = alloc_page(GFP_KERNEL | __GFP_DMA32);
5031         if (!page)
5032                 return -ENOMEM;
5033 
5034         vcpu->arch.mmu.pae_root = page_address(page);
5035         for (i = 0; i < 4; ++i)
5036                 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
5037 
5038         return 0;
5039 }
5040 
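/*
 * Per-vcpu MMU creation: point walk_mmu at the regular MMU, mark the
 * root invalid, install the gpa translation helpers and allocate the
 * PAE root table.
 */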
5041 int kvm_mmu_create(struct kvm_vcpu *vcpu)
5042 {
5043         vcpu->arch.walk_mmu = &vcpu->arch.mmu;
5044         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
5045         vcpu->arch.mmu.translate_gpa = translate_gpa;
5046         vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
5047 
5048         return alloc_mmu_pages(vcpu);
5049 }
5050 
5051 void kvm_mmu_setup(struct kvm_vcpu *vcpu)
5052 {
5053         MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
5054 
5055         init_kvm_mmu(vcpu);
5056 }
5057 
5058 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
5059                         struct kvm_memory_slot *slot,
5060                         struct kvm_page_track_notifier_node *node)
5061 {
5062         kvm_mmu_invalidate_zap_all_pages(kvm);
5063 }
5064 
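/*
 * Register the page-track notifier so the MMU is told about guest
 * writes to shadowed page tables and about memslots being flushed.
 */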
5065 void kvm_mmu_init_vm(struct kvm *kvm)
5066 {
5067         struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5068 
5069         node->track_write = kvm_mmu_pte_write;
5070         node->track_flush_slot = kvm_mmu_invalidate_zap_pages_in_memslot;
5071         kvm_page_track_register_notifier(kvm, node);
5072 }
5073 
5074 void kvm_mmu_uninit_vm(struct kvm *kvm)
5075 {
5076         struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
5077 
5078         kvm_page_track_unregister_notifier(kvm, node);
5079 }
5080 
5081 /* The return value indicates if tlb flush on all vcpus is needed. */
5082 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
5083 
5084 /* The caller should hold mmu-lock before calling this function. */
5085 static __always_inline bool
5086 slot_handle_level_range(struct kvm *kvm, struct kvm_memory_slot *memslot,
5087                         slot_level_handler fn, int start_level, int end_level,
5088                         gfn_t start_gfn, gfn_t end_gfn, bool lock_flush_tlb)
5089 {
5090         struct slot_rmap_walk_iterator iterator;
5091         bool flush = false;
5092 
5093         for_each_slot_rmap_range(memslot, start_level, end_level, start_gfn,
5094                         end_gfn, &iterator) {
5095                 if (iterator.rmap)
5096                         flush |= fn(kvm, iterator.rmap);
5097 
5098                 if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
5099                         if (flush && lock_flush_tlb) {
5100                                 kvm_flush_remote_tlbs(kvm);
5101                                 flush = false;
5102                         }
5103                         cond_resched_lock(&kvm->mmu_lock);
5104                 }
5105         }
5106 
5107         if (flush && lock_flush_tlb) {
5108                 kvm_flush_remote_tlbs(kvm);
5109                 flush = false;
5110         }
5111 
5112         return flush;
5113 }
5114 
5115 static __always_inline bool
5116 slot_handle_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5117                   slot_level_handler fn, int start_level, int end_level,
5118                   bool lock_flush_tlb)
5119 {
5120         return slot_handle_level_range(kvm, memslot, fn, start_level,
5121                         end_level, memslot->base_gfn,
5122                         memslot->base_gfn + memslot->npages - 1,
5123                         lock_flush_tlb);
5124 }
5125 
5126 static __always_inline bool
5127 slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5128                       slot_level_handler fn, bool lock_flush_tlb)
5129 {
5130         return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
5131                                  PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
5132 }
5133 
5134 static __always_inline bool
5135 slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
5136                         slot_level_handler fn, bool lock_flush_tlb)
5137 {
5138         return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
5139                                  PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
5140 }
5141 
5142 static __always_inline bool
5143 slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
5144                  slot_level_handler fn, bool lock_flush_tlb)
5145 {
5146         return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
5147                                  PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
5148 }
5149 
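/*
 * Zap every rmap entry for the gfn range [gfn_start, gfn_end) across
 * all address spaces and memslots, flushing remote TLBs as needed while
 * holding mmu_lock.
 */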
5150 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
5151 {
5152         struct kvm_memslots *slots;
5153         struct kvm_memory_slot *memslot;
5154         int i;
5155 
5156         spin_lock(&kvm->mmu_lock);
5157         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5158                 slots = __kvm_memslots(kvm, i);
5159                 kvm_for_each_memslot(memslot, slots) {
5160                         gfn_t start, end;
5161 
5162                         start = max(gfn_start, memslot->base_gfn);
5163                         end = min(gfn_end, memslot->base_gfn + memslot->npages);
5164                         if (start >= end)
5165                                 continue;
5166 
5167                         slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
5168                                                 PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
5169                                                 start, end - 1, true);
5170                 }
5171         }
5172 
5173         spin_unlock(&kvm->mmu_lock);
5174 }
5175 
5176 static bool slot_rmap_write_protect(struct kvm *kvm,
5177                                     struct kvm_rmap_head *rmap_head)
5178 {
5179         return __rmap_write_protect(kvm, rmap_head, false);
5180 }
5181 
5182 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
5183                                       struct kvm_memory_slot *memslot)
5184 {
5185         bool flush;
5186 
5187         spin_lock(&kvm->mmu_lock);
5188         flush = slot_handle_all_level(kvm, memslot, slot_rmap_write_protect,
5189                                       false);
5190         spin_unlock(&kvm->mmu_lock);
5191 
5192         /*
5193          * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log()
5194          * which flush the TLB outside of mmu-lock, must be serialized by
5195          * kvm->slots_lock; otherwise a tlb flush could be missed.
5196          */
5197         lockdep_assert_held(&kvm->slots_lock);
5198 
5199         /*
5200          * We can flush all the TLBs outside of the mmu lock without TLB
5201          * corruption, since we only change sptes from writable to
5202          * read-only, so the only case we need to care about is a spte
5203          * changing from present to present (changing a spte from present
5204          * to non-present flushes all the TLBs immediately).  In other
5205          * words, the only case we care about is mmu_spte_update(), which
5206          * checks SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE instead of
5207          * PT_WRITABLE_MASK, meaning it no longer depends on
5208          * PT_WRITABLE_MASK.
5209          */
5210         if (flush)
5211                 kvm_flush_remote_tlbs(kvm);
5212 }
5213 
5214 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
5215                                          struct kvm_rmap_head *rmap_head)
5216 {
5217         u64 *sptep;
5218         struct rmap_iterator iter;
5219         int need_tlb_flush = 0;
5220         kvm_pfn_t pfn;
5221         struct kvm_mmu_page *sp;
5222 
5223 restart:
5224         for_each_rmap_spte(rmap_head, &iter, sptep) {
5225                 sp = page_header(__pa(sptep));
5226                 pfn = spte_to_pfn(*sptep);
5227 
5228                 /*
5229                  * We cannot do huge page mapping for indirect shadow pages,
5230                  * which are found on the last rmap (level = 1) when not using
5231                  * tdp; such shadow pages are synced with the page table in
5232                  * the guest, and the guest page table uses 4K-page
5233                  * mappings if the indirect sp has level = 1.
5234                  */
5235                 if (sp->role.direct &&
5236                         !kvm_is_reserved_pfn(pfn) &&
5237                         PageTransCompoundMap(pfn_to_page(pfn))) {
5238                         drop_spte(kvm, sptep);
5239                         need_tlb_flush = 1;
5240                         goto restart;
5241                 }
5242         }
5243 
5244         return need_tlb_flush;
5245 }
5246 
5247 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
5248                                    const struct kvm_memory_slot *memslot)
5249 {
5250         /* FIXME: const-ify all uses of struct kvm_memory_slot.  */
5251         spin_lock(&kvm->mmu_lock);
5252         slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
5253                          kvm_mmu_zap_collapsible_spte, true);
5254         spin_unlock(&kvm->mmu_lock);
5255 }
5256 
5257 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
5258                                    struct kvm_memory_slot *memslot)
5259 {
5260         bool flush;
5261 
5262         spin_lock(&kvm->mmu_lock);
5263         flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
5264         spin_unlock(&kvm->mmu_lock);
5265 
5266         lockdep_assert_held(&kvm->slots_lock);
5267 
5268         /*
5269          * It's also safe to flush TLBs out of mmu lock here as currently this
5270          * function is only used for dirty logging, in which case flushing TLB
5271          * out of mmu lock also guarantees no dirty pages will be lost in
5272          * dirty_bitmap.
5273          */
5274         if (flush)
5275                 kvm_flush_remote_tlbs(kvm);
5276 }
5277 EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
5278 
5279 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
5280                                         struct kvm_memory_slot *memslot)
5281 {
5282         bool flush;
5283 
5284         spin_lock(&kvm->mmu_lock);
5285         flush = slot_handle_large_level(kvm, memslot, slot_rmap_write_protect,
5286                                         false);
5287         spin_unlock(&kvm->mmu_lock);
5288 
5289         /* see kvm_mmu_slot_remove_write_access */
5290         lockdep_assert_held(&kvm->slots_lock);
5291 
5292         if (flush)
5293                 kvm_flush_remote_tlbs(kvm);
5294 }
5295 EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
5296 
5297 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
5298                             struct kvm_memory_slot *memslot)
5299 {
5300         bool flush;
5301 
5302         spin_lock(&kvm->mmu_lock);
5303         flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
5304         spin_unlock(&kvm->mmu_lock);
5305 
5306         lockdep_assert_held(&kvm->slots_lock);
5307 
5308         /* see kvm_mmu_slot_leaf_clear_dirty */
5309         if (flush)
5310                 kvm_flush_remote_tlbs(kvm);
5311 }
5312 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
5313 
5314 #define BATCH_ZAP_PAGES 10
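/*
 * Walk active_mmu_pages from the tail and zap every page that belongs
 * to a stale mmu_valid_gen, yielding mmu_lock via cond_resched_lock()
 * once at least BATCH_ZAP_PAGES pages have been zapped so that other
 * work can make progress.
 */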
5315 static void kvm_zap_obsolete_pages(struct kvm *kvm)
5316 {
5317         struct kvm_mmu_page *sp, *node;
5318         int batch = 0;
5319 
5320 restart:
5321         list_for_each_entry_safe_reverse(sp, node,
5322               &kvm->arch.active_mmu_pages, link) {
5323                 int ret;
5324 
5325                 /*
5326                  * No obsolete page exists before a newly created page, since
5327                  * active_mmu_pages is a FIFO list.
5328                  */
5329                 if (!is_obsolete_sp(kvm, sp))
5330                         break;
5331 
5332                 /*
5333                  * Since we walk the list in reverse and invalid pages
5334                  * are moved to the head, skipping invalid pages helps us
5335                  * avoid walking the list forever.
5336                  */
5337                 if (sp->role.invalid)
5338                         continue;
5339 
5340                 /*
5341                  * No need to flush the tlb since we only zap sps with an
5342                  * invalid generation number.
5343                  */
5344                 if (batch >= BATCH_ZAP_PAGES &&
5345                       cond_resched_lock(&kvm->mmu_lock)) {
5346                         batch = 0;
5347                         goto restart;
5348                 }
5349 
5350                 ret = kvm_mmu_prepare_zap_page(kvm, sp,
5351                                 &kvm->arch.zapped_obsolete_pages);
5352                 batch += ret;
5353 
5354                 if (ret)
5355                         goto restart;
5356         }
5357 
5358         /*
5359          * The tlb should be flushed before the page tables are freed, since
5360          * lockless walking may still be using the pages.
5361          */
5362         kvm_mmu_commit_zap_page(kvm, &kvm->arch.zapped_obsolete_pages);
5363 }
5364 
5365 /*
5366  * Fast invalidate all shadow pages and use lock-break technique
5367  * to zap obsolete pages.
5368  *
5369  * It's required when a memslot is being deleted or the VM is being
5370  * destroyed; in these cases, we must ensure that the KVM MMU does
5371  * not use any resource of the slot being deleted (or of any slot)
5372  * after this function has been called.
5373  */
5374 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm)
5375 {
5376         spin_lock(&kvm->mmu_lock);
5377         trace_kvm_mmu_invalidate_zap_all_pages(kvm);
5378         kvm->arch.mmu_valid_gen++;
5379 
5380         /*
5381          * Notify all vcpus to reload their shadow page tables
5382          * and flush the TLB. Then all vcpus will switch to a new
5383          * shadow page table with the new mmu_valid_gen.
5384          *
5385          * Note: we should do this under the protection of
5386          * mmu-lock, otherwise a vcpu could purge a shadow page
5387          * but miss the tlb flush.
5388          */
5389         kvm_reload_remote_mmus(kvm);
5390 
5391         kvm_zap_obsolete_pages(kvm);
5392         spin_unlock(&kvm->mmu_lock);
5393 }
5394 
5395 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm)
5396 {
5397         return unlikely(!list_empty_careful(&kvm->arch.zapped_obsolete_pages));
5398 }
5399 
5400 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, struct kvm_memslots *slots)
5401 {
5402         /*
5403          * The very rare case: if the generation number has wrapped around,
5404          * zap all shadow pages.
5405          */
5406         if (unlikely((slots->generation & MMIO_GEN_MASK) == 0)) {
5407                 kvm_debug_ratelimited("kvm: zapping shadow pages for mmio generation wraparound\n");
5408                 kvm_mmu_invalidate_zap_all_pages(kvm);
5409         }
5410 }
5411 
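/*
 * Shrinker scan callback: pick one VM from vm_list, free its
 * already-zapped obsolete pages or zap its oldest shadow page, then
 * rotate the VM to the tail of the list so successive shrinks spread
 * the cost across VMs.
 */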
5412 static unsigned long
5413 mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
5414 {
5415         struct kvm *kvm;
5416         int nr_to_scan = sc->nr_to_scan;
5417         unsigned long freed = 0;
5418 
5419         spin_lock(&kvm_lock);
5420 
5421         list_for_each_entry(kvm, &vm_list, vm_list) {
5422                 int idx;
5423                 LIST_HEAD(invalid_list);
5424 
5425                 /*
5426                  * Never scan more than sc->nr_to_scan VM instances.
5427                  * In practice we will not hit this condition, since we do not try
5428                  * to shrink more than one VM and it is very unlikely to see
5429                  * !n_used_mmu_pages so many times.
5430                  */
5431                 if (!nr_to_scan--)
5432                         break;
5433                 /*
5434                  * n_used_mmu_pages is accessed without holding kvm->mmu_lock
5435                  * here. We may skip a VM instance erroneously, but we do not
5436                  * want to shrink a VM that only started to populate its MMU
5437                  * anyway.
5438                  */
5439                 if (!kvm->arch.n_used_mmu_pages &&
5440                       !kvm_has_zapped_obsolete_pages(kvm))
5441                         continue;
5442 
5443                 idx = srcu_read_lock(&kvm->srcu);
5444                 spin_lock(&kvm->mmu_lock);
5445 
5446                 if (kvm_has_zapped_obsolete_pages(kvm)) {
5447                         kvm_mmu_commit_zap_page(kvm,
5448                               &kvm->arch.zapped_obsolete_pages);
5449                         goto unlock;
5450                 }
5451 
5452                 if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
5453                         freed++;
5454                 kvm_mmu_commit_zap_page(kvm, &invalid_list);
5455 
5456 unlock:
5457                 spin_unlock(&kvm->mmu_lock);
5458                 srcu_read_unlock(&kvm->srcu, idx);
5459 
5460                 /*
5461                  * unfair on small ones
5462                  * per-vm shrinkers cry out
5463                  * sadness comes quickly
5464                  */
5465                 list_move_tail(&kvm->vm_list, &vm_list);
5466                 break;
5467         }
5468 
5469         spin_unlock(&kvm_lock);
5470         return freed;
5471 }
5472 
5473 static unsigned long
5474 mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
5475 {
5476         return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
5477 }
5478 
5479 static struct shrinker mmu_shrinker = {
5480         .count_objects = mmu_shrink_count,
5481         .scan_objects = mmu_shrink_scan,
5482         .seeks = DEFAULT_SEEKS * 10,
5483 };
5484 
5485 static void mmu_destroy_caches(void)
5486 {
5487         kmem_cache_destroy(pte_list_desc_cache);
5488         kmem_cache_destroy(mmu_page_header_cache);
5489 }
5490 
5491 int kvm_mmu_module_init(void)
5492 {
5493         int ret = -ENOMEM;
5494 
5495         kvm_mmu_clear_all_pte_masks();
5496 
5497         pte_list_desc_cache = kmem_cache_create("pte_list_desc",
5498                                             sizeof(struct pte_list_desc),
5499                                             0, SLAB_ACCOUNT, NULL);
5500         if (!pte_list_desc_cache)
5501                 goto out;
5502 
5503         mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
5504                                                   sizeof(struct kvm_mmu_page),
5505                                                   0, SLAB_ACCOUNT, NULL);
5506         if (!mmu_page_header_cache)
5507                 goto out;
5508 
5509         if (percpu_counter_init(&kvm_total_used_mmu_pages, 0, GFP_KERNEL))
5510                 goto out;
5511 
5512         ret = register_shrinker(&mmu_shrinker);
5513         if (ret)
5514                 goto out;
5515 
5516         return 0;
5517 
5518 out:
5519         mmu_destroy_caches();
5520         return ret;
5521 }
5522 
5523 /*
5524  * Calculate the number of mmu pages needed for kvm.
5525  */
5526 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
5527 {
5528         unsigned int nr_mmu_pages;
5529         unsigned int  nr_pages = 0;
5530         struct kvm_memslots *slots;
5531         struct kvm_memory_slot *memslot;
5532         int i;
5533 
5534         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
5535                 slots = __kvm_memslots(kvm, i);
5536 
5537                 kvm_for_each_memslot(memslot, slots)
5538                         nr_pages += memslot->npages;
5539         }
5540 
5541         nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
5542         nr_mmu_pages = max(nr_mmu_pages,
5543                            (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
5544 
5545         return nr_mmu_pages;
5546 }
5547 
5548 void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
5549 {
5550         kvm_mmu_unload(vcpu);
5551         free_mmu_pages(vcpu);
5552         mmu_free_memory_caches(vcpu);
5553 }
5554 
5555 void kvm_mmu_module_exit(void)
5556 {
5557         mmu_destroy_caches();
5558         percpu_counter_destroy(&kvm_total_used_mmu_pages);
5559         unregister_shrinker(&mmu_shrinker);
5560         mmu_audit_disable();
5561 }
5562 
