~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/x86/include/asm/kvm_host.h

Version: ~ [ linux-5.15-rc1 ] ~ [ linux-5.14.5 ] ~ [ linux-5.13.18 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.66 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.147 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.206 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.246 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.282 ] ~ [ linux-4.8.17 ] ~ [ linux-4.7.10 ] ~ [ linux-4.6.7 ] ~ [ linux-4.5.7 ] ~ [ linux-4.4.283 ] ~ [ linux-4.3.6 ] ~ [ linux-4.2.8 ] ~ [ linux-4.1.52 ] ~ [ linux-4.0.9 ] ~ [ linux-3.18.140 ] ~ [ linux-3.16.85 ] ~ [ linux-3.14.79 ] ~ [ linux-3.12.74 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.5 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0-only */
  2 /*
  3  * Kernel-based Virtual Machine driver for Linux
  4  *
  5  * This header defines architecture specific interfaces, x86 version
  6  */
  7 
  8 #ifndef _ASM_X86_KVM_HOST_H
  9 #define _ASM_X86_KVM_HOST_H
 10 
 11 #include <linux/types.h>
 12 #include <linux/mm.h>
 13 #include <linux/mmu_notifier.h>
 14 #include <linux/tracepoint.h>
 15 #include <linux/cpumask.h>
 16 #include <linux/irq_work.h>
 17 #include <linux/irq.h>
 18 
 19 #include <linux/kvm.h>
 20 #include <linux/kvm_para.h>
 21 #include <linux/kvm_types.h>
 22 #include <linux/perf_event.h>
 23 #include <linux/pvclock_gtod.h>
 24 #include <linux/clocksource.h>
 25 #include <linux/irqbypass.h>
 26 #include <linux/hyperv.h>
 27 
 28 #include <asm/apic.h>
 29 #include <asm/pvclock-abi.h>
 30 #include <asm/desc.h>
 31 #include <asm/mtrr.h>
 32 #include <asm/msr-index.h>
 33 #include <asm/asm.h>
 34 #include <asm/kvm_page_track.h>
 35 #include <asm/kvm_vcpu_regs.h>
 36 #include <asm/hyperv-tlfs.h>
 37 
 38 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
 39 
 40 #define KVM_MAX_VCPUS 288
 41 #define KVM_SOFT_MAX_VCPUS 240
 42 #define KVM_MAX_VCPU_ID 1023
 43 #define KVM_USER_MEM_SLOTS 509
 44 /* memory slots that are not exposed to userspace */
 45 #define KVM_PRIVATE_MEM_SLOTS 3
 46 #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
 47 
 48 #define KVM_HALT_POLL_NS_DEFAULT 200000
 49 
 50 #define KVM_IRQCHIP_NUM_PINS  KVM_IOAPIC_NUM_PINS
 51 
 52 #define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
 53                                         KVM_DIRTY_LOG_INITIALLY_SET)
 54 
 55 /* x86-specific vcpu->requests bit members */
 56 #define KVM_REQ_MIGRATE_TIMER           KVM_ARCH_REQ(0)
 57 #define KVM_REQ_REPORT_TPR_ACCESS       KVM_ARCH_REQ(1)
 58 #define KVM_REQ_TRIPLE_FAULT            KVM_ARCH_REQ(2)
 59 #define KVM_REQ_MMU_SYNC                KVM_ARCH_REQ(3)
 60 #define KVM_REQ_CLOCK_UPDATE            KVM_ARCH_REQ(4)
 61 #define KVM_REQ_LOAD_MMU_PGD            KVM_ARCH_REQ(5)
 62 #define KVM_REQ_EVENT                   KVM_ARCH_REQ(6)
 63 #define KVM_REQ_APF_HALT                KVM_ARCH_REQ(7)
 64 #define KVM_REQ_STEAL_UPDATE            KVM_ARCH_REQ(8)
 65 #define KVM_REQ_NMI                     KVM_ARCH_REQ(9)
 66 #define KVM_REQ_PMU                     KVM_ARCH_REQ(10)
 67 #define KVM_REQ_PMI                     KVM_ARCH_REQ(11)
 68 #define KVM_REQ_SMI                     KVM_ARCH_REQ(12)
 69 #define KVM_REQ_MASTERCLOCK_UPDATE      KVM_ARCH_REQ(13)
 70 #define KVM_REQ_MCLOCK_INPROGRESS \
 71         KVM_ARCH_REQ_FLAGS(14, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 72 #define KVM_REQ_SCAN_IOAPIC \
 73         KVM_ARCH_REQ_FLAGS(15, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 74 #define KVM_REQ_GLOBAL_CLOCK_UPDATE     KVM_ARCH_REQ(16)
 75 #define KVM_REQ_APIC_PAGE_RELOAD \
 76         KVM_ARCH_REQ_FLAGS(17, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 77 #define KVM_REQ_HV_CRASH                KVM_ARCH_REQ(18)
 78 #define KVM_REQ_IOAPIC_EOI_EXIT         KVM_ARCH_REQ(19)
 79 #define KVM_REQ_HV_RESET                KVM_ARCH_REQ(20)
 80 #define KVM_REQ_HV_EXIT                 KVM_ARCH_REQ(21)
 81 #define KVM_REQ_HV_STIMER               KVM_ARCH_REQ(22)
 82 #define KVM_REQ_LOAD_EOI_EXITMAP        KVM_ARCH_REQ(23)
 83 #define KVM_REQ_GET_NESTED_STATE_PAGES  KVM_ARCH_REQ(24)
 84 #define KVM_REQ_APICV_UPDATE \
 85         KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 86 #define KVM_REQ_TLB_FLUSH_CURRENT       KVM_ARCH_REQ(26)
 87 #define KVM_REQ_HV_TLB_FLUSH \
 88         KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
 89 #define KVM_REQ_APF_READY               KVM_ARCH_REQ(28)
 90 #define KVM_REQ_MSR_FILTER_CHANGED      KVM_ARCH_REQ(29)
 91 
 92 #define CR0_RESERVED_BITS                                               \
 93         (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
 94                           | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
 95                           | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
 96 
 97 #define CR4_RESERVED_BITS                                               \
 98         (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
 99                           | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
100                           | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
101                           | X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
102                           | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \
103                           | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP))
104 
105 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
106 
107 
108 
109 #define INVALID_PAGE (~(hpa_t)0)
110 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
111 
112 #define UNMAPPED_GVA (~(gpa_t)0)
113 
114 /* KVM Hugepage definitions for x86 */
115 #define KVM_MAX_HUGEPAGE_LEVEL  PG_LEVEL_1G
116 #define KVM_NR_PAGE_SIZES       (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
117 #define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
118 #define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
119 #define KVM_HPAGE_SIZE(x)       (1UL << KVM_HPAGE_SHIFT(x))
120 #define KVM_HPAGE_MASK(x)       (~(KVM_HPAGE_SIZE(x) - 1))
121 #define KVM_PAGES_PER_HPAGE(x)  (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
122 
123 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
124 {
125         /* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
126         return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
127                 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
128 }
129 
130 #define KVM_PERMILLE_MMU_PAGES 20
131 #define KVM_MIN_ALLOC_MMU_PAGES 64UL
132 #define KVM_MMU_HASH_SHIFT 12
133 #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
134 #define KVM_MIN_FREE_MMU_PAGES 5
135 #define KVM_REFILL_PAGES 25
136 #define KVM_MAX_CPUID_ENTRIES 256
137 #define KVM_NR_FIXED_MTRR_REGION 88
138 #define KVM_NR_VAR_MTRR 8
139 
140 #define ASYNC_PF_PER_VCPU 64
141 
142 enum kvm_reg {
143         VCPU_REGS_RAX = __VCPU_REGS_RAX,
144         VCPU_REGS_RCX = __VCPU_REGS_RCX,
145         VCPU_REGS_RDX = __VCPU_REGS_RDX,
146         VCPU_REGS_RBX = __VCPU_REGS_RBX,
147         VCPU_REGS_RSP = __VCPU_REGS_RSP,
148         VCPU_REGS_RBP = __VCPU_REGS_RBP,
149         VCPU_REGS_RSI = __VCPU_REGS_RSI,
150         VCPU_REGS_RDI = __VCPU_REGS_RDI,
151 #ifdef CONFIG_X86_64
152         VCPU_REGS_R8  = __VCPU_REGS_R8,
153         VCPU_REGS_R9  = __VCPU_REGS_R9,
154         VCPU_REGS_R10 = __VCPU_REGS_R10,
155         VCPU_REGS_R11 = __VCPU_REGS_R11,
156         VCPU_REGS_R12 = __VCPU_REGS_R12,
157         VCPU_REGS_R13 = __VCPU_REGS_R13,
158         VCPU_REGS_R14 = __VCPU_REGS_R14,
159         VCPU_REGS_R15 = __VCPU_REGS_R15,
160 #endif
161         VCPU_REGS_RIP,
162         NR_VCPU_REGS,
163 
164         VCPU_EXREG_PDPTR = NR_VCPU_REGS,
165         VCPU_EXREG_CR0,
166         VCPU_EXREG_CR3,
167         VCPU_EXREG_CR4,
168         VCPU_EXREG_RFLAGS,
169         VCPU_EXREG_SEGMENTS,
170         VCPU_EXREG_EXIT_INFO_1,
171         VCPU_EXREG_EXIT_INFO_2,
172 };
173 
174 enum {
175         VCPU_SREG_ES,
176         VCPU_SREG_CS,
177         VCPU_SREG_SS,
178         VCPU_SREG_DS,
179         VCPU_SREG_FS,
180         VCPU_SREG_GS,
181         VCPU_SREG_TR,
182         VCPU_SREG_LDTR,
183 };
184 
185 enum exit_fastpath_completion {
186         EXIT_FASTPATH_NONE,
187         EXIT_FASTPATH_REENTER_GUEST,
188         EXIT_FASTPATH_EXIT_HANDLED,
189 };
190 typedef enum exit_fastpath_completion fastpath_t;
191 
192 struct x86_emulate_ctxt;
193 struct x86_exception;
194 enum x86_intercept;
195 enum x86_intercept_stage;
196 
197 #define KVM_NR_DB_REGS  4
198 
199 #define DR6_BD          (1 << 13)
200 #define DR6_BS          (1 << 14)
201 #define DR6_BT          (1 << 15)
202 #define DR6_RTM         (1 << 16)
203 #define DR6_FIXED_1     0xfffe0ff0
204 #define DR6_INIT        0xffff0ff0
205 #define DR6_VOLATILE    0x0001e00f
206 
207 #define DR7_BP_EN_MASK  0x000000ff
208 #define DR7_GE          (1 << 9)
209 #define DR7_GD          (1 << 13)
210 #define DR7_FIXED_1     0x00000400
211 #define DR7_VOLATILE    0xffff2bff
212 
213 #define PFERR_PRESENT_BIT 0
214 #define PFERR_WRITE_BIT 1
215 #define PFERR_USER_BIT 2
216 #define PFERR_RSVD_BIT 3
217 #define PFERR_FETCH_BIT 4
218 #define PFERR_PK_BIT 5
219 #define PFERR_GUEST_FINAL_BIT 32
220 #define PFERR_GUEST_PAGE_BIT 33
221 
222 #define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
223 #define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
224 #define PFERR_USER_MASK (1U << PFERR_USER_BIT)
225 #define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
226 #define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
227 #define PFERR_PK_MASK (1U << PFERR_PK_BIT)
228 #define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
229 #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
230 
231 #define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK |        \
232                                  PFERR_WRITE_MASK |             \
233                                  PFERR_PRESENT_MASK)
234 
235 /* apic attention bits */
236 #define KVM_APIC_CHECK_VAPIC    0
237 /*
238  * The following bit is set with PV-EOI, unset on EOI.
239  * We detect PV-EOI changes by guest by comparing
240  * this bit with PV-EOI in guest memory.
241  * See the implementation in apic_update_pv_eoi.
242  */
243 #define KVM_APIC_PV_EOI_PENDING 1
244 
245 struct kvm_kernel_irq_routing_entry;
246 
247 /*
248  * the pages used as guest page table on soft mmu are tracked by
249  * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
250  * by indirect shadow page can not be more than 15 bits.
251  *
252  * Currently, we used 14 bits that are @level, @gpte_is_8_bytes, @quadrant, @access,
253  * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
254  */
255 union kvm_mmu_page_role {
256         u32 word;
257         struct {
258                 unsigned level:4;
259                 unsigned gpte_is_8_bytes:1;
260                 unsigned quadrant:2;
261                 unsigned direct:1;
262                 unsigned access:3;
263                 unsigned invalid:1;
264                 unsigned nxe:1;
265                 unsigned cr0_wp:1;
266                 unsigned smep_andnot_wp:1;
267                 unsigned smap_andnot_wp:1;
268                 unsigned ad_disabled:1;
269                 unsigned guest_mode:1;
270                 unsigned :6;
271 
272                 /*
273                  * This is left at the top of the word so that
274                  * kvm_memslots_for_spte_role can extract it with a
275                  * simple shift.  While there is room, give it a whole
276                  * byte so it is also faster to load it from memory.
277                  */
278                 unsigned smm:8;
279         };
280 };
281 
282 union kvm_mmu_extended_role {
283 /*
284  * This structure complements kvm_mmu_page_role caching everything needed for
285  * MMU configuration. If nothing in both these structures changed, MMU
286  * re-configuration can be skipped. @valid bit is set on first usage so we don't
287  * treat all-zero structure as valid data.
288  */
289         u32 word;
290         struct {
291                 unsigned int valid:1;
292                 unsigned int execonly:1;
293                 unsigned int cr0_pg:1;
294                 unsigned int cr4_pae:1;
295                 unsigned int cr4_pse:1;
296                 unsigned int cr4_pke:1;
297                 unsigned int cr4_smap:1;
298                 unsigned int cr4_smep:1;
299                 unsigned int maxphyaddr:6;
300         };
301 };
302 
303 union kvm_mmu_role {
304         u64 as_u64;
305         struct {
306                 union kvm_mmu_page_role base;
307                 union kvm_mmu_extended_role ext;
308         };
309 };
310 
311 struct kvm_rmap_head {
312         unsigned long val;
313 };
314 
315 struct kvm_pio_request {
316         unsigned long linear_rip;
317         unsigned long count;
318         int in;
319         int port;
320         int size;
321 };
322 
323 #define PT64_ROOT_MAX_LEVEL 5
324 
325 struct rsvd_bits_validate {
326         u64 rsvd_bits_mask[2][PT64_ROOT_MAX_LEVEL];
327         u64 bad_mt_xwr;
328 };
329 
330 struct kvm_mmu_root_info {
331         gpa_t pgd;
332         hpa_t hpa;
333 };
334 
335 #define KVM_MMU_ROOT_INFO_INVALID \
336         ((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })
337 
338 #define KVM_MMU_NUM_PREV_ROOTS 3
339 
340 struct kvm_mmu_page;
341 
342 /*
343  * x86 supports 4 paging modes (5-level 64-bit, 4-level 64-bit, 3-level 32-bit,
344  * and 2-level 32-bit).  The kvm_mmu structure abstracts the details of the
345  * current mmu mode.
346  */
347 struct kvm_mmu {
348         unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
349         u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
350         int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
351                           bool prefault);
352         void (*inject_page_fault)(struct kvm_vcpu *vcpu,
353                                   struct x86_exception *fault);
354         gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
355                             u32 access, struct x86_exception *exception);
356         gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
357                                struct x86_exception *exception);
358         int (*sync_page)(struct kvm_vcpu *vcpu,
359                          struct kvm_mmu_page *sp);
360         void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
361         hpa_t root_hpa;
362         gpa_t root_pgd;
363         union kvm_mmu_role mmu_role;
364         u8 root_level;
365         u8 shadow_root_level;
366         u8 ept_ad;
367         bool direct_map;
368         struct kvm_mmu_root_info prev_roots[KVM_MMU_NUM_PREV_ROOTS];
369 
370         /*
371          * Bitmap; bit set = permission fault
372          * Byte index: page fault error code [4:1]
373          * Bit index: pte permissions in ACC_* format
374          */
375         u8 permissions[16];
376 
377         /*
378         * The pkru_mask indicates if protection key checks are needed.  It
379         * consists of 16 domains indexed by page fault error code bits [4:1],
380         * with PFEC.RSVD replaced by ACC_USER_MASK from the page tables.
381         * Each domain has 2 bits which are ANDed with AD and WD from PKRU.
382         */
383         u32 pkru_mask;
384 
385         u64 *pae_root;
386         u64 *lm_root;
387 
388         /*
389          * check zero bits on shadow page table entries, these
390          * bits include not only hardware reserved bits but also
391          * the bits spte never used.
392          */
393         struct rsvd_bits_validate shadow_zero_check;
394 
395         struct rsvd_bits_validate guest_rsvd_check;
396 
397         /* Can have large pages at levels 2..last_nonleaf_level-1. */
398         u8 last_nonleaf_level;
399 
400         bool nx;
401 
402         u64 pdptrs[4]; /* pae */
403 };
404 
405 struct kvm_tlb_range {
406         u64 start_gfn;
407         u64 pages;
408 };
409 
410 enum pmc_type {
411         KVM_PMC_GP = 0,
412         KVM_PMC_FIXED,
413 };
414 
415 struct kvm_pmc {
416         enum pmc_type type;
417         u8 idx;
418         u64 counter;
419         u64 eventsel;
420         struct perf_event *perf_event;
421         struct kvm_vcpu *vcpu;
422         /*
423          * eventsel value for general purpose counters,
424          * ctrl value for fixed counters.
425          */
426         u64 current_config;
427 };
428 
429 struct kvm_pmu {
430         unsigned nr_arch_gp_counters;
431         unsigned nr_arch_fixed_counters;
432         unsigned available_event_types;
433         u64 fixed_ctr_ctrl;
434         u64 global_ctrl;
435         u64 global_status;
436         u64 global_ovf_ctrl;
437         u64 counter_bitmask[2];
438         u64 global_ctrl_mask;
439         u64 global_ovf_ctrl_mask;
440         u64 reserved_bits;
441         u8 version;
442         struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
443         struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
444         struct irq_work irq_work;
445         DECLARE_BITMAP(reprogram_pmi, X86_PMC_IDX_MAX);
446         DECLARE_BITMAP(all_valid_pmc_idx, X86_PMC_IDX_MAX);
447         DECLARE_BITMAP(pmc_in_use, X86_PMC_IDX_MAX);
448 
449         /*
450          * The gate to release perf_events not marked in
451          * pmc_in_use only once in a vcpu time slice.
452          */
453         bool need_cleanup;
454 
455         /*
456          * The total number of programmed perf_events and it helps to avoid
457          * redundant check before cleanup if guest don't use vPMU at all.
458          */
459         u8 event_count;
460 };
461 
462 struct kvm_pmu_ops;
463 
464 enum {
465         KVM_DEBUGREG_BP_ENABLED = 1,
466         KVM_DEBUGREG_WONT_EXIT = 2,
467         KVM_DEBUGREG_RELOAD = 4,
468 };
469 
470 struct kvm_mtrr_range {
471         u64 base;
472         u64 mask;
473         struct list_head node;
474 };
475 
476 struct kvm_mtrr {
477         struct kvm_mtrr_range var_ranges[KVM_NR_VAR_MTRR];
478         mtrr_type fixed_ranges[KVM_NR_FIXED_MTRR_REGION];
479         u64 deftype;
480 
481         struct list_head head;
482 };
483 
484 /* Hyper-V SynIC timer */
485 struct kvm_vcpu_hv_stimer {
486         struct hrtimer timer;
487         int index;
488         union hv_stimer_config config;
489         u64 count;
490         u64 exp_time;
491         struct hv_message msg;
492         bool msg_pending;
493 };
494 
495 /* Hyper-V synthetic interrupt controller (SynIC)*/
496 struct kvm_vcpu_hv_synic {
497         u64 version;
498         u64 control;
499         u64 msg_page;
500         u64 evt_page;
501         atomic64_t sint[HV_SYNIC_SINT_COUNT];
502         atomic_t sint_to_gsi[HV_SYNIC_SINT_COUNT];
503         DECLARE_BITMAP(auto_eoi_bitmap, 256);
504         DECLARE_BITMAP(vec_bitmap, 256);
505         bool active;
506         bool dont_zero_synic_pages;
507 };
508 
509 /* Hyper-V per vcpu emulation context */
510 struct kvm_vcpu_hv {
511         u32 vp_index;
512         u64 hv_vapic;
513         s64 runtime_offset;
514         struct kvm_vcpu_hv_synic synic;
515         struct kvm_hyperv_exit exit;
516         struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
517         DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
518         cpumask_t tlb_flush;
519 };
520 
521 struct kvm_vcpu_arch {
522         /*
523          * rip and regs accesses must go through
524          * kvm_{register,rip}_{read,write} functions.
525          */
526         unsigned long regs[NR_VCPU_REGS];
527         u32 regs_avail;
528         u32 regs_dirty;
529 
530         unsigned long cr0;
531         unsigned long cr0_guest_owned_bits;
532         unsigned long cr2;
533         unsigned long cr3;
534         unsigned long cr4;
535         unsigned long cr4_guest_owned_bits;
536         unsigned long cr4_guest_rsvd_bits;
537         unsigned long cr8;
538         u32 host_pkru;
539         u32 pkru;
540         u32 hflags;
541         u64 efer;
542         u64 apic_base;
543         struct kvm_lapic *apic;    /* kernel irqchip context */
544         bool apicv_active;
545         bool load_eoi_exitmap_pending;
546         DECLARE_BITMAP(ioapic_handled_vectors, 256);
547         unsigned long apic_attention;
548         int32_t apic_arb_prio;
549         int mp_state;
550         u64 ia32_misc_enable_msr;
551         u64 smbase;
552         u64 smi_count;
553         bool tpr_access_reporting;
554         bool xsaves_enabled;
555         u64 ia32_xss;
556         u64 microcode_version;
557         u64 arch_capabilities;
558         u64 perf_capabilities;
559 
560         /*
561          * Paging state of the vcpu
562          *
563          * If the vcpu runs in guest mode with two level paging this still saves
564          * the paging mode of the l1 guest. This context is always used to
565          * handle faults.
566          */
567         struct kvm_mmu *mmu;
568 
569         /* Non-nested MMU for L1 */
570         struct kvm_mmu root_mmu;
571 
572         /* L1 MMU when running nested */
573         struct kvm_mmu guest_mmu;
574 
575         /*
576          * Paging state of an L2 guest (used for nested npt)
577          *
578          * This context will save all necessary information to walk page tables
579          * of an L2 guest. This context is only initialized for page table
580          * walking and not for faulting since we never handle l2 page faults on
581          * the host.
582          */
583         struct kvm_mmu nested_mmu;
584 
585         /*
586          * Pointer to the mmu context currently used for
587          * gva_to_gpa translations.
588          */
589         struct kvm_mmu *walk_mmu;
590 
591         struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
592         struct kvm_mmu_memory_cache mmu_shadow_page_cache;
593         struct kvm_mmu_memory_cache mmu_gfn_array_cache;
594         struct kvm_mmu_memory_cache mmu_page_header_cache;
595 
596         /*
597          * QEMU userspace and the guest each have their own FPU state.
598          * In vcpu_run, we switch between the user and guest FPU contexts.
599          * While running a VCPU, the VCPU thread will have the guest FPU
600          * context.
601          *
602          * Note that while the PKRU state lives inside the fpu registers,
603          * it is switched out separately at VMENTER and VMEXIT time. The
604          * "guest_fpu" state here contains the guest FPU context, with the
605          * host PRKU bits.
606          */
607         struct fpu *user_fpu;
608         struct fpu *guest_fpu;
609 
610         u64 xcr0;
611         u64 guest_supported_xcr0;
612 
613         struct kvm_pio_request pio;
614         void *pio_data;
615         void *guest_ins_data;
616 
617         u8 event_exit_inst_len;
618 
619         struct kvm_queued_exception {
620                 bool pending;
621                 bool injected;
622                 bool has_error_code;
623                 u8 nr;
624                 u32 error_code;
625                 unsigned long payload;
626                 bool has_payload;
627                 u8 nested_apf;
628         } exception;
629 
630         struct kvm_queued_interrupt {
631                 bool injected;
632                 bool soft;
633                 u8 nr;
634         } interrupt;
635 
636         int halt_request; /* real mode on Intel only */
637 
638         int cpuid_nent;
639         struct kvm_cpuid_entry2 *cpuid_entries;
640 
641         unsigned long cr3_lm_rsvd_bits;
642         int maxphyaddr;
643         int max_tdp_level;
644 
645         /* emulate context */
646 
647         struct x86_emulate_ctxt *emulate_ctxt;
648         bool emulate_regs_need_sync_to_vcpu;
649         bool emulate_regs_need_sync_from_vcpu;
650         int (*complete_userspace_io)(struct kvm_vcpu *vcpu);
651 
652         gpa_t time;
653         struct pvclock_vcpu_time_info hv_clock;
654         unsigned int hw_tsc_khz;
655         struct gfn_to_hva_cache pv_time;
656         bool pv_time_enabled;
657         /* set guest stopped flag in pvclock flags field */
658         bool pvclock_set_guest_stopped_request;
659 
660         struct {
661                 u8 preempted;
662                 u64 msr_val;
663                 u64 last_steal;
664                 struct gfn_to_pfn_cache cache;
665         } st;
666 
667         u64 l1_tsc_offset;
668         u64 tsc_offset;
669         u64 last_guest_tsc;
670         u64 last_host_tsc;
671         u64 tsc_offset_adjustment;
672         u64 this_tsc_nsec;
673         u64 this_tsc_write;
674         u64 this_tsc_generation;
675         bool tsc_catchup;
676         bool tsc_always_catchup;
677         s8 virtual_tsc_shift;
678         u32 virtual_tsc_mult;
679         u32 virtual_tsc_khz;
680         s64 ia32_tsc_adjust_msr;
681         u64 msr_ia32_power_ctl;
682         u64 tsc_scaling_ratio;
683 
684         atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
685         unsigned nmi_pending; /* NMI queued after currently running handler */
686         bool nmi_injected;    /* Trying to inject an NMI this entry */
687         bool smi_pending;    /* SMI queued after currently running handler */
688 
689         struct kvm_mtrr mtrr_state;
690         u64 pat;
691 
692         unsigned switch_db_regs;
693         unsigned long db[KVM_NR_DB_REGS];
694         unsigned long dr6;
695         unsigned long dr7;
696         unsigned long eff_db[KVM_NR_DB_REGS];
697         unsigned long guest_debug_dr7;
698         u64 msr_platform_info;
699         u64 msr_misc_features_enables;
700 
701         u64 mcg_cap;
702         u64 mcg_status;
703         u64 mcg_ctl;
704         u64 mcg_ext_ctl;
705         u64 *mce_banks;
706 
707         /* Cache MMIO info */
708         u64 mmio_gva;
709         unsigned mmio_access;
710         gfn_t mmio_gfn;
711         u64 mmio_gen;
712 
713         struct kvm_pmu pmu;
714 
715         /* used for guest single stepping over the given code position */
716         unsigned long singlestep_rip;
717 
718         struct kvm_vcpu_hv hyperv;
719 
720         cpumask_var_t wbinvd_dirty_mask;
721 
722         unsigned long last_retry_eip;
723         unsigned long last_retry_addr;
724 
725         struct {
726                 bool halted;
727                 gfn_t gfns[ASYNC_PF_PER_VCPU];
728                 struct gfn_to_hva_cache data;
729                 u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
730                 u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
731                 u16 vec;
732                 u32 id;
733                 bool send_user_only;
734                 u32 host_apf_flags;
735                 unsigned long nested_apf_token;
736                 bool delivery_as_pf_vmexit;
737                 bool pageready_pending;
738         } apf;
739 
740         /* OSVW MSRs (AMD only) */
741         struct {
742                 u64 length;
743                 u64 status;
744         } osvw;
745 
746         struct {
747                 u64 msr_val;
748                 struct gfn_to_hva_cache data;
749         } pv_eoi;
750 
751         u64 msr_kvm_poll_control;
752 
753         /*
754          * Indicates the guest is trying to write a gfn that contains one or
755          * more of the PTEs used to translate the write itself, i.e. the access
756          * is changing its own translation in the guest page tables.  KVM exits
757          * to userspace if emulation of the faulting instruction fails and this
758          * flag is set, as KVM cannot make forward progress.
759          *
760          * If emulation fails for a write to guest page tables, KVM unprotects
761          * (zaps) the shadow page for the target gfn and resumes the guest to
762          * retry the non-emulatable instruction (on hardware).  Unprotecting the
763          * gfn doesn't allow forward progress for a self-changing access because
764          * doing so also zaps the translation for the gfn, i.e. retrying the
765          * instruction will hit a !PRESENT fault, which results in a new shadow
766          * page and sends KVM back to square one.
767          */
768         bool write_fault_to_shadow_pgtable;
769 
770         /* set at EPT violation at this point */
771         unsigned long exit_qualification;
772 
773         /* pv related host specific info */
774         struct {
775                 bool pv_unhalted;
776         } pv;
777 
778         int pending_ioapic_eoi;
779         int pending_external_vector;
780 
781         /* be preempted when it's in kernel-mode(cpl=0) */
782         bool preempted_in_kernel;
783 
784         /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
785         bool l1tf_flush_l1d;
786 
787         /* Host CPU on which VM-entry was most recently attempted */
788         unsigned int last_vmentry_cpu;
789 
790         /* AMD MSRC001_0015 Hardware Configuration */
791         u64 msr_hwcr;
792 
793         /* pv related cpuid info */
794         struct {
795                 /*
796                  * value of the eax register in the KVM_CPUID_FEATURES CPUID
797                  * leaf.
798                  */
799                 u32 features;
800 
801                 /*
802                  * indicates whether pv emulation should be disabled if features
803                  * are not present in the guest's cpuid
804                  */
805                 bool enforce;
806         } pv_cpuid;
807 
808         /* Protected Guests */
809         bool guest_state_protected;
810 };
811 
812 struct kvm_lpage_info {
813         int disallow_lpage;
814 };
815 
816 struct kvm_arch_memory_slot {
817         struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
818         struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
819         unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
820 };
821 
822 /*
823  * We use as the mode the number of bits allocated in the LDR for the
824  * logical processor ID.  It happens that these are all powers of two.
825  * This makes it is very easy to detect cases where the APICs are
826  * configured for multiple modes; in that case, we cannot use the map and
827  * hence cannot use kvm_irq_delivery_to_apic_fast either.
828  */
829 #define KVM_APIC_MODE_XAPIC_CLUSTER          4
830 #define KVM_APIC_MODE_XAPIC_FLAT             8
831 #define KVM_APIC_MODE_X2APIC                16
832 
833 struct kvm_apic_map {
834         struct rcu_head rcu;
835         u8 mode;
836         u32 max_apic_id;
837         union {
838                 struct kvm_lapic *xapic_flat_map[8];
839                 struct kvm_lapic *xapic_cluster_map[16][4];
840         };
841         struct kvm_lapic *phys_map[];
842 };
843 
844 /* Hyper-V synthetic debugger (SynDbg)*/
845 struct kvm_hv_syndbg {
846         struct {
847                 u64 control;
848                 u64 status;
849                 u64 send_page;
850                 u64 recv_page;
851                 u64 pending_page;
852         } control;
853         u64 options;
854 };
855 
856 /* Hyper-V emulation context */
857 struct kvm_hv {
858         struct mutex hv_lock;
859         u64 hv_guest_os_id;
860         u64 hv_hypercall;
861         u64 hv_tsc_page;
862 
863         /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
864         u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
865         u64 hv_crash_ctl;
866 
867         struct ms_hyperv_tsc_page tsc_ref;
868 
869         struct idr conn_to_evt;
870 
871         u64 hv_reenlightenment_control;
872         u64 hv_tsc_emulation_control;
873         u64 hv_tsc_emulation_status;
874 
875         /* How many vCPUs have VP index != vCPU index */
876         atomic_t num_mismatched_vp_indexes;
877 
878         struct hv_partition_assist_pg *hv_pa_pg;
879         struct kvm_hv_syndbg hv_syndbg;
880 };
881 
882 struct msr_bitmap_range {
883         u32 flags;
884         u32 nmsrs;
885         u32 base;
886         unsigned long *bitmap;
887 };
888 
889 enum kvm_irqchip_mode {
890         KVM_IRQCHIP_NONE,
891         KVM_IRQCHIP_KERNEL,       /* created with KVM_CREATE_IRQCHIP */
892         KVM_IRQCHIP_SPLIT,        /* created with KVM_CAP_SPLIT_IRQCHIP */
893 };
894 
895 struct kvm_x86_msr_filter {
896         u8 count;
897         bool default_allow:1;
898         struct msr_bitmap_range ranges[16];
899 };
900 
901 #define APICV_INHIBIT_REASON_DISABLE    0
902 #define APICV_INHIBIT_REASON_HYPERV     1
903 #define APICV_INHIBIT_REASON_NESTED     2
904 #define APICV_INHIBIT_REASON_IRQWIN     3
905 #define APICV_INHIBIT_REASON_PIT_REINJ  4
906 #define APICV_INHIBIT_REASON_X2APIC     5
907 
908 struct kvm_arch {
909         unsigned long n_used_mmu_pages;
910         unsigned long n_requested_mmu_pages;
911         unsigned long n_max_mmu_pages;
912         unsigned int indirect_shadow_pages;
913         u8 mmu_valid_gen;
914         struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
915         /*
916          * Hash table of struct kvm_mmu_page.
917          */
918         struct list_head active_mmu_pages;
919         struct list_head zapped_obsolete_pages;
920         struct list_head lpage_disallowed_mmu_pages;
921         struct kvm_page_track_notifier_node mmu_sp_tracker;
922         struct kvm_page_track_notifier_head track_notifier_head;
923 
924         struct list_head assigned_dev_head;
925         struct iommu_domain *iommu_domain;
926         bool iommu_noncoherent;
927 #define __KVM_HAVE_ARCH_NONCOHERENT_DMA
928         atomic_t noncoherent_dma_count;
929 #define __KVM_HAVE_ARCH_ASSIGNED_DEVICE
930         atomic_t assigned_device_count;
931         struct kvm_pic *vpic;
932         struct kvm_ioapic *vioapic;
933         struct kvm_pit *vpit;
934         atomic_t vapics_in_nmi_mode;
935         struct mutex apic_map_lock;
936         struct kvm_apic_map *apic_map;
937         atomic_t apic_map_dirty;
938 
939         bool apic_access_page_done;
940         unsigned long apicv_inhibit_reasons;
941 
942         gpa_t wall_clock;
943 
944         bool mwait_in_guest;
945         bool hlt_in_guest;
946         bool pause_in_guest;
947         bool cstate_in_guest;
948 
949         unsigned long irq_sources_bitmap;
950         s64 kvmclock_offset;
951         raw_spinlock_t tsc_write_lock;
952         u64 last_tsc_nsec;
953         u64 last_tsc_write;
954         u32 last_tsc_khz;
955         u64 cur_tsc_nsec;
956         u64 cur_tsc_write;
957         u64 cur_tsc_offset;
958         u64 cur_tsc_generation;
959         int nr_vcpus_matched_tsc;
960 
961         spinlock_t pvclock_gtod_sync_lock;
962         bool use_master_clock;
963         u64 master_kernel_ns;
964         u64 master_cycle_now;
965         struct delayed_work kvmclock_update_work;
966         struct delayed_work kvmclock_sync_work;
967 
968         struct kvm_xen_hvm_config xen_hvm_config;
969 
970         /* reads protected by irq_srcu, writes by irq_lock */
971         struct hlist_head mask_notifier_list;
972 
973         struct kvm_hv hyperv;
974 
975         #ifdef CONFIG_KVM_MMU_AUDIT
976         int audit_point;
977         #endif
978 
979         bool backwards_tsc_observed;
980         bool boot_vcpu_runs_old_kvmclock;
981         u32 bsp_vcpu_id;
982 
983         u64 disabled_quirks;
984 
985         enum kvm_irqchip_mode irqchip_mode;
986         u8 nr_reserved_ioapic_pins;
987 
988         bool disabled_lapic_found;
989 
990         bool x2apic_format;
991         bool x2apic_broadcast_quirk_disabled;
992 
993         bool guest_can_read_msr_platform_info;
994         bool exception_payload_enabled;
995 
996         bool bus_lock_detection_enabled;
997 
998         /* Deflect RDMSR and WRMSR to user space when they trigger a #GP */
999         u32 user_space_msr_mask;
1000 
1001         struct kvm_x86_msr_filter __rcu *msr_filter;
1002 
1003         struct kvm_pmu_event_filter *pmu_event_filter;
1004         struct task_struct *nx_lpage_recovery_thread;
1005 
1006         /*
1007          * Whether the TDP MMU is enabled for this VM. This contains a
1008          * snapshot of the TDP MMU module parameter from when the VM was
1009          * created and remains unchanged for the life of the VM. If this is
1010          * true, TDP MMU handler functions will run for various MMU
1011          * operations.
1012          */
1013         bool tdp_mmu_enabled;
1014 
1015         /*
1016          * List of struct kvmp_mmu_pages being used as roots.
1017          * All struct kvm_mmu_pages in the list should have
1018          * tdp_mmu_page set.
1019          * All struct kvm_mmu_pages in the list should have a positive
1020          * root_count except when a thread holds the MMU lock and is removing
1021          * an entry from the list.
1022          */
1023         struct list_head tdp_mmu_roots;
1024 
1025         /*
1026          * List of struct kvmp_mmu_pages not being used as roots.
1027          * All struct kvm_mmu_pages in the list should have
1028          * tdp_mmu_page set and a root_count of 0.
1029          */
1030         struct list_head tdp_mmu_pages;
1031 };
1032 
1033 struct kvm_vm_stat {
1034         ulong mmu_shadow_zapped;
1035         ulong mmu_pte_write;
1036         ulong mmu_pde_zapped;
1037         ulong mmu_flooded;
1038         ulong mmu_recycled;
1039         ulong mmu_cache_miss;
1040         ulong mmu_unsync;
1041         ulong remote_tlb_flush;
1042         ulong lpages;
1043         ulong nx_lpage_splits;
1044         ulong max_mmu_page_hash_collisions;
1045 };
1046 
1047 struct kvm_vcpu_stat {
1048         u64 pf_fixed;
1049         u64 pf_guest;
1050         u64 tlb_flush;
1051         u64 invlpg;
1052 
1053         u64 exits;
1054         u64 io_exits;
1055         u64 mmio_exits;
1056         u64 signal_exits;
1057         u64 irq_window_exits;
1058         u64 nmi_window_exits;
1059         u64 l1d_flush;
1060         u64 halt_exits;
1061         u64 halt_successful_poll;
1062         u64 halt_attempted_poll;
1063         u64 halt_poll_invalid;
1064         u64 halt_wakeup;
1065         u64 request_irq_exits;
1066         u64 irq_exits;
1067         u64 host_state_reload;
1068         u64 fpu_reload;
1069         u64 insn_emulation;
1070         u64 insn_emulation_fail;
1071         u64 hypercalls;
1072         u64 irq_injections;
1073         u64 nmi_injections;
1074         u64 req_event;
1075         u64 halt_poll_success_ns;
1076         u64 halt_poll_fail_ns;
1077 };
1078 
1079 struct x86_instruction_info;
1080 
1081 struct msr_data {
1082         bool host_initiated;
1083         u32 index;
1084         u64 data;
1085 };
1086 
1087 struct kvm_lapic_irq {
1088         u32 vector;
1089         u16 delivery_mode;
1090         u16 dest_mode;
1091         bool level;
1092         u16 trig_mode;
1093         u32 shorthand;
1094         u32 dest_id;
1095         bool msi_redir_hint;
1096 };
1097 
1098 static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
1099 {
1100         return dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
1101 }
1102 
1103 struct kvm_x86_ops {
1104         int (*hardware_enable)(void);
1105         void (*hardware_disable)(void);
1106         void (*hardware_unsetup)(void);
1107         bool (*cpu_has_accelerated_tpr)(void);
1108         bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
1109         void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
1110 
1111         unsigned int vm_size;
1112         int (*vm_init)(struct kvm *kvm);
1113         void (*vm_destroy)(struct kvm *kvm);
1114 
1115         /* Create, but do not attach this VCPU */
1116         int (*vcpu_create)(struct kvm_vcpu *vcpu);
1117         void (*vcpu_free)(struct kvm_vcpu *vcpu);
1118         void (*vcpu_reset)(struct kvm_vcpu *vcpu, bool init_event);
1119 
1120         void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
1121         void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
1122         void (*vcpu_put)(struct kvm_vcpu *vcpu);
1123 
1124         void (*update_exception_bitmap)(struct kvm_vcpu *vcpu);
1125         int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1126         int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1127         u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
1128         void (*get_segment)(struct kvm_vcpu *vcpu,
1129                             struct kvm_segment *var, int seg);
1130         int (*get_cpl)(struct kvm_vcpu *vcpu);
1131         void (*set_segment)(struct kvm_vcpu *vcpu,
1132                             struct kvm_segment *var, int seg);
1133         void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
1134         void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
1135         bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0);
1136         void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
1137         int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
1138         void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1139         void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1140         void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1141         void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
1142         void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
1143         void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
1144         void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
1145         unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
1146         void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
1147 
1148         void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
1149         void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
1150         int  (*tlb_remote_flush)(struct kvm *kvm);
1151         int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
1152                         struct kvm_tlb_range *range);
1153 
1154         /*
1155          * Flush any TLB entries associated with the given GVA.
1156          * Does not need to flush GPA->HPA mappings.
1157          * Can potentially get non-canonical addresses through INVLPGs, which
1158          * the implementation may choose to ignore if appropriate.
1159          */
1160         void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
1161 
1162         /*
1163          * Flush any TLB entries created by the guest.  Like tlb_flush_gva(),
1164          * does not need to flush GPA->HPA mappings.
1165          */
1166         void (*tlb_flush_guest)(struct kvm_vcpu *vcpu);
1167 
1168         enum exit_fastpath_completion (*run)(struct kvm_vcpu *vcpu);
1169         int (*handle_exit)(struct kvm_vcpu *vcpu,
1170                 enum exit_fastpath_completion exit_fastpath);
1171         int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
1172         void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
1173         void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
1174         u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
1175         void (*patch_hypercall)(struct kvm_vcpu *vcpu,
1176                                 unsigned char *hypercall_addr);
1177         void (*set_irq)(struct kvm_vcpu *vcpu);
1178         void (*set_nmi)(struct kvm_vcpu *vcpu);
1179         void (*queue_exception)(struct kvm_vcpu *vcpu);
1180         void (*cancel_injection)(struct kvm_vcpu *vcpu);
1181         int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1182         int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1183         bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
1184         void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
1185         void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
1186         void (*enable_irq_window)(struct kvm_vcpu *vcpu);
1187         void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
1188         bool (*check_apicv_inhibit_reasons)(ulong bit);
1189         void (*pre_update_apicv_exec_ctrl)(struct kvm *kvm, bool activate);
1190         void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
1191         void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
1192         void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
1193         bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
1194         void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
1195         void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
1196         void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
1197         int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
1198         int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
1199         int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
1200         int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
1201         u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
1202 
1203         void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, unsigned long pgd,
1204                              int pgd_level);
1205 
1206         bool (*has_wbinvd_exit)(void);
1207 
1208         /* Returns actual tsc_offset set in active VMCS */
1209         u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
1210 
1211         /*
1212          * Retrieve somewhat arbitrary exit information.  Intended to be used
1213          * only from within tracepoints to avoid VMREADs when tracing is off.
1214          */
1215         void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2,
1216                               u32 *exit_int_info, u32 *exit_int_info_err_code);
1217 
1218         int (*check_intercept)(struct kvm_vcpu *vcpu,
1219                                struct x86_instruction_info *info,
1220                                enum x86_intercept_stage stage,
1221                                struct x86_exception *exception);
1222         void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
1223 
1224         void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
1225 
1226         void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
1227 
1228         /*
1229          * Arch-specific dirty logging hooks. These hooks are only supposed to
1230          * be valid if the specific arch has hardware-accelerated dirty logging
1231          * mechanism. Currently only for PML on VMX.
1232          *
1233          *  - slot_enable_log_dirty:
1234          *      called when enabling log dirty mode for the slot.
1235          *  - slot_disable_log_dirty:
1236          *      called when disabling log dirty mode for the slot.
1237          *      also called when slot is created with log dirty disabled.
1238          *  - flush_log_dirty:
1239          *      called before reporting dirty_bitmap to userspace.
1240          *  - enable_log_dirty_pt_masked:
1241          *      called when reenabling log dirty for the GFNs in the mask after
1242          *      corresponding bits are cleared in slot->dirty_bitmap.
1243          */
1244         void (*slot_enable_log_dirty)(struct kvm *kvm,
1245                                       struct kvm_memory_slot *slot);
1246         void (*slot_disable_log_dirty)(struct kvm *kvm,
1247                                        struct kvm_memory_slot *slot);
1248         void (*flush_log_dirty)(struct kvm *kvm);
1249         void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
1250                                            struct kvm_memory_slot *slot,
1251                                            gfn_t offset, unsigned long mask);
1252         int (*cpu_dirty_log_size)(void);
1253 
1254         /* pmu operations of sub-arch */
1255         const struct kvm_pmu_ops *pmu_ops;
1256         const struct kvm_x86_nested_ops *nested_ops;
1257 
1258         /*
1259          * Architecture specific hooks for vCPU blocking due to
1260          * HLT instruction.
1261          * Returns for .pre_block():
1262          *    - 0 means continue to block the vCPU.
1263          *    - 1 means we cannot block the vCPU since some event
1264          *        happens during this period, such as, 'ON' bit in
1265          *        posted-interrupts descriptor is set.
1266          */
1267         int (*pre_block)(struct kvm_vcpu *vcpu);
1268         void (*post_block)(struct kvm_vcpu *vcpu);
1269 
1270         void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
1271         void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
1272 
1273         int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
1274                               uint32_t guest_irq, bool set);
1275         void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
1276         bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
1277 
1278         int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
1279                             bool *expired);
1280         void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
1281 
1282         void (*setup_mce)(struct kvm_vcpu *vcpu);
1283 
1284         int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
1285         int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
1286         int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
1287         void (*enable_smi_window)(struct kvm_vcpu *vcpu);
1288 
1289         int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
1290         int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
1291         int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
1292 
1293         int (*get_msr_feature)(struct kvm_msr_entry *entry);
1294 
1295         bool (*can_emulate_instruction)(struct kvm_vcpu *vcpu, void *insn, int insn_len);
1296 
1297         bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
1298         int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
1299 
1300         void (*migrate_timers)(struct kvm_vcpu *vcpu);
1301         void (*msr_filter_changed)(struct kvm_vcpu *vcpu);
1302         int (*complete_emulated_msr)(struct kvm_vcpu *vcpu, int err);
1303 
1304         void (*vcpu_deliver_sipi_vector)(struct kvm_vcpu *vcpu, u8 vector);
1305 };
1306 
1307 struct kvm_x86_nested_ops {
1308         int (*check_events)(struct kvm_vcpu *vcpu);
1309         bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
1310         int (*get_state)(struct kvm_vcpu *vcpu,
1311                          struct kvm_nested_state __user *user_kvm_nested_state,
1312                          unsigned user_data_size);
1313         int (*set_state)(struct kvm_vcpu *vcpu,
1314                          struct kvm_nested_state __user *user_kvm_nested_state,
1315                          struct kvm_nested_state *kvm_state);
1316         bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
1317         int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
1318 
1319         int (*enable_evmcs)(struct kvm_vcpu *vcpu,
1320                             uint16_t *vmcs_version);
1321         uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
1322 };
1323 
1324 struct kvm_x86_init_ops {
1325         int (*cpu_has_kvm_support)(void);
1326         int (*disabled_by_bios)(void);
1327         int (*check_processor_compatibility)(void);
1328         int (*hardware_setup)(void);
1329 
1330         struct kvm_x86_ops *runtime_ops;
1331 };
1332 
1333 struct kvm_arch_async_pf {
1334         u32 token;
1335         gfn_t gfn;
1336         unsigned long cr3;
1337         bool direct_map;
1338 };
1339 
1340 extern u64 __read_mostly host_efer;
1341 extern bool __read_mostly allow_smaller_maxphyaddr;
1342 extern struct kvm_x86_ops kvm_x86_ops;
1343 
1344 #define __KVM_HAVE_ARCH_VM_ALLOC
1345 static inline struct kvm *kvm_arch_alloc_vm(void)
1346 {
1347         return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1348 }
1349 void kvm_arch_free_vm(struct kvm *kvm);
1350 
1351 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
1352 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
1353 {
1354         if (kvm_x86_ops.tlb_remote_flush &&
1355             !kvm_x86_ops.tlb_remote_flush(kvm))
1356                 return 0;
1357         else
1358                 return -ENOTSUPP;
1359 }
1360 
1361 int kvm_mmu_module_init(void);
1362 void kvm_mmu_module_exit(void);
1363 
1364 void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
1365 int kvm_mmu_create(struct kvm_vcpu *vcpu);
1366 void kvm_mmu_init_vm(struct kvm *kvm);
1367 void kvm_mmu_uninit_vm(struct kvm *kvm);
1368 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
1369                 u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 p_mask,
1370                 u64 acc_track_mask, u64 me_mask);
1371 
1372 void kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
1373 void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
1374                                       struct kvm_memory_slot *memslot,
1375                                       int start_level);
1376 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
1377                                    const struct kvm_memory_slot *memslot);
1378 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
1379                                    struct kvm_memory_slot *memslot);
1380 void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
1381                                         struct kvm_memory_slot *memslot);
1382 void kvm_mmu_slot_set_dirty(struct kvm *kvm,
1383                             struct kvm_memory_slot *memslot);
1384 void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1385                                    struct kvm_memory_slot *slot,
1386                                    gfn_t gfn_offset, unsigned long mask);
1387 void kvm_mmu_zap_all(struct kvm *kvm);
1388 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
1389 unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
1390 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
1391 
1392 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
1393 bool pdptrs_changed(struct kvm_vcpu *vcpu);
1394 
1395 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
1396                           const void *val, int bytes);
1397 
1398 struct kvm_irq_mask_notifier {
1399         void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
1400         int irq;
1401         struct hlist_node link;
1402 };
1403 
1404 void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
1405                                     struct kvm_irq_mask_notifier *kimn);
1406 void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
1407                                       struct kvm_irq_mask_notifier *kimn);
1408 void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
1409                              bool mask);
1410 
1411 extern bool tdp_enabled;
1412 
1413 u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
1414 
1415 /* control of guest tsc rate supported? */
1416 extern bool kvm_has_tsc_control;
1417 /* maximum supported tsc_khz for guests */
1418 extern u32  kvm_max_guest_tsc_khz;
1419 /* number of bits of the fractional part of the TSC scaling ratio */
1420 extern u8   kvm_tsc_scaling_ratio_frac_bits;
1421 /* maximum allowed value of TSC scaling ratio */
1422 extern u64  kvm_max_tsc_scaling_ratio;
1423 /* 1ull << kvm_tsc_scaling_ratio_frac_bits */
1424 extern u64  kvm_default_tsc_scaling_ratio;
1425 
1426 extern u64 kvm_mce_cap_supported;
1427 
1428 /*
1429  * EMULTYPE_NO_DECODE - Set when re-emulating an instruction (after completing
1430  *                      userspace I/O) to indicate that the emulation context
1431  *                      should be resued as is, i.e. skip initialization of
1432  *                      emulation context, instruction fetch and decode.
1433  *
1434  * EMULTYPE_TRAP_UD - Set when emulating an intercepted #UD from hardware.
1435  *                    Indicates that only select instructions (tagged with
1436  *                    EmulateOnUD) should be emulated (to minimize the emulator
1437  *                    attack surface).  See also EMULTYPE_TRAP_UD_FORCED.
1438  *
1439  * EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
1440  *                 decode the instruction length.  For use *only* by
1441  *                 kvm_x86_ops.skip_emulated_instruction() implementations.
1442  *
1443  * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
1444  *                           retry native execution under certain conditions,
1445  *                           Can only be set in conjunction with EMULTYPE_PF.
1446  *
1447  * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
1448  *                           triggered by KVM's magic "force emulation" prefix,
1449  *                           which is opt in via module param (off by default).
1450  *                           Bypasses EmulateOnUD restriction despite emulating
1451  *                           due to an intercepted #UD (see EMULTYPE_TRAP_UD).
1452  *                           Used to test the full emulator from userspace.
1453  *
1454  * EMULTYPE_VMWARE_GP - Set when emulating an intercepted #GP for VMware
1455  *                      backdoor emulation, which is opt in via module param.
1456  *                      VMware backoor emulation handles select instructions
1457  *                      and reinjects the #GP for all other cases.
1458  *
1459  * EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
1460  *               case the CR2/GPA value pass on the stack is valid.
1461  */
1462 #define EMULTYPE_NO_DECODE          (1 << 0)
1463 #define EMULTYPE_TRAP_UD            (1 << 1)
1464 #define EMULTYPE_SKIP               (1 << 2)
1465 #define EMULTYPE_ALLOW_RETRY_PF     (1 << 3)
1466 #define EMULTYPE_TRAP_UD_FORCED     (1 << 4)
1467 #define EMULTYPE_VMWARE_GP          (1 << 5)
1468 #define EMULTYPE_PF                 (1 << 6)
1469 
1470 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
1471 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
1472                                         void *insn, int insn_len);
1473 
1474 void kvm_enable_efer_bits(u64);
1475 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
1476 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, bool host_initiated);
1477 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data);
1478 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data);
1479 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu);
1480 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu);
1481 
1482 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
1483 int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
1484 int kvm_emulate_halt(struct kvm_vcpu *vcpu);
1485 int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
1486 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
1487 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
1488 
1489 void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
1490 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
1491 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
1492 
1493 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
1494                     int reason, bool has_error_code, u32 error_code);
1495 
1496 void kvm_free_guest_fpu(struct kvm_vcpu *vcpu);
1497 
1498 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0);
1499 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4);
1500 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
1501 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
1502 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
1503 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
1504 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val);
1505 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val);
1506 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
1507 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
1508 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
1509 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr);
1510 
1511 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
1512 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
1513 
1514 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
1515 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
1516 bool kvm_rdpmc(struct kvm_vcpu *vcpu);
1517 
1518 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
1519 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
1520 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
1521 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
1522 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
1523 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
1524 bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
1525                                     struct x86_exception *fault);
1526 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
1527                             gfn_t gfn, void *data, int offset, int len,
1528                             u32 access);
1529 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
1530 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
1531 
1532 static inline int __kvm_irq_line_state(unsigned long *irq_state,
1533                                        int irq_source_id, int level)
1534 {
1535         /* Logical OR for level trig interrupt */
1536         if (level)
1537                 __set_bit(irq_source_id, irq_state);
1538         else
1539                 __clear_bit(irq_source_id, irq_state);
1540 
1541         return !!(*irq_state);
1542 }
1543 
1544 #define KVM_MMU_ROOT_CURRENT            BIT(0)
1545 #define KVM_MMU_ROOT_PREVIOUS(i)        BIT(1+i)
1546 #define KVM_MMU_ROOTS_ALL               (~0UL)
1547 
1548 int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
1549 void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
1550 
1551 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
1552 
1553 void kvm_update_dr7(struct kvm_vcpu *vcpu);
1554 
1555 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
1556 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
1557 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
1558 int kvm_mmu_load(struct kvm_vcpu *vcpu);
1559 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
1560 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
1561 void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
1562                         ulong roots_to_free);
1563 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
1564                            struct x86_exception *exception);
1565 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
1566                               struct x86_exception *exception);
1567 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
1568                                struct x86_exception *exception);
1569 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
1570                                struct x86_exception *exception);
1571 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
1572                                 struct x86_exception *exception);
1573 
1574 bool kvm_apicv_activated(struct kvm *kvm);
1575 void kvm_apicv_init(struct kvm *kvm, bool enable);
1576 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu);
1577 void kvm_request_apicv_update(struct kvm *kvm, bool activate,
1578                               unsigned long bit);
1579 
1580 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
1581 
1582 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
1583                        void *insn, int insn_len);
1584 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
1585 void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
1586                             gva_t gva, hpa_t root_hpa);
1587 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
1588 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
1589                      bool skip_mmu_sync);
1590 
1591 void kvm_configure_mmu(bool enable_tdp, int tdp_max_root_level,
1592                        int tdp_huge_page_level);
1593 
1594 static inline u16 kvm_read_ldt(void)
1595 {
1596         u16 ldt;
1597         asm("sldt %0" : "=g"(ldt));
1598         return ldt;
1599 }
1600 
1601 static inline void kvm_load_ldt(u16 sel)
1602 {
1603         asm("lldt %0" : : "rm"(sel));
1604 }
1605 
1606 #ifdef CONFIG_X86_64
1607 static inline unsigned long read_msr(unsigned long msr)
1608 {
1609         u64 value;
1610 
1611         rdmsrl(msr, value);
1612         return value;
1613 }
1614 #endif
1615 
1616 static inline u32 get_rdx_init_val(void)
1617 {
1618         return 0x600; /* P6 family */
1619 }
1620 
1621 static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
1622 {
1623         kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
1624 }
1625 
1626 #define TSS_IOPB_BASE_OFFSET 0x66
1627 #define TSS_BASE_SIZE 0x68
1628 #define TSS_IOPB_SIZE (65536 / 8)
1629 #define TSS_REDIRECTION_SIZE (256 / 8)
1630 #define RMODE_TSS_SIZE                                                  \
1631         (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
1632 
1633 enum {
1634         TASK_SWITCH_CALL = 0,
1635         TASK_SWITCH_IRET = 1,
1636         TASK_SWITCH_JMP = 2,
1637         TASK_SWITCH_GATE = 3,
1638 };
1639 
1640 #define HF_GIF_MASK             (1 << 0)
1641 #define HF_NMI_MASK             (1 << 3)
1642 #define HF_IRET_MASK            (1 << 4)
1643 #define HF_GUEST_MASK           (1 << 5) /* VCPU is in guest-mode */
1644 #define HF_SMM_MASK             (1 << 6)
1645 #define HF_SMM_INSIDE_NMI_MASK  (1 << 7)
1646 
1647 #define __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
1648 #define KVM_ADDRESS_SPACE_NUM 2
1649 
1650 #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
1651 #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
1652 
1653 asmlinkage void kvm_spurious_fault(void);
1654 
1655 /*
1656  * Hardware virtualization extension instructions may fault if a
1657  * reboot turns off virtualization while processes are running.
1658  * Usually after catching the fault we just panic; during reboot
1659  * instead the instruction is ignored.
1660  */
1661 #define __kvm_handle_fault_on_reboot(insn)                              \
1662         "666: \n\t"                                                     \
1663         insn "\n\t"                                                     \
1664         "jmp    668f \n\t"                                              \
1665         "667: \n\t"                                                     \
1666         "1: \n\t"                                                       \
1667         ".pushsection .discard.instr_begin \n\t"                        \
1668         ".long 1b - . \n\t"                                             \
1669         ".popsection \n\t"                                              \
1670         "call   kvm_spurious_fault \n\t"                                \
1671         "1: \n\t"                                                       \
1672         ".pushsection .discard.instr_end \n\t"                          \
1673         ".long 1b - . \n\t"                                             \
1674         ".popsection \n\t"                                              \
1675         "668: \n\t"                                                     \
1676         _ASM_EXTABLE(666b, 667b)
1677 
1678 #define KVM_ARCH_WANT_MMU_NOTIFIER
1679 int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
1680                         unsigned flags);
1681 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
1682 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
1683 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
1684 int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v);
1685 int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu);
1686 int kvm_cpu_has_extint(struct kvm_vcpu *v);
1687 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
1688 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
1689 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
1690 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
1691 
1692 int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
1693                     unsigned long ipi_bitmap_high, u32 min,
1694                     unsigned long icr, int op_64_bit);
1695 
1696 void kvm_define_user_return_msr(unsigned index, u32 msr);
1697 int kvm_probe_user_return_msr(u32 msr);
1698 int kvm_set_user_return_msr(unsigned index, u64 val, u64 mask);
1699 
1700 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
1701 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc);
1702 
1703 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu);
1704 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
1705 
1706 void kvm_make_mclock_inprogress_request(struct kvm *kvm);
1707 void kvm_make_scan_ioapic_request(struct kvm *kvm);
1708 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
1709                                        unsigned long *vcpu_bitmap);
1710 
1711 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1712                                      struct kvm_async_pf *work);
1713 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1714                                  struct kvm_async_pf *work);
1715 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1716                                struct kvm_async_pf *work);
1717 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
1718 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
1719 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
1720 
1721 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
1722 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
1723 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
1724 
1725 int kvm_is_in_guest(void);
1726 
1727 void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
1728                                      u32 size);
1729 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
1730 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu);
1731 
1732 bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
1733                              struct kvm_vcpu **dest_vcpu);
1734 
1735 void kvm_set_msi_irq(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
1736                      struct kvm_lapic_irq *irq);
1737 
1738 static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
1739 {
1740         /* We can only post Fixed and LowPrio IRQs */
1741         return (irq->delivery_mode == APIC_DM_FIXED ||
1742                 irq->delivery_mode == APIC_DM_LOWEST);
1743 }
1744 
1745 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
1746 {
1747         if (kvm_x86_ops.vcpu_blocking)
1748                 kvm_x86_ops.vcpu_blocking(vcpu);
1749 }
1750 
1751 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
1752 {
1753         if (kvm_x86_ops.vcpu_unblocking)
1754                 kvm_x86_ops.vcpu_unblocking(vcpu);
1755 }
1756 
1757 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
1758 
1759 static inline int kvm_cpu_get_apicid(int mps_cpu)
1760 {
1761 #ifdef CONFIG_X86_LOCAL_APIC
1762         return default_cpu_present_to_apicid(mps_cpu);
1763 #else
1764         WARN_ON_ONCE(1);
1765         return BAD_APICID;
1766 #endif
1767 }
1768 
1769 #define put_smstate(type, buf, offset, val)                      \
1770         *(type *)((buf) + (offset) - 0x7e00) = val
1771 
1772 #define GET_SMSTATE(type, buf, offset)          \
1773         (*(type *)((buf) + (offset) - 0x7e00))
1774 
1775 int kvm_cpu_dirty_log_size(void);
1776 
1777 #endif /* _ASM_X86_KVM_HOST_H */
1778 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | Wiki (Japanese) | Wiki (English) | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

osdn.jp