TOMOYO Linux Cross Reference
Linux/include/linux/kvm_host.h


#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MMIO_SIZE
#define KVM_MMIO_SIZE 8
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID     (1UL << 16)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS  2

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 52-62 to indicate an error pfn and
 * bit 63 to indicate the noslot pfn.
 */
#define KVM_PFN_ERR_MASK        (0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK (0xfffULL << 52)
#define KVM_PFN_NOSLOT          (0x1ULL << 63)

#define KVM_PFN_ERR_FAULT       (KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON    (KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT    (KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host
 * failed to translate it to a pfn.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn cannot be translated
 * to a pfn: it is either not in a slot, or the translation to
 * a pfn failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
        return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
        return pfn == KVM_PFN_NOSLOT;
}
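
/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): one way a caller might fold the pfn classes above into errno
 * values before using the pfn.
 */
static inline int kvm_example_pfn_to_errno(pfn_t pfn)
{
        if (is_noslot_pfn(pfn))
                return -ENOENT; /* gfn is not backed by any memslot */
        if (is_error_pfn(pfn))
                return -EFAULT; /* gfn is in a slot, but translation failed */
        return 0;               /* valid pfn */
}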

#define KVM_HVA_ERR_BAD         (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD      (PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
        return addr >= PAGE_OFFSET;
}

#define KVM_ERR_PTR_BAD_PAGE    (ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
        return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_PMU               15
#define KVM_REQ_PMI               16
#define KVM_REQ_WATCHDOG          17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS 19
#define KVM_REQ_EPR_EXIT          20
#define KVM_REQ_SCAN_IOAPIC       21

#define KVM_USERSPACE_IRQ_SOURCE_ID             0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID        1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;

extern raw_spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
        int dev_count;
        int ioeventfd_count;
        struct kvm_io_range range[];
};

enum kvm_bus {
        KVM_MMIO_BUS,
        KVM_PIO_BUS,
        KVM_VIRTIO_CCW_NOTIFY_BUS,
        KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                               struct kvm_io_device *dev);
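
/*
 * Usage sketch (illustrative): a device model registers itself on a bus
 * and is then called back for guest accesses in that range. Registration
 * is done under kvm->slots_lock:
 *
 *      mutex_lock(&kvm->slots_lock);
 *      ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, dev);
 *      mutex_unlock(&kvm->slots_lock);
 *
 * kvm_io_bus_write()/kvm_io_bus_read() then dispatch guest MMIO or PIO
 * accesses to the matching kvm_io_device.
 */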

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct mm_struct *mm;
        gva_t gva;
        unsigned long addr;
        struct kvm_arch_async_pf arch;
        struct page *page;
        bool done;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
        OUTSIDE_GUEST_MODE,
        IN_GUEST_MODE,
        EXITING_GUEST_MODE,
        READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
        gpa_t gpa;
        void *data;
        unsigned len;
};

struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int cpu;
        int vcpu_id;
        int srcu_idx;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        struct mutex mutex;
        struct kvm_run *run;

        int fpu_active;
        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct pid *pid;
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_cur_fragment;
        int mmio_nr_fragments;
        struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                u32 queued;
                struct list_head queue;
                struct list_head done;
                spinlock_t lock;
        } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
        /*
         * The cpu-relax-intercept / pause-loop-exit optimization.
         * in_spin_loop: set when a vcpu takes a pause-loop exit or its
         *  cpu-relax instruction is intercepted.
         * dy_eligible: indicates whether the vcpu is eligible for directed
         *  yield.
         */
        struct {
                bool in_spin_loop;
                bool dy_eligible;
        } spin_loop;
#endif
        bool preempted;
        struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps;
 * this number must be chosen so that such limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        struct kvm_arch_memory_slot arch;
        unsigned long userspace_addr;
        u32 flags;
        short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
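
/*
 * Worked example: a 1 GiB slot with 4 KiB pages has npages = 262144;
 * on a 64-bit host (BITS_PER_LONG = 64), ALIGN(262144, 64) = 262144,
 * so the dirty bitmap occupies 262144 / 8 = 32 KiB.
 */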

struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        u32 type;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level,
                   bool line_status);
        union {
                struct {
                        unsigned irqchip;
                        unsigned pin;
                } irqchip;
                struct msi_msg msi;
        };
        struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        u32 nr_rt_entries;
        /*
         * Array indexed by gsi. Each entry contains list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[];
};

#else

struct kvm_irq_routing_table {};

#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
        u64 generation;
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        short id_to_index[KVM_MEM_SLOTS_NUM];
};

struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct mutex lock;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t        lock;
                struct list_head  items;
                struct list_head  resampler_list;
                struct mutex      resampler_lock;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif

        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
        long tlbs_dirty;
        struct list_head devices;
};

#define kvm_err(fmt, ...) \
        pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
        pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
        pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
        pr_err_ratelimited("kvm [%i]: " fmt, \
                           task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)                                     \
        kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        smp_rmb();
        return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)

#define kvm_for_each_memslot(memslot, slots)    \
        for (memslot = &slots->memslots[0];     \
              memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
                memslot++)
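
/*
 * Usage sketch (hypothetical helper, not part of this header's API):
 * looking up a vcpu by its id with kvm_for_each_vcpu().
 */
static inline struct kvm_vcpu *kvm_example_find_vcpu(struct kvm *kvm, int id)
{
        struct kvm_vcpu *vcpu;
        int idx;

        kvm_for_each_vcpu(idx, vcpu, kvm)
                if (vcpu->vcpu_id == id)
                        return vcpu;    /* found the matching vcpu */
        return NULL;                    /* no online vcpu with this id */
}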

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
        return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
                  struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
                     u64 last_generation);

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}
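
/*
 * Reader pattern (illustrative): memslots are protected by SRCU, so a
 * lookup is bracketed by srcu_read_lock()/srcu_read_unlock():
 *
 *      idx = srcu_read_lock(&kvm->srcu);
 *      memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
 *      ... use memslot ...
 *      srcu_read_unlock(&kvm->srcu, idx);
 */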

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}

/*
 * The KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
        KVM_MR_CREATE,
        KVM_MR_DELETE,
        KVM_MR_MOVE,
        KVM_MR_FLAGS_ONLY,
};
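
/*
 * Illustrative classification (a hedged sketch of the decision
 * __kvm_set_memory_region() makes; 'old' is the current slot contents and
 * npages/base_gfn/flags describe the requested region):
 *
 *      if (npages && !old.npages)              change = KVM_MR_CREATE;
 *      else if (!npages && old.npages)         change = KVM_MR_DELETE;
 *      else if (base_gfn != old.base_gfn)      change = KVM_MR_MOVE;
 *      else if (flags != old.flags)            change = KVM_MR_FLAGS_ONLY;
 */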

int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_userspace_memory_region *mem,
                                enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                        struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                        bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                    struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
        return vcpu->arch.wqp;
#else
        return &vcpu->wq;
#endif
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        int assigned_dev_id;
        int host_segnr;
        int host_busnr;
        int host_devfn;
        unsigned int entries_nr;
        int host_irq;
        bool host_irq_disabled;
        bool pci_2_3;
        struct msix_entry *host_msix_entries;
        int guest_irq;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        int irq_source_id;
        int flags;
        struct pci_dev *dev;
        struct kvm *kvm;
        spinlock_t intx_lock;
        spinlock_t intx_mask_lock;
        char irq_name[32];
        struct pci_saved_state *pci_saved_state;
};

struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY       0x1

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
                                         struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}
#endif

static inline void kvm_guest_enter(void)
{
        unsigned long flags;

        BUG_ON(preemptible());

        local_irq_save(flags);
        guest_enter();
        local_irq_restore(flags);

        /* KVM does not hold any references to rcu protected data when it
         * switches CPU into guest mode. In fact switching to guest mode
         * is very similar to exiting to userspace from an RCU point of
         * view. In addition, a CPU may stay in guest mode for quite a long
         * time (up to one time slice). Let's treat guest mode as a
         * quiescent state, just like we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}

static inline void kvm_guest_exit(void)
{
        unsigned long flags;

        local_irq_save(flags);
        guest_exit();
        local_irq_restore(flags);
}

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
        struct kvm_memory_slot *memslot;

        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                      gfn < memslot->base_gfn + memslot->npages)
                        return memslot;

        return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
        return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
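
/*
 * Worked example (assuming x86, where KVM_HPAGE_GFN_SHIFT(level) is
 * 9 * (level - 1)): for 2 MiB pages (level 2),
 * gfn_to_index(0x403, 0x400, 2) = (0x403 >> 9) - (0x400 >> 9) = 2 - 2 = 0,
 * i.e. both gfns fall into the slot's first large page.
 */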

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
        gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

        return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}
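
/*
 * Worked example: with 4 KiB pages (PAGE_SHIFT = 12),
 * gpa_to_gfn(0x12345678) = 0x12345, and gfn_to_gpa(0x12345) = 0x12345000,
 * the page-aligned gpa.
 */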

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
        KVM_STAT_VM,
        KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
        if (unlikely(kvm->mmu_notifier_count))
                return 1;
        /*
         * Ensure the read of mmu_notifier_count happens before the read
         * of mmu_notifier_seq.  This interacts with the smp_wmb() in
         * mmu_notifier_invalidate_range_end to make sure that the caller
         * either sees the old (non-zero) value of mmu_notifier_count or
         * the new (incremented) value of mmu_notifier_seq.
         * PowerPC Book3s HV KVM calls this under a per-page lock rather
         * than under kvm->mmu_lock, for scalability, so it cannot rely
         * on kvm->mmu_lock to keep things ordered.
         */
        smp_rmb();
        if (kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
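
/*
 * Illustrative retry pattern (a sketch of how page-fault handlers use
 * mmu_notifier_retry(); not a complete function): sample the sequence
 * count before the possibly-sleeping pfn lookup, then install the mapping
 * under mmu_lock only if no invalidation ran in between:
 *
 *      mmu_seq = kvm->mmu_notifier_seq;
 *      smp_rmb();
 *      pfn = gfn_to_pfn(kvm, gfn);
 *      spin_lock(&kvm->mmu_lock);
 *      if (mmu_notifier_retry(kvm, mmu_seq))
 *              goto out_unlock;        (start over with a fresh pfn)
 *      ...install pfn in the page tables...
 *      spin_unlock(&kvm->mmu_lock);
 */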

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr,
                        unsigned flags);
int kvm_set_routing_entry(struct kvm_irq_routing_table *rt,
                          struct kvm_kernel_irq_routing_entry *e,
                          const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

void kvm_free_all_assigned_devices(struct kvm *kvm);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}

#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}
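
/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): the producer side posts a bit with kvm_make_request() and then
 * kicks the vcpu; the vcpu entry path consumes it with
 * kvm_check_request(), which clears the bit so the request fires exactly
 * once.
 */
static inline bool kvm_example_consume_tlb_flush(struct kvm_vcpu *vcpu)
{
        return kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu);
}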

extern bool kvm_rebooting;

struct kvm_device_ops;

struct kvm_device {
        struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
};
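
/*
 * Minimal illustrative implementation (hypothetical "example" device;
 * the names below are not part of the API):
 *
 *      static int example_create(struct kvm_device *dev, u32 type)
 *      {
 *              dev->private = NULL;
 *              return 0;
 *      }
 *
 *      static void example_destroy(struct kvm_device *dev)
 *      {
 *              kfree(dev);     (destroy must free dev itself)
 *      }
 *
 *      static struct kvm_device_ops example_ops = {
 *              .name    = "example",
 *              .create  = example_create,
 *              .destroy = example_destroy,
 *      };
 */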

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}

static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
        return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
#endif
