/*
 * Support KVM guest page tracking
 *
 * This feature allows us to track page access in guest. Currently, only
 * write access is tracked.
 *
 * Copyright(C) 2015 Intel Corporation.
 *
 * Author:
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <linux/rculist.h>

#include <asm/kvm_host.h>
#include <asm/kvm_page_track.h>

#include "mmu.h"

void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
				 struct kvm_memory_slot *dont)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++)
		if (!dont || free->arch.gfn_track[i] !=
		      dont->arch.gfn_track[i]) {
			kvfree(free->arch.gfn_track[i]);
			free->arch.gfn_track[i] = NULL;
		}
}

int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
				  unsigned long npages)
{
	int i;

	for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
		slot->arch.gfn_track[i] =
			kvcalloc(npages, sizeof(*slot->arch.gfn_track[i]),
				 GFP_KERNEL);
		if (!slot->arch.gfn_track[i])
			goto track_free;
	}

	return 0;

track_free:
	kvm_page_track_free_memslot(slot, NULL);
	return -ENOMEM;
}

static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
{
	if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
		return false;

	return true;
}

static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
			     enum kvm_page_track_mode mode, short count)
{
	int index, val;

	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);

	val = slot->arch.gfn_track[mode][index];

	if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
		return;

	slot->arch.gfn_track[mode][index] += count;
}

/*
 * Add a guest page to the tracking pool so that corresponding access on
 * that page will be intercepted.
 *
 * It should be called under the protection of mmu-lock, and of either
 * kvm->srcu or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memory slot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */
void kvm_slot_page_track_add_page(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn,
				  enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	update_gfn_track(slot, gfn, mode, 1);

	/*
	 * new track stops large page mapping for the
	 * tracked page.
	 */
	kvm_mmu_gfn_disallow_lpage(slot, gfn);

	if (mode == KVM_PAGE_TRACK_WRITE)
		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
			kvm_flush_remote_tlbs(kvm);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
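/*
 * Example (illustrative sketch only, not part of this file's API): a
 * hypothetical caller that write-protects a single guest page. The
 * function name and the exact locking shown are assumptions for
 * illustration; the lock ordering follows the rule documented above
 * (mmu-lock plus either kvm->srcu or kvm->slots_lock).
 */
static __maybe_unused void example_track_one_gfn(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	slot = gfn_to_memslot(kvm, gfn);
	if (slot) {
		spin_lock(&kvm->mmu_lock);
		kvm_slot_page_track_add_page(kvm, slot, gfn,
					     KVM_PAGE_TRACK_WRITE);
		spin_unlock(&kvm->mmu_lock);
	}
	srcu_read_unlock(&kvm->srcu, idx);
}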
/*
 * Remove the guest page from the tracking pool, which stops the
 * interception of corresponding access on that page. It is the opposite
 * operation of kvm_slot_page_track_add_page().
 *
 * It should be called under the protection of mmu-lock, and of either
 * kvm->srcu or kvm->slots_lock.
 *
 * @kvm: the guest instance we are interested in.
 * @slot: the memory slot that @gfn belongs to.
 * @gfn: the guest page.
 * @mode: tracking mode, currently only write track is supported.
 */
void kvm_slot_page_track_remove_page(struct kvm *kvm,
				     struct kvm_memory_slot *slot, gfn_t gfn,
				     enum kvm_page_track_mode mode)
{
	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return;

	update_gfn_track(slot, gfn, mode, -1);

	/*
	 * allow large page mapping for the tracked page
	 * after the tracker is gone.
	 */
	kvm_mmu_gfn_allow_lpage(slot, gfn);
}
EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);

/*
 * Check whether the corresponding access on the specified guest page is
 * tracked.
 */
bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
			      enum kvm_page_track_mode mode)
{
	struct kvm_memory_slot *slot;
	int index;

	if (WARN_ON(!page_track_mode_is_valid(mode)))
		return false;

	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
	if (!slot)
		return false;

	index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
	return !!READ_ONCE(slot->arch.gfn_track[mode][index]);
}

void kvm_page_track_cleanup(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	cleanup_srcu_struct(&head->track_srcu);
}

void kvm_page_track_init(struct kvm *kvm)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;
	init_srcu_struct(&head->track_srcu);
	INIT_HLIST_HEAD(&head->track_notifier_list);
}

/*
 * Register the notifier so that event interception for the tracked guest
 * pages can be received.
 */
void
kvm_page_track_register_notifier(struct kvm *kvm,
				 struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	spin_lock(&kvm->mmu_lock);
	hlist_add_head_rcu(&n->node, &head->track_notifier_list);
	spin_unlock(&kvm->mmu_lock);
}
EXPORT_SYMBOL_GPL(kvm_page_track_register_notifier);

/*
 * Stop receiving the event interception. It is the opposite operation of
 * kvm_page_track_register_notifier().
 */
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
				   struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	spin_lock(&kvm->mmu_lock);
	hlist_del_rcu(&n->node);
	spin_unlock(&kvm->mmu_lock);
	synchronize_srcu(&head->track_srcu);
}
EXPORT_SYMBOL_GPL(kvm_page_track_unregister_notifier);

/*
 * Notify the node that write access is intercepted and write emulation is
 * finished at this time.
 *
 * The node should determine by itself whether the written page is one it
 * is interested in.
 */
void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			  int bytes)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &vcpu->kvm->arch.track_notifier_head;

	if (hlist_empty(&head->track_notifier_list))
		return;

	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
		if (n->track_write)
			n->track_write(vcpu, gpa, new, bytes, n);
	srcu_read_unlock(&head->track_srcu, idx);
}
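/*
 * Example (illustrative sketch only): a hypothetical notifier that logs
 * every intercepted guest write. The names example_track_write,
 * example_node and example_register are assumptions for illustration;
 * only kvm_page_track_register_notifier() and the
 * struct kvm_page_track_notifier_node callback layout come from this
 * file and <asm/kvm_page_track.h>.
 */
static void example_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
				const u8 *new, int bytes,
				struct kvm_page_track_notifier_node *node)
{
	/* A real consumer would first check that it tracks gpa's gfn. */
	pr_debug("tracked write: gpa=%llx bytes=%d\n", gpa, bytes);
}

static struct kvm_page_track_notifier_node example_node = {
	.track_write = example_track_write,
};

static __maybe_unused void example_register(struct kvm *kvm)
{
	kvm_page_track_register_notifier(kvm, &example_node);
}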
/*
 * Notify the node that the memory slot is being removed or moved so that
 * it can drop write-protection for the pages in the memory slot.
 *
 * The node should determine by itself whether it has any write-protected
 * pages in this slot.
 */
void kvm_page_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	struct kvm_page_track_notifier_head *head;
	struct kvm_page_track_notifier_node *n;
	int idx;

	head = &kvm->arch.track_notifier_head;

	if (hlist_empty(&head->track_notifier_list))
		return;

	idx = srcu_read_lock(&head->track_srcu);
	hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
		if (n->track_flush_slot)
			n->track_flush_slot(kvm, slot, n);
	srcu_read_unlock(&head->track_srcu, idx);
}
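/*
 * Example (illustrative sketch only): a hypothetical track_flush_slot
 * callback that untracks every page the consumer had registered in the
 * vanishing slot. example_gfn_is_ours() stands in for the consumer's own
 * bookkeeping and is an assumption for illustration; a real consumer
 * keeps its own record of which gfns it tracks.
 */
static bool example_gfn_is_ours(struct kvm *kvm, gfn_t gfn)
{
	/* Hypothetical lookup in the consumer's own tracking state. */
	return false;
}

static __maybe_unused void
example_track_flush_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			 struct kvm_page_track_notifier_node *node)
{
	gfn_t gfn;

	spin_lock(&kvm->mmu_lock);
	for (gfn = slot->base_gfn; gfn < slot->base_gfn + slot->npages; gfn++)
		if (example_gfn_is_ours(kvm, gfn))
			kvm_slot_page_track_remove_page(kvm, slot, gfn,
							KVM_PAGE_TRACK_WRITE);
	spin_unlock(&kvm->mmu_lock);
}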