Linux/arch/powerpc/kvm/book3s_64_mmu_radix.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>

/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
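/*
 * With the 52-bit effective address space enforced in
 * kvmppc_mmu_walk_radix_tree() below, the per-level index bits add up
 * as 13 + 9 + 9 + 9 + 12 = 52 for 4k pages, or 13 + 9 + 9 + 5 + 16 = 52
 * for 64k pages (the page shift supplies the low-order bits).
 */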

unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
                                              gva_t eaddr, void *to, void *from,
                                              unsigned long n)
{
        int uninitialized_var(old_pid), old_lpid;
        unsigned long quadrant, ret = n;
        bool is_load = !!to;

        /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
        if (kvmhv_on_pseries())
                return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
                                          __pa(to), __pa(from), n);

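        /*
         * The top two bits of the effective address select the
         * translation "quadrant": quadrant 1 translates as the guest
         * process (guest LPID and PID), while quadrant 2 is used when
         * pid == 0, i.e. for guest-real-mode accesses.
         */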
        quadrant = 1;
        if (!pid)
                quadrant = 2;
        if (is_load)
                from = (void *) (eaddr | (quadrant << 62));
        else
                to = (void *) (eaddr | (quadrant << 62));

        preempt_disable();

        /* switch the lpid first to avoid running host with unallocated pid */
        old_lpid = mfspr(SPRN_LPID);
        if (old_lpid != lpid)
                mtspr(SPRN_LPID, lpid);
        if (quadrant == 1) {
                old_pid = mfspr(SPRN_PID);
                if (old_pid != pid)
                        mtspr(SPRN_PID, pid);
        }
        isync();

        pagefault_disable();
        if (is_load)
                ret = raw_copy_from_user(to, from, n);
        else
                ret = raw_copy_to_user(to, from, n);
        pagefault_enable();

        /* switch the pid first to avoid running host with unallocated pid */
        if (quadrant == 1 && pid != old_pid)
                mtspr(SPRN_PID, old_pid);
        if (lpid != old_lpid)
                mtspr(SPRN_LPID, old_lpid);
        isync();

        preempt_enable();

        return ret;
}
EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);

static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
                                          void *to, void *from, unsigned long n)
{
        int lpid = vcpu->kvm->arch.lpid;
        int pid = vcpu->arch.pid;
        /* This would cause a data segment interrupt, so don't allow the access */
        if (eaddr & (0x3FFUL << 52))
                return -EINVAL;

        /* If this is a nested guest, use its shadow LPID */
        if (vcpu->arch.nested)
                lpid = vcpu->arch.nested->shadow_lpid;

        /* If accessing quadrant 3 then pid is expected to be 0 */
        if (((eaddr >> 62) & 0x3) == 0x3)
                pid = 0;

        eaddr &= ~(0xFFFUL << 52);

        return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
}

long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
                                 unsigned long n)
{
        long ret;

        ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
        if (ret > 0)
                memset(to + (n - ret), 0, ret);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);

long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
                               unsigned long n)
{
        return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
}
EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);

int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
                               struct kvmppc_pte *gpte, u64 root,
                               u64 *pte_ret_p)
{
        struct kvm *kvm = vcpu->kvm;
        int ret, level, ps;
        unsigned long rts, bits, offset, index;
        u64 pte, base, gpa;
        __be64 rpte;

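        /*
         * The root doubleword packs the radix tree size (RTS) in two
         * split bit fields; the effective address space mapped is
         * 2^(RTS + 31) bytes.  RPDB and RPDS give the root page
         * directory's base address and log2 size.
         */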
        rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
                ((root & RTS2_MASK) >> RTS2_SHIFT);
        bits = root & RPDS_MASK;
        base = root & RPDB_MASK;

        offset = rts + 31;

        /* Current implementations only support 52-bit space */
        if (offset != 52)
                return -EINVAL;

        /* Walk each level of the radix tree */
        for (level = 3; level >= 0; --level) {
                u64 addr;
                /* Check a valid size */
                if (level && bits != p9_supported_radix_bits[level])
                        return -EINVAL;
                if (level == 0 && !(bits == 5 || bits == 9))
                        return -EINVAL;
                offset -= bits;
                index = (eaddr >> offset) & ((1UL << bits) - 1);
                /* Check that low bits of page table base are zero */
                if (base & ((1UL << (bits + 3)) - 1))
                        return -EINVAL;
                /* Read the entry from guest memory */
                addr = base + (index * sizeof(rpte));
                ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
                if (ret) {
                        if (pte_ret_p)
                                *pte_ret_p = addr;
                        return ret;
                }
                pte = __be64_to_cpu(rpte);
                if (!(pte & _PAGE_PRESENT))
                        return -ENOENT;
                /* Check if a leaf entry */
                if (pte & _PAGE_PTE)
                        break;
                /* Get ready to walk the next level */
                base = pte & RPDB_MASK;
                bits = pte & RPDS_MASK;
        }

        /* Need a leaf at lowest level; 512GB pages not supported */
        if (level < 0 || level == 3)
                return -EINVAL;

        /* We found a valid leaf PTE */
        /* Offset is now log base 2 of the page size */
        gpa = pte & 0x01fffffffffff000ul;
        if (gpa & ((1ul << offset) - 1))
                return -EINVAL;
        gpa |= eaddr & ((1ul << offset) - 1);
        for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
                if (offset == mmu_psize_defs[ps].shift)
                        break;
        gpte->page_size = ps;
        gpte->page_shift = offset;

        gpte->eaddr = eaddr;
        gpte->raddr = gpa;

        /* Work out permissions */
        gpte->may_read = !!(pte & _PAGE_READ);
        gpte->may_write = !!(pte & _PAGE_WRITE);
        gpte->may_execute = !!(pte & _PAGE_EXEC);

        gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);

        if (pte_ret_p)
                *pte_ret_p = pte;

        return 0;
}

/*
 * Used to walk a partition or process table radix tree in guest memory
 * Note: We exploit the fact that a partition table and a process
 * table have the same layout, a partition-scoped page table and a
 * process-scoped page table have the same layout, and the 2nd
 * doubleword of a partition table entry has the same layout as
 * the PTCR register.
 */
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
                                     struct kvmppc_pte *gpte, u64 table,
                                     int table_index, u64 *pte_ret_p)
{
        struct kvm *kvm = vcpu->kvm;
        int ret;
        unsigned long size, ptbl, root;
        struct prtb_entry entry;

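        /* PRTS encodes the table size as 2^(PRTS + 12) bytes, so at most 2^36 */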
        if ((table & PRTS_MASK) > 24)
                return -EINVAL;
        size = 1ul << ((table & PRTS_MASK) + 12);

        /* Is the table big enough to contain this entry? */
        if ((table_index * sizeof(entry)) >= size)
                return -EINVAL;

        /* Read the table to find the root of the radix tree */
        ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
        ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
        if (ret)
                return ret;

        /* Root is stored in the first double word */
        root = be64_to_cpu(entry.prtb0);

        return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
}

int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                           struct kvmppc_pte *gpte, bool data, bool iswrite)
{
        u32 pid;
        u64 pte;
        int ret;

        /* Work out effective PID */
        switch (eaddr >> 62) {
        case 0:
                pid = vcpu->arch.pid;
                break;
        case 3:
                pid = 0;
                break;
        default:
                return -EINVAL;
        }
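        /* Quadrants 1 and 2 are hypervisor-only, hence -EINVAL above */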

        ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
                                vcpu->kvm->arch.process_table, pid, &pte);
        if (ret)
                return ret;

        /* Check privilege (applies only to process scoped translations) */
        if (kvmppc_get_msr(vcpu) & MSR_PR) {
                if (pte & _PAGE_PRIVILEGED) {
                        gpte->may_read = 0;
                        gpte->may_write = 0;
                        gpte->may_execute = 0;
                }
        } else {
                if (!(pte & _PAGE_PRIVILEGED)) {
                        /* Check AMR/IAMR to see if strict mode is in force */
                        if (vcpu->arch.amr & (1ul << 62))
                                gpte->may_read = 0;
                        if (vcpu->arch.amr & (1ul << 63))
                                gpte->may_write = 0;
                        if (vcpu->arch.iamr & (1ul << 62))
                                gpte->may_execute = 0;
                }
        }

        return 0;
}

void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
                             unsigned int pshift, unsigned int lpid)
{
        unsigned long psize = PAGE_SIZE;
        int psi;
        long rc;
        unsigned long rb;

        if (pshift)
                psize = 1UL << pshift;
        else
                pshift = PAGE_SHIFT;

        addr &= ~(psize - 1);

        if (!kvmhv_on_pseries()) {
                radix__flush_tlb_lpid_page(lpid, addr, psize);
                return;
        }

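        /*
         * We are ourselves running under a hypervisor here, so we
         * cannot use tlbie directly; ask the host to do the
         * invalidation via the H_TLB_INVALIDATE hcall.
         */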
        psi = shift_to_mmu_psize(pshift);
        rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
        rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
                                lpid, rb);
        if (rc)
                pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}

static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
        long rc;

        if (!kvmhv_on_pseries()) {
                radix__flush_pwc_lpid(lpid);
                return;
        }

        rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
                                lpid, TLBIEL_INVAL_SET_LPID);
        if (rc)
                pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}

static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
                                      unsigned long clr, unsigned long set,
                                      unsigned long addr, unsigned int shift)
{
        return __radix_pte_update(ptep, clr, set);
}

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
                             pte_t *ptep, pte_t pte)
{
        radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
        return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
        kmem_cache_free(kvm_pte_cache, ptep);
}

/* Like pmd_huge() and pmd_large(), but works regardless of config options */
static inline int pmd_is_leaf(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PTE);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
        return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
        kmem_cache_free(kvm_pmd_cache, pmdp);
}

/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
                      unsigned int shift,
                      const struct kvm_memory_slot *memslot,
                      unsigned int lpid)
{
        unsigned long old;
        unsigned long gfn = gpa >> PAGE_SHIFT;
        unsigned long page_size = PAGE_SIZE;
        unsigned long hpa;

        old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
        kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

        /* The following only applies to L1 entries */
        if (lpid != kvm->arch.lpid)
                return;

        if (!memslot) {
                memslot = gfn_to_memslot(kvm, gfn);
                if (!memslot)
                        return;
        }
        if (shift)
                page_size = 1ul << shift;

        gpa &= ~(page_size - 1);
        hpa = old & PTE_RPN_MASK;
        kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);

        if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
                kvmppc_update_dirty_map(memslot, gfn, page_size);
}

/*
 * The kvmppc_unmap_free_p?d functions free existing page tables,
 * recursively descending to clear and free child tables.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of the page fault
 * path (full == false), valid ptes are not expected. There is code to
 * unmap them and emit a warning if any are encountered, but there may
 * already be data corruption due to the unexpected mappings.
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
                                  unsigned int lpid)
{
        if (full) {
                memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
        } else {
                pte_t *p = pte;
                unsigned long it;

                for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
                        if (pte_val(*p) == 0)
                                continue;
                        WARN_ON_ONCE(1);
                        kvmppc_unmap_pte(kvm, p,
                                         pte_pfn(*p) << PAGE_SHIFT,
                                         PAGE_SHIFT, NULL, lpid);
                }
        }

        kvmppc_pte_free(pte);
}

static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
                                  unsigned int lpid)
{
        unsigned long im;
        pmd_t *p = pmd;

        for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
                if (!pmd_present(*p))
                        continue;
                if (pmd_is_leaf(*p)) {
                        if (full) {
                                pmd_clear(p);
                        } else {
                                WARN_ON_ONCE(1);
                                kvmppc_unmap_pte(kvm, (pte_t *)p,
                                         pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
                                         PMD_SHIFT, NULL, lpid);
                        }
                } else {
                        pte_t *pte;

                        pte = pte_offset_map(p, 0);
                        kvmppc_unmap_free_pte(kvm, pte, full, lpid);
                        pmd_clear(p);
                }
        }
        kvmppc_pmd_free(pmd);
}

static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
                                  unsigned int lpid)
{
        unsigned long iu;
        pud_t *p = pud;

        for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
                if (!pud_present(*p))
                        continue;
                if (pud_huge(*p)) {
                        pud_clear(p);
                } else {
                        pmd_t *pmd;

                        pmd = pmd_offset(p, 0);
                        kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
                        pud_clear(p);
                }
        }
        pud_free(kvm->mm, pud);
}

void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
        unsigned long ig;

        for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
                pud_t *pud;

                if (!pgd_present(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                kvmppc_unmap_free_pud(kvm, pud, lpid);
                pgd_clear(pgd);
        }
}

void kvmppc_free_radix(struct kvm *kvm)
{
        if (kvm->arch.pgtable) {
                kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
                                          kvm->arch.lpid);
                pgd_free(kvm->mm, kvm->arch.pgtable);
                kvm->arch.pgtable = NULL;
        }
}

static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
                                        unsigned long gpa, unsigned int lpid)
{
        pte_t *pte = pte_offset_kernel(pmd, 0);

        /*
         * Clearing the pmd entry then flushing the PWC ensures that the pte
         * page can no longer be cached by the MMU, so it can be freed
         * without flushing the PWC again.
         */
        pmd_clear(pmd);
        kvmppc_radix_flush_pwc(kvm, lpid);

        kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}

static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
                                        unsigned long gpa, unsigned int lpid)
{
        pmd_t *pmd = pmd_offset(pud, 0);

        /*
         * Clearing the pud entry then flushing the PWC ensures that the pmd
         * page and any child pte pages will no longer be cached by the MMU,
         * so they can be freed without flushing the PWC again.
         */
        pud_clear(pud);
        kvmppc_radix_flush_pwc(kvm, lpid);

        kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}

/*
 * A number of bits may differ between different faults to the same
 * partition-scoped entry: the RC bits change in the course of cleaning
 * and aging, and the write bit can change either because the access was
 * upgraded or because a read fault raced with a write fault that set
 * those bits first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))

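/*
 * In kvmppc_create_pte(), level 0 installs a normal pte, level 1 a 2MB
 * leaf entry at the pmd level, and level 2 a 1GB leaf entry at the pud
 * level.
 */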
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
                      unsigned long gpa, unsigned int level,
                      unsigned long mmu_seq, unsigned int lpid,
                      unsigned long *rmapp, struct rmap_nested **n_rmap)
{
        pgd_t *pgd;
        pud_t *pud, *new_pud = NULL;
        pmd_t *pmd, *new_pmd = NULL;
        pte_t *ptep, *new_ptep = NULL;
        int ret;

        /* Traverse the guest's 2nd-level tree, allocate new levels needed */
        pgd = pgtable + pgd_index(gpa);
        pud = NULL;
        if (pgd_present(*pgd))
                pud = pud_offset(pgd, gpa);
        else
                new_pud = pud_alloc_one(kvm->mm, gpa);

        pmd = NULL;
        if (pud && pud_present(*pud) && !pud_huge(*pud))
                pmd = pmd_offset(pud, gpa);
        else if (level <= 1)
                new_pmd = kvmppc_pmd_alloc();

        if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
                new_ptep = kvmppc_pte_alloc();

        /* Check if we might have been invalidated; let the guest retry if so */
        spin_lock(&kvm->mmu_lock);
        ret = -EAGAIN;
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;

        /* Now traverse again under the lock and change the tree */
        ret = -ENOMEM;
        if (pgd_none(*pgd)) {
                if (!new_pud)
                        goto out_unlock;
                pgd_populate(kvm->mm, pgd, new_pud);
                new_pud = NULL;
        }
        pud = pud_offset(pgd, gpa);
        if (pud_huge(*pud)) {
                unsigned long hgpa = gpa & PUD_MASK;

                /* Check if we raced and someone else has set the same thing */
                if (level == 2) {
                        if (pud_raw(*pud) == pte_raw(pte)) {
                                ret = 0;
                                goto out_unlock;
                        }
                        /* Valid 1GB page here already, add our extra bits */
                        WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
                                                        PTE_BITS_MUST_MATCH);
                        kvmppc_radix_update_pte(kvm, (pte_t *)pud,
                                              0, pte_val(pte), hgpa, PUD_SHIFT);
                        ret = 0;
                        goto out_unlock;
                }
                /*
                 * If we raced with another CPU which has just put
                 * a 1GB pte in after we saw a pmd page, try again.
                 */
                if (!new_pmd) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
                /* Valid 1GB page here already, remove it */
                kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
                                 lpid);
        }
        if (level == 2) {
                if (!pud_none(*pud)) {
                        /*
                         * There's a page table page here, but we wanted to
                         * install a large page, so remove and free the page
                         * table page.
                         */
                        kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
                }
                kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
                if (rmapp && n_rmap)
                        kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
                ret = 0;
                goto out_unlock;
        }
        if (pud_none(*pud)) {
                if (!new_pmd)
                        goto out_unlock;
                pud_populate(kvm->mm, pud, new_pmd);
                new_pmd = NULL;
        }
        pmd = pmd_offset(pud, gpa);
        if (pmd_is_leaf(*pmd)) {
                unsigned long lgpa = gpa & PMD_MASK;

                /* Check if we raced and someone else has set the same thing */
                if (level == 1) {
                        if (pmd_raw(*pmd) == pte_raw(pte)) {
                                ret = 0;
                                goto out_unlock;
                        }
                        /* Valid 2MB page here already, add our extra bits */
                        WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
                                                        PTE_BITS_MUST_MATCH);
                        kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
                                        0, pte_val(pte), lgpa, PMD_SHIFT);
                        ret = 0;
                        goto out_unlock;
                }

                /*
                 * If we raced with another CPU which has just put
                 * a 2MB pte in after we saw a pte page, try again.
                 */
                if (!new_ptep) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
                /* Valid 2MB page here already, remove it */
                kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
                                 lpid);
        }
        if (level == 1) {
                if (!pmd_none(*pmd)) {
                        /*
                         * There's a page table page here, but we wanted to
                         * install a large page, so remove and free the page
                         * table page.
                         */
                        kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
                }
                kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
                if (rmapp && n_rmap)
                        kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
                ret = 0;
                goto out_unlock;
        }
        if (pmd_none(*pmd)) {
                if (!new_ptep)
                        goto out_unlock;
                pmd_populate(kvm->mm, pmd, new_ptep);
                new_ptep = NULL;
        }
        ptep = pte_offset_kernel(pmd, gpa);
        if (pte_present(*ptep)) {
                /* Check if someone else set the same thing */
                if (pte_raw(*ptep) == pte_raw(pte)) {
                        ret = 0;
                        goto out_unlock;
                }
                /* Valid page here already, add our extra bits */
                WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
                                                        PTE_BITS_MUST_MATCH);
                kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
                ret = 0;
                goto out_unlock;
        }
        kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
        if (rmapp && n_rmap)
                kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
        ret = 0;

 out_unlock:
        spin_unlock(&kvm->mmu_lock);
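        /* Free any levels we allocated above but did not install */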
        if (new_pud)
                pud_free(kvm->mm, new_pud);
        if (new_pmd)
                kvmppc_pmd_free(new_pmd);
        if (new_ptep)
                kvmppc_pte_free(new_ptep);
        return ret;
}

bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
                             unsigned long gpa, unsigned int lpid)
{
        unsigned long pgflags;
        unsigned int shift;
        pte_t *ptep;

        /*
         * Need to set an R or C bit in the 2nd-level tables;
         * since we are just helping out the hardware here,
         * it is sufficient to do what the hardware does.
         */
        pgflags = _PAGE_ACCESSED;
        if (writing)
                pgflags |= _PAGE_DIRTY;
        /*
         * We are walking the secondary (partition-scoped) page table here.
         * We can do this without disabling irq because the Linux MM
         * subsystem doesn't do THP splits and collapses on this tree.
         */
        ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
                kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
                return true;
        }
        return false;
}

int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
                                   unsigned long gpa,
                                   struct kvm_memory_slot *memslot,
                                   bool writing, bool kvm_ro,
                                   pte_t *inserted_pte, unsigned int *levelp)
{
        struct kvm *kvm = vcpu->kvm;
        struct page *page = NULL;
        unsigned long mmu_seq;
        unsigned long hva, gfn = gpa >> PAGE_SHIFT;
        bool upgrade_write = false;
        bool *upgrade_p = &upgrade_write;
        pte_t pte, *ptep;
        unsigned int shift, level;
        int ret;
        bool large_enable;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();
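        /*
         * The mmu_seq sample above pairs with the mmu_notifier_retry()
         * check done under the mmu_lock in kvmppc_create_pte(); if an
         * invalidation runs in between, the insertion fails with
         * -EAGAIN and the guest simply retries the access.
         */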

        /*
         * Do a fast check first, since __gfn_to_pfn_memslot doesn't
         * do it with !atomic && !async, which is how we call it.
         * We always ask for write permission since the common case
         * is that the page is writable.
         */
        hva = gfn_to_hva_memslot(memslot, gfn);
        if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
                upgrade_write = true;
        } else {
                unsigned long pfn;

                /* Call KVM generic code to do the slow-path check */
                pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
                                           writing, upgrade_p);
                if (is_error_noslot_pfn(pfn))
                        return -EFAULT;
                page = NULL;
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageReserved(page))
                                page = NULL;
                }
        }

        /*
         * Read the PTE from the process' radix tree and use that
         * so we get the shift and attribute bits.
         */
        local_irq_disable();
        ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
        /*
         * If the PTE disappeared temporarily due to a THP
         * collapse, just return and let the guest try again.
         */
        if (!ptep) {
                local_irq_enable();
                if (page)
                        put_page(page);
                return RESUME_GUEST;
        }
        pte = *ptep;
        local_irq_enable();

        /* If we're logging dirty pages, always map single pages */
        large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);

        /* Get pte level from shift/size */
        if (large_enable && shift == PUD_SHIFT &&
            (gpa & (PUD_SIZE - PAGE_SIZE)) ==
            (hva & (PUD_SIZE - PAGE_SIZE))) {
                level = 2;
        } else if (large_enable && shift == PMD_SHIFT &&
                   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
                   (hva & (PMD_SIZE - PAGE_SIZE))) {
                level = 1;
        } else {
                level = 0;
                if (shift > PAGE_SHIFT) {
                        /*
                         * If the pte maps more than one page, bring over
                         * bits from the virtual address to get the real
                         * address of the specific single page we want.
                         */
                        unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
                        pte = __pte(pte_val(pte) | (hva & rpnmask));
                }
        }

        pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
        if (writing || upgrade_write) {
                if (pte_val(pte) & _PAGE_WRITE)
                        pte = __pte(pte_val(pte) | _PAGE_DIRTY);
        } else {
                pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
        }

        /* Allocate space in the tree and write the PTE */
        ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
                                mmu_seq, kvm->arch.lpid, NULL, NULL);
        if (inserted_pte)
                *inserted_pte = pte;
        if (levelp)
                *levelp = level;

        if (page) {
                if (!ret && (pte_val(pte) & _PAGE_WRITE))
                        set_page_dirty_lock(page);
                put_page(page);
        }

        return ret;
}

int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned long ea, unsigned long dsisr)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long gpa, gfn;
        struct kvm_memory_slot *memslot;
        long ret;
        bool writing = !!(dsisr & DSISR_ISSTORE);
        bool kvm_ro = false;

        /* Check for unusual errors */
        if (dsisr & DSISR_UNSUPP_MMU) {
                pr_err("KVM: Got unsupported MMU fault\n");
                return -EFAULT;
        }
        if (dsisr & DSISR_BADACCESS) {
                /* Reflect to the guest as DSI */
                pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
                kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                return RESUME_GUEST;
        }

        /* Translate the logical address */
        gpa = vcpu->arch.fault_gpa & ~0xfffUL;
        gpa &= ~0xF000000000000000ul;
        gfn = gpa >> PAGE_SHIFT;
        if (!(dsisr & DSISR_PRTABLE_FAULT))
                gpa |= ea & 0xfff;

        /* Get the corresponding memslot */
        memslot = gfn_to_memslot(kvm, gfn);

        /* No memslot means it's an emulated MMIO region */
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
                             DSISR_SET_RC)) {
                        /*
                         * Bad address in guest page table tree, or other
                         * unusual error - reflect it to the guest as DSI.
                         */
                        kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                        return RESUME_GUEST;
                }
                return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
        }

        if (memslot->flags & KVM_MEM_READONLY) {
                if (writing) {
                        /* give the guest a DSI */
                        kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
                                                       DSISR_PROTFAULT);
                        return RESUME_GUEST;
                }
                kvm_ro = true;
        }

        /* Failed to set the reference/change bits */
        if (dsisr & DSISR_SET_RC) {
                spin_lock(&kvm->mmu_lock);
                if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
                                            writing, gpa, kvm->arch.lpid))
                        dsisr &= ~DSISR_SET_RC;
                spin_unlock(&kvm->mmu_lock);

                if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
                               DSISR_PROTFAULT | DSISR_SET_RC)))
                        return RESUME_GUEST;
        }

        /* Try to insert a pte */
        ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
                                             kvm_ro, NULL, NULL);

        if (ret == 0 || ret == -EAGAIN)
                ret = RESUME_GUEST;
        return ret;
}

/* Called with kvm->mmu_lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                    unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep))
                kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
                                 kvm->arch.lpid);
        return 0;
}

/* Called with kvm->mmu_lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                  unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        int ref = 0;
        unsigned long old, *rmapp;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
                old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
                                              gpa, shift);
                /* XXX need to flush tlb here? */
                /* Also clear bit in ptes in shadow pgtable for nested guests */
                rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
                kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
                                               old & PTE_RPN_MASK,
                                               1UL << shift);
                ref = 1;
        }
        return ref;
}

/* Called with kvm->mmu_lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                       unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        int ref = 0;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep))
                ref = 1;
        return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
                                struct kvm_memory_slot *memslot, int pagenum)
{
        unsigned long gfn = memslot->base_gfn + pagenum;
        unsigned long gpa = gfn << PAGE_SHIFT;
        pte_t *ptep;
        unsigned int shift;
        int ret = 0;
        unsigned long old, *rmapp;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
                ret = 1;
                if (shift)
                        ret = 1 << (shift - PAGE_SHIFT);
                spin_lock(&kvm->mmu_lock);
                old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
                                              gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
                /* Also clear bit in ptes in shadow pgtable for nested guests */
                rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
                kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
                                               old & PTE_RPN_MASK,
                                               1UL << shift);
                spin_unlock(&kvm->mmu_lock);
        }
        return ret;
}

long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
                        struct kvm_memory_slot *memslot, unsigned long *map)
{
        unsigned long i, j;
        int npages;

        for (i = 0; i < memslot->npages; i = j) {
                npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

                /*
                 * Note that if npages > 0 then i must be a multiple of npages,
                 * since huge pages are only used to back the guest at guest
                 * real addresses that are a multiple of their size.
                 * Since we have at most one PTE covering any given guest
                 * real address, if npages > 1 we can skip to i + npages.
                 */
                j = i + 1;
                if (npages) {
                        set_dirty_bits(map, i, npages);
                        j = i + npages;
                }
        }
        return 0;
}

void kvmppc_radix_flush_memslot(struct kvm *kvm,
                                const struct kvm_memory_slot *memslot)
{
        unsigned long n;
        pte_t *ptep;
        unsigned long gpa;
        unsigned int shift;

        gpa = memslot->base_gfn << PAGE_SHIFT;
        spin_lock(&kvm->mmu_lock);
        for (n = memslot->npages; n; --n) {
                ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
                if (ptep && pte_present(*ptep))
                        kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
                                         kvm->arch.lpid);
                gpa += PAGE_SIZE;
        }
        spin_unlock(&kvm->mmu_lock);
}

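/*
 * Each ap_encodings entry carries the radix AP (actual page size)
 * field in the top 3 bits (bit 29 up) and the log2 page size in the
 * low bits, as reported to userspace by KVM_PPC_GET_RMMU_INFO.
 */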
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
                                 int psize, int *indexp)
{
        if (!mmu_psize_defs[psize].shift)
                return;
        info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
                (mmu_psize_defs[psize].ap << 29);
        ++(*indexp);
}

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
        int i;

        if (!radix_enabled())
                return -EINVAL;
        memset(info, 0, sizeof(*info));

        /* 4k page size */
        info->geometries[0].page_shift = 12;
        info->geometries[0].level_bits[0] = 9;
        for (i = 1; i < 4; ++i)
                info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
        /* 64k page size */
        info->geometries[1].page_shift = 16;
        for (i = 0; i < 4; ++i)
                info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

        i = 0;
        add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

        return 0;
}

int kvmppc_init_vm_radix(struct kvm *kvm)
{
        kvm->arch.pgtable = pgd_alloc(kvm->mm);
        if (!kvm->arch.pgtable)
                return -ENOMEM;
        return 0;
}

static void pte_ctor(void *addr)
{
        memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}

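/*
 * Per-open state for the debugfs "radix" file: formatted output is
 * staged in buf, with chars_left/buf_index letting a partially-copied
 * line resume across read() calls; gpa records the walk position and
 * lpid steps through the L1 table and then each nested guest's shadow
 * page table.
 */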
struct debugfs_radix_state {
        struct kvm      *kvm;
        struct mutex    mutex;
        unsigned long   gpa;
        int             lpid;
        int             chars_left;
        int             buf_index;
        char            buf[128];
        u8              hdr;
};

static int debugfs_radix_open(struct inode *inode, struct file *file)
{
        struct kvm *kvm = inode->i_private;
        struct debugfs_radix_state *p;

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        kvm_get_kvm(kvm);
        p->kvm = kvm;
        mutex_init(&p->mutex);
        file->private_data = p;

        return nonseekable_open(inode, file);
}

static int debugfs_radix_release(struct inode *inode, struct file *file)
{
        struct debugfs_radix_state *p = file->private_data;

        kvm_put_kvm(p->kvm);
        kfree(p);
        return 0;
}

static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
                                 size_t len, loff_t *ppos)
{
        struct debugfs_radix_state *p = file->private_data;
        ssize_t ret, r;
        unsigned long n;
        struct kvm *kvm;
        unsigned long gpa;
        pgd_t *pgt;
        struct kvm_nested_guest *nested;
        pgd_t pgd, *pgdp;
        pud_t pud, *pudp;
        pmd_t pmd, *pmdp;
        pte_t *ptep;
        int shift;
        unsigned long pte;

        kvm = p->kvm;
        if (!kvm_is_radix(kvm))
                return 0;

        ret = mutex_lock_interruptible(&p->mutex);
        if (ret)
                return ret;

        if (p->chars_left) {
                n = p->chars_left;
                if (n > len)
                        n = len;
                r = copy_to_user(buf, p->buf + p->buf_index, n);
                n -= r;
                p->chars_left -= n;
                p->buf_index += n;
                buf += n;
                len -= n;
                ret = n;
                if (r) {
                        if (!n)
                                ret = -EFAULT;
                        goto out;
                }
        }

        gpa = p->gpa;
        nested = NULL;
        pgt = NULL;
        while (len != 0 && p->lpid >= 0) {
                if (gpa >= RADIX_PGTABLE_RANGE) {
                        gpa = 0;
                        pgt = NULL;
                        if (nested) {
                                kvmhv_put_nested(nested);
                                nested = NULL;
                        }
                        p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
                        p->hdr = 0;
                        if (p->lpid < 0)
                                break;
                }
                if (!pgt) {
                        if (p->lpid == 0) {
                                pgt = kvm->arch.pgtable;
                        } else {
                                nested = kvmhv_get_nested(kvm, p->lpid, false);
                                if (!nested) {
                                        gpa = RADIX_PGTABLE_RANGE;
                                        continue;
                                }
                                pgt = nested->shadow_pgtable;
                        }
                }
                n = 0;
                if (!p->hdr) {
                        if (p->lpid > 0)
                                n = scnprintf(p->buf, sizeof(p->buf),
                                              "\nNested LPID %d: ", p->lpid);
                        n += scnprintf(p->buf + n, sizeof(p->buf) - n,
                                      "pgdir: %lx\n", (unsigned long)pgt);
                        p->hdr = 1;
                        goto copy;
                }

                pgdp = pgt + pgd_index(gpa);
                pgd = READ_ONCE(*pgdp);
                if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
                        gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
                        continue;
                }

                pudp = pud_offset(&pgd, gpa);
                pud = READ_ONCE(*pudp);
                if (!(pud_val(pud) & _PAGE_PRESENT)) {
                        gpa = (gpa & PUD_MASK) + PUD_SIZE;
                        continue;
                }
                if (pud_val(pud) & _PAGE_PTE) {
                        pte = pud_val(pud);
                        shift = PUD_SHIFT;
                        goto leaf;
                }

                pmdp = pmd_offset(&pud, gpa);
                pmd = READ_ONCE(*pmdp);
                if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
                        gpa = (gpa & PMD_MASK) + PMD_SIZE;
                        continue;
                }
                if (pmd_val(pmd) & _PAGE_PTE) {
                        pte = pmd_val(pmd);
                        shift = PMD_SHIFT;
                        goto leaf;
                }

                ptep = pte_offset_kernel(&pmd, gpa);
                pte = pte_val(READ_ONCE(*ptep));
                if (!(pte & _PAGE_PRESENT)) {
                        gpa += PAGE_SIZE;
                        continue;
                }
                shift = PAGE_SHIFT;
        leaf:
                n = scnprintf(p->buf, sizeof(p->buf),
                              " %lx: %lx %d\n", gpa, pte, shift);
                gpa += 1ul << shift;
        copy:
                p->chars_left = n;
                if (n > len)
                        n = len;
                r = copy_to_user(buf, p->buf, n);
                n -= r;
                p->chars_left -= n;
                p->buf_index = n;
                buf += n;
                len -= n;
                ret += n;
                if (r) {
                        if (!ret)
                                ret = -EFAULT;
                        break;
                }
        }
        p->gpa = gpa;
        if (nested)
                kvmhv_put_nested(nested);

 out:
        mutex_unlock(&p->mutex);
        return ret;
}

static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
                           size_t len, loff_t *ppos)
{
        return -EACCES;
}

static const struct file_operations debugfs_radix_fops = {
        .owner   = THIS_MODULE,
        .open    = debugfs_radix_open,
        .release = debugfs_radix_release,
        .read    = debugfs_radix_read,
        .write   = debugfs_radix_write,
        .llseek  = generic_file_llseek,
};

void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
        kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
                                                     kvm->arch.debugfs_dir, kvm,
                                                     &debugfs_radix_fops);
}

int kvmppc_radix_init(void)
{
        unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

        kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
        if (!kvm_pte_cache)
                return -ENOMEM;

        size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

        kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
        if (!kvm_pmd_cache) {
                kmem_cache_destroy(kvm_pte_cache);
                return -ENOMEM;
        }

        return 0;
}

void kvmppc_radix_exit(void)
{
        kmem_cache_destroy(kvm_pte_cache);
        kmem_cache_destroy(kvm_pmd_cache);
}