Linux/arch/powerpc/kvm/book3s_hv_rm_mmu.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;
        /*
         * Assume we don't have huge pages in vmalloc space, so we don't
         * need to worry about THP collapse/split.  Called only in real
         * mode, hence no need for irq_save/restore around the lookup.
         */
        p = __find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
        if (!p || !pte_present(*p))
                return NULL;
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
        int global;

        /*
         * If there is only one vcore, and it's currently running,
         * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
         * we can use tlbiel as long as we mark all other physical
         * cores as potentially having stale TLB entries for this lpid.
         * Otherwise, don't use tlbiel.
         */
        if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
                global = 0;
        else
                global = 1;

        if (!global) {
                /* any other core might now have stale TLB entries... */
                smp_wmb();
                cpumask_setall(&kvm->arch.need_tlb_flush);
                cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
                                  &kvm->arch.need_tlb_flush);
        }

        return global;
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
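/*
 * The reverse map for a guest real page is a circular doubly-linked
 * list of HPTE indices threaded through the forw/back fields of the
 * revmap_entry array; the KVMPPC_RMAP_INDEX field of the page's rmap
 * word holds the index of the head entry.
 */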
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                             unsigned long *rmap, long pte_index, int realmode)
{
        struct revmap_entry *head, *tail;
        unsigned long i;

        if (*rmap & KVMPPC_RMAP_PRESENT) {
                i = *rmap & KVMPPC_RMAP_INDEX;
                head = &kvm->arch.revmap[i];
                if (realmode)
                        head = real_vmalloc_addr(head);
                tail = &kvm->arch.revmap[head->back];
                if (realmode)
                        tail = real_vmalloc_addr(tail);
                rev->forw = i;
                rev->back = head->back;
                tail->forw = pte_index;
                head->back = pte_index;
        } else {
                rev->forw = rev->back = pte_index;
                *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
                        pte_index | KVMPPC_RMAP_PRESENT;
        }
        unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
                                struct revmap_entry *rev,
                                unsigned long hpte_v, unsigned long hpte_r)
{
        struct revmap_entry *next, *prev;
        unsigned long gfn, ptel, head;
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;
        unsigned long rcbits;

        rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
        ptel = rev->guest_rpte |= rcbits;
        gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
        memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
        if (!memslot)
                return;

        rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
        lock_rmap(rmap);

        head = *rmap & KVMPPC_RMAP_INDEX;
        next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
        prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
        next->back = rev->back;
        prev->forw = rev->forw;
        if (head == pte_index) {
                head = rev->forw;
                if (head == pte_index)
                        *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                else
                        *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
        }
        *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
        unlock_rmap(rmap);
}

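/*
 * Core of the H_ENTER hypercall: validate the proposed HPTE, translate
 * the guest real address through the memslot and host page tables, find
 * and lock a slot in the HPT group, link the new entry into the reverse
 * map, and finally write the HPTE and make it visible.
 */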
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                       long pte_index, unsigned long pteh, unsigned long ptel,
                       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn, hva;
        __be64 *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel;
        struct kvm_memory_slot *memslot;
        unsigned hpage_shift;
        unsigned long is_io;
        unsigned long *rmap;
        pte_t *ptep;
        unsigned int writing;
        unsigned long mmu_seq;
        unsigned long rcbits, irq_flags = 0;

        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;
        writing = hpte_is_writable(ptel);
        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
        ptel &= ~HPTE_GR_RESERVED;
        g_ptel = ptel;

        /* used later to detect if we might have been invalidated */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
        pa = 0;
        is_io = ~0ul;
        rmap = NULL;
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
                /* Emulated MMIO - mark this with key=31 */
                pteh |= HPTE_V_ABSENT;
                ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
                goto do_insert;
        }

        /* Check if the requested page fits entirely in the memslot. */
        if (!slot_is_aligned(memslot, psize))
                return H_PARAMETER;
        slot_fn = gfn - memslot->base_gfn;
        rmap = &memslot->arch.rmap[slot_fn];

        /* Translate to host virtual address */
        hva = __gfn_to_hva_memslot(memslot, gfn);
        /*
         * If we had a page table change after the lookup, we would
         * retry via mmu_notifier_retry.
         */
        if (realmode)
                ptep = __find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
        else {
                local_irq_save(irq_flags);
                ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
        }
        if (ptep) {
                pte_t pte;
                unsigned int host_pte_size;

                if (hpage_shift)
                        host_pte_size = 1ul << hpage_shift;
                else
                        host_pte_size = PAGE_SIZE;
                /*
                 * The guest page size should always be <= the host page
                 * size, even when the host is using hugepages.
                 */
                if (host_pte_size < psize) {
                        if (!realmode)
                                local_irq_restore(irq_flags);
                        return H_PARAMETER;
                }
                pte = kvmppc_read_update_linux_pte(ptep, writing);
                if (pte_present(pte) && !pte_protnone(pte)) {
                        if (writing && !pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
                        is_io = hpte_cache_bits(pte_val(pte));
                        pa = pte_pfn(pte) << PAGE_SHIFT;
                        pa |= hva & (host_pte_size - 1);
                        pa |= gpa & ~PAGE_MASK;
                }
        }
        if (!realmode)
                local_irq_restore(irq_flags);

        ptel &= ~(HPTE_R_PP0 - psize);
        ptel |= pa;

        if (pa)
                pteh |= HPTE_V_VALID;
        else
                pteh |= HPTE_V_ABSENT;

        /* Check WIMG */
        if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
                if (is_io)
                        return H_PARAMETER;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
                ptel |= HPTE_R_M;
        }

        /* Find and lock the HPTEG slot to use */
 do_insert:
        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                          HPTE_V_ABSENT))
                                break;
                        hpte += 2;
                }
                if (i == 8) {
                        /*
                         * Since try_lock_hpte doesn't retry (not even stdcx.
                         * failures), it could be that there is a free slot
                         * but we transiently failed to lock it.  Try again,
                         * actually locking each slot and checking it.
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
                                u64 pte;
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
                                pte = be64_to_cpu(hpte[0]);
                                if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
                                        break;
                                __unlock_hpte(hpte, pte);
                                hpte += 2;
                        }
                        if (i == 8)
                                return H_PTEG_FULL;
                }
                pte_index += i;
        } else {
                hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
                        u64 pte;

                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
                        pte = be64_to_cpu(hpte[0]);
                        if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
                                __unlock_hpte(hpte, pte);
                                return H_PTEG_FULL;
                        }
                }
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = &kvm->arch.revmap[pte_index];
        if (realmode)
                rev = real_vmalloc_addr(rev);
        if (rev) {
                rev->guest_rpte = g_ptel;
                note_hpte_modification(kvm, rev);
        }

        /* Link HPTE into reverse-map chain */
        if (pteh & HPTE_V_VALID) {
                if (realmode)
                        rmap = real_vmalloc_addr(rmap);
                lock_rmap(rmap);
                /* Check for pending invalidations under the rmap chain lock */
                if (mmu_notifier_retry(kvm, mmu_seq)) {
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
                        unlock_rmap(rmap);
                } else {
                        kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
                                                realmode);
                        /* Only set R/C in real HPTE if already set in *rmap */
                        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
                        ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
                }
        }

        hpte[1] = cpu_to_be64(ptel);

        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
        eieio();
        __unlock_hpte(hpte, pteh);
        asm volatile("ptesync" : : : "memory");

        *pte_idx_ret = pte_index;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
                                 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->paca_index))
#endif

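/*
 * Acquire the tlbie lock if it is free: retry the lwarx/stwcx. sequence
 * if the store loses its reservation, but give up as soon as the lock is
 * seen to be held.  Returns true if we now hold the lock.
 */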
static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx   %1,0,%2\n"
                     "  cmpwi   cr0,%1,0\n"
                     "  bne     2f\n"
                     "  stwcx.  %3,0,%2\n"
                     "  bne-    1b\n"
                     "  isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}

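/*
 * Invalidate the TLB entries described by rbvalues: with a broadcast
 * tlbie (serialized by tlbie_lock) if global, otherwise with tlbiel on
 * the local cpu only.
 */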
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
                      long npages, int global, bool need_sync)
{
        long i;

        if (global) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i)
                        asm volatile(PPC_TLBIE(%1,%0) : :
                                     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                if (need_sync)
                        asm volatile("ptesync" : : : "memory");
                for (i = 0; i < npages; ++i)
                        asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
                asm volatile("ptesync" : : : "memory");
        }
}

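/*
 * Core of the H_REMOVE hypercall: subject to the H_AVPN/H_ANDCOND
 * checks, invalidate the HPTE at pte_index, flush it from the TLB,
 * and return the previous V and R dwords through hpret.
 */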
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                        unsigned long pte_index, unsigned long avpn,
                        unsigned long *hpret)
{
        __be64 *hpte;
        unsigned long v, r, rb;
        struct revmap_entry *rev;
        u64 pte;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        pte = be64_to_cpu(hpte[0]);
        if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
                __unlock_hpte(hpte, pte);
                return H_NOT_FOUND;
        }

        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        v = pte & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
                hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
                rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
                do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
                /*
                 * The reference (R) and change (C) bits in a HPT
                 * entry can be set by hardware at any time up until
                 * the HPTE is invalidated and the TLB invalidation
                 * sequence has completed.  This means that when
                 * removing a HPTE, we need to re-read the HPTE after
                 * the invalidation sequence has completed in order to
                 * obtain reliable values of R and C.
                 */
                remove_revmap_chain(kvm, pte_index, rev, v,
                                    be64_to_cpu(hpte[1]));
        }
        r = rev->guest_rpte & ~HPTE_GR_RESERVED;
        note_hpte_modification(kvm, rev);
        unlock_hpte(hpte, 0);

        hpret[0] = v;
        hpret[1] = r;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn)
{
        return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
                                  &vcpu->arch.gpr[4]);
}

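/*
 * H_BULK_REMOVE: process up to four remove requests, passed as
 * (flags|index, avpn) pairs in the guest's registers starting at r4.
 * Matching HPTEs are invalidated in a batch so their tlbie sequences
 * can be combined; a result code and the final R/C bits are written
 * back into each request's first word.
 */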
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        __be64 *hp, *hptes[4];
        unsigned long tlbrb[4];
        long int i, j, k, n, found, indexes[4];
        unsigned long flags, req, pte_index, rcbits;
        int global;
        long int ret = H_SUCCESS;
        struct revmap_entry *rev, *revs[4];
        u64 hp0;

        global = global_invalidates(kvm, 0);
        for (i = 0; i < 4 && ret == H_SUCCESS; ) {
                n = 0;
                for (; i < 4; ++i) {
                        j = i * 2;
                        pte_index = args[j];
                        flags = pte_index >> 56;
                        pte_index &= ((1ul << 56) - 1);
                        req = flags >> 6;
                        flags &= 3;
                        if (req == 3) {         /* no more requests */
                                i = 4;
                                break;
                        }
                        if (req != 1 || flags == 3 ||
                            pte_index >= kvm->arch.hpt_npte) {
                                /* parameter error */
                                args[j] = ((0xa0 | flags) << 56) + pte_index;
                                ret = H_PARAMETER;
                                break;
                        }
                        hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
                                        break;
                                while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
                                        cpu_relax();
                        }
                        found = 0;
                        hp0 = be64_to_cpu(hp[0]);
                        if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
                                switch (flags & 3) {
                                case 0:         /* absolute */
                                        found = 1;
                                        break;
                                case 1:         /* andcond */
                                        if (!(hp0 & args[j + 1]))
                                                found = 1;
                                        break;
                                case 2:         /* AVPN */
                                        if ((hp0 & ~0x7fUL) == args[j + 1])
                                                found = 1;
                                        break;
                                }
                        }
                        if (!found) {
                                hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
                                args[j] = ((0x90 | flags) << 56) + pte_index;
                                continue;
                        }

                        args[j] = ((0x80 | flags) << 56) + pte_index;
                        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
                        note_hpte_modification(kvm, rev);

                        if (!(hp0 & HPTE_V_VALID)) {
                                /* insert R and C bits from PTE */
                                rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                                args[j] |= rcbits << (56 - 5);
                                hp[0] = 0;
                                continue;
                        }

                        /* leave it locked */
                        hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
                        tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
                                be64_to_cpu(hp[1]), pte_index);
                        indexes[n] = j;
                        hptes[n] = hp;
                        revs[n] = rev;
                        ++n;
                }

                if (!n)
                        break;

                /* Now that we've collected a batch, do the tlbies */
                do_tlbies(kvm, tlbrb, n, global, true);

                /* Read PTE low words after tlbie to get final R/C values */
                for (k = 0; k < n; ++k) {
                        j = indexes[k];
                        pte_index = args[j] & ((1ul << 56) - 1);
                        hp = hptes[k];
                        rev = revs[k];
                        remove_revmap_chain(kvm, pte_index, rev,
                                be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
                        rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                        args[j] |= rcbits << (56 - 5);
                        __unlock_hpte(hp, 0);
                }
        }

        return ret;
}

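/*
 * H_PROTECT: update the protection and key bits (pp0, pp, n, key) of
 * an existing HPTE, both in the guest's view and, if the entry is
 * valid, in the real HPTE (invalidating it first if the second dword
 * changes).
 */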
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        __be64 *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;
        u64 pte;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;

        hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        pte = be64_to_cpu(hpte[0]);
        if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
                __unlock_hpte(hpte, pte);
                return H_NOT_FOUND;
        }

        v = pte;
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
                note_hpte_modification(kvm, rev);
        }

        /* Update HPTE */
        if (v & HPTE_V_VALID) {
                /*
                 * If the page is valid, don't let it transition from
                 * readonly to writable.  If it should be writable, we'll
                 * take a trap and let the page fault code sort it out.
                 */
                pte = be64_to_cpu(hpte[1]);
                r = (pte & ~mask) | bits;
                if (hpte_is_writable(r) && !hpte_is_writable(pte))
                        r = hpte_make_readonly(r);
                /* If the PTE is changing, invalidate it first */
                if (r != pte) {
                        rb = compute_tlbie_rb(v, r, pte_index);
                        hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
                                              HPTE_V_ABSENT);
                        do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
                                  true);
                        hpte[1] = cpu_to_be64(r);
                }
        }
        unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}

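/*
 * H_READ: return the HPTE at pte_index (or the group of 4 starting
 * there, with H_READ_4) in the guest's registers, substituting the
 * guest's view of the second dword, plus the current R and C bits,
 * for valid entries.
 */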
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        __be64 *hpte;
        unsigned long v, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (pte_index >= kvm->arch.hpt_npte)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
                v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
                r = be64_to_cpu(hpte[1]);
                if (v & HPTE_V_ABSENT) {
                        v &= ~HPTE_V_ABSENT;
                        v |= HPTE_V_VALID;
                }
                if (v & HPTE_V_VALID) {
                        r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
                        r &= ~HPTE_GR_RESERVED;
                }
                vcpu->arch.gpr[4 + i * 2] = v;
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index)
{
        unsigned long rb;

        hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
        rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
                              pte_index);
        do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
                           unsigned long pte_index)
{
        unsigned long rb;
        unsigned char rbyte;

        rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
                              pte_index);
        rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
        /* modify only the second-last byte, which contains the ref bit */
        *((char *)hptep + 14) = rbyte;
        do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

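/* Base page shift for each SLB_VSID_LP encoding when SLB_VSID_L is set */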
static int slb_base_page_shift[4] = {
        24,     /* 16M */
        16,     /* 64k */
        34,     /* 16G */
        20,     /* 1M, unsupported */
};

/*
 * When called from virtual mode, this function must be protected by
 * preempt_disable(); otherwise, being preempted while holding
 * HPTE_V_HVLOCK can lead to deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                              unsigned long valid)
{
        unsigned int i;
        unsigned int pshift;
        unsigned long somask;
        unsigned long vsid, hash;
        unsigned long avpn;
        __be64 *hpte;
        unsigned long mask, val;
        unsigned long v, r;

        /* Get page shift, work out hash and AVPN etc. */
        mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
        val = 0;
        pshift = 12;
        if (slb_v & SLB_VSID_L) {
                mask |= HPTE_V_LARGE;
                val |= HPTE_V_LARGE;
                pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
        }
        if (slb_v & SLB_VSID_B_1T) {
                somask = (1UL << 40) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
                vsid ^= vsid << 25;
        } else {
                somask = (1UL << 28) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
        hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
        avpn = slb_v & ~(somask >> 16); /* also includes B */
        avpn |= (eaddr & somask) >> 16;

        if (pshift >= 24)
                avpn &= ~((1UL << (pshift - 16)) - 1);
        else
                avpn &= ~0x7fUL;
        val |= avpn;

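        /* Search the primary hash bucket, then the secondary */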
        for (;;) {
                hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));

                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
                        v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;

                        /* Check valid/absent, hash, segment size and AVPN */
                        if (!(v & valid) || (v & mask) != val)
                                continue;

                        /* Lock the PTE and read it under the lock */
                        while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
                                cpu_relax();
                        v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
                        r = be64_to_cpu(hpte[i+1]);

                        /*
                         * Check the HPTE again, including base page size
                         */
                        if ((v & valid) && (v & mask) == val &&
                            hpte_base_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);

                        __unlock_hpte(&hpte[i], v);
                }

                if (val & HPTE_V_SECONDARY)
                        break;
                val |= HPTE_V_SECONDARY;
                hash = hash ^ kvm->arch.hpt_mask;
        }
        return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the
 * interrupt should be passed along to the guest,
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data)
{
        struct kvm *kvm = vcpu->kvm;
        long int index;
        unsigned long v, r, gr;
        __be64 *hpte;
        unsigned long valid;
        struct revmap_entry *rev;
        unsigned long pp, key;

        /* For protection fault, expect to find a valid HPTE */
        valid = HPTE_V_VALID;
        if (status & DSISR_NOHPTE)
                valid |= HPTE_V_ABSENT;

        index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
        if (index < 0) {
                if (status & DSISR_NOHPTE)
                        return status;  /* there really was no HPTE */
                return 0;               /* for prot fault, HPTE disappeared */
        }
        hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
        v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
        r = be64_to_cpu(hpte[1]);
        rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
        gr = rev->guest_rpte;

        unlock_hpte(hpte, v);

        /* For not found, if the HPTE is valid by now, retry the instruction */
        if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
                return 0;

        /* Check access permissions to the page */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        status &= ~DSISR_NOHPTE;        /* DSISR_NOHPTE == SRR1_ISI_NOPT */
        if (!data) {
                if (gr & (HPTE_R_N | HPTE_R_G))
                        return status | SRR1_ISI_N_OR_G;
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | SRR1_ISI_PROT;
        } else if (status & DSISR_ISSTORE) {
                /* check write permission */
                if (!hpte_write_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        } else {
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        }

        /* Check storage key, if applicable */
        if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
                unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (status & DSISR_ISSTORE)
                        perm >>= 1;
                if (perm & 1)
                        return status | DSISR_KEYFAULT;
        }

        /* Save HPTE info for virtual-mode handler */
        vcpu->arch.pgfault_addr = addr;
        vcpu->arch.pgfault_index = index;
        vcpu->arch.pgfault_hpte[0] = v;
        vcpu->arch.pgfault_hpte[1] = r;

        /* Check the storage key to see if it is possibly emulated MMIO */
        if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
            (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
            (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
                return -2;      /* MMIO emulation - load instr word */

        return -1;              /* send fault up to host kernel mode */
}
