TOMOYO Linux Cross Reference
Linux/arch/powerpc/kvm/book3s_64_vio_hv.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>

#ifdef CONFIG_BUG

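/*
 * Real-mode variant of WARN_ON_ONCE(). The generic WARN_ON_ONCE() is
 * presumably avoided here because it relies on trap instructions and the
 * program check handler, which are not safe to take in real mode; print
 * the warning directly instead.
 */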
#define WARN_ON_ONCE_RM(condition)      ({                      \
        static bool __section(.data.unlikely) __warned;         \
        int __ret_warn_once = !!(condition);                    \
                                                                \
        if (unlikely(__ret_warn_once && !__warned)) {           \
                __warned = true;                                \
                pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",      \
                                __stringify(condition),         \
                                __func__, __LINE__);            \
                dump_stack();                                   \
        }                                                       \
        unlikely(__ret_warn_once);                              \
})

#else

#define WARN_ON_ONCE_RM(condition) ({                           \
        int __ret_warn_on = !!(condition);                      \
        unlikely(__ret_warn_on);                                \
})

#endif

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
                unsigned long liobn)
{
        struct kvmppc_spapr_tce_table *stt;

        list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
                if (stt->liobn == liobn)
                        return stt;

        return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
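/*
 * Translates the guest physical address in a TCE to a host userspace address
 * via the "raw" (real-mode safe) memslot array, and optionally returns a
 * pointer to the rmap entry for the page so the caller can lock it.
 */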
static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
                unsigned long *ua, unsigned long **prmap)
{
        unsigned long gfn = tce >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot;

        memslot = search_memslots(kvm_memslots_raw(kvm), gfn);
        if (!memslot)
                return -EINVAL;

        *ua = __gfn_to_hva_memslot(memslot, gfn) |
                (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

        if (prmap)
                *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];

        return 0;
}

/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * whether the page was actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_PARAMETER;

        if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
                return H_TOO_HARD;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem)
                        return H_TOO_HARD;

                if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
                        return H_TOO_HARD;
        }

        return H_SUCCESS;
}

/*
 * Note on the use of page_address() in real mode:
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address(),
 * which returns __va(PFN_PHYS(page_to_pfn(page))). This is a purely
 * arithmetic operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently, but
 * either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL would have to be
 * enabled for that.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64.
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
        return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Cannot fail, so kvmppc_rm_tce_validate must be called before it.
 */
static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt,
                unsigned long idx, unsigned long tce)
{
        struct page *page;
        u64 *tbl;

        idx -= stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        /*
         * page must not be NULL in real mode,
         * kvmppc_rm_ioba_validate() must have taken care of this.
         */
        WARN_ON_ONCE_RM(!page);
        tbl = kvmppc_page_address(page);

        tbl[idx % TCES_PER_PAGE] = tce;
}

/*
 * TCE pages would be allocated in kvmppc_rm_tce_put(), but that cannot be
 * done in real mode.
 * Check whether kvmppc_rm_tce_put() can succeed in real mode, i.e. whether
 * the TCE pages are already allocated or not required at all (when clearing
 * a TCE entry).
 */
static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long ioba, unsigned long npages, bool clearing)
{
        unsigned long i, idx, sttpage, sttpages;
        unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages);

        if (ret)
                return ret;
        /*
         * clearing==true says kvmppc_rm_tce_put won't be allocating pages
         * for empty tces.
         */
        if (clearing)
                return H_SUCCESS;

        idx = (ioba >> stt->page_shift) - stt->offset;
        sttpage = idx / TCES_PER_PAGE;
        sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) /
                        TCES_PER_PAGE;
        for (i = sttpage; i < sttpage + sttpages; ++i)
                if (!stt->pages[i])
                        return H_TOO_HARD;

        return H_SUCCESS;
}

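/*
 * Exchanges a TCE in the hardware IOMMU table in real mode. If the old
 * entry mapped the page for DMA from the device, mark the page dirty via
 * its cached userspace address.
 */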
static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
                unsigned long entry, unsigned long *hpa,
                enum dma_data_direction *direction)
{
        long ret;

        ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);

        if (!ret && ((*direction == DMA_FROM_DEVICE) ||
                                (*direction == DMA_BIDIRECTIONAL))) {
                __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
                /*
                 * kvmppc_rm_tce_iommu_do_map() updates the UA cache only
                 * after calling this, so the UA we read here is still valid.
                 */
                if (pua && *pua)
                        mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
        }

        return ret;
}

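/*
 * Resets a hardware TCE entry to no access. This is a best-effort cleanup
 * path, so a failure of the real-mode exchange is deliberately ignored.
 */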
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
}

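/*
 * Finds the preregistered memory region backing the entry's cached userspace
 * address, decrements its mapped-pages counter and clears the cached address.
 */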
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

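/*
 * Clears a single hardware TCE entry and, if it held a valid mapping, drops
 * the reference which kvmppc_rm_tce_iommu_do_map() took on the preregistered
 * memory region. If dropping the reference fails, the old entry is restored.
 */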
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
                 */
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret)
                iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);

        return ret;
}

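/*
 * Unmaps a guest TCE entry. The guest page size may be bigger than the host
 * IOMMU page size, in which case one guest entry covers several hardware
 * entries ("subpages") which are unmapped one by one.
 */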
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

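/*
 * Maps a single hardware TCE entry to the host physical address backing the
 * given userspace address. The address must be within preregistered memory
 * (found via mm_iommu_lookup_rm()); its mapped-pages counter is incremented
 * and the userspace address is cached in it_userspace for later unmapping.
 */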
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa = 0;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                return H_TOO_HARD;

        if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
                        &hpa)))
                return H_TOO_HARD;

        if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
                return H_TOO_HARD;

        ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
        if (ret) {
                mm_iommu_mapped_dec(mem);
                /*
                 * real mode xchg can fail if struct page crosses
                 * a page boundary
                 */
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

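/*
 * Maps a guest TCE entry, iterating over all the hardware subpage entries
 * that the (possibly larger) guest IOMMU page covers.
 */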
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

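/*
 * Real-mode handler for the H_PUT_TCE hypercall: validates the request,
 * maps or unmaps the entry in the hardware tables of all attached IOMMU
 * groups and mirrors the TCE into the guest view of the table.
 */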
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_rm_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        dir = iommu_tce_direction(tce);
        if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
                return H_PARAMETER;

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry, ua, dir);

                if (ret != H_SUCCESS) {
                        kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
                        return ret;
                }
        }

        kvmppc_rm_tce_put(stt, entry, tce);

        return H_SUCCESS;
}

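/*
 * Translates a userspace address to a host physical address by walking the
 * userspace process page tables (vcpu->arch.pgdir) in real mode. Anything
 * beyond the simple case (huge pages, pages not yet marked accessed) is
 * refused so the caller falls back to virtual mode.
 */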
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
                unsigned long ua, unsigned long *phpa)
{
        pte_t *ptep, pte;
        unsigned shift = 0;

        /*
         * Called in real mode with MSR_EE = 0. We are safe here.
         * It is ok to do the lookup with arch.pgdir here, because
         * we are doing this on secondary cpus and current task there
         * is not the hypervisor. Also this is safe against THP in the
         * host, because an IPI to the primary thread will wait for the
         * secondary to exit, which in turn lets the page table walk
         * below finish.
         */
        ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
        if (!ptep || !pte_present(*ptep))
                return -ENXIO;
        pte = *ptep;

        if (!shift)
                shift = PAGE_SHIFT;

        /* Avoid handling anything potentially complicated in realmode */
        if (shift > PAGE_SHIFT)
                return -EAGAIN;

        if (!pte_young(pte))
                return -EAGAIN;

        *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
                        (ua & ~PAGE_MASK);

        return 0;
}

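/*
 * Real-mode handler for the H_PUT_TCE_INDIRECT hypercall: reads a list of up
 * to 512 TCEs from guest memory and puts them all into the table. The list
 * itself is accessed either through preregistered memory or, failing that,
 * through a real-mode page table walk with the page's rmap entry locked.
 */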
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS;
        unsigned long tces, entry, ua = 0;
        unsigned long *rmap = NULL;
        bool prereg = false;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The spec says that the maximum size of the list is 512 TCEs,
         * so the whole table fits in a single 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false);
        if (ret != H_SUCCESS)
                return ret;

        if (mm_iommu_preregistered(vcpu->kvm->mm)) {
                /*
                 * We get here if guest memory was pre-registered, which
                 * is normally the VFIO case, where the gpa->hpa translation
                 * does not depend on the HPT.
                 */
                struct mm_iommu_table_group_mem_t *mem;

                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
                        return H_TOO_HARD;

                mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
                if (mem)
                        prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
                                        IOMMU_PAGE_SHIFT_4K, &tces) == 0;
        }

        if (!prereg) {
                /*
                 * This is usually the case of a guest with emulated devices
                 * only, when the TCE list is not in preregistered memory.
                 * We do not require memory to be preregistered in this case,
                 * so lock the rmap and walk the page tables directly (see
                 * kvmppc_rm_ua_to_hpa()).
                 */
                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
                        return H_TOO_HARD;

                rmap = (void *) vmalloc_to_phys(rmap);
                if (WARN_ON_ONCE_RM(!rmap))
                        return H_TOO_HARD;

                /*
                 * Synchronize with the MMU notifier callbacks in
                 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
                 * While we have the rmap lock, code running on other CPUs
                 * cannot finish unmapping the host real page that backs
                 * this guest real page, so we are OK to access the host
                 * real page.
                 */
                lock_rmap(rmap);
                if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
        }

        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ret = kvmppc_rm_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

                ua = 0;
                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
                                                entry + i);
                                goto unlock_exit;
                        }
                }

                kvmppc_rm_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        if (rmap)
                unlock_rmap(rmap);

        return ret;
}

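/*
 * Real-mode handler for the H_STUFF_TCE hypercall: sets npages consecutive
 * TCEs to the same value, which must have no permission bits set.
 */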
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        /* For radix, we might be in virtual mode, so punt */
        if (kvm_is_radix(vcpu->kvm))
                return H_TOO_HARD;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only, to allow userspace to poison TCEs for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE_RM(1);
                        kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return H_SUCCESS;
}

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret;
        unsigned long idx;
        struct page *page;
        u64 *tbl;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        idx = (ioba >> stt->page_shift) - stt->offset;
        page = stt->pages[idx / TCES_PER_PAGE];
        if (!page) {
                vcpu->arch.regs.gpr[4] = 0;
                return H_SUCCESS;
        }
        tbl = (u64 *)page_address(page);

        vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */
