TOMOYO Linux Cross Reference
Linux/arch/powerpc/kvm/book3s_64_mmu_host.c

/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"

#define PTE_SIZE 12

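/*
 * Invalidate the host hash table entry that shadows a guest PTE, using
 * the slot remembered in the hpte_cache entry.
 */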
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
                               pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
                               false);
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

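/*
 * Look up the shadow (host) VSID for a guest VSID. An entry may live in
 * either of two slots (see create_sid_map): the hashed slot or its
 * mirror; return NULL if neither holds a valid match.
 */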
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (vcpu->arch.shared->msr & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->valid && (map->guest_vsid == gvsid)) {
                trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
                return map;
        }

        trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
        return NULL;
}

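/*
 * Map a guest page: translate the guest PTE to a host pfn, derive HPTE
 * protection bits from the guest permissions, and insert a matching
 * entry into the host hash page table. Returns -EAGAIN if an MMU
 * notifier invalidation raced with us.
 */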
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        unsigned long vpn;
        pfn_t hpaddr;
        ulong hash, hpteg;
        u64 vsid;
        int ret;
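        /*
         * 0x192 = HPTE_R_R | HPTE_R_C | HPTE_R_M | PP 0b10:
         * referenced/changed bits preset, memory coherence required.
         */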
        int rflags = 0x192;
        int vflags = 0;
        int attempt = 0;
        struct kvmppc_sid_map *map;
        int r = 0;
        int hpsize = MMU_PAGE_4K;
        bool writable;
        unsigned long mmu_seq;
        struct kvm *kvm = vcpu->kvm;
        struct hpte_cache *cpte;
        unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
        unsigned long pfn;

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Get host physical address for gpa */
        pfn = kvmppc_gfn_to_pfn(vcpu, gfn, iswrite, &writable);
        if (is_error_noslot_pfn(pfn)) {
                printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", gfn);
                r = -EINVAL;
                goto out;
        }
        hpaddr = pfn << PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
                WARN_ON(ret < 0);
                map = find_sid_vsid(vcpu, vsid);
        }
        if (!map) {
                printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
                                vsid, orig_pte->eaddr);
                WARN_ON(true);
                r = -EINVAL;
                goto out;
        }

        vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

        kvm_set_pfn_accessed(pfn);
        if (!orig_pte->may_write || !writable)
                rflags |= PP_RXRX;
        else {
                mark_page_dirty(vcpu->kvm, gfn);
                kvm_set_pfn_dirty(pfn);
        }

        if (!orig_pte->may_execute)
                rflags |= HPTE_R_N;
        else
                kvmppc_mmu_flush_icache(pfn);

        /*
         * Use 64K pages if possible; otherwise, on 64K page kernels,
         * we need to transfer 4 more bits from guest real to host real addr.
         */
        if (vsid & VSID_64K)
                hpsize = MMU_PAGE_64K;
        else
                hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

        hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

        cpte = kvmppc_mmu_hpte_cache_next(vcpu);

        spin_lock(&kvm->mmu_lock);
        if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
                r = -EAGAIN;
                goto out_unlock;
        }

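        /*
         * Each hash bucket is a group of HPTES_PER_GROUP slots; on a
         * failed insert we flip to the secondary group (~hash), and from
         * the third attempt on we evict an existing entry first.
         */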
map_again:
        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

        /* In case we tried normal mapping already, let's nuke old entries */
        if (attempt > 1)
                if (ppc_md.hpte_remove(hpteg) < 0) {
                        r = -1;
                        goto out_unlock;
                }

        ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
                                 hpsize, hpsize, MMU_SEGSIZE_256M);

        if (ret < 0) {
                /* If we couldn't map a primary PTE, try a secondary */
                hash = ~hash;
                vflags ^= HPTE_V_SECONDARY;
                attempt++;
                goto map_again;
        } else {
                trace_kvm_book3s_64_mmu_map(rflags, hpteg,
                                            vpn, hpaddr, orig_pte);

                /* The ppc_md code may give us a secondary entry even though we
                   asked for a primary. Fix up. */
                if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
                        hash = ~hash;
                        hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
                }

                cpte->slot = hpteg + (ret & 7);
                cpte->host_vpn = vpn;
                cpte->pte = *orig_pte;
                cpte->pfn = pfn;
                cpte->pagesize = hpsize;

                kvmppc_mmu_hpte_cache_map(vcpu, cpte);
                cpte = NULL;
        }

out_unlock:
        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        if (cpte)
                kvmppc_mmu_hpte_cache_free(cpte);

out:
        return r;
}

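/*
 * Flush the shadow PTEs for one guest mapping. A 64K host page covers
 * sixteen 4K guest page numbers, so in that case the vpage match
 * ignores the low four bits.
 */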
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        u64 mask = 0xfffffffffULL;
        u64 vsid;

        vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
        if (vsid & VSID_64K)
                mask = 0xffffffff0ULL;
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}

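/*
 * Allocate a shadow VSID for a guest VSID. Entries alternate between
 * the two candidate hash slots, and when the proto-VSID range runs out
 * all shadow mappings are flushed and allocation starts over.
 */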
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (vcpu->arch.shared->msr & MSR_PR)
                gvsid |= VSID_PR;

        /* Colliding guest VSIDs would otherwise keep evicting the same slot,
           so alternate between the two candidate slots on each allocation */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
                vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
        map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

        map->guest_vsid = gvsid;
        map->valid = true;

        trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

        return map;
}

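/*
 * Pick a shadow SLB slot for an ESID: reuse the slot that already maps
 * it, else recycle an invalidated slot, else take a fresh one, purging
 * all segments first if the SLB is full. Slot 0 stays reserved.
 */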
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int i;
        int max_slb_size = 64;
        int found_inval = -1;
        int r;

        if (!svcpu->slb_max)
                svcpu->slb_max = 1;

        /* Are we overwriting? */
        for (i = 1; i < svcpu->slb_max; i++) {
                if (!(svcpu->slb[i].esid & SLB_ESID_V))
                        found_inval = i;
                else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
                        r = i;
                        goto out;
                }
        }

        /* Found a spare entry that was invalidated before */
        if (found_inval > 0) {
                r = found_inval;
                goto out;
        }

        /* No spare invalid entry, so create one */

        if (mmu_slb_size < 64)
                max_slb_size = mmu_slb_size;

        /* Overflowing -> purge */
        if ((svcpu->slb_max) == max_slb_size)
                kvmppc_mmu_flush_segments(vcpu);

        r = svcpu->slb_max;
        svcpu->slb_max++;

out:
        svcpu_put(svcpu);
        return r;
}

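/*
 * Install a shadow SLB entry for the segment containing eaddr, creating
 * a guest->host VSID mapping on demand. Returns -ENOENT if the guest
 * has no translation for this segment.
 */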
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        u64 esid = eaddr >> SID_SHIFT;
        u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
        u64 slb_vsid = SLB_VSID_USER;
        u64 gvsid;
        int slb_index;
        struct kvmppc_sid_map *map;
        int r = 0;

        slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->slb[slb_index].esid = 0;
                r = -ENOENT;
                goto out;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;

        slb_vsid |= (map->host_vsid << 12);
        slb_vsid &= ~SLB_VSID_KP;
        slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
        /* Set host segment base page size to 64K if possible */
        if (gvsid & VSID_64K)
                slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

        svcpu->slb[slb_index].esid = slb_esid;
        svcpu->slb[slb_index].vsid = slb_vsid;

        trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
        svcpu_put(svcpu);
        return r;
}

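/* Invalidate any shadow SLB entry mapping the segment at ea. */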
void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        ulong seg_mask = -seg_size;
        int i;

        for (i = 1; i < svcpu->slb_max; i++) {
                if ((svcpu->slb[i].esid & SLB_ESID_V) &&
                    (svcpu->slb[i].esid & seg_mask) == ea) {
                        /* Invalidate this entry */
                        svcpu->slb[i].esid = 0;
                }
        }

        svcpu_put(svcpu);
}

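/* Drop all shadow SLB entries; slot 0 remains reserved. */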
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        svcpu->slb_max = 1;
        svcpu->slb[0].esid = 0;
        svcpu_put(svcpu);
}

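/* Free the shadow HPTE cache and release the host MMU context. */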
void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_hpte_destroy(vcpu);
        __destroy_context(to_book3s(vcpu)->context_id[0]);
}

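/*
 * Reserve a host MMU context for this vcpu; its context id seeds the
 * proto-VSID range from which shadow VSIDs are generated.
 */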
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;

        err = __init_new_context();
        if (err < 0)
                return -1;
        vcpu3s->context_id[0] = err;

        vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
                                  << ESID_BITS) - 1;
        vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
        vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

        kvmppc_mmu_hpte_init(vcpu);

        return 0;
}
