/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <linux/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)     \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
        (*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
        (*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({      u64 __ret; \
        __asm__ __volatile__("ldxa [%1] %2, %0" \
                             : "=r" (__ret) \
                             : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
                             : "memory"); \
        __ret; \
})
#define iommu_write(__reg, __val) \
        __asm__ __volatile__("stxa %0, [%1] %2" \
                             : /* no outputs */ \
                             : "r" (__val), "r" (__reg), \
                               "i" (ASI_PHYS_BYPASS_EC_E))
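
/* iommu_read()/iommu_write() are 64-bit PIO accessors: ldxa/stxa with
 * ASI_PHYS_BYPASS_EC_E access the given physical address directly
 * (uncached, side effects enabled), which is what the memory-mapped
 * IOMMU and streaming-buffer control registers require.
 */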

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
        struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
        if (iommu->iommu_flushinv) {
                iommu_write(iommu->iommu_flushinv, ~(u64)0);
        } else {
                unsigned long tag;
                int entry;

                tag = iommu->iommu_tags;
                for (entry = 0; entry < 16; entry++) {
                        iommu_write(tag, 0);
                        tag += 8;
                }

                /* Ensure completion of previous PIO writes. */
                (void) iommu_read(iommu->write_complete_reg);
        }
}

#define IOPTE_CONSISTENT(CTX) \
        (IOPTE_VALID | IOPTE_CACHE | \
         (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
        (IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
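
/* Two flavors of IOPTE protection are used below: "consistent"
 * mappings are valid, cacheable translations, while "streaming"
 * mappings additionally set IOPTE_STBUF so that DMA flows through the
 * streaming buffer and must be flushed explicitly (see strbuf_flush()
 * below).
 */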

/* Existing mappings are never marked invalid; instead, they are
 * pointed at a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)    \
        ((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
        unsigned long val = iopte_val(*iopte);

        val &= ~IOPTE_PAGE;
        val |= iommu->dummy_page_pa;

        iopte_val(*iopte) = val;
}

int iommu_table_init(struct iommu *iommu, int tsbsize,
                     u32 dma_offset, u32 dma_addr_mask,
                     int numa_node)
{
        unsigned long i, order, sz, num_tsb_entries;
        struct page *page;

        num_tsb_entries = tsbsize / sizeof(iopte_t);

        /* Setup initial software IOMMU state. */
        spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
        iommu->tbl.table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_addr_mask;

        /* Allocate and initialize the free area map.  */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
        iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
        if (!iommu->tbl.map)
                return -ENOMEM;
        memset(iommu->tbl.map, 0, sz);

        iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
                            (tlb_type != hypervisor ? iommu_flushall : NULL),
                            false, 1, false);

        /* Allocate and initialize the dummy page which we
         * set inactive IO PTEs to point to.
         */
        page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
        if (!page) {
                printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
                goto out_free_map;
        }
        iommu->dummy_page = (unsigned long) page_address(page);
        memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
        iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

        /* Now allocate and setup the IOMMU page table itself.  */
        order = get_order(tsbsize);
        page = alloc_pages_node(numa_node, GFP_KERNEL, order);
        if (!page) {
                printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
                goto out_free_dummy_page;
        }
        iommu->page_table = (iopte_t *)page_address(page);

        for (i = 0; i < num_tsb_entries; i++)
                iopte_make_dummy(iommu, &iommu->page_table[i]);

        return 0;

out_free_dummy_page:
        free_page(iommu->dummy_page);
        iommu->dummy_page = 0UL;

out_free_map:
        kfree(iommu->tbl.map);
        iommu->tbl.map = NULL;

        return -ENOMEM;
}
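
/* Illustrative only: a bus controller driver would typically call
 * iommu_table_init() at probe time, roughly like
 *
 *	err = iommu_table_init(iommu, tsbsize, dvma_offset,
 *			       dvma_addr_mask, numa_node);
 *	if (err)
 *		goto probe_failed;
 *
 * where tsbsize, dvma_offset and dvma_addr_mask come from the
 * controller's configuration; the names above are hypothetical.
 */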
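
/* Allocate a contiguous run of npages TSB entries, returning a pointer
 * to the first IOPTE, or NULL if the table is exhausted.
 */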
static inline iopte_t *alloc_npages(struct device *dev,
                                    struct iommu *iommu,
                                    unsigned long npages)
{
        unsigned long entry;

        entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
                                      (unsigned long)(-1), 0);
        if (unlikely(entry == IOMMU_ERROR_CODE))
                return NULL;

        return iommu->page_table + entry;
}

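/* Allocate a DMA context number.  The search starts at the lowest
 * context known to be free and wraps around to 1 if necessary;
 * context 0 is reserved to mean "no context" and is returned when the
 * bitmap is exhausted.
 */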
static int iommu_alloc_ctx(struct iommu *iommu)
{
        int lowest = iommu->ctx_lowest_free;
        int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

        if (unlikely(n == IOMMU_NUM_CTXS)) {
                n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
                if (unlikely(n == lowest)) {
                        printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
                        n = 0;
                }
        }
        if (n)
                __set_bit(n, iommu->ctx_bitmap);

        return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
        if (likely(ctx)) {
                __clear_bit(ctx, iommu->ctx_bitmap);
                if (ctx < iommu->ctx_lowest_free)
                        iommu->ctx_lowest_free = ctx;
        }
}

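/* Allocate a physically contiguous buffer (requests of order 10 or
 * higher are refused) and install consistent, writable IOPTEs for it.
 * The bus address is returned via dma_addrp and the CPU virtual
 * address as the return value.
 */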
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
                                   dma_addr_t *dma_addrp, gfp_t gfp,
                                   struct dma_attrs *attrs)
{
        unsigned long order, first_page;
        struct iommu *iommu;
        struct page *page;
        int npages, nid;
        iopte_t *iopte;
        void *ret;

        size = IO_PAGE_ALIGN(size);
        order = get_order(size);
        if (order >= 10)
                return NULL;

        nid = dev->archdata.numa_node;
        page = alloc_pages_node(nid, gfp, order);
        if (unlikely(!page))
                return NULL;

        first_page = (unsigned long) page_address(page);
        memset((char *)first_page, 0, PAGE_SIZE << order);

        iommu = dev->archdata.iommu;

        iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

        if (unlikely(iopte == NULL)) {
                free_pages(first_page, order);
                return NULL;
        }

        *dma_addrp = (iommu->tbl.table_map_base +
                      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        npages = size >> IO_PAGE_SHIFT;
        first_page = __pa(first_page);
        while (npages--) {
                iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
                                     IOPTE_WRITE |
                                     (first_page & IOPTE_PAGE));
                iopte++;
                first_page += IO_PAGE_SIZE;
        }

        return ret;
}

static void dma_4u_free_coherent(struct device *dev, size_t size,
                                 void *cpu, dma_addr_t dvma,
                                 struct dma_attrs *attrs)
{
        struct iommu *iommu;
        unsigned long order, npages;

        npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
        iommu = dev->archdata.iommu;

        iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
}

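/* Map a single page span for DMA.  An IOVA range is carved out of the
 * TSB, a context is allocated if the IOMMU supports context flushing,
 * and streaming or consistent IOPTEs are installed depending on
 * whether the streaming buffer is enabled.
 */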
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
                                  unsigned long offset, size_t sz,
                                  enum dma_data_direction direction,
                                  struct dma_attrs *attrs)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, oaddr;
        unsigned long i, base_paddr, ctx;
        u32 bus_addr, ret;
        unsigned long iopte_protection;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (unlikely(direction == DMA_NONE))
                goto bad_no_ctx;

        oaddr = (unsigned long)(page_address(page) + offset);
        npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;

        base = alloc_npages(dev, iommu, npages);
        spin_lock_irqsave(&iommu->lock, flags);
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);
        spin_unlock_irqrestore(&iommu->lock, flags);

        if (unlikely(!base))
                goto bad;

        bus_addr = (iommu->tbl.table_map_base +
                    ((base - iommu->page_table) << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        if (strbuf->strbuf_enabled)
                iopte_protection = IOPTE_STREAMING(ctx);
        else
                iopte_protection = IOPTE_CONSISTENT(ctx);
        if (direction != DMA_TO_DEVICE)
                iopte_protection |= IOPTE_WRITE;

        for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
                iopte_val(*base) = iopte_protection | base_paddr;

        return ret;

bad:
        iommu_free_ctx(iommu, ctx);
bad_no_ctx:
        if (printk_ratelimit())
                WARN_ON(1);
        return DMA_ERROR_CODE;
}

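/* Flush the streaming buffer for a range of IO pages.  When both the
 * streaming buffer and the IOMMU support context flushing, a single
 * context flush is attempted (retrying any lines the match register
 * still reports); otherwise each page is flushed individually.  For
 * directions other than DMA_TO_DEVICE, completion is then synced by
 * waiting for the hardware to write the flush flag back to memory.
 */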
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
                         u32 vaddr, unsigned long ctx, unsigned long npages,
                         enum dma_data_direction direction)
{
        int limit;

        if (strbuf->strbuf_ctxflush &&
            iommu->iommu_ctxflush) {
                unsigned long matchreg, flushreg;
                u64 val;

                flushreg = strbuf->strbuf_ctxflush;
                matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

                iommu_write(flushreg, ctx);
                val = iommu_read(matchreg);
                val &= 0xffff;
                if (!val)
                        goto do_flush_sync;

                while (val) {
                        if (val & 0x1)
                                iommu_write(flushreg, ctx);
                        val >>= 1;
                }
                val = iommu_read(matchreg);
                if (unlikely(val)) {
                        printk(KERN_WARNING "strbuf_flush: ctx flush "
                               "timeout matchreg[%llx] ctx[%lx]\n",
                               val, ctx);
                        goto do_page_flush;
                }
        } else {
                unsigned long i;

        do_page_flush:
                for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
                        iommu_write(strbuf->strbuf_pflush, vaddr);
        }

do_flush_sync:
        /* If the device could not possibly have put dirty data into
         * the streaming cache, no flush-flag synchronization needs
         * to be performed.
         */
        if (direction == DMA_TO_DEVICE)
                return;

        STC_FLUSHFLAG_INIT(strbuf);
        iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
        (void) iommu_read(iommu->write_complete_reg);

        limit = 100000;
        while (!STC_FLUSHFLAG_SET(strbuf)) {
                limit--;
                if (!limit)
                        break;
                udelay(1);
                rmb();
        }
        if (!limit)
                printk(KERN_WARNING "strbuf_flush: flushflag timeout "
                       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
                       vaddr, ctx, npages);
}

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
                              size_t sz, enum dma_data_direction direction,
                              struct dma_attrs *attrs)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, npages, ctx, i;

        if (unlikely(direction == DMA_NONE)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return;
        }

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        base = iommu->page_table +
                ((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
        bus_addr &= IO_PAGE_MASK;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

        /* Step 1: Kick data out of streaming buffers if necessary. */
        if (strbuf->strbuf_enabled)
                strbuf_flush(strbuf, iommu, bus_addr, ctx,
                             npages, direction);

        /* Step 2: Clear out TSB entries. */
        for (i = 0; i < npages; i++)
                iopte_make_dummy(iommu, base + i);

        iommu_free_ctx(iommu, ctx);
        spin_unlock_irqrestore(&iommu->lock, flags);

        iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

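/* Map a scatterlist.  Each segment gets its own IOPTE run, and
 * consecutive segments are merged into one DMA segment when their bus
 * addresses happen to be contiguous and the merge respects the
 * device's segment size and boundary limits.  Returns the number of
 * DMA segments produced, or 0 on failure.
 */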
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
                         int nelems, enum dma_data_direction direction,
                         struct dma_attrs *attrs)
{
        struct scatterlist *s, *outs, *segstart;
        unsigned long flags, handle, prot, ctx;
        dma_addr_t dma_next = 0, dma_addr;
        unsigned int max_seg_size;
        unsigned long seg_boundary_size;
        int outcount, incount, i;
        struct strbuf *strbuf;
        struct iommu *iommu;
        unsigned long base_shift;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;
        if (nelems == 0 || !iommu)
                return 0;

        spin_lock_irqsave(&iommu->lock, flags);

        ctx = 0;
        if (iommu->iommu_ctxflush)
                ctx = iommu_alloc_ctx(iommu);

        if (strbuf->strbuf_enabled)
                prot = IOPTE_STREAMING(ctx);
        else
                prot = IOPTE_CONSISTENT(ctx);
        if (direction != DMA_TO_DEVICE)
                prot |= IOPTE_WRITE;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
        base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;
                iopte_t *base;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
                entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
                                              &handle, (unsigned long)(-1), 0);

                /* Handle failure */
                if (unlikely(entry == IOMMU_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
                                       " npages %lx\n", iommu, paddr, npages);
                        goto iommu_map_failed;
                }

                base = iommu->page_table + entry;

                /* Convert entry to a dma_addr_t */
                dma_addr = iommu->tbl.table_map_base +
                        (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);

                /* Insert into HW table */
                paddr &= IO_PAGE_MASK;
                while (npages--) {
                        iopte_val(*base) = prot | paddr;
                        base++;
                        paddr += IO_PAGE_SIZE;
                }

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        /* We cannot merge if:
                         * - the allocated dma_addr isn't contiguous with
                         *   the previous allocation,
                         * - the merged segment would exceed the device's
                         *   maximum segment size, or
                         * - the merged segment would span a segment
                         *   boundary.
                         */
                        if ((dma_addr != dma_next) ||
                            (outs->dma_length + s->length > max_seg_size) ||
                            (is_span_boundary(out_entry, base_shift,
                                              seg_boundary_size, outs, s))) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++;
                                outs = sg_next(outs);
                        } else {
                                outs->dma_length += s->length;
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                        out_entry = entry;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;
        }

        spin_unlock_irqrestore(&iommu->lock, flags);

        if (outcount < incount) {
                outs = sg_next(outs);
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        return outcount;

iommu_map_failed:
        for_each_sg(sglist, s, nelems, i) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages, entry, j;
                        iopte_t *base;

                        vaddr = s->dma_address & IO_PAGE_MASK;
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);

                        entry = (vaddr - iommu->tbl.table_map_base)
                                >> IO_PAGE_SHIFT;
                        base = iommu->page_table + entry;

                        for (j = 0; j < npages; j++)
                                iopte_make_dummy(iommu, base + j);

                        iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
                                             IOMMU_ERROR_CODE);

                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
                if (s == outs)
                        break;
        }
        spin_unlock_irqrestore(&iommu->lock, flags);

        return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
        unsigned long ctx = 0;

        if (iommu->iommu_ctxflush) {
                iopte_t *base;
                u32 bus_addr;
                struct iommu_map_table *tbl = &iommu->tbl;

                bus_addr = sg->dma_address & IO_PAGE_MASK;
                base = iommu->page_table +
                        ((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

                ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
        }
        return ctx;
}

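/* Unmap a scatterlist previously mapped by dma_4u_map_sg().  The
 * context is recovered from the first mapping, streaming data is
 * flushed if necessary, and each segment's IOPTEs are pointed back at
 * the dummy page before the ranges are returned to the allocator.
 */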
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
                            int nelems, enum dma_data_direction direction,
                            struct dma_attrs *attrs)
{
        unsigned long flags, ctx;
        struct scatterlist *sg;
        struct strbuf *strbuf;
        struct iommu *iommu;

        BUG_ON(direction == DMA_NONE);

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        ctx = fetch_sg_ctx(iommu, sglist);

        spin_lock_irqsave(&iommu->lock, flags);

        sg = sglist;
        while (nelems--) {
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages, entry;
                iopte_t *base;
                int i;

                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

                entry = ((dma_handle - iommu->tbl.table_map_base)
                         >> IO_PAGE_SHIFT);
                base = iommu->page_table + entry;

                dma_handle &= IO_PAGE_MASK;
                if (strbuf->strbuf_enabled)
                        strbuf_flush(strbuf, iommu, dma_handle, ctx,
                                     npages, direction);

                for (i = 0; i < npages; i++)
                        iopte_make_dummy(iommu, base + i);

                iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
                                     IOMMU_ERROR_CODE);
                sg = sg_next(sg);
        }

        iommu_free_ctx(iommu, ctx);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

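/* The sync-for-cpu operations only matter when the streaming buffer is
 * in use: data the device wrote may still be sitting in the streaming
 * cache, so it has to be flushed out before the CPU looks at the
 * buffer.
 */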
static void dma_4u_sync_single_for_cpu(struct device *dev,
                                       dma_addr_t bus_addr, size_t sz,
                                       enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;
                struct iommu_map_table *tbl = &iommu->tbl;

                iopte = iommu->page_table +
                        ((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction)
{
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, i;
        struct scatterlist *sg, *sgprv;
        u32 bus_addr;

        iommu = dev->archdata.iommu;
        strbuf = dev->archdata.stc;

        if (!strbuf->strbuf_enabled)
                return;

        spin_lock_irqsave(&iommu->lock, flags);

        /* Step 1: Record the context, if any. */
        ctx = 0;
        if (iommu->iommu_ctxflush &&
            strbuf->strbuf_ctxflush) {
                iopte_t *iopte;
                struct iommu_map_table *tbl = &iommu->tbl;

                iopte = iommu->page_table + ((sglist[0].dma_address -
                        tbl->table_map_base) >> IO_PAGE_SHIFT);
                ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
        }

        /* Step 2: Kick data out of streaming buffers. */
        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
        sgprv = NULL;
        for_each_sg(sglist, sg, nelems, i) {
                if (sg->dma_length == 0)
                        break;
                sgprv = sg;
        }

        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

        spin_unlock_irqrestore(&iommu->lock, flags);
}

static struct dma_map_ops sun4u_dma_ops = {
        .alloc                  = dma_4u_alloc_coherent,
        .free                   = dma_4u_free_coherent,
        .map_page               = dma_4u_map_page,
        .unmap_page             = dma_4u_unmap_page,
        .map_sg                 = dma_4u_map_sg,
        .unmap_sg               = dma_4u_unmap_sg,
        .sync_single_for_cpu    = dma_4u_sync_single_for_cpu,
        .sync_sg_for_cpu        = dma_4u_sync_sg_for_cpu,
};
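
/* These ops back the generic DMA API on sun4u; e.g. a driver's
 * dma_map_single()/dma_unmap_single() calls land in
 * dma_4u_map_page()/dma_4u_unmap_page() above.  Illustrative driver
 * usage (device and buffer names are hypothetical):
 *
 *	dma_addr_t busaddr = dma_map_single(&pdev->dev, buf, len,
 *					    DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, busaddr))
 *		return -ENOMEM;
 */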

struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);

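/* DMA masks wider than 32 bits are rejected outright; a mask that
 * covers the IOMMU's own DMA address mask is supported via
 * translation; anything else is deferred to the 64-bit bypass check
 * for PCI devices.
 */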
int dma_supported(struct device *dev, u64 device_mask)
{
        struct iommu *iommu = dev->archdata.iommu;
        u64 dma_addr_mask = iommu->dma_addr_mask;

        if (device_mask >= (1UL << 32UL))
                return 0;

        if ((device_mask & dma_addr_mask) == dma_addr_mask)
                return 1;

#ifdef CONFIG_PCI
        if (dev_is_pci(dev))
                return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif

        return 0;
}
EXPORT_SYMBOL(dma_supported);