Linux/lib/swiotlb.c

/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm      Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm      Rename to swiotlb.c and add mark_clean() to avoid
 *                      unnecessary i-cache flushing.
 * 04/07/.. ak          Better overflow handling. Assorted fixes.
 * 05/09/10 linville    Add support for syncing ranges, support syncing for
 *                      DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb      Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define OFFSET(val,align) ((unsigned long)      \
                           ( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64-bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

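/*
 * Worked example (illustrative, assuming the usual IO_TLB_SHIFT of 11,
 * i.e. 2 KiB slabs, and 4 KiB pages):
 *
 *   SLABS_PER_PAGE   = 1 << (12 - 11)  = 2 slabs per page
 *   IO_TLB_MIN_SLABS = (1 << 20) >> 11 = 512 slabs for the 1MB minimum
 */
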
enum swiotlb_force swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static phys_addr_t io_tlb_start, io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

static phys_addr_t io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * Max segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
unsigned int max_segment;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int late_alloc;

static int __init
setup_io_tlb_npages(char *str)
{
        if (isdigit(*str)) {
                io_tlb_nslabs = simple_strtoul(str, &str, 0);
                /* avoid tail segment of size < IO_TLB_SEGSIZE */
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }
        if (*str == ',')
                ++str;
        if (!strcmp(str, "force")) {
                swiotlb_force = SWIOTLB_FORCE;
        } else if (!strcmp(str, "noforce")) {
                swiotlb_force = SWIOTLB_NO_FORCE;
                io_tlb_nslabs = 1;
        }

        return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
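
/*
 * Illustrative boot-time usage (a sketch of the parsing above): with
 * 2 KiB slabs, "swiotlb=65536" on the kernel command line reserves
 * 65536 slabs (128 MiB), and "swiotlb=65536,force" additionally bounces
 * every DMA mapping through the pool; "swiotlb=noforce" disables
 * bouncing and shrinks the pool to a single slab.
 */
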
/* make io_tlb_overflow tunable too? */

unsigned long swiotlb_nr_tbl(void)
{
        return io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);

unsigned int swiotlb_max_segment(void)
{
        return max_segment;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

void swiotlb_set_max_segment(unsigned int val)
{
        if (swiotlb_force == SWIOTLB_FORCE)
                max_segment = 1;
        else
                max_segment = rounddown(val, PAGE_SIZE);
}

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
        unsigned long size;

        size = io_tlb_nslabs << IO_TLB_SHIFT;

        return size ? size : (IO_TLB_DEFAULT_SIZE);
}

void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }

/* For swiotlb, clear memory encryption mask from dma addresses */
static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
                                      phys_addr_t address)
{
        return __sme_clr(phys_to_dma(hwdev, address));
}

/* Note that this doesn't work with highmem pages */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
                                      volatile void *address)
{
        return phys_to_dma(hwdev, virt_to_phys(address));
}

static bool no_iotlb_memory;

void swiotlb_print_info(void)
{
        unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
        unsigned char *vstart, *vend;

        if (no_iotlb_memory) {
                pr_warn("software IO TLB: No low mem\n");
                return;
        }

        vstart = phys_to_virt(io_tlb_start);
        vend = phys_to_virt(io_tlb_end);

        printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
               (unsigned long long)io_tlb_start,
               (unsigned long long)io_tlb_end,
               bytes >> 20, vstart, vend - 1);
}

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
        void *vaddr;
        unsigned long bytes;

        if (no_iotlb_memory || late_alloc)
                return;

        vaddr = phys_to_virt(io_tlb_start);
        bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
        swiotlb_set_mem_attributes(vaddr, bytes);
        memset(vaddr, 0, bytes);

        vaddr = phys_to_virt(io_tlb_overflow_buffer);
        bytes = PAGE_ALIGN(io_tlb_overflow);
        swiotlb_set_mem_attributes(vaddr, bytes);
        memset(vaddr, 0, bytes);
}

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
        void *v_overflow_buffer;
        unsigned long i, bytes;

        bytes = nslabs << IO_TLB_SHIFT;

        io_tlb_nslabs = nslabs;
        io_tlb_start = __pa(tlb);
        io_tlb_end = io_tlb_start + bytes;

        /*
         * Get the overflow emergency buffer
         */
        v_overflow_buffer = memblock_virt_alloc_low_nopanic(
                                                PAGE_ALIGN(io_tlb_overflow),
                                                PAGE_SIZE);
        if (!v_overflow_buffer)
                return -ENOMEM;

        io_tlb_overflow_buffer = __pa(v_overflow_buffer);

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = memblock_virt_alloc(
                                PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
                                PAGE_SIZE);
        io_tlb_orig_addr = memblock_virt_alloc(
                                PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
                                PAGE_SIZE);
        for (i = 0; i < io_tlb_nslabs; i++) {
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
        }
        io_tlb_index = 0;

        if (verbose)
                swiotlb_print_info();

        swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
        return 0;
}
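
/*
 * Illustrative initial state of the free list above (assuming the usual
 * IO_TLB_SEGSIZE of 128): each 128-slab segment counts down from 128,
 * so io_tlb_list reads 128, 127, ..., 2, 1, 128, 127, ... and
 * io_tlb_list[i] is the number of contiguous free slabs from index i up
 * to the end of that segment.
 */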

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
        size_t default_size = IO_TLB_DEFAULT_SIZE;
        unsigned char *vstart;
        unsigned long bytes;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        /* Get IO TLB memory from the low pages */
        vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
        if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
                return;

        if (io_tlb_start)
                memblock_free_early(io_tlb_start,
                                    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
        pr_warn("Cannot allocate SWIOTLB buffer");
        no_iotlb_memory = true;
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
        unsigned long bytes, req_nslabs = io_tlb_nslabs;
        unsigned char *vstart = NULL;
        unsigned int order;
        int rc = 0;

        if (!io_tlb_nslabs) {
                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
        }

        /*
         * Get IO TLB memory from the low pages
         */
        order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
        io_tlb_nslabs = SLABS_PER_PAGE << order;
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;

        while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
                                                  order);
                if (vstart)
                        break;
                order--;
        }

        if (!vstart) {
                io_tlb_nslabs = req_nslabs;
                return -ENOMEM;
        }
        if (order != get_order(bytes)) {
                printk(KERN_WARNING "Warning: only able to allocate %ld MB "
                       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
                io_tlb_nslabs = SLABS_PER_PAGE << order;
        }
        rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
        if (rc)
                free_pages((unsigned long)vstart, order);

        return rc;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
        unsigned long i, bytes;
        unsigned char *v_overflow_buffer;

        bytes = nslabs << IO_TLB_SHIFT;

        io_tlb_nslabs = nslabs;
        io_tlb_start = virt_to_phys(tlb);
        io_tlb_end = io_tlb_start + bytes;

        swiotlb_set_mem_attributes(tlb, bytes);
        memset(tlb, 0, bytes);

        /*
         * Get the overflow emergency buffer
         */
        v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
                                                     get_order(io_tlb_overflow));
        if (!v_overflow_buffer)
                goto cleanup2;

        swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
        memset(v_overflow_buffer, 0, io_tlb_overflow);
        io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);

        /*
         * Allocate and initialize the free list array.  This array is used
         * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
         * between io_tlb_start and io_tlb_end.
         */
        io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
                                      get_order(io_tlb_nslabs * sizeof(int)));
        if (!io_tlb_list)
                goto cleanup3;

        io_tlb_orig_addr = (phys_addr_t *)
                __get_free_pages(GFP_KERNEL,
                                 get_order(io_tlb_nslabs *
                                           sizeof(phys_addr_t)));
        if (!io_tlb_orig_addr)
                goto cleanup4;

        for (i = 0; i < io_tlb_nslabs; i++) {
                io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
                io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
        }
        io_tlb_index = 0;

        swiotlb_print_info();

        late_alloc = 1;

        swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);

        return 0;

cleanup4:
        free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
                                                         sizeof(int)));
        io_tlb_list = NULL;
cleanup3:
        free_pages((unsigned long)v_overflow_buffer,
                   get_order(io_tlb_overflow));
        io_tlb_overflow_buffer = 0;
cleanup2:
        io_tlb_end = 0;
        io_tlb_start = 0;
        io_tlb_nslabs = 0;
        max_segment = 0;
        return -ENOMEM;
}

void __init swiotlb_exit(void)
{
        if (!io_tlb_orig_addr)
                return;

        if (late_alloc) {
                free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
                           get_order(io_tlb_overflow));
                free_pages((unsigned long)io_tlb_orig_addr,
                           get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
                free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
                                                                 sizeof(int)));
                free_pages((unsigned long)phys_to_virt(io_tlb_start),
                           get_order(io_tlb_nslabs << IO_TLB_SHIFT));
        } else {
                memblock_free_late(io_tlb_overflow_buffer,
                                   PAGE_ALIGN(io_tlb_overflow));
                memblock_free_late(__pa(io_tlb_orig_addr),
                                   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
                memblock_free_late(__pa(io_tlb_list),
                                   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
                memblock_free_late(io_tlb_start,
                                   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
        }
        io_tlb_nslabs = 0;
        max_segment = 0;
}

int is_swiotlb_buffer(phys_addr_t paddr)
{
        return paddr >= io_tlb_start && paddr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer from or back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
                           size_t size, enum dma_data_direction dir)
{
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = phys_to_virt(tlb_addr);

        if (PageHighMem(pfn_to_page(pfn))) {
                /* The buffer does not have a mapping.  Map it in and copy */
                unsigned int offset = orig_addr & ~PAGE_MASK;
                char *buffer;
                unsigned int sz = 0;
                unsigned long flags;

                while (size) {
                        sz = min_t(size_t, PAGE_SIZE - offset, size);

                        local_irq_save(flags);
                        buffer = kmap_atomic(pfn_to_page(pfn));
                        if (dir == DMA_TO_DEVICE)
                                memcpy(vaddr, buffer + offset, sz);
                        else
                                memcpy(buffer + offset, vaddr, sz);
                        kunmap_atomic(buffer);
                        local_irq_restore(flags);

                        size -= sz;
                        pfn++;
                        vaddr += sz;
                        offset = 0;
                }
        } else if (dir == DMA_TO_DEVICE) {
                memcpy(vaddr, phys_to_virt(orig_addr), size);
        } else {
                memcpy(phys_to_virt(orig_addr), vaddr, size);
        }
}
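
/*
 * Sketch of the highmem path above for a copy that straddles a page
 * boundary (hypothetical numbers): with orig_addr 0xf00 bytes into its
 * page and size 0x300, the first pass maps pfn and copies
 * PAGE_SIZE - 0xf00 = 0x100 bytes; the second maps pfn + 1 and copies
 * the remaining 0x200 bytes from offset 0.
 */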

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                   dma_addr_t tbl_dma_addr,
                                   phys_addr_t orig_addr, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        unsigned long flags;
        phys_addr_t tlb_addr;
        unsigned int nslots, stride, index, wrap;
        int i;
        unsigned long mask;
        unsigned long offset_slots;
        unsigned long max_slots;

        if (no_iotlb_memory)
                panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

        if (mem_encrypt_active())
                pr_warn_once("%s is active and system is using DMA bounce buffers\n",
                             sme_active() ? "SME" : "SEV");

        mask = dma_get_seg_boundary(hwdev);

        tbl_dma_addr &= mask;

        offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

        /*
         * Carefully handle integer overflow which can occur when mask == ~0UL.
         */
        max_slots = mask + 1
                    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

        /*
         * For mappings greater than or equal to a page, we limit the stride
         * (and hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size >= PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;

        BUG_ON(!nslots);

        /*
         * Find a suitable number of IO TLB entries that will fit this
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        index = ALIGN(io_tlb_index, stride);
        if (index >= io_tlb_nslabs)
                index = 0;
        wrap = index;

        do {
                while (iommu_is_span_boundary(index, nslots, offset_slots,
                                              max_slots)) {
                        index += stride;
                        if (index >= io_tlb_nslabs)
                                index = 0;
                        if (index == wrap)
                                goto not_found;
                }

                /*
                 * If we find a slot that indicates we have 'nslots' number of
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0', indicating they are
                 * unavailable.
                 */
                if (io_tlb_list[index] >= nslots) {
                        int count = 0;

                        for (i = index; i < (int) (index + nslots); i++)
                                io_tlb_list[i] = 0;
                        for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                                io_tlb_list[i] = ++count;
                        tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

                        /*
                         * Update the indices to avoid searching in the next
                         * round.
                         */
                        io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                        ? (index + nslots) : 0);

                        goto found;
                }
                index += stride;
                if (index >= io_tlb_nslabs)
                        index = 0;
        } while (index != wrap);

not_found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
        if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
                dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
        return SWIOTLB_MAP_ERROR;
found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
                swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);

        return tlb_addr;
}
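
/*
 * Minimal caller sketch (illustrative only; assumes a valid "dev" and a
 * physically contiguous, non-highmem buffer at "phys"):
 *
 *        phys_addr_t tlb = swiotlb_tbl_map_single(dev,
 *                        swiotlb_phys_to_dma(dev, io_tlb_start),
 *                        phys, size, DMA_TO_DEVICE, 0);
 *
 *        if (tlb == SWIOTLB_MAP_ERROR)
 *                return -ENOMEM;
 *        ... program the device with swiotlb_phys_to_dma(dev, tlb) ...
 *        swiotlb_tbl_unmap_single(dev, tlb, size, DMA_TO_DEVICE, 0);
 */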

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */

static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
           enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t start_dma_addr;

        if (swiotlb_force == SWIOTLB_NO_FORCE) {
                dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
                                     &phys);
                return SWIOTLB_MAP_ERROR;
        }

        start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
        return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
                                      dir, attrs);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
                              size_t size, enum dma_data_direction dir,
                              unsigned long attrs)
{
        unsigned long flags;
        int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = io_tlb_orig_addr[index];

        /*
         * First, sync the memory before unmapping the entry
         */
        if (orig_addr != INVALID_PHYS_ADDR &&
            !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
            ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);

        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
         * While returning the entries to the free list, we merge the entries
         * with slots below and above the pool being returned.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        {
                count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
                         io_tlb_list[index + nslots] : 0);
                /*
                 * Step 1: return the slots to the free list, merging the
                 * slots with the succeeding slots
                 */
                for (i = index + nslots - 1; i >= index; i--) {
                        io_tlb_list[i] = ++count;
                        io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
                }
                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non zero)
                 */
                for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                        io_tlb_list[i] = ++count;
        }
        spin_unlock_irqrestore(&io_tlb_lock, flags);
}
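
/*
 * Worked example of the merge above (illustrative): freeing nslots = 2
 * at index 4, with io_tlb_list[6] = 3 (three free slabs follow) and
 * io_tlb_list[3] = 1 (one free slab precedes), leaves
 * io_tlb_list[3..6] = 6, 5, 4, 3, so later searches see one run of six
 * contiguous free slabs rather than three fragments.
 */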

void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
                             size_t size, enum dma_data_direction dir,
                             enum dma_sync_target target)
{
        int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = io_tlb_orig_addr[index];

        if (orig_addr == INVALID_PHYS_ADDR)
                return;
        orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);

        switch (target) {
        case SYNC_FOR_CPU:
                if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
                        swiotlb_bounce(orig_addr, tlb_addr,
                                       size, DMA_FROM_DEVICE);
                else
                        BUG_ON(dir != DMA_TO_DEVICE);
                break;
        case SYNC_FOR_DEVICE:
                if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
                        swiotlb_bounce(orig_addr, tlb_addr,
                                       size, DMA_TO_DEVICE);
                else
                        BUG_ON(dir != DMA_FROM_DEVICE);
                break;
        default:
                BUG();
        }
}

static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
                size_t size)
{
        u64 mask = DMA_BIT_MASK(32);

        if (dev && dev->coherent_dma_mask)
                mask = dev->coherent_dma_mask;
        return addr + size - 1 <= mask;
}
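
/*
 * Example (illustrative numbers): with a 24-bit coherent mask
 * (0x00ffffff), an allocation at bus address 0xff0000 of size 0x10000
 * passes (0xff0000 + 0x10000 - 1 == 0xffffff), while the same size at
 * 0xff0001 fails.
 */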

static void *
swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
                unsigned long attrs)
{
        phys_addr_t phys_addr;

        if (swiotlb_force == SWIOTLB_NO_FORCE)
                goto out_warn;

        phys_addr = swiotlb_tbl_map_single(dev,
                        swiotlb_phys_to_dma(dev, io_tlb_start),
                        0, size, DMA_FROM_DEVICE, attrs);
        if (phys_addr == SWIOTLB_MAP_ERROR)
                goto out_warn;

        *dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
        if (!dma_coherent_ok(dev, *dma_handle, size))
                goto out_unmap;

        memset(phys_to_virt(phys_addr), 0, size);
        return phys_to_virt(phys_addr);

out_unmap:
        dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
                (unsigned long long)(dev ? dev->coherent_dma_mask : 0),
                (unsigned long long)*dma_handle);

        /*
         * DMA_TO_DEVICE to avoid memcpy in unmap_single.
         * DMA_ATTR_SKIP_CPU_SYNC is optional.
         */
        swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
                        DMA_ATTR_SKIP_CPU_SYNC);
out_warn:
        if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
                dev_warn(dev,
                        "swiotlb: coherent allocation failed, size=%zu\n",
                        size);
                dump_stack();
        }
        return NULL;
}

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                       dma_addr_t *dma_handle, gfp_t flags)
{
        int order = get_order(size);
        unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
        void *ret;

        ret = (void *)__get_free_pages(flags, order);
        if (ret) {
                *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
                if (dma_coherent_ok(hwdev, *dma_handle, size)) {
                        memset(ret, 0, size);
                        return ret;
                }
                free_pages((unsigned long)ret, order);
        }

        return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

static bool swiotlb_free_buffer(struct device *dev, size_t size,
                dma_addr_t dma_addr)
{
        phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);

        WARN_ON_ONCE(irqs_disabled());

        if (!is_swiotlb_buffer(phys_addr))
                return false;

        /*
         * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
         * DMA_ATTR_SKIP_CPU_SYNC is optional.
         */
        swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
                                 DMA_ATTR_SKIP_CPU_SYNC);
        return true;
}

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                      dma_addr_t dev_addr)
{
        if (!swiotlb_free_buffer(hwdev, size, dev_addr))
                free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
             int do_panic)
{
        if (swiotlb_force == SWIOTLB_NO_FORCE)
                return;

        /*
         * Ran out of IOMMU space for this operation. This is very bad.
         * Unfortunately the drivers cannot handle this operation properly
         * unless they check for dma_mapping_error (most don't).
         * When the mapping is small enough return a static buffer to limit
         * the damage, or panic when the transfer is too big.
         */
        dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
                            size);

        if (size <= io_tlb_overflow || !do_panic)
                return;

        if (dir == DMA_BIDIRECTIONAL)
                panic("DMA: Random memory could be DMA accessed\n");
        if (dir == DMA_FROM_DEVICE)
                panic("DMA: Random memory could be DMA written\n");
        if (dir == DMA_TO_DEVICE)
                panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
                            unsigned long offset, size_t size,
                            enum dma_data_direction dir,
                            unsigned long attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = phys_to_dma(dev, phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
                return dev_addr;

        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        /* Oh well, have to allocate and map a bounce buffer. */
        map = map_single(dev, phys, size, dir, attrs);
        if (map == SWIOTLB_MAP_ERROR) {
                swiotlb_full(dev, size, dir, 1);
                return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
        }

        dev_addr = swiotlb_phys_to_dma(dev, map);

        /* Ensure that the address returned is DMA'ble */
        if (dma_capable(dev, dev_addr, size))
                return dev_addr;

        attrs |= DMA_ATTR_SKIP_CPU_SYNC;
        swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

        return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
}
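
/*
 * Driver-side sketch (illustrative; drivers normally reach this path
 * through the generic DMA API rather than by calling swiotlb directly):
 *
 *        dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *
 *        if (dma_mapping_error(dev, dma))
 *                return -ENOMEM;
 *        ... start the transfer ...
 *        dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 */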

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                         size_t size, enum dma_data_direction dir,
                         unsigned long attrs)
{
        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (is_swiotlb_buffer(paddr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with highmem pages but we could
         * call dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is null on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs)
{
        unmap_single(hwdev, dev_addr, size, dir, attrs);
}

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                    size_t size, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (is_swiotlb_buffer(paddr)) {
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir)
{
        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir)
{
        swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
                     enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

                if (swiotlb_force == SWIOTLB_FORCE ||
                    !dma_capable(hwdev, dev_addr, sg->length)) {
                        phys_addr_t map = map_single(hwdev, sg_phys(sg),
                                                     sg->length, dir, attrs);
                        if (map == SWIOTLB_MAP_ERROR) {
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
                                attrs |= DMA_ATTR_SKIP_CPU_SYNC;
                                swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                       attrs);
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
                        sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
                } else
                        sg->dma_address = dev_addr;
                sg_dma_len(sg) = sg->length;
        }
        return nelems;
}
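
/*
 * Scatter-gather sketch (illustrative, via the generic DMA API):
 *
 *        int count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 *        if (!count)
 *                return -ENOMEM;
 *        ... program the device from sg_dma_address()/sg_dma_len() ...
 *        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */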

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                       int nelems, enum dma_data_direction dir,
                       unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
                             attrs);
}

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                int nelems, enum dma_data_direction dir,
                enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                swiotlb_sync_single(hwdev, sg->dma_address,
                                    sg_dma_len(sg), dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                        int nelems, enum dma_data_direction dir)
{
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                           int nelems, enum dma_data_direction dir)
{
        swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
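
/*
 * Example (hypothetical numbers, assuming an identity phys-to-dma
 * mapping): if the bounce pool ends at physical 0x3800000, a 24-bit
 * mask of 0x00ffffff fails the check above (0x37fffff > 0xffffff),
 * while a 32-bit mask of 0xffffffff passes.
 */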

#ifdef CONFIG_DMA_DIRECT_OPS
void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs)
{
        void *vaddr;

        /* temporary workaround: */
        if (gfp & __GFP_NOWARN)
                attrs |= DMA_ATTR_NO_WARN;

        /*
         * Don't print a warning when the first allocation attempt fails.
         * swiotlb_alloc_coherent() will print a warning when the DMA memory
         * allocation ultimately failed.
         */
        gfp |= __GFP_NOWARN;

        vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
        if (!vaddr)
                vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
        return vaddr;
}

void swiotlb_free(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_addr, unsigned long attrs)
{
        if (!swiotlb_free_buffer(dev, size, dma_addr))
                dma_direct_free(dev, size, vaddr, dma_addr, attrs);
}

const struct dma_map_ops swiotlb_dma_ops = {
        .mapping_error          = swiotlb_dma_mapping_error,
        .alloc                  = swiotlb_alloc,
        .free                   = swiotlb_free,
        .sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
        .sync_single_for_device = swiotlb_sync_single_for_device,
        .sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device     = swiotlb_sync_sg_for_device,
        .map_sg                 = swiotlb_map_sg_attrs,
        .unmap_sg               = swiotlb_unmap_sg_attrs,
        .map_page               = swiotlb_map_page,
        .unmap_page             = swiotlb_unmap_page,
        .dma_supported          = swiotlb_dma_supported,
};
#endif /* CONFIG_DMA_DIRECT_OPS */
