TOMOYO Linux Cross Reference
Linux/arch/avr32/mm/dma-coherent.c

/*
 *  Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/addrspace.h>

void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
        /*
         * No need to sync an uncached area
         */
        if (PXSEG(vaddr) == P2SEG)
                return;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                invalidate_dcache_region(vaddr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_region(vaddr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                flush_dcache_region(vaddr, size);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(dma_cache_sync);
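
/*
 * Illustrative sketch, not part of the original file: how a driver might
 * use dma_cache_sync() to make a buffer visible to a device before a
 * transfer and to the CPU afterwards.  The device pointer, buffer and
 * length below are hypothetical placeholders.
 */
static void example_cache_sync_usage(struct device *dev, void *buf,
                                     size_t len)
{
        /* Write back dirty cache lines so the device reads current data. */
        dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);

        /* ... device reads from and/or writes to the buffer here ... */

        /* Invalidate before the CPU reads data the device has written. */
        dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
}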

static struct page *__dma_alloc(struct device *dev, size_t size,
                                dma_addr_t *handle, gfp_t gfp)
{
        struct page *page, *free, *end;
        int order;

        /* Following is a work-around (a.k.a. hack) to prevent pages
         * allocated with __GFP_COMP from being passed to split_page(),
         * which cannot handle them.  The real problem is that this flag
         * probably should be 0 on AVR32 as it is not supported on this
         * platform--see CONFIG_HUGETLB_PAGE. */
        gfp &= ~(__GFP_COMP);

        size = PAGE_ALIGN(size);
        order = get_order(size);

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;
        split_page(page, order);

        /*
         * When accessing physical memory with valid cache data, we
         * get a cache hit even if the virtual memory region is marked
         * as uncached.
         *
         * Since the memory is newly allocated, there is no point in
         * doing a writeback. If the previous owner cares, he should
         * have flushed the cache before releasing the memory.
         */
        invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

        *handle = page_to_bus(page);
        free = page + (size >> PAGE_SHIFT);
        end = page + (1 << order);

        /*
         * Free any unused pages
         */
        while (free < end) {
                __free_page(free);
                free++;
        }

        return page;
}
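
/*
 * Worked example (not in the original file) of the rounding above: a
 * request for 3 pages (12 KiB with 4 KiB pages) is already page aligned,
 * get_order(size) returns 2, and alloc_pages() hands back a contiguous
 * 4-page block.  split_page() makes each of those pages individually
 * refcounted, so the free loop at the end of __dma_alloc() can return the
 * single unused trailing page to the allocator while the first three stay
 * allocated for the caller.
 */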
 91 
 92 static void __dma_free(struct device *dev, size_t size,
 93                        struct page *page, dma_addr_t handle)
 94 {
 95         struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);
 96 
 97         while (page < end)
 98                 __free_page(page++);
 99 }

static void *avr32_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
        struct page *page;
        dma_addr_t phys;

        page = __dma_alloc(dev, size, handle, gfp);
        if (!page)
                return NULL;
        phys = page_to_phys(page);

        if (attrs & DMA_ATTR_WRITE_COMBINE) {
                /* Now, map the page into P3 with write-combining turned on */
                *handle = phys;
                return __ioremap(phys, size, _PAGE_BUFFER);
        } else {
                return phys_to_uncached(phys);
        }
}
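
/*
 * Sketch, not part of the original file: how a driver reaches
 * avr32_dma_alloc() through the generic DMA API.  Passing
 * DMA_ATTR_WRITE_COMBINE selects the __ioremap(..., _PAGE_BUFFER) path
 * above (useful for frame buffers); leaving it out returns an uncached
 * mapping.  The device pointer and size are hypothetical.
 */
static void *example_alloc_wc_buffer(struct device *dev, size_t size,
                                     dma_addr_t *dma)
{
        /* Dispatches to avr32_dma_ops.alloc, i.e. avr32_dma_alloc(). */
        return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
                               DMA_ATTR_WRITE_COMBINE);
}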

static void avr32_dma_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
        struct page *page;

        if (attrs & DMA_ATTR_WRITE_COMBINE) {
                iounmap(cpu_addr);

                page = phys_to_page(handle);
        } else {
                void *addr = phys_to_cached(uncached_to_phys(cpu_addr));

                pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
                         cpu_addr, (unsigned long)handle, (unsigned)size);

                BUG_ON(!virt_addr_valid(addr));
                page = virt_to_page(addr);
        }

        __dma_free(dev, size, page, handle);
}

static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size,
                enum dma_data_direction direction, unsigned long attrs)
{
        void *cpu_addr = page_address(page) + offset;

        if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                dma_cache_sync(dev, cpu_addr, size, direction);
        return virt_to_bus(cpu_addr);
}
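
/*
 * Sketch, not part of the original file: a streaming mapping as a driver
 * would set one up.  dma_map_single() derives the page and offset from
 * the kernel virtual address and lands in avr32_dma_map_page() above,
 * which syncs the cache and returns a bus address for the device.  The
 * buffer below is hypothetical.
 */
static dma_addr_t example_map_tx_buffer(struct device *dev, void *buf,
                                        size_t len)
{
        /* Dispatches to avr32_dma_ops.map_page, i.e. avr32_dma_map_page(). */
        return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}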

static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
                int nents, enum dma_data_direction direction,
                unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i) {
                char *virt;

                sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
                virt = sg_virt(sg);

                if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                        continue;

                dma_cache_sync(dev, virt, sg->length, direction);
        }

        return nents;
}
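
/*
 * Sketch, not part of the original file: building and mapping a small
 * scatterlist.  dma_map_sg() dispatches to avr32_dma_map_sg() above,
 * which fills in sg->dma_address and syncs the cache for each element
 * unless DMA_ATTR_SKIP_CPU_SYNC is requested.  The two buffers and their
 * length are hypothetical.
 */
static int example_map_two_buffers(struct device *dev, void *a, void *b,
                                   size_t len)
{
        struct scatterlist sgl[2];
        struct scatterlist *sg;
        int i, mapped;

        sg_init_table(sgl, 2);
        sg_set_buf(&sgl[0], a, len);
        sg_set_buf(&sgl[1], b, len);

        /* Dispatches to avr32_dma_ops.map_sg, i.e. avr32_dma_map_sg(). */
        mapped = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);

        for_each_sg(sgl, sg, mapped, i)
                pr_debug("seg %d: bus %08lx len %u\n", i,
                         (unsigned long)sg_dma_address(sg), sg_dma_len(sg));

        return mapped;
}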

static void avr32_dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}

static void avr32_dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sglist, int nents,
                enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i)
                dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
}

struct dma_map_ops avr32_dma_ops = {
        .alloc                  = avr32_dma_alloc,
        .free                   = avr32_dma_free,
        .map_page               = avr32_dma_map_page,
        .map_sg                 = avr32_dma_map_sg,
        .sync_single_for_device = avr32_dma_sync_single_for_device,
        .sync_sg_for_device     = avr32_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(avr32_dma_ops);
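
/*
 * Sketch, not part of this file: the exported ops table is picked up by
 * the generic DMA API through the architecture's <asm/dma-mapping.h>,
 * which is expected to hand it back from get_arch_dma_ops(), roughly:
 *
 *	static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &avr32_dma_ops;
 *	}
 *
 * Every dma_alloc_*() / dma_map_*() call made against an AVR32 device
 * then resolves to the functions defined above.
 */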
