
TOMOYO Linux Cross Reference
Linux/arch/arm/xen/mm.c

#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

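/*
 * Allocate pages for the SWIOTLB bounce buffer.  If any RAM region
 * starts below the 32-bit boundary, force the allocation into
 * ZONE_DMA so the buffer stays addressable by devices limited to
 * 32-bit DMA masks.
 */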
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
        struct memblock_region *reg;
        gfp_t flags = __GFP_NOWARN;

        for_each_memblock(memory, reg) {
                if (reg->base < (phys_addr_t)0xffffffff) {
                        flags |= __GFP_DMA;
                        break;
                }
        }
        return __get_free_pages(flags, order);
}

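/*
 * Whether dma_cache_maint() is being called on the map or the unmap
 * path; together with the DMA direction this selects a cache clean
 * or a cache invalidate.
 */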
enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,
};

/* set at init time if Xen advertises the GNTTABOP_cache_flush hypercall */
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

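/*
 * Perform cache maintenance for DMA by hypercall, one page at a time:
 * for each page-sized chunk of the buffer, ask Xen to clean or
 * invalidate the range via GNTTABOP_cache_flush.  On the map path the
 * cache is cleaned (or invalidated for DMA_FROM_DEVICE); on the unmap
 * path it is invalidated unless the transfer was DMA_TO_DEVICE.
 */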
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
        struct gnttab_cache_flush cflush;
        unsigned long pfn;
        size_t left = size;

        pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
        offset %= PAGE_SIZE;

        do {
                size_t len = left;

                /* buffers in highmem or foreign pages cannot cross page
                 * boundaries */
                if (len + offset > PAGE_SIZE)
                        len = PAGE_SIZE - offset;

                cflush.op = 0;
                cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
                cflush.offset = offset;
                cflush.length = len;

                if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
                        cflush.op = GNTTAB_CACHE_INVAL;
                if (op == DMA_MAP) {
                        if (dir == DMA_FROM_DEVICE)
                                cflush.op = GNTTAB_CACHE_INVAL;
                        else
                                cflush.op = GNTTAB_CACHE_CLEAN;
                }
                if (cflush.op)
                        HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

                offset = 0;
                pfn++;
                left -= len;
        } while (left);
}

static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

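/*
 * Entry points used by swiotlb-xen when pages are mapped, unmapped or
 * synced.  Cache maintenance is skipped entirely for coherent devices,
 * and on map/unmap when the caller passed DMA_ATTR_SKIP_CPU_SYNC.
 */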
void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, struct dma_attrs *attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                return;

        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                return;

        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

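/*
 * A bounce buffer is needed only for a foreign page (pfn != bfn) on a
 * non-coherent device when Xen cannot do the cache maintenance for us
 * via the GNTTABOP_cache_flush hypercall.
 */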
bool xen_arch_need_swiotlb(struct device *dev,
                           unsigned long pfn,
                           unsigned long bfn)
{
        return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
}

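/*
 * On ARM, dom0 is mapped 1:1, so guest-physical addresses equal
 * machine addresses and memory is already contiguous; simply hand
 * the start address back as the DMA handle.
 */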
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
                                 dma_addr_t *dma_handle)
{
        if (!xen_initial_domain())
                return -EINVAL;

        /* we assume that dom0 is mapped 1:1 for now */
        *dma_handle = pstart;
        return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

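/*
 * DMA ops used for Xen domains; set at init time and serviced
 * entirely by swiotlb-xen.
 */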
struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

static struct dma_map_ops xen_swiotlb_dma_ops = {
        .mapping_error = xen_swiotlb_dma_mapping_error,
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
        .map_sg = xen_swiotlb_map_sg_attrs,
        .unmap_sg = xen_swiotlb_unmap_sg_attrs,
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .set_dma_mask = xen_swiotlb_set_dma_mask,
};

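/*
 * Dom0-only init: bring up the Xen flavour of SWIOTLB, install the DMA
 * ops, and probe for GNTTABOP_cache_flush by issuing a zero-length
 * flush; any result other than -ENOSYS means the hypercall exists.
 */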
int __init xen_mm_init(void)
{
        struct gnttab_cache_flush cflush;

        if (!xen_initial_domain())
                return 0;

        xen_swiotlb_init(1, false);
        xen_dma_ops = &xen_swiotlb_dma_ops;

        cflush.op = 0;
        cflush.a.dev_bus_addr = 0;
        cflush.offset = 0;
        cflush.length = 0;
        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
                hypercall_cflush = true;
        return 0;
}
arch_initcall(xen_mm_init);

