
TOMOYO Linux Cross Reference
Linux/include/linux/dma-mapping.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 */

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING          (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE          (1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT         (1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING      (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC          (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS       (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN        (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED             (1UL << 9)
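/*
 * Editorial note: the attributes above are combined into the 'attrs'
 * bitmask taken by the *_attrs() entry points declared later in this
 * header.  A minimal sketch of hypothetical driver code (not part of
 * this header) combining two of them:
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_attrs(dev, SZ_4K, &dma_handle, GFP_KERNEL,
 *				   DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_attrs(dev, SZ_4K, cpu_addr, dma_handle,
 *		       DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
 */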

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
        void* (*alloc)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp,
                                unsigned long attrs);
        void (*free)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle,
                              unsigned long attrs);
        int (*mmap)(struct device *, struct vm_area_struct *,
                          void *, dma_addr_t, size_t,
                          unsigned long attrs);

        int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
                           dma_addr_t, size_t, unsigned long attrs);

        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
                               unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir,
                           unsigned long attrs);
        /*
         * map_sg returns 0 on error and a value > 0 on success.
         * It should never return a value < 0.
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
                      int nents, enum dma_data_direction dir,
                      unsigned long attrs);
        void (*unmap_sg)(struct device *dev,
                         struct scatterlist *sg, int nents,
                         enum dma_data_direction dir,
                         unsigned long attrs);
        dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs);
        void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir,
                           unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev,
                                    dma_addr_t dma_handle, size_t size,
                                    enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
                                       dma_addr_t dma_handle, size_t size,
                                       enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction dir);
        void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
                        enum dma_data_direction direction);
        int (*dma_supported)(struct device *dev, u64 mask);
        u64 (*get_required_mask)(struct device *dev);
        size_t (*max_mapping_size)(struct device *dev);
        unsigned long (*get_merge_boundary)(struct device *dev);
};
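/*
 * Editorial sketch: a bus or IOMMU layer provides an instance of this
 * table and installs it with set_dma_ops() (declared below).  The names
 * foo_map_page and foo_dma_ops are hypothetical, and the trivial identity
 * mapping is for illustration only:
 *
 *	static dma_addr_t foo_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		return page_to_phys(page) + offset;	// identity mapping
 *	}
 *
 *	static const struct dma_map_ops foo_dma_ops = {
 *		.map_page	= foo_map_page,
 *	};
 *
 *	set_dma_ops(dev, &foo_dma_ops);
 */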

#define DMA_MAPPING_ERROR               (~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE   0x0ULL
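/*
 * For reference: DMA_BIT_MASK(32) == 0x00000000ffffffffULL and
 * DMA_BIT_MASK(64) == ~0ULL.  The 64-bit case is special-cased above
 * because 1ULL << 64 would be undefined behaviour in C.
 */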

static inline int valid_dma_direction(int dma_direction)
{
        return ((dma_direction == DMA_BIDIRECTIONAL) ||
                (dma_direction == DMA_TO_DEVICE) ||
                (dma_direction == DMA_FROM_DEVICE));
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                            void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                                  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                                                   dma_addr_t *dma_handle)
{
        return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
        return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
                                                void *cpu_addr, size_t size,
                                                int *ret)
{
        return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
        return likely(!ops);
}

/*
 * All the dma_direct_* declarations are here just for the indirect call bypass,
 * and must not be used directly in drivers!
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir, unsigned long attrs);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_direct_unmap_sg(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

size_t dma_direct_max_mapping_size(struct device *dev);

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (dev->dma_ops)
                return dev->dma_ops;
        return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
                               const struct dma_map_ops *dma_ops)
{
        dev->dma_ops = dma_ops;
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                struct page *page, size_t offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
        else
                addr = ops->map_page(dev, page, offset, size, dir, attrs);
        debug_dma_map_page(dev, page, offset, size, dir, addr);

        return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_unmap_page(dev, addr, size, dir, attrs);
        else if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        int ents;

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
        else
                ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}
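/*
 * Typical use (editorial sketch): iterate over the returned number of
 * entries, which may be smaller than the original count if the mapping
 * layer merged segments.  'sgl', 'count' and program_hw_desc() are
 * hypothetical stand-ins for driver state:
 *
 *	struct scatterlist *sg;
 *	int i, ents;
 *
 *	ents = dma_map_sg_attrs(dev, sgl, count, DMA_TO_DEVICE, 0);
 *	if (ents == 0)
 *		return -EIO;
 *	for_each_sg(sgl, sg, ents, i)
 *		program_hw_desc(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	// unmap with the original count, not the returned one
 *	dma_unmap_sg_attrs(dev, sgl, count, DMA_TO_DEVICE, 0);
 */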

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (dma_is_direct(ops))
                dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
        else if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
                                          phys_addr_t phys_addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr = DMA_MAPPING_ERROR;

        BUG_ON(!valid_dma_direction(dir));

        /* Don't allow RAM to be mapped */
        if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
                return DMA_MAPPING_ERROR;

        if (dma_is_direct(ops))
                addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
        else if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

        debug_dma_map_resource(dev, phys_addr, size, dir, addr);
        return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                                      size_t size, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (!dma_is_direct(ops) && ops->unmap_resource)
                ops->unmap_resource(dev, addr, size, dir, attrs);
        debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
        else if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_single_for_device(dev, addr, size, dir);
        else if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}
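/*
 * Editorial sketch of the ownership rule these helpers implement: a
 * streaming mapping belongs to the device between map/sync_for_device
 * and sync_for_cpu/unmap.  A hypothetical receive path ('buf_cpu',
 * 'buf_dma', 'len' and process_rx_data() are stand-ins):
 *
 *	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	process_rx_data(buf_cpu, len);		// CPU may touch the buffer now
 *	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	// the buffer is handed back to the device for the next transfer
 */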

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_is_direct(ops))
                dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);

        if (dma_addr == DMA_MAPPING_ERROR)
                return -ENOMEM;
        return 0;
}

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
bool dma_can_mmap(struct device *dev);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
unsigned long dma_get_merge_boundary(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                struct page *page, size_t offset, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
                struct scatterlist *sg, int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
                phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
                struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
        return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
                void *vaddr, dma_addr_t dma_handle)
{
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction dir)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
                struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
                size_t size, unsigned long attrs)
{
        return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        return -ENXIO;
}
static inline bool dma_can_mmap(struct device *dev)
{
        return false;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
        return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
        return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
        return 0;
}
static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        return false;
}
static inline unsigned long dma_get_merge_boundary(struct device *dev)
{
        return 0;
}
#endif /* CONFIG_HAS_DMA */

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        /* DMA must never operate on areas that might be remapped. */
        if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
                          "rejecting DMA map of vmalloc memory\n"))
                return DMA_MAPPING_ERROR;
        debug_dma_map_single(dev, ptr, size);
        return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
                        size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}
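/*
 * Canonical round trip (sketch; 'buf' and 'len' are hypothetical): map
 * kmalloc()ed memory, never vmalloc()ed or stack memory, and always
 * check the result before using it:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */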

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t addr, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t addr, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:        The device for which to perform the DMA operation
 * @sgt:        The sg_table object describing the buffer
 * @dir:        DMA direction
 * @attrs:      Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership of the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before the CPU touches
 * the buffer.
 *
 * Returns 0 on success or -EINVAL if the buffer could not be mapped.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        int nents;

        nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
        if (nents <= 0)
                return -EINVAL;
        sgt->nents = nents;
        return 0;
}
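/*
 * Usage sketch ('sgt' is a hypothetical table obtained from, e.g.,
 * sg_alloc_table()):
 *
 *	if (dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0))
 *		return -EINVAL;
 *	// sgt->nents now holds the number of mapped (possibly merged) entries
 *	...
 *	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
 */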

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev:        The device for which to perform the DMA operation
 * @sgt:        The sg_table object describing the buffer
 * @dir:        DMA direction
 * @attrs:      Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * returns, the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev:        The device for which to perform the DMA operation
 * @sgt:        The sg_table object describing the buffer
 * @dir:        DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev:        The device for which to perform the DMA operation
 * @sgt:        The sg_table object describing the buffer
 * @dir:        DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);

struct page **dma_common_find_pages(void *cpu_addr);
void *dma_common_contiguous_remap(struct page *page, size_t size,
                        pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
                        pgprot_t prot, const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size);

void *dma_alloc_from_pool(struct device *dev, size_t size,
                          struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(struct device *dev, void *start, size_t size);

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_handle, gfp,
                        (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
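/*
 * Coherent allocations pair as follows (sketch; the 'ring' names are a
 * hypothetical descriptor-ring structure):
 *
 *	ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
 *					GFP_KERNEL);
 *	if (!ring->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
 */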

static inline u64 dma_get_mask(struct device *dev)
{
        if (dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same value.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same value as, or a smaller value than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
        int rc = dma_set_mask(dev, mask);
        if (rc == 0)
                dma_set_coherent_mask(dev, mask);
        return rc;
}
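/*
 * The usual probe-time pattern (sketch): prefer a wide mask and fall
 * back to 32 bits if the platform rejects it:
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */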

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
        dev->dma_mask = &dev->coherent_dma_mask;
        return dma_set_mask_and_coherent(dev, mask);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:        device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
static inline bool dma_addressing_limited(struct device *dev)
{
        return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
                            dma_get_required_mask(dev);
}

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
                return dev->dma_parms->max_segment_size;
        return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (dev->dma_parms) {
                dev->dma_parms->max_segment_size = size;
                return 0;
        }
        return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                return dev->dma_parms->segment_boundary_mask;
        return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
        if (dev->dma_parms) {
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }
        return -EIO;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                            dma_addr_t device_addr, size_t size)
{
        return -ENOSYS;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        return dmam_alloc_attrs(dev, size, dma_handle, gfp,
                        (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t gfp)
{
        unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

        if (gfp & __GFP_NOWARN)
                attrs |= DMA_ATTR_NO_WARN;

        return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}

static inline void dma_free_wc(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_addr,
                              DMA_ATTR_WRITE_COMBINE);
}

static inline int dma_mmap_wc(struct device *dev,
                              struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
{
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                              DMA_ATTR_WRITE_COMBINE);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
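/*
 * Example (sketch): embed the unmap state in a driver structure so the
 * fields compile away when CONFIG_NEED_DMA_MAP_STATE is not set.  The
 * struct and variable names below are hypothetical:
 *
 *	struct foo_tx_buffer {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, addr, mapping);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, addr),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */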

#endif /* _LINUX_DMA_MAPPING_H */
