Linux/include/linux/dma-mapping.h

#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
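
/*
 * Illustrative sketch (not part of this header): a driver keeps two views
 * of the same buffer -- a CPU virtual address for the kernel and a
 * dma_addr_t for the device -- and must never dereference the latter.
 * The names "dev", "size", "desc" and DESC_ADDR_LO are hypothetical.
 *
 *        void *vaddr;
 *        dma_addr_t handle;
 *
 *        vaddr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *        if (!vaddr)
 *                return -ENOMEM;
 *        writel(lower_32_bits(handle), desc + DESC_ADDR_LO); // device gets the bus address
 *        memset(vaddr, 0, size);                             // CPU uses the virtual address
 */
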
struct dma_map_ops {
        void* (*alloc)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp,
                                struct dma_attrs *attrs);
        void (*free)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle,
                              struct dma_attrs *attrs);
        int (*mmap)(struct device *, struct vm_area_struct *,
                          void *, dma_addr_t, size_t, struct dma_attrs *attrs);

        int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
                           dma_addr_t, size_t, struct dma_attrs *attrs);

        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                               unsigned long offset, size_t size,
                               enum dma_data_direction dir,
                               struct dma_attrs *attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir,
                           struct dma_attrs *attrs);
        /*
         * map_sg returns 0 on error and a value > 0 on success.
         * It should never return a value < 0.
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
                      int nents, enum dma_data_direction dir,
                      struct dma_attrs *attrs);
        void (*unmap_sg)(struct device *dev,
                         struct scatterlist *sg, int nents,
                         enum dma_data_direction dir,
                         struct dma_attrs *attrs);
        void (*sync_single_for_cpu)(struct device *dev,
                                    dma_addr_t dma_handle, size_t size,
                                    enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
                                       dma_addr_t dma_handle, size_t size,
                                       enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev,
                                struct scatterlist *sg, int nents,
                                enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev,
                                   struct scatterlist *sg, int nents,
                                   enum dma_data_direction dir);
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
        int (*dma_supported)(struct device *dev, u64 mask);
        int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
        u64 (*get_required_mask)(struct device *dev);
#endif
        int is_phys;
};

extern struct dma_map_ops dma_noop_ops;

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
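
/*
 * For example, DMA_BIT_MASK(32) evaluates to 0x00000000ffffffffULL.  The
 * (n) == 64 special case exists because shifting a 64-bit value by 64
 * bits is undefined behaviour in C, so ~0ULL is returned directly.
 */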

#define DMA_MASK_NONE   0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
        return ((dma_direction == DMA_BIDIRECTIONAL) ||
                (dma_direction == DMA_TO_DEVICE) ||
                (dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
        return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator itself;
 * don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
                            void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, size, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
        return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                                              size_t size,
                                              enum dma_data_direction dir,
                                              struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, virt_to_page(ptr),
                             offset_in_page(ptr), size,
                             dir, attrs);
        debug_dma_map_page(dev, virt_to_page(ptr),
                           offset_in_page(ptr), size,
                           dir, addr, true);
        return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        int i, ents;
        struct scatterlist *s;

        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}
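
/*
 * Illustrative sketch (not part of this header): callers must treat a
 * return of 0 as failure and, when unmapping, pass the original nents,
 * not the (possibly smaller) count returned by the mapping call.  The
 * names "dev", "sgl" and "count" are hypothetical.
 *
 *        int mapped = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);
 *        if (mapped == 0)
 *                return -ENOMEM;
 *        // ... program the device with the "mapped" entries ...
 *        dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);  // original count!
 */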

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
                                      struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, NULL);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}
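
/*
 * Illustrative sketch (not part of this header): for a long-lived
 * streaming mapping, ownership of the buffer bounces between CPU and
 * device with the sync calls.  "dev", "handle", "buf" and process()
 * are hypothetical.
 *
 *        // device wrote into the buffer; claim it for the CPU
 *        dma_sync_single_for_cpu(dev, handle, size, DMA_FROM_DEVICE);
 *        process(buf);
 *        // hand it back before the device writes again
 *        dma_sync_single_for_device(dev, handle, size, DMA_FROM_DEVICE);
 */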

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t addr,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t addr,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
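
/*
 * Illustrative sketch (not part of this header): a typical streaming
 * single-buffer mapping around one device transfer.  Always check the
 * result with dma_mapping_error() before using it.  "dev", "buf" and
 * "len" are hypothetical.
 *
 *        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *        if (dma_mapping_error(dev, handle))
 *                return -ENOMEM;
 *        // ... start the transfer and wait for it to complete ...
 *        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */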

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
                        unsigned long vm_flags,
                        pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
                        unsigned long vm_flags, pgprot_t prot,
                        const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
               dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        if (ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
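
/*
 * Illustrative sketch (not part of this header): a driver's .mmap file
 * operation can forward straight to dma_mmap_coherent().  "struct mydev"
 * with its "dev", "cpu_addr", "handle" and "bufsize" fields is hypothetical.
 *
 *        static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *        {
 *                struct mydev *md = file->private_data;
 *
 *                return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
 *                                         md->handle, md->bufsize);
 *        }
 */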

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        if (ops->get_sgtable)
                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                        attrs);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
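
/*
 * Illustrative sketch (not part of this header): dma_get_sgtable() is
 * typically used to describe a coherent allocation as a scatterlist,
 * e.g. when exporting it through dma-buf.  The caller owns the filled-in
 * table and must free it.  "dev", "cpu_addr", "handle" and "bufsize"
 * are hypothetical.
 *
 *        struct sg_table sgt;
 *        int ret = dma_get_sgtable(dev, &sgt, cpu_addr, handle, bufsize);
 *        if (ret)
 *                return ret;
 *        // ... hand &sgt to the importer ...
 *        sg_free_table(&sgt);
 */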

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        BUG_ON(!ops);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        if (!arch_dma_alloc_attrs(&dev, &flag))
                return NULL;
        if (!ops->alloc)
                return NULL;

        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
                                     void *cpu_addr, dma_addr_t dma_handle,
                                     struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!ops);
        WARN_ON(irqs_disabled());

        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;

        if (!ops->free || !cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag)
{
        return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
{
        dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}
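
/*
 * Illustrative sketch (not part of this header): a coherent buffer is
 * visible to CPU and device simultaneously, so no sync calls are needed;
 * it is allocated once and freed with the exact same size and handle.
 * "dev" and RING_SIZE are hypothetical.
 *
 *        dma_addr_t ring_dma;
 *        void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *        if (!ring)
 *                return -ENOMEM;
 *        // ...
 *        dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */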

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
        return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
{
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
        dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);

        if (get_dma_ops(dev)->mapping_error)
                return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
        return dma_addr == DMA_ERROR_CODE;
#else
        return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops)
                return 0;
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->set_dma_mask)
                return ops->set_dma_mask(dev, mask);

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
        *dev->dma_mask = mask;
        return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
        if (dev && dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask))
                return -EIO;
        dev->coherent_dma_mask = mask;
        return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same value.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same as, or smaller than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
        int rc = dma_set_mask(dev, mask);
        if (rc == 0)
                dma_set_coherent_mask(dev, mask);
        return rc;
}
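
/*
 * Illustrative sketch (not part of this header): a probe routine usually
 * asks for the widest mask the hardware supports and falls back to 32 bits.
 * "pdev" (a struct pci_dev *) is hypothetical.
 *
 *        if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *                return -EIO;    // no usable DMA addressing
 */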

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask set up appropriately.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
        dev->dma_mask = &dev->coherent_dma_mask;
        return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                                      u64 size, const struct iommu_ops *iommu,
                                      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
                return dev->dma_parms->max_segment_size;
        return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
                                                unsigned int size)
{
        if (dev->dma_parms) {
                dev->dma_parms->max_segment_size = size;
                return 0;
        }
        return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                return dev->dma_parms->segment_boundary_mask;
        return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
        if (dev->dma_parms) {
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }
        return -EIO;
}
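
/*
 * Illustrative sketch (not part of this header): devices with segment
 * limits advertise them once at probe time so that the scatterlist code
 * can honour them.  Note that dev->dma_parms must already point at
 * storage (the bus or driver provides it); "dev" is hypothetical.
 *
 *        dma_set_max_seg_size(dev, SZ_64K);            // max bytes per segment
 *        dma_set_seg_boundary(dev, DMA_BIT_MASK(28));  // no segment may cross 256 MiB
 */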

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
        return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
{
        return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}
#endif

/* flags for the coherent memory API */
#define DMA_MEMORY_MAP                  0x01
#define DMA_MEMORY_IO                   0x02
#define DMA_MEMORY_INCLUDES_CHILDREN    0x04
#define DMA_MEMORY_EXCLUSIVE            0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                            dma_addr_t device_addr, size_t size, int flags)
{
        return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size)
{
        return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
                                  dma_addr_t dma_handle);
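
/*
 * Illustrative sketch (not part of this header): the dmam_* variants are
 * device-managed (devres), so the allocation is released automatically
 * when the device is unbound and no explicit free is required on the
 * error or teardown paths.  "dev" and DESC_BYTES are hypothetical.
 *
 *        dma_addr_t desc_dma;
 *        void *desc = dmam_alloc_coherent(dev, DESC_BYTES, &desc_dma, GFP_KERNEL);
 *        if (!desc)
 *                return -ENOMEM;  // nothing to unwind; devres handles it
 */
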
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
                                        phys_addr_t phys_addr,
                                        dma_addr_t device_addr, size_t size,
                                        int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
                                phys_addr_t phys_addr, dma_addr_t device_addr,
                                size_t size, int flags)
{
        return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t gfp)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
                              struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif
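
/*
 * Illustrative sketch (not part of this header): write-combining suits
 * buffers the CPU mostly streams writes into, such as a framebuffer or a
 * command ring, and that user space may map via dma_mmap_wc().  "dev"
 * and FB_BYTES are hypothetical.
 *
 *        dma_addr_t fb_dma;
 *        void *fb = dma_alloc_wc(dev, FB_BYTES, &fb_dma, GFP_KERNEL);
 *        if (!fb)
 *                return -ENOMEM;
 *        // ...
 *        dma_free_wc(dev, FB_BYTES, fb, fb_dma);
 */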

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)           (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)             (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
#endif
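
/*
 * Illustrative sketch (not part of this header): these macros let a driver
 * stash unmap parameters in its own structures at zero cost on platforms
 * that don't need them (with CONFIG_NEED_DMA_MAP_STATE unset they compile
 * away).  The struct and field names below are hypothetical; "slot" is a
 * pointer to one element.
 *
 *        struct mydrv_tx_slot {
 *                struct sk_buff *skb;
 *                DEFINE_DMA_UNMAP_ADDR(mapping);
 *                DEFINE_DMA_UNMAP_LEN(len);
 *        };
 *
 *        // at map time:
 *        dma_unmap_addr_set(slot, mapping, handle);
 *        dma_unmap_len_set(slot, len, skb->len);
 *
 *        // at completion time:
 *        dma_unmap_single(dev, dma_unmap_addr(slot, mapping),
 *                         dma_unmap_len(slot, len), DMA_TO_DEVICE);
 */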

#endif /* _LINUX_DMA_MAPPING_H */