From 553ac78877242b6d8b591323731df304140d0f99 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski
Date: Wed, 29 Feb 2012 14:45:28 +0100
Subject: ARM: dma-mapping: introduce DMA_ERROR_CODE constant

Replace all uses of ~0 with DMA_ERROR_CODE, which should make the code
easier to read.

Signed-off-by: Marek Szyprowski
Acked-by: Kyungmin Park
Tested-By: Subash Patel
---
 arch/arm/include/asm/dma-mapping.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index cb3b7c981c4..6a838da03c5 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -10,6 +10,8 @@
 #include
 #include
 
+#define DMA_ERROR_CODE	(~0)
+
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
 #endif
@@ -123,7 +125,7 @@ extern int dma_set_mask(struct device *, u64);
  */
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return dma_addr == ~0;
+	return dma_addr == DMA_ERROR_CODE;
 }
 
 /*
--
cgit v1.2.3
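A note on usage: with DMA_ERROR_CODE in place, drivers still test mapping
results through dma_mapping_error() rather than comparing against the
constant directly. A minimal sketch (the device, buffer, and length names
are hypothetical):

	dma_addr_t dma_addr;

	/* Map a driver buffer; dma_mapping_error() hides the arch-specific
	 * error encoding (all-bits-set on ARM, i.e. DMA_ERROR_CODE). */
	dma_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "DMA mapping failed\n");
		return -ENOMEM;
	}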
From a227fb92a0f5f0dd8282719386e9b3a29f0d16b2 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski
Date: Fri, 10 Feb 2012 19:55:20 +0100
Subject: ARM: dma-mapping: remove offset parameter to prepare for generic dma_ops

This patch removes the need for the offset parameter in the dma bounce
functions. This is required to let the dma-mapping framework on the ARM
architecture use the common, generic dma_map_ops based dma-mapping
helpers.

Background and more detailed explanation:

The dma_*_range_* functions have been available since the early days of
the dma mapping api. They are the correct way of doing partial syncs on
a buffer (usually used by network device drivers). This patch changes
only the internal implementation of the dma bounce functions to let them
tunnel through the dma_map_ops structure. The driver api stays
unchanged, so drivers are still obliged to call dma_*_range_* functions
to keep the code clean and easy to understand.

The only drawback of this patch is reduced detection of dma api abuse.
Consider the following code:

dma_addr = dma_map_single(dev, ptr, 64, DMA_TO_DEVICE);
dma_sync_single_range_for_cpu(dev, dma_addr+16, 0, 32, DMA_TO_DEVICE);

Without the patch such code fails, because the dma bounce code is unable
to find the bounce buffer for the given dma_address. After the patch the
above sync call is equivalent to:

dma_sync_single_range_for_cpu(dev, dma_addr, 16, 32, DMA_TO_DEVICE);

which succeeds. I don't consider this a real problem, because dma api
abuse should be caught by the debug_dma_* function family. This patch
lets us simplify the internal low-level implementation without changing
the driver-visible API.

Signed-off-by: Marek Szyprowski
Acked-by: Kyungmin Park
Tested-By: Subash Patel
---
 arch/arm/include/asm/dma-mapping.h | 67 +++++++++++++++++++-------------------
 1 file changed, 33 insertions(+), 34 deletions(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 6a838da03c5..eeddbe201e2 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -266,19 +266,17 @@ extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
 /*
  * Private functions
  */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
-		size_t, enum dma_data_direction);
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
 #else
 static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
+	size_t size, enum dma_data_direction dir)
 {
 	return 1;
 }
 
 static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
+	size_t size, enum dma_data_direction dir)
 {
 	return 1;
 }
@@ -401,6 +399,33 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
 	__dma_unmap_page(dev, handle, size, dir);
 }
 
+
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+
+	debug_dma_sync_single_for_cpu(dev, handle, size, dir);
+
+	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
+		return;
+
+	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	BUG_ON(!valid_dma_direction(dir));
+
+	debug_dma_sync_single_for_device(dev, handle, size, dir);
+
+	if (!dmabounce_sync_for_device(dev, handle, size, dir))
+		return;
+
+	__dma_single_cpu_to_dev(dma_to_virt(dev, handle), size, dir);
+}
+
 /**
  * dma_sync_single_range_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -423,40 +448,14 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
+	dma_sync_single_for_cpu(dev, handle + offset, size, dir);
 }
 
 static inline void dma_sync_single_range_for_device(struct device *dev,
 		dma_addr_t handle, unsigned long offset, size_t size,
 		enum dma_data_direction dir)
 {
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
-
-	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
-		return;
-
-	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
-}
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-	dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-	dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+	dma_sync_single_for_device(dev, handle + offset, size, dir);
 }
 
 /*
--
cgit v1.2.3
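To illustrate the unchanged driver-visible semantics described above: a
network-style driver that wants the CPU to look at part of a mapped buffer
still uses the *_range_* calls, which now simply fold the offset into the
dma address before tunnelling through the single-sync path. A sketch (the
variable names are hypothetical):

	/* CPU wants to inspect bytes 16..47 of a buffer previously mapped
	 * with dma_map_single(dev, ptr, 64, DMA_FROM_DEVICE). */
	dma_sync_single_range_for_cpu(dev, dma_addr, 16, 32, DMA_FROM_DEVICE);
	/* ... examine the data ... */
	dma_sync_single_range_for_device(dev, dma_addr, 16, 32, DMA_FROM_DEVICE);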
From 2dc6a016bbedf18f18ad73997e5338307d6dbde9 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski
Date: Fri, 10 Feb 2012 19:55:20 +0100
Subject: ARM: dma-mapping: use asm-generic/dma-mapping-common.h

This patch modifies the dma-mapping implementation on the ARM
architecture to use the common dma_map_ops structure and the
asm-generic/dma-mapping-common.h helpers.

Signed-off-by: Marek Szyprowski
Acked-by: Kyungmin Park
Tested-By: Subash Patel
---
 arch/arm/include/asm/device.h      |   1 +
 arch/arm/include/asm/dma-mapping.h | 196 +++++--------------------------------
 2 files changed, 28 insertions(+), 169 deletions(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 7aa368003b0..6e2cb0ee770 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,6 +7,7 @@
 #define ASMARM_DEVICE_H
 
 struct dev_archdata {
+	struct dma_map_ops	*dma_ops;
 #ifdef CONFIG_DMABOUNCE
 	struct dmabounce_device_info *dmabounce;
 #endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index eeddbe201e2..6725a08a5c2 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -11,6 +11,27 @@
 #include
 
 #define DMA_ERROR_CODE	(~0)
+extern struct dma_map_ops arm_dma_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+	if (dev && dev->archdata.dma_ops)
+		return dev->archdata.dma_ops;
+	return &arm_dma_ops;
+}
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+	BUG_ON(!dev);
+	dev->archdata.dma_ops = ops;
+}
+
+#include
+
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+	return get_dma_ops(dev)->set_dma_mask(dev, mask);
+}
 
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
@@ -119,7 +140,6 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 
 extern int dma_supported(struct device *, u64);
 extern int dma_set_mask(struct device *, u64);
-
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
@@ -297,179 +317,17 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 }
 #endif /* CONFIG_DMABOUNCE */
 
-/**
- * dma_map_single - map a single buffer for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @cpu_addr: CPU direct mapped address of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_single() or
- * dma_sync_single_for_cpu().
- */
-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	unsigned long offset;
-	struct page *page;
-	dma_addr_t addr;
-
-	BUG_ON(!virt_addr_valid(cpu_addr));
-	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
-	BUG_ON(!valid_dma_direction(dir));
-
-	page = virt_to_page(cpu_addr);
-	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
-	addr = __dma_map_page(dev, page, offset, size, dir);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
-
-	return addr;
-}
-
-/**
- * dma_map_page - map a portion of a page for streaming DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Ensure that any data held in the cache is appropriately discarded
- * or written back.
- *
- * The device owns this memory once this call has completed.  The CPU
- * can regain ownership by calling dma_unmap_page().
- */
-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	dma_addr_t addr;
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	addr = __dma_map_page(dev, page, offset, size, dir);
-	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
-
-	return addr;
-}
-
-/**
- * dma_unmap_single - unmap a single buffer previously mapped
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_single)
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Unmap a single streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_single() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_page(dev, handle, size, dir);
-}
-
-/**
- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Unmap a page streaming mode DMA translation.  The handle and size
- * must match what was provided in the previous dma_map_page() call.
- * All other usages are undefined.
- *
- * After this call, reads by the CPU to the buffer are guaranteed to see
- * whatever the device wrote there.
- */
-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	debug_dma_unmap_page(dev, handle, size, dir, false);
-	__dma_unmap_page(dev, handle, size, dir);
-}
-
-
-static inline void dma_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_cpu(dev, handle, size, dir);
-
-	if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
-		return;
-
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
-static inline void dma_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	BUG_ON(!valid_dma_direction(dir));
-
-	debug_dma_sync_single_for_device(dev, handle, size, dir);
-
-	if (!dmabounce_sync_for_device(dev, handle, size, dir))
-		return;
-
-	__dma_single_cpu_to_dev(dma_to_virt(dev, handle), size, dir);
-}
-
-/**
- * dma_sync_single_range_for_cpu
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @handle: DMA address of buffer
- * @offset: offset of region to start sync
- * @size: size of region to sync
- * @dir: DMA transfer direction (same as passed to dma_map_single)
- *
- * Make physical memory consistent for a single streaming mode DMA
- * translation after a transfer.
- *
- * If you perform a dma_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so.  At the
- * next point you give the PCI dma address back to the card, you
- * must first the perform a dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t handle, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	dma_sync_single_for_cpu(dev, handle + offset, size, dir);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t handle, unsigned long offset, size_t size,
-		enum dma_data_direction dir)
-{
-	dma_sync_single_for_device(dev, handle + offset, size, dir);
-}
-
 /*
  * The scatter list versions of the above methods.
  */
-extern int dma_map_sg(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
+extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
+		enum dma_data_direction, struct dma_attrs *attrs);
+extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
 		enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
-		enum dma_data_direction);
-
 #endif /* __KERNEL__ */
 #endif
--
cgit v1.2.3
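With get_dma_ops()/set_dma_ops() in place, platform code can install a
per-device operations table; any device without one falls back to the
global arm_dma_ops. A minimal sketch under the assumption of a custom
table called my_dma_ops (a hypothetical name, not part of this series):

	extern struct dma_map_ops my_dma_ops;

	static void __init my_platform_dma_init(struct device *dev)
	{
		/* Stored in dev->archdata.dma_ops; the generic helpers from
		 * asm-generic/dma-mapping-common.h dispatch through it. */
		set_dma_ops(dev, &my_dma_ops);
	}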
From 15237e1f505b3e5c2276f240b01cd2133e110cbc Mon Sep 17 00:00:00 2001
From: Marek Szyprowski
Date: Fri, 10 Feb 2012 19:55:20 +0100
Subject: ARM: dma-mapping: move all dma bounce code to separate dma ops structure

This patch removes the dma bounce hooks from the common dma mapping
implementation on the ARM architecture and creates a separate set of
dma_map_ops for dma bounce devices.

Signed-off-by: Marek Szyprowski
Acked-by: Kyungmin Park
Tested-By: Subash Patel
---
 arch/arm/include/asm/dma-mapping.h | 99 +-------------------------------------
 1 file changed, 2 insertions(+), 97 deletions(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 6725a08a5c2..7a7c3c762f5 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -84,62 +84,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 }
 #endif
 
-/*
- * The DMA API is built upon the notion of "buffer ownership".  A buffer
- * is either exclusively owned by the CPU (and therefore may be accessed
- * by it) or exclusively owned by the DMA device.  These helper functions
- * represent the transitions between these two ownership states.
- *
- * Note, however, that on later ARMs, this notion does not work due to
- * speculative prefetches.  We model our approach on the assumption that
- * the CPU does do speculative prefetches, which means we clean caches
- * before transfers and delay cache invalidation until transfer completion.
- *
- * Private support functions: these are not part of the API and are
- * liable to change.  Drivers must not use these.
- */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_cpu_to_dev(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_dev_to_cpu(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-extern int dma_supported(struct device *, u64);
-extern int dma_set_mask(struct device *, u64);
-
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
@@ -163,6 +107,8 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 {
 }
 
+extern int dma_supported(struct device *dev, u64 mask);
+
 /**
  * dma_alloc_coherent - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -235,7 +181,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
 
 extern void __init init_consistent_dma_size(unsigned long size);
 
-#ifdef CONFIG_DMABOUNCE
 /*
  * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
  * and utilize bounce buffers as needed to work around limited DMA windows.
@@ -275,47 +220,7 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/*
- * The DMA API, implemented by dmabounce.c.  See below for descriptions.
- */
-extern dma_addr_t __dma_map_page(struct device *, struct page *,
-		unsigned long, size_t, enum dma_data_direction);
-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
-
-/*
- * Private functions
- */
-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
-#else
-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
-	size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
-	size_t size, enum dma_data_direction dir)
-{
-	return 1;
-}
-
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-	unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
-}
-#endif /* CONFIG_DMABOUNCE */
-
 
 /*
  * The scatter list versions of the above methods.
--
cgit v1.2.3
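Conceptually, the bounce hooks removed above survive as a self-contained
dma_map_ops table inside the dmabounce code, installed only for devices
that register via dmabounce_register_dev(). The sketch below shows the
intended shape of such a table; the member function names are
illustrative placeholders, not the actual symbols from this series:

	static struct dma_map_ops dmabounce_ops = {
		.map_page		= dmabounce_map_page,	/* hypothetical */
		.unmap_page		= dmabounce_unmap_page,	/* hypothetical */
		.sync_single_for_cpu	= dmabounce_sync_cpu,	/* hypothetical */
		.sync_single_for_device	= dmabounce_sync_dev,	/* hypothetical */
	};

	/* dmabounce_register_dev() can then install it per device: */
	set_dma_ops(dev, &dmabounce_ops);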
From f99d60341238fe73fc514129cd9ae4e44e1b2c47 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski
Date: Wed, 16 May 2012 18:31:23 +0200
Subject: ARM: dma-mapping: use alloc, mmap, free from dma_ops

This patch converts the dma_alloc/free/mmap_{coherent,writecombine}
functions to use the generic alloc/free/mmap methods from the
dma_map_ops structure. A new DMA_ATTR_WRITE_COMBINE DMA attribute has
been introduced to implement the writecombine methods.

Signed-off-by: Marek Szyprowski
Acked-by: Kyungmin Park
Acked-by: Arnd Bergmann
Tested-By: Subash Patel
---
 arch/arm/include/asm/dma-mapping.h | 107 ++++++++++++++++++++++++++-----------
 1 file changed, 77 insertions(+), 30 deletions(-)

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7a7c3c762f5..bbef15d0489 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,6 +5,7 @@
 
 #include
 #include
+#include
 #include
 #include
 
@@ -110,68 +111,115 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
 
 extern int dma_supported(struct device *dev, u64 mask);
 
 /**
- * dma_alloc_coherent - allocate consistent memory for DMA
+ * arm_dma_alloc - allocate consistent memory for DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @size: required memory size
  * @handle: bus-specific DMA address
+ * @attrs: optional attributes that specify mapping properties
  *
- * Allocate some uncached, unbuffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
+ * Allocate some memory for a device for performing DMA.  This function
+ * allocates pages, and will return the CPU-viewed address, and sets @handle
+ * to be the device-viewed address.
 */
-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+			   gfp_t gfp, struct dma_attrs *attrs);
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	void *cpu_addr;
+	BUG_ON(!ops);
+
+	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+	return cpu_addr;
+}
 
 /**
- * dma_free_coherent - free memory allocated by dma_alloc_coherent
+ * arm_dma_free - free memory allocated by arm_dma_alloc
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @size: size of memory originally requested in dma_alloc_coherent
  * @cpu_addr: CPU-view address returned from dma_alloc_coherent
  * @handle: device-view address returned from dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
  *
  * Free (and unmap) a DMA buffer previously allocated by
- * dma_alloc_coherent().
+ * arm_dma_alloc().
  *
  * References to memory and mappings associated with cpu_addr/handle
  * during and after this call executing are illegal.
  */
-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+			 dma_addr_t handle, struct dma_attrs *attrs);
+
+#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+
+	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
 
 /**
- * dma_mmap_coherent - map a coherent DMA allocation into user space
+ * arm_dma_mmap - map a coherent DMA allocation into user space
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @vma: vm_area_struct describing requested user mapping
  * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
  * @handle: device-view address returned from dma_alloc_coherent
  * @size: size of memory originally requested in dma_alloc_coherent
+ * @attrs: optional attributes that specify mapping properties
  *
  * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
  * into user space.  The coherent DMA buffer must not be freed by the
  * driver until the user space mapping has been released.
  */
-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
-		void *, dma_addr_t, size_t);
+extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+			void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			struct dma_attrs *attrs);
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
 
-/**
- * dma_alloc_writecombine - allocate writecombining memory for DMA
- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
- * @size: required memory size
- * @handle: bus-specific DMA address
- *
- * Allocate some uncached, buffered memory for a device for
- * performing DMA.  This function allocates pages, and will
- * return the CPU-viewed address, and sets @handle to be the
- * device-viewed address.
- */
-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
-		gfp_t);
+static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+				 void *cpu_addr, dma_addr_t dma_addr,
+				 size_t size, struct dma_attrs *attrs)
+{
+	struct dma_map_ops *ops = get_dma_ops(dev);
+	BUG_ON(!ops);
+	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+}
 
-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
-	dma_free_coherent(dev,size,cpu_addr,handle)
+static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+					   dma_addr_t *dma_handle, gfp_t flag)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
+}
 
-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
-		void *, dma_addr_t, size_t);
+static inline void dma_free_writecombine(struct device *dev, size_t size,
+					 void *cpu_addr, dma_addr_t dma_handle)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+					void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+	DEFINE_DMA_ATTRS(attrs);
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+}
 
 /*
  * This can be called during boot to increase the size of the consistent
@@ -180,7 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
  */
 extern void __init init_consistent_dma_size(unsigned long size);
 
-
 /*
  * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
  * and utilize bounce buffers as needed to work around limited DMA windows.
--
cgit v1.2.3
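From a driver's perspective the writecombine conversion above is invisible:
the old entry points remain, but now expand to the attrs-based helpers with
DMA_ATTR_WRITE_COMBINE set and dispatch through get_dma_ops(dev)->alloc().
A typical framebuffer-style allocation (the variable names are
hypothetical):

	dma_addr_t dma_handle;
	void *vaddr;

	/* Expands to dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
	 * &attrs) with DMA_ATTR_WRITE_COMBINE set in attrs. */
	vaddr = dma_alloc_writecombine(dev, size, &dma_handle, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	/* ... use the buffer ... */
	dma_free_writecombine(dev, size, vaddr, dma_handle);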
From 4ce63fcd919c32d22528e54dcd89506962933719 Mon Sep 17 00:00:00 2001
From: Marek Szyprowski
Date: Wed, 16 May 2012 15:48:21 +0200
Subject: ARM: dma-mapping: add support for IOMMU mapper

This patch adds a complete implementation of the DMA-mapping API for
devices that have IOMMU support. This implementation tries to optimize
dma address space usage by remapping all possible physical memory
chunks into a single dma address space chunk.

DMA address space is managed on top of the bitmap stored in the
dma_iommu_mapping structure stored in device->archdata. Platform setup
code has to initialize the parameters of the dma address space (base
address, size, allocation precision order) with the
arm_iommu_create_mapping() function. To reduce the size of the bitmap,
all allocations are aligned to the specified order of base 4 KiB pages.

dma_alloc_* functions allocate physical memory in chunks, each with the
alloc_pages() function, to avoid failing if physical memory gets
fragmented. In the worst case the allocated buffer is composed of
4 KiB page chunks.

The dma_map_sg() function minimizes the total number of dma address
space chunks by merging physical memory chunks into one larger dma
address space chunk. If the requested chunk (scatter list entry)
boundaries match physical page boundaries, most dma_map_sg() requests
will result in creating only one chunk in the dma address space.

dma_map_page() simply creates a mapping for the given page(s) in the
dma address space.

All dma functions also perform the required cache operations, like
their counterparts from the arm linear physical memory mapping version.

This patch contains code and fixes kindly provided by:
- Krishna Reddy
- Andrzej Pietrasiewicz
- Hiroshi DOYU

Signed-off-by: Marek Szyprowski
Acked-by: Kyungmin Park
Reviewed-by: Konrad Rzeszutek Wilk
Tested-By: Subash Patel
---
 arch/arm/include/asm/device.h    |  3 +++
 arch/arm/include/asm/dma-iommu.h | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 37 insertions(+)
 create mode 100644 arch/arm/include/asm/dma-iommu.h

(limited to 'arch/arm/include/asm')

diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 6e2cb0ee770..b69c0d3285f 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -14,6 +14,9 @@ struct dev_archdata {
 #ifdef CONFIG_IOMMU_API
 	void *iommu; /* private IOMMU data */
 #endif
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+	struct dma_iommu_mapping	*mapping;
+#endif
 };
 
 struct omap_device;
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 00000000000..799b09409fa
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,34 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include
+#include
+#include
+#include
+
+struct dma_iommu_mapping {
+	/* iommu specific data */
+	struct iommu_domain	*domain;
+
+	void			*bitmap;
+	size_t			bits;
+	unsigned int		order;
+	dma_addr_t		base;
+
+	spinlock_t		lock;
+	struct kref		kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+			 int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping);
+
+#endif /* __KERNEL__ */
+#endif
--
cgit v1.2.3
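As the commit message says, platform setup code is expected to create a
mapping and attach it to the device before any DMA takes place. A minimal
sketch of such setup, assuming the usual ERR_PTR convention for
arm_iommu_create_mapping() failures and arbitrary example values for base,
size, and order:

	struct dma_iommu_mapping *mapping;

	/* A 128 MiB IOMMU-backed DMA window at bus address 0x80000000,
	 * managed at 4 KiB granularity (order 0); values are examples. */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	if (arm_iommu_attach_device(dev, mapping)) {
		arm_iommu_release_mapping(mapping);
		return -EINVAL;
	}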