
Commit bb0e391

Christoph Hellwig authored and committed

dma-mapping: fix vmap and mmap of noncontiguous allocations

Commit b5c58b2 ("dma-mapping: direct calls for dma-iommu") switched to use
direct calls to dma-iommu, but missed the dma_vmap_noncontiguous,
dma_vunmap_noncontiguous and dma_mmap_noncontiguous behavior keyed off the
presence of the alloc_noncontiguous method.

Fix this by removing the now unused alloc_noncontiguous and free_noncontiguous
methods and moving the vmapping and mmaping of the noncontiguous allocations
into the iommu code, as it is the only provider of actually noncontiguous
allocations.

Fixes: b5c58b2 ("dma-mapping: direct calls for dma-iommu")
Reported-by: Xi Ruoyao <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Reviewed-by: Leon Romanovsky <[email protected]>
Tested-by: Xi Ruoyao <[email protected]>
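
For reference, the interface involved is the driver-facing noncontiguous DMA
allocation API whose internal dispatch the hunks below rework. A minimal,
hypothetical driver-side sketch (the helper names, DMA direction and the
surrounding error handling are placeholders, not part of this commit) of how
such a buffer is allocated, vmapped and torn down:

#include <linux/dma-mapping.h>

/*
 * Hypothetical helpers, for illustration only: allocate a (possibly)
 * noncontiguous DMA buffer and map it into the kernel address space.
 * Whether the backing memory is truly scattered (dma-iommu) or a single
 * contiguous chunk (the fallback path) is hidden behind the API; picking
 * the right vmap/mmap path for the scattered case is what this fix repairs.
 */
static void *example_alloc_and_vmap(struct device *dev, size_t size,
				    struct sg_table **sgtp)
{
	struct sg_table *sgt;
	void *vaddr;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return NULL;

	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
		return NULL;
	}

	*sgtp = sgt;
	return vaddr;
}

static void example_vunmap_and_free(struct device *dev, size_t size,
				    struct sg_table *sgt, void *vaddr)
{
	dma_vunmap_noncontiguous(dev, vaddr);
	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
}

On an IOMMU-backed device the buffer behind sgt can span many disjoint pages,
so dma_vmap_noncontiguous() and dma_mmap_noncontiguous() must go through the
page array kept by dma-iommu rather than assuming a single contiguous region.
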
1 parent 8826498 commit bb0e391

4 files changed (+49, -46 lines)

drivers/iommu/dma-iommu.c

Lines changed: 33 additions & 0 deletions
@@ -1038,6 +1038,21 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 	return NULL;
 }
 
+/*
+ * This is the actual return value from the iommu_dma_alloc_noncontiguous.
+ *
+ * The users of the DMA API should only care about the sg_table, but to make
+ * the DMA-API internal vmaping and freeing easier we stash away the page
+ * array as well (except for the fallback case). This can go away any time,
+ * e.g. when a vmap-variant that takes a scatterlist comes along.
+ */
+struct dma_sgt_handle {
+	struct sg_table sgt;
+	struct page **pages;
+};
+#define sgt_handle(sgt) \
+	container_of((sgt), struct dma_sgt_handle, sgt)
+
 struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
 {
@@ -1066,6 +1081,24 @@ void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 	kfree(sh);
 }
 
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+		struct sg_table *sgt)
+{
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
+}
+
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+		size_t size, struct sg_table *sgt)
+{
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
+		return -ENXIO;
+	return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
+}
+
 void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir)
 {
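
The two helpers added here lean on the dma_sgt_handle moved in above:
iommu_dma_alloc_noncontiguous() hands callers only the embedded sg_table, and
sgt_handle() walks back to the surrounding handle, and thus to the stashed
page array, via container_of(). A tiny sketch of that relationship,
illustration only (the function name and the local allocation merely stand in
for what the real allocator does internally):

static int sgt_handle_illustration(void)
{
	struct dma_sgt_handle *sh = kzalloc(sizeof(*sh), GFP_KERNEL);
	struct sg_table *sgt;

	if (!sh)
		return -ENOMEM;

	sgt = &sh->sgt;				/* what DMA API users get to see */
	WARN_ON(sgt_handle(sgt) != sh);		/* container_of() recovers the handle */

	kfree(sh);
	return 0;
}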

include/linux/dma-map-ops.h

Lines changed: 0 additions & 19 deletions
@@ -24,11 +24,6 @@ struct dma_map_ops {
 			gfp_t gfp);
 	void (*free_pages)(struct device *dev, size_t size, struct page *vaddr,
 			dma_addr_t dma_handle, enum dma_data_direction dir);
-	struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size,
-			enum dma_data_direction dir, gfp_t gfp,
-			unsigned long attrs);
-	void (*free_noncontiguous)(struct device *dev, size_t size,
-			struct sg_table *sgt, enum dma_data_direction dir);
 	int (*mmap)(struct device *, struct vm_area_struct *,
 			void *, dma_addr_t, size_t, unsigned long attrs);
 
@@ -206,20 +201,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
 }
 #endif /* CONFIG_DMA_GLOBAL_POOL */
 
-/*
- * This is the actual return value from the ->alloc_noncontiguous method.
- * The users of the DMA API should only care about the sg_table, but to make
- * the DMA-API internal vmaping and freeing easier we stash away the page
- * array as well (except for the fallback case). This can go away any time,
- * e.g. when a vmap-variant that takes a scatterlist comes along.
- */
-struct dma_sgt_handle {
-	struct sg_table sgt;
-	struct page **pages;
-};
-#define sgt_handle(sgt) \
-	container_of((sgt), struct dma_sgt_handle, sgt)
-
 int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);

include/linux/iommu-dma.h

Lines changed: 6 additions & 0 deletions
@@ -44,6 +44,12 @@ struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
 void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt, enum dma_data_direction dir);
+void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size,
+		struct sg_table *sgt);
+#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \
+	vunmap(vaddr);
+int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
+		size_t size, struct sg_table *sgt);
 void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 		size_t size, enum dma_data_direction dir);
 void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,

kernel/dma/mapping.c

Lines changed: 10 additions & 27 deletions
@@ -750,17 +750,14 @@ static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
 		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
 	struct sg_table *sgt;
 
 	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
 		return NULL;
 	if (WARN_ON_ONCE(gfp & __GFP_COMP))
 		return NULL;
 
-	if (ops && ops->alloc_noncontiguous)
-		sgt = ops->alloc_noncontiguous(dev, size, dir, gfp, attrs);
-	else if (use_dma_iommu(dev))
+	if (use_dma_iommu(dev))
 		sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
 	else
 		sgt = alloc_single_sgt(dev, size, dir, gfp);
@@ -786,13 +783,10 @@ static void free_single_sgt(struct device *dev, size_t size,
 void dma_free_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt, enum dma_data_direction dir)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
 	trace_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir, 0);
 	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
-	if (ops && ops->free_noncontiguous)
-		ops->free_noncontiguous(dev, size, sgt, dir);
-	else if (use_dma_iommu(dev))
+
+	if (use_dma_iommu(dev))
 		iommu_dma_free_noncontiguous(dev, size, sgt, dir);
 	else
 		free_single_sgt(dev, size, sgt, dir);
@@ -802,37 +796,26 @@ EXPORT_SYMBOL_GPL(dma_free_noncontiguous);
 void *dma_vmap_noncontiguous(struct device *dev, size_t size,
 		struct sg_table *sgt)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-	if (ops && ops->alloc_noncontiguous)
-		return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);
+	if (use_dma_iommu(dev))
+		return iommu_dma_vmap_noncontiguous(dev, size, sgt);
+
 	return page_address(sg_page(sgt->sgl));
 }
 EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);
 
 void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (ops && ops->alloc_noncontiguous)
-		vunmap(vaddr);
+	if (use_dma_iommu(dev))
+		iommu_dma_vunmap_noncontiguous(dev, vaddr);
 }
 EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);
 
 int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
 		size_t size, struct sg_table *sgt)
 {
-	const struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (ops && ops->alloc_noncontiguous) {
-		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-
-		if (vma->vm_pgoff >= count ||
-		    vma_pages(vma) > count - vma->vm_pgoff)
-			return -ENXIO;
-		return vm_map_pages(vma, sgt_handle(sgt)->pages, count);
-	}
+	if (use_dma_iommu(dev))
+		return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
 	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
 }
 EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
