search for: virt_to_dma

Displaying 14 results from an estimated 14 matches for "virt_to_dma".

2020 Aug 19
0
[PATCH 23/28] lib82596: convert from dma_cache_sync to dma_sync_single_for_device
Use the proper modern API to transfer cache ownership for incoherent DMA. Note that this moves the DMA helpers to the main lib82596.c file, so that they can use virt_to_dma. Signed-off-by: Christoph Hellwig <hch at lst.de> --- drivers/net/ethernet/i825xx/lasi_82596.c | 11 +-- drivers/net/ethernet/i825xx/lib82596.c | 114 ++++++++++++++--------- drivers/net/ethernet/i825xx/sni_82596.c | 4 - 3 files changed, 73 insertions(+), 56 deletions(-) diff --git...
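For orientation, here is a minimal sketch of the helper pattern this conversion introduces: a virt_to_dma() translation from a descriptor's kernel-virtual address to its bus address, plus a sync wrapper that transfers cache ownership to the device. The struct fields, wrapper arguments and the DMA direction are assumptions modelled on the excerpts below, not a verbatim copy of the patch.

#include <linux/dma-mapping.h>

/* Hypothetical private state: one DMA block holding all descriptors.
 * Field names follow the excerpts; treat them as illustrative only. */
struct i596_private_sketch {
	void		*dma;		/* CPU address of the descriptor block */
	dma_addr_t	dma_addr;	/* device address of the same block */
};

static inline dma_addr_t virt_to_dma(struct i596_private_sketch *lp,
				     volatile void *v)
{
	/* bus address = block base + offset of the descriptor within it */
	return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
}

static inline void dma_sync_dev(struct device *dev,
				struct i596_private_sketch *lp,
				volatile void *addr, size_t len)
{
	/* replaces dma_cache_sync(): hand the descriptor back to the device */
	dma_sync_single_for_device(dev, virt_to_dma(lp, addr), len,
				   DMA_BIDIRECTIONAL);
}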
2020 Sep 14
2
[PATCH 11/17] sgiseeq: convert to dma_alloc_noncoherent
...6.c index b4e4b3eb5758b5..ca2fb303fcc6f6 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -365,13 +365,44 @@ static int max_cmd_backlog = TX_RING_SIZE-1; static void i596_poll_controller(struct net_device *dev); #endif +static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v) +{ + return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma); +} + +#ifdef NONCOHERENT_DMA +static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr, + size_t len) +{ + dma_sync_single_for_device(ndev->dev.parent, +...
2020 Sep 01
2
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...inline void dma_sync_desc_cpu(struct net_device *dev, void *addr) > { > - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), > - DMA_FROM_DEVICE); > + struct sgiseeq_private *sp = netdev_priv(dev); > + > + dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr), > + sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL); > } > > static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) > { > - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), > - DMA_TO_DEVICE); > + struct...
2020 Sep 15
0
[PATCH 11/18] lib82596: convert to dma_alloc_noncoherent
...6.c index b4e4b3eb5758b5..ca2fb303fcc6f6 100644 --- a/drivers/net/ethernet/i825xx/lib82596.c +++ b/drivers/net/ethernet/i825xx/lib82596.c @@ -365,13 +365,44 @@ static int max_cmd_backlog = TX_RING_SIZE-1; static void i596_poll_controller(struct net_device *dev); #endif +static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v) +{ + return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma); +} + +#ifdef NONCOHERENT_DMA +static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr, + size_t len) +{ + dma_sync_single_for_device(ndev->dev.parent, +...
2020 Sep 15
0
[PATCH 12/18] sgiseeq: convert to dma_alloc_noncoherent
...ct sgiseeq_private { static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr) { - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), - DMA_FROM_DEVICE); + struct sgiseeq_private *sp = netdev_priv(dev); + + dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr), + sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL); } static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) { - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), - DMA_TO_DEVICE); + struct sgiseeq_private *sp = netdev_priv(dev);...
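The sgiseeq side of the series follows the same shape: dma_cache_sync() took a kernel-virtual address, while dma_sync_single_for_{cpu,device}() take a dma_addr_t, so the driver needs a VIRT_TO_DMA() translation. A sketch of the full pair, with the translation macro written out as an assumption (the real macro and struct layout may differ):

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

/* Illustrative translation: descriptor rings live in one block whose CPU
 * address is sp->srings and whose device address is sp->srings_dma. */
#define VIRT_TO_DMA(sp, v) \
	((sp)->srings_dma + ((unsigned long)(v) - (unsigned long)(sp)->srings))

static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	/* CPU is about to read a descriptor the device may have written */
	dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
				sizeof(struct sgiseeq_rx_desc),
				DMA_BIDIRECTIONAL);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	/* CPU updates are done; make them visible to the device */
	dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
				   sizeof(struct sgiseeq_rx_desc),
				   DMA_BIDIRECTIONAL);
}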
2020 Sep 02
1
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...ine void dma_sync_desc_cpu(struct net_device *dev, void *addr) { - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), - DMA_FROM_DEVICE); + struct sgiseeq_private *sp = netdev_priv(dev); + + dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr), + sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE); } static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) { - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), - DMA_TO_DEVICE); + stru...
2020 Sep 15
0
[PATCH 13/18] 53c700: convert to dma_alloc_noncoherent
...iff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index c59226d7e2f6b5..5117d90ccd9edf 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -269,18 +269,25 @@ NCR_700_get_SXFER(struct scsi_device *SDp) spi_period(SDp->sdev_target)); } +static inline dma_addr_t virt_to_dma(struct NCR_700_Host_Parameters *h, void *p) +{ + return h->pScript + ((uintptr_t)p - (uintptr_t)h->script); +} + static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h, void *addr, size_t size) { if (h->noncoherent) - dma_cache_sync(h->dev, addr, size, DMA_TO_DEVIC...
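Here the translation helper maps an offset inside the host's SCRIPTS buffer (h->script) onto its device address (h->pScript), and the sync wrapper only does work when the buffer was allocated non-coherently. A rough sketch of that conditional wrapper; the direction and the coherent no-op branch are assumptions, not quoted from the patch:

static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h,
				   void *addr, size_t size)
{
	/* coherent allocations need no explicit cache maintenance */
	if (!h->noncoherent)
		return;

	/* non-coherent case: push CPU writes out before the chip reads them */
	dma_sync_single_for_device(h->dev, virt_to_dma(h, addr), size,
				   DMA_BIDIRECTIONAL);
}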
2020 Sep 01
3
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...*addr) > > > { > > > - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), > > > - DMA_FROM_DEVICE); > > > + struct sgiseeq_private *sp = netdev_priv(dev); > > > + > > > + dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr), > > > + sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL); > > > } > > > > > > static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) > > > { > > > - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgi...
2020 Aug 19
0
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...ct sgiseeq_private { static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr) { - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), - DMA_FROM_DEVICE); + struct sgiseeq_private *sp = netdev_priv(dev); + + dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr), + sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL); } static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) { - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), - DMA_TO_DEVICE); + struct sgiseeq_private *sp = netdev_priv(dev);...
2020 Sep 01
0
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...(struct net_device *dev, void *addr) > > { > > - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), > > - DMA_FROM_DEVICE); > > + struct sgiseeq_private *sp = netdev_priv(dev); > > + > > + dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr), > > + sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL); > > } > > > > static inline void dma_sync_desc_dev(struct net_device *dev, void *addr) > > { > > - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc), > > -...
2020 Sep 14
20
a saner API for allocating DMA addressable pages v2
Hi all, this series replaced the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs with a separate new dma_alloc_pages API, which is available on all platforms. In addition to cleaning up the convoluted code path, this ensures that other drivers that have asked for better support for non-coherent DMA to pages, without incurring bounce buffering, can finally be properly supported. I'm still a
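The cover letters describe the new allocator this series adds. As a minimal usage sketch (the device, ring structure and function names below are placeholders, not taken from any driver in these threads): dma_alloc_noncoherent() hands back a kernel-virtual buffer plus its dma_addr_t, and ownership is then passed back and forth with the dma_sync_single_for_*() calls that the per-driver patches above switch to.

#include <linux/dma-mapping.h>

struct my_ring {				/* placeholder driver state */
	void		*desc;			/* CPU view of the ring */
	dma_addr_t	desc_dma;		/* device view of the ring */
	size_t		size;
};

static int my_ring_alloc(struct device *dev, struct my_ring *r, size_t size)
{
	/* may return non-coherent memory: explicit syncs are then required */
	r->desc = dma_alloc_noncoherent(dev, size, &r->desc_dma,
					DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!r->desc)
		return -ENOMEM;
	r->size = size;
	return 0;
}

static void my_ring_hand_to_device(struct device *dev, struct my_ring *r)
{
	/* replaces the old dma_cache_sync() ownership hand-off */
	dma_sync_single_for_device(dev, r->desc_dma, r->size,
				   DMA_BIDIRECTIONAL);
}

static void my_ring_free(struct device *dev, struct my_ring *r)
{
	dma_free_noncoherent(dev, r->size, r->desc, r->desc_dma,
			     DMA_BIDIRECTIONAL);
}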
2020 Sep 15
32
a saner API for allocating DMA addressable pages v3
Hi all, this series replaced the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs with a separate new dma_alloc_pages API, which is available on all platforms. In addition to cleaning up the convoluted code path, this ensures that other drivers that have asked for better support for non-coherent DMA to pages, without incurring bounce buffering, can finally be properly supported. As a follow up I
2020 Aug 19
39
a saner API for allocating DMA addressable pages
Hi all, this series replaced the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs with a separate new dma_alloc_pages API, which is available on all platforms. In addition to cleaning up the convoluted code path, this ensures that other drivers that have asked for better support for non-coherent DMA to pages, without incurring bounce buffering, can finally be properly supported. I'm still a
2013 Oct 17
42
[PATCH v8 0/19] enable swiotlb-xen on arm and arm64
Hi all, this patch series enables xen-swiotlb on arm and arm64. It has been heavily reworked compared to the previous versions in order to achieve better performance and to address review comments. We are not using dma_mark_clean to ensure coherency anymore. We call the platform implementation of map_page and unmap_page. We assume that dom0 has been mapped 1:1 (physical address == machine