Displaying 20 results from an estimated 33 matches for "dma_sync_single_for_device".
2020 Sep 02
1
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
On Tue, Sep 01, 2020 at 07:16:27PM +0200, Christoph Hellwig wrote:
> > Well, if IP22 doesn't speculate (which I'm pretty sure is the case),
> > dma_sync_single_for_cpu should indeed be a no-op. But then there
> > also shouldn't be anything in the cache, as the previous
> > dma_sync_single_for_device should have invalidated it. So it seems like
> > we are missing one (or more) ownership transfers to the device. I'll
> > try to look at the ownership management in a little more detail
> > tomorrow.
>
> this is the problem:
>
> /* Always check for received packets. */ ...
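The ownership-transfer sequence under discussion, as a minimal sketch for a non-cache-coherent platform (the function and names below are illustrative, not code from the driver):

#include <linux/dma-mapping.h>

static void descriptor_roundtrip(struct device *dev, u32 *desc,
                                 dma_addr_t desc_dma)
{
        /* CPU owns the buffer: fill in the descriptor. */
        *desc = 0xdeadbeef;

        /* Hand ownership to the device.  On a noncoherent MIPS this
         * writes back (and invalidates) the cache line, so the device
         * sees the data and the CPU keeps no stale copy. */
        dma_sync_single_for_device(dev, desc_dma, sizeof(*desc),
                                   DMA_BIDIRECTIONAL);

        /* ... device DMA runs here ... */

        /* Reclaim ownership before the CPU reads the result; a missing
         * transfer in this pairing is what the mail above suspects. */
        dma_sync_single_for_cpu(dev, desc_dma, sizeof(*desc),
                                DMA_BIDIRECTIONAL);
}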
2020 Sep 01
3
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...ma_sync_desc_dev(struct net_device *dev, void *addr)
> > > {
> > > - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
> > > - DMA_TO_DEVICE);
> > > + struct sgiseeq_private *sp = netdev_priv(dev);
> > > +
> > > + dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
> > > + sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
> > > }
> >
> > this breaks ethernet on IP22 completely, but I haven't figured out why, yet.
>
> the problem is that dma_sync_single_for_cpu() doesn...
2020 Sep 01
2
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...MA_BIDIRECTIONAL);
> }
>
> static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
> {
> - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
> - DMA_TO_DEVICE);
> + struct sgiseeq_private *sp = netdev_priv(dev);
> +
> + dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
> + sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
> }
this breaks ethernet on IP22 completely, but I haven't figured out why, yet.
Thomas.
--
Crap can work. Given enough thrust pigs will fly, but it's not necessarily a
good idea....
2020 Aug 19
0
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...addr),
+ sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}
static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
- dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
- DMA_TO_DEVICE);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
+ sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
--
2.28.0
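VIRT_TO_DMA() here is a driver-local helper; the lib82596 result further down defines the equivalent virt_to_dma() explicitly. A sketch of the idea, assuming sgiseeq keeps the descriptor block's virtual and DMA base addresses in srings/srings_dma:

/* Sketch: translate a CPU pointer inside one contiguous descriptor
 * allocation to its bus address by offsetting from the base mapping.
 * The srings/srings_dma field names are assumptions. */
static inline dma_addr_t virt_to_dma_addr(struct sgiseeq_private *sp,
                                          void *v)
{
        return sp->srings_dma +
               ((unsigned long)v - (unsigned long)sp->srings);
}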
2020 Sep 01
0
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
> > static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
> > {
> > - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
> > - DMA_TO_DEVICE);
> > + struct sgiseeq_private *sp = netdev_priv(dev);
> > +
> > + dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
> > + sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
> > }
>
> this breaks ethernet on IP22 completely, but I haven't figured out why, yet.
the problem is that dma_sync_single_for_cpu() doesn't flush anything
for IP22, b...
2020 Sep 01
0
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
On Tue, Sep 01, 2020 at 07:16:27PM +0200, Christoph Hellwig wrote:
> Well, if IP22 doesn't speculate (which I'm pretty sure is the case),
> dma_sync_single_for_cpu should indeed be a no-op. But then there
> also shouldn't be anything in the cache, as the previous
> dma_sync_single_for_device should have invalidated it. So it seems like
> we are missing one (or more) ownership transfers to the device. I'll
> try to look at the ownership management in a little more detail
> tomorrow.
this is the problem:
/* Always check for received packets. */
sgiseeq_rx(dev, sp, hregs, sregs); ...
2020 Sep 03
1
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
On Tue, Sep 01, 2020 at 07:38:10PM +0200, Thomas Bogendoerfer wrote:
> this is the problem:
>
> /* Always check for received packets. */
> sgiseeq_rx(dev, sp, hregs, sregs);
>
> so the driver will look at the rx descriptor on every interrupt, so
> we cache the rx descriptor on the first interrupt and if there was
> no rx packet, we will only see it, if
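The direction the thread converges on is to reclaim CPU ownership of the descriptor on every poll, as the sgiseeq_rx() hunk in the later "[PATCH 12/18] sgiseeq: convert to dma_alloc_noncoherent" result suggests. A sketch, where the rdma.cntinfo field and HPCDMA_OWN bit are assumed descriptor conventions:

/* Sketch: re-sync the rx descriptor to the CPU each time the handler
 * inspects it, so a copy cached on a previous (empty) poll cannot hide
 * a newly completed packet. */
static void sgiseeq_rx_sketch(struct net_device *dev,
                              struct sgiseeq_private *sp)
{
        struct sgiseeq_rx_desc *rd = &sp->rx_desc[sp->rx_new];

        dma_sync_desc_cpu(dev, rd);
        while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
                /* ... pass the packet up and advance sp->rx_new ... */
                dma_sync_desc_dev(dev, rd);   /* descriptor back to device */
                rd = &sp->rx_desc[sp->rx_new];
                dma_sync_desc_cpu(dev, rd);
        }
}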
2020 Aug 19
0
[PATCH 28/28] nvme-pci: use dma_alloc_pages backed dmapools
...0;
} else {
- pool = dev->prp_page_pool;
+ pool = &dev->prp_page_pool;
iod->npages = 1;
}
@@ -630,6 +630,11 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
for (;;) {
if (i == NVME_CTRL_PAGE_SIZE >> 3) {
__le64 *old_prp_list = prp_list;
+
+ dma_sync_single_for_device(dev->dev, prp_dma,
+ i * sizeof(*prp_list),
+ DMA_TO_DEVICE);
+
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
return BLK_STS_RESOURCE;
@@ -653,6 +658,8 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
dma_len = sg_dma_le...
2020 Aug 19
0
[PATCH 23/28] lib82596: convert from dma_cache_sync to dma_sync_single_for_device
...);
#endif
+static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
+{
+ return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
+}
+
+#ifdef NONCOHERENT_DMA
+static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+ dma_sync_single_for_device(ndev->dev.parent,
+ virt_to_dma(netdev_priv(ndev), addr), len,
+ DMA_BIDIRECTIONAL);
+}
+
+static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ virt_to_dma(netdev_priv(ndev), addr), len,
+ DMA_BID...
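Call sites then reduce to a symmetric pair of syncs around device access; a sketch (the scb member and the command value are assumptions about the i596 structures, not shown in this hunk):

/* Sketch: hand the system control block to the device after the CPU
 * has updated it, then reclaim it before checking status. */
static void kick_scb(struct net_device *ndev)
{
        struct i596_private *lp = netdev_priv(ndev);

        lp->dma->scb.command = CUC_START;       /* assumed field/value */
        dma_sync_dev(ndev, &lp->dma->scb, sizeof(lp->dma->scb));

        /* ... device executes the command ... */

        dma_sync_cpu(ndev, &lp->dma->scb, sizeof(lp->dma->scb));
}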
2020 Sep 15
0
[PATCH 13/18] 53c700: convert to dma_alloc_noncoherent
...NCR_700_Host_Parameters *h, void *p)
+{
+ return h->pScript + ((uintptr_t)p - (uintptr_t)h->script);
+}
+
static inline void dma_sync_to_dev(struct NCR_700_Host_Parameters *h,
void *addr, size_t size)
{
if (h->noncoherent)
- dma_cache_sync(h->dev, addr, size, DMA_TO_DEVICE);
+ dma_sync_single_for_device(h->dev, virt_to_dma(h, addr),
+ size, DMA_BIDIRECTIONAL);
}
static inline void dma_sync_from_dev(struct NCR_700_Host_Parameters *h,
void *addr, size_t size)
{
if (h->noncoherent)
- dma_cache_sync(h->dev, addr, size, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(h->de...
2020 Sep 15
0
[PATCH 10/18] hal2: convert to dma_alloc_noncoherent
...dma = buffer_dma;
@@ -470,20 +471,22 @@ static int hal2_alloc_dmabuf(struct snd_hal2 *hal2, struct hal2_codec *codec)
desc_dma : desc_dma + (i + 1) * sizeof(struct hal2_desc);
desc++;
}
- dma_cache_sync(dev, codec->desc, count * sizeof(struct hal2_desc),
- DMA_TO_DEVICE);
+ dma_sync_single_for_device(dev, codec->desc_dma,
+ count * sizeof(struct hal2_desc),
+ DMA_BIDIRECTIONAL);
codec->desc_count = count;
return 0;
}
-static void hal2_free_dmabuf(struct snd_hal2 *hal2, struct hal2_codec *codec)
+static void hal2_free_dmabuf(struct snd_hal2 *hal2, struct hal2_codec *code...
2020 Sep 15
0
[PATCH 08/18] dma-mapping: add a new dma_alloc_noncoherent API
Add a new API to allocate and free memory that is guaranteed to be
addressable by a device, but which potentially is not cache coherent
for DMA.
To transfer ownership to and from the device, the existing streaming
DMA API calls dma_sync_single_for_device and dma_sync_single_for_cpu
must be used.
For now the new calls are implemented on top of dma_alloc_attrs just
like the old noncoherent API, but once all drivers are switched to
the new API it will be replaced with a better working implementation
that is available on all architectures.
Signed-off...
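A minimal sketch of that usage pattern, with illustrative names and sizes:

#include <linux/dma-mapping.h>
#include <linux/string.h>

#define RING_BYTES 4096                         /* illustrative size */

static int ring_roundtrip(struct device *dev)
{
        dma_addr_t ring_dma;
        void *ring;

        ring = dma_alloc_noncoherent(dev, RING_BYTES, &ring_dma,
                                     DMA_BIDIRECTIONAL, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;

        memset(ring, 0, RING_BYTES);            /* CPU owns the buffer */
        dma_sync_single_for_device(dev, ring_dma, RING_BYTES,
                                   DMA_BIDIRECTIONAL);

        /* ... device DMA ... */

        dma_sync_single_for_cpu(dev, ring_dma, RING_BYTES,
                                DMA_BIDIRECTIONAL);

        dma_free_noncoherent(dev, RING_BYTES, ring, ring_dma,
                             DMA_BIDIRECTIONAL);
        return 0;
}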
2020 Sep 15
0
[PATCH 09/18] sgiwd93: convert to dma_alloc_noncoherent
...deb8f1529..cf1030c9dda17f 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -95,7 +95,7 @@ void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din)
*/
hcp->desc.pbuf = 0;
hcp->desc.cntinfo = HPCDMA_EOX;
- dma_cache_sync(hd->dev, hd->cpu,
+ dma_sync_single_for_device(hd->dev, hd->dma,
(unsigned long)(hcp + 1) - (unsigned long)hd->cpu,
DMA_TO_DEVICE);
}
@@ -234,8 +234,8 @@ static int sgiwd93_probe(struct platform_device *pdev)
hdata = host_to_hostdata(host);
hdata->dev = &pdev->dev;
- hdata->cpu = dma_alloc_attrs(...
2014 May 19
0
[PATCH 2/4] drm/ttm: introduce dma cache sync helpers
.../ttm/ttm_module.h>
@@ -248,6 +249,30 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
+void ttm_dma_tt_cache_sync_for_device(struct ttm_dma_tt *ttm_dma,
+ struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++) {
+ dma_sync_single_for_device(dev, ttm_dma->dma_address[i],
+ PAGE_SIZE, DMA_TO_DEVICE);
+ }
+}
+EXPORT_SYMBOL(ttm_dma_tt_cache_sync_for_device);
+
+void ttm_dma_tt_cache_sync_for_cpu(struct ttm_dma_tt *ttm_dma,
+ struct device *dev)
+{
+ int i;
+
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++) {
+ dma_syn...
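A caller would bracket device access with the two helpers; a minimal sketch (placement in the driver's paths is illustrative):

/* Sketch: flush CPU writes to the backing pages before the GPU reads
 * them, and invalidate before the CPU reads GPU output. */
ttm_dma_tt_cache_sync_for_device(ttm_dma, dev); /* CPU -> device */
/* ... GPU work ... */
ttm_dma_tt_cache_sync_for_cpu(ttm_dma, dev);    /* device -> CPU */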
2020 Sep 15
0
[PATCH 12/18] sgiseeq: convert to dma_alloc_noncoherent
...addr),
+ sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}
static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
- dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
- DMA_TO_DEVICE);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
+ sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
@@ -403,6 +407,8 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
rd = &sp->rx_desc[sp-...
2014 Jun 24
0
[PATCH v2 2/3] drm/ttm: introduce dma cache sync helpers
...odule.h>
@@ -248,6 +249,30 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
}
EXPORT_SYMBOL(ttm_dma_tt_fini);
+void ttm_dma_tt_cache_sync_for_device(struct ttm_dma_tt *ttm_dma,
+ struct device *dev)
+{
+ unsigned long i;
+
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++) {
+ dma_sync_single_for_device(dev, ttm_dma->dma_address[i],
+ PAGE_SIZE, DMA_TO_DEVICE);
+ }
+}
+EXPORT_SYMBOL(ttm_dma_tt_cache_sync_for_device);
+
+void ttm_dma_tt_cache_sync_for_cpu(struct ttm_dma_tt *ttm_dma,
+ struct device *dev)
+{
+ unsigned long i;
+
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++) {...
2014 Jul 08
0
[PATCH v4 4/6] drm/nouveau: synchronize BOs when required
...drm->dev);
+ struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ int i;
+
+ if (!ttm_dma)
+ return;
+
+ if (nv_device_is_cpu_coherent(device) || nvbo->force_coherent)
+ return;
+
+ if (nv_device_is_pci(device)) {
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ pci_dma_sync_single_for_device(device->pdev,
+ ttm_dma->dma_address[i], PAGE_SIZE,
+ PCI_DMA_TODEVICE);
+ } else {
+ for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ dma_sync_single_for_device(nv_device_base(device),
+ ttm_dma->dma_address[i], PAGE_SIZE,
+ DMA_TO_DEVICE);
+ }
+}
+
+void
+nouveau_bo_sy...
2020 Sep 14
20
a saner API for allocating DMA addressable pages v2
Hi all,
this series replaces the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs
with a separate new dma_alloc_pages API, which is available on all
platforms. In addition to cleaning up the convoluted code path, this
ensures that other drivers that have asked for better support for
non-coherent DMA to pages without incurring bounce buffering can finally
be properly supported.
I'm still a
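A sketch of what allocating through the new dma_alloc_pages entry point the cover letter describes looks like for a driver (size, direction, and names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Sketch: allocate device-addressable pages that may be noncoherent;
 * ownership is then managed with the streaming sync calls. */
static struct page *alloc_ring(struct device *dev, dma_addr_t *dma)
{
        return dma_alloc_pages(dev, PAGE_SIZE, dma, DMA_BIDIRECTIONAL,
                               GFP_KERNEL);
}

static void free_ring(struct device *dev, struct page *page,
                      dma_addr_t dma)
{
        dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_BIDIRECTIONAL);
}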
2020 Sep 15
32
a saner API for allocating DMA addressable pages v3
Hi all,
this series replaces the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs
with a separate new dma_alloc_pages API, which is available on all
platforms. In addition to cleaning up the convoluted code path, this
ensures that other drivers that have asked for better support for
non-coherent DMA to pages without incurring bounce buffering can finally
be properly supported.
As a follow up I
2020 Sep 14
2
[PATCH 11/17] sgiseeq: convert to dma_alloc_noncoherent
...);
#endif
+static inline dma_addr_t virt_to_dma(struct i596_private *lp, volatile void *v)
+{
+ return lp->dma_addr + ((unsigned long)v - (unsigned long)lp->dma);
+}
+
+#ifdef NONCOHERENT_DMA
+static inline void dma_sync_dev(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+ dma_sync_single_for_device(ndev->dev.parent,
+ virt_to_dma(netdev_priv(ndev), addr), len,
+ DMA_BIDIRECTIONAL);
+}
+
+static inline void dma_sync_cpu(struct net_device *ndev, volatile void *addr,
+ size_t len)
+{
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ virt_to_dma(netdev_priv(ndev), addr), len,
+ DMA_BID...