Displaying 12 results from an estimated 12 matches for "sgiseeq_private".
2020 Sep 01
2
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...tions(+), 4 deletions(-)
>
> diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
> index 39599bbb5d45b6..f91dae16d69a19 100644
> --- a/drivers/net/ethernet/seeq/sgiseeq.c
> +++ b/drivers/net/ethernet/seeq/sgiseeq.c
> @@ -112,14 +112,18 @@ struct sgiseeq_private {
>
> static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
> {
> - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
> - DMA_FROM_DEVICE);
> + struct sgiseeq_private *sp = netdev_priv(dev);
> +
> + dma_sync_single_for_cp...
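For readers who only see the truncated hunk above, here is a hedged sketch of how the fully converted sync helpers might look. Everything past the cut, the dma_sync_desc_dev() counterpart and the DMA directions are assumptions inferred from the visible lines; VIRT_TO_DMA() is a driver-local helper whose definition is not shown in the snippet.

static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	/* Invalidate the CPU's cached copy before reading a descriptor
	 * the HPC DMA engine may have written. */
	dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
			sizeof(struct sgiseeq_rx_desc), DMA_FROM_DEVICE);
}

/* Assumed counterpart for the device direction (not visible in the hunk). */
static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	/* Write the CPU's update back so the device sees it. */
	dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
			sizeof(struct sgiseeq_rx_desc), DMA_TO_DEVICE);
}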
2020 Sep 15
0
[PATCH 12/18] sgiseeq: convert to dma_alloc_noncoherent
...---
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 8507ff2420143a..37ff25a84030eb 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -112,14 +112,18 @@ struct sgiseeq_private {
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
- dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
- DMA_FROM_DEVICE);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, ad...
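The allocation side of the dma_alloc_noncoherent conversion is cut off above; below is a minimal sketch of how probe and remove might pair up, using the dma_alloc_noncoherent()/dma_free_noncoherent() signatures from mainline and the sp->srings / sp->srings_dma names that appear in the other snippets. The err_out_free_dev label and the size/direction arguments are taken from the dma_free_pages hunks further down and may differ in the actual patch.

	/* probe: allocate the descriptor rings as non-coherent DMA memory */
	sp->srings = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
					   &sp->srings_dma, DMA_BIDIRECTIONAL,
					   GFP_KERNEL);
	if (!sp->srings)
		goto err_out_free_dev;

	/* remove (and the probe error path): the matching free */
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
			     sp->srings_dma, DMA_BIDIRECTIONAL);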
2020 Sep 02
1
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
..._desc_cpu...
the patch below fixes the problem.
Thomas.
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 8507ff242014..876e3700a0e4 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -112,14 +112,18 @@ struct sgiseeq_private {
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
- dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
- DMA_FROM_DEVICE);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ dma_sync_single_for_device(dev-...
2020 Sep 01
3
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...--git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
> > > index 39599bbb5d45b6..f91dae16d69a19 100644
> > > --- a/drivers/net/ethernet/seeq/sgiseeq.c
> > > +++ b/drivers/net/ethernet/seeq/sgiseeq.c
> > > @@ -112,14 +112,18 @@ struct sgiseeq_private {
> > >
> > > static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
> > > {
> > > - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
> > > - DMA_FROM_DEVICE);
> > > + struct sgiseeq_private...
2020 Aug 19
0
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...+----
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 39599bbb5d45b6..f91dae16d69a19 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -112,14 +112,18 @@ struct sgiseeq_private {
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
- dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
- DMA_FROM_DEVICE);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, ad...
2020 Sep 01
0
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
...>
> > diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
> > index 39599bbb5d45b6..f91dae16d69a19 100644
> > --- a/drivers/net/ethernet/seeq/sgiseeq.c
> > +++ b/drivers/net/ethernet/seeq/sgiseeq.c
> > @@ -112,14 +112,18 @@ struct sgiseeq_private {
> >
> > static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
> > {
> > - dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
> > - DMA_FROM_DEVICE);
> > + struct sgiseeq_private *sp = netdev_priv(dev);
> &...
2020 Sep 14
2
[PATCH 11/17] sgiseeq: convert to dma_alloc_noncoherent
...0x00004400
/* big endian CPU, 82596 little endian */
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 8507ff2420143a..37ff25a84030eb 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -112,14 +112,18 @@ struct sgiseeq_private {
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
- dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
- DMA_FROM_DEVICE);
+ struct sgiseeq_private *sp = netdev_priv(dev);
+
+ dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, ad...
2020 Sep 03
1
[PATCH 22/28] sgiseeq: convert from dma_cache_sync to dma_sync_single_for_device
On Tue, Sep 01, 2020 at 07:38:10PM +0200, Thomas Bogendoerfer wrote:
> this is the problem:
>
> /* Always check for received packets. */
> sgiseeq_rx(dev, sp, hregs, sregs);
>
> so the driver will look at the rx descriptor on every interrupt, so
> we cache the rx descriptor on the first interrupt and if there was
> no rx packet, we will only see it, if
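To make the race concrete, a simplified, hypothetical sketch of the rx polling pattern the mail describes; rx_desc, rx_new, rx_desc_done() and NEXT_RX() are illustrative names, not necessarily the driver's. The point is that the descriptor has to be synced for the CPU before every read, otherwise the CPU keeps seeing the copy it cached on the first interrupt.

	for (;;) {
		struct sgiseeq_rx_desc *rd = &sp->rx_desc[sp->rx_new];

		/* Pull in whatever the HPC DMA engine wrote since the last
		 * look; without this, the stale cached copy hides any packet
		 * that arrived after the first interrupt. */
		dma_sync_desc_cpu(dev, rd);
		if (!rx_desc_done(rd))		/* hypothetical ownership test */
			break;

		/* ... hand the packet up, refill the descriptor ... */

		/* Flush our update back so the device sees the refilled
		 * descriptor. */
		dma_sync_desc_dev(dev, rd);
		sp->rx_new = NEXT_RX(sp->rx_new);	/* hypothetical ring advance */
	}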
2020 Sep 14
20
a saner API for allocating DMA addressable pages v2
Hi all,
this series replaced the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs
with a separate new dma_alloc_pages API, which is available on all
platforms. In addition to cleaning up the convoluted code path, this
ensures that other drivers that have asked for better support for
non-coherent DMA to pages can finally be properly supported without
incurring bounce buffering.
I'm still a
2020 Sep 15
32
a saner API for allocating DMA addressable pages v3
Hi all,
this series replaced the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs
with a separate new dma_alloc_pages API, which is available on all
platforms. In addition to cleaning up the convoluted code path, this
ensures that other drivers that have asked for better support for
non-coherent DMA to pages can finally be properly supported without
incurring bounce buffering.
As a follow up I
2020 Aug 19
39
a saner API for allocating DMA addressable pages
Hi all,
this series replaced the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs
with a separate new dma_alloc_pages API, which is available on all
platforms. In addition to cleaning up the convoluted code path, this
ensures that other drivers that have asked for better support for
non-coherent DMA to pages can finally be properly supported without
incurring bounce buffering.
I'm still a
2020 Aug 19
0
[PATCH 19/28] dma-mapping: replace DMA_ATTR_NON_CONSISTENT with dma_{alloc, free}_pages
...sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
+ dma_free_pages(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma, DMA_BIDIRECTIONAL);
err_out_free_dev:
free_netdev(dev);
@@ -817,8 +817,8 @@ static int sgiseeq_remove(struct platform_device *pdev)
struct sgiseeq_private *sp = netdev_priv(dev);
unregister_netdev(dev);
- dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
- sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
+ dma_free_pages(&pdev->dev, sizeof(*sp->srings), sp->srings,
+ sp->srings_dma, DMA_BIDIRECTI...
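The hunks above only show the freeing side of the DMA_ATTR_NON_CONSISTENT removal. For context, here is a self-contained sketch of the replacement interface as it ended up in mainline; example_dma_pages() and its arguments are made up for illustration, and the exact signatures in this particular patch revision may differ (the hunk above, for instance, still passes the kernel virtual address to dma_free_pages).

#include <linux/dma-mapping.h>

/* Illustrative helper (not from the patch): allocate, use and free
 * non-coherent DMA pages with the new interface. */
static int example_dma_pages(struct device *dev, size_t size)
{
	struct page *page;
	dma_addr_t dma;

	page = dma_alloc_pages(dev, size, &dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* The CPU addresses the buffer via page_address(page); ownership
	 * handovers use dma_sync_single_for_{cpu,device}(), as in the
	 * sgiseeq sync helpers shown earlier. */

	dma_free_pages(dev, size, page, dma, DMA_BIDIRECTIONAL);
	return 0;
}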