Danilo Krummrich
2023-Aug-23 18:15 UTC
[Nouveau] [PATCH drm-misc-next v2] drm/nouveau: uapi: don't pass NO_PREFETCH flag implicitly
Currently, NO_PREFETCH is passed implicitly through
drm_nouveau_gem_pushbuf_push::length and drm_nouveau_exec_push::va_len.
Since this is a direct representation of how the HW is programmed, it
isn't really future-proof for a uAPI. Hence, fix this up for the new
uAPI and split up the va_len field of struct drm_nouveau_exec_push,
such that we keep 32 bits for va_len and 32 bits for flags.
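To illustrate the new layout (this snippet is not part of the patch; the
field and flag names come from the uAPI header change below, the variable
names are made up), userspace would then request no prefetching explicitly
instead of encoding it into the length:

    struct drm_nouveau_exec_push push = {
            .va     = push_va,    /* GPU VA of the push buffer mapping */
            .va_len = push_len,   /* length only, now 32 bit */
            .flags  = DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH,
    };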
For drm_nouveau_gem_pushbuf_push::length, at least provide
NOUVEAU_GEM_PUSHBUF_NO_PREFETCH to indicate the bit shift.
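For the existing ioctl the flag still travels in the length field; a
minimal sketch of what the new define documents (variable names are
illustrative only):

    /* bit 23 of drm_nouveau_gem_pushbuf_push::length requests no prefetch */
    push[i].length = push_len | NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;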
While at it, fix up nv50_dma_push() as well, such that the caller
doesn't need to encode the NO_PREFETCH flag into the length parameter.
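With that, the IB entry encoding stays internal to nv50_dma_push(); a
sketch of the second IB dword as written by the diff below (bit layout
taken from the existing code, the ib[] array is only illustrative):

    /* VA bits 32..39 in the low byte, length in bits 8..30, bit 31 = no prefetch */
    ib[1] = upper_32_bits(offset) | (length << 8) | (no_prefetch ? (1u << 31) : 0);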
Signed-off-by: Danilo Krummrich <dakr at redhat.com>
---
Changes in v2:
- dma: rename prefetch to no_prefetch in nv50_dma_push() (Faith)
- exec: print error message when pushbuf size is larger than max pushbuf size (Faith)
---
drivers/gpu/drm/nouveau/nouveau_dma.c | 7 +++++--
drivers/gpu/drm/nouveau/nouveau_dma.h | 8 ++++++--
drivers/gpu/drm/nouveau/nouveau_exec.c | 19 ++++++++++++++++---
drivers/gpu/drm/nouveau/nouveau_gem.c | 6 ++++--
include/uapi/drm/nouveau_drm.h | 8 +++++++-
5 files changed, 38 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index b90cac6d5772..b01c029f3a90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -69,16 +69,19 @@ READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
}
void
-nv50_dma_push(struct nouveau_channel *chan, u64 offset, int length)
+nv50_dma_push(struct nouveau_channel *chan, u64 offset, u32 length,
+ bool no_prefetch)
{
struct nvif_user *user = &chan->drm->client.device.user;
struct nouveau_bo *pb = chan->push.buffer;
int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
BUG_ON(chan->dma.ib_free < 1);
+ WARN_ON(length > NV50_DMA_PUSH_MAX_LENGTH);
nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
- nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
+ nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8 |
+ (no_prefetch ? (1 << 31) : 0));
chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 035a709c7be1..1744d95b233e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,7 +31,8 @@
#include "nouveau_chan.h"
int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
-void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
+void nv50_dma_push(struct nouveau_channel *, u64 addr, u32 length,
+ bool no_prefetch);
/*
* There's a hw race condition where you can't jump to your PUT offset,
@@ -45,6 +46,9 @@ void nv50_dma_push(struct nouveau_channel *, u64 addr, int length);
*/
#define NOUVEAU_DMA_SKIPS (128 / 4)
+/* Maximum push buffer size. */
+#define NV50_DMA_PUSH_MAX_LENGTH 0x7fffff
+
/* Object handles - for stuff that's doesn't use handle == oclass. */
enum {
NvDmaFB = 0x80000002,
@@ -89,7 +93,7 @@ FIRE_RING(struct nouveau_channel *chan)
if (chan->dma.ib_max) {
nv50_dma_push(chan, chan->push.addr + (chan->dma.put << 2),
- (chan->dma.cur - chan->dma.put) << 2);
+ (chan->dma.cur - chan->dma.put) << 2, false);
} else {
WRITE_PUT(chan->dma.cur);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 0f927adda4ed..a90c4cd8cbb2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -164,8 +164,10 @@ nouveau_exec_job_run(struct nouveau_job *job)
}
for (i = 0; i < exec_job->push.count; i++) {
- nv50_dma_push(chan, exec_job->push.s[i].va,
- exec_job->push.s[i].va_len);
+ struct drm_nouveau_exec_push *p = &exec_job->push.s[i];
+ bool no_prefetch = p->flags & DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH;
+
+ nv50_dma_push(chan, p->va, p->va_len, no_prefetch);
}
ret = nouveau_fence_emit(fence, chan);
@@ -223,7 +225,18 @@ nouveau_exec_job_init(struct nouveau_exec_job **pjob,
{
struct nouveau_exec_job *job;
struct nouveau_job_args args = {};
- int ret;
+ int i, ret;
+
+ for (i = 0; i < __args->push.count; i++) {
+ struct drm_nouveau_exec_push *p = &__args->push.s[i];
+
+ if (unlikely(p->va_len > NV50_DMA_PUSH_MAX_LENGTH)) {
+ NV_PRINTK(err, nouveau_cli(__args->file_priv),
+ "pushbuf size exceeds limit: 0x%x max 0x%x\n",
+ p->va_len, NV50_DMA_PUSH_MAX_LENGTH);
+ return -EINVAL;
+ }
+ }
job = *pjob = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f39360870c70..c0b10d8d3d03 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -856,9 +856,11 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
for (i = 0; i < req->nr_push; i++) {
struct nouveau_vma *vma = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
+ u64 addr = vma->addr + push[i].offset;
+ u32 length = push[i].length & ~NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
+ bool no_prefetch = push[i].length & NOUVEAU_GEM_PUSHBUF_NO_PREFETCH;
- nv50_dma_push(chan, vma->addr + push[i].offset,
- push[i].length);
+ nv50_dma_push(chan, addr, length, no_prefetch);
}
} else
if (drm->client.device.info.chipset >= 0x25) {
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index b1ad9d5ffce8..8f16724b5d05 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -138,6 +138,7 @@ struct drm_nouveau_gem_pushbuf_push {
__u32 pad;
__u64 offset;
__u64 length;
+#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
};
struct drm_nouveau_gem_pushbuf {
@@ -338,7 +339,12 @@ struct drm_nouveau_exec_push {
/**
* @va_len: the length of the push buffer mapping
*/
- __u64 va_len;
+ __u32 va_len;
+ /**
+ * @flags: the flags for this push buffer mapping
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
};
/**
base-commit: b4e9fa933551e51459c634dc4396171dc65284a6
--
2.41.0
Faith Ekstrand
2023-Aug-23 20:39 UTC
[Nouveau] [PATCH drm-misc-next v2] drm/nouveau: uapi: don't pass NO_PREFETCH flag implicitly
On Wed, Aug 23, 2023 at 1:17 PM Danilo Krummrich <dakr at redhat.com> wrote:
> Currently, NO_PREFETCH is passed implicitly through
> drm_nouveau_gem_pushbuf_push::length and drm_nouveau_exec_push::va_len.
>
> [...]
>
> Signed-off-by: Danilo Krummrich <dakr at redhat.com>

Still Reviewed-by: Faith Ekstrand <faith.ekstrand at collabora.com>