Alexandre Courbot
2015-Nov-09 07:37 UTC
[Nouveau] [PATCH] instmem/gk20a: fix race conditions
The LRU list used for recycling CPU mappings was handling concurrency
very poorly. For instance, if an instobj was acquired twice before being
released once, it would end up in the LRU list even though there is
still a client accessing it.

This patch fixes this by properly counting how many clients are
currently using a given instobj.

While at it, we also raise errors when inconsistencies are detected, and
factor out some code.

Signed-off-by: Alexandre Courbot <acourbot at nvidia.com>
---
 drm/nouveau/nvkm/subdev/instmem/gk20a.c | 66 ++++++++++++++++++---------------
 1 file changed, 37 insertions(+), 29 deletions(-)

diff --git a/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index fc419bb8eab7..681b2541229a 100644
--- a/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -57,6 +57,8 @@ struct gk20a_instobj {
 	/* CPU mapping */
 	u32 *vaddr;
 	struct list_head vaddr_node;
+	/* How many clients are using vaddr? */
+	u32 use_cpt;
 };
 #define gk20a_instobj(p) container_of((p), struct gk20a_instobj, memory)
 
@@ -158,27 +160,35 @@ gk20a_instobj_cpu_map_iommu(struct nvkm_memory *memory)
 }
 
 /*
- * Must be called while holding gk20a_instmem_lock
+ * Recycle the vaddr of obj. Must be called with gk20a_instmem::lock held.
+ */
+static void
+gk20a_instobj_recycle_vaddr(struct gk20a_instobj *obj)
+{
+	struct gk20a_instmem *imem = obj->imem;
+	/* there should not be any user left... */
+	WARN_ON(obj->use_cpt);
+	list_del(&obj->vaddr_node);
+	vunmap(obj->vaddr);
+	obj->vaddr = NULL;
+	imem->vaddr_use -= nvkm_memory_size(&obj->memory);
+	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n", imem->vaddr_use,
+		   imem->vaddr_max);
+}
+
+/*
+ * Must be called while holding gk20a_instmem::lock
  */
 static void
 gk20a_instmem_vaddr_gc(struct gk20a_instmem *imem, const u64 size)
 {
 	while (imem->vaddr_use + size > imem->vaddr_max) {
-		struct gk20a_instobj *obj;
-
 		/* no candidate that can be unmapped, abort... */
 		if (list_empty(&imem->vaddr_lru))
 			break;
 
-		obj = list_first_entry(&imem->vaddr_lru, struct gk20a_instobj,
-				       vaddr_node);
-		list_del(&obj->vaddr_node);
-		vunmap(obj->vaddr);
-		obj->vaddr = NULL;
-		imem->vaddr_use -= nvkm_memory_size(&obj->memory);
-		nvkm_debug(&imem->base.subdev, "(GC) vaddr used: %x/%x\n",
-			   imem->vaddr_use, imem->vaddr_max);
-
+		gk20a_instobj_recycle_vaddr(list_first_entry(&imem->vaddr_lru,
+				struct gk20a_instobj, vaddr_node));
 	}
 }
 
@@ -196,9 +206,10 @@ gk20a_instobj_acquire(struct nvkm_memory *memory)
 	spin_lock_irqsave(&imem->lock, flags);
 
 	if (node->vaddr) {
-		/* remove us from the LRU list since we cannot be unmapped */
-		list_del(&node->vaddr_node);
-
+		if (!node->use_cpt) {
+			/* remove from LRU list since mapping in use again */
+			list_del(&node->vaddr_node);
+		}
 		goto out;
 	}
 
@@ -218,6 +229,7 @@ gk20a_instobj_acquire(struct nvkm_memory *memory)
 		   imem->vaddr_use, imem->vaddr_max);
 
 out:
+	node->use_cpt++;
 	spin_unlock_irqrestore(&imem->lock, flags);
 
 	return node->vaddr;
@@ -233,9 +245,15 @@ gk20a_instobj_release(struct nvkm_memory *memory)
 
 	spin_lock_irqsave(&imem->lock, flags);
 
-	/* add ourselves to the LRU list so our CPU mapping can be freed */
-	list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
+	/* we should at least have one user to release... */
+	if (WARN_ON(node->use_cpt == 0))
+		goto out;
+
+	/* add unused objs to the LRU list to recycle their mapping */
+	if (--node->use_cpt == 0)
+		list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
 
+out:
 	spin_unlock_irqrestore(&imem->lock, flags);
 
 	wmb();
@@ -273,25 +291,15 @@ static void
 gk20a_instobj_dtor(struct gk20a_instobj *node)
 {
 	struct gk20a_instmem *imem = node->imem;
-	struct gk20a_instobj *obj;
 	unsigned long flags;
 
 	spin_lock_irqsave(&imem->lock, flags);
 
+	/* vaddr has already been recycled */
 	if (!node->vaddr)
 		goto out;
 
-	list_for_each_entry(obj, &imem->vaddr_lru, vaddr_node) {
-		if (obj == node) {
-			list_del(&obj->vaddr_node);
-			break;
-		}
-	}
-	vunmap(node->vaddr);
-	node->vaddr = NULL;
-	imem->vaddr_use -= nvkm_memory_size(&node->memory);
-	nvkm_debug(&imem->base.subdev, "vaddr used: %x/%x\n",
-		   imem->vaddr_use, imem->vaddr_max);
+	gk20a_instobj_recycle_vaddr(node);
 
 out:
 	spin_unlock_irqrestore(&imem->lock, flags);
-- 
2.6.2
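[Editor's note] Distilled from the patch above, the counting discipline reduces to the
following sketch. This is illustrative code, not the driver source: names other than
vaddr/vaddr_node/use_cpt are made up, creation of the mapping is elided, and the caller
is assumed to hold the instmem spinlock.

#include <linux/list.h>
#include <linux/types.h>
#include <linux/bug.h>

struct instobj {
        void *vaddr;                  /* CPU mapping, NULL once recycled */
        struct list_head vaddr_node;  /* linked into the LRU only while idle */
        u32 use_cpt;                  /* clients currently using vaddr */
};

/* caller holds the instmem lock */
static void *instobj_acquire(struct instobj *obj)
{
        /* mapped but idle: the object sits on the LRU, take it back off */
        if (obj->vaddr && !obj->use_cpt)
                list_del(&obj->vaddr_node);
        /* (creating the mapping when vaddr is still NULL is elided) */
        obj->use_cpt++;
        return obj->vaddr;
}

/* caller holds the instmem lock */
static void instobj_release(struct instobj *obj, struct list_head *lru)
{
        /* releasing an object nobody acquired is a bug */
        if (WARN_ON(obj->use_cpt == 0))
                return;
        /* only the last user puts the mapping up for recycling */
        if (--obj->use_cpt == 0)
                list_add_tail(&obj->vaddr_node, lru);
}

The invariant this maintains is that an object is on the LRU list if and only if it is
mapped and use_cpt == 0, which is exactly what allows the GC path and the destructor to
safely unmap anything they find there.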
Ben Skeggs
2015-Nov-11 00:19 UTC
[Nouveau] [PATCH] instmem/gk20a: fix race conditions
On 11/09/2015 05:37 PM, Alexandre Courbot wrote:
> The LRU list used for recycling CPU mappings was handling concurrency
> very poorly. For instance, if an instobj was acquired twice before being
> released once, it would end up in the LRU list even though there is
> still a client accessing it.
>
> This patch fixes this by properly counting how many clients are
> currently using a given instobj.
Out of curiosity, which instobjs are being accessed concurrently?
Alexandre Courbot
2015-Nov-11 05:08 UTC
[Nouveau] [PATCH] instmem/gk20a: fix race conditions
On Wed, Nov 11, 2015 at 9:19 AM, Ben Skeggs <skeggsb at gmail.com> wrote:
> On 11/09/2015 05:37 PM, Alexandre Courbot wrote:
>> The LRU list used for recycling CPU mappings was handling concurrency
>> very poorly. For instance, if an instobj was acquired twice before being
>> released once, it would end up in the LRU list even though there is
>> still a client accessing it.
>>
>> This patch fixes this by properly counting how many clients are
>> currently using a given instobj.
> Out of curiosity, which instobjs are being accessed concurrently?

PGTs, it seems - at least that is what dumping a stack upon detecting
concurrent usage of an instobj indicates:

[  270.547052] [<bf0618dc>] (gk20a_instobj_acquire [nouveau]) from [<bf066358>] (gf100_vm_map_sg+0x58/0x124 [nouveau])
[  270.557568] [<bf066358>] (gf100_vm_map_sg [nouveau]) from [<bf0641b0>] (nvkm_vm_map+0x300/0x3bc [nouveau])
[  270.567333] [<bf0641b0>] (nvkm_vm_map [nouveau]) from [<bf0bcbd4>] (nouveau_bo_vma_add+0x74/0xa0 [nouveau])
[  270.577189] [<bf0bcbd4>] (nouveau_bo_vma_add [nouveau]) from [<bf0bdbd0>] (nouveau_gem_object_open+0x124/0x158 [nouveau])
[  270.588196] [<bf0bdbd0>] (nouveau_gem_object_open [nouveau]) from [<c02da62c>] (drm_gem_handle_create_tail+0x104/0x19c)
[  270.599025] [<c02da62c>] (drm_gem_handle_create_tail) from [<bf0be138>] (nouveau_gem_ioctl_new+0x90/0x18c [nouveau])
[  270.609594] [<bf0be138>] (nouveau_gem_ioctl_new [nouveau]) from [<c02db3b8>] (drm_ioctl+0x284/0x440)
[  270.618777] [<c02db3b8>] (drm_ioctl) from [<bf0b6bfc>] (nouveau_drm_ioctl+0x54/0x98 [nouveau])
[  270.627441] [<bf0b6bfc>] (nouveau_drm_ioctl [nouveau]) from [<c0103860>] (do_vfs_ioctl+0x458/0x6dc)
[  270.636471] [<c0103860>] (do_vfs_ioctl) from [<c0103b18>] (SyS_ioctl+0x34/0x5c)
[  270.643767] [<c0103b18>] (SyS_ioctl) from [<c000f5c0>] (ret_fast_syscall+0x0/0x3c)
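[Editor's note] A sketch of the kind of one-off instrumentation that produces such a
trace (illustrative only, not code from this thread): inside gk20a_instobj_acquire(),
with imem->lock held, flag any acquire that finds the mapping already in use.

        /* debug hack (hypothetical): report concurrent users of vaddr */
        if (node->use_cpt)
                dump_stack();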