search for: idr_alloc

Displaying 20 results from an estimated 41 matches for "idr_alloc".

2023 Feb 14
3
[PATCH] drm/gem: Expose the buffer object handle to userspace last
...t the user-visible handle using idr. Preload and perform - * allocation under our spinlock. + * Get the user-visible handle using idr as the _last_ step. + * Preload and perform allocation under our spinlock. */ idr_preload(GFP_KERNEL); spin_lock(&file_priv->table_lock); - ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); - spin_unlock(&file_priv->table_lock); idr_preload_end(); - mutex_unlock(&dev->object_name_lock); if (ret < 0) - goto err_unref; - - handle = ret; + goto err_close; - ret = drm_vma_node_allow(&obj->vma_node,...
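For context, the hunk above follows the standard kernel idr_preload()/idr_alloc() pattern: preallocate IDR nodes while sleeping is still allowed, then do the actual allocation with GFP_NOWAIT under a spinlock. A minimal sketch of that pattern, using illustrative names ("my_table", "my_handle_create") rather than anything taken from the DRM patch:

#include <linux/idr.h>
#include <linux/spinlock.h>

/* Illustrative container; not from the patch. */
struct my_table {
	struct idr object_idr;		/* handle -> object mapping */
	spinlock_t table_lock;		/* protects object_idr */
};

static int my_handle_create(struct my_table *t, void *obj)
{
	int ret;

	/* Preallocate IDR nodes while we may still sleep ... */
	idr_preload(GFP_KERNEL);
	spin_lock(&t->table_lock);

	/* ... so the allocation under the spinlock can use GFP_NOWAIT.
	 * Handles start at 1; end == 0 means "no upper bound". */
	ret = idr_alloc(&t->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&t->table_lock);
	idr_preload_end();

	return ret;	/* new handle on success, negative errno on failure */
}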
2020 Feb 18
2
[PATCH] vhost: introduce vDPA based backend
On Fri, Jan 31, 2020 at 11:36:51AM +0800, Tiwei Bie wrote: > +static int vhost_vdpa_alloc_minor(struct vhost_vdpa *v) > +{ > + return idr_alloc(&vhost_vdpa.idr, v, 0, MINORMASK + 1, > + GFP_KERNEL); > +} Please don't use idr in new code, use xarray directly > +static int vhost_vdpa_probe(struct device *dev) > +{ > + struct vdpa_device *vdpa = dev_to_vdpa(dev); > + const struct vdpa_config_ops *ops = vdpa->...
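The "use xarray directly" comment refers to the allocating-xarray API. A sketch of what the suggested replacement for the quoted helper might look like; the xarray name, the "_sketch" suffixes and the forward declaration are assumptions, not part of the patch:

#include <linux/kdev_t.h>	/* MINORMASK */
#include <linux/xarray.h>

struct vhost_vdpa;				/* defined in the patch under review */

static DEFINE_XARRAY_ALLOC(vhost_vdpa_xa);	/* allocating xarray, IDs start at 0 */

static int vhost_vdpa_alloc_minor_sketch(struct vhost_vdpa *v)
{
	u32 minor;
	int err;

	/* Find a free ID in [0, MINORMASK], store v under it, return the ID. */
	err = xa_alloc(&vhost_vdpa_xa, &minor, v,
		       XA_LIMIT(0, MINORMASK), GFP_KERNEL);
	return err ? err : minor;
}

static void vhost_vdpa_free_minor_sketch(int minor)
{
	xa_erase(&vhost_vdpa_xa, minor);
}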
2023 Feb 20
2
[PATCH] drm/gem: Expose the buffer object handle to userspace last
...pinlock. >> +     * Get the user-visible handle using idr as the _last_ step. >> +     * Preload and perform allocation under our spinlock. >>        */ >>       idr_preload(GFP_KERNEL); >>       spin_lock(&file_priv->table_lock); >> - >>       ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); >> - >>       spin_unlock(&file_priv->table_lock); >>       idr_preload_end(); >> -     mutex_unlock(&dev->object_name_lock); >>       if (ret < 0) >> -         goto err_unref; >> - >&...
2018 Sep 26
5
[PATCH 0/4] Improve virtio ID allocation
I noticed you were using IDRs where you could be using the more efficient IDAs, then while fixing that I noticed the lack of error handling, and I decided to follow that up with an efficiency improvement. There's probably a v2 of this to follow because I couldn't figure out how to properly handle one of the error cases ... see the comment embedded in one of the patches. Matthew Wilcox
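The distinction here is that an IDR maps IDs to pointers, while a pure ID allocator only needs the lighter-weight IDA. A minimal IDA sketch, with illustrative names and a made-up upper bound rather than anything from the series:

#include <linux/idr.h>		/* the IDA API lives alongside the IDR API */

static DEFINE_IDA(my_index_ida);

static int my_index_alloc(void)
{
	/* Lowest free ID in [0, 63]; returns a negative errno on failure. */
	return ida_alloc_max(&my_index_ida, 63, GFP_KERNEL);
}

static void my_index_free(int index)
{
	ida_free(&my_index_ida, index);
}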
2023 Feb 20
1
[PATCH] drm/gem: Expose the buffer object handle to userspace last
...andle using idr as the _last_ step. >>>> +     * Preload and perform allocation under our spinlock. >>>>        */ >>>>       idr_preload(GFP_KERNEL); >>>>       spin_lock(&file_priv->table_lock); >>>> - >>>>       ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); >>>> - >>>>       spin_unlock(&file_priv->table_lock); >>>>       idr_preload_end(); >>>> -     mutex_unlock(&dev->object_name_lock); >>>>       if (ret < 0) >>&g...
2020 Feb 19
0
[PATCH] vhost: introduce vDPA based backend
On Tue, Feb 18, 2020 at 09:53:59AM -0400, Jason Gunthorpe wrote: > On Fri, Jan 31, 2020 at 11:36:51AM +0800, Tiwei Bie wrote: > > > +static int vhost_vdpa_alloc_minor(struct vhost_vdpa *v) > > +{ > > + return idr_alloc(&vhost_vdpa.idr, v, 0, MINORMASK + 1, > > + GFP_KERNEL); > > +} > > Please don't use idr in new code, use xarray directly > > > +static int vhost_vdpa_probe(struct device *dev) > > +{ > > + struct vdpa_device *vdpa = dev_to_vdpa(dev); > > +...
2023 Feb 14
0
[PATCH] drm/gem: Expose the buffer object handle to userspace last
...d and perform > - * allocation under our spinlock. > + * Get the user-visible handle using idr as the _last_ step. > + * Preload and perform allocation under our spinlock. > */ > idr_preload(GFP_KERNEL); > spin_lock(&file_priv->table_lock); > - > ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); > - > spin_unlock(&file_priv->table_lock); > idr_preload_end(); > > - mutex_unlock(&dev->object_name_lock); > if (ret < 0) > - goto err_unref; > - > - handle = ret; > + goto err_close...
2023 Feb 14
0
[Nouveau] [PATCH] drm/gem: Expose the buffer object handle to userspace last
...d and perform > - * allocation under our spinlock. > + * Get the user-visible handle using idr as the _last_ step. > + * Preload and perform allocation under our spinlock. > */ > idr_preload(GFP_KERNEL); > spin_lock(&file_priv->table_lock); > - > ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); > - > spin_unlock(&file_priv->table_lock); > idr_preload_end(); > > - mutex_unlock(&dev->object_name_lock); > if (ret < 0) > - goto err_unref; > - > - handle = ret; > + goto err_close...
2023 Feb 20
0
[Nouveau] [PATCH] drm/gem: Expose the buffer object handle to userspace last
...* Get the user-visible handle using idr as the _last_ step. >>> +     * Preload and perform allocation under our spinlock. >>>        */ >>>       idr_preload(GFP_KERNEL); >>>       spin_lock(&file_priv->table_lock); >>> - >>>       ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); >>> - >>>       spin_unlock(&file_priv->table_lock); >>>       idr_preload_end(); >>> -     mutex_unlock(&dev->object_name_lock); >>>       if (ret < 0) >>> -         goto err...
2014 Jul 09
0
[PATCH 10/17] drm/qxl: rework to new fence interface
...rn 0; } + release->base.ops = NULL; release->type = type; release->release_offset = 0; release->surface_release_id = 0; @@ -60,44 +143,59 @@ qxl_release_alloc(struct qxl_device *qdev, int type, idr_preload(GFP_KERNEL); spin_lock(&qdev->release_idr_lock); - idr_ret = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT); + handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT); + release->base.seqno = ++qdev->release_seqno; spin_unlock(&qdev->release_idr_lock); idr_preload_end(); - handle = idr_ret; - if (idr_ret < 0) - go...
2018 May 16
0
[RFC v4 3/5] virtio_ring: add packed ring support
...iptors because + * otherwise virt_to_phys will give us bogus addresses in the + * virtqueue. + */ + gfp &= ~__GFP_HIGHMEM; + + desc = kmalloc(total_sg * sizeof(struct vring_packed_desc), gfp); + + return desc; +} + +static u16 alloc_id_packed(struct vring_virtqueue *vq) +{ + u16 id; + + id = idr_alloc(&vq->buffer_id, NULL, 0, vq->vring_packed.num, + GFP_KERNEL); + return id; +} + +static void free_id_packed(struct vring_virtqueue *vq, u16 id) +{ + idr_remove(&vq->buffer_id, id); +} + static inline int virtqueue_add_packed(struct virtqueue *_vq, struct scatte...
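The quoted alloc_id_packed()/free_id_packed() helpers use an IDR purely as an ID allocator by storing NULL entries. A standalone sketch of that pattern, with illustrative names and the ring size passed in as a parameter rather than read from the virtqueue:

#include <linux/idr.h>

static struct idr buffer_id_idr;	/* illustrative; the patch embeds the IDR in the vq */

static void buffer_id_init(void)
{
	idr_init(&buffer_id_idr);
}

static int buffer_id_alloc(unsigned int ring_size)
{
	/* Hand out an unused ID in [0, ring_size); only the ID itself is
	 * needed, so a NULL entry is stored. */
	return idr_alloc(&buffer_id_idr, NULL, 0, ring_size, GFP_KERNEL);
}

static void buffer_id_free(int id)
{
	idr_remove(&buffer_id_idr, id);
}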
2020 Feb 05
0
[PATCH] vhost: introduce vDPA based backend
...wait_queue_head_t release_q; > > +} vhost_vdpa; > > + > > +static const u64 vhost_vdpa_features[] = { > > + [VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES, > > +}; > > + > > +static int vhost_vdpa_alloc_minor(struct vhost_vdpa *v) > > +{ > > + return idr_alloc(&vhost_vdpa.idr, v, 0, MINORMASK + 1, > > + GFP_KERNEL); > > +} > > + > > +static void vhost_vdpa_free_minor(int minor) > > +{ > > + idr_remove(&vhost_vdpa.idr, minor); > > +} > > + > > +static struct vhost_vdpa *vhost_vdpa_get_from_m...
2018 May 16
2
[RFC v4 3/5] virtio_ring: add packed ring support
...s in the > + * virtqueue. > + */ > + gfp &= ~__GFP_HIGHMEM; > + > + desc = kmalloc(total_sg * sizeof(struct vring_packed_desc), gfp); > + > + return desc; > +} > + > +static u16 alloc_id_packed(struct vring_virtqueue *vq) > +{ > + u16 id; > + > + id = idr_alloc(&vq->buffer_id, NULL, 0, vq->vring_packed.num, > + GFP_KERNEL); > + return id; > +} > + > +static void free_id_packed(struct vring_virtqueue *vq, u16 id) > +{ > + idr_remove(&vq->buffer_id, id); > +} > + > static inline int virtqueue_add_pack...
2020 Feb 04
10
[PATCH] vhost: introduce vDPA based backend
...struct cdev cdev; > + dev_t devt; > + wait_queue_head_t release_q; > +} vhost_vdpa; > + > +static const u64 vhost_vdpa_features[] = { > + [VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES, > +}; > + > +static int vhost_vdpa_alloc_minor(struct vhost_vdpa *v) > +{ > + return idr_alloc(&vhost_vdpa.idr, v, 0, MINORMASK + 1, > + GFP_KERNEL); > +} > + > +static void vhost_vdpa_free_minor(int minor) > +{ > + idr_remove(&vhost_vdpa.idr, minor); > +} > + > +static struct vhost_vdpa *vhost_vdpa_get_from_minor(int minor) > +{ > + struct vhost_v...