Displaying 8 results from an estimated 8 matches for "unmap_grant_pag".

2019 Oct 30 (0 replies): [PATCH v2 09/15] xen/gntdev: use mmu_range_notifier_insert
...> +++ b/drivers/xen/gntdev.c
> @@ -65,7 +65,6 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
>  static atomic_t pages_mapped = ATOMIC_INIT(0);
>  
>  static int use_ptemod;
> -#define populate_freeable_maps use_ptemod
>  
>  static int unmap_grant_pages(struct gntdev_grant_map *map,
>  			     int offset, int pages);
> @@ -251,12 +250,6 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
>  		evtchn_put(map->notify.event);
>  	}
>  
> -	if (populate_freeable_maps && priv) {
> -		mutex_lo...

2019 Oct 28 (1 reply): [PATCH v2 09/15] xen/gntdev: use mmu_range_notifier_insert
...bebbc 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -65,7 +65,6 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
 static atomic_t pages_mapped = ATOMIC_INIT(0);
 
 static int use_ptemod;
-#define populate_freeable_maps use_ptemod
 
 static int unmap_grant_pages(struct gntdev_grant_map *map,
 			     int offset, int pages);
@@ -251,12 +250,6 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 		evtchn_put(map->notify.event);
 	}
 
-	if (populate_freeable_maps && priv) {
-		mutex_lock(&priv->lock);
-		list_del...
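
Both snippets above truncate the same hunk being removed from gntdev_put_map(). Reconstructed as a sketch, the removed block looked roughly like this; the list_del() argument is an assumption extrapolated from the visible "list_del..." fragment, not something the snippets confirm:

	if (populate_freeable_maps && priv) {
		mutex_lock(&priv->lock);
		/* Unlink the map under priv->lock so a concurrent mmu
		 * notifier walk cannot find it after this point; the exact
		 * list field name is an assumption. */
		list_del(&map->next);
		mutex_unlock(&priv->lock);
	}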

2019 Nov 04 (2 replies): [PATCH v2 09/15] xen/gntdev: use mmu_range_notifier_insert
...riv *priv = file->private_data;
>  
>  	pr_debug("gntdev_vma_close %p\n", vma);
> -	if (use_ptemod) {
> -		/* It is possible that an mmu notifier could be running
> -		 * concurrently, so take priv->lock to ensure that the vma won't
> -		 * vanishing during the unmap_grant_pages call, since we will
> -		 * spin here until that completes. Such a concurrent call will
> -		 * not do any unmapping, since that has been done prior to
> -		 * closing the vma, but it may still iterate the unmap_ops list.
> -		 */
> -		mutex_lock(&priv->lock);
> +	if (use...
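
The comment being deleted here documents a close-vs-notifier race, and the same hunk is re-quoted twice further down in this thread. In pattern form it amounts to the following; this is a sketch, not the driver's exact code, and only the identifiers visible in the hunk are taken from the source:

static void example_vma_close(struct vm_area_struct *vma)
{
	struct gntdev_grant_map *map = vma->vm_private_data;
	struct gntdev_priv *priv = vma->vm_file->private_data;

	pr_debug("gntdev_vma_close %p\n", vma);
	if (use_ptemod) {
		/* Take the same lock the mmu notifier takes, so the
		 * notifier never observes the vma mid-teardown. */
		mutex_lock(&priv->lock);
		map->vma = NULL;	/* assumption: the notifier skips maps with no vma */
		mutex_unlock(&priv->lock);
	}
}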

2011 Mar 07 (6 replies): [PATCH] xen/gntdev,gntalloc: Remove unneeded VM flags
...ma->vm_flags |= VM_DONTCOPY;
-	vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP;
 
 	vma->vm_ops = &gntalloc_vmops;
 
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 2faf797..69c787a 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -334,17 +334,26 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
 
 /* ------------------------------------------------------------------ */
 
+static void gntdev_vma_open(struct vm_area_struct *vma)
+{
+	struct grant_map *map = vma->vm_private_data;
+
+	pr_debug("gntdev_vma_open %p\n", vma);
+	atomic_...
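
The snippet cuts off inside the newly added open handler. Since vm_ops->open runs whenever a VMA is duplicated on fork or split, the natural body is a reference-count bump on the shared grant_map; a sketch under that assumption (the 'users' field name is a guess extrapolated from the "atomic_..." fragment):

static void gntdev_vma_open(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_open %p\n", vma);
	/* Assumed continuation: pin the map for the new or split VMA. */
	atomic_inc(&map->users);
}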

2019 Nov 05 (0 replies): [PATCH v2 09/15] xen/gntdev: use mmu_range_notifier_insert
...>>>  	pr_debug("gntdev_vma_close %p\n", vma);
>>> -	if (use_ptemod) {
>>> -		/* It is possible that an mmu notifier could be running
>>> -		 * concurrently, so take priv->lock to ensure that the vma won't
>>> -		 * vanishing during the unmap_grant_pages call, since we will
>>> -		 * spin here until that completes. Such a concurrent call will
>>> -		 * not do any unmapping, since that has been done prior to
>>> -		 * closing the vma, but it may still iterate the unmap_ops list.
>>> -		 */
>>> -		mutex_...

2019 Nov 05 (1 reply): [PATCH v2 09/15] xen/gntdev: use mmu_range_notifier_insert
...ata;
> >  
> >  	pr_debug("gntdev_vma_close %p\n", vma);
> > -	if (use_ptemod) {
> > -		/* It is possible that an mmu notifier could be running
> > -		 * concurrently, so take priv->lock to ensure that the vma won't
> > -		 * vanishing during the unmap_grant_pages call, since we will
> > -		 * spin here until that completes. Such a concurrent call will
> > -		 * not do any unmapping, since that has been done prior to
> > -		 * closing the vma, but it may still iterate the unmap_ops list.
> > -		 */
> > -		mutex_lock(&priv-...

2019 Oct 28 (32 replies): [PATCH v2 00/15] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg@mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) are using a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others

2019 Nov 12 (20 replies): [PATCH hmm v3 00/14] Consolidate the mmu notifier interval_tree and locking
From: Jason Gunthorpe <jgg@mellanox.com>
8 of the mmu_notifier-using drivers (i915_gem, radeon_mn, umem_odp, hfi1,
scif_dma, vhost, gntdev, hmm) are using a common pattern where
they only use invalidate_range_start/end and immediately check the
invalidating range against some driver data structure to tell if the
driver is interested. Half of them use an interval_tree, the others
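
Both cover letters describe the same duplicated logic: each driver registers invalidate_range_start/end and immediately filters the invalidated address range against its own bookkeeping before doing any real work. Roughly, as a sketch: my_driver, my_tree_overlaps, and the surrounding structure are hypothetical, and only the mmu_notifier callback shape follows the kernel API the series was targeting.

struct my_driver {
	struct mmu_notifier mn;
	/* interval tree or list of tracked VA ranges ... */
};

static int my_invalidate_range_start(struct mmu_notifier *mn,
				     const struct mmu_notifier_range *range)
{
	struct my_driver *drv = container_of(mn, struct my_driver, mn);

	/* The step every driver open-codes: does the invalidated range
	 * overlap anything we track (interval tree, list, ...)? */
	if (!my_tree_overlaps(drv, range->start, range->end))
		return 0;

	/* ... driver-specific invalidation of the overlapping ranges ... */
	return 0;
}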