search for: vm_end

Displaying 16 results from an estimated 101 matches for "vm_end".

2019 Oct 30
0
[PATCH v2 09/15] xen/gntdev: use mmu_range_notifier_insert
...-------------------------- */ > > -static bool in_range(struct gntdev_grant_map *map, > - unsigned long start, unsigned long end) > -{ > - if (!map->vma) > - return false; > - if (map->vma->vm_start >= end) > - return false; > - if (map->vma->vm_end <= start) > - return false; > - > - return true; > -} > - > -static int unmap_if_in_range(struct gntdev_grant_map *map, > - unsigned long start, unsigned long end, > - bool blockable) > +static bool gntdev_invalidate(struct mmu_range_notifier *mn, >...
2020 Aug 11
2
[PATCH 1/4] vdpa: introduce config op to get valid iova range
...is ambiguous. Is end in the range or just behind it? >>>>> How about first/last? >>>> It is customary in the kernel to use start-end where end corresponds to >>>> the byte following the last in the range. See struct vm_area_struct >>>> vm_start and vm_end fields >>> Exactly my point: >>> >>> include/linux/mm_types.h: unsigned long vm_end; /* The first byte after our end address >>> >>> in this case Jason wants it to be the last byte, not one behind. >>> >>> >> Maybe...
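
The naming debate above hinges on two conventions for describing a byte range. A minimal userspace C sketch (struct names are hypothetical, not kernel code) contrasting the half-open start/end form used by vm_start/vm_end with the inclusive first/last form:

    #include <stdio.h>

    /* Half-open convention, as in struct vm_area_struct: the range covers
     * [start, end), so end is the first byte after the last valid one. */
    struct range_half_open {
        unsigned long start;
        unsigned long end;
    };

    /* Inclusive convention, as proposed with first/last: the range covers
     * [first, last], so last is the final valid byte. */
    struct range_inclusive {
        unsigned long first;
        unsigned long last;
    };

    int main(void)
    {
        /* One 4 KiB page at 0x1000, expressed both ways. */
        struct range_half_open ho = { 0x1000, 0x2000 };
        struct range_inclusive in = { 0x1000, 0x1fff };

        printf("half-open size: %lu\n", ho.end - ho.start);      /* 4096 */
        printf("inclusive size: %lu\n", in.last - in.first + 1); /* 4096 */

        /* Only the inclusive form can describe a range ending at the very
         * top of the address space; a half-open end would wrap to zero. */
        return 0;
    }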
2019 Oct 28
1
[PATCH v2 09/15] xen/gntdev: use mmu_range_notifier_insert
.../* ------------------------------------------------------------------ */ -static bool in_range(struct gntdev_grant_map *map, - unsigned long start, unsigned long end) -{ - if (!map->vma) - return false; - if (map->vma->vm_start >= end) - return false; - if (map->vma->vm_end <= start) - return false; - - return true; -} - -static int unmap_if_in_range(struct gntdev_grant_map *map, - unsigned long start, unsigned long end, - bool blockable) +static bool gntdev_invalidate(struct mmu_range_notifier *mn, + const struct mmu_notifier_range *range,...
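
For reference, the in_range() helper removed by this patch is the classic half-open interval overlap test. A standalone sketch (vma_range is a hypothetical stand-in for the two VMA fields involved):

    #include <stdbool.h>

    struct vma_range {
        unsigned long vm_start; /* first byte of the mapping */
        unsigned long vm_end;   /* first byte after the mapping */
    };

    /* [vm_start, vm_end) intersects [start, end) unless one range ends at
     * or before the point where the other begins. */
    static bool ranges_overlap(const struct vma_range *vma,
                               unsigned long start, unsigned long end)
    {
        if (vma->vm_start >= end)
            return false;
        if (vma->vm_end <= start)
            return false;
        return true;
    }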
2010 Dec 08
2
[PATCH] xen: gntdev: move use of GNTMAP_contains_pte next to the map_op
...a) static void hypercall_vm_close(struct vm_area_struct * vma) { struct mmap_hypercall *priv = vma->vm_private_data; - struct page *page; + struct page *page, *tpage; printk(KERN_CRIT "hypercall_vm_close: vma %p %#lx-%#lx (%#lx) priv %p\n", vma, vma->vm_start, vma->vm_end, vma->vm_end - vma->vm_start, priv); - list_for_each_entry(page, &priv->list, lru) { + spin_lock(&priv->lock); + list_for_each_entry_safe(page, tpage, &priv->list, lru) { printk(KERN_CRIT "hypercall vm_close: page %p count %d\n", page, page_count(page));...
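
The key change in the hunk above is switching to list_for_each_entry_safe() under priv->lock, because the close path frees pages while walking the list. A userspace analogue showing why the next pointer must be saved before freeing the current node:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int value;
    };

    static void free_all(struct node *head)
    {
        struct node *cur = head, *tmp;

        while (cur) {
            tmp = cur->next; /* save next BEFORE freeing, like _safe does */
            printf("freeing node %d\n", cur->value);
            free(cur);
            cur = tmp;
        }
    }

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            if (!n)
                return 1;
            n->value = i;
            n->next = head;
            head = n;
        }
        free_all(head);
        return 0;
    }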
2020 Jun 17
12
[PATCH 0/4] vDPA: API for reporting IOVA range
Hi All: This series introduces an API for reporting the IOVA range. This is a must for userspace to work correctly: - for the process that uses vhost-vDPA directly to properly allocate IOVA - for VM(qemu), when vIOMMU is not enabled, fail early if GPA is out of range - for VM(qemu), when vIOMMU is enabled, determine a valid guest address width Please review. Thanks Jason Wang (4): vdpa:
2020 Jan 13
0
[PATCH v6 5/6] nouveau: use new mmu interval notifiers
...} + /* + * Might as well create an interval covering the underlying VMA to + * avoid having to create a bunch of small intervals. + */ + vma = find_vma(svmm->mm, range->start); + if (!vma || start < vma->vm_start) { + ret = -ENOENT; + goto err; + } + if (range->end > vma->vm_end) { + range->end = vma->vm_end; + last = range->end - 1; + } else if (!mni) { + /* Anything registered on the right part of the vma? */ + mni = mmu_interval_notifier_find(svmm->mm, &nouveau_svm_mni_ops, + range->end, vma->vm_end - 1); + if (mni) + last = mmu_interv...
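
The hunk above clamps the requested range so it never extends past the VMA, then derives the inclusive last byte the interval notifier is keyed on. A simplified sketch of that step (names are hypothetical):

    struct req_range {
        unsigned long start;
        unsigned long end; /* first byte after the range, like vm_end */
    };

    /* Trim the range to the VMA and return the inclusive last byte. */
    static unsigned long clamp_to_vma(struct req_range *range,
                                      unsigned long vm_end)
    {
        if (range->end > vm_end)
            range->end = vm_end;
        return range->end - 1;
    }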
2020 May 29
1
[PATCH 4/6] vhost_vdpa: support doorbell mapping via mmap
...truct vhost_vdpa *v = vma->vm_file->private_data; > + struct vdpa_device *vdpa = v->vdpa; > + const struct vdpa_config_ops *ops = vdpa->config; > + struct vdpa_notification_area notify; > + int index = vma->vm_pgoff; > + > + if (vma->vm_end - vma->vm_start != PAGE_SIZE) > + return -EINVAL; > + if ((vma->vm_flags & VM_SHARED) == 0) > + return -EINVAL; > + if (vma->vm_flags & VM_READ) > + return -EINVAL; > + if (index > 65535) > +...
2020 Jun 10
1
[PATCH] vhost_vdpa: Fix potential underflow in vhost_vdpa_mmap()
...static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma) struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; struct vdpa_notification_area notify; - int index = vma->vm_pgoff; + unsigned long index = vma->vm_pgoff; if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; -- 2.26.2
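
The underflow being fixed: vm_pgoff is an unsigned long, so storing it in an int can yield a negative index that slips past the "index > 65535" bound check. A small userspace demonstration (illustrative values, typical two's-complement truncation assumed):

    #include <stdio.h>

    int main(void)
    {
        /* A hostile mmap offset makes vm_pgoff arbitrarily large. */
        unsigned long vm_pgoff = (unsigned long)-65536;

        int buggy = vm_pgoff;           /* truncates; typically negative */
        unsigned long fixed = vm_pgoff; /* keeps the full value */

        /* A negative int compares less than 65535, so the check passes... */
        printf("buggy index: %s\n", buggy > 65535 ? "rejected" : "accepted");
        /* ...while the unsigned long is correctly rejected. */
        printf("fixed index: %s\n", fixed > 65535 ? "rejected" : "accepted");
        return 0;
    }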
2020 Aug 06
0
[PATCH 1/4] vdpa: introduce config op to get valid iova range
...; + > > > > > > This is ambiguous. Is end in the range or just behind it? > > How about first/last? > > It is customary in the kernel to use start-end where end corresponds to > the byte following the last in the range. See struct vm_area_struct > vm_start and vm_end fields Exactly my point: include/linux/mm_types.h: unsigned long vm_end; /* The first byte after our end address in this case Jason wants it to be the last byte, not one behind. > > > > > > > > > /** > > > * vDPA_config_ops - operations...
2020 Aug 10
0
[PATCH 1/4] vdpa: introduce config op to get valid iova range
...ous. Is end in the range or just behind it? > > > > How about first/last? > > > > > > It is customary in the kernel to use start-end where end corresponds to > > > the byte following the last in the range. See struct vm_area_struct > > > vm_start and vm_end fields > > > > Exactly my point: > > > > include/linux/mm_types.h: unsigned long vm_end; /* The first byte after our end address > > > > in this case Jason wants it to be the last byte, not one behind. > > > > > Maybe start, si...
2020 Aug 11
0
[PATCH 1/4] vdpa: introduce config op to get valid iova range
...or just behind it? > > > > > > How about first/last? > > > > > It is customary in the kernel to use start-end where end corresponds to > > > > > the byte following the last in the range. See struct vm_area_struct > > > > > vm_start and vm_end fields > > > > Exactly my point: > > > > > > > > include/linux/mm_types.h: unsigned long vm_end; /* The first byte after our end address > > > > > > > > in this case Jason wants it to be the last byte, not one behind. >...
2020 May 29
0
[PATCH 4/6] vhost_vdpa: support doorbell mapping via mmap
...a_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct vhost_vdpa *v = vma->vm_file->private_data; + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + struct vdpa_notification_area notify; + int index = vma->vm_pgoff; + + if (vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + if ((vma->vm_flags & VM_SHARED) == 0) + return -EINVAL; + if (vma->vm_flags & VM_READ) + return -EINVAL; + if (index > 65535) + return -EINVAL; + if (!ops->get_vq_notification) + return -ENOTSUPP; + + /* To be safe and ea...
2020 May 29
0
[PATCH 4/6] vhost_vdpa: support doorbell mapping via mmap
...st_vdpa *v = vma->vm_file->private_data; >> +    struct vdpa_device *vdpa = v->vdpa; >> +    const struct vdpa_config_ops *ops = vdpa->config; >> +    struct vdpa_notification_area notify; >> +    int index = vma->vm_pgoff; >> + >> +    if (vma->vm_end - vma->vm_start != PAGE_SIZE) >> +        return -EINVAL; >> +    if ((vma->vm_flags & VM_SHARED) == 0) >> +        return -EINVAL; >> +    if (vma->vm_flags & VM_READ) >> +        return -EINVAL; >> +    if (index > 65535) >> +        r...
2007 Apr 18
1
[patch 5/9] Guest page hinting: mlocked pages.
...linux-2.6/mm/memory.c linux-2.6-patched/mm/memory.c --- linux-2.6/mm/memory.c 2006-09-01 12:50:24.000000000 +0200 +++ linux-2.6-patched/mm/memory.c 2006-09-01 12:50:24.000000000 +0200 @@ -2523,6 +2523,31 @@ int make_pages_present(unsigned long add BUG_ON(addr >= end); BUG_ON(end > vma->vm_end); len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE; + + if (page_host_discards() && (vma->vm_flags & VM_LOCKED)) { + int rlen = len; + ret = 0; + while (rlen > 0) { + struct page *page_refs[32]; + int chunk, cret, i; + + chunk = rlen < 32 ? rlen : 32; + cret = get_...
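
The hunk above processes the locked range at most 32 pages at a time so the on-stack page_refs array stays small. A userspace sketch of the same chunking pattern:

    #include <stdio.h>

    #define CHUNK 32

    static void process_in_chunks(int len)
    {
        int rlen = len;

        while (rlen > 0) {
            int chunk = rlen < CHUNK ? rlen : CHUNK;

            /* ... pin and process `chunk` pages here ... */
            printf("processing %d pages\n", chunk);
            rlen -= chunk;
        }
    }

    int main(void)
    {
        process_in_chunks(70); /* handled as 32 + 32 + 6 */
        return 0;
    }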
2020 Mar 04
5
[PATCH v3 0/4] nouveau/hmm: map pages after migration
Originally patch 4 was targeted for Jason's rdma tree since other HMM related changes were queued there. Now that those have been merged, these patches just contain changes to nouveau so they could go through any tree. I guess Ben Skeggs' tree would be appropriate. Changes since v2: Added patches 1-3 to fix some minor issues. Eliminated nouveau_find_svmm() since it is easily found.
2007 Apr 18
2
[PATCH] exec-shield style vdso move.
...apped_area(NULL, 0, PAGE_SIZE, 0, 0); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + + vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL); + if (!vma) { + ret = -ENOMEM; + goto up_fail; + } + + memset(vma, 0, sizeof(struct vm_area_struct)); + vma->vm_start = addr; + vma->vm_end = addr + PAGE_SIZE; + /* MAYWRITE to allow gdb to COW and set breakpoints */ + vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE; + vma->vm_flags |= mm->def_flags; + vma->vm_page_prot = protection_map[vma->vm_flags & 7]; + vma->vm_ops = &syscall_vm_ops; + v...
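
The patch builds a one-page VMA by hand: vm_end = vm_start + PAGE_SIZE covers exactly one page under the half-open convention, and VM_MAYWRITE lets gdb COW the page to set breakpoints even though the mapping itself is not writable. A simplified sketch (struct and flag definitions are stand-ins, not the kernel's):

    #define PAGE_SIZE   4096UL

    #define VM_READ     0x01UL
    #define VM_EXEC     0x04UL
    #define VM_MAYREAD  0x10UL
    #define VM_MAYWRITE 0x20UL
    #define VM_MAYEXEC  0x40UL

    struct vma_sketch {
        unsigned long vm_start;
        unsigned long vm_end;
        unsigned long vm_flags;
    };

    static void init_vdso_vma(struct vma_sketch *vma, unsigned long addr)
    {
        vma->vm_start = addr;
        vma->vm_end = addr + PAGE_SIZE; /* one page: [addr, addr + 4K) */
        vma->vm_flags = VM_READ | VM_EXEC |
                        VM_MAYREAD | VM_MAYEXEC | VM_MAYWRITE;
    }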