Displaying 16 results from an estimated 23 matches for "vhost_invalidate_range_start".
2019 Jul 31
1
[PATCH V2 9/9] vhost: do not return -EAGAIN for non-blocking invalidation too early
...atic void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
> vhost_set_map_dirty(vq, map, index);
> vhost_map_unprefetch(map);
> }
> +
> + return 0;
> }
>
> static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
> @@ -443,18 +448,19 @@ static int vhost_invalidate_range_start(struct mmu_notifier *mn,
> {
> struct vhost_dev *dev = container_of(mn, struct vhost_dev,
> mmu_notifier);
> - int i, j;
> -
> - if (!mmu_notifier_range_blockable(range))
> - return -EAGAIN;
> + bool blockable = mmu_notifier_range_blockable(range);
> + int...
2019 Jul 31
0
[PATCH V2 9/9] vhost: do not return -EAGAIN for non-blocking invalidation too early
...->invalidate_count;
@@ -423,6 +426,8 @@ static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
vhost_set_map_dirty(vq, map, index);
vhost_map_unprefetch(map);
}
+
+ return 0;
}
static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
@@ -443,18 +448,19 @@ static int vhost_invalidate_range_start(struct mmu_notifier *mn,
{
struct vhost_dev *dev = container_of(mn, struct vhost_dev,
mmu_notifier);
- int i, j;
-
- if (!mmu_notifier_range_blockable(range))
- return -EAGAIN;
+ bool blockable = mmu_notifier_range_blockable(range);
+ int i, j, ret;
for (i = 0; i < dev->nvq...
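In outline, what the two excerpts above converge on, reconstructed from the quoted hunks rather than copied from the patch: -EAGAIN is returned for a non-blockable invalidation only once an overlap with a virtqueue's metadata is actually found, with the blockable flag pushed down into a per-address helper (whose exact signature is an assumption here):

static int vhost_invalidate_range_start(struct mmu_notifier *mn,
                                        const struct mmu_notifier_range *range)
{
        struct vhost_dev *dev = container_of(mn, struct vhost_dev,
                                             mmu_notifier);
        bool blockable = mmu_notifier_range_blockable(range);
        int i, j, ret;

        for (i = 0; i < dev->nvqs; i++) {
                struct vhost_virtqueue *vq = dev->vqs[i];

                for (j = 0; j < VHOST_NUM_ADDRS; j++) {
                        /* Assumed helper: fails with -EAGAIN only when
                         * the range overlaps this address's metadata
                         * and blocking is not allowed. */
                        ret = vhost_invalidate_vq_start(vq, j,
                                                        range->start,
                                                        range->end,
                                                        blockable);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}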
2019 Sep 06
1
[PATCH 1/2] Revert "vhost: access vq metadata through kernel virtual address"
...long start,
> - unsigned long end)
> -{
> - if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
> - return;
> -
> - spin_lock(&vq->mmu_lock);
> - --vq->invalidate_count;
> - spin_unlock(&vq->mmu_lock);
> -}
> -
> -static int vhost_invalidate_range_start(struct mmu_notifier *mn,
> - const struct mmu_notifier_range *range)
> -{
> - struct vhost_dev *dev = container_of(mn, struct vhost_dev,
> - mmu_notifier);
> - int i, j;
> -
> - if (!mmu_notifier_range_blockable(range))
> - return -EAGAIN;
> -
> - for (i...
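For orientation, the wiring this revert tears out, sketched (the ops initializer and registration point are assumptions consistent with the quoted callbacks and with how vhost tracks its owner's mm):

#include <linux/mmu_notifier.h>

static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
        .invalidate_range_start = vhost_invalidate_range_start,
        .invalidate_range_end = vhost_invalidate_range_end,
};

static int vhost_register_mmu_notifier(struct vhost_dev *dev)
{
        dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;
        /* Registered against the owner's mm; the revert removes this
         * along with the callbacks quoted above. */
        return mmu_notifier_register(&dev->mmu_notifier, dev->mm);
}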
2019 Sep 05
8
[PATCH 0/2] Revert and rework on the metadata acceleration
Hi:
Per request from Michael and Jason, the metadata acceleration is
reverted in this version and reworked in the next version.
Please review.
Thanks
Jason Wang (2):
Revert "vhost: access vq metadata through kernel virtual address"
vhost: re-introducing metadata acceleration through kernel virtual
address
drivers/vhost/vhost.c | 202 +++++++++++++++++++++++++-----------------
2019 Sep 08
3
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...long start,
> + unsigned long end)
> +{
> + if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
> + return;
> +
> + spin_lock(&vq->mmu_lock);
> + --vq->invalidate_count;
> + spin_unlock(&vq->mmu_lock);
> +}
> +
> +static int vhost_invalidate_range_start(struct mmu_notifier *mn,
> + const struct mmu_notifier_range *range)
> +{
> + struct vhost_dev *dev = container_of(mn, struct vhost_dev,
> + mmu_notifier);
> + bool blockable = mmu_notifier_range_blockable(range);
> + int i, j, ret;
> +
> + for (i = 0; i < de...
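A sketch of the per-address start helper the quoted loop presumably calls, assembled from the names the hunks use (vhost_map_range_overlap, mmu_lock, invalidate_count); the signature and the placement of the teardown are assumptions:

static int vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
                                     int index,
                                     unsigned long start,
                                     unsigned long end,
                                     bool blockable)
{
        if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
                return 0;
        if (!blockable)
                return -EAGAIN; /* overlap, but the caller may not sleep */

        spin_lock(&vq->mmu_lock);
        ++vq->invalidate_count; /* balanced by vhost_invalidate_vq_end() */
        spin_unlock(&vq->mmu_lock);

        /* Tear down the mapping as in the hunks quoted earlier:
         * vhost_set_map_dirty() then vhost_map_unprefetch(). */
        return 0;
}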
2019 Sep 05
0
[PATCH 1/2] Revert "vhost: access vq metadata through kernel virtual address"
...t_virtqueue *vq,
- int index,
- unsigned long start,
- unsigned long end)
-{
- if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
- return;
-
- spin_lock(&vq->mmu_lock);
- --vq->invalidate_count;
- spin_unlock(&vq->mmu_lock);
-}
-
-static int vhost_invalidate_range_start(struct mmu_notifier *mn,
- const struct mmu_notifier_range *range)
-{
- struct vhost_dev *dev = container_of(mn, struct vhost_dev,
- mmu_notifier);
- int i, j;
-
- if (!mmu_notifier_range_blockable(range))
- return -EAGAIN;
-
- for (i = 0; i < dev->nvqs; i++) {
- struct vhost_v...
2019 Sep 09
0
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...end)
>> +{
>> + if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
>> + return;
>> +
>> + spin_lock(&vq->mmu_lock);
>> + --vq->invalidate_count;
>> + spin_unlock(&vq->mmu_lock);
>> +}
>> +
>> +static int vhost_invalidate_range_start(struct mmu_notifier *mn,
>> + const struct mmu_notifier_range *range)
>> +{
>> + struct vhost_dev *dev = container_of(mn, struct vhost_dev,
>> + mmu_notifier);
>> + bool blockable = mmu_notifier_range_blockable(range);
>> + int i, j, ret;
>> +...
2019 Sep 09
1
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
..._overlap(&vq->uaddrs[index], start, end))
> > > + return;
> > > +
> > > + spin_lock(&vq->mmu_lock);
> > > + --vq->invalidate_count;
> > > + spin_unlock(&vq->mmu_lock);
> > > +}
> > > +
> > > +static int vhost_invalidate_range_start(struct mmu_notifier *mn,
> > > + const struct mmu_notifier_range *range)
> > > +{
> > > + struct vhost_dev *dev = container_of(mn, struct vhost_dev,
> > > + mmu_notifier);
> > > + bool blockable = mmu_notifier_range_blockable(range);
> &g...
2019 Sep 05
0
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...t_virtqueue *vq,
+ int index,
+ unsigned long start,
+ unsigned long end)
+{
+ if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
+ return;
+
+ spin_lock(&vq->mmu_lock);
+ --vq->invalidate_count;
+ spin_unlock(&vq->mmu_lock);
+}
+
+static int vhost_invalidate_range_start(struct mmu_notifier *mn,
+ const struct mmu_notifier_range *range)
+{
+ struct vhost_dev *dev = container_of(mn, struct vhost_dev,
+ mmu_notifier);
+ bool blockable = mmu_notifier_range_blockable(range);
+ int i, j, ret;
+
+ for (i = 0; i < dev->nvqs; i++) {
+ struct vhost_virtq...
2019 Mar 14
2
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
On 2019/3/14 6:42, Michael S. Tsirkin wrote:
>>>>> Which means after we fix vhost to add the flush_dcache_page after
>>>>> kunmap, Parisc will get a double hit (but it also means Parisc
>>>>> was the only one of those archs needed explicit cache flushes,
>>>>> where vhost worked correctly so far.. so it kind of proves your
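The fix under discussion, sketched: on architectures with virtually indexed caches such as PA-RISC, a kernel-side write through a kmap() alias must be followed by flush_dcache_page() after kunmap() so userspace sees the stores. The function name and parameters here are illustrative only:

#include <linux/highmem.h>
#include <linux/string.h>

static void vhost_write_user_page(struct page *page, size_t offset,
                                  const void *src, size_t len)
{
        void *kaddr = kmap(page);

        memcpy(kaddr + offset, src, len);
        kunmap(page);
        flush_dcache_page(page); /* the step Parisc needs after kunmap */
}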
2019 Mar 14
0
[RFC PATCH V2 0/5] vhost: accelerate metadata access through vmap()
...ET.
Then if you have the ability to disambiguate which is the first
invalidate that tears down a mapping to any given page (vhost can do
that trivially, it's just a pointer to a page struct to kmap), in the
mmu notifier invalidate just before dropping the spinlock you would
do this check:
def vhost_invalidate_range_start():
[..]
spin_lock(mmu_notifier_lock);
[..]
if (vhost->page_pointer) {
if (access == FOLL_WRITE)
VM_WARN_ON(!PageDirty(vhost->page_pointer));
vhost->page_pointer = NULL;
/* no put_page, already done at gup time */
}
spin_unlock(..
Generally speaking set...
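The same check rendered as C, under the assumptions the pseudocode states (a hypothetical vhost->page_pointer field recording the page pinned at gup time, and a dedicated notifier spinlock):

#include <linux/mm.h>
#include <linux/page-flags.h>

static void vhost_notifier_check_page(struct vhost_dev *vhost,
                                      unsigned int access)
{
        spin_lock(&vhost->mmu_notifier_lock);
        if (vhost->page_pointer) {
                if (access == FOLL_WRITE)
                        VM_WARN_ON(!PageDirty(vhost->page_pointer));
                vhost->page_pointer = NULL;
                /* no put_page(): already dropped at gup time in this
                 * scheme */
        }
        spin_unlock(&vhost->mmu_notifier_lock);
}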
2019 Apr 23
7
[RFC PATCH V3 0/6] vhost: accelerate metadata access
This series tries to access virtqueue metadata through kernel virtual
addresses instead of the copy_user() friends, since those have too much
overhead: checks, speculation barriers, or even hardware feature
toggling. This is done by setting up kernel addresses through direct
mapping and co-operating with VM management via MMU notifiers.
Tests show about a 23% improvement in TX PPS. TCP_STREAM doesn't see an
obvious
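One way the setup step can look, sketched under stated assumptions: pin the metadata pages, then obtain a kernel virtual address for them. The V2 RFC's title names vmap(); later versions differ in detail, so treat this as an illustration, not the series' exact code:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *vhost_map_metadata(unsigned long uaddr, int npages,
                                struct page **pages)
{
        int pinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);

        if (pinned != npages)
                return NULL; /* caller must release any pinned pages */

        /* A contiguous kernel alias for the pinned user pages. */
        return vmap(pages, npages, VM_MAP, PAGE_KERNEL);
}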
2019 May 24
10
[PATCH net-next 0/6] vhost: accelerate metadata access
Hi:
This series tries to access virtqueue metadata through kernel virtual
addresses instead of the copy_user() friends, since those have too much
overhead: checks, speculation barriers, or even hardware feature
toggling like SMAP. This is done by setting up kernel addresses through
direct mapping and co-operating with VM management via MMU notifiers.
Tests show about a 23% improvement in TX PPS. TCP_STREAM
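The accessor fast path this implies, sketched with assumed names (maps[], mmu_lock and the fallback follow the excerpts elsewhere on this page; VHOST_ADDR_AVAIL and map->addr are assumptions): read through the kernel mapping while it is live, fall back to the classic uaccess path, with its checks and SMAP toggling, once a notifier has torn it down.

static int vhost_get_avail_idx(struct vhost_virtqueue *vq, __virtio16 *idx)
{
        struct vhost_map *map;

        spin_lock(&vq->mmu_lock);
        map = vq->maps[VHOST_ADDR_AVAIL];
        if (likely(map)) {
                struct vring_avail *avail = map->addr;

                *idx = avail->idx;
                spin_unlock(&vq->mmu_lock);
                return 0;
        }
        spin_unlock(&vq->mmu_lock);

        /* Slow path: ordinary userspace access. */
        return __get_user(*idx, &vq->avail->idx);
}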
2019 Aug 07
11
[PATCH V3 00/10] Fixes for metadata acceleration
Hi all:
This series tries to fix several issues introduced by the metadata
acceleration series. Please review.
Changes from V2:
- use seqlock helper to synchronize MMU notifier with vhost worker
Changes from V1:
- try not to use RCU to synchronize MMU notifier with vhost worker
- set dirty pages after no readers
- return -EAGAIN only when we find the range overlaps with
metadata
Jason Wang (9):
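A sketch of the "seqlock helper" idea this changelog names, with vq->seq and the direct-access pointer vq->avail_kaddr as assumed fields: the notifier runs invalidation inside a write section, and the vhost worker retries any direct read that raced with it.

#include <linux/seqlock.h>

static u16 vhost_read_avail_idx(struct vhost_virtqueue *vq)
{
        unsigned int seq;
        u16 idx;

        do {
                seq = read_seqcount_begin(&vq->seq);
                idx = vq->avail_kaddr->idx; /* direct kernel-address read */
        } while (read_seqcount_retry(&vq->seq, seq));

        return idx;
}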
2019 Jul 31
14
[PATCH V2 0/9] Fixes for metadata acceleration
Hi all:
This series tries to fix several issues introduced by the metadata
acceleration series. Please review.
Changes from V1:
- Try not to use RCU to synchronize MMU notifier with vhost worker
- set dirty pages after no readers
- return -EAGAIN only when we find the range overlaps with
metadata
Jason Wang (9):
vhost: don't set uaddr for invalid address
vhost: validate MMU notifier
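The overlap test behind "return -EAGAIN only when we find the range overlaps with metadata", reconstructed as a sketch; the struct layout is assumed, since the quoted hunks only show the call:

struct vhost_uaddr {
        unsigned long uaddr;
        size_t size;
};

/* True when [start, end) intersects [uaddr, uaddr + size). */
static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
                                    unsigned long start,
                                    unsigned long end)
{
        return !(end <= uaddr->uaddr || start >= uaddr->uaddr + uaddr->size);
}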
2019 Aug 09
11
[PATCH V5 0/9] Fixes for vhost metadata acceleration
Hi all:
This series tries to fix several issues introduced by the metadata
acceleration series. Please review.
Changes from V4:
- switch to using a spinlock to synchronize MMU notifier with accessors
Changes from V3:
- remove the unnecessary patch
Changes from V2:
- use seqlock helper to synchronize MMU notifier with vhost worker
Changes from V1:
- try not to use RCU to synchronize MMU notifier with vhost worker
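A sketch of the V5 direction, assembled from the names quoted elsewhere on this page: the notifier and the accessors serialize on vq->mmu_lock, so invalidation just clears the map pointer under the lock, and any accessor that loses the race sees NULL and falls back to uaccess.

static void vhost_map_invalidate(struct vhost_virtqueue *vq, int index)
{
        struct vhost_map *map;

        spin_lock(&vq->mmu_lock);
        map = vq->maps[index];
        vq->maps[index] = NULL;
        spin_unlock(&vq->mmu_lock);

        if (map) {
                vhost_set_map_dirty(vq, map, index); /* write back dirty pages */
                vhost_map_unprefetch(map);           /* release the mapping */
        }
}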