search for: vhost_vq_access_map_begin

Displaying 20 results from an estimated 26 matches for "vhost_vq_access_map_begin".

2019 Jul 31
2
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...tex held). > */ > > for (i = 0; i < VHOST_NUM_ADDRS; i++) > @@ -362,6 +361,44 @@ static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, > return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); > } > > +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) > +{ > + int ref = READ_ONCE(vq->ref); > + > + smp_store_release(&vq->ref, ref + 1); > + /* Make sure ref counter is visible before accessing the map */ > + smp_load_acquire(&vq->ref); The map access is after this sequence, correct? J...
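For readers skimming the thread, here is a hedged sketch of the odd/even reference-counter scheme the V2 snippet above hints at. The struct, field names and the wait helper are illustrative assumptions, not a reconstruction of the truncated patch: the worker bumps a per-virtqueue counter around every metadata access, and the invalidation path waits until no access is in flight before tearing the map down.

/* Illustrative sketch only (kernel-style C), assuming a hypothetical
 * per-vq "ref" counter and "map" pointer; not the actual patch.
 */
#include <linux/compiler.h>     /* READ_ONCE */
#include <asm/barrier.h>        /* smp_mb, smp_store_release, smp_load_acquire */
#include <asm/processor.h>      /* cpu_relax */

struct demo_vq {
        int ref;        /* odd while an accessor is between begin/end */
        void *map;      /* metadata mapping the notifier may tear down */
};

static inline void demo_access_map_begin(struct demo_vq *vq)
{
        int ref = READ_ONCE(vq->ref);

        /* Publish "an access is in flight" before touching the map. */
        smp_store_release(&vq->ref, ref + 1);
        /* The load-acquire was meant to order the counter update against
         * the map accesses that follow; whether that is sufficient is
         * exactly what the reviewer's question above is probing. */
        smp_load_acquire(&vq->ref);
}

static inline void demo_access_map_end(struct demo_vq *vq)
{
        /* All map accesses are done before the counter goes even again. */
        smp_store_release(&vq->ref, READ_ONCE(vq->ref) + 1);
}

/* MMU-notifier side: after unpublishing the map, wait out any accessor. */
static inline void demo_sync_access(struct demo_vq *vq)
{
        int ref;

        smp_mb();               /* order map teardown vs. counter check */
        ref = READ_ONCE(vq->ref);
        if (ref & 1)            /* odd: an accessor is still inside */
                while (READ_ONCE(vq->ref) == ref)
                        cpu_relax();
        smp_mb();
}

Later in the thread this pattern is replaced, first by a seqcount and finally by a plain spinlock, because the barrier pairing here turned out to be hard to get right.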
2019 Jul 31
0
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...ed with + * memory accessors (e.g vq mutex held). */ for (i = 0; i < VHOST_NUM_ADDRS; i++) @@ -362,6 +361,44 @@ static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); } +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) +{ + int ref = READ_ONCE(vq->ref); + + smp_store_release(&vq->ref, ref + 1); + /* Make sure ref counter is visible before accessing the map */ + smp_load_acquire(&vq->ref); +} + +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq) +{ +...
2019 Aug 03
1
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c > index db2c81cb1e90..6d9501303258 100644 > --- a/drivers/vhost/vhost.c > +++ b/drivers/vhost/vhost.c > @@ -363,39 +363,29 @@ static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, > > static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) > { > - int ref = READ_ONCE(vq->ref); > - > - smp_store_release(&vq->ref, ref + 1); > - /* Make sure ref counter is visible before accessing the map */ > - smp_load_acquire(&vq->ref); > + write_seqcount_begin(&vq->seq); >...
2019 Aug 07
0
[PATCH V4 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...ed with + * memory accessors (e.g vq mutex held). */ for (i = 0; i < VHOST_NUM_ADDRS; i++) @@ -362,6 +361,40 @@ static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); } +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) +{ + write_seqcount_begin(&vq->seq); +} + +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq) +{ + write_seqcount_end(&vq->seq); +} + +static void inline vhost_vq_sync_access(struct vhost_virtqueue *vq) +{ + unsigned int seq; + + /* Make...
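A hedged sketch of the seqcount-based variant shown in the V3/V4 snippets (struct and helper names are illustrative, and the wait loop fills in what the snippet truncates, so it may differ from the real patch): the worker takes the seqcount write side around each metadata access, so the count is odd exactly while an access is in flight, and the MMU notifier polls the count until any in-flight access has drained.

/* Sketch of the seqcount scheme quoted above; not the actual patch. */
#include <linux/seqlock.h>
#include <linux/delay.h>        /* msleep */

struct demo_vq {
        seqcount_t seq;         /* odd while the worker touches the map */
        void *map;
};

/* Worker side.  Note the inversion of the usual seqcount pattern: the
 * accessor takes the *write* side so the count goes odd around its
 * critical section. */
static inline void demo_access_map_begin(struct demo_vq *vq)
{
        write_seqcount_begin(&vq->seq);
}

static inline void demo_access_map_end(struct demo_vq *vq)
{
        write_seqcount_end(&vq->seq);
}

/* MMU-notifier side: after the map has been unpublished, wait until no
 * access that might still see the old map is in flight. */
static inline void demo_sync_access(struct demo_vq *vq)
{
        unsigned int seq;

        smp_mb();       /* order map teardown vs. reading the count */
        seq = raw_read_seqcount(&vq->seq);
        if (seq & 1)    /* odd: worker is between begin and end */
                while (raw_read_seqcount(&vq->seq) == seq)
                        msleep(10);
}

The reply quoted further down (Aug 07) points out a catch with this: the seqcount write side only implies write barriers, which is what eventually pushed the series toward a plain spinlock.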
2019 Jul 31
14
[PATCH V2 0/9] Fixes for metadata acceleration
Hi all: This series tries to fix several issues introduced by the metadata acceleration series. Please review. Changes from V1: - Try not to use RCU to synchronize MMU notifier with vhost worker - set dirty pages after no readers - return -EAGAIN only when we find the range is overlapped with metadata Jason Wang (9): vhost: don't set uaddr for invalid address vhost: validate MMU notifier
2019 Aug 01
0
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...rite_seqbegin_end() is sufficient. diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index db2c81cb1e90..6d9501303258 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -363,39 +363,29 @@ static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) { - int ref = READ_ONCE(vq->ref); - - smp_store_release(&vq->ref, ref + 1); - /* Make sure ref counter is visible before accessing the map */ - smp_load_acquire(&vq->ref); + write_seqcount_begin(&vq->seq); } static void inline vhost_vq_access...
2019 Sep 08
3
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...vhost_uaddr *uaddr, > + unsigned long start, > + unsigned long end) > +{ > + if (unlikely(!uaddr->size)) > + return false; > + > + return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); > +} > + > +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) > +{ > + spin_lock(&vq->mmu_lock); > +} > + > +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq) > +{ > + spin_unlock(&vq->mmu_lock); > +} > + > +static int vhost_invalidate_vq_start(struct vhost_virtqueue...
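The reworked series quoted above drops the barrier games entirely: begin/end become a plain spinlock that the MMU notifier also takes, so mutual exclusion, rather than ordering, does the work. A minimal sketch, with illustrative names and spin_lock_init() at setup elided:

#include <linux/spinlock.h>

struct demo_vq {
        spinlock_t mmu_lock;    /* serializes accessors vs. invalidation */
        void *map;
};

static inline void demo_access_map_begin(struct demo_vq *vq)
{
        spin_lock(&vq->mmu_lock);
}

static inline void demo_access_map_end(struct demo_vq *vq)
{
        spin_unlock(&vq->mmu_lock);
}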
2019 Sep 09
0
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...unsigned long start, >> + unsigned long end) >> +{ >> + if (unlikely(!uaddr->size)) >> + return false; >> + >> + return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); >> +} >> + >> +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) >> +{ >> + spin_lock(&vq->mmu_lock); >> +} >> + >> +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq) >> +{ >> + spin_unlock(&vq->mmu_lock); >> +} >> + >> +static int vhost_inv...
2019 Sep 09
1
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...long end) > > > +{ > > > + if (unlikely(!uaddr->size)) > > > + return false; > > > + > > > + return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); > > > +} > > > + > > > +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) > > > +{ > > > + spin_lock(&vq->mmu_lock); > > > +} > > > + > > > +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq) > > > +{ > > > + spin_unlock(&vq->mmu_lock); > > &...
2019 Sep 05
0
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...; +} + +static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, + unsigned long start, + unsigned long end) +{ + if (unlikely(!uaddr->size)) + return false; + + return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); +} + +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) +{ + spin_lock(&vq->mmu_lock); +} + +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq) +{ + spin_unlock(&vq->mmu_lock); +} + +static int vhost_invalidate_vq_start(struct vhost_virtqueue *vq, + int index, + unsigned long...
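To round out the spinlock variant, a hedged sketch of the invalidation side hinted at by the truncated vhost_invalidate_vq_start() above; it reuses demo_vq from the earlier spinlock sketch, and everything beyond the overlap check is an assumption about the general shape, not the patch itself.

#include <linux/compiler.h>     /* unlikely */
#include <linux/spinlock.h>

/* Same overlap test as the quoted helper: [start, end] vs. the uaddr range. */
static bool demo_range_overlap(unsigned long uaddr, unsigned long size,
                               unsigned long start, unsigned long end)
{
        if (unlikely(!size))
                return false;
        return !(end < uaddr || start > uaddr - 1 + size);
}

/* Hypothetical notifier-side teardown under the same lock as the accessors. */
static void demo_invalidate_start(struct demo_vq *vq,
                                  unsigned long uaddr, unsigned long size,
                                  unsigned long start, unsigned long end)
{
        if (!demo_range_overlap(uaddr, size, start, end))
                return;

        spin_lock(&vq->mmu_lock);       /* excludes demo_access_map_begin/end */
        /* ...mark the backing pages dirty and drop the kernel mapping... */
        vq->map = NULL;
        spin_unlock(&vq->mmu_lock);
}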
2019 Aug 07
12
[PATCH V4 0/9] Fixes for metadata acceleration
Hi all: This series tries to fix several issues introduced by the metadata acceleration series. Please review. Changes from V3: - remove the unnecessary patch Changes from V2: - use seqlock helper to synchronize MMU notifier with vhost worker Changes from V1: - try not to use RCU to synchronize MMU notifier with vhost worker - set dirty pages after no readers - return -EAGAIN only when we find the
2019 Aug 07
11
[PATCH V3 00/10] Fixes for metadata acceleration
Hi all: This series tries to fix several issues introduced by the metadata acceleration series. Please review. Changes from V2: - use seqlock helper to synchronize MMU notifier with vhost worker Changes from V1: - try not to use RCU to synchronize MMU notifier with vhost worker - set dirty pages after no readers - return -EAGAIN only when we find the range is overlapped with metadata Jason Wang (9):
2019 Aug 09
11
[PATCH V5 0/9] Fixes for vhost metadata acceleration
Hi all: This series tries to fix several issues introduced by the metadata acceleration series. Please review. Changes from V4: - switch to using a spinlock to synchronize MMU notifier with accessors Changes from V3: - remove the unnecessary patch Changes from V2: - use seqlock helper to synchronize MMU notifier with vhost worker Changes from V1: - try not to use RCU to synchronize MMU notifier with vhost
2019 Sep 05
8
[PATCH 0/2] Revert and rework of the metadata acceleration
Hi: Per request from Michael and Jason, the metadata acceleration is reverted in this version and reworked in the next version. Please review. Thanks Jason Wang (2): Revert "vhost: access vq metadata through kernel virtual address" vhost: re-introducing metadata acceleration through kernel virtual address drivers/vhost/vhost.c | 202 +++++++++++++++++++++++++-----------------
2019 Aug 07
2
[PATCH V4 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...tex held). > */ > > for (i = 0; i < VHOST_NUM_ADDRS; i++) > @@ -362,6 +361,40 @@ static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, > return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); > } > > +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) > +{ > + write_seqcount_begin(&vq->seq); > +} > + > +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq) > +{ > + write_seqcount_end(&vq->seq); > +} The write side of a seqlock only provides write barriers. Access...
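To make the objection concrete: the seqcount write-side helpers expand to roughly the following (paraphrased from include/linux/seqlock.h of that era). smp_wmb() orders only stores, so the loads the vhost worker does on the metadata map between begin and end are not ordered against the sequence updates, and an MMU notifier that observes an even count cannot conclude those loads have finished. That gap is what pushed the series from the seqcount in V3/V4 to the spinlock in V5.

/* Roughly what the raw seqcount write side does -- write barriers only. */
static inline void raw_write_seqcount_begin(seqcount_t *s)
{
        s->sequence++;
        smp_wmb();      /* orders the increment vs. later *stores* only */
}

static inline void raw_write_seqcount_end(seqcount_t *s)
{
        smp_wmb();      /* again, no ordering for the critical-section loads */
        s->sequence++;
}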