Displaying 20 results from an estimated 26 matches for "vhost_vq_access_map_end".
2019 Jul 31
2
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...t we?
Cc Paul in case I missed something here. And if I'm right,
maybe we should call this out, adding
"The opposite is not true: a prior RELEASE is not
guaranteed to be visible before memory accesses following
the subsequent ACQUIRE".
> +}
> +
> +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
> +{
> + int ref = READ_ONCE(vq->ref);
> +
> + /* Make sure vq access is done before increasing ref counter */
> + smp_store_release(&vq->ref, ref + 1);
> +}
> +
> +static void inline vhost_vq_sync_access(struct vhost_virtqueue *vq)
>...
2019 Jul 31
2
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...t we?
Cc Paul in case I missed something here. And if I'm right,
maybe we should call this out, adding
"The opposite is not true: a prior RELEASE is not
guaranteed to be visible before memory accesses following
the subsequent ACQUIRE".
> +}
> +
> +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
> +{
> + int ref = READ_ONCE(vq->ref);
> +
> + /* Make sure vq access is done before increasing ref counter */
> + smp_store_release(&vq->ref, ref + 1);
> +}
> +
> +static void inline vhost_vq_sync_access(struct vhost_virtqueue *vq)
>...
2019 Jul 31
0
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...ize);
}
+static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
+{
+ int ref = READ_ONCE(vq->ref);
+
+ smp_store_release(&vq->ref, ref + 1);
+ /* Make sure ref counter is visible before accessing the map */
+ smp_load_acquire(&vq->ref);
+}
+
+static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
+{
+ int ref = READ_ONCE(vq->ref);
+
+ /* Make sure vq access is done before increasing ref counter */
+ smp_store_release(&vq->ref, ref + 1);
+}
+
+static void inline vhost_vq_sync_access(struct vhost_virtqueue *vq)
+{
+ int ref;
+
+ /* Make sure map change wa...
2019 Aug 07
0
[PATCH V4 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
}
+static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
+{
+ write_seqcount_begin(&vq->seq);
+}
+
+static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
+{
+ write_seqcount_end(&vq->seq);
+}
+
+static void inline vhost_vq_sync_access(struct vhost_virtqueue *vq)
+{
+ unsigned int seq;
+
+ /* Make sure any changes to map was done before checking seq
+ * counter. Paired with smp_wmb() in write_seqcount_begin().
+ *...
2019 Aug 03
1
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...{
> - int ref = READ_ONCE(vq->ref);
> -
> - smp_store_release(&vq->ref, ref + 1);
> - /* Make sure ref counter is visible before accessing the map */
> - smp_load_acquire(&vq->ref);
> + write_seqcount_begin(&vq->seq);
> }
>
> static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
> {
> - int ref = READ_ONCE(vq->ref);
> -
> - /* Make sure vq access is done before increasing ref counter */
> - smp_store_release(&vq->ref, ref + 1);
> + write_seqcount_end(&vq->seq);
> }
>
> static void inline vhost_vq_...
2019 Jul 31
14
[PATCH V2 0/9] Fixes for metadata accelreation
Hi all:
This series try to fix several issues introduced by meta data
accelreation series. Please review.
Changes from V1:
- Try not use RCU to syncrhonize MMU notifier with vhost worker
- set dirty pages after no readers
- return -EAGAIN only when we find the range is overlapped with
metadata
Jason Wang (9):
vhost: don't set uaddr for invalid address
vhost: validate MMU notifier
2019 Aug 01
0
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...ccess_map_begin(struct vhost_virtqueue *vq)
{
- int ref = READ_ONCE(vq->ref);
-
- smp_store_release(&vq->ref, ref + 1);
- /* Make sure ref counter is visible before accessing the map */
- smp_load_acquire(&vq->ref);
+ write_seqcount_begin(&vq->seq);
}
static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
{
- int ref = READ_ONCE(vq->ref);
-
- /* Make sure vq access is done before increasing ref counter */
- smp_store_release(&vq->ref, ref + 1);
+ write_seqcount_end(&vq->seq);
}
static void inline vhost_vq_sync_access(struct vhost_virtqueue *vq)
{
-...
2019 Sep 08
3
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...+ return false;
> +
> + return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
> +}
> +
> +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
> +{
> + spin_lock(&vq->mmu_lock);
> +}
> +
> +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
> +{
> + spin_unlock(&vq->mmu_lock);
> +}
> +
> +static int vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
> + int index,
> + unsigned long start,
> + unsigned long end,
> + bool blockable)
> +{...
2019 Sep 08
3
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...+ return false;
> +
> + return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
> +}
> +
> +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
> +{
> + spin_lock(&vq->mmu_lock);
> +}
> +
> +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
> +{
> + spin_unlock(&vq->mmu_lock);
> +}
> +
> +static int vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
> + int index,
> + unsigned long start,
> + unsigned long end,
> + bool blockable)
> +{...
2019 Sep 05
0
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...ned long end)
+{
+ if (unlikely(!uaddr->size))
+ return false;
+
+ return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
+}
+
+static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
+{
+ spin_lock(&vq->mmu_lock);
+}
+
+static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
+{
+ spin_unlock(&vq->mmu_lock);
+}
+
+static int vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
+ int index,
+ unsigned long start,
+ unsigned long end,
+ bool blockable)
+{
+ struct vhost_uaddr *uaddr = &vq->uaddrs[i...
2019 Sep 09
0
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...eturn !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
>> +}
>> +
>> +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
>> +{
>> + spin_lock(&vq->mmu_lock);
>> +}
>> +
>> +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
>> +{
>> + spin_unlock(&vq->mmu_lock);
>> +}
>> +
>> +static int vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
>> + int index,
>> + unsigned long start,
>> + unsigned long end,
>>...
2019 Sep 09
1
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...addr->uaddr - 1 + uaddr->size);
> > > +}
> > > +
> > > +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
> > > +{
> > > + spin_lock(&vq->mmu_lock);
> > > +}
> > > +
> > > +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
> > > +{
> > > + spin_unlock(&vq->mmu_lock);
> > > +}
> > > +
> > > +static int vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
> > > + int index,
> > > + unsigned long start,
>...
2019 Aug 09
11
[PATCH V5 0/9] Fixes for vhost metadata acceleration
Hi all:
This series try to fix several issues introduced by meta data
accelreation series. Please review.
Changes from V4:
- switch to use spinlock synchronize MMU notifier with accessors
Changes from V3:
- remove the unnecessary patch
Changes from V2:
- use seqlck helper to synchronize MMU notifier with vhost worker
Changes from V1:
- try not use RCU to syncrhonize MMU notifier with vhost
2019 Aug 09
11
[PATCH V5 0/9] Fixes for vhost metadata acceleration
Hi all:
This series try to fix several issues introduced by meta data
accelreation series. Please review.
Changes from V4:
- switch to use spinlock synchronize MMU notifier with accessors
Changes from V3:
- remove the unnecessary patch
Changes from V2:
- use seqlck helper to synchronize MMU notifier with vhost worker
Changes from V1:
- try not use RCU to syncrhonize MMU notifier with vhost
2019 Aug 07
11
[PATCH V3 00/10] Fixes for metadata accelreation
Hi all:
This series try to fix several issues introduced by meta data
accelreation series. Please review.
Changes from V2:
- use seqlck helper to synchronize MMU notifier with vhost worker
Changes from V1:
- try not use RCU to syncrhonize MMU notifier with vhost worker
- set dirty pages after no readers
- return -EAGAIN only when we find the range is overlapped with
metadata
Jason Wang (9):
2019 Aug 07
12
[PATCH V4 0/9] Fixes for metadata accelreation
Hi all:
This series try to fix several issues introduced by meta data
accelreation series. Please review.
Changes from V3:
- remove the unnecessary patch
Changes from V2:
- use seqlck helper to synchronize MMU notifier with vhost worker
Changes from V1:
- try not use RCU to syncrhonize MMU notifier with vhost worker
- set dirty pages after no readers
- return -EAGAIN only when we find the
2019 Aug 07
12
[PATCH V4 0/9] Fixes for metadata accelreation
Hi all:
This series try to fix several issues introduced by meta data
accelreation series. Please review.
Changes from V3:
- remove the unnecessary patch
Changes from V2:
- use seqlck helper to synchronize MMU notifier with vhost worker
Changes from V1:
- try not use RCU to syncrhonize MMU notifier with vhost worker
- set dirty pages after no readers
- return -EAGAIN only when we find the
2019 Sep 05
8
[PATCH 0/2] Revert and rework on the metadata accelreation
Hi:
Per request from Michael and Jason, the metadata accelreation is
reverted in this version and rework in next version.
Please review.
Thanks
Jason Wang (2):
Revert "vhost: access vq metadata through kernel virtual address"
vhost: re-introducing metadata acceleration through kernel virtual
address
drivers/vhost/vhost.c | 202 +++++++++++++++++++++++++-----------------
2019 Sep 05
8
[PATCH 0/2] Revert and rework on the metadata accelreation
Hi:
Per request from Michael and Jason, the metadata accelreation is
reverted in this version and rework in next version.
Please review.
Thanks
Jason Wang (2):
Revert "vhost: access vq metadata through kernel virtual address"
vhost: re-introducing metadata acceleration through kernel virtual
address
drivers/vhost/vhost.c | 202 +++++++++++++++++++++++++-----------------
2019 Aug 08
3
[PATCH V4 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...t;uaddr - 1 +
>>> uaddr->size);
>>> }
>>> +static void inline vhost_vq_access_map_begin(struct
>>> vhost_virtqueue *vq)
>>> +{
>>> +    write_seqcount_begin(&vq->seq);
>>> +}
>>> +
>>> +static void inline vhost_vq_access_map_end(struct vhost_virtqueue *vq)
>>> +{
>>> +    write_seqcount_end(&vq->seq);
>>> +}
>> The write side of a seqlock only provides write barriers. Access to
>>
>>     map = vq->maps[VHOST_ADDR_USED];
>>
>> Still needs a read side barrier...