search for: vhost_uaddr

Displaying 20 results from an estimated 47 matches for "vhost_uaddr".

2019 Jul 23
2
[PATCH 5/6] vhost: mark dirty pages during map uninit
...vers/vhost/vhost.c > +++ b/drivers/vhost/vhost.c > @@ -306,6 +306,18 @@ static void vhost_map_unprefetch(struct vhost_map *map) > kfree(map); > } > > +static void vhost_set_map_dirty(struct vhost_virtqueue *vq, > + struct vhost_map *map, int index) > +{ > + struct vhost_uaddr *uaddr = &vq->uaddrs[index]; > + int i; > + > + if (uaddr->write) { > + for (i = 0; i < map->npages; i++) > + set_page_dirty(map->pages[i]); > + } > +} > + > static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) > { > struct vhost_ma...
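The hunk quoted above adds vhost_set_map_dirty(): before a metadata map is torn down, every page pinned for a writable mapping is marked dirty so its contents are not lost on writeback. A simplified, self-contained sketch of that pattern, with the struct layouts reduced to just the fields the helper uses (not the full driver definitions):

struct vhost_map {
    int npages;               /* number of pages pinned for this mapping */
    struct page **pages;      /* the pinned pages backing the metadata */
};

struct vhost_uaddr {
    unsigned long uaddr;      /* userspace address of the ring area */
    size_t size;              /* length in bytes */
    bool write;               /* true if the kernel may write through the map */
};

static void vhost_set_map_dirty(struct vhost_virtqueue *vq,
                                struct vhost_map *map, int index)
{
    struct vhost_uaddr *uaddr = &vq->uaddrs[index];
    int i;

    /* Only mappings that were set up writable can have dirtied pages. */
    if (uaddr->write) {
        for (i = 0; i < map->npages; i++)
            set_page_dirty(map->pages[i]);
    }
}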
2019 Sep 05
8
[PATCH 0/2] Revert and rework on the metadata acceleration
Hi: Per request from Michael and Jason, the metadata acceleration is reverted in this version and reworked in the next version. Please review. Thanks Jason Wang (2): Revert "vhost: access vq metadata through kernel virtual address" vhost: re-introducing metadata acceleration through kernel virtual address drivers/vhost/vhost.c | 202 +++++++++++++++++++++++++-----------------
2019 Sep 06
1
[PATCH 1/2] Revert "vhost: access vq metadata through kernel virtual address"
...); > - > -} > - > -static void vhost_reset_vq_maps(struct vhost_virtqueue *vq) > -{ > - int i; > - > - vhost_uninit_vq_maps(vq); > - for (i = 0; i < VHOST_NUM_ADDRS; i++) > - vq->uaddrs[i].size = 0; > -} > - > -static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, > - unsigned long start, > - unsigned long end) > -{ > - if (unlikely(!uaddr->size)) > - return false; > - > - return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); > -} > - > -static void vhost_invalidate_vq...
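The reverted vhost_map_range_overlap() above is a plain closed-interval intersection test; the last byte of the mapping is computed as uaddr - 1 + size so the sum cannot wrap past the top of the address space. A standalone illustration of the same check (plain C, unlikely() dropped, names kept from the excerpt):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vhost_uaddr {
    unsigned long uaddr;   /* start of the userspace mapping */
    size_t size;           /* length in bytes, 0 means "not set up" */
};

/* Returns true when the range [start, end] intersects the mapping. */
static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
                                    unsigned long start, unsigned long end)
{
    if (!uaddr->size)
        return false;

    /* Last byte of the mapping is uaddr - 1 + size, written this way to
     * avoid overflowing when the mapping ends at the top of the space. */
    return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
}

int main(void)
{
    struct vhost_uaddr u = { .uaddr = 0x1000, .size = 0x1000 }; /* [0x1000, 0x1fff] */

    printf("%d\n", vhost_map_range_overlap(&u, 0x0800, 0x0fff)); /* 0: ends before  */
    printf("%d\n", vhost_map_range_overlap(&u, 0x1800, 0x2800)); /* 1: overlaps     */
    printf("%d\n", vhost_map_range_overlap(&u, 0x2000, 0x3000)); /* 0: starts after */
    return 0;
}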
2019 Jul 26
2
[PATCH] vhost: disable metadata prefetch optimization
...the outstanding issues in a short order. drivers/vhost/vhost.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 819296332913..42a8c2a13ab1 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -96,7 +96,7 @@ struct vhost_uaddr { }; #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 -#define VHOST_ARCH_CAN_ACCEL_UACCESS 1 +#define VHOST_ARCH_CAN_ACCEL_UACCESS 0 #else #define VHOST_ARCH_CAN_ACCEL_UACCESS 0 #endif -- MST
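The one-line change above pins the capability macro to 0 on every configuration, so each #if VHOST_ARCH_CAN_ACCEL_UACCESS region compiles out and vhost falls back to its ordinary uaccess-based metadata accessors. A generic illustration of how such a compile-time kill switch behaves (the function below is made up for the example, not taken from the driver):

#include <stdio.h>

/* Normally derived from the kernel configuration; the patch forces it to 0. */
#define VHOST_ARCH_CAN_ACCEL_UACCESS 0

#if VHOST_ARCH_CAN_ACCEL_UACCESS
static int read_metadata(void)
{
    /* fast path: would dereference a kernel mapping of the ring */
    return 1;
}
#else
static int read_metadata(void)
{
    /* fallback: would go through the usual copy_from_user()-style path */
    return 0;
}
#endif

int main(void)
{
    printf("accelerated path compiled in: %d\n", read_metadata());
    return 0;
}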
2019 Sep 05
0
[PATCH 1/2] Revert "vhost: access vq metadata through kernel virtual address"
...ST_NUM_ADDRS; i++) - if (map[i]) - vhost_map_unprefetch(map[i]); - -} - -static void vhost_reset_vq_maps(struct vhost_virtqueue *vq) -{ - int i; - - vhost_uninit_vq_maps(vq); - for (i = 0; i < VHOST_NUM_ADDRS; i++) - vq->uaddrs[i].size = 0; -} - -static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, - unsigned long start, - unsigned long end) -{ - if (unlikely(!uaddr->size)) - return false; - - return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); -} - -static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq, - int...
2019 Sep 05
0
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...v *d) __vhost_vq_meta_reset(d->vqs[i]); } +#if VHOST_ARCH_CAN_ACCEL_UACCESS +static void vhost_map_unprefetch(struct vhost_map *map) +{ + kfree(map->pages); + kfree(map); +} + +static void vhost_set_map_dirty(struct vhost_virtqueue *vq, + struct vhost_map *map, int index) +{ + struct vhost_uaddr *uaddr = &vq->uaddrs[index]; + int i; + + if (uaddr->write) { + for (i = 0; i < map->npages; i++) + set_page_dirty(map->pages[i]); + } +} + +static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) +{ + struct vhost_map *map[VHOST_NUM_ADDRS]; + int i; + + spin_lock(&vq...
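The reworked series brings the same helpers back behind VHOST_ARCH_CAN_ACCEL_UACCESS. The tear-down path visible in the fragment detaches the per-virtqueue maps while holding vq->mmu_lock (the lock the MMU notifier also takes) and only dirties and frees them after dropping it. A hedged sketch of that shape, reconstructed from the visible fragments rather than copied from the patch:

static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
{
    struct vhost_map *map[VHOST_NUM_ADDRS];
    int i;

    /* Detach the maps under the lock shared with the MMU notifier. */
    spin_lock(&vq->mmu_lock);
    for (i = 0; i < VHOST_NUM_ADDRS; i++) {
        map[i] = vq->maps[i];
        vq->maps[i] = NULL;
    }
    spin_unlock(&vq->mmu_lock);

    /* Dirty and free the detached maps outside the spinlock. */
    for (i = 0; i < VHOST_NUM_ADDRS; i++) {
        if (map[i]) {
            vhost_set_map_dirty(vq, map[i], i);
            vhost_map_unprefetch(map[i]);
        }
    }
}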
2019 Sep 08
3
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...+#if VHOST_ARCH_CAN_ACCEL_UACCESS > +static void vhost_map_unprefetch(struct vhost_map *map) > +{ > + kfree(map->pages); > + kfree(map); > +} > + > +static void vhost_set_map_dirty(struct vhost_virtqueue *vq, > + struct vhost_map *map, int index) > +{ > + struct vhost_uaddr *uaddr = &vq->uaddrs[index]; > + int i; > + > + if (uaddr->write) { > + for (i = 0; i < map->npages; i++) > + set_page_dirty(map->pages[i]); > + } > +} > + > +static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) > +{ > + struct vhost_ma...
2019 Aug 07
11
[PATCH V3 00/10] Fixes for metadata acceleration
Hi all: This series tries to fix several issues introduced by the metadata acceleration series. Please review. Changes from V2: - use the seqlock helper to synchronize MMU notifier with vhost worker Changes from V1: - try not to use RCU to synchronize MMU notifier with vhost worker - set dirty pages after no readers - return -EAGAIN only when we find the range is overlapped with metadata Jason Wang (9):
2019 Jul 23
0
[PATCH 5/6] vhost: mark dirty pages during map uninit
...x 89c9f08b5146..5b8821d00fe4 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -306,6 +306,18 @@ static void vhost_map_unprefetch(struct vhost_map *map) kfree(map); } +static void vhost_set_map_dirty(struct vhost_virtqueue *vq, + struct vhost_map *map, int index) +{ + struct vhost_uaddr *uaddr = &vq->uaddrs[index]; + int i; + + if (uaddr->write) { + for (i = 0; i < map->npages; i++) + set_page_dirty(map->pages[i]); + } +} + static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq) { struct vhost_map *map[VHOST_NUM_ADDRS]; @@ -315,8 +327,10 @@ static vo...
2019 Jul 23
0
[PATCH 5/6] vhost: mark dirty pages during map uninit
...rs/vhost/vhost.c >> @@ -306,6 +306,18 @@ static void vhost_map_unprefetch(struct vhost_map *map) >> kfree(map); >> } >> >> +static void vhost_set_map_dirty(struct vhost_virtqueue *vq, >> + struct vhost_map *map, int index) >> +{ >> + struct vhost_uaddr *uaddr = &vq->uaddrs[index]; >> + int i; >> + >> + if (uaddr->write) { >> + for (i = 0; i < map->npages; i++) >> + set_page_dirty(map->pages[i]); >> + } >> +} >> + >> static void vhost_uninit_vq_maps(struct vhost_virtqueu...
2019 Sep 09
0
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...gt; +static void vhost_map_unprefetch(struct vhost_map *map) >> +{ >> + kfree(map->pages); >> + kfree(map); >> +} >> + >> +static void vhost_set_map_dirty(struct vhost_virtqueue *vq, >> + struct vhost_map *map, int index) >> +{ >> + struct vhost_uaddr *uaddr = &vq->uaddrs[index]; >> + int i; >> + >> + if (uaddr->write) { >> + for (i = 0; i < map->npages; i++) >> + set_page_dirty(map->pages[i]); >> + } >> +} >> + >> +static void vhost_uninit_vq_maps(struct vhost_virtqueue...
2019 Sep 09
1
[PATCH 2/2] vhost: re-introducing metadata acceleration through kernel virtual address
...*map) > > > +{ > > > + kfree(map->pages); > > > + kfree(map); > > > +} > > > + > > > +static void vhost_set_map_dirty(struct vhost_virtqueue *vq, > > > + struct vhost_map *map, int index) > > > +{ > > > + struct vhost_uaddr *uaddr = &vq->uaddrs[index]; > > > + int i; > > > + > > > + if (uaddr->write) { > > > + for (i = 0; i < map->npages; i++) > > > + set_page_dirty(map->pages[i]); > > > + } > > > +} > > > + > > > +s...
2019 Jul 27
2
INFO: rcu detected stall in vhost_worker
...t; Fixes: 0ecfebd2b524 ("Linux 5.2") > > For information about bisection process see: https://goo.gl/tpsmEJ#bisection --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -787,7 +787,6 @@ static void vhost_setup_uaddr(struct vho size_t size, bool write) { struct vhost_uaddr *addr = &vq->uaddrs[index]; - spin_lock(&vq->mmu_lock); addr->uaddr = uaddr; addr->size = size; @@ -797,7 +796,10 @@ static void vhost_setup_uaddr(struct vho static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq) { spin_lock(&vq->mmu_lock); - + /* + * dea...
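The diff in this report drops the spin_lock()/spin_unlock() pair from vhost_setup_uaddr() itself, leaving vhost_setup_vq_uaddr() to take vq->mmu_lock once around all of the ring addresses instead of re-acquiring a lock it already holds. A hedged sketch of the resulting shape (the constant and size-helper names follow the driver of that period; the avail and used setup is elided):

static void vhost_setup_uaddr(struct vhost_virtqueue *vq, int index,
                              unsigned long uaddr, size_t size, bool write)
{
    struct vhost_uaddr *addr = &vq->uaddrs[index];

    /* No locking here: the caller already holds vq->mmu_lock. */
    addr->uaddr = uaddr;
    addr->size = size;
    addr->write = write;
}

static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
{
    /* One lock acquisition covering every address update. */
    spin_lock(&vq->mmu_lock);

    vhost_setup_uaddr(vq, VHOST_ADDR_DESC, (unsigned long)vq->desc,
                      vhost_get_desc_size(vq, vq->num), false);
    /* ... avail and used are recorded the same way, write = true for used ... */

    spin_unlock(&vq->mmu_lock);
}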
2019 Jul 31
2
[PATCH V2 7/9] vhost: do not use RCU to synchronize MMU notifier with worker
...alized with memory accessors (e.g vq mutex held). > + /* No need for synchronization since we are serialized with > + * memory accessors (e.g vq mutex held). > */ > > for (i = 0; i < VHOST_NUM_ADDRS; i++) > @@ -362,6 +361,44 @@ static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr, > return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size); > } > > +static void inline vhost_vq_access_map_begin(struct vhost_virtqueue *vq) > +{ > + int ref = READ_ONCE(vq->ref); > + > + smp_store_release(&vq->ref, ref +...
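The V2 series replaced RCU with a small hand-rolled reader counter: the vhost worker bumps vq->ref with release semantics when it enters and leaves a map access, and the MMU notifier side waits until the counter shows no reader in flight before invalidating a map. A hedged sketch of the idea (simplified; whether the barriers were sufficient is exactly what this review thread debated, and the wait-side helper name here is illustrative, not the patch's):

static inline void vhost_vq_access_map_begin(struct vhost_virtqueue *vq)
{
    int ref = READ_ONCE(vq->ref);

    /* Counter becomes odd: publish "reader inside" before touching the map. */
    smp_store_release(&vq->ref, ref + 1);
    /* Full barrier so later map loads cannot be hoisted above the store. */
    smp_mb();
}

static inline void vhost_vq_access_map_end(struct vhost_virtqueue *vq)
{
    int ref = READ_ONCE(vq->ref);

    /* Counter becomes even again: map accesses are ordered before this store. */
    smp_store_release(&vq->ref, ref + 1);
}

/* Invalidation side: spin until no reader is inside the critical section. */
static inline void vhost_wait_for_map_readers(struct vhost_virtqueue *vq)
{
    smp_mb();
    while (READ_ONCE(vq->ref) & 1)
        cpu_relax();
}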
2019 Jun 06
2
[PATCH] vhost: Don't use defined in VHOST_ARCH_CAN_ACCEL_UACCESS definition
...-#if VHOST_ARCH_CAN_ACCEL_UACCESS +#ifdef VHOST_ARCH_CAN_ACCEL_UACCESS vhost_setup_vq_uaddr(vq); if (d->mm) diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index c5d950cf7627..d9f36c479fa7 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -95,8 +95,9 @@ struct vhost_uaddr { bool write; }; -#define VHOST_ARCH_CAN_ACCEL_UACCESS defined(CONFIG_MMU_NOTIFIER) && \ - ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 +#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0 +#define VHOST_ARCH_CAN_ACCEL_UACCESS +#endif /* The virtqueue struct...
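The issue this patch fixes is a preprocessor one: when the token `defined` reaches an #if expression only through macro expansion, the C standard (C11 6.10.1p4) leaves the result undefined, so the capability flag is turned into a plain macro that is either defined or not and is tested with #ifdef. A minimal before/after sketch of the two patterns with generic names (not the vhost headers):

/* Before: 'defined' reaches the #if via macro expansion -- undefined behavior. */
#define HAVE_FAST_PATH defined(CONFIG_FOO) && (BAR == 0)
#if HAVE_FAST_PATH              /* result varies by compiler */
/* ... */
#endif

/* After: evaluate defined() directly, then test the flag with #ifdef. */
#if defined(CONFIG_FOO) && (BAR == 0)
#define HAVE_FAST_PATH
#endif

#ifdef HAVE_FAST_PATH
/* accelerated code goes here */
#endif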