Stefano Garzarella
2022-Jun-21 16:08 UTC
[PATCH 2/3] vdpa_sim_blk: limit the number of requests handled per batch
Limit the number of requests (4 per queue as for vdpa_sim_net) handled
in a batch to prevent the worker from using the CPU for too long.

Suggested-by: Eugenio Pérez <eperezma at redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare at redhat.com>
---
 drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index a83a5c76f620..ac86478845b6 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -197,6 +197,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
 static void vdpasim_blk_work(struct work_struct *work)
 {
 	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+	bool reschedule = false;
 	int i;
 
 	spin_lock(&vdpasim->lock);
@@ -206,11 +207,15 @@ static void vdpasim_blk_work(struct work_struct *work)
 
 	for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
 		struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];
+		bool vq_work = true;
+		int reqs = 0;
 
 		if (!vq->ready)
 			continue;
 
-		while (vdpasim_blk_handle_req(vdpasim, vq)) {
+		while (vq_work) {
+			vq_work = vdpasim_blk_handle_req(vdpasim, vq);
+
 			/* Make sure used is visible before rasing the interrupt. */
 			smp_wmb();
 
@@ -218,10 +223,18 @@ static void vdpasim_blk_work(struct work_struct *work)
 			if (vringh_need_notify_iotlb(&vq->vring) > 0)
 				vringh_notify(&vq->vring);
 			local_bh_enable();
+
+			if (++reqs > 4) {
+				vq_work = false;
+				reschedule = true;
+			}
 		}
 	}
 out:
 	spin_unlock(&vdpasim->lock);
+
+	if (reschedule)
+		schedule_work(&vdpasim->work);
 }
 
 static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
-- 
2.36.1
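As a standalone illustration of what the patch does, here is a minimal userspace sketch of the bounded-batch pattern; it is not kernel code, and fake_queue, handle_one_request and the driver loop in main() are invented for the example. The worker processes at most a small batch of requests per run and reports whether it needs to be re-run, which is what schedule_work() provides in the real driver.

/* Bounded-batch worker sketch (userspace, illustrative only). */
#include <stdbool.h>
#include <stdio.h>

#define REQS_PER_BATCH 4

struct fake_queue {
	int pending;		/* requests still to be processed */
};

/* Handle one request; return false when the queue is empty. */
static bool handle_one_request(struct fake_queue *q)
{
	if (q->pending == 0)
		return false;
	q->pending--;
	return true;
}

/* One invocation of the "worker": returns true if it must be re-queued. */
static bool worker(struct fake_queue *q)
{
	bool reschedule = false;
	bool vq_work = true;
	int reqs = 0;

	while (vq_work) {
		vq_work = handle_one_request(q);

		/* Stop after a small batch and ask to be re-run. */
		if (++reqs > REQS_PER_BATCH) {
			vq_work = false;
			reschedule = true;
		}
	}

	return reschedule;
}

int main(void)
{
	struct fake_queue q = { .pending = 11 };
	int runs = 1;

	/* schedule_work() is modeled by simply calling worker() again. */
	while (worker(&q))
		runs++;

	printf("drained queue in %d worker runs\n", runs);
	return 0;
}

With 11 pending requests the queue drains in three short runs rather than one long one, so other work queued on the same CPU gets a chance to run in between.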
Jason Wang
2022-Jun-23 03:50 UTC
[PATCH 2/3] vdpa_sim_blk: limit the number of requests handled per batch
On Wed, Jun 22, 2022 at 12:09 AM Stefano Garzarella <sgarzare at redhat.com> wrote:
>
> Limit the number of requests (4 per queue as for vdpa_sim_net) handled
> in a batch to prevent the worker from using the CPU for too long.
>
> Suggested-by: Eugenio Pérez <eperezma at redhat.com>
> Signed-off-by: Stefano Garzarella <sgarzare at redhat.com>
> ---
>  drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 15 ++++++++++++++-
>  1 file changed, 14 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> index a83a5c76f620..ac86478845b6 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> @@ -197,6 +197,7 @@ static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
>  static void vdpasim_blk_work(struct work_struct *work)
>  {
>  	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
> +	bool reschedule = false;
>  	int i;
>
>  	spin_lock(&vdpasim->lock);
> @@ -206,11 +207,15 @@ static void vdpasim_blk_work(struct work_struct *work)
>
>  	for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
>  		struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];
> +		bool vq_work = true;
> +		int reqs = 0;
>
>  		if (!vq->ready)
>  			continue;
>
> -		while (vdpasim_blk_handle_req(vdpasim, vq)) {
> +		while (vq_work) {
> +			vq_work = vdpasim_blk_handle_req(vdpasim, vq);
> +

Is it better to check and exit the loop early here?

Thanks

>  			/* Make sure used is visible before rasing the interrupt. */
>  			smp_wmb();
>
> @@ -218,10 +223,18 @@ static void vdpasim_blk_work(struct work_struct *work)
>  			if (vringh_need_notify_iotlb(&vq->vring) > 0)
>  				vringh_notify(&vq->vring);
>  			local_bh_enable();
> +
> +			if (++reqs > 4) {
> +				vq_work = false;
> +				reschedule = true;
> +			}
>  		}
>  	}
>  out:
>  	spin_unlock(&vdpasim->lock);
> +
> +	if (reschedule)
> +		schedule_work(&vdpasim->work);
>  }
>
>  static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
> --
> 2.36.1
>
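For reference, one possible reading of Jason's suggestion is sketched below. This is not code posted in the thread, only an illustration of checking the return value and leaving the loop immediately, so the memory barrier and the notification path are skipped once the queue is empty; the local_bh_disable() line is unmodified context that falls between the two hunks quoted above.

		while (true) {
			if (!vdpasim_blk_handle_req(vdpasim, vq))
				break;

			/* Make sure used is visible before raising the interrupt. */
			smp_wmb();

			local_bh_disable();
			if (vringh_need_notify_iotlb(&vq->vring) > 0)
				vringh_notify(&vq->vring);
			local_bh_enable();

			/* Stop after a small batch and re-queue the worker. */
			if (++reqs > 4) {
				reschedule = true;
				break;
			}
		}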