search for: handle_kick

Displaying 12 unique results from an estimated 309 matches for "handle_kick".

2013 Mar 22
0
[PATCH] tcm_vhost: Use vs for struct vhost_scsi
..._lock_init(&s->vs_flush_lock);
-	init_waitqueue_head(&s->vs_flush_wait);
+	vs->vs_events_nr = 0;
+	vs->vs_inflight[0] = 0;
+	vs->vs_inflight[1] = 0;
+	spin_lock_init(&vs->vs_flush_lock);
+	init_waitqueue_head(&vs->vs_flush_wait);
-	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
-	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
+	vs->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
+	vs->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
 	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_M...
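For context, a minimal sketch of the handler shape these handle_kick assignments expect, assuming nothing beyond what the snippet shows; names ending in _example are hypothetical:

#include "vhost.h"	/* struct vhost_work, struct vhost_virtqueue */

/* Sketch only, not the tcm_vhost code. Kick handlers receive the
 * virtqueue's embedded poll work item and recover the vq with
 * container_of(), the standard form of vhost kick handlers such as
 * the vhost_scsi_*_handle_kick functions referenced above. */
static void example_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work,
			struct vhost_virtqueue, poll.work);

	/* drain vq's available ring here */
}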
2013 Apr 27
2
[PATCH v6 0/2] tcm_vhost flush
Changes in v6:
- Allow device specific fields per vq
- Track cmd per vq
- Do not track evt
- Switch to a static array for inflight allocation, completely getting rid of the pain of handling inflight allocation failure.

Asias He (2):
  vhost: Allow device specific fields per vq
  tcm_vhost: Wait for pending requests in vhost_scsi_flush()

 drivers/vhost/net.c | 60 +++++++++++--------
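The static-array point above can be illustrated with a rough, hypothetical sketch (all names here are invented, not the patch's): two preallocated inflight trackers per device, flipped on flush, so the flush path never allocates:

#include <linux/kref.h>
#include <linux/completion.h>

/* Hypothetical sketch of the v6 scheme. New requests take a reference
 * on inflight[idx]; a flush flips idx and waits on the old slot's
 * completion, which fires when its last outstanding request drops its
 * reference. No allocation, hence no failure path to handle. */
struct example_inflight {
	struct kref kref;		/* one reference per pending request */
	struct completion comp;		/* completed when kref hits zero */
};

struct example_dev {
	struct example_inflight inflight[2];	/* static pair, never kmalloc'd */
	int idx;				/* slot used by new requests */
};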
2020 May 28
0
[PATCH] vdpa: bypass waking up vhost_woker for vdpa vq kick
...art(struct vhost_virtqueue *vq)
> +{
> +	struct vhost_poll *poll = &vq->poll;
> +	struct file *file = vq->kick;
> +	__poll_t mask;
> +
> +
> +	if (poll->wqh)
> +		return 0;
> +
> +	mask = vfs_poll(file, &poll->table);
> +	if (mask)
> +		vq->handle_kick(&vq->poll.work);
> +	if (mask & EPOLLERR) {
> +		vhost_poll_stop(poll);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}

So this is basically a duplication of vhost_poll_start()?

> +
> +static long vhost_vdpa_set_vring_kick(struct vhost_virtqueue *vq,
> +...
2010 Jul 29
1
[PATCH] vhost: locking/rcu cleanup
...dle_rx(net);
-}
-
 static int vhost_net_open(struct inode *inode, struct file *f)
 {
 	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
@@ -550,16 +520,18 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 		return -ENOMEM;
 	dev = &n->dev;
-	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
-	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
+	vhost_work_set_fn(&n->vqs[VHOST_NET_VQ_TX].work, handle_tx);
+	vhost_work_set_fn(&n->vqs[VHOST_NET_VQ_RX].work, handle_rx);
 	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
 	if (r < 0) {
 		kfree...
2019 Nov 07
1
[PATCH v5] vhost: introduce mdev based hardware backend
...want to develop
>>>>> new API for e.g. dirty page tracking.
>>>> Good point. It's better to reject these ioctls for now.
>>>>
>>>> PS. One thing I may need to clarify is that we need the
>>>> VHOST_SET_OWNER ioctl to get the vq->handle_kick to work.
>>>> So if we don't call vhost_dev_ioctl(), we will need to
>>>> call vhost_dev_set_owner() directly.
>> I may have missed something, but it looks to me that there's no owner check in
>> vhost_vring_ioctl(), and vhost_poll_start() can make sure handle_kic...
2013 Jan 06
3
[PATCH] tcm_vhost: Use llist for cmd completion list
...@@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		return -ENOMEM;
 	vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work);
-	INIT_LIST_HEAD(&s->vs_completion_list);
-	spin_lock_init(&s->vs_completion_lock);
 	s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick;
 	s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick;
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h
index 7e87c63..47ee80b 100644
--- a/drivers/vhost/tcm_vhost.h
+++ b/drivers/vhost/tcm_vhost.h
@@ -34,7 +34,7 @@ struct tcm_vhost_...
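The conversion above is the usual lock-free llist producer/consumer pattern; a self-contained sketch with hypothetical names (the real field and function names are in the patch):

#include <linux/llist.h>

/* Hypothetical sketch: completion paths push finished commands with
 * llist_add() (no spinlock needed), and the completion work function
 * detaches the whole batch at once with llist_del_all(). */
struct example_cmd {
	struct llist_node completion_node;
};

static LLIST_HEAD(example_completion_list);

static void example_complete_cmd(struct example_cmd *cmd)
{
	llist_add(&cmd->completion_node, &example_completion_list);
	/* then queue the vhost completion work */
}

static void example_completion_work(void)
{
	struct llist_node *batch = llist_del_all(&example_completion_list);
	struct example_cmd *cmd, *tmp;

	llist_for_each_entry_safe(cmd, tmp, batch, completion_node) {
		/* report status for cmd and release it */
	}
}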
2020 Jun 02
2
[PATCH] vdpa: bypass waking up vhost_woker for vdpa vq kick
...-05-26  335		pollstart = (vq->kick = eventfp) != NULL;
a84ddbf1ef29f1 Zhu Lingshan 2020-05-26  336	} else
a84ddbf1ef29f1 Zhu Lingshan 2020-05-26  337		filep = eventfp;
a84ddbf1ef29f1 Zhu Lingshan 2020-05-26  338
a84ddbf1ef29f1 Zhu Lingshan 2020-05-26  339	if (pollstop && vq->handle_kick)
a84ddbf1ef29f1 Zhu Lingshan 2020-05-26  340		vhost_vdpa_poll_stop(vq);
a84ddbf1ef29f1 Zhu Lingshan 2020-05-26  341
a84ddbf1ef29f1 Zhu Lingshan 2020-05-26  342	if (filep)
a84ddbf1ef29f1 Zhu Lingshan 2020-05-26  343		fput(filep);
a84ddbf1ef29f1 Zhu Lingshan 2020-05-26  344
a84ddbf1ef29f1 Z...
2020 Jun 08
2
[PATCH] vhost/test: fix up after API change
...vers/vhost/test.c b/drivers/vhost/test.c
index f55cb584b84a..12304eb8da15 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -122,7 +122,7 @@ static int vhost_test_open(struct inode *inode, struct file *f)
 	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
 	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
 	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV + 64,
-		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, NULL);
+		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);
 	f->private_data = n;

-- 
MST
2012 Jul 30
0
[PATCH] tcm_vhost: Post-merge review changes requested by MST
...x-iscsi.org>

This patch contains the post RFC-v5 (post-merge) changes, which include:

- Add locking comment
- Move vhost_scsi_complete_cmd ahead of TFO callbacks in order to drop forward declarations
- Drop extra '!= NULL' usage in vhost_scsi_complete_cmd_work()
- Change vhost_scsi_*_handle_kick() to use pr_debug
- Fix possible race in vhost_scsi_set_endpoint() for vs->vs_tpg checking + assignment
- Convert tv_tpg->tpg_vhost_count + ->tv_tpg_port_count from atomic_t -> int, and make sure the reference is protected by ->tv_tpg_mutex
- Drop unnecessary vhost_scsi->vhost_r...
2013 May 07
1
[PATCH] vhost-test: Make vhost/test.c work
...= kmalloc(sizeof *n, GFP_KERNEL);
+	struct vhost_virtqueue **vqs;
 	struct vhost_dev *dev;
 	int r;

 	if (!n)
 		return -ENOMEM;
+	vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+	if (!vqs) {
+		kfree(n);
+		return -ENOMEM;
+	}
+
 	dev = &n->dev;
-	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
-	r = vhost_dev_init(dev, n->vqs, VHOST_TEST_VQ_MAX);
+	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ].vq;
+	n->vqs[VHOST_TEST_VQ].vq.handle_kick = handle_vq_kick;
+	r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
 	if (r < 0) {
 		kfree(n);
 		return r;
@@ -135,12 +...
2020 Sep 18
0
[PATCH v7 3/3] vhost: add an RPMsg API
...ned int n_epts)
> > +{
> > +	unsigned int i;
> > +
> > +	for (i = 0; i < ARRAY_SIZE(vr->vq); i++)
> > +		vr->vq_p[i] = &vr->vq[i];
> > +
> > +	/* vq[0]: host -> guest, vq[1]: host <- guest */
> > +	vr->vq[VIRTIO_RPMSG_REQUEST].handle_kick = handle_rpmsg_req_kick;
> > +	vr->vq[VIRTIO_RPMSG_RESPONSE].handle_kick = NULL;
>
> The comment depicts vq[0] followed by vq[1], but the code initialises vq[1] before
> vq[0], which is wildly confusing. At the very least this should be:

Nobody should care which of those is 0 an...
2019 May 17
0
[PATCH V2 1/4] vhost: introduce vhost_exceeds_weight()
...with
+ * request.
+ */
+#define VHOST_SCSI_WEIGHT 256
+
 struct vhost_scsi_inflight {
 	/* Wait for the flush operation to finish */
 	struct completion comp;
@@ -1622,7 +1628,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
 		vqs[i] = &vs->vqs[i].vq;
 		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
 	}
-	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
+	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
+		       VHOST_SCSI_WEIGHT, 0);
 	vhost_scsi_init_inflight(vs, NULL);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vh...
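To show the intent of the weight arguments added to vhost_dev_init() above, here is a hedged sketch of how a kick handler consumes that budget. The loop shape and example_service_one() are hypothetical; vhost_exceeds_weight() requeuing the vq when the limit is hit is the behavior this series introduces.

#include "vhost.h"	/* struct vhost_virtqueue, vhost_exceeds_weight() */

/* Hypothetical stand-in for the device-specific work of servicing one
 * request from the ring; returns the request's length, or 0 when the
 * ring is empty. */
static int example_service_one(struct vhost_virtqueue *vq)
{
	return 0; /* placeholder */
}

/* Charge each serviced request against the weight passed to
 * vhost_dev_init() and yield once the budget is spent, so one busy
 * virtqueue cannot starve the shared vhost worker. Breaking out is
 * safe: vhost_exceeds_weight() requeues the vq's poll work itself. */
static void example_handle_kick_budgeted(struct vhost_virtqueue *vq)
{
	int pkts = 0, total_len = 0, len;

	do {
		len = example_service_one(vq);
		if (len <= 0)
			break;	/* ring drained */
		total_len += len;
	} while (!vhost_exceeds_weight(vq, ++pkts, total_len));
}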