Distribute requests across the multiqueue complex automatically based
on the IRQ affinity.
Suggested-by: Stefan Hajnoczi <stefanha at redhat.com>
Signed-off-by: Connor Kuehl <ckuehl at redhat.com>
---
fs/fuse/virtio_fs.c | 30 ++++++++++++++++++++++++------
1 file changed, 24 insertions(+), 6 deletions(-)
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index bcb8a02e2d8b..dcdc8b7b1ad5 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -30,6 +30,10 @@
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);
+struct virtio_fs_vq;
+
+DEFINE_PER_CPU(struct virtio_fs_vq *, this_cpu_fsvq);
+
enum {
VQ_HIPRIO,
VQ_REQUEST
@@ -673,6 +677,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
struct virtqueue **vqs;
vq_callback_t **callbacks;
const char **names;
+ struct irq_affinity desc = { .pre_vectors = 1, .nr_sets = 1, };
unsigned int i;
int ret = 0;
@@ -681,6 +686,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
if (fs->num_request_queues == 0)
return -EINVAL;
+ fs->num_request_queues = min_t(unsigned int, nr_cpu_ids,
+ fs->num_request_queues);
+
fs->nvqs = VQ_REQUEST + fs->num_request_queues;
fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
if (!fs->vqs)
@@ -710,12 +718,24 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
names[i] = fs->vqs[i].name;
}
- ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
+ ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, &desc);
if (ret < 0)
goto out;
- for (i = 0; i < fs->nvqs; i++)
+ for (i = 0; i < fs->nvqs; i++) {
+ const struct cpumask *mask;
+ unsigned int cpu;
+
fs->vqs[i].vq = vqs[i];
+ if (i == VQ_HIPRIO)
+ continue;
+
+ mask = vdev->config->get_vq_affinity(vdev, i);
+ for_each_cpu(cpu, mask) {
+ struct virtio_fs_vq **cpu_vq = per_cpu_ptr(&this_cpu_fsvq, cpu);
+ *cpu_vq = &fs->vqs[i];
+ }
+ }
virtio_fs_start_all_queues(fs);
out:
@@ -877,8 +897,6 @@ static int virtio_fs_probe(struct virtio_device *vdev)
if (ret < 0)
goto out;
- /* TODO vq affinity */
-
ret = virtio_fs_setup_dax(vdev, fs);
if (ret < 0)
goto out_vqs;
@@ -1225,7 +1243,6 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
- unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
struct virtio_fs *fs;
struct fuse_req *req;
struct virtio_fs_vq *fsvq;
@@ -1245,7 +1262,8 @@ __releases(fiq->lock)
req->in.h.nodeid, req->in.h.len,
fuse_len_args(req->args->out_numargs, req->args->out_args));
- fsvq = &fs->vqs[queue_id];
+ fsvq = this_cpu_read(this_cpu_fsvq);
+
ret = virtio_fs_enqueue_req(fsvq, req, false);
if (ret < 0) {
if (ret == -ENOMEM || ret == -ENOSPC) {
--
2.30.2
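
For reference, the irq_affinity descriptor introduced above is what asks the virtio core to spread the request-queue vectors across CPUs. A minimal, annotated sketch of that pattern follows; the descriptor values mirror the patch, while the surrounding function and its name are illustrative only, not part of the driver.

#include <linux/interrupt.h>
#include <linux/virtio_config.h>

/*
 * Illustrative sketch: .pre_vectors = 1 keeps the first vector (used by
 * the hiprio queue in this driver) out of automatic affinity spreading;
 * the remaining vectors, one per request queue, are distributed across
 * the online CPUs by the core IRQ code.
 */
static int example_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue **vqs, vq_callback_t **callbacks,
			    const char **names)
{
	struct irq_affinity desc = { .pre_vectors = 1, .nr_sets = 1, };

	return virtio_find_vqs(vdev, nvqs, vqs, callbacks, names, &desc);
}

The per-CPU map built from get_vq_affinity() in the hunk above then records, for each CPU, which request queue's vector was steered to it.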
Hi Connor,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on fuse/for-next]
[also build test WARNING on linux/master linus/master v5.12 next-20210507]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url:    https://github.com/0day-ci/linux/commits/Connor-Kuehl/virtiofs-Enable-multiple-request-queues/20210508-061611
base:   https://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git for-next
config: microblaze-randconfig-r001-20210507 (attached as .config)
compiler: microblaze-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/6ffd4543401bc990353404b556d91cce34b017fc
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Connor-Kuehl/virtiofs-Enable-multiple-request-queues/20210508-061611
        git checkout 6ffd4543401bc990353404b556d91cce34b017fc
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross W=1 ARCH=microblaze
If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp at intel.com>
All warnings (new ones prefixed by >>):
   fs/fuse/virtio_fs.c: In function 'virtio_fs_wake_pending_and_unlock':
>> fs/fuse/virtio_fs.c:1246:20: warning: variable 'fs' set but not used [-Wunused-but-set-variable]
    1246 |  struct virtio_fs *fs;
         |                    ^~
vim +/fs +1246 fs/fuse/virtio_fs.c

a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1242  
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1243  static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1244  __releases(fiq->lock)
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1245  {
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12 @1246  	struct virtio_fs *fs;
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1247  	struct fuse_req *req;
51fecdd2555b3e Vivek Goyal     2019-10-15  1248  	struct virtio_fs_vq *fsvq;
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1249  	int ret;
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1250  
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1251  	WARN_ON(list_empty(&fiq->pending));
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1252  	req = list_last_entry(&fiq->pending, struct fuse_req, list);
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1253  	clear_bit(FR_PENDING, &req->flags);
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1254  	list_del_init(&req->list);
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1255  	WARN_ON(!list_empty(&fiq->pending));
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1256  	spin_unlock(&fiq->lock);
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1257  
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1258  	fs = fiq->priv;
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1259  
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1260  	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1261  		  __func__, req->in.h.opcode, req->in.h.unique,
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1262  		 req->in.h.nodeid, req->in.h.len,
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1263  		 fuse_len_args(req->args->out_numargs, req->args->out_args));
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1264  
6ffd4543401bc9 Connor Kuehl    2021-05-07  1265  	fsvq = this_cpu_read(this_cpu_fsvq);
6ffd4543401bc9 Connor Kuehl    2021-05-07  1266  
a9bfd9dd341756 Vivek Goyal     2019-10-15  1267  	ret = virtio_fs_enqueue_req(fsvq, req, false);
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1268  	if (ret < 0) {
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1269  		if (ret == -ENOMEM || ret == -ENOSPC) {
a9bfd9dd341756 Vivek Goyal     2019-10-15  1270  			/*
a9bfd9dd341756 Vivek Goyal     2019-10-15  1271  			 * Virtqueue full. Retry submission from worker
a9bfd9dd341756 Vivek Goyal     2019-10-15  1272  			 * context as we might be holding fc->bg_lock.
a9bfd9dd341756 Vivek Goyal     2019-10-15  1273  			 */
a9bfd9dd341756 Vivek Goyal     2019-10-15  1274  			spin_lock(&fsvq->lock);
a9bfd9dd341756 Vivek Goyal     2019-10-15  1275  			list_add_tail(&req->list, &fsvq->queued_reqs);
a9bfd9dd341756 Vivek Goyal     2019-10-15  1276  			inc_in_flight_req(fsvq);
a9bfd9dd341756 Vivek Goyal     2019-10-15  1277  			schedule_delayed_work(&fsvq->dispatch_work,
a9bfd9dd341756 Vivek Goyal     2019-10-15  1278  					      msecs_to_jiffies(1));
a9bfd9dd341756 Vivek Goyal     2019-10-15  1279  			spin_unlock(&fsvq->lock);
a9bfd9dd341756 Vivek Goyal     2019-10-15  1280  			return;
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1281  		}
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1282  		req->out.h.error = ret;
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1283  		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
51fecdd2555b3e Vivek Goyal     2019-10-15  1284  
51fecdd2555b3e Vivek Goyal     2019-10-15  1285  		/* Can't end request in submission context. Use a worker */
51fecdd2555b3e Vivek Goyal     2019-10-15  1286  		spin_lock(&fsvq->lock);
51fecdd2555b3e Vivek Goyal     2019-10-15  1287  		list_add_tail(&req->list, &fsvq->end_reqs);
51fecdd2555b3e Vivek Goyal     2019-10-15  1288  		schedule_delayed_work(&fsvq->dispatch_work, 0);
51fecdd2555b3e Vivek Goyal     2019-10-15  1289  		spin_unlock(&fsvq->lock);
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1290  		return;
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1291  	}
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1292  }
a62a8ef9d97da2 Stefan Hajnoczi 2018-06-12  1293  
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all at lists.01.org
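
The warning is a direct consequence of the patch: once the submission queue comes from this_cpu_read(), the local assignment fs = fiq->priv is dead. A minimal follow-up (not posted in this thread; hunk offsets deliberately omitted) would simply drop the local, though keeping fs in use for queue selection, as discussed in the replies below, would resolve it as well:

--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ ... @@ static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
 __releases(fiq->lock)
 {
-	struct virtio_fs *fs;
 	struct fuse_req *req;
 	struct virtio_fs_vq *fsvq;
 	int ret;
@@ ... @@
 	WARN_ON(!list_empty(&fiq->pending));
 	spin_unlock(&fiq->lock);
 
-	fs = fiq->priv;
-
 	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",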
On 5/7/21 5:15 PM, Connor Kuehl wrote:
> Distribute requests across the multiqueue complex automatically based
> on the IRQ affinity.
>
> Suggested-by: Stefan Hajnoczi <stefanha at redhat.com>
> Signed-off-by: Connor Kuehl <ckuehl at redhat.com>
> ---
>  fs/fuse/virtio_fs.c | 30 ++++++++++++++++++++++++------
>  1 file changed, 24 insertions(+), 6 deletions(-)
>
> diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
> index bcb8a02e2d8b..dcdc8b7b1ad5 100644
> --- a/fs/fuse/virtio_fs.c
> +++ b/fs/fuse/virtio_fs.c
> @@ -30,6 +30,10 @@
>  static DEFINE_MUTEX(virtio_fs_mutex);
>  static LIST_HEAD(virtio_fs_instances);
>
> +struct virtio_fs_vq;
> +
> +DEFINE_PER_CPU(struct virtio_fs_vq *, this_cpu_fsvq);
> [..]
> +
> +	for_each_cpu(cpu, mask) {
> +		struct virtio_fs_vq **cpu_vq = per_cpu_ptr(&this_cpu_fsvq, cpu);
> +		*cpu_vq = &fs->vqs[i];
> +	}
> +	}

Hmm, actually, it's just occurred to me that the per-CPU state could be
problematic with multiple virtio-fs mounts. I'll workshop this some more.

Connor
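
One way to avoid sharing that state across mounts is to keep the CPU-to-queue map inside each struct virtio_fs instance instead of in a global per-CPU variable. The fragment below is only a sketch of that idea; the cpu_to_vq field and the virtio_fs_map_queues() helper are hypothetical names, and the fragment assumes it lives in fs/fuse/virtio_fs.c, where the needed headers and VQ_REQUEST are already available.

/*
 * Hypothetical per-instance mapping: each mount owns an array indexed by
 * CPU number, filled from the affinity reported for its own request queues.
 */
struct virtio_fs {
	/* existing fields elided */
	struct virtio_fs_vq *vqs;
	unsigned int nvqs;
	unsigned int num_request_queues;
	struct virtio_fs_vq **cpu_to_vq;	/* nr_cpu_ids entries */
};

static int virtio_fs_map_queues(struct virtio_device *vdev,
				struct virtio_fs *fs)
{
	unsigned int i, cpu;

	fs->cpu_to_vq = kcalloc(nr_cpu_ids, sizeof(*fs->cpu_to_vq), GFP_KERNEL);
	if (!fs->cpu_to_vq)
		return -ENOMEM;

	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
		const struct cpumask *mask;

		mask = vdev->config->get_vq_affinity(vdev, i);
		if (!mask)	/* e.g. no MSI-X */
			continue;

		for_each_cpu(cpu, mask)
			fs->cpu_to_vq[cpu] = &fs->vqs[i];
	}
	return 0;
}

virtio_fs_wake_pending_and_unlock() would then use fs->cpu_to_vq[raw_smp_processor_id()], falling back to &fs->vqs[VQ_REQUEST] when the entry is NULL, which would also keep the fs local in use.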
On Fri, May 07, 2021 at 03:15:27PM -0700, Connor Kuehl wrote:
> Distribute requests across the multiqueue complex automatically based
> on the IRQ affinity.

Hi Connor,

Thanks for the patch. I will look into it and also test it.

How did you test it? Did you modify virtiofsd to support multiqueue?
Did you also run some performance numbers? Does it provide better/worse
performance as compared to a single queue?

Thanks
Vivek
On Fri, May 07, 2021 at 05:15:27PM -0500, Connor Kuehl wrote:
> @@ -1245,7 +1262,8 @@ __releases(fiq->lock)
>  		 req->in.h.nodeid, req->in.h.len,
>  		 fuse_len_args(req->args->out_numargs, req->args->out_args));
>
> -	fsvq = &fs->vqs[queue_id];
> +	fsvq = this_cpu_read(this_cpu_fsvq);

Please check how CPU hotplug affects this patch. If the current CPU
doesn't have a vq because it was hotplugged, then it may be necessary
to pick another vq.

Stefan
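
One possible shape for such a fallback, sketched against the patch's current global per-CPU pointer (the helper name is hypothetical, and a complete solution would likely also register CPU hotplug callbacks to keep the map current):

/*
 * Fragment for fs/fuse/virtio_fs.c: tolerate a CPU whose per-CPU pointer
 * was never populated, e.g. because it came online after
 * virtio_fs_setup_vqs() ran.  Not part of the posted patch.
 */
static struct virtio_fs_vq *virtio_fs_pick_vq(struct virtio_fs *fs)
{
	struct virtio_fs_vq *fsvq = this_cpu_read(this_cpu_fsvq);

	if (!fsvq) {
		/* Spread unmapped CPUs over the available request queues. */
		unsigned int qid = raw_smp_processor_id() %
				   fs->num_request_queues;

		fsvq = &fs->vqs[VQ_REQUEST + qid];
	}
	return fsvq;
}

virtio_fs_wake_pending_and_unlock() would then call virtio_fs_pick_vq(fiq->priv) instead of reading the per-CPU variable directly, which would also give the otherwise-unused fs local flagged by the build robot above a purpose again.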