Michael S. Tsirkin
2021-Sep-22 19:02 UTC
[PATCH 1/4] hwrng: virtio - add an internal buffer
On Wed, Sep 22, 2021 at 07:09:00PM +0200, Laurent Vivier wrote:
> hwrng core uses two buffers that can be mixed in the
> virtio-rng queue.
>
> If the buffer is provided with wait=0 it is enqueued in the
> virtio-rng queue but unused by the caller.
> On the next call, core provides another buffer but the
> first one is filled instead and the new one queued.
> And the caller reads the data from the new one that is not
> updated, and the data in the first one are lost.
>
> To avoid this mix, virtio-rng needs to use its own unique
> internal buffer at a cost of a data copy to the caller buffer.
>
> Signed-off-by: Laurent Vivier <lvivier at redhat.com>
> ---
>  drivers/char/hw_random/virtio-rng.c | 43 ++++++++++++++++++++++-------
>  1 file changed, 33 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
> index a90001e02bf7..208c547dcac1 100644
> --- a/drivers/char/hw_random/virtio-rng.c
> +++ b/drivers/char/hw_random/virtio-rng.c
> @@ -18,13 +18,20 @@ static DEFINE_IDA(rng_index_ida);
>  struct virtrng_info {
>  	struct hwrng hwrng;
>  	struct virtqueue *vq;
> -	struct completion have_data;
>  	char name[25];
> -	unsigned int data_avail;
>  	int index;
>  	bool busy;
>  	bool hwrng_register_done;
>  	bool hwrng_removed;
> +	/* data transfer */
> +	struct completion have_data;
> +	unsigned int data_avail;
> +	/* minimal size returned by rng_buffer_size() */
> +#if SMP_CACHE_BYTES < 32
> +	u8 data[32];
> +#else
> +	u8 data[SMP_CACHE_BYTES];
> +#endif

Let's move this logic to a macro in hw_random.h ?

>  };
>
>  static void random_recv_done(struct virtqueue *vq)
> @@ -39,14 +46,14 @@ static void random_recv_done(struct virtqueue *vq)
>  }
>
>  /* The host will fill any buffer we give it with sweet, sweet randomness. */
> -static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
> +static void register_buffer(struct virtrng_info *vi)
>  {
>  	struct scatterlist sg;
>
> -	sg_init_one(&sg, buf, size);
> +	sg_init_one(&sg, vi->data, sizeof(vi->data));

Note that add_early_randomness requests less:
	size_t size = min_t(size_t, 16, rng_buffer_size());

maybe track how much was requested and grow up to sizeof(data)?

>
>  	/* There should always be room for one buffer. */
> -	virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);
> +	virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL);

BTW no longer true if DMA API is in use ... not easy to fix,
I think some changes to virtio API to allow pre-mapping
s/g for DMA might be needed ...

>
>  	virtqueue_kick(vi->vq);
>  }
> @@ -55,6 +62,8 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
>  {
>  	int ret;
>  	struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
> +	unsigned int chunk;
> +	size_t read;
>
>  	if (vi->hwrng_removed)
>  		return -ENODEV;
> @@ -62,19 +71,33 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
>  	if (!vi->busy) {
>  		vi->busy = true;
>  		reinit_completion(&vi->have_data);
> -		register_buffer(vi, buf, size);
> +		register_buffer(vi);
>  	}
>
>  	if (!wait)
>  		return 0;
>
> -	ret = wait_for_completion_killable(&vi->have_data);
> -	if (ret < 0)
> -		return ret;
> +	read = 0;
> +	while (size != 0) {
> +		ret = wait_for_completion_killable(&vi->have_data);
> +		if (ret < 0)
> +			return ret;
> +
> +		chunk = min_t(unsigned int, size, vi->data_avail);
> +		memcpy(buf + read, vi->data, chunk);
> +		read += chunk;
> +		size -= chunk;
> +		vi->data_avail = 0;
> +
> +		if (size != 0) {
> +			reinit_completion(&vi->have_data);
> +			register_buffer(vi);
> +		}
> +	}
>
>  	vi->busy = false;
>
> -	return vi->data_avail;
> +	return read;
>  }
>
>  static void virtio_cleanup(struct hwrng *rng)
> --
> 2.31.1
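(For readers following the thread: the macro Michael suggests could look roughly like the sketch below. RNG_BUFFER_SIZE is a hypothetical name and putting it in include/linux/hw_random.h is an assumption; the posted series does not contain such a change.)

    /* Hypothetical helper mirroring the #if/#else the patch open-codes:
     * large enough for anything rng_buffer_size() can return, i.e. at
     * least 32 bytes, otherwise one cache line. */
    #if SMP_CACHE_BYTES < 32
    #define RNG_BUFFER_SIZE 32
    #else
    #define RNG_BUFFER_SIZE SMP_CACHE_BYTES
    #endif

    /* the driver field would then simply become */
    u8 data[RNG_BUFFER_SIZE];

That would keep the size logic in one place in hw_random.h instead of duplicating it in the driver.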
On 22/09/2021 21:02, Michael S. Tsirkin wrote:
> On Wed, Sep 22, 2021 at 07:09:00PM +0200, Laurent Vivier wrote:
>> hwrng core uses two buffers that can be mixed in the
>> virtio-rng queue.
>>
>> If the buffer is provided with wait=0 it is enqueued in the
>> virtio-rng queue but unused by the caller.
>> On the next call, core provides another buffer but the
>> first one is filled instead and the new one queued.
>> And the caller reads the data from the new one that is not
>> updated, and the data in the first one are lost.
>>
>> To avoid this mix, virtio-rng needs to use its own unique
>> internal buffer at a cost of a data copy to the caller buffer.
>>
>> Signed-off-by: Laurent Vivier <lvivier at redhat.com>
>> ---
>>  drivers/char/hw_random/virtio-rng.c | 43 ++++++++++++++++++++++-------
>>  1 file changed, 33 insertions(+), 10 deletions(-)
>>
>> diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
>> index a90001e02bf7..208c547dcac1 100644
>> --- a/drivers/char/hw_random/virtio-rng.c
>> +++ b/drivers/char/hw_random/virtio-rng.c
>> @@ -18,13 +18,20 @@ static DEFINE_IDA(rng_index_ida);
>>  struct virtrng_info {
>>  	struct hwrng hwrng;
>>  	struct virtqueue *vq;
>> -	struct completion have_data;
>>  	char name[25];
>> -	unsigned int data_avail;
>>  	int index;
>>  	bool busy;
>>  	bool hwrng_register_done;
>>  	bool hwrng_removed;
>> +	/* data transfer */
>> +	struct completion have_data;
>> +	unsigned int data_avail;
>> +	/* minimal size returned by rng_buffer_size() */
>> +#if SMP_CACHE_BYTES < 32
>> +	u8 data[32];
>> +#else
>> +	u8 data[SMP_CACHE_BYTES];
>> +#endif
>
> Let's move this logic to a macro in hw_random.h ?
>
>>  };
>>
>>  static void random_recv_done(struct virtqueue *vq)
>> @@ -39,14 +46,14 @@ static void random_recv_done(struct virtqueue *vq)
>>  }
>>
>>  /* The host will fill any buffer we give it with sweet, sweet randomness. */
>> -static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
>> +static void register_buffer(struct virtrng_info *vi)
>>  {
>>  	struct scatterlist sg;
>>
>> -	sg_init_one(&sg, buf, size);
>> +	sg_init_one(&sg, vi->data, sizeof(vi->data));
>
> Note that add_early_randomness requests less:
> 	size_t size = min_t(size_t, 16, rng_buffer_size());
>
> maybe track how much was requested and grow up to sizeof(data)?

I think this problem is managed by PATCH 3/4 as we reuse unused data of
the buffer.

>
>>
>>  	/* There should always be room for one buffer. */
>> -	virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);
>> +	virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL);
>
>
> BTW no longer true if DMA API is in use ... not easy to fix,
> I think some changes to virtio API to allow pre-mapping
> s/g for DMA might be needed ...

Is there something I can do here?

Thanks,
Laurent
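(As a side note on the "track how much was requested" idea above: a minimal sketch of that alternative, assuming register_buffer() grows a size parameter, is shown below. This is not what the series does; per Laurent, PATCH 3/4 instead reuses the leftover data in the internal buffer.)

    /* Sketch only: cap the host-visible length at the caller's request
     * instead of always exposing the whole internal buffer. */
    static void register_buffer(struct virtrng_info *vi, unsigned int size)
    {
            struct scatterlist sg;

            /* never expose more than the internal buffer to the host */
            size = min_t(unsigned int, size, sizeof(vi->data));
            sg_init_one(&sg, vi->data, size);

            /* There should always be room for one buffer. */
            virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL);

            virtqueue_kick(vi->vq);
    }

virtio_read() would then forward its size argument, so the 16-byte add_early_randomness request only asks the host for 16 bytes, growing up to sizeof(vi->data) for larger reads.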