Eric Blake
2019-May-11 20:30 UTC
[Libguestfs] [nbdkit PATCH] cache: Reduce use of bounce-buffer
Although the time spent in memcpy/memset probably pales in comparison
to time spent in socket I/O, it's still worth reducing the number of
times we have to utilize a bounce buffer when we already have aligned
data.

Signed-off-by: Eric Blake <eblake@redhat.com>
---
 filters/cache/cache.c | 60 ++++++++++++++++++++++++++++---------------
 1 file changed, 39 insertions(+), 21 deletions(-)

diff --git a/filters/cache/cache.c b/filters/cache/cache.c
index 19ce555..98786b5 100644
--- a/filters/cache/cache.c
+++ b/filters/cache/cache.c
@@ -1,5 +1,5 @@
 /* nbdkit
- * Copyright (C) 2018 Red Hat Inc.
+ * Copyright (C) 2018-2019 Red Hat Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -58,6 +58,7 @@
 #include "cache.h"
 #include "blk.h"
 #include "reclaim.h"
+#include "isaligned.h"

 #define THREAD_MODEL NBDKIT_THREAD_MODEL_PARALLEL

@@ -233,11 +234,13 @@ cache_pread (struct nbdkit_next_ops *next_ops, void *nxdata,
   CLEANUP_FREE uint8_t *block = NULL;

   assert (!flags);
-  block = malloc (blksize);
-  if (block == NULL) {
-    *err = errno;
-    nbdkit_error ("malloc: %m");
-    return -1;
+  if (!IS_ALIGNED (count | offset, blksize)) {
+    block = malloc (blksize);
+    if (block == NULL) {
+      *err = errno;
+      nbdkit_error ("malloc: %m");
+      return -1;
+    }
   }

   /* XXX This breaks up large read requests into smaller ones, which
@@ -258,12 +261,14 @@ cache_pread (struct nbdkit_next_ops *next_ops, void *nxdata,

     {
       ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
-      r = blk_read (next_ops, nxdata, blknum, block, err);
+      r = blk_read (next_ops, nxdata, blknum,
+                    blkoffs || n < blksize ? block : buf, err);
     }
     if (r == -1)
       return -1;

-    memcpy (buf, &block[blkoffs], n);
+    if (blkoffs || n < blksize)
+      memcpy (buf, &block[blkoffs], n);

     buf += n;
     count -= n;
@@ -282,11 +287,13 @@ cache_pwrite (struct nbdkit_next_ops *next_ops, void *nxdata,
   CLEANUP_FREE uint8_t *block = NULL;
   bool need_flush = false;

-  block = malloc (blksize);
-  if (block == NULL) {
-    *err = errno;
-    nbdkit_error ("malloc: %m");
-    return -1;
+  if (!IS_ALIGNED (count | offset, blksize)) {
+    block = malloc (blksize);
+    if (block == NULL) {
+      *err = errno;
+      nbdkit_error ("malloc: %m");
+      return -1;
+    }
   }

   if ((flags & NBDKIT_FLAG_FUA) &&
@@ -308,11 +315,15 @@ cache_pwrite (struct nbdkit_next_ops *next_ops, void *nxdata,
      * Hold the lock over the whole operation.
      */
     ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
-    r = blk_read (next_ops, nxdata, blknum, block, err);
-    if (r != -1) {
-      memcpy (&block[blkoffs], buf, n);
-      r = blk_write (next_ops, nxdata, blknum, block, flags, err);
+    if (blkoffs || n < blksize) {
+      r = blk_read (next_ops, nxdata, blknum, block, err);
+      if (r != -1) {
+        memcpy (&block[blkoffs], buf, n);
+        r = blk_write (next_ops, nxdata, blknum, block, flags, err);
+      }
     }
+    else
+      r = blk_write (next_ops, nxdata, blknum, buf, flags, err);
     if (r == -1)
       return -1;

@@ -334,6 +345,7 @@ cache_zero (struct nbdkit_next_ops *next_ops, void *nxdata,
 {
   CLEANUP_FREE uint8_t *block = NULL;
   bool need_flush = false;
+  bool clean = false;

   block = malloc (blksize);
   if (block == NULL) {
@@ -350,7 +362,7 @@ cache_zero (struct nbdkit_next_ops *next_ops, void *nxdata,
   }

   while (count > 0) {
     uint64_t blknum, blkoffs, n;
-    int r;
+    int r = 0;

     blknum = offset / blksize;  /* block number */
     blkoffs = offset % blksize; /* offset within the block */
@@ -362,11 +374,17 @@ cache_zero (struct nbdkit_next_ops *next_ops, void *nxdata,
      * Hold the lock over the whole operation.
      */
     ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
-    r = blk_read (next_ops, nxdata, blknum, block, err);
-    if (r != -1) {
+    if (blkoffs || n < blksize) {
+      r = blk_read (next_ops, nxdata, blknum, block, err);
       memset (&block[blkoffs], 0, n);
+      clean = false;
+    }
+    else if (!clean) {
+      memset (block, 0, blksize);
+      clean = true;
+    }
+    if (r != -1)
       r = blk_write (next_ops, nxdata, blknum, block, flags, err);
-    }
     if (r == -1)
       return -1;

-- 
2.20.1
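The new guard relies on blksize being a power of two, so one bitwise
test on (count | offset) catches misalignment in either value.  Below
is a minimal standalone sketch of that idea; the macro shown here is an
illustration only, not the actual definition in nbdkit's
common/include/isaligned.h, and the request values are made up.

  /* Illustration only: assumes the alignment is a power of two, as the
     real IS_ALIGNED in nbdkit does. */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define IS_ALIGNED_SKETCH(n, align) (((n) & ((align) - 1)) == 0)

  int
  main (void)
  {
    uint64_t blksize = 4096;                 /* cache block size */
    uint64_t offset = 8192, count = 512;     /* hypothetical request */

    /* One test covers both values: OR-ing them together leaves a low
       bit set if either one is misaligned. */
    bool need_bounce = !IS_ALIGNED_SKETCH (count | offset, blksize);
    printf ("need bounce buffer: %s\n", need_bounce ? "yes" : "no");
    return 0;
  }

With these numbers the count is not a multiple of 4096, so the request
takes the bounce-buffer path even though the offset is aligned.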
Richard W.M. Jones
2019-May-13 09:21 UTC
Re: [Libguestfs] [nbdkit PATCH] cache: Reduce use of bounce-buffer
On Sat, May 11, 2019 at 03:30:04PM -0500, Eric Blake wrote:
> Although the time spent in memcpy/memset probably pales in comparison
> to time spent in socket I/O, it's still worth reducing the number of
> times we have to utilize a bounce buffer when we already have aligned
> data.
>
> Signed-off-by: Eric Blake <eblake@redhat.com>
> ---
>  filters/cache/cache.c | 60 ++++++++++++++++++++++++++++---------------
>  1 file changed, 39 insertions(+), 21 deletions(-)
>
> diff --git a/filters/cache/cache.c b/filters/cache/cache.c
> index 19ce555..98786b5 100644
> --- a/filters/cache/cache.c
> +++ b/filters/cache/cache.c
> @@ -1,5 +1,5 @@
>  /* nbdkit
> - * Copyright (C) 2018 Red Hat Inc.
> + * Copyright (C) 2018-2019 Red Hat Inc.
>   *
>   * Redistribution and use in source and binary forms, with or without
>   * modification, are permitted provided that the following conditions are
> @@ -58,6 +58,7 @@
>  #include "cache.h"
>  #include "blk.h"
>  #include "reclaim.h"
> +#include "isaligned.h"
>
>  #define THREAD_MODEL NBDKIT_THREAD_MODEL_PARALLEL
>
> @@ -233,11 +234,13 @@ cache_pread (struct nbdkit_next_ops *next_ops, void *nxdata,
>    CLEANUP_FREE uint8_t *block = NULL;
>
>    assert (!flags);
> -  block = malloc (blksize);
> -  if (block == NULL) {
> -    *err = errno;
> -    nbdkit_error ("malloc: %m");
> -    return -1;
> +  if (!IS_ALIGNED (count | offset, blksize)) {
> +    block = malloc (blksize);
> +    if (block == NULL) {
> +      *err = errno;
> +      nbdkit_error ("malloc: %m");
> +      return -1;
> +    }
>    }
>
>    /* XXX This breaks up large read requests into smaller ones, which
> @@ -258,12 +261,14 @@ cache_pread (struct nbdkit_next_ops *next_ops, void *nxdata,
>
>      {
>        ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
> -      r = blk_read (next_ops, nxdata, blknum, block, err);
> +      r = blk_read (next_ops, nxdata, blknum,
> +                    blkoffs || n < blksize ? block : buf, err);
>      }
>      if (r == -1)
>        return -1;
>
> -    memcpy (buf, &block[blkoffs], n);
> +    if (blkoffs || n < blksize)
> +      memcpy (buf, &block[blkoffs], n);

I've stared at this patch for a while and I don't really understand
it.

One suggestion though. The patch might be better (or at least might
be more obviously correct) if the function defined a boolean at the
top which was used to tell if the bounce buffer was in use, something
like:

  const bool use_bounce_buffer = /* condition */;

  if (use_bounce_buffer) {
    block = malloc (blksize);
    ...
  }

  r = blk_read (..., use_bounce_buffer ? block : buf, err);

etc.

Rich.

-- 
Richard Jones, Virtualization Group, Red Hat http://people.redhat.com/~rjones
Read my programming and virtualization blog: http://rwmj.wordpress.com
Fedora Windows cross-compiler. Compile Windows programs, test, and
build Windows installers. Over 100 libraries supported.
http://fedoraproject.org/wiki/MinGW
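For illustration, here is a self-contained sketch (not nbdkit code) of
how that use_bounce_buffer suggestion could shape the pread loop.
fake_blk_read, BLKSIZE and the in-memory disk array are stand-ins for
blk_read and the plugin underneath, and error handling is pared down to
the minimum.

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  #define BLKSIZE 16

  static uint8_t disk[4 * BLKSIZE];        /* stand-in for the plugin */

  static int
  fake_blk_read (uint64_t blknum, uint8_t *b)
  {
    memcpy (b, &disk[blknum * BLKSIZE], BLKSIZE);
    return 0;
  }

  static int
  sketch_pread (uint8_t *buf, uint64_t count, uint64_t offset)
  {
    /* Decide once whether any part of the request is misaligned. */
    const bool use_bounce_buffer = (count | offset) & (BLKSIZE - 1);
    uint8_t *block = NULL;

    if (use_bounce_buffer) {
      block = malloc (BLKSIZE);
      if (block == NULL)
        return -1;
    }

    while (count > 0) {
      uint64_t blknum = offset / BLKSIZE;
      uint64_t blkoffs = offset % BLKSIZE;
      uint64_t n = BLKSIZE - blkoffs;
      if (n > count)
        n = count;

      /* Aligned requests read straight into the caller's buffer;
         otherwise read a whole block into the bounce buffer and copy
         out just the bytes the caller asked for. */
      if (fake_blk_read (blknum, use_bounce_buffer ? block : buf) == -1) {
        free (block);
        return -1;
      }
      if (use_bounce_buffer)
        memcpy (buf, &block[blkoffs], n);

      buf += n;
      count -= n;
      offset += n;
    }

    free (block);
    return 0;
  }

  int
  main (void)
  {
    uint8_t out[24];

    for (size_t i = 0; i < sizeof disk; i++)
      disk[i] = i;
    if (sketch_pread (out, sizeof out, 5) == 0)
      printf ("first byte read: %d\n", out[0]);   /* prints 5 */
    return 0;
  }

Because the flag is computed once from the whole request, an unaligned
request routes every chunk through the bounce buffer, including any
fully aligned blocks in the middle; that gives up a little of the
optimization in exchange for being obviously correct, which is the
point of the suggestion.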
Eric Blake
2019-May-13 14:11 UTC
Re: [Libguestfs] [nbdkit PATCH] cache: Reduce use of bounce-buffer
On 5/13/19 4:21 AM, Richard W.M. Jones wrote:
> On Sat, May 11, 2019 at 03:30:04PM -0500, Eric Blake wrote:
>> Although the time spent in memcpy/memset probably pales in comparison
>> to time spent in socket I/O, it's still worth reducing the number of
>> times we have to utilize a bounce buffer when we already have aligned
>> data.
>>
>> Signed-off-by: Eric Blake <eblake@redhat.com>
>> ---

>> @@ -233,11 +234,13 @@ cache_pread (struct nbdkit_next_ops *next_ops, void *nxdata,
>>    CLEANUP_FREE uint8_t *block = NULL;
>>
>>    assert (!flags);
>> -  block = malloc (blksize);
>> -  if (block == NULL) {
>> -    *err = errno;
>> -    nbdkit_error ("malloc: %m");
>> -    return -1;
>> +  if (!IS_ALIGNED (count | offset, blksize)) {
>> +    block = malloc (blksize);
>> +    if (block == NULL) {
>> +      *err = errno;
>> +      nbdkit_error ("malloc: %m");
>> +      return -1;
>> +    }

If we have an unaligned count or offset, then we need the bounce
buffer, but we only have to use it...

>>    }
>>
>>    /* XXX This breaks up large read requests into smaller ones, which
>> @@ -258,12 +261,14 @@ cache_pread (struct nbdkit_next_ops *next_ops, void *nxdata,
>>
>>      {
>>        ACQUIRE_LOCK_FOR_CURRENT_SCOPE (&lock);
>> -      r = blk_read (next_ops, nxdata, blknum, block, err);
>> +      r = blk_read (next_ops, nxdata, blknum,
>> +                    blkoffs || n < blksize ? block : buf, err);

...on the unaligned head and unaligned tail of the overall request...

>>      }
>>      if (r == -1)
>>        return -1;
>>
>> -    memcpy (buf, &block[blkoffs], n);
>> +    if (blkoffs || n < blksize)
>> +      memcpy (buf, &block[blkoffs], n);

...and the memcpy is only necessary to copy into the caller's buf when
we read into our bounce buffer.  But yes, I agree with your
complaint...

>
> I've stared at this patch for a while and I don't really understand
> it.
>
> One suggestion though. The patch might be better (or at least might
> be more obviously correct) if the function defined a boolean at the
> top which was used to tell if the bounce buffer was in use, something
> like:
>
>   const bool use_bounce_buffer = /* condition */;

...and will rewrite it to be more obvious about the three-part
handling of any request.

Writing this email made me also realize that the blocksize filter
doesn't play nicely with the streaming plugin (although I don't know
if that would ever work nicely, since the streaming plugin does not
handle read-modify-write), because the blocksize filter does the same
head/body/tail splitting of an unaligned request, but reorders things
to visit the tail before the body, rather than visiting everything in
order.  It would be interesting if we had a way to share the blocksize,
cache, and cow filters' common code for adjusting requests onto
appropriate block size boundaries.

For that matter, someday I really want to get nbdkit to support
NBD_INFO_BLOCK_SIZE, where plugins/filters can advertise alignment to
the client, and where nbdkit itself takes care of the alignment issues
rather than every plugin having to deal with bounce buffers, size
rounding and read-modify-write.  Ah well, a future project (as the
caching project is already a big set to get working).

-- 
Eric Blake, Principal Software Engineer
Red Hat, Inc.           +1-919-301-3226
Virtualization:  qemu.org | libvirt.org
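As a rough illustration of the three-part handling described above, an
unaligned request [offset, offset+count) splits into at most one
partial head block, a run of whole aligned blocks, and one partial tail
block; only the head and tail need the bounce buffer (and, for writes
or zeroes, a read-modify-write).  The block size, request numbers and
variable names below are hypothetical, not taken from the filter.

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  #define BLKSIZE 4096

  int
  main (void)
  {
    uint64_t offset = 5000, count = 20000;   /* hypothetical request */
    uint64_t end = offset + count;

    /* Partial block before the first aligned boundary. */
    uint64_t head_len = 0;
    if (offset % BLKSIZE) {
      head_len = BLKSIZE - offset % BLKSIZE;
      if (head_len > count)
        head_len = count;
    }

    /* Whole aligned blocks in the middle. */
    uint64_t body_start = offset + head_len;
    uint64_t body_len = (end - body_start) / BLKSIZE * BLKSIZE;

    /* Partial block after the last aligned boundary. */
    uint64_t tail_start = body_start + body_len;
    uint64_t tail_len = end - tail_start;

    printf ("head: %" PRIu64 " bytes at %" PRIu64 " (bounce buffer)\n",
            head_len, offset);
    printf ("body: %" PRIu64 " bytes at %" PRIu64 " (direct, aligned)\n",
            body_len, body_start);
    printf ("tail: %" PRIu64 " bytes at %" PRIu64 " (bounce buffer)\n",
            tail_len, tail_start);
    return 0;
  }

For a read the body can go straight into the caller's buffer; for a
write or zero, only the head and tail blocks need to be read, patched
in memory, and written back.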