- More correct polling - Improve debug logs - Support extents - Add request state for easier debugging - Add progress Same as v1, with additional patches. Nir Soffer (6): examples: copy-libev.c: Improve polling examples: copy-libev.c: Improve debug logs examples: copy-libev.c: Support extents examples: copy-libev.c: Optimize zero extents examples: copy-libev.c: Add request state examples: copy-libev.c: Add progress examples/copy-libev.c | 352 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 327 insertions(+), 25 deletions(-) -- 2.26.3
Nir Soffer
2021-Apr-22 20:56 UTC
[Libguestfs] [PATCH libnbd v2 1/6] examples: copy-libev.c: Improve polling
Based on lib/poll.c, we should not invoke both notify_read() and notify_write() on the same event, since the first call may change the handle state and invalidate the write event. When having both read and write event, prefer the read event since it is a reply for older command. When returning events, use switch case for more efficient conversion from libnbd events to libev events. Signed-off-by: Nir Soffer <nsoffer at redhat.com> --- examples/copy-libev.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/examples/copy-libev.c b/examples/copy-libev.c index 1b1e9df..3f687a1 100644 --- a/examples/copy-libev.c +++ b/examples/copy-libev.c @@ -119,16 +119,20 @@ get_fd(struct connection *c) static inline int get_events(struct connection *c) { - int events = 0; unsigned dir = nbd_aio_get_direction (c->nbd); - if (dir & LIBNBD_AIO_DIRECTION_WRITE) - events |= EV_WRITE; + switch (dir) { + case LIBNBD_AIO_DIRECTION_READ: + return EV_READ; + case LIBNBD_AIO_DIRECTION_WRITE: + return EV_WRITE; + case LIBNBD_AIO_DIRECTION_BOTH: + return EV_READ | EV_WRITE; + default: + return 0; + } - if (dir & LIBNBD_AIO_DIRECTION_READ) - events |= EV_READ; - return events; } /* Start async copy or zero request. */ @@ -241,11 +245,15 @@ io_cb (struct ev_loop *loop, ev_io *w, int revents) { struct connection *c = (struct connection *)w; - if (revents & EV_WRITE) - nbd_aio_notify_write (c->nbd); + /* Based on lib/poll.c, we need to prefer read over write, and avoid + * invoking both notify_read() and notify_write(), since notify_read() may + * change the state of the handle. + */ if (revents & EV_READ) nbd_aio_notify_read (c->nbd); + else if (revents & EV_WRITE) + nbd_aio_notify_write (c->nbd); } static inline void -- 2.26.3
Nir Soffer
2021-Apr-22 20:56 UTC
[Libguestfs] [PATCH libnbd v2 2/6] examples: copy-libev.c: Improve debug logs
Use COPY_LIBEV_DEBUG instead of LIBNBD_DEBUG, since libnbd debug output is huge and not helpful for debugging the libev integration. Add request index, and log the request index in all debug message, to make it easier to follow all events on a particular request. Log also the time from starting the request until the request was completed. Here is an example output when copying fedora 32 image: copy-libev: r0: start read offset=0 len=1048576 copy-libev: r1: start read offset=1048576 len=1048576 copy-libev: r2: start read offset=2097152 len=1048576 copy-libev: r3: start read offset=3145728 len=1048576 ... copy-libev: r0: read completed offset=0 len=1048576 copy-libev: r0: start write offset=0 len=1048576 copy-libev: r1: read completed offset=1048576 len=1048576 copy-libev: r1: start write offset=1048576 len=1048576 copy-libev: r0: request completed offset=0 len=1048576 time=0.009660 copy-libev: r0: start read offset=16777216 len=1048576 copy-libev: r2: read completed offset=2097152 len=1048576 copy-libev: r2: start write offset=2097152 len=1048576 ... copy-libev: r15: request completed offset=6439305216 len=1048576 time=0.001415 copy-libev: r5: request completed offset=6440353792 len=1048576 time=0.001301 copy-libev: r7: request completed offset=6441402368 len=1048576 time=0.001359 copy-libev: flush Signed-off-by: Nir Soffer <nsoffer at redhat.com> --- examples/copy-libev.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/examples/copy-libev.c b/examples/copy-libev.c index 3f687a1..84d5c03 100644 --- a/examples/copy-libev.c +++ b/examples/copy-libev.c @@ -13,7 +13,7 @@ * * To debug it: * - * LIBNBD_DEBUG=1 ./copy-ev ... + * COPY_LIBEV_DEBUG=1 ./copy-ev ... 
*/ #include <assert.h> @@ -68,6 +68,8 @@ struct request { int64_t offset; size_t length; unsigned char *data; + size_t index; + ev_tstamp started; }; static struct ev_loop *loop; @@ -141,6 +143,7 @@ start_request(struct request *r) { assert (offset < size); + r->started = ev_now (loop); r->length = MIN (REQUEST_SIZE, size - offset); r->offset = offset; @@ -154,7 +157,8 @@ start_read(struct request *r) { int64_t cookie; - DEBUG ("start read offset=%ld len=%ld", r->offset, r->length); + DEBUG ("r%d: start read offset=%ld len=%ld", + r->index, r->offset, r->length); cookie = nbd_aio_pread ( src.nbd, r->data, r->length, r->offset, @@ -170,7 +174,8 @@ read_completed (void *user_data, int *error) { struct request *r = (struct request *)user_data; - DEBUG ("read completed offset=%ld len=%ld", r->offset, r->length); + DEBUG ("r%d: read completed offset=%ld len=%ld", + r->index, r->offset, r->length); if (dst.can_zero && is_zero (r->data, r->length)) start_zero (r); @@ -185,7 +190,8 @@ start_write(struct request *r) { int64_t cookie; - DEBUG ("start write offset=%ld len=%ld", r->offset, r->length); + DEBUG ("r%d: start write offset=%ld len=%ld", + r->index, r->offset, r->length); cookie = nbd_aio_pwrite ( dst.nbd, r->data, r->length, r->offset, @@ -201,7 +207,8 @@ start_zero(struct request *r) { int64_t cookie; - DEBUG ("start zero offset=%ld len=%ld", r->offset, r->length); + DEBUG ("r%d: start zero offset=%ld len=%ld", + r->index, r->offset, r->length); cookie = nbd_aio_zero ( dst.nbd, r->length, r->offset, @@ -220,7 +227,8 @@ request_completed (void *user_data, int *error) written += r->length; - DEBUG ("request completed offset=%ld len=%ld", r->offset, r->length); + DEBUG ("r%d: request completed offset=%ld len=%ld time=%.6f", + r->index, r->offset, r->length, ev_now (loop) - r->started); if (written == size) { /* The last write completed. 
Stop all watchers and break out @@ -294,7 +302,7 @@ main (int argc, char *argv[]) if (dst.nbd == NULL) FAIL ("Cannot create destination: %s", nbd_get_error ()); - debug = nbd_get_debug (src.nbd); + debug = getenv ("COPY_LIBEV_DEBUG") != NULL; /* Connecting is fast, so use the syncronous API. */ @@ -319,6 +327,7 @@ main (int argc, char *argv[]) for (i = 0; i < MAX_REQUESTS && offset < size; i++) { struct request *r = &requests[i]; + r->index = i; r->data = malloc (REQUEST_SIZE); if (r->data == NULL) FAIL ("Cannot allocate buffer: %s", strerror (errno)); -- 2.26.3
Nir Soffer
2021-Apr-22 20:56 UTC
[Libguestfs] [PATCH libnbd v2 3/6] examples: copy-libev.c: Support extents
Handling extents is complicated, in particular when using async block status. But I could not get this working using sync block status; It seems that libnbd eats the events on the source, and then we hang forever waiting for inflight reads. Since getting extents is asynchronous, and starting new request depends on being able to get the next extent, requests now have a new waiting state. When a request is started when extents are not available, it is marked as waiting. When extents request is completed, we start all waiting requests. Here is example log showing what's going on while copying fedora 32 image: The first request detects that we don't have extents yet, so it starts async block status request. copy-libev: r0: start extents offset=0 count=134217728 copy-libev: r0: received 14 extents for base:allocation copy-libev: r0: extents completed time=0.001098 When extent request completes, we start all waiting requests. The first request (r0) looked into the first extent (e0) and consumed all of it: copy-libev: r0: start request offset=0 copy-libev: e0: offset=0 len=65536 zero=0 copy-libev: r0: extent offset=0 len=65536 zero=0 copy-libev: r0: start read offset=0 len=65536 The second request (r15) is looking into the second extent (e1) and consumes all of it, starting a zero request: copy-libev: r15: start request offset=65536 copy-libev: e1: offset=65536 len=983040 zero=1 copy-libev: r15: extent offset=65536 len=983040 zero=1 copy-libev: r15: start zero offset=65536 len=983040 ... 
Request (r12) looked into the fifth extent (e4), but since this extent was large (10747904), it consumed only 1m from it: copy-libev: r12: start request offset=2097152 copy-libev: e4: offset=2097152 len=10747904 zero=0 copy-libev: r12: extent offset=2097152 len=1048576 zero=0 copy-libev: r12: start read offset=2097152 len=1048576 The next request consumed the next 1m from the same extent (e4): copy-libev: r11: start request offset=3145728 copy-libev: e4: offset=3145728 len=9699328 zero=0 copy-libev: r11: extent offset=3145728 len=1048576 zero=0 copy-libev: r11: start read offset=3145728 len=1048576 copy-libev: r10: start request offset=4194304 .. The last part of extent e4 was consumed, and we switched to extent e5: copy-libev: r2: start request offset=12582912 copy-libev: e4: offset=12582912 len=262144 zero=0 copy-libev: r2: extent offset=12582912 len=262144 zero=0 copy-libev: r2: start read offset=12582912 len=262144 copy-libev: r1: start request offset=12845056 copy-libev: e5: offset=12845056 len=131072 zero=1 copy-libev: r1: extent offset=12845056 len=131072 zero=1 copy-libev: r1: start zero offset=12845056 len=131072 ... Request (r11) consumed the last extent (e13), starting a zero request. This frees the extents array: copy-libev: r11: start request offset=133955584 copy-libev: e13: offset=133955584 len=262144 zero=1 copy-libev: r11: extent offset=133955584 len=262144 zero=1 copy-libev: r11: consumed all extents offset=134217728 copy-libev: r11: start zero offset=133955584 len=262144 ... Request (r12) started when extents array was cleared, so it started new block status request: copy-libev: r12: start extents offset=134217728 count=134217728 ... copy-libev: r12: received 3 extents for base:allocation copy-libev: r12: extents completed time=0.003027 ... The rest of the flow is same as before. 
When all requests are done, we shutdown the event loop and flush: copy-libev: r4: request completed offset=6438256640 len=1048576 time=0.000132 copy-libev: r1: read completed offset=6442385408 len=65536 copy-libev: r1: start write offset=6442385408 len=65536 copy-libev: r14: request completed offset=6439305216 len=1048576 time=0.000126 copy-libev: r8: request completed offset=6440353792 len=1048576 time=0.000151 copy-libev: r2: request completed offset=6441402368 len=983040 time=0.000143 copy-libev: r1: request completed offset=6442385408 len=65536 time=0.000142 copy-libev: flush Signed-off-by: Nir Soffer <nsoffer at redhat.com> --- examples/copy-libev.c | 235 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 225 insertions(+), 10 deletions(-) diff --git a/examples/copy-libev.c b/examples/copy-libev.c index 84d5c03..197d959 100644 --- a/examples/copy-libev.c +++ b/examples/copy-libev.c @@ -41,6 +41,7 @@ */ #define MAX_REQUESTS 16 #define REQUEST_SIZE (1024 * 1024) +#define EXTENTS_SIZE (128 * 1024 * 1024) #define MIN(a,b) (a) < (b) ? (a) : (b) @@ -62,14 +63,18 @@ struct connection { ev_io watcher; struct nbd_handle *nbd; bool can_zero; + bool can_extents; }; struct request { + ev_timer watcher; /* For starting on next loop iteration. */ int64_t offset; size_t length; + bool zero; unsigned char *data; size_t index; ev_tstamp started; + bool waiting; /* Waiting for extents completion. */ }; static struct ev_loop *loop; @@ -77,11 +82,29 @@ static ev_prepare prepare; static struct connection src; static struct connection dst; static struct request requests[MAX_REQUESTS]; + +/* List of extents received from source server. Using the same format returned + * by libnbd, array of uint32_t pairs. The first item is the length of the + * extent, and the second is the extent flags. + * + * The number of extents is extents_len / 2. extents_pos is the index of the + * current extent. 
+ * + * extents_in_progress flag is set when we start asynchronous block status + * request. + */ +static uint32_t *extents; +static size_t extents_len; +static size_t extents_pos; +static bool extents_in_progress; + static int64_t size; static int64_t offset; static int64_t written; static bool debug; +static inline void start_request_soon (struct request *r); +static void start_request_cb (struct ev_loop *loop, ev_timer *w, int revents); static void start_request(struct request *r); static void start_read(struct request *r); static void start_write(struct request *r); @@ -133,23 +156,206 @@ get_events(struct connection *c) default: return 0; } +} + +static int +extent_callback (void *user_data, const char *metacontext, uint64_t offset, + uint32_t *entries, size_t nr_entries, int *error) +{ + struct request *r = user_data; + + if (strcmp (metacontext, LIBNBD_CONTEXT_BASE_ALLOCATION) != 0) { + DEBUG ("Unexpected meta context: %s", metacontext); + return 1; + } + + extents = malloc (nr_entries * sizeof *extents); + if (extents == NULL) + FAIL ("Cannot allocated extents: %s", strerror (errno)); + + memcpy (extents, entries, nr_entries * sizeof *extents); + extents_len = nr_entries; + + DEBUG ("r%d: received %d extents for %s", + r->index, nr_entries / 2, metacontext); + + return 1; +} + +static int +extents_completed (void *user_data, int *error) +{ + struct request *r = (struct request *)user_data; + int i; + + DEBUG ("r%d: extents completed time=%.6f", + r->index, ev_now (loop) - r->started); + + extents_in_progress = false; + + if (extents == NULL) { + DEBUG ("r%d: received no extents, disabling extents", r->index); + src.can_extents = false; + } + + /* Start requests waiting for extents completion on the next loop + * iteration, to avoid deadlock if we need to start a read. 
+ */ + for (i = 0; i < MAX_REQUESTS; i++) { + struct request *r = &requests[i]; + if (r->waiting) { + r->waiting = false; + start_request_soon (r); + } + } + + return 1; +} + +static bool +start_extents (struct request *r) +{ + size_t count = MIN (EXTENTS_SIZE, size - offset); + int64_t cookie; + + if (extents_in_progress) { + r->waiting = true; + return true; + } + + DEBUG ("r%d: start extents offset=%ld count=%ld", r->index, offset, count); + + cookie = nbd_aio_block_status ( + src.nbd, count, offset, + (nbd_extent_callback) { .callback=extent_callback, + .user_data=r }, + (nbd_completion_callback) { .callback=extents_completed, + .user_data=r }, + 0); + if (cookie == -1) { + DEBUG ("Cannot get extents: %s", nbd_get_error ()); + src.can_extents = false; + return false; + } + + r->waiting = true; + extents_in_progress = true; + + return true; +} + +/* Return next extent to process. */ +static void +next_extent (struct request *r) +{ + uint32_t limit = MIN (REQUEST_SIZE, size - offset); + uint32_t length = 0; + bool is_zero; + + assert (extents); + + is_zero = extents[extents_pos + 1] & LIBNBD_STATE_ZERO; + + while (length < limit) { + DEBUG ("e%d: offset=%ld len=%ld zero=%d", + extents_pos / 2, offset, extents[extents_pos], is_zero); + + /* If this extent is too large, steal some data from it to + * complete the request. + */ + if (length + extents[extents_pos] > limit) { + uint32_t stolen = limit - length; + + extents[extents_pos] -= stolen; + length += stolen; + break; + } + + /* Consume the entire extent and start looking at the next one. */ + length += extents[extents_pos]; + extents[extents_pos] = 0; + + if (extents_pos + 2 == extents_len) + break; + + extents_pos += 2; + + /* If next extent is different, we are done. 
*/ + if ((extents[extents_pos + 1] & LIBNBD_STATE_ZERO) != is_zero) + break; + } + + assert (length > 0 && length <= limit); + + r->offset = offset; + r->length = length; + r->zero = is_zero; + + DEBUG ("r%d: extent offset=%ld len=%ld zero=%d", + r->index, r->offset, r->length, r->zero); + + offset += length; + + if (extents_pos + 2 == extents_len && extents[extents_pos] == 0) { + /* Processed all extents, clear extents. */ + DEBUG ("r%d: consumed all extents offset=%ld", r->index, offset); + free (extents); + extents = NULL; + extents_pos = 0; + extents_len = 0; + } +} +static inline void +start_request_soon (struct request *r) +{ + ev_timer_init (&r->watcher, start_request_cb, 0, 0); + ev_timer_start (loop, &r->watcher); +} +static void +start_request_cb (struct ev_loop *loop, ev_timer *w, int revents) +{ + struct request *r = (struct request *)w; + start_request (r); } /* Start async copy or zero request. */ static void start_request(struct request *r) { - assert (offset < size); + /* Cancel the request if we are done. */ + if (offset == size) + return; r->started = ev_now (loop); - r->length = MIN (REQUEST_SIZE, size - offset); - r->offset = offset; - - start_read (r); - offset += r->length; + /* If needed, get more extents from server. */ + if (src.can_extents && extents == NULL && start_extents (r)) + return; + + DEBUG ("r%d: start request offset=%ld", r->index, offset); + + if (src.can_extents) { + /* Handle the next extent. */ + next_extent (r); + if (r->zero) { + if (dst.can_zero) { + start_zero (r); + } else { + memset (r->data, 0, r->length); + start_write (r); + } + } else { + start_read (r); + } + } else { + /* Extents not available. */ + r->length = MIN (REQUEST_SIZE, size - offset); + r->offset = offset; + start_read (r); + offset += r->length; + } } static void @@ -240,9 +446,11 @@ request_completed (void *user_data, int *error) ev_break (loop, EVBREAK_ALL); } - /* If we have data to read, start a new read. 
*/ + /* If we have more work, start a new request on the next loop + * iteration, to avoid deadlock if we need to start a zero or write. + */ if (offset < size) - start_request(r); + start_request_soon(r); return 1; } @@ -304,11 +512,19 @@ main (int argc, char *argv[]) debug = getenv ("COPY_LIBEV_DEBUG") != NULL; + /* Configure soruce to report extents. */ + + if (nbd_add_meta_context (src.nbd, LIBNBD_CONTEXT_BASE_ALLOCATION)) + FAIL ("Cannot add base:allocation: %s", nbd_get_error ()); + /* Connecting is fast, so use the syncronous API. */ if (nbd_connect_uri (src.nbd, argv[1])) FAIL ("Cannot connect to source: %s", nbd_get_error ()); + src.can_extents = nbd_can_meta_context ( + src.nbd, LIBNBD_CONTEXT_BASE_ALLOCATION) > 0; + if (nbd_connect_uri (dst.nbd, argv[2])) FAIL ("Cannot connect to destination: %s", nbd_get_error ()); @@ -324,9 +540,8 @@ main (int argc, char *argv[]) /* Start the copy "loop". When request completes, it starts the * next request, until entire image was copied. */ - for (i = 0; i < MAX_REQUESTS && offset < size; i++) { + for (i = 0; i < MAX_REQUESTS; i++) { struct request *r = &requests[i]; - r->index = i; r->data = malloc (REQUEST_SIZE); if (r->data == NULL) -- 2.26.3
Nir Soffer
2021-Apr-22 20:56 UTC
[Libguestfs] [PATCH libnbd v2 4/6] examples: copy-libev.c: Optimize zero extents
If the destination supports zero, try to zero entire extent in one request. This speeds up copying of large sparse images. Same logic is used by nbdcopy. Here is an example benchmark, copying empty 1 TiB qcow2 image: $ qemu-img create -f qcow2 src.qcow2 1t $ qemu-img create -f qcow2 dst.qcow2 1t $ qemu-nbd --persistent --socket=/tmp/src.sock --format=qcow2 --read-only src.qcow2 $ qemu-nbd --persistent --socket=/tmp/dst.sock --format=qcow2 dst.qcow2 $ export SRC=nbd+unix:///?socket=/tmp/src.sock $ export DST=nbd+unix:///?socket=/tmp/dst.sock $ hyperfine -w3 \ "./copy-libev $SRC $DST" \ "qemu-img convert -n -W $SRC $DST" \ "../copy/nbdcopy --request-size=1048576 --requests=16 --connections=1 $SRC $DST" Benchmark #1: ./copy-libev nbd+unix:///?socket=/tmp/src.sock nbd+unix:///?socket=/tmp/dst.sock Time (mean ± σ): 940.9 ms ± 36.3 ms [User: 80.8 ms, System: 120.0 ms] Range (min … max): 892.8 ms … 1005.3 ms 10 runs Benchmark #2: qemu-img convert -n -W nbd+unix:///?socket=/tmp/src.sock nbd+unix:///?socket=/tmp/dst.sock Time (mean ± σ): 2.848 s ± 0.087 s [User: 241.7 ms, System: 253.9 ms] Range (min … max): 2.740 s … 3.035 s 10 runs Benchmark #3: ../copy/nbdcopy --request-size=1048576 --requests=16 --connections=1 nbd+unix:///?socket=/tmp/src.sock nbd+unix:///?socket=/tmp/dst.sock Time (mean ± σ): 1.082 s ± 0.041 s [User: 77.6 ms, System: 100.9 ms] Range (min … max): 1.043 s … 1.148 s 10 runs Summary './copy-libev nbd+unix:///?socket=/tmp/src.sock nbd+unix:///?socket=/tmp/dst.sock' ran 1.15 ± 0.06 times faster than '../copy/nbdcopy --request-size=1048576 --requests=16 --connections=1 nbd+unix:///?socket=/tmp/src.sock nbd+unix:///?socket=/tmp/dst.sock' 3.03 ± 
0.15 times faster than 'qemu-img convert -n -W nbd+unix:///?socket=/tmp/src.sock nbd+unix:///?socket=/tmp/dst.sock' Signed-off-by: Nir Soffer <nsoffer at redhat.com> --- examples/copy-libev.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/examples/copy-libev.c b/examples/copy-libev.c index 197d959..d9c8560 100644 --- a/examples/copy-libev.c +++ b/examples/copy-libev.c @@ -248,7 +248,7 @@ start_extents (struct request *r) static void next_extent (struct request *r) { - uint32_t limit = MIN (REQUEST_SIZE, size - offset); + uint32_t limit; uint32_t length = 0; bool is_zero; @@ -256,6 +256,12 @@ next_extent (struct request *r) is_zero = extents[extents_pos + 1] & LIBNBD_STATE_ZERO; + /* Zero can be much faster, so try to zero entire extent. */ + if (is_zero && dst.can_zero) + limit = MIN (EXTENTS_SIZE, size - offset); + else + limit = MIN (REQUEST_SIZE, size - offset); + while (length < limit) { DEBUG ("e%d: offset=%ld len=%ld zero=%d", extents_pos / 2, offset, extents[extents_pos], is_zero); -- 2.26.3
Nir Soffer
2021-Apr-22 20:56 UTC
[Libguestfs] [PATCH libnbd v2 5/6] examples: copy-libev.c: Add request state
Replace request waiting flag with a state enum. This makes it easier to follow the logs, including the type of the request in the completion log: $ COPY_LIBEV_DEBUG=1 ./copy-libev $SRC $DST ... copy-libev: r0: start read offset=0 len=65536 ... copy-libev: r0: read completed offset=0 len=65536 copy-libev: r0: start write offset=0 len=65536 ... copy-libev: r0: write completed offset=0 len=65536, time=0.007836 It is also helpful when debugging. Looking in the request array, we can understand the state of every request: (gdb) p requests[0] $9 = {watcher = {active = 0, pending = 0, priority = 0, data = 0x0, cb 0x4020e0 <start_request_cb>, at = -1.1344964150339365e-05, repeat = 0}, offset = 0, length = 65536, zero = false, data = 0x7ffff7373010 "\353c\220", index = 0, started = 1615154809.1588342, state = WRITE} (gdb) p requests[1] $10 = {watcher = {active = 0, pending = 0, priority = 0, data = 0x0, cb 0x4020e0 <start_request_cb>, at = -1.1344964150339365e-05, repeat = 0}, offset = 12845056, length = 131072, zero = true, data = 0x7ffff7272010 "", index = 1, started = 1615154809.1588342, state = ZERO} I think this will useful for more fine grain sparsifying; when a partial zero or write request completes, we can restart it to consume the rest of the payload. Signed-off-by: Nir Soffer <nsoffer at redhat.com> --- examples/copy-libev.c | 51 +++++++++++++++++++++++++++++++++---------- 1 file changed, 39 insertions(+), 12 deletions(-) diff --git a/examples/copy-libev.c b/examples/copy-libev.c index d9c8560..6e6cbcb 100644 --- a/examples/copy-libev.c +++ b/examples/copy-libev.c @@ -66,6 +66,19 @@ struct connection { bool can_extents; }; +enum request_state { + IDLE, /* Not used yet. */ + EXTENTS, /* Getting extents from source. */ + READ, /* Read from source. */ + WRITE, /* Write to destiation. */ + ZERO, /* Write zeroes to destiation. */ + SLEEP /* Waiting for extents completion. 
*/ +}; + +static const char *state_names[] = { + "idle", "extents", "read", "write", "zero", "sleep" +}; + struct request { ev_timer watcher; /* For starting on next loop iteration. */ int64_t offset; @@ -74,7 +87,7 @@ struct request { unsigned char *data; size_t index; ev_tstamp started; - bool waiting; /* Waiting for extents completion. */ + enum request_state state; }; static struct ev_loop *loop; @@ -135,6 +148,12 @@ is_zero (const unsigned char *data, size_t len) return memcmp (data, p, len) == 0; } +static inline const char * +request_state (struct request *r) +{ + return state_names[r->state]; +} + static inline int get_fd(struct connection *c) { @@ -198,15 +217,16 @@ extents_completed (void *user_data, int *error) src.can_extents = false; } - /* Start requests waiting for extents completion on the next loop - * iteration, to avoid deadlock if we need to start a read. + /* Start the request to process recvievd extents. This must be done on the + * next loop iteration, to avoid deadlock if we need to start a read. */ + start_request_soon(r); + + /* Wake up requests waiting for extents completion */ for (i = 0; i < MAX_REQUESTS; i++) { struct request *r = &requests[i]; - if (r->waiting) { - r->waiting = false; + if (r->state == SLEEP) start_request_soon (r); - } } return 1; @@ -219,7 +239,8 @@ start_extents (struct request *r) int64_t cookie; if (extents_in_progress) { - r->waiting = true; + /* Sleep until extents request completes. 
*/ + r->state = SLEEP; return true; } @@ -238,7 +259,7 @@ start_extents (struct request *r) return false; } - r->waiting = true; + r->state = EXTENTS; extents_in_progress = true; return true; @@ -315,6 +336,7 @@ next_extent (struct request *r) static inline void start_request_soon (struct request *r) { + r->state = IDLE; ev_timer_init (&r->watcher, start_request_cb, 0, 0); ev_timer_start (loop, &r->watcher); } @@ -340,8 +362,6 @@ start_request(struct request *r) if (src.can_extents && extents == NULL && start_extents (r)) return; - DEBUG ("r%d: start request offset=%ld", r->index, offset); - if (src.can_extents) { /* Handle the next extent. */ next_extent (r); @@ -369,6 +389,8 @@ start_read(struct request *r) { int64_t cookie; + r->state = READ; + DEBUG ("r%d: start read offset=%ld len=%ld", r->index, r->offset, r->length); @@ -402,6 +424,8 @@ start_write(struct request *r) { int64_t cookie; + r->state = WRITE; + DEBUG ("r%d: start write offset=%ld len=%ld", r->index, r->offset, r->length); @@ -419,6 +443,8 @@ start_zero(struct request *r) { int64_t cookie; + r->state = ZERO; + DEBUG ("r%d: start zero offset=%ld len=%ld", r->index, r->offset, r->length); @@ -439,8 +465,9 @@ request_completed (void *user_data, int *error) written += r->length; - DEBUG ("r%d: request completed offset=%ld len=%ld time=%.6f", - r->index, r->offset, r->length, ev_now (loop) - r->started); + DEBUG ("r%d: %s completed offset=%ld len=%ld, time=%.6f", + r->index, request_state (r), r->offset, r->length, + ev_now (loop) - r->started); if (written == size) { /* The last write completed. Stop all watchers and break out -- 2.26.3
Nir Soffer
2021-Apr-22 20:56 UTC
[Libguestfs] [PATCH libnbd v2 6/6] examples: copy-libev.c: Add progress
Use repeating timer to add progress bar: $ ./copy-libev $SRC $DST [ 100.00% ] 6.00 GiB, 0.89 seconds, 6.71 GiB/s Signed-off-by: Nir Soffer <nsoffer at redhat.com> --- examples/copy-libev.c | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/examples/copy-libev.c b/examples/copy-libev.c index 6e6cbcb..2e39465 100644 --- a/examples/copy-libev.c +++ b/examples/copy-libev.c @@ -42,6 +42,7 @@ #define MAX_REQUESTS 16 #define REQUEST_SIZE (1024 * 1024) #define EXTENTS_SIZE (128 * 1024 * 1024) +#define GIB (1024 * 1024 * 1024) #define MIN(a,b) (a) < (b) ? (a) : (b) @@ -115,6 +116,8 @@ static int64_t size; static int64_t offset; static int64_t written; static bool debug; +static ev_tstamp started; +static ev_timer progress; static inline void start_request_soon (struct request *r); static void start_request_cb (struct ev_loop *loop, ev_timer *w, int revents); @@ -505,6 +508,36 @@ io_cb (struct ev_loop *loop, ev_io *w, int revents) nbd_aio_notify_write (c->nbd); } +static void +progress_cb (struct ev_loop *loop, ev_timer *w, int revents) +{ + ev_tstamp duration = ev_now (loop) - started; + + printf ("[ %6.2f%% ] %.2f GiB, %.2f seconds, %.2f GiB/s %c", + (double) written / size * 100, + (double) size / GIB, + duration, + (double) written / GIB / duration, + revents ? 
'\r' : '\n'); + + fflush (stdout); +} + +static void +start_progress () +{ + started = ev_now (loop); + ev_timer_init (&progress, progress_cb, 0, 0.1); + ev_timer_start (loop, &progress); +} + +static void +finish_progress () +{ + ev_now_update (loop); + progress_cb (loop, &progress, 0); +} + static inline void update_watcher (struct connection *c) { @@ -535,6 +568,8 @@ main (int argc, char *argv[]) if (argc != 3) FAIL ("Usage: %s src-uri dst-uri", PROG); + start_progress (); + src.nbd = nbd_create (); if (src.nbd == NULL) FAIL ("Cannot create source: %s", nbd_get_error ()); @@ -620,5 +655,7 @@ main (int argc, char *argv[]) /* We can free requests data here, but it is not really needed. */ + finish_progress (); + return 0; } -- 2.26.3