Paul Durrant
2011-Dec-14 14:37 UTC
[PATCH 0 of 3] Support for VM generation ID save/restore and migrate
This patch series adds support for preserving the VM generation ID buffer address in xenstore across save/restore and migrate, and for incrementing the value in all cases except migration. The first patch moves creation of the hvmloader key in xenstore earlier and adds creation of a new read/write hvmloader/generation-id-address key. The second patch changes hvmloader to use the new key (as opposed to the old data/generation-id key). The third patch adds the infrastructure to save and restore the VM generation ID address in xenstore, and the code to increment the value.
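For orientation, the flow the series sets up can be sketched as follows. This is not code from the patches: it is a minimal, hypothetical example of what a caller of xc_domain_save() does with the hvmloader/generation-id-address key, using the same libxenstore calls the patches themselves use; error handling is largely elided.

    /* Hypothetical sketch: fetch the generation ID buffer address that
     * hvmloader published, before calling xc_domain_save(). A missing or
     * unreadable key yields 0, which the save side treats as "no buffer". */
    #include <stdio.h>
    #include <stdlib.h>
    #include <xs.h>

    static unsigned long read_generationid_addr(struct xs_handle *xsh,
                                                unsigned int domid)
    {
        char path[128];
        char *val;
        unsigned long addr = 0;

        snprintf(path, sizeof(path),
                 "/local/domain/%u/hvmloader/generation-id-address", domid);
        val = xs_read(xsh, XBT_NULL, path, NULL);   /* NULL if absent */
        if (val) {
            addr = strtoul(val, NULL, 0);
            free(val);
        }
        return addr;
    }

The value returned by such a helper is what the third patch passes to xc_domain_save() as vm_generationid_addr, and what the restore caller writes back into the same key afterwards.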
# HG changeset patch
# User Paul Durrant <paul.durrant@citrix.com>
# Date 1323873285 0
# Node ID a4a5697297410d0d71adb5e96bee5ae061850310
# Parent  03138a08366b895d79e143119d4c9c72833cdbcd
Make ro_paths and rw_paths dynamic.

The paths need to be different for the HVM and non-HVM cases as, in the
HVM case, we need an 'hvmloader' key. This was previously handled by
creating the hvmloader key in libxl__create_device_model(), which is only
invoked for HVM guests. However, if we are to use the hvmloader key to
parent the 'generation-id-address' key, the creation needs to move earlier
in the sequence. Handling this by making ro_paths and rw_paths dynamic in
libxl__domain_make() seems like the cleanest approach. The read-only
'error', 'drivers', 'attr' and 'messages' keys are no longer created as
they seem to be completely unused.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>

diff -r 03138a08366b -r a4a569729741 tools/libxl/libxl_create.c
--- a/tools/libxl/libxl_create.c	Fri Dec 09 16:19:36 2011 +0000
+++ b/tools/libxl/libxl_create.c	Wed Dec 14 14:34:45 2011 +0000
@@ -322,9 +322,10 @@ int libxl__domain_make(libxl__gc *gc, li
     libxl_ctx *ctx = libxl__gc_owner(gc);
     int flags, ret, i, rc;
     char *uuid_string;
-    char *rw_paths[] = { "control/shutdown", "device", "device/suspend/event-channel" , "data"};
-    char *ro_paths[] = { "cpu", "memory", "device", "error", "drivers",
-                         "control", "attr", "messages" };
+    char **ro_paths;
+    int nr_ro_paths;
+    char **rw_paths;
+    int nr_rw_paths;
     char *dom_path, *vm_path, *libxl_path;
     struct xs_permissions roperm[2];
     struct xs_permissions rwperm[1];
@@ -341,6 +342,31 @@ int libxl__domain_make(libxl__gc *gc, li
         goto out;
     }
 
+    nr_ro_paths = 0;
+    if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
+        ro_paths = libxl__calloc(gc, 5, sizeof(char *));
+        ro_paths[nr_ro_paths++] = "hvmloader";
+    } else {
+        ro_paths = libxl__calloc(gc, 4, sizeof(char *));
+    }
+
+    ro_paths[nr_ro_paths++] = "cpu";
+    ro_paths[nr_ro_paths++] = "memory";
+    ro_paths[nr_ro_paths++] = "device";
+    ro_paths[nr_ro_paths++] = "control";
+
+    nr_rw_paths = 0;
+    if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
+        rw_paths = libxl__calloc(gc, 4, sizeof(char *));
+        rw_paths[nr_rw_paths++] = "hvmloader/generation-id-address";
+    } else {
+        rw_paths = libxl__calloc(gc, 3, sizeof(char *));
+    }
+
+    rw_paths[nr_rw_paths++] = "control/shutdown";
+    rw_paths[nr_rw_paths++] = "device/suspend/event-channel";
+    rw_paths[nr_rw_paths++] = "data";
+
     flags = 0;
     if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
         flags |= XEN_DOMCTL_CDF_hvm_guest;
@@ -414,16 +440,16 @@ retry_transaction:
     if (rc)
         goto out;
 
-    for (i = 0; i < ARRAY_SIZE(rw_paths); i++) {
-        char *path = libxl__sprintf(gc, "%s/%s", dom_path, rw_paths[i]);
-        xs_mkdir(ctx->xsh, t, path);
-        xs_set_permissions(ctx->xsh, t, path, rwperm, ARRAY_SIZE(rwperm));
-    }
     for (i = 0; i < ARRAY_SIZE(ro_paths); i++) {
         char *path = libxl__sprintf(gc, "%s/%s", dom_path, ro_paths[i]);
         xs_mkdir(ctx->xsh, t, path);
         xs_set_permissions(ctx->xsh, t, path, roperm, ARRAY_SIZE(roperm));
     }
+    for (i = 0; i < nr_rw_paths; i++) {
+        char *path = libxl__sprintf(gc, "%s/%s", dom_path, rw_paths[i]);
+        xs_mkdir(ctx->xsh, t, path);
+        xs_set_permissions(ctx->xsh, t, path, rwperm, ARRAY_SIZE(rwperm));
+    }
 
     xs_write(ctx->xsh, t, libxl__sprintf(gc, "%s/uuid", vm_path), uuid_string, strlen(uuid_string));
     xs_write(ctx->xsh, t, libxl__sprintf(gc, "%s/name", vm_path), info->name, strlen(info->name));

diff -r 03138a08366b -r a4a569729741 tools/libxl/libxl_dm.c
--- a/tools/libxl/libxl_dm.c	Fri Dec 09 16:19:36 2011 +0000
+++ b/tools/libxl/libxl_dm.c	Wed Dec 14 14:34:45 2011 +0000
@@ -821,9 +821,7 @@ int libxl__create_device_model(libxl__gc
         goto out;
     }
 
-    path = libxl__sprintf(gc, "/local/domain/%d/hvmloader", info->domid);
-    xs_mkdir(ctx->xsh, XBT_NULL, path);
-    libxl__xs_write(gc, XBT_NULL, libxl__sprintf(gc, "%s/bios", path),
+    libxl__xs_write(gc, XBT_NULL, libxl__sprintf(gc, "/local/domain/%d/hvmloader/bios", info->domid),
                     "%s", libxl__domain_bios(gc, info));
 
     path = libxl__sprintf(gc, "/local/domain/0/device-model/%d", info->domid);
Paul Durrant
2011-Dec-14 14:37 UTC
[PATCH 2 of 3] Re-name xenstore key used to save VM generation ID buffer address
# HG changeset patch
# User Paul Durrant <paul.durrant@citrix.com>
# Date 1323873287 0
# Node ID 9618ee3b6896eb8202e737b3bb027b248af6dd70
# Parent  a4a5697297410d0d71adb5e96bee5ae061850310
Re-name xenstore key used to save VM generation ID buffer address.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>

diff -r a4a569729741 -r 9618ee3b6896 tools/firmware/hvmloader/acpi/build.c
--- a/tools/firmware/hvmloader/acpi/build.c	Wed Dec 14 14:34:45 2011 +0000
+++ b/tools/firmware/hvmloader/acpi/build.c	Wed Dec 14 14:34:47 2011 +0000
@@ -309,7 +309,7 @@ unsigned long new_vm_gid(void)
     if ( snprintf(addr, sizeof(addr), "0x%lx", virt_to_phys(buf))
          >= sizeof(addr) )
         return 0;
-    xenstore_write("data/generation-id", addr);
+    xenstore_write("hvmloader/generation-id-address", addr);
 
     gid = strtoll(xenstore_read("platform/generation-id", "0"), NULL, 0);
     *(uint64_t *)buf = gid;
Paul Durrant
2011-Dec-14 14:37 UTC
[PATCH 3 of 3] VM generation ID save/restore and migrate
# HG changeset patch
# User Paul Durrant <paul.durrant@citrix.com>
# Date 1323873288 0
# Node ID 5f0a09b7edc29076bd72bae4a53259c71545b0bf
# Parent  9618ee3b6896eb8202e737b3bb027b248af6dd70
VM generation ID save/restore and migrate.

Add code to track the address of the VM generation ID buffer across a
save/restore or migrate, and increment it as necessary. The address of
the buffer is written into xenstore by hvmloader at boot time. It must
be read from xenstore by the caller of xc_domain_save() and then
written back again by the caller of xc_domain_restore().

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxc/ia64/xc_ia64_linux_restore.c
--- a/tools/libxc/ia64/xc_ia64_linux_restore.c	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxc/ia64/xc_ia64_linux_restore.c	Wed Dec 14 14:34:48 2011 +0000
@@ -548,7 +548,8 @@
 int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned int store_evtchn, unsigned long *store_mfn,
                       unsigned int console_evtchn, unsigned long *console_mfn,
-                      unsigned int hvm, unsigned int pae, int superpages)
+                      unsigned int hvm, unsigned int pae, int superpages,
+                      int no_incr_generationid, unsigned long *vm_generationid_addr)
 {
     DECLARE_DOMCTL;
     int rc = 1;

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxc/ia64/xc_ia64_linux_save.c
--- a/tools/libxc/ia64/xc_ia64_linux_save.c	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxc/ia64/xc_ia64_linux_save.c	Wed Dec 14 14:34:48 2011 +0000
@@ -382,7 +382,8 @@ out:
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
                    uint32_t max_iters, uint32_t max_factor, uint32_t flags,
-                   struct save_callbacks* callbacks, int hvm)
+                   struct save_callbacks* callbacks, int hvm,
+                   unsigned long vm_generationid_addr)
 {
     DECLARE_DOMCTL;
     xc_dominfo_t info;

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxc/xc_domain_restore.c	Wed Dec 14 14:34:48 2011 +0000
@@ -681,6 +681,7 @@ typedef struct {
     uint64_t console_pfn;
     uint64_t acpi_ioport_location;
     uint64_t viridian;
+    uint64_t vm_generationid_addr;
 } pagebuf_t;
 
 static int pagebuf_init(pagebuf_t* buf)
@@ -860,6 +861,17 @@ static int pagebuf_get_one(xc_interface
         }
         return compbuf_size;
 
+    case XC_SAVE_ID_HVM_GENERATION_ID_ADDR:
+        /* Skip padding 4 bytes then read the generation id buffer location. */
+        if ( RDEXACT(fd, &buf->vm_generationid_addr, sizeof(uint32_t)) ||
+             RDEXACT(fd, &buf->vm_generationid_addr, sizeof(uint64_t)) )
+        {
+            PERROR("error read the generation id buffer location");
+            return -1;
+        }
+        DPRINTF("read generation id buffer address");
+        return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
     default:
         if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
             ERROR("Max batch size exceeded (%d). Giving up.", count);
@@ -1248,7 +1260,8 @@ static int apply_batch(xc_interface *xch
 int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned int store_evtchn, unsigned long *store_mfn,
                       unsigned int console_evtchn, unsigned long *console_mfn,
-                      unsigned int hvm, unsigned int pae, int superpages)
+                      unsigned int hvm, unsigned int pae, int superpages,
+                      int no_incr_generationid, unsigned long *vm_generationid_addr)
 {
     DECLARE_DOMCTL;
     int rc = 1, frc, i, j, n, m, pae_extended_cr3 = 0, ext_vcpucontext = 0;
@@ -1449,6 +1462,39 @@ int xc_domain_restore(xc_interface *xch,
             xc_set_hvm_param(xch, dom, HVM_PARAM_VM86_TSS, pagebuf.vm86_tss);
         if ( pagebuf.console_pfn )
             console_pfn = pagebuf.console_pfn;
+        if ( pagebuf.vm_generationid_addr ) {
+            if ( !no_incr_generationid ) {
+                unsigned int offset;
+                unsigned char *buf;
+                unsigned long long generationid;
+
+                /*
+                 * Map the VM generation id buffer and inject the new value.
+                 */
+
+                pfn = pagebuf.vm_generationid_addr >> PAGE_SHIFT;
+                offset = pagebuf.vm_generationid_addr & (PAGE_SIZE - 1);
+
+                if ( (pfn >= dinfo->p2m_size) ||
+                     (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
+                {
+                    ERROR("generation id buffer frame is bad");
+                    goto out;
+                }
+
+                mfn = ctx->p2m[pfn];
+                buf = xc_map_foreign_range(xch, dom, PAGE_SIZE,
+                                           PROT_READ | PROT_WRITE, mfn);
+
+                generationid = *(unsigned long long *)(buf + offset);
+                *(unsigned long long *)(buf + offset) = generationid + 1;
+
+                munmap(buf, PAGE_SIZE);
+            }
+
+            *vm_generationid_addr = pagebuf.vm_generationid_addr;
+        }
+
         break;  /* our work here is done */
     }

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxc/xc_domain_save.c	Wed Dec 14 14:34:48 2011 +0000
@@ -804,7 +804,8 @@ static int save_tsc_info(xc_interface *x
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
                    uint32_t max_iters, uint32_t max_factor, uint32_t flags,
-                   struct save_callbacks* callbacks, int hvm)
+                   struct save_callbacks* callbacks, int hvm,
+                   unsigned long vm_generationid_addr)
 {
     xc_dominfo_t info;
     DECLARE_DOMCTL;
@@ -1616,6 +1617,16 @@ int xc_domain_save(xc_interface *xch, in
             uint64_t data;
         } chunk = { 0, };
 
+        chunk.id = XC_SAVE_ID_HVM_GENERATION_ID_ADDR;
+        chunk.data = vm_generationid_addr;
+
+        if ( (chunk.data != 0) &&
+             wrexact(io_fd, &chunk, sizeof(chunk)) )
+        {
+            PERROR("Error when writing the generation id buffer location for guest");
+            goto out;
+        }
+
         chunk.id = XC_SAVE_ID_HVM_IDENT_PT;
         chunk.data = 0;
         xc_get_hvm_param(xch, dom, HVM_PARAM_IDENT_PT,

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxc/xenguest.h	Wed Dec 14 14:34:48 2011 +0000
@@ -58,7 +58,8 @@ struct save_callbacks {
  */
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
                    uint32_t max_iters, uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
-                   struct save_callbacks* callbacks, int hvm);
+                   struct save_callbacks* callbacks, int hvm,
+                   unsigned long vm_generationid_addr);
 
 
 /**
@@ -72,12 +73,15 @@ int xc_domain_save(xc_interface *xch, in
  * @parm hvm non-zero if this is a HVM restore
  * @parm pae non-zero if this HVM domain has PAE support enabled
  * @parm superpages non-zero to allocate guest memory with superpages
+ * @parm no_incr_generationid non-zero if generation id is NOT to be incremented
+ * @parm vm_generationid_addr returned with the address of the generation id buffer
  * @return 0 on success, -1 on failure
  */
 int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned int store_evtchn, unsigned long *store_mfn,
                       unsigned int console_evtchn, unsigned long *console_mfn,
-                      unsigned int hvm, unsigned int pae, int superpages);
+                      unsigned int hvm, unsigned int pae, int superpages,
+                      int no_incr_generationid, unsigned long *vm_generationid_addr);
 /**
  * xc_domain_restore writes a file to disk that contains the device
  * model saved state.

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxc/xg_save_restore.h	Wed Dec 14 14:34:48 2011 +0000
@@ -253,6 +253,7 @@
 #define XC_SAVE_ID_HVM_VIRIDIAN       -11
 #define XC_SAVE_ID_COMPRESSED_DATA    -12 /* Marker to indicate arrival of compressed data */
 #define XC_SAVE_ID_ENABLE_COMPRESSION -13 /* Marker to enable compression logic at receiver side */
+#define XC_SAVE_ID_HVM_GENERATION_ID_ADDR -14
 
 /*
 ** We process save/restore/migrate in batches of pages; the below

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxl/libxl_create.c
--- a/tools/libxl/libxl_create.c	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxl/libxl_create.c	Wed Dec 14 14:34:48 2011 +0000
@@ -99,6 +99,7 @@ int libxl_init_build_info(libxl_ctx *ctx
         b_info->u.hvm.vpt_align = 1;
         b_info->u.hvm.timer_mode = 1;
         b_info->u.hvm.nested_hvm = 0;
+        b_info->u.hvm.no_incr_generationid = 0;
         break;
     case LIBXL_DOMAIN_TYPE_PV:
         b_info->u.pv.slack_memkb = 8 * 1024;

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxl/libxl_dom.c	Wed Dec 14 14:34:48 2011 +0000
@@ -106,6 +106,7 @@ int libxl__build_pre(libxl__gc *gc, uint
     state->store_port = xc_evtchn_alloc_unbound(ctx->xch, domid, 0);
     state->console_port = xc_evtchn_alloc_unbound(ctx->xch, domid, 0);
+    state->vm_generationid_addr = 0;
 
     return 0;
 }
@@ -117,7 +118,7 @@ int libxl__build_post(libxl__gc *gc, uin
     libxl_ctx *ctx = libxl__gc_owner(gc);
     char *dom_path, *vm_path;
     xs_transaction_t t;
-    char **ents;
+    char **ents, **hvm_ents;
     int i;
 
     libxl_cpuid_apply_policy(ctx, domid);
@@ -143,6 +144,13 @@ int libxl__build_post(libxl__gc *gc, uin
             ? "offline" : "online";
     }
 
+    hvm_ents = NULL;
+    if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
+        hvm_ents = libxl__calloc(gc, 3, sizeof(char *));
+        hvm_ents[0] = "hvmloader/generation-id-address";
+        hvm_ents[1] = libxl__sprintf(gc, "0x%lx", state->vm_generationid_addr);
+    }
+
     dom_path = libxl__xs_get_dompath(gc, domid);
     if (!dom_path) {
         return ERROR_FAIL;
@@ -153,6 +161,9 @@ retry_transaction:
     t = xs_transaction_start(ctx->xsh);
 
     libxl__xs_writev(gc, t, dom_path, ents);
+    if (info->type == LIBXL_DOMAIN_TYPE_HVM)
+        libxl__xs_writev(gc, t, dom_path, hvm_ents);
+
     libxl__xs_writev(gc, t, dom_path, local_ents);
     libxl__xs_writev(gc, t, vm_path, vms_ents);
@@ -356,16 +367,19 @@ int libxl__domain_restore_common(libxl__
     /* read signature */
     int rc;
     int hvm, pae, superpages;
+    int no_incr_generationid;
 
     switch (info->type) {
     case LIBXL_DOMAIN_TYPE_HVM:
         hvm = 1;
         superpages = 1;
         pae = info->u.hvm.pae;
+        no_incr_generationid = info->u.hvm.no_incr_generationid;
         break;
     case LIBXL_DOMAIN_TYPE_PV:
         hvm = 0;
         superpages = 0;
         pae = 1;
+        no_incr_generationid = 0;
         break;
     default:
         return ERROR_INVAL;
@@ -373,7 +387,8 @@ int libxl__domain_restore_common(libxl__
     rc = xc_domain_restore(ctx->xch, fd, domid,
                            state->store_port, &state->store_mfn,
                            state->console_port, &state->console_mfn,
-                           hvm, pae, superpages);
+                           hvm, pae, superpages, no_incr_generationid,
+                           &state->vm_generationid_addr);
     if ( rc ) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "restoring domain");
         return ERROR_FAIL;
@@ -539,12 +554,22 @@ int libxl__domain_suspend_common(libxl__
     struct save_callbacks callbacks;
     struct suspendinfo si;
     int hvm, rc = ERROR_FAIL;
+    unsigned long vm_generationid_addr;
 
     switch (type) {
-    case LIBXL_DOMAIN_TYPE_HVM:
+    case LIBXL_DOMAIN_TYPE_HVM: {
+        char *path;
+        char *addr;
+
+        path = libxl__sprintf(gc, "%s/hvmloader/generation-id-address", libxl__xs_get_dompath(gc, domid));
+        addr = libxl__xs_read(gc, XBT_NULL, path);
+
+        vm_generationid_addr = (addr) ? strtoul(addr, NULL, 0) : 0;
         hvm = 1;
         break;
+    }
     case LIBXL_DOMAIN_TYPE_PV:
+        vm_generationid_addr = 0;
         hvm = 0;
         break;
     default:
@@ -582,7 +607,8 @@ int libxl__domain_suspend_common(libxl__
     callbacks.switch_qemu_logdirty = libxl__domain_suspend_common_switch_qemu_logdirty;
     callbacks.data = &si;
 
-    rc = xc_domain_save(ctx->xch, fd, domid, 0, 0, flags, &callbacks, hvm);
+    rc = xc_domain_save(ctx->xch, fd, domid, 0, 0, flags, &callbacks,
+                        hvm, vm_generationid_addr);
     if ( rc ) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "saving domain: %s",
                          si.guest_responded ?

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxl/libxl_internal.h
--- a/tools/libxl/libxl_internal.h	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxl/libxl_internal.h	Wed Dec 14 14:34:48 2011 +0000
@@ -218,6 +218,7 @@ typedef struct {
     uint32_t console_port;
     unsigned long console_mfn;
+    unsigned long vm_generationid_addr;
 } libxl__domain_build_state;
 
 _hidden int libxl__build_pre(libxl__gc *gc, uint32_t domid,

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxl/libxl_types.idl	Wed Dec 14 14:34:48 2011 +0000
@@ -184,6 +184,7 @@ libxl_domain_build_info = Struct("domain
                                        ("vpt_align", bool),
                                        ("timer_mode", integer),
                                        ("nested_hvm", bool),
+                                       ("no_incr_generationid", bool),
                                        ])),
                 ("pv", Struct(None, [("kernel", libxl_file_reference),
                                      ("slack_memkb", uint32),

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/libxl/xl_cmdimpl.c	Wed Dec 14 14:34:48 2011 +0000
@@ -360,6 +360,7 @@ static void printf_info(int domid,
         printf("\t\t\t(vpt_align %d)\n", b_info->u.hvm.vpt_align);
         printf("\t\t\t(timer_mode %d)\n", b_info->u.hvm.timer_mode);
         printf("\t\t\t(nestedhvm %d)\n", b_info->u.hvm.nested_hvm);
+        printf("\t\t\t(no_incr_generationid %d)\n", b_info->u.hvm.no_incr_generationid);
 
         printf("\t\t\t(device_model %s)\n", dm_info->device_model ? : "default");
         printf("\t\t\t(videoram %d)\n", dm_info->videoram);
@@ -1362,6 +1363,7 @@ struct domain_create {
     const char *restore_file;
     int migrate_fd; /* -1 means none */
     char **migration_domname_r; /* from malloc */
+    int no_incr_generationid;
 };
 
 static int freemem(libxl_domain_build_info *b_info, libxl_device_model_info *dm_info)
@@ -1575,6 +1577,8 @@ static int create_domain(struct domain_c
         }
     }
 
+    d_config.b_info.u.hvm.no_incr_generationid = dom_info->no_incr_generationid;
+
     if (debug || dom_info->dryrun)
         printf_info(-1, &d_config, &d_config.dm_info);
@@ -2800,6 +2804,7 @@ static void migrate_receive(int debug, i
     dom_info.restore_file = "incoming migration stream";
     dom_info.migrate_fd = 0; /* stdin */
     dom_info.migration_domname_r = &migration_domname;
+    dom_info.no_incr_generationid = 1;
 
     rc = create_domain(&dom_info);
     if (rc < 0) {

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/python/xen/lowlevel/checkpoint/libcheckpoint.c
--- a/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c	Wed Dec 14 14:34:48 2011 +0000
@@ -175,6 +175,7 @@ int checkpoint_start(checkpoint_state* s
 {
   int hvm, rc;
   int flags = XCFLAGS_LIVE;
+  unsigned long vm_generationid_addr;
 
   if (!s->domid) {
     s->errstr = "checkpoint state not opened";
@@ -185,16 +186,27 @@ int checkpoint_start(checkpoint_state* s
 
   hvm = s->domtype > dt_pv;
   if (hvm) {
+    char path[128];
+    char *addr;
+
+    sprintf(path, "/local/domain/%u/hvmloader/generation-id-address", s->domid);
+    addr = xs_read(s->xsh, XBT_NULL, path, NULL);
+
+    vm_generationid_addr = (addr) ? strtoul(addr, NULL, 0) : 0;
+    free(addr);
+
     flags |= XCFLAGS_HVM;
     if (switch_qemu_logdirty(s, 1))
       return -1;
+  } else {
+    vm_generationid_addr = 0;
   }
 
   if (remus_flags & CHECKPOINT_FLAGS_COMPRESSION)
     flags |= XCFLAGS_CHECKPOINT_COMPRESS;
 
   callbacks->switch_qemu_logdirty = noop_switch_logdirty;
 
-  rc = xc_domain_save(s->xch, fd, s->domid, 0, 0, flags, callbacks, hvm);
+  rc = xc_domain_save(s->xch, fd, s->domid, 0, 0, flags, callbacks, hvm, vm_generationid_addr);
 
   if (hvm)
     switch_qemu_logdirty(s, 0);

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/xcutils/xc_restore.c
--- a/tools/xcutils/xc_restore.c	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/xcutils/xc_restore.c	Wed Dec 14 14:34:48 2011 +0000
@@ -23,11 +23,12 @@ main(int argc, char **argv)
     xc_interface *xch;
     int io_fd, ret;
     int superpages;
-    unsigned long store_mfn, console_mfn;
+    unsigned long store_mfn, console_mfn, vm_generationid_addr;
+    int no_incr_generationid;
 
-    if ( (argc != 8) && (argc != 9) )
+    if ( (argc < 8) || (argc > 10) )
         errx(1, "usage: %s iofd domid store_evtchn "
-             "console_evtchn hvm pae apic [superpages]", argv[0]);
+             "console_evtchn hvm pae apic [superpages [no_incr_generationid]]", argv[0]);
 
     xch = xc_interface_open(0,0,0);
     if ( !xch )
@@ -40,19 +41,25 @@ main(int argc, char **argv)
     hvm = atoi(argv[5]);
     pae = atoi(argv[6]);
     apic = atoi(argv[7]);
-    if ( argc == 9 )
+    if ( argc >= 9 )
         superpages = atoi(argv[8]);
     else
         superpages = !!hvm;
+    if ( argc == 10 )
+        no_incr_generationid = !atoi(argv[9]);
+    else
+        no_incr_generationid = 0;
 
     ret = xc_domain_restore(xch, io_fd, domid, store_evtchn, &store_mfn,
-                            console_evtchn, &console_mfn, hvm, pae, superpages);
+                            console_evtchn, &console_mfn, hvm, pae, superpages,
+                            no_incr_generationid, &vm_generationid_addr);
 
     if ( ret == 0 )
     {
        printf("store-mfn %li\n", store_mfn);
        if ( !hvm )
            printf("console-mfn %li\n", console_mfn);
+       printf("generation-id-address %lx\n", vm_generationid_addr);
        fflush(stdout);
     }

diff -r 9618ee3b6896 -r 5f0a09b7edc2 tools/xcutils/xc_save.c
--- a/tools/xcutils/xc_save.c	Wed Dec 14 14:34:47 2011 +0000
+++ b/tools/xcutils/xc_save.c	Wed Dec 14 14:34:48 2011 +0000
@@ -169,6 +169,10 @@ main(int argc, char **argv)
     unsigned int maxit, max_f;
     int io_fd, ret, port;
     struct save_callbacks callbacks;
+    char path[128];
+    struct xs_handle *xs;
+    char *addr;
+    unsigned long vm_generationid_addr;
 
     if (argc != 6)
         errx(1, "usage: %s iofd domid maxit maxf flags", argv[0]);
@@ -207,8 +211,21 @@ main(int argc, char **argv)
     memset(&callbacks, 0, sizeof(callbacks));
     callbacks.suspend = suspend;
     callbacks.switch_qemu_logdirty = switch_qemu_logdirty;
+
+    sprintf(path, "/local/domain/%d/hvmloader/generation-id-address", si.domid);
+
+    if ((xs = xs_daemon_open()) == NULL)
+        errx(1, "Couldn't contact xenstore");
+
+    addr = xs_read(xs, XBT_NULL, path, NULL);
+
+    xs_daemon_close(xs);
+
+    vm_generationid_addr = (addr) ? strtoul(addr, NULL, 0) : 0;
+    free(addr);
+
     ret = xc_domain_save(si.xch, io_fd, si.domid, maxit, max_f, si.flags,
-                         &callbacks, !!(si.flags & XCFLAGS_HVM));
+                         &callbacks, !!(si.flags & XCFLAGS_HVM), vm_generationid_addr);
 
     if (si.suspend_evtchn > 0)
         xc_suspend_evtchn_release(si.xch, si.xce, si.domid, si.suspend_evtchn);
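A note on the stream format the third patch introduces: the chunk is written by wrexact()ing the whole struct, so on a 64-bit build the 64-bit address is preceded by 4 bytes of alignment padding after the 32-bit marker. That is why the restore side above does two RDEXACT() reads into the same field. An illustrative layout follows; the struct name is invented here, and only the marker value comes from the patch's xg_save_restore.h change.

    #include <stdint.h>

    /* Illustrative on-wire layout of the generation ID chunk. The restore
     * loop has already consumed the 32-bit marker when it dispatches to the
     * XC_SAVE_ID_HVM_GENERATION_ID_ADDR case, so it then skips the 4
     * padding bytes and reads the 64-bit address. */
    struct genid_chunk {
        int32_t  id;      /* XC_SAVE_ID_HVM_GENERATION_ID_ADDR (-14) */
        /* 4 bytes of implicit padding: uint64_t is 8-byte aligned */
        uint64_t data;    /* guest address of the generation ID buffer */
    };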
Ian Jackson
2011-Dec-15 16:25 UTC
Re: [PATCH 2 of 3] Re-name xenstore key used to save VM generation ID buffer address
Paul Durrant writes ("[Xen-devel] [PATCH 2 of 3] Re-name xenstore key used to save VM generation ID buffer address"):
> Re-name xenstore key used to save VM generation ID buffer address.
>
> Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
>
> diff -r a4a569729741 -r 9618ee3b6896 tools/firmware/hvmloader/acpi/build.c
> --- a/tools/firmware/hvmloader/acpi/build.c	Wed Dec 14 14:34:45 2011 +0000
> +++ b/tools/firmware/hvmloader/acpi/build.c	Wed Dec 14 14:34:47 2011 +0000
> @@ -309,7 +309,7 @@ unsigned long new_vm_gid(void)
>      if ( snprintf(addr, sizeof(addr), "0x%lx", virt_to_phys(buf))
>           >= sizeof(addr) )
>          return 0;
> -    xenstore_write("data/generation-id", addr);
> +    xenstore_write("hvmloader/generation-id-address", addr);

Will making just this change to hvmloader not cause failures without the corresponding patch to the toolstack to make that path writeable?

Ian.
Paul Durrant
2011-Dec-15 17:12 UTC
Re: [PATCH 2 of 3] Re-name xenstore key used to save VM generation ID buffer address
> -----Original Message-----
> From: Ian Jackson [mailto:Ian.Jackson@eu.citrix.com]
> Sent: 15 December 2011 16:25
> To: Paul Durrant
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH 2 of 3] Re-name xenstore key used to
> save VM generation ID buffer address
>
> Paul Durrant writes ("[Xen-devel] [PATCH 2 of 3] Re-name xenstore
> key used to save VM generation ID buffer address"):
> > Re-name xenstore key used to save VM generation ID buffer address.
> >
> > Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
> >
> > diff -r a4a569729741 -r 9618ee3b6896 tools/firmware/hvmloader/acpi/build.c
> > --- a/tools/firmware/hvmloader/acpi/build.c	Wed Dec 14 14:34:45 2011 +0000
> > +++ b/tools/firmware/hvmloader/acpi/build.c	Wed Dec 14 14:34:47 2011 +0000
> > @@ -309,7 +309,7 @@ unsigned long new_vm_gid(void)
> >      if ( snprintf(addr, sizeof(addr), "0x%lx", virt_to_phys(buf))
> >           >= sizeof(addr) )
> >          return 0;
> > -    xenstore_write("data/generation-id", addr);
> > +    xenstore_write("hvmloader/generation-id-address", addr);
>
> Will making just this change to hvmloader not cause failures without
> the corresponding patch to the toolstack to make that path
> writeable?
>

The xenstore-write will fail with EACCES, but the failure is ignored, so there should be no knock-on failure. Nothing consumes the key until the subsequent tools patch is applied, at which point the xenstore-write will work because the tools will have made the key writable.

  Paul
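In code terms, the behaviour Paul describes is roughly the following sketch; this is not the actual hvmloader source, and xenstore_write()'s exact return convention is assumed here.

    /* Sketch: the result of the write is deliberately ignored, so a new
     * hvmloader running against an old toolstack degrades to "no
     * generation ID address published" rather than a boot failure. */
    if ( xenstore_write("hvmloader/generation-id-address", addr) )
        /* EACCES from xenstored: key not yet writable; nothing reads it */;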
Ian Jackson
2011-Dec-15 17:40 UTC
Re: [PATCH 2 of 3] Re-name xenstore key used to save VM generation ID buffer address
Paul Durrant writes ("Re: [Xen-devel] [PATCH 2 of 3] Re-name xenstore key used to save VM generation ID buffer address"):
> > -----Original Message-----
> > > -    xenstore_write("data/generation-id", addr);
> > > +    xenstore_write("hvmloader/generation-id-address", addr);
> >
> > Will making just this change to hvmloader not cause failures without
> > the corresponding patch to the toolstack to make that path
> > writeable?
>
> The xenstore-write will fail with EACCES, but the failure is ignored, so
> there should be no knock-on failure. Nothing consumes the key until the
> subsequent tools patch is applied, at which point the xenstore-write will
> work because the tools will have made the key writable.

Um, right. OK, I guess. Is fault-oblivious computing the usual approach inside hvmloader?

Ian.
Paul Durrant
2011-Dec-16 11:47 UTC
[PATCH 0 of 3] Support for VM generation ID save/restore and migrate
This patch series adds support for preserving the VM generation ID buffer address in xenstore across save/restore and migrate, and for incrementing the value in all cases except migration. The first patch moves creation of the hvmloader key in xenstore earlier and adds creation of a new read/write hvmloader/generation-id-address key. The second patch changes hvmloader to use the new key (as opposed to the old data/generation-id key). The third patch adds the infrastructure to save and restore the VM generation ID address in xenstore, and the code to increment the value.
# HG changeset patch
# User Paul Durrant <paul.durrant@citrix.com>
# Date 1324035832 0
# Node ID 24fc8670dfcaa9cbdfd89532823b68feb96ca2eb
# Parent  03138a08366b895d79e143119d4c9c72833cdbcd
Make ro_paths and rw_paths dynamic.

The paths need to be different for the HVM and non-HVM cases as, in the
HVM case, we need an 'hvmloader' key. This was previously handled by
creating the hvmloader key in libxl__create_device_model(), which is only
invoked for HVM guests. However, if we are to use the hvmloader key to
parent the 'generation-id-address' key, the creation needs to move earlier
in the sequence. Handling this by making ro_paths and rw_paths dynamic in
libxl__domain_make() seems like the cleanest approach. The read-only
'error', 'drivers', 'attr' and 'messages' keys are no longer created as
they seem to be completely unused.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>

diff -r 03138a08366b -r 24fc8670dfca tools/libxl/libxl_create.c
--- a/tools/libxl/libxl_create.c	Fri Dec 09 16:19:36 2011 +0000
+++ b/tools/libxl/libxl_create.c	Fri Dec 16 11:43:52 2011 +0000
@@ -322,9 +322,10 @@ int libxl__domain_make(libxl__gc *gc, li
     libxl_ctx *ctx = libxl__gc_owner(gc);
     int flags, ret, i, rc;
     char *uuid_string;
-    char *rw_paths[] = { "control/shutdown", "device", "device/suspend/event-channel" , "data"};
-    char *ro_paths[] = { "cpu", "memory", "device", "error", "drivers",
-                         "control", "attr", "messages" };
+    char **ro_paths;
+    int nr_ro_paths;
+    char **rw_paths;
+    int nr_rw_paths;
     char *dom_path, *vm_path, *libxl_path;
     struct xs_permissions roperm[2];
     struct xs_permissions rwperm[1];
@@ -341,6 +342,31 @@ int libxl__domain_make(libxl__gc *gc, li
         goto out;
     }
 
+    nr_ro_paths = 0;
+    if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
+        ro_paths = libxl__calloc(gc, 5, sizeof(char *));
+        ro_paths[nr_ro_paths++] = "hvmloader";
+    } else {
+        ro_paths = libxl__calloc(gc, 4, sizeof(char *));
+    }
+
+    ro_paths[nr_ro_paths++] = "cpu";
+    ro_paths[nr_ro_paths++] = "memory";
+    ro_paths[nr_ro_paths++] = "device";
+    ro_paths[nr_ro_paths++] = "control";
+
+    nr_rw_paths = 0;
+    if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
+        rw_paths = libxl__calloc(gc, 4, sizeof(char *));
+        rw_paths[nr_rw_paths++] = "hvmloader/generation-id-address";
+    } else {
+        rw_paths = libxl__calloc(gc, 3, sizeof(char *));
+    }
+
+    rw_paths[nr_rw_paths++] = "control/shutdown";
+    rw_paths[nr_rw_paths++] = "device/suspend/event-channel";
+    rw_paths[nr_rw_paths++] = "data";
+
     flags = 0;
     if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
         flags |= XEN_DOMCTL_CDF_hvm_guest;
@@ -414,16 +440,16 @@ retry_transaction:
     if (rc)
         goto out;
 
-    for (i = 0; i < ARRAY_SIZE(rw_paths); i++) {
-        char *path = libxl__sprintf(gc, "%s/%s", dom_path, rw_paths[i]);
-        xs_mkdir(ctx->xsh, t, path);
-        xs_set_permissions(ctx->xsh, t, path, rwperm, ARRAY_SIZE(rwperm));
-    }
     for (i = 0; i < ARRAY_SIZE(ro_paths); i++) {
         char *path = libxl__sprintf(gc, "%s/%s", dom_path, ro_paths[i]);
         xs_mkdir(ctx->xsh, t, path);
         xs_set_permissions(ctx->xsh, t, path, roperm, ARRAY_SIZE(roperm));
     }
+    for (i = 0; i < nr_rw_paths; i++) {
+        char *path = libxl__sprintf(gc, "%s/%s", dom_path, rw_paths[i]);
+        xs_mkdir(ctx->xsh, t, path);
+        xs_set_permissions(ctx->xsh, t, path, rwperm, ARRAY_SIZE(rwperm));
+    }
 
     xs_write(ctx->xsh, t, libxl__sprintf(gc, "%s/uuid", vm_path), uuid_string, strlen(uuid_string));
     xs_write(ctx->xsh, t, libxl__sprintf(gc, "%s/name", vm_path), info->name, strlen(info->name));

diff -r 03138a08366b -r 24fc8670dfca tools/libxl/libxl_dm.c
--- a/tools/libxl/libxl_dm.c	Fri Dec 09 16:19:36 2011 +0000
+++ b/tools/libxl/libxl_dm.c	Fri Dec 16 11:43:52 2011 +0000
@@ -821,9 +821,7 @@ int libxl__create_device_model(libxl__gc
         goto out;
     }
 
-    path = libxl__sprintf(gc, "/local/domain/%d/hvmloader", info->domid);
-    xs_mkdir(ctx->xsh, XBT_NULL, path);
-    libxl__xs_write(gc, XBT_NULL, libxl__sprintf(gc, "%s/bios", path),
+    libxl__xs_write(gc, XBT_NULL, libxl__sprintf(gc, "/local/domain/%d/hvmloader/bios", info->domid),
                     "%s", libxl__domain_bios(gc, info));
 
     path = libxl__sprintf(gc, "/local/domain/0/device-model/%d", info->domid);
Paul Durrant
2011-Dec-16 11:47 UTC
[PATCH 2 of 3] Re-name xenstore key used to save VM generation ID buffer address
# HG changeset patch
# User Paul Durrant <paul.durrant@citrix.com>
# Date 1324035832 0
# Node ID d44e885e0389e1e2cad544d098f47c37f2d5a755
# Parent  24fc8670dfcaa9cbdfd89532823b68feb96ca2eb
Re-name xenstore key used to save VM generation ID buffer address.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>

diff -r 24fc8670dfca -r d44e885e0389 tools/firmware/hvmloader/acpi/build.c
--- a/tools/firmware/hvmloader/acpi/build.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/firmware/hvmloader/acpi/build.c	Fri Dec 16 11:43:52 2011 +0000
@@ -309,7 +309,7 @@ unsigned long new_vm_gid(void)
     if ( snprintf(addr, sizeof(addr), "0x%lx", virt_to_phys(buf))
          >= sizeof(addr) )
         return 0;
-    xenstore_write("data/generation-id", addr);
+    xenstore_write("hvmloader/generation-id-address", addr);
 
     gid = strtoll(xenstore_read("platform/generation-id", "0"), NULL, 0);
     *(uint64_t *)buf = gid;
Paul Durrant
2011-Dec-16 11:47 UTC
[PATCH 3 of 3] VM generation ID save/restore and migrate
# HG changeset patch
# User Paul Durrant <paul.durrant@citrix.com>
# Date 1324035833 0
# Node ID e745cfbe7e114cb8a8c331cb6b9c711462300c1f
# Parent  d44e885e0389e1e2cad544d098f47c37f2d5a755
VM generation ID save/restore and migrate.

Add code to track the address of the VM generation ID buffer across a
save/restore or migrate, and increment it as necessary. The address of
the buffer is written into xenstore by hvmloader at boot time. It must
be read from xenstore by the caller of xc_domain_save() and then
written back again by the caller of xc_domain_restore().

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxc/ia64/xc_ia64_linux_restore.c
--- a/tools/libxc/ia64/xc_ia64_linux_restore.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxc/ia64/xc_ia64_linux_restore.c	Fri Dec 16 11:43:53 2011 +0000
@@ -548,7 +548,9 @@
 int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned int store_evtchn, unsigned long *store_mfn,
                       unsigned int console_evtchn, unsigned long *console_mfn,
-                      unsigned int hvm, unsigned int pae, int superpages)
+                      unsigned int hvm, unsigned int pae, int superpages,
+                      int no_incr_generationid,
+                      unsigned long *vm_generationid_addr)
 {
     DECLARE_DOMCTL;
     int rc = 1;

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxc/ia64/xc_ia64_linux_save.c
--- a/tools/libxc/ia64/xc_ia64_linux_save.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxc/ia64/xc_ia64_linux_save.c	Fri Dec 16 11:43:53 2011 +0000
@@ -382,7 +382,8 @@ out:
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
                    uint32_t max_iters, uint32_t max_factor, uint32_t flags,
-                   struct save_callbacks* callbacks, int hvm)
+                   struct save_callbacks* callbacks, int hvm,
+                   unsigned long vm_generationid_addr)
 {
     DECLARE_DOMCTL;
     xc_dominfo_t info;

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxc/xc_domain_restore.c	Fri Dec 16 11:43:53 2011 +0000
@@ -681,6 +681,7 @@ typedef struct {
     uint64_t console_pfn;
     uint64_t acpi_ioport_location;
    uint64_t viridian;
+    uint64_t vm_generationid_addr;
 } pagebuf_t;
 
 static int pagebuf_init(pagebuf_t* buf)
@@ -860,6 +861,17 @@ static int pagebuf_get_one(xc_interface
         }
         return compbuf_size;
 
+    case XC_SAVE_ID_HVM_GENERATION_ID_ADDR:
+        /* Skip padding 4 bytes then read the generation id buffer location. */
+        if ( RDEXACT(fd, &buf->vm_generationid_addr, sizeof(uint32_t)) ||
+             RDEXACT(fd, &buf->vm_generationid_addr, sizeof(uint64_t)) )
+        {
+            PERROR("error read the generation id buffer location");
+            return -1;
+        }
+        DPRINTF("read generation id buffer address");
+        return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
     default:
         if ( (count > MAX_BATCH_SIZE) || (count < 0) ) {
             ERROR("Max batch size exceeded (%d). Giving up.", count);
@@ -1248,7 +1260,9 @@ static int apply_batch(xc_interface *xch
 int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned int store_evtchn, unsigned long *store_mfn,
                       unsigned int console_evtchn, unsigned long *console_mfn,
-                      unsigned int hvm, unsigned int pae, int superpages)
+                      unsigned int hvm, unsigned int pae, int superpages,
+                      int no_incr_generationid,
+                      unsigned long *vm_generationid_addr)
 {
     DECLARE_DOMCTL;
     int rc = 1, frc, i, j, n, m, pae_extended_cr3 = 0, ext_vcpucontext = 0;
@@ -1449,6 +1463,39 @@ int xc_domain_restore(xc_interface *xch,
             xc_set_hvm_param(xch, dom, HVM_PARAM_VM86_TSS, pagebuf.vm86_tss);
         if ( pagebuf.console_pfn )
             console_pfn = pagebuf.console_pfn;
+        if ( pagebuf.vm_generationid_addr ) {
+            if ( !no_incr_generationid ) {
+                unsigned int offset;
+                unsigned char *buf;
+                unsigned long long generationid;
+
+                /*
+                 * Map the VM generation id buffer and inject the new value.
+                 */
+
+                pfn = pagebuf.vm_generationid_addr >> PAGE_SHIFT;
+                offset = pagebuf.vm_generationid_addr & (PAGE_SIZE - 1);
+
+                if ( (pfn >= dinfo->p2m_size) ||
+                     (pfn_type[pfn] != XEN_DOMCTL_PFINFO_NOTAB) )
+                {
+                    ERROR("generation id buffer frame is bad");
+                    goto out;
+                }
+
+                mfn = ctx->p2m[pfn];
+                buf = xc_map_foreign_range(xch, dom, PAGE_SIZE,
+                                           PROT_READ | PROT_WRITE, mfn);
+
+                generationid = *(unsigned long long *)(buf + offset);
+                *(unsigned long long *)(buf + offset) = generationid + 1;
+
+                munmap(buf, PAGE_SIZE);
+            }
+
+            *vm_generationid_addr = pagebuf.vm_generationid_addr;
+        }
+
         break;  /* our work here is done */
     }

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxc/xc_domain_save.c	Fri Dec 16 11:43:53 2011 +0000
@@ -804,7 +804,8 @@ static int save_tsc_info(xc_interface *x
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
                    uint32_t max_iters, uint32_t max_factor, uint32_t flags,
-                   struct save_callbacks* callbacks, int hvm)
+                   struct save_callbacks* callbacks, int hvm,
+                   unsigned long vm_generationid_addr)
 {
     xc_dominfo_t info;
     DECLARE_DOMCTL;
@@ -1616,6 +1617,16 @@ int xc_domain_save(xc_interface *xch, in
             uint64_t data;
         } chunk = { 0, };
 
+        chunk.id = XC_SAVE_ID_HVM_GENERATION_ID_ADDR;
+        chunk.data = vm_generationid_addr;
+
+        if ( (chunk.data != 0) &&
+             wrexact(io_fd, &chunk, sizeof(chunk)) )
+        {
+            PERROR("Error when writing the generation id buffer location for guest");
+            goto out;
+        }
+
         chunk.id = XC_SAVE_ID_HVM_IDENT_PT;
         chunk.data = 0;
         xc_get_hvm_param(xch, dom, HVM_PARAM_IDENT_PT,

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxc/xenguest.h
--- a/tools/libxc/xenguest.h	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxc/xenguest.h	Fri Dec 16 11:43:53 2011 +0000
@@ -58,7 +58,8 @@ struct save_callbacks {
  */
 int xc_domain_save(xc_interface *xch, int io_fd, uint32_t dom,
                    uint32_t max_iters, uint32_t max_factor, uint32_t flags /* XCFLAGS_xxx */,
-                   struct save_callbacks* callbacks, int hvm);
+                   struct save_callbacks* callbacks, int hvm,
+                   unsigned long vm_generationid_addr);
 
 
 /**
@@ -72,12 +73,16 @@ int xc_domain_save(xc_interface *xch, in
  * @parm hvm non-zero if this is a HVM restore
  * @parm pae non-zero if this HVM domain has PAE support enabled
  * @parm superpages non-zero to allocate guest memory with superpages
+ * @parm no_incr_generationid non-zero if generation id is NOT to be incremented
+ * @parm vm_generationid_addr returned with the address of the generation id buffer
  * @return 0 on success, -1 on failure
 */
 int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
                       unsigned int store_evtchn, unsigned long *store_mfn,
                       unsigned int console_evtchn, unsigned long *console_mfn,
-                      unsigned int hvm, unsigned int pae, int superpages);
+                      unsigned int hvm, unsigned int pae, int superpages,
+                      int no_incr_generationid,
+                      unsigned long *vm_generationid_addr);
 /**
  * xc_domain_restore writes a file to disk that contains the device
  * model saved state.

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxc/xg_save_restore.h	Fri Dec 16 11:43:53 2011 +0000
@@ -253,6 +253,7 @@
 #define XC_SAVE_ID_HVM_VIRIDIAN       -11
 #define XC_SAVE_ID_COMPRESSED_DATA    -12 /* Marker to indicate arrival of compressed data */
 #define XC_SAVE_ID_ENABLE_COMPRESSION -13 /* Marker to enable compression logic at receiver side */
+#define XC_SAVE_ID_HVM_GENERATION_ID_ADDR -14
 
 /*
 ** We process save/restore/migrate in batches of pages; the below

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxl/libxl_create.c
--- a/tools/libxl/libxl_create.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxl/libxl_create.c	Fri Dec 16 11:43:53 2011 +0000
@@ -99,6 +99,7 @@ int libxl_init_build_info(libxl_ctx *ctx
         b_info->u.hvm.vpt_align = 1;
         b_info->u.hvm.timer_mode = 1;
         b_info->u.hvm.nested_hvm = 0;
+        b_info->u.hvm.no_incr_generationid = 0;
         break;
     case LIBXL_DOMAIN_TYPE_PV:
         b_info->u.pv.slack_memkb = 8 * 1024;

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxl/libxl_dom.c	Fri Dec 16 11:43:53 2011 +0000
@@ -106,6 +106,7 @@ int libxl__build_pre(libxl__gc *gc, uint
     state->store_port = xc_evtchn_alloc_unbound(ctx->xch, domid, 0);
     state->console_port = xc_evtchn_alloc_unbound(ctx->xch, domid, 0);
+    state->vm_generationid_addr = 0;
 
     return 0;
 }
@@ -117,7 +118,7 @@ int libxl__build_post(libxl__gc *gc, uin
     libxl_ctx *ctx = libxl__gc_owner(gc);
     char *dom_path, *vm_path;
     xs_transaction_t t;
-    char **ents;
+    char **ents, **hvm_ents;
     int i;
 
     libxl_cpuid_apply_policy(ctx, domid);
@@ -143,6 +144,13 @@ int libxl__build_post(libxl__gc *gc, uin
             ? "offline" : "online";
     }
 
+    hvm_ents = NULL;
+    if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
+        hvm_ents = libxl__calloc(gc, 3, sizeof(char *));
+        hvm_ents[0] = "hvmloader/generation-id-address";
+        hvm_ents[1] = libxl__sprintf(gc, "0x%lx", state->vm_generationid_addr);
+    }
+
     dom_path = libxl__xs_get_dompath(gc, domid);
     if (!dom_path) {
         return ERROR_FAIL;
@@ -153,6 +161,9 @@ retry_transaction:
     t = xs_transaction_start(ctx->xsh);
 
     libxl__xs_writev(gc, t, dom_path, ents);
+    if (info->type == LIBXL_DOMAIN_TYPE_HVM)
+        libxl__xs_writev(gc, t, dom_path, hvm_ents);
+
     libxl__xs_writev(gc, t, dom_path, local_ents);
     libxl__xs_writev(gc, t, vm_path, vms_ents);
@@ -356,16 +367,19 @@ int libxl__domain_restore_common(libxl__
     /* read signature */
     int rc;
     int hvm, pae, superpages;
+    int no_incr_generationid;
 
     switch (info->type) {
     case LIBXL_DOMAIN_TYPE_HVM:
         hvm = 1;
         superpages = 1;
         pae = info->u.hvm.pae;
+        no_incr_generationid = info->u.hvm.no_incr_generationid;
         break;
     case LIBXL_DOMAIN_TYPE_PV:
         hvm = 0;
         superpages = 0;
         pae = 1;
+        no_incr_generationid = 0;
         break;
     default:
         return ERROR_INVAL;
@@ -373,7 +387,8 @@ int libxl__domain_restore_common(libxl__
     rc = xc_domain_restore(ctx->xch, fd, domid,
                            state->store_port, &state->store_mfn,
                            state->console_port, &state->console_mfn,
-                           hvm, pae, superpages);
+                           hvm, pae, superpages, no_incr_generationid,
+                           &state->vm_generationid_addr);
     if ( rc ) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "restoring domain");
         return ERROR_FAIL;
@@ -539,12 +554,23 @@ int libxl__domain_suspend_common(libxl__
     struct save_callbacks callbacks;
     struct suspendinfo si;
     int hvm, rc = ERROR_FAIL;
+    unsigned long vm_generationid_addr;
 
     switch (type) {
-    case LIBXL_DOMAIN_TYPE_HVM:
+    case LIBXL_DOMAIN_TYPE_HVM: {
+        char *path;
+        char *addr;
+
+        path = libxl__sprintf(gc, "%s/hvmloader/generation-id-address",
+                              libxl__xs_get_dompath(gc, domid));
+        addr = libxl__xs_read(gc, XBT_NULL, path);
+
+        vm_generationid_addr = (addr) ? strtoul(addr, NULL, 0) : 0;
         hvm = 1;
         break;
+    }
     case LIBXL_DOMAIN_TYPE_PV:
+        vm_generationid_addr = 0;
         hvm = 0;
         break;
     default:
@@ -582,7 +608,8 @@ int libxl__domain_suspend_common(libxl__
     callbacks.switch_qemu_logdirty = libxl__domain_suspend_common_switch_qemu_logdirty;
     callbacks.data = &si;
 
-    rc = xc_domain_save(ctx->xch, fd, domid, 0, 0, flags, &callbacks, hvm);
+    rc = xc_domain_save(ctx->xch, fd, domid, 0, 0, flags, &callbacks,
+                        hvm, vm_generationid_addr);
     if ( rc ) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "saving domain: %s",
                          si.guest_responded ?

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxl/libxl_internal.h
--- a/tools/libxl/libxl_internal.h	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxl/libxl_internal.h	Fri Dec 16 11:43:53 2011 +0000
@@ -218,6 +218,7 @@ typedef struct {
     uint32_t console_port;
     unsigned long console_mfn;
+    unsigned long vm_generationid_addr;
 } libxl__domain_build_state;
 
 _hidden int libxl__build_pre(libxl__gc *gc, uint32_t domid,

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxl/libxl_types.idl
--- a/tools/libxl/libxl_types.idl	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxl/libxl_types.idl	Fri Dec 16 11:43:53 2011 +0000
@@ -184,6 +184,7 @@ libxl_domain_build_info = Struct("domain
                                        ("vpt_align", bool),
                                        ("timer_mode", integer),
                                        ("nested_hvm", bool),
+                                       ("no_incr_generationid", bool),
                                        ])),
                 ("pv", Struct(None, [("kernel", libxl_file_reference),
                                      ("slack_memkb", uint32),

diff -r d44e885e0389 -r e745cfbe7e11 tools/libxl/xl_cmdimpl.c
--- a/tools/libxl/xl_cmdimpl.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/libxl/xl_cmdimpl.c	Fri Dec 16 11:43:53 2011 +0000
@@ -360,6 +360,8 @@ static void printf_info(int domid,
         printf("\t\t\t(vpt_align %d)\n", b_info->u.hvm.vpt_align);
         printf("\t\t\t(timer_mode %d)\n", b_info->u.hvm.timer_mode);
         printf("\t\t\t(nestedhvm %d)\n", b_info->u.hvm.nested_hvm);
+        printf("\t\t\t(no_incr_generationid %d)\n",
+               b_info->u.hvm.no_incr_generationid);
 
         printf("\t\t\t(device_model %s)\n", dm_info->device_model ? : "default");
         printf("\t\t\t(videoram %d)\n", dm_info->videoram);
@@ -1362,6 +1364,7 @@ struct domain_create {
     const char *restore_file;
     int migrate_fd; /* -1 means none */
     char **migration_domname_r; /* from malloc */
+    int no_incr_generationid;
 };
 
 static int freemem(libxl_domain_build_info *b_info, libxl_device_model_info *dm_info)
@@ -1575,6 +1578,8 @@ static int create_domain(struct domain_c
         }
     }
 
+    d_config.b_info.u.hvm.no_incr_generationid = dom_info->no_incr_generationid;
+
     if (debug || dom_info->dryrun)
         printf_info(-1, &d_config, &d_config.dm_info);
@@ -2800,6 +2805,7 @@ static void migrate_receive(int debug, i
     dom_info.restore_file = "incoming migration stream";
     dom_info.migrate_fd = 0; /* stdin */
     dom_info.migration_domname_r = &migration_domname;
+    dom_info.no_incr_generationid = 1;
 
     rc = create_domain(&dom_info);
     if (rc < 0) {

diff -r d44e885e0389 -r e745cfbe7e11 tools/python/xen/lowlevel/checkpoint/libcheckpoint.c
--- a/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c	Fri Dec 16 11:43:53 2011 +0000
@@ -175,6 +175,7 @@ int checkpoint_start(checkpoint_state* s
 {
   int hvm, rc;
   int flags = XCFLAGS_LIVE;
+  unsigned long vm_generationid_addr;
 
   if (!s->domid) {
     s->errstr = "checkpoint state not opened";
@@ -185,16 +186,28 @@ int checkpoint_start(checkpoint_state* s
 
   hvm = s->domtype > dt_pv;
   if (hvm) {
+    char path[128];
+    char *addr;
+
+    sprintf(path, "/local/domain/%u/hvmloader/generation-id-address", s->domid);
+    addr = xs_read(s->xsh, XBT_NULL, path, NULL);
+
+    vm_generationid_addr = (addr) ? strtoul(addr, NULL, 0) : 0;
+    free(addr);
+
     flags |= XCFLAGS_HVM;
     if (switch_qemu_logdirty(s, 1))
       return -1;
+  } else {
+    vm_generationid_addr = 0;
  }
 
   if (remus_flags & CHECKPOINT_FLAGS_COMPRESSION)
     flags |= XCFLAGS_CHECKPOINT_COMPRESS;
 
   callbacks->switch_qemu_logdirty = noop_switch_logdirty;
 
-  rc = xc_domain_save(s->xch, fd, s->domid, 0, 0, flags, callbacks, hvm);
+  rc = xc_domain_save(s->xch, fd, s->domid, 0, 0, flags, callbacks, hvm,
+                      vm_generationid_addr);
 
   if (hvm)
     switch_qemu_logdirty(s, 0);

diff -r d44e885e0389 -r e745cfbe7e11 tools/xcutils/xc_restore.c
--- a/tools/xcutils/xc_restore.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/xcutils/xc_restore.c	Fri Dec 16 11:43:53 2011 +0000
@@ -23,11 +23,13 @@ main(int argc, char **argv)
     xc_interface *xch;
     int io_fd, ret;
     int superpages;
-    unsigned long store_mfn, console_mfn;
+    unsigned long store_mfn, console_mfn, vm_generationid_addr;
+    int no_incr_generationid;
 
-    if ( (argc != 8) && (argc != 9) )
+    if ( (argc < 8) || (argc > 10) )
         errx(1, "usage: %s iofd domid store_evtchn "
-             "console_evtchn hvm pae apic [superpages]", argv[0]);
+             "console_evtchn hvm pae apic "
+             "[superpages [no_incr_generationid]]", argv[0]);
 
     xch = xc_interface_open(0,0,0);
     if ( !xch )
@@ -40,19 +42,25 @@ main(int argc, char **argv)
     hvm = atoi(argv[5]);
     pae = atoi(argv[6]);
     apic = atoi(argv[7]);
-    if ( argc == 9 )
+    if ( argc >= 9 )
         superpages = atoi(argv[8]);
     else
         superpages = !!hvm;
+    if ( argc == 10 )
+        no_incr_generationid = !atoi(argv[9]);
+    else
+        no_incr_generationid = 0;
 
     ret = xc_domain_restore(xch, io_fd, domid, store_evtchn, &store_mfn,
-                            console_evtchn, &console_mfn, hvm, pae, superpages);
+                            console_evtchn, &console_mfn, hvm, pae, superpages,
+                            no_incr_generationid, &vm_generationid_addr);
 
     if ( ret == 0 )
     {
        printf("store-mfn %li\n", store_mfn);
        if ( !hvm )
            printf("console-mfn %li\n", console_mfn);
+       printf("generation-id-address %lx\n", vm_generationid_addr);
        fflush(stdout);
     }

diff -r d44e885e0389 -r e745cfbe7e11 tools/xcutils/xc_save.c
--- a/tools/xcutils/xc_save.c	Fri Dec 16 11:43:52 2011 +0000
+++ b/tools/xcutils/xc_save.c	Fri Dec 16 11:43:53 2011 +0000
@@ -169,6 +169,10 @@ main(int argc, char **argv)
     unsigned int maxit, max_f;
     int io_fd, ret, port;
     struct save_callbacks callbacks;
+    char path[128];
+    struct xs_handle *xs;
+    char *addr;
+    unsigned long vm_generationid_addr;
 
     if (argc != 6)
         errx(1, "usage: %s iofd domid maxit maxf flags", argv[0]);
@@ -207,8 +211,22 @@ main(int argc, char **argv)
     memset(&callbacks, 0, sizeof(callbacks));
     callbacks.suspend = suspend;
     callbacks.switch_qemu_logdirty = switch_qemu_logdirty;
+
+    sprintf(path, "/local/domain/%d/hvmloader/generation-id-address", si.domid);
+
+    if ((xs = xs_daemon_open()) == NULL)
+        errx(1, "Couldn't contact xenstore");
+
+    addr = xs_read(xs, XBT_NULL, path, NULL);
+
+    xs_daemon_close(xs);
+
+    vm_generationid_addr = (addr) ? strtoul(addr, NULL, 0) : 0;
+    free(addr);
+
     ret = xc_domain_save(si.xch, io_fd, si.domid, maxit, max_f, si.flags,
-                         &callbacks, !!(si.flags & XCFLAGS_HVM));
+                         &callbacks, !!(si.flags & XCFLAGS_HVM),
+                         vm_generationid_addr);
 
     if (si.suspend_evtchn > 0)
         xc_suspend_evtchn_release(si.xch, si.xce, si.domid, si.suspend_evtchn);
Ian Campbell
2011-Dec-16 12:02 UTC
Re: [PATCH 1 of 3] Make ro_paths and rw_paths dynamic
On Fri, 2011-12-16 at 11:47 +0000, Paul Durrant wrote:
> +    nr_ro_paths = 0;
> +    if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
> +        ro_paths = libxl__calloc(gc, 5, sizeof(char *));
> +        ro_paths[nr_ro_paths++] = "hvmloader";
> +    } else {
> +        ro_paths = libxl__calloc(gc, 4, sizeof(char *));
> +    }
> +
> +    ro_paths[nr_ro_paths++] = "cpu";
> +    ro_paths[nr_ro_paths++] = "memory";
> +    ro_paths[nr_ro_paths++] = "device";
> +    ro_paths[nr_ro_paths++] = "control";

The flexarray stuff allows you to do this sort of thing without worrying about running off the end of the allocated array etc.

Part of me thinks that if the arrays aren't static any more you might as well just do the create in an open coded list, instead of open coding the creation of a list and then iterating over it.

A helper function like libxl__xs_mkdir(gc, t, path, perm) would reduce the amount of boilerplate.

> @@ -414,16 +440,16 @@ retry_transaction:
>      if (rc)
>          goto out;
>
> -    for (i = 0; i < ARRAY_SIZE(rw_paths); i++) {
> -        char *path = libxl__sprintf(gc, "%s/%s", dom_path, rw_paths[i]);
> -        xs_mkdir(ctx->xsh, t, path);
> -        xs_set_permissions(ctx->xsh, t, path, rwperm, ARRAY_SIZE(rwperm));
> -    }
>      for (i = 0; i < ARRAY_SIZE(ro_paths); i++) {
>          char *path = libxl__sprintf(gc, "%s/%s", dom_path, ro_paths[i]);
>          xs_mkdir(ctx->xsh, t, path);
>          xs_set_permissions(ctx->xsh, t, path, roperm, ARRAY_SIZE(roperm));
>      }
> +    for (i = 0; i < nr_rw_paths; i++) {
> +        char *path = libxl__sprintf(gc, "%s/%s", dom_path, rw_paths[i]);
> +        xs_mkdir(ctx->xsh, t, path);
> +        xs_set_permissions(ctx->xsh, t, path, rwperm, ARRAY_SIZE(rwperm));
> +    }

What does "xenstore-ls -fp" show before and after this re-ordering?

>      xs_write(ctx->xsh, t, libxl__sprintf(gc, "%s/uuid", vm_path), uuid_string, strlen(uuid_string));
>      xs_write(ctx->xsh, t, libxl__sprintf(gc, "%s/name", vm_path), info->name, strlen(info->name));

> diff -r 03138a08366b -r 24fc8670dfca tools/libxl/libxl_dm.c
> --- a/tools/libxl/libxl_dm.c	Fri Dec 09 16:19:36 2011 +0000
> +++ b/tools/libxl/libxl_dm.c	Fri Dec 16 11:43:52 2011 +0000
> @@ -821,9 +821,7 @@ int libxl__create_device_model(libxl__gc
>          goto out;
>      }
>
> -    path = libxl__sprintf(gc, "/local/domain/%d/hvmloader", info->domid);
> -    xs_mkdir(ctx->xsh, XBT_NULL, path);
> -    libxl__xs_write(gc, XBT_NULL, libxl__sprintf(gc, "%s/bios", path),
> +    libxl__xs_write(gc, XBT_NULL, libxl__sprintf(gc, "/local/domain/%d/hvmloader/bios", info->domid),
>                      "%s", libxl__domain_bios(gc, info));
>
>      path = libxl__sprintf(gc, "/local/domain/0/device-model/%d", info->domid);

Pre-existing problem but this should be libxl__xs_get_dompath.

Ian.
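For concreteness, the helper Ian suggests might look like the sketch below. The name and argument list follow his suggestion; such a helper does not exist in the tree at this point, and the xs_* calls are the ones the patch already open-codes.

    static void libxl__xs_mkdir(libxl__gc *gc, xs_transaction_t t,
                                const char *path,
                                struct xs_permissions *perms,
                                unsigned int num_perms)
    {
        libxl_ctx *ctx = libxl__gc_owner(gc);

        /* Combine the mkdir + set_permissions pair from each loop body. */
        xs_mkdir(ctx->xsh, t, path);
        xs_set_permissions(ctx->xsh, t, path, perms, num_perms);
    }

Each loop body in libxl__domain_make() would then collapse to a single call such as libxl__xs_mkdir(gc, t, libxl__sprintf(gc, "%s/%s", dom_path, ro_paths[i]), roperm, ARRAY_SIZE(roperm)).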
Ian Campbell
2011-Dec-16 12:07 UTC
Re: [PATCH 3 of 3] VM generation ID save/restore and migrate
On Fri, 2011-12-16 at 11:47 +0000, Paul Durrant wrote:
> diff -r d44e885e0389 -r e745cfbe7e11 tools/python/xen/lowlevel/checkpoint/libcheckpoint.c
> --- a/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c	Fri Dec 16 11:43:52 2011 +0000
> +++ b/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c	Fri Dec 16 11:43:53 2011 +0000
> @@ -175,6 +175,7 @@ int checkpoint_start(checkpoint_state* s
> {
>   int hvm, rc;
>   int flags = XCFLAGS_LIVE;
> +  unsigned long vm_generationid_addr;
>
>   if (!s->domid) {
>     s->errstr = "checkpoint state not opened";
> @@ -185,16 +186,28 @@ int checkpoint_start(checkpoint_state* s
>
>   hvm = s->domtype > dt_pv;
>   if (hvm) {
> +    char path[128];
> +    char *addr;
> +
> +    sprintf(path, "/local/domain/%u/hvmloader/generation-id-address", s->domid);

xs_get_domain_path() gives you the correct base path (I saw at least one more of these).

> diff -r d44e885e0389 -r e745cfbe7e11 tools/xcutils/xc_restore.c
> --- a/tools/xcutils/xc_restore.c	Fri Dec 16 11:43:52 2011 +0000
> +++ b/tools/xcutils/xc_restore.c	Fri Dec 16 11:43:53 2011 +0000
[...]
> diff -r d44e885e0389 -r e745cfbe7e11 tools/xcutils/xc_save.c
> --- a/tools/xcutils/xc_save.c	Fri Dec 16 11:43:52 2011 +0000
> +++ b/tools/xcutils/xc_save.c	Fri Dec 16 11:43:53 2011 +0000
[...]

AFAIK these two are only used by xend, so unless you are adding support for this stuff there (it's deprecated so no need) this isn't necessary. Also, I think xend reads the stdout of one or both, and you've added to what gets printed, running the risk of breaking things.

Ian.
> -----Original Message-----
> From: Ian Campbell
> Sent: 16 December 2011 12:02
> To: Paul Durrant
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH 1 of 3] Make ro_paths and rw_paths
> dynamic
>
> On Fri, 2011-12-16 at 11:47 +0000, Paul Durrant wrote:
> > +    nr_ro_paths = 0;
> > +    if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
> > +        ro_paths = libxl__calloc(gc, 5, sizeof(char *));
> > +        ro_paths[nr_ro_paths++] = "hvmloader";
> > +    } else {
> > +        ro_paths = libxl__calloc(gc, 4, sizeof(char *));
> > +    }
> > +
> > +    ro_paths[nr_ro_paths++] = "cpu";
> > +    ro_paths[nr_ro_paths++] = "memory";
> > +    ro_paths[nr_ro_paths++] = "device";
> > +    ro_paths[nr_ro_paths++] = "control";
>
> The flexarray stuff allows you to do this sort of thing without
> worrying about running off the end of the allocated array etc.
>
> Part of me thinks that if the arrays aren't static any more you
> might as well just do the create in an open coded list, instead of
> open coding the creation of a list and then iterating over it.
>
> A helper function like libxl__xs_mkdir(gc, t, path, perm) would
> reduce the amount of boilerplate.
>

Actually, yes, the helper function would be a much neater solution. I'll go for that.

  Paul

> > @@ -414,16 +440,16 @@ retry_transaction:
> >      if (rc)
> >          goto out;
> >
> > -    for (i = 0; i < ARRAY_SIZE(rw_paths); i++) {
> > -        char *path = libxl__sprintf(gc, "%s/%s", dom_path, rw_paths[i]);
> > -        xs_mkdir(ctx->xsh, t, path);
> > -        xs_set_permissions(ctx->xsh, t, path, rwperm, ARRAY_SIZE(rwperm));
> > -    }
> >      for (i = 0; i < ARRAY_SIZE(ro_paths); i++) {
> >          char *path = libxl__sprintf(gc, "%s/%s", dom_path, ro_paths[i]);
> >          xs_mkdir(ctx->xsh, t, path);
> >          xs_set_permissions(ctx->xsh, t, path, roperm, ARRAY_SIZE(roperm));
> >      }
> > +    for (i = 0; i < nr_rw_paths; i++) {
> > +        char *path = libxl__sprintf(gc, "%s/%s", dom_path, rw_paths[i]);
> > +        xs_mkdir(ctx->xsh, t, path);
> > +        xs_set_permissions(ctx->xsh, t, path, rwperm, ARRAY_SIZE(rwperm));
> > +    }
>
> What does "xenstore-ls -fp" show before and after this re-ordering?
>
> >      xs_write(ctx->xsh, t, libxl__sprintf(gc, "%s/uuid", vm_path), uuid_string, strlen(uuid_string));
> >      xs_write(ctx->xsh, t, libxl__sprintf(gc, "%s/name", vm_path), info->name, strlen(info->name));
>
> > diff -r 03138a08366b -r 24fc8670dfca tools/libxl/libxl_dm.c
> > --- a/tools/libxl/libxl_dm.c	Fri Dec 09 16:19:36 2011 +0000
> > +++ b/tools/libxl/libxl_dm.c	Fri Dec 16 11:43:52 2011 +0000
> > @@ -821,9 +821,7 @@ int libxl__create_device_model(libxl__gc
> >          goto out;
> >      }
> >
> > -    path = libxl__sprintf(gc, "/local/domain/%d/hvmloader", info->domid);
> > -    xs_mkdir(ctx->xsh, XBT_NULL, path);
> > -    libxl__xs_write(gc, XBT_NULL, libxl__sprintf(gc, "%s/bios", path),
> > +    libxl__xs_write(gc, XBT_NULL, libxl__sprintf(gc,
> > +    "/local/domain/%d/hvmloader/bios", info->domid),
> >                      "%s", libxl__domain_bios(gc, info));
> >
> >      path = libxl__sprintf(gc, "/local/domain/0/device-model/%d",
> >      info->domid);
>
> Pre-existing problem but this should be libxl__xs_get_dompath.
>
> Ian.
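Combining the helper with Ian's earlier suggestion of an open-coded list, the per-domain keys could be created directly, without building the arrays at all. A sketch, assuming the hypothetical libxl__xs_mkdir() shown earlier:

    /* Sketch: create keys directly instead of filling arrays first. */
    libxl__xs_mkdir(gc, t, libxl__sprintf(gc, "%s/cpu", dom_path),
                    roperm, ARRAY_SIZE(roperm));
    libxl__xs_mkdir(gc, t, libxl__sprintf(gc, "%s/data", dom_path),
                    rwperm, ARRAY_SIZE(rwperm));
    if (info->type == LIBXL_DOMAIN_TYPE_HVM) {
        libxl__xs_mkdir(gc, t,
                        libxl__sprintf(gc, "%s/hvmloader", dom_path),
                        roperm, ARRAY_SIZE(roperm));
        libxl__xs_mkdir(gc, t,
                        libxl__sprintf(gc, "%s/hvmloader/generation-id-address", dom_path),
                        rwperm, ARRAY_SIZE(rwperm));
    }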
Paul Durrant
2011-Dec-16 12:51 UTC
Re: [PATCH 3 of 3] VM generation ID save/restore and migrate
> -----Original Message-----
> From: Ian Campbell
> Sent: 16 December 2011 12:08
> To: Paul Durrant
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH 3 of 3] VM generation ID
> save/restore and migrate
>
> On Fri, 2011-12-16 at 11:47 +0000, Paul Durrant wrote:
> > diff -r d44e885e0389 -r e745cfbe7e11 tools/python/xen/lowlevel/checkpoint/libcheckpoint.c
> > --- a/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c	Fri Dec 16 11:43:52 2011 +0000
> > +++ b/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c	Fri Dec 16 11:43:53 2011 +0000
> > @@ -175,6 +175,7 @@ int checkpoint_start(checkpoint_state* s
> > {
> >    int hvm, rc;
> >    int flags = XCFLAGS_LIVE;
> > +  unsigned long vm_generationid_addr;
> >
> >    if (!s->domid) {
> >      s->errstr = "checkpoint state not opened";
> > @@ -185,16 +186,28 @@ int checkpoint_start(checkpoint_state* s
> >
> >    hvm = s->domtype > dt_pv;
> >    if (hvm) {
> > +    char path[128];
> > +    char *addr;
> > +
> > +    sprintf(path, "/local/domain/%u/hvmloader/generation-id-address", s->domid);
>
> xs_get_domain_path() gives you the correct base path (I saw at least
> one more of these).
>

Does that save me anything? I'd end up having to

sprintf(path, "%s/hvmloader/generation-id-address", xs_get_domain_path(s->xsh, s->domid))

since xs_read() just takes a path rather than a (prefix, node) couple.

Paul

> > diff -r d44e885e0389 -r e745cfbe7e11 tools/xcutils/xc_restore.c
> > --- a/tools/xcutils/xc_restore.c	Fri Dec 16 11:43:52 2011 +0000
> > +++ b/tools/xcutils/xc_restore.c	Fri Dec 16 11:43:53 2011 +0000
> [...]
> > diff -r d44e885e0389 -r e745cfbe7e11 tools/xcutils/xc_save.c
> > --- a/tools/xcutils/xc_save.c	Fri Dec 16 11:43:52 2011 +0000
> > +++ b/tools/xcutils/xc_save.c	Fri Dec 16 11:43:53 2011 +0000
> [...]
>
> AFAIK these two are only used by xend so unless you are adding
> support for this stuff there (it's deprecated so no need) this isn't
> necessary, also I think xend reads the stdout of one or both and
> you've added to what gets printed, running the risk of breaking
> things.
>
> Ian.
>
Ian Campbell
2011-Dec-16 13:20 UTC
Re: [PATCH 3 of 3] VM generation ID save/restore and migrate
On Fri, 2011-12-16 at 12:51 +0000, Paul Durrant wrote:
> > -----Original Message-----
> > From: Ian Campbell
> > Sent: 16 December 2011 12:08
> > To: Paul Durrant
> > Cc: xen-devel@lists.xensource.com
> > Subject: Re: [Xen-devel] [PATCH 3 of 3] VM generation ID
> > save/restore and migrate
> >
> > On Fri, 2011-12-16 at 11:47 +0000, Paul Durrant wrote:
> > > diff -r d44e885e0389 -r e745cfbe7e11 tools/python/xen/lowlevel/checkpoint/libcheckpoint.c
> > > --- a/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c	Fri Dec 16 11:43:52 2011 +0000
> > > +++ b/tools/python/xen/lowlevel/checkpoint/libcheckpoint.c	Fri Dec 16 11:43:53 2011 +0000
> > > @@ -175,6 +175,7 @@ int checkpoint_start(checkpoint_state* s
> > > {
> > >    int hvm, rc;
> > >    int flags = XCFLAGS_LIVE;
> > > +  unsigned long vm_generationid_addr;
> > >
> > >    if (!s->domid) {
> > >      s->errstr = "checkpoint state not opened";
> > > @@ -185,16 +186,28 @@ int checkpoint_start(checkpoint_state* s
> > >
> > >    hvm = s->domtype > dt_pv;
> > >    if (hvm) {
> > > +    char path[128];
> > > +    char *addr;
> > > +
> > > +    sprintf(path, "/local/domain/%u/hvmloader/generation-id-address", s->domid);
> >
> > xs_get_domain_path() gives you the correct base path (I saw at least
> > one more of these).
> >
>
> Does that save me anything?

Probably not much, other than avoiding hardcoding something. I guess
/local/domain/%d is fair game in a toolstack.

> I'd end up having to
>
> sprintf(path, "%s/hvmloader/generation-id-address", xs_get_domain_path(s->xsh, s->domid))
>
> since xs_read() just takes a path rather than a (prefix, node) couple.

xs_chdir() would actually be quite useful for this sort of thing, but
it doesn't exist so nevermind.

Ian.

>
> Paul
>
> > > diff -r d44e885e0389 -r e745cfbe7e11 tools/xcutils/xc_restore.c
> > > --- a/tools/xcutils/xc_restore.c	Fri Dec 16 11:43:52 2011 +0000
> > > +++ b/tools/xcutils/xc_restore.c	Fri Dec 16 11:43:53 2011 +0000
> > [...]
> > > diff -r d44e885e0389 -r e745cfbe7e11 tools/xcutils/xc_save.c
> > > --- a/tools/xcutils/xc_save.c	Fri Dec 16 11:43:52 2011 +0000
> > > +++ b/tools/xcutils/xc_save.c	Fri Dec 16 11:43:53 2011 +0000
> > [...]
> >
> > AFAIK these two are only used by xend so unless you are adding
> > support for this stuff there (it's deprecated so no need) this isn't
> > necessary, also I think xend reads the stdout of one or both and
> > you've added to what gets printed, running the risk of breaking
> > things.
> >
> > Ian.
> >
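The missing xs_chdir() could be approximated in client code by caching the domain path once and resolving nodes against it. This is purely a sketch; none of these names are real libxenstore API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <xs.h>

    /* Hypothetical helper emulating the xs_chdir() idea: hold the
     * domain path as a prefix and read nodes relative to it. */
    struct xs_dir {
        struct xs_handle *xsh;
        char *prefix;               /* malloc'd by xs_get_domain_path() */
    };

    static int xs_dir_open(struct xs_dir *d, struct xs_handle *xsh,
                           unsigned int domid)
    {
        d->xsh = xsh;
        d->prefix = xs_get_domain_path(xsh, domid);
        return d->prefix ? 0 : -1;
    }

    static void *xs_dir_read(struct xs_dir *d, const char *node,
                             unsigned int *len)
    {
        char path[256];

        snprintf(path, sizeof(path), "%s/%s", d->prefix, node);
        return xs_read(d->xsh, XBT_NULL, path, len);  /* caller frees */
    }

    static void xs_dir_close(struct xs_dir *d)
    {
        free(d->prefix);
    }

A caller would then write xs_dir_read(&dir, "hvmloader/generation-id-address", &len) without repeating the prefix, which is roughly the convenience being discussed here.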
Pasi Kärkkäinen
2011-Dec-16 13:25 UTC
Re: [PATCH 0 of 3] Support for VM generation ID save/restore and migrate
On Fri, Dec 16, 2011 at 11:47:26AM +0000, Paul Durrant wrote:
> This patch series adds support for preservation of the VM generation ID buffer
> address in xenstore across save/restore and migrate, and also code to increment
> the value in all cases except for migration.
>

Sorry for a stupid question, but what's the use case for this
'generation ID'?

-- Pasi

> The first patch modifies creation of the hvmloader key in xenstore and adds
> creation of a new read/write hvmloader/generation-id-addr key.
> The second patch changes hvmloader to use the new key (as opposed to the old
> data/generation-id key).
> The third patch adds the infrastructure to save and restore the VM generation ID
> address in xenstore and the code to increment the value.
>
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xensource.com
> http://lists.xensource.com/xen-devel
Paul Durrant
2011-Dec-16 14:07 UTC
Re: [PATCH 0 of 3] Support for VM generation ID save/restore and migrate
> -----Original Message-----
> From: Pasi Kärkkäinen [mailto:pasik@iki.fi]
> Sent: 16 December 2011 13:26
> To: Paul Durrant
> Cc: xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH 0 of 3] Support for VM generation ID
> save/restore and migrate
>
> On Fri, Dec 16, 2011 at 11:47:26AM +0000, Paul Durrant wrote:
> > This patch series adds support for preservation of the VM generation
> > ID buffer address in xenstore across save/restore and migrate, and
> > also code to increment the value in all cases except for migration.
> >
>
> Sorry for a stupid question, but what's the use case for this
> 'generation ID'?
>

Pasi,

I did mention this before on 1st December. Here's what I said then:

"I already know there is one OS that consumes this interface and another
hypervisor that produces it but unfortunately I'm not at liberty to say
much more because I'm under NDA. Hopefully a spec. will be forthcoming
at some point."

That really is all I believe I'm at liberty to say :-(

Paul

> -- Pasi
>
> > The first patch modifies creation of the hvmloader key in xenstore and
> > adds creation of a new read/write hvmloader/generation-id-addr key.
> > The second patch changes hvmloader to use the new key (as opposed to
> > the old data/generation-id key).
> > The third patch adds the infrastructure to save and restore the VM
> > generation ID address in xenstore and the code to increment the value.
> >
> >
> >
> > _______________________________________________
> > Xen-devel mailing list
> > Xen-devel@lists.xensource.com
> > http://lists.xensource.com/xen-devel
Pasi Kärkkäinen
2011-Dec-16 14:32 UTC
Re: [PATCH 0 of 3] Support for VM generation ID save/restore and migrate
On Fri, Dec 16, 2011 at 02:07:16PM +0000, Paul Durrant wrote:
> > -----Original Message-----
> > From: Pasi Kärkkäinen [mailto:pasik@iki.fi]
> > Sent: 16 December 2011 13:26
> > To: Paul Durrant
> > Cc: xen-devel@lists.xensource.com
> > Subject: Re: [Xen-devel] [PATCH 0 of 3] Support for VM generation ID
> > save/restore and migrate
> >
> > On Fri, Dec 16, 2011 at 11:47:26AM +0000, Paul Durrant wrote:
> > > This patch series adds support for preservation of the VM generation
> > > ID buffer address in xenstore across save/restore and migrate, and
> > > also code to increment the value in all cases except for migration.
> > >
> >
> > Sorry for a stupid question, but what's the use case for this
> > 'generation ID'?
> >
>
> Pasi,
>
> I did mention this before on 1st December. Here's what I said then:
>
> "I already know there is one OS that consumes this interface and another
> hypervisor that produces it but unfortunately I'm not at liberty to say
> much more because I'm under NDA. Hopefully a spec. will be forthcoming
> at some point."
>
> That really is all I believe I'm at liberty to say :-(
>

Oh, OK. I missed that comment. Thanks for the info :)

-- Pasi