# HG changeset patch # User yang zhang <yang.z.zhang@intel.com> # Date 1336099620 -28800 # Node ID b1229c22098499e677270d7b95ed0878347ac953 # Parent c6bde42c8845439183336602a78bc07869f3651b libxl: allow to set more than 31 vcpus In the current implementation, it uses an integer to record the current avail cpus and this only allows the user to specify 31 vcpus. In the following patch, it uses a cpumap instead of an integer, which makes more sense than before. Also there is no limit to the max vcpus. Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com> diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_create.c --- a/tools/libxl/libxl_create.c Thu Apr 12 14:01:27 2012 +0100 +++ b/tools/libxl/libxl_create.c Fri May 04 10:47:00 2012 +0800 @@ -112,8 +112,11 @@ int libxl__domain_build_info_setdefault( if (!b_info->max_vcpus) b_info->max_vcpus = 1; - if (!b_info->cur_vcpus) - b_info->cur_vcpus = 1; + if (!b_info->avail_vcpus.size) { + if (libxl_cpumap_alloc_size(CTX, &b_info->avail_vcpus, 1)) + return ERROR_NOMEM; + libxl_cpumap_set(&b_info->avail_vcpus, 0); + } if (!b_info->cpumap.size) { if (libxl_cpumap_alloc(CTX, &b_info->cpumap)) diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_dm.c --- a/tools/libxl/libxl_dm.c Thu Apr 12 14:01:27 2012 +0100 +++ b/tools/libxl/libxl_dm.c Fri May 04 10:47:00 2012 +0800 @@ -200,10 +200,35 @@ static char ** libxl__build_device_model libxl__sprintf(gc, "%d", b_info->max_vcpus), NULL); } - if (b_info->cur_vcpus) { + if (b_info->avail_vcpus.size) { + int i, nr_set_cpus = 0; + char *s; + + libxl_for_each_set_cpu(i, + ((libxl_domain_build_info *)b_info)->avail_vcpus) + nr_set_cpus++; + + s = (char *)malloc((nr_set_cpus + 3) / 4 + 1); + + memset(s + ((nr_set_cpus % 4) ? 
1 : 0), 'f', nr_set_cpus / 4); + switch (nr_set_cpus % 4) { + case 1: + s[0] = '1'; + break; + case 2: + s[0] = '3'; + break; + case 3: + s[0] = '7'; + break; + } + + s[(nr_set_cpus + 3) / 4] = '\0'; + flexarray_vappend(dm_args, "-vcpu_avail", - libxl__sprintf(gc, "0x%x", b_info->cur_vcpus), + libxl__sprintf(gc, "0x%s", s), NULL); + free(s); } for (i = 0; i < num_vifs; i++) { if (vifs[i].nictype == LIBXL_NIC_TYPE_IOEMU) { @@ -437,11 +462,16 @@ static char ** libxl__build_device_model } if (b_info->max_vcpus > 1) { flexarray_append(dm_args, "-smp"); - if (b_info->cur_vcpus) + if (b_info->avail_vcpus.size) { + int i, nr_set_cpus = 0; + libxl_for_each_set_cpu(i, + ((libxl_domain_build_info *)b_info)->avail_vcpus) + nr_set_cpus++; + flexarray_append(dm_args, libxl__sprintf(gc, "%d,maxcpus=%d", b_info->max_vcpus, - b_info->cur_vcpus)); - else + nr_set_cpus)); + } else flexarray_append(dm_args, libxl__sprintf(gc, "%d", b_info->max_vcpus)); } diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_dom.c --- a/tools/libxl/libxl_dom.c Thu Apr 12 14:01:27 2012 +0100 +++ b/tools/libxl/libxl_dom.c Fri May 04 10:47:00 2012 +0800 @@ -146,7 +146,8 @@ int libxl__build_post(libxl__gc *gc, uin ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn); for (i = 0; i < info->max_vcpus; i++) { ents[12+(i*2)] = libxl__sprintf(gc, "cpu/%d/availability", i); - ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 << i))) + ents[12+(i*2)+1] = (i && info->avail_vcpus.size + && !libxl_cpumap_test(&info->avail_vcpus, i)) ? 
"offline" : "online"; } @@ -297,7 +298,7 @@ static int hvm_build_set_params(xc_inter va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET); va_hvm->apic_mode = libxl_defbool_val(info->u.hvm.apic); va_hvm->nr_vcpus = info->max_vcpus; - memcpy(va_hvm->vcpu_online, &info->cur_vcpus, sizeof(info->cur_vcpus)); + memcpy(va_hvm->vcpu_online, info->avail_vcpus.map, info->avail_vcpus.size); for (i = 0, sum = 0; i < va_hvm->length; i++) sum += ((uint8_t *) va_hvm)[i]; va_hvm->checksum -= sum; diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_types.idl --- a/tools/libxl/libxl_types.idl Thu Apr 12 14:01:27 2012 +0100 +++ b/tools/libxl/libxl_types.idl Fri May 04 10:47:00 2012 +0800 @@ -231,7 +231,7 @@ MemKB = UInt(64, init_val = "LIBXL_MEMKB # libxl_file_reference_unmap. libxl_domain_build_info = Struct("domain_build_info",[ ("max_vcpus", integer), - ("cur_vcpus", integer), + ("avail_vcpus", libxl_cpumap), ("cpumap", libxl_cpumap), ("tsc_mode", libxl_tsc_mode), ("max_memkb", MemKB), diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_utils.c --- a/tools/libxl/libxl_utils.c Thu Apr 12 14:01:27 2012 +0100 +++ b/tools/libxl/libxl_utils.c Fri May 04 10:47:00 2012 +0800 @@ -447,6 +447,21 @@ int libxl_cpumap_alloc(libxl_ctx *ctx, l return 0; } +int libxl_cpumap_alloc_size(libxl_ctx *ctx, libxl_cpumap *cpumap, int max_cpus) +{ + int sz; + + if (max_cpus == 0) + return ERROR_FAIL; + + sz = (max_cpus + 7) / 8; + cpumap->map = calloc(sz, sizeof(*cpumap->map)); + if (!cpumap->map) + return ERROR_NOMEM; + cpumap->size = sz; + return 0; +} + void libxl_cpumap_dispose(libxl_cpumap *map) { free(map->map); diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_utils.h --- a/tools/libxl/libxl_utils.h Thu Apr 12 14:01:27 2012 +0100 +++ b/tools/libxl/libxl_utils.h Fri May 04 10:47:00 2012 +0800 @@ -65,6 +65,7 @@ int libxl_vdev_to_device_disk(libxl_ctx libxl_device_disk *disk); int libxl_cpumap_alloc(libxl_ctx *ctx, libxl_cpumap *cpumap); +int libxl_cpumap_alloc_size(libxl_ctx *ctx, 
libxl_cpumap *cpumap, int max_cpus); int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu); void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu); void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu); diff -r c6bde42c8845 -r b1229c220984 tools/libxl/xl_cmdimpl.c --- a/tools/libxl/xl_cmdimpl.c Thu Apr 12 14:01:27 2012 +0100 +++ b/tools/libxl/xl_cmdimpl.c Fri May 04 10:47:00 2012 +0800 @@ -589,7 +589,14 @@ static void parse_config_data(const char /* the following is the actual config parsing with overriding values in the structures */ if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) { b_info->max_vcpus = l; - b_info->cur_vcpus = (1 << l) - 1; + + if (libxl_cpumap_alloc_size(ctx, &b_info->avail_vcpus, l)) { + fprintf(stderr, "Unable to allocate cpumap\n"); + exit(1); + } + libxl_cpumap_set_none(&b_info->avail_vcpus); + while (l-- > 0) + libxl_cpumap_set((&b_info->avail_vcpus), l); } if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0))
Zhang, Yang Z writes ("[Xen-devel] [PATCH]libxl: allow to set more than 31 vcpus"):> libxl: allow to set more than 31 vcpus > > In current implementation, it uses integer for record current avail cpus and this only allows user to specify 31 vcpus. > In following patch, it uses cpumap instead interger which make more sense than before. Also there is no limit to the max vcpus....> - if (!b_info->cur_vcpus) > - b_info->cur_vcpus = 1; > + if (!b_info->avail_vcpus.size) { > + if (libxl_cpumap_alloc_size(CTX, &b_info->avail_vcpus, 1)) > + return ERROR_NOMEM; > + libxl_cpumap_set(&b_info->avail_vcpus, 0); > + }This error handling should really go away. Would you be able to provide a patch to make libxl_cpumap_alloc use libxl__zalloc(NULL, ) ? That never fails, so that would also mean libxl_cpumap_alloc can't fail.> diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_dm.c > --- a/tools/libxl/libxl_dm.c Thu Apr 12 14:01:27 2012 +0100 > +++ b/tools/libxl/libxl_dm.c Fri May 04 10:47:00 2012 +0800 > @@ -200,10 +200,35 @@ static char ** libxl__build_device_model > libxl__sprintf(gc, "%d", b_info->max_vcpus), > NULL); > } > - if (b_info->cur_vcpus) { > + if (b_info->avail_vcpus.size) { > + int i, nr_set_cpus = 0; > + char *s; > + > + libxl_for_each_set_cpu(i, > + ((libxl_domain_build_info *)b_info)->avail_vcpus) > + nr_set_cpus++; > + > + s = (char *)malloc((nr_set_cpus + 3) / 4 + 1); > + > + memset(s + ((nr_set_cpus % 4) ? 1 : 0), 'f', nr_set_cpus / 4); > + switch (nr_set_cpus % 4) { > + case 1: > + s[0] = '1'; > + break; > + case 2: > + s[0] = '3'; > + break; > + case 3: > + s[0] = '7'; > + break; > + } > + > + s[(nr_set_cpus + 3) / 4] = '\0';This is an arbitrary precision conversion to hex. 
Wouldn't it be better to provide libxl_cpumap_set_all, and libxl_cpumap_hexmask or something ?> @@ -437,11 +462,16 @@ static char ** libxl__build_device_model > } > if (b_info->max_vcpus > 1) { > flexarray_append(dm_args, "-smp"); > - if (b_info->cur_vcpus) > + if (b_info->avail_vcpus.size) { > + int i, nr_set_cpus = 0; > + libxl_for_each_set_cpu(i, > + ((libxl_domain_build_info *)b_info)->avail_vcpus) > + nr_set_cpus++;If we had libxl_cpumap_count_set this would be clearer.> diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_dom.c > --- a/tools/libxl/libxl_dom.c Thu Apr 12 14:01:27 2012 +0100 > +++ b/tools/libxl/libxl_dom.c Fri May 04 10:47:00 2012 +0800 > @@ -146,7 +146,8 @@ int libxl__build_post(libxl__gc *gc, uin > ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn); > for (i = 0; i < info->max_vcpus; i++) { > ents[12+(i*2)] = libxl__sprintf(gc, "cpu/%d/availability", i); > - ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 << i))) > + ents[12+(i*2)+1] = (i && info->avail_vcpus.size > + && !libxl_cpumap_test(&info->avail_vcpus, i)) > ? "offline" : "online"; If libxl_cpumap_test returned 0 if cpumap==NULL then this would be simpler. Thanks, Ian.
> -----Original Message----- > From: Ian Jackson [mailto:Ian.Jackson@eu.citrix.com] > Sent: Friday, May 11, 2012 12:53 AM > Subject: Re: [Xen-devel] [PATCH]libxl: allow to set more than 31 vcpus > > Zhang, Yang Z writes ("[Xen-devel] [PATCH]libxl: allow to set more than 31 > vcpus"): > > libxl: allow to set more than 31 vcpus > > > > In current implementation, it uses integer for record current avail cpus and > this only allows user to specify 31 vcpus. > > In following patch, it uses cpumap instead interger which make more sense > than before. Also there is no limit to the max vcpus. > ... > > - if (!b_info->cur_vcpus) > > - b_info->cur_vcpus = 1; > > + if (!b_info->avail_vcpus.size) { > > + if (libxl_cpumap_alloc_size(CTX, &b_info->avail_vcpus, 1)) > > + return ERROR_NOMEM; > > + libxl_cpumap_set(&b_info->avail_vcpus, 0); > > + } > > This error handling should really go away. Would you be able to > provide a patch to make libxl_cpumap_alloc use libxl__zalloc(NULL, ) ? > That never fails, so that would also mean libxl_cpumap_alloc can't > fail.I don't think so. Libxl_cpumap_alloc() also returned error when max_cpus equal zero. So the error handling cannot be avoid even using libxl__zalloc.> This is an arbitrary precision conversion to hex. 
Wouldn't it be > better to provide libxl_cpumap_set_all, and libxl_cpumap_hexmask or > something ?Yes, it's better to use a function to do the convert cpumap to hex.> > @@ -437,11 +462,16 @@ static char ** libxl__build_device_model > > } > > if (b_info->max_vcpus > 1) { > > flexarray_append(dm_args, "-smp"); > > - if (b_info->cur_vcpus) > > + if (b_info->avail_vcpus.size) { > > + int i, nr_set_cpus = 0; > > + libxl_for_each_set_cpu(i, > > + ((libxl_domain_build_info > *)b_info)->avail_vcpus) > > + nr_set_cpus++; > > If we had libxl_cpumap_count_set this would be clearer.Agree.> > diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_dom.c > > --- a/tools/libxl/libxl_dom.c Thu Apr 12 14:01:27 2012 +0100 > > +++ b/tools/libxl/libxl_dom.c Fri May 04 10:47:00 2012 +0800 > > @@ -146,7 +146,8 @@ int libxl__build_post(libxl__gc *gc, uin > > ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn); > > for (i = 0; i < info->max_vcpus; i++) { > > ents[12+(i*2)] = libxl__sprintf(gc, "cpu/%d/availability", i); > > - ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 > << i))) > > + ents[12+(i*2)+1] = (i && info->avail_vcpus.size > > + > && !libxl_cpumap_test(&info->avail_vcpus, i)) > > ? "offline" : "online"; > > If libxl_cpumap_test returned 0 if cpumap==NULL then this would be > simpler.Sorry. What your mean for this? Best regard yang
Zhang, Yang Z writes ("RE: [Xen-devel] [PATCH]libxl: allow to set more than 31 vcpus"):> Ian Jackson [mailto:Ian.Jackson@eu.citrix.com]: > > This error handling should really go away. Would you be able to > > provide a patch to make libxl_cpumap_alloc use libxl__zalloc(NULL, ) ? > > That never fails, so that would also mean libxl_cpumap_alloc can't > > fail. > > I don't think so. Libxl_cpumap_alloc() also returned error when > max_cpus equal zero. So the error handling cannot be avoid even > using libxl__zalloc.Hmm. Can it not be made to either abort or succeed in this case ?> > > diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_dom.c > > > --- a/tools/libxl/libxl_dom.c Thu Apr 12 14:01:27 2012 +0100 > > > +++ b/tools/libxl/libxl_dom.c Fri May 04 10:47:00 2012 +0800 > > > @@ -146,7 +146,8 @@ int libxl__build_post(libxl__gc *gc, uin > > > ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn); > > > for (i = 0; i < info->max_vcpus; i++) { > > > ents[12+(i*2)] = libxl__sprintf(gc, "cpu/%d/availability", i); > > > - ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 > > << i))) > > > + ents[12+(i*2)+1] = (i && info->avail_vcpus.size > > > + > > && !libxl_cpumap_test(&info->avail_vcpus, i)) > > > ? "offline" : "online"; > > > > If libxl_cpumap_test returned 0 if cpumap==NULL then this would be > > simpler. > > Sorry. What your mean for this?I mean that if you changed libxl_cpumap_test to tolerate a NULL cpumap and simply return false, then the caller wouldn't have to check for it. But you may prefer to keep it the current way. Ian.
On Fri, 2012-05-11 at 12:20 +0100, Ian Jackson wrote:> Zhang, Yang Z writes ("RE: [Xen-devel] [PATCH]libxl: allow to set more than 31 vcpus"): > > Ian Jackson [mailto:Ian.Jackson@eu.citrix.com]: > > > This error handling should really go away. Would you be able to > > > provide a patch to make libxl_cpumap_alloc use libxl__zalloc(NULL, ) ? > > > That never fails, so that would also mean libxl_cpumap_alloc can't > > > fail. > > > > I don't think so. Libxl_cpumap_alloc() also returned error when > > max_cpus equal zero. So the error handling cannot be avoid even > > using libxl__zalloc. > > Hmm. Can it not be made to either abort or succeed in this case ?max_cpus == 0 means xc_physinfo failed, which is either a memory allocation failure (bounce buffering) or a pretty serious error (toolstack/hypervisor mismatch which is logged or perhaps an XSM/privilege type thing) from which we are unlikely to recover. I'd say that a hard failure would be acceptable here. Ian.> > > > > diff -r c6bde42c8845 -r b1229c220984 tools/libxl/libxl_dom.c > > > > --- a/tools/libxl/libxl_dom.c Thu Apr 12 14:01:27 2012 +0100 > > > > +++ b/tools/libxl/libxl_dom.c Fri May 04 10:47:00 2012 +0800 > > > > @@ -146,7 +146,8 @@ int libxl__build_post(libxl__gc *gc, uin > > > > ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn); > > > > for (i = 0; i < info->max_vcpus; i++) { > > > > ents[12+(i*2)] = libxl__sprintf(gc, "cpu/%d/availability", i); > > > > - ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 > > > << i))) > > > > + ents[12+(i*2)+1] = (i && info->avail_vcpus.size > > > > + > > > && !libxl_cpumap_test(&info->avail_vcpus, i)) > > > > ? "offline" : "online"; > > > > > > If libxl_cpumap_test returned 0 if cpumap==NULL then this would be > > > simpler. > > > > Sorry. What your mean for this? > > I mean that if you changed libxl_cpumap_test to tolerate a NULL cpumap > and simply return false, then the caller wouldn't have to check for > it. 
But you may prefer to keep it the current way. > > Ian. > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > http://lists.xen.org/xen-devel