Change from v2: Add function libxl_cpumap_to_hex_string to covert cpumap to hex string. According to Ian''s comments, modified some codes to make the logic more reasonable. In current implementation, it uses integer to record current avail cpus and this only allows user to specify 31 vcpus. In following patch, it uses cpumap instead integer which make more sense than before. Also there is no limit to the max vcpus. Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com> diff -r 3b0eed731020 tools/libxl/libxl_create.c --- a/tools/libxl/libxl_create.c Fri Jun 01 09:27:17 2012 +0800 +++ b/tools/libxl/libxl_create.c Fri Jun 01 10:34:13 2012 +0800 @@ -146,8 +146,11 @@ int libxl__domain_build_info_setdefault( if (!b_info->max_vcpus) b_info->max_vcpus = 1; - if (!b_info->cur_vcpus) - b_info->cur_vcpus = 1; + if (!b_info->avail_vcpus.size) { + if (libxl_cpumap_alloc(CTX, &b_info->avail_vcpus, 1)) + return ERROR_FAIL; + libxl_cpumap_set(&b_info->avail_vcpus, 0); + } if (!b_info->cpumap.size) { if (libxl_cpumap_alloc(CTX, &b_info->cpumap, 0)) diff -r 3b0eed731020 tools/libxl/libxl_dm.c --- a/tools/libxl/libxl_dm.c Fri Jun 01 09:27:17 2012 +0800 +++ b/tools/libxl/libxl_dm.c Fri Jun 01 10:34:13 2012 +0800 @@ -160,6 +160,8 @@ static char ** libxl__build_device_model } if (b_info->type == LIBXL_DOMAIN_TYPE_HVM) { int ioemu_vifs = 0; + int nr_set_cpus = 0; + char *s; if (b_info->u.hvm.serial) { flexarray_vappend(dm_args, "-serial", b_info->u.hvm.serial, NULL); @@ -200,11 +202,14 @@ static char ** libxl__build_device_model libxl__sprintf(gc, "%d", b_info->max_vcpus), NULL); } - if (b_info->cur_vcpus) { - flexarray_vappend(dm_args, "-vcpu_avail", - libxl__sprintf(gc, "0x%x", b_info->cur_vcpus), - NULL); - } + + nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus); + s = libxl_cpumap_to_hex_string(&b_info->avail_vcpus); + flexarray_vappend(dm_args, "-vcpu_avail", + libxl__sprintf(gc, "0x%s", s), + NULL); + free(s); + for (i = 0; i < num_vifs; i++) { if (vifs[i].nictype == 
LIBXL_NIC_TYPE_IOEMU) { char *smac = libxl__sprintf(gc, @@ -441,11 +446,14 @@ static char ** libxl__build_device_model } if (b_info->max_vcpus > 1) { flexarray_append(dm_args, "-smp"); - if (b_info->cur_vcpus) + if (b_info->avail_vcpus.size) { + int nr_set_cpus = 0; + nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus); + flexarray_append(dm_args, libxl__sprintf(gc, "%d,maxcpus=%d", b_info->max_vcpus, - b_info->cur_vcpus)); - else + nr_set_cpus)); + } else flexarray_append(dm_args, libxl__sprintf(gc, "%d", b_info->max_vcpus)); } diff -r 3b0eed731020 tools/libxl/libxl_dom.c --- a/tools/libxl/libxl_dom.c Fri Jun 01 09:27:17 2012 +0800 +++ b/tools/libxl/libxl_dom.c Fri Jun 01 10:34:13 2012 +0800 @@ -195,7 +195,7 @@ int libxl__build_post(libxl__gc *gc, uin ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn); for (i = 0; i < info->max_vcpus; i++) { ents[12+(i*2)] = libxl__sprintf(gc, "cpu/%d/availability", i); - ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 << i))) + ents[12+(i*2)+1] = (i && !libxl_cpumap_test(&info->avail_vcpus, i)) ? 
"offline" : "online"; } @@ -350,7 +350,7 @@ static int hvm_build_set_params(xc_inter va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET); va_hvm->apic_mode = libxl_defbool_val(info->u.hvm.apic); va_hvm->nr_vcpus = info->max_vcpus; - memcpy(va_hvm->vcpu_online, &info->cur_vcpus, sizeof(info->cur_vcpus)); + memcpy(va_hvm->vcpu_online, info->avail_vcpus.map, info->avail_vcpus.size); for (i = 0, sum = 0; i < va_hvm->length; i++) sum += ((uint8_t *) va_hvm)[i]; va_hvm->checksum -= sum; diff -r 3b0eed731020 tools/libxl/libxl_types.idl --- a/tools/libxl/libxl_types.idl Fri Jun 01 09:27:17 2012 +0800 +++ b/tools/libxl/libxl_types.idl Fri Jun 01 10:34:13 2012 +0800 @@ -235,7 +235,7 @@ libxl_sched_params = Struct("sched_param libxl_domain_build_info = Struct("domain_build_info",[ ("max_vcpus", integer), - ("cur_vcpus", integer), + ("avail_vcpus", libxl_cpumap), ("cpumap", libxl_cpumap), ("tsc_mode", libxl_tsc_mode), ("max_memkb", MemKB), diff -r 3b0eed731020 tools/libxl/libxl_utils.c --- a/tools/libxl/libxl_utils.c Fri Jun 01 09:27:17 2012 +0800 +++ b/tools/libxl/libxl_utils.c Fri Jun 01 10:34:13 2012 +0800 @@ -533,6 +533,28 @@ void libxl_cpumap_reset(libxl_cpumap *cp cpumap->map[cpu / 8] &= ~(1 << (cpu & 7)); } +int libxl_cpumap_count_set(const libxl_cpumap *cpumap) +{ + int i, nr_set_cpus = 0; + libxl_for_each_set_cpu(i, *((libxl_cpumap *)cpumap)) + nr_set_cpus++; + + return nr_set_cpus; +} + +char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap) +{ + int i = cpumap->size; + char *p = libxl__zalloc(NULL, cpumap->size * 2 + 1); + char *q = p; + while(--i >= 0) { + sprintf((char *)p, "%02x", cpumap->map[i]); + p += 2; + } + *p = ''\0''; + return q; +} + int libxl_get_max_cpus(libxl_ctx *ctx) { return xc_get_max_cpus(ctx->xch); diff -r 3b0eed731020 tools/libxl/libxl_utils.h --- a/tools/libxl/libxl_utils.h Fri Jun 01 09:27:17 2012 +0800 +++ b/tools/libxl/libxl_utils.h Fri Jun 01 10:34:13 2012 +0800 @@ -67,6 +67,8 @@ int libxl_cpumap_alloc(libxl_ctx *ctx, l int 
libxl_cpumap_test(libxl_cpumap *cpumap, int cpu); void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu); void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu); +int libxl_cpumap_count_set(const libxl_cpumap *cpumap); +char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap); static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap) { memset(cpumap->map, -1, cpumap->size); diff -r 3b0eed731020 tools/libxl/xl_cmdimpl.c --- a/tools/libxl/xl_cmdimpl.c Fri Jun 01 09:27:17 2012 +0800 +++ b/tools/libxl/xl_cmdimpl.c Fri Jun 01 10:34:13 2012 +0800 @@ -650,7 +650,14 @@ static void parse_config_data(const char if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) { b_info->max_vcpus = l; - b_info->cur_vcpus = (1 << l) - 1; + + if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) { + fprintf(stderr, "Unable to allocate cpumap\n"); + exit(1); + } + libxl_cpumap_set_none(&b_info->avail_vcpus); + while (l-- > 0) + libxl_cpumap_set((&b_info->avail_vcpus), l); } if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0))
Dario Faggioli
2012-Jun-01 06:35 UTC
Re: [PATCH v3] libxl: allow to set more than 31 vcpus
On Fri, 2012-06-01 at 02:48 +0000, Zhang, Yang Z wrote:> Change from v2: > Add function libxl_cpumap_to_hex_string to covert cpumap to hex string. > According to Ian''s comments, modified some codes to make the logic more reasonable. > > In current implementation, it uses integer to record current avail cpus and this only allows user to specify 31 vcpus. > In following patch, it uses cpumap instead integer which make more sense than before. Also there is no limit to the max vcpus. >This part I understand, and looks reasonable. I also see this is the whole point of your other patch, however ...> diff -r 3b0eed731020 tools/libxl/xl_cmdimpl.c > --- a/tools/libxl/xl_cmdimpl.c Fri Jun 01 09:27:17 2012 +0800 > +++ b/tools/libxl/xl_cmdimpl.c Fri Jun 01 10:34:13 2012 +0800 > @@ -650,7 +650,14 @@ static void parse_config_data(const char > > if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) { > b_info->max_vcpus = l; > - b_info->cur_vcpus = (1 << l) - 1; > + > + if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) { > + fprintf(stderr, "Unable to allocate cpumap\n"); > + exit(1); > + } >... Do you mind explaining me what would have happened here without your previous patch, i.e., by just using the existing libxl_cpumap_alloc ? I might be wrong, but I was wondering whether it is worth changing the interface like that for just this single case which saves, what, 1 to 3 bytes per domain? Regards, Dario -- <<This happens because I choose it to happen!>> (Raistlin Majere) ----------------------------------------------------------------- Dario Faggioli, Ph.D, http://retis.sssup.it/people/faggioli Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK) _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
> -----Original Message----- > From: Dario Faggioli [mailto:raistlin@linux.it] > Sent: Friday, June 01, 2012 2:36 PM > To: Zhang, Yang Z > Cc: xen-devel@lists.xensource.com; Ian Campbell > Subject: Re: [Xen-devel] [PATCH v3 ]libxl: allow to set more than 31 vcpus > > On Fri, 2012-06-01 at 02:48 +0000, Zhang, Yang Z wrote: > > Change from v2: > > Add function libxl_cpumap_to_hex_string to covert cpumap to hex string. > > According to Ian''s comments, modified some codes to make the logic more > reasonable. > > > > In current implementation, it uses integer to record current avail cpus and > this only allows user to specify 31 vcpus. > > In following patch, it uses cpumap instead integer which make more sense > than before. Also there is no limit to the max vcpus. > > > This part I understand, and looks reasonable. > > I also see this is the whole point of your other patch, however ... > > > diff -r 3b0eed731020 tools/libxl/xl_cmdimpl.c > > --- a/tools/libxl/xl_cmdimpl.c Fri Jun 01 09:27:17 2012 +0800 > > +++ b/tools/libxl/xl_cmdimpl.c Fri Jun 01 10:34:13 2012 +0800 > > @@ -650,7 +650,14 @@ static void parse_config_data(const char > > > > if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) { > > b_info->max_vcpus = l; > > - b_info->cur_vcpus = (1 << l) - 1; > > + > > + if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) { > > + fprintf(stderr, "Unable to allocate cpumap\n"); > > + exit(1); > > + } > > > ... Do you mind explaining me what would have happened here without your > previous patch, i.e., by just using the existing libxl_cpumap_alloc ? > > I might be wrong, but I was wondering whether it is worth changing the > interface like that for just this single case which saves, what, 1 to 3 > bytes per domain? >It''s ok to use existing libxl_cpumap_alloc(). But in my case, there is no need to use the existing interface. And, in future, there are some cases may not need to allocate max size cpumap too. So it''s better to extend the current interface. best regards yang
Dario Faggioli
2012-Jun-01 08:44 UTC
Re: [PATCH v3] libxl: allow to set more than 31 vcpus
On Fri, 2012-06-01 at 07:18 +0000, Zhang, Yang Z wrote:> > > diff -r 3b0eed731020 tools/libxl/xl_cmdimpl.c > > > --- a/tools/libxl/xl_cmdimpl.c Fri Jun 01 09:27:17 2012 +0800 > > > +++ b/tools/libxl/xl_cmdimpl.c Fri Jun 01 10:34:13 2012 +0800 > > > @@ -650,7 +650,14 @@ static void parse_config_data(const char > > > > > > if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) { > > > b_info->max_vcpus = l; > > > - b_info->cur_vcpus = (1 << l) - 1; > > > + > > > + if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) { > > > + fprintf(stderr, "Unable to allocate cpumap\n"); > > > + exit(1); > > > + } > > > > > ... Do you mind explaining me what would have happened here without your > > previous patch, i.e., by just using the existing libxl_cpumap_alloc ? > > > > I might be wrong, but I was wondering whether it is worth changing the > > interface like that for just this single case which saves, what, 1 to 3 > > bytes per domain? > > > > It''s ok to use existing libxl_cpumap_alloc(). But in my case, there is no need to use the existing interface. >Ok.> And, in future, there are some cases may not need to allocate max size cpumap too > So it''s better to extend the current interface. >Well, maybe... Who knows what future reserves ?!? :-D Anyway, although I see your point, I really really dislike the new parameter in libxl_cpumap_alloc(), but of course it is not something up to me to decide, neither it is something I''d loose some sleep for. :-P Thanks and Regards, Dario -- <<This happens because I choose it to happen!>> (Raistlin Majere) ----------------------------------------------------------------- Dario Faggioli, Ph.D, http://retis.sssup.it/people/faggioli Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK) _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
On Fri, 2012-06-01 at 09:44 +0100, Dario Faggioli wrote:> On Fri, 2012-06-01 at 07:18 +0000, Zhang, Yang Z wrote: > > > > diff -r 3b0eed731020 tools/libxl/xl_cmdimpl.c > > > > --- a/tools/libxl/xl_cmdimpl.c Fri Jun 01 09:27:17 2012 +0800 > > > > +++ b/tools/libxl/xl_cmdimpl.c Fri Jun 01 10:34:13 2012 +0800 > > > > @@ -650,7 +650,14 @@ static void parse_config_data(const char > > > > > > > > if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) { > > > > b_info->max_vcpus = l; > > > > - b_info->cur_vcpus = (1 << l) - 1; > > > > + > > > > + if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) { > > > > + fprintf(stderr, "Unable to allocate cpumap\n"); > > > > + exit(1); > > > > + } > > > > > > > ... Do you mind explaining me what would have happened here without your > > > previous patch, i.e., by just using the existing libxl_cpumap_alloc ? > > > > > > I might be wrong, but I was wondering whether it is worth changing the > > > interface like that for just this single case which saves, what, 1 to 3 > > > bytes per domain? > > > > > > > It''s ok to use existing libxl_cpumap_alloc(). But in my case, there is no need to use the existing interface. > > > Ok. > > > And, in future, there are some cases may not need to allocate max size cpumap too > > So it''s better to extend the current interface. > > > Well, maybe... Who knows what future reserves ?!? :-D > > Anyway, although I see your point, I really really dislike the new > parameter in libxl_cpumap_alloc(),What about it do you dislike? The special meaning of 0 or its existence at all?> but of course it is not something up > to me to decide, neither it is something I''d loose some sleep for. :-PYou could give vcpus > pcpus (dumb, but e.g. for debugging) and in that case the existing libxl_cpumap_alloc behaviour (which sizes based on the # of phys cpus) is incorrect. I suggested that rather than having libxl_cpumap_alloc_size() we just combine this with the existing fn with a new parameter. Ian.
Dario Faggioli
2012-Jun-01 09:32 UTC
Re: [PATCH v3] libxl: allow to set more than 31 vcpus
On Fri, 2012-06-01 at 09:58 +0100, Ian Campbell wrote:> > > And, in future, there are some cases may not need to allocate max size cpumap too > > > So it''s better to extend the current interface. > > > > > Well, maybe... Who knows what future reserves ?!? :-D > > > > Anyway, although I see your point, I really really dislike the new > > parameter in libxl_cpumap_alloc(), > > What about it do you dislike? The special meaning of 0 or its existence > at all? >It''s pretty much all about its existence, given the fact it is _always_ 0 apart from one single case, where it could well be zero as well. :-) It''s just I find it uncomfortable to have it, but of course I could live with it if it buys something. It''s the latter I''m not sure I see...> > but of course it is not something up > > to me to decide, neither it is something I''d loose some sleep for. :-P > > You could give vcpus > pcpus (dumb, but e.g. for debugging) and in that > case the existing libxl_cpumap_alloc behaviour (which sizes based on the > # of phys cpus) is incorrect. >Mmm... Maybe this is still related to the fact that on all the test boxes I''ve used, libxl_get_max_cpus() returns something higher than the actual physical CPU count of those boxes themselves, but I just created an 18 VCPUs VM on my 16 PCPUs test machine... I take the above like you can''t, can you? Maybe it is that *_max_cpus() logic that needs some attention? :-O> I suggested that rather than having > libxl_cpumap_alloc_size() we just combine this with the existing fn with > a new parameter. >Yeah, I checked that in the archives, and that''s not the issue for me. If the we decide we want the thing, I''m fine with both the ''add new'' and ''modify the existing'' approaches. 
Thanks and Regards, Dario | -- <<This happens because I choose it to happen!>> (Raistlin Majere) ----------------------------------------------------------------- Dario Faggioli, Ph.D, http://retis.sssup.it/people/faggioli Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK) _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
On Fri, 2012-06-01 at 10:32 +0100, Dario Faggioli wrote:> On Fri, 2012-06-01 at 09:58 +0100, Ian Campbell wrote: > > > > And, in future, there are some cases may not need to allocate max size cpumap too > > > > So it''s better to extend the current interface. > > > > > > > Well, maybe... Who knows what future reserves ?!? :-D > > > > > > Anyway, although I see your point, I really really dislike the new > > > parameter in libxl_cpumap_alloc(), > > > > What about it do you dislike? The special meaning of 0 or its existence > > at all? > > > It''s pretty much all about its existence, given the fact it is _always_ > 0 apart from one single case, where it could well be zero as well. :-)Except as discussed below it can''t be zero..> It''s just I find it uncomfortable to have it, but of course I could live > with it if it buys something. It''s the latter I''m not sure I see... > > > > but of course it is not something up > > > to me to decide, neither it is something I''d loose some sleep for. :-P > > > > You could give vcpus > pcpus (dumb, but e.g. for debugging) and in that > > case the existing libxl_cpumap_alloc behaviour (which sizes based on the > > # of phys cpus) is incorrect. > > > Mmm... Maybe this is still related to the fact that on all the test > boxes I''ve used, libxl_get_max_cpus() returns something higher than the > actual physical CPU count of those boxes themselves, but I just created > an 18 VCPUs VM on my 16 PCPUs test machine... I take the above like you > can''t, can you?I think libxl_get_max_cpus and/or libxl_cpumap_alloc involved some amount of rounding up, if you tried to create a 33 vcpu guest on that machine (or a machine with <= 32 cpus) it may not work...> Maybe it is that *_max_cpus() logic that needs some attention? :-Omax_cpus returns the max number of physical cpus, and I think it does so correctly (perhaps with some slop at the top end). 
In some cases we want to talk about virtual cpus and this change lets us size cpumap''s of virtual cpus more appropriately (be that larger or smaller than the number of physical cpus).> > > I suggested that rather than having > > libxl_cpumap_alloc_size() we just combine this with the existing fn with > > a new parameter. > > > Yeah, I checked that in the archives, and that''s not the issue for me. > If the we decide we want the thing, I''m fine with both the ''add new'' and > ''modify the existing'' approaches. > > Thanks and Regards, > Dario > |
Dario Faggioli
2012-Jun-01 10:23 UTC
Re: [PATCH v3] libxl: allow to set more than 31 vcpus
On Fri, 2012-06-01 at 10:41 +0100, Ian Campbell wrote:> > Mmm... Maybe this is still related to the fact that on all the test > > boxes I''ve used, libxl_get_max_cpus() returns something higher than the > > actual physical CPU count of those boxes themselves, but I just created > > an 18 VCPUs VM on my 16 PCPUs test machine... I take the above like you > > can''t, can you? > > I think libxl_get_max_cpus and/or libxl_cpumap_alloc involved some > amount of rounding up, if you tried to create a 33 vcpu guest on that > machine (or a machine with <= 32 cpus) it may not work... >It does: max_cpus = libxl_get_max_cpus(ctx); if (max_cpus == 0) return ERROR_FAIL; sz = (max_cpus + 7) / 8; So in my case it should be 16 + 7 = 23 / 8 = 2 ... Right? Then we have: cpumap->map = calloc(sz, sizeof(*cpumap->map)); Which make me thinking I''m getting a 2 elements uint8_t array for storing the cpumap (please correct me if I''m wrong, I frequently am when it comes to math! :-P). That''s way I wasn''t expecting to be able to exceed 16 VCPUs. Anyway, I just tried 25 and 33 and 65, and creation of the domain worked without raising any errors! Then I double-checked, and saw that, in the ''above 16'' cases, Xen deliberately paused a lot of VCPUs. Also, if I log into the guest /proc/cpuinfo reports only CPUs 0 and 32 (and 64 in the 65 VCPUs case). To conclude, I''m not sure what''s going on, but I don''t think is something we would want... :-/> > Maybe it is that *_max_cpus() logic that needs some attention? :-O > > max_cpus returns the max number of physical cpus, and I think it does so > correctly (perhaps with some slop at the top end). >As we also saw in another thread, it seems to return the max_cpu_id+1, which is different from the number of physical CPUs (at least in my case). And in fact, I''m sure it returns 64 on my box. However, that does not appear to be the main issue here, as creation seem to succeed no matter how much VCPUs I ask for, but then a number of them are off. 
:-O If that is a known/documented behaviour, fine, I just haven''t found it. Otherwise, perhaps I can investigate a bit what''s going on, if that is considered interesting...> In some cases we want > to talk about virtual cpus and this change lets us size cpumap''s of > virtual cpus more appropriately (be that larger or smaller than the > number of physical cpus). >I have no argument against this. As I tried to explain, I thought /* get max. number of cpus supported by hypervisor */ int libxl_get_max_cpus(libxl_ctx *ctx); "max. number of cpus supported by hypervisor" to be different from the actual number of physical processors, and I was sort-of mislead by the machine I use to test Xen every day (where that is actually happening!). If it is not like that, I guess I can agree with you on this change. Thanks and Regards, Dario -- <<This happens because I choose it to happen!>> (Raistlin Majere) ----------------------------------------------------------------- Dario Faggioli, Ph.D, http://retis.sssup.it/people/faggioli Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK) _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
On Fri, 2012-06-01 at 11:23 +0100, Dario Faggioli wrote:> On Fri, 2012-06-01 at 10:41 +0100, Ian Campbell wrote: > > > Mmm... Maybe this is still related to the fact that on all the test > > > boxes I''ve used, libxl_get_max_cpus() returns something higher than the > > > actual physical CPU count of those boxes themselves, but I just created > > > an 18 VCPUs VM on my 16 PCPUs test machine... I take the above like you > > > can''t, can you? > > > > I think libxl_get_max_cpus and/or libxl_cpumap_alloc involved some > > amount of rounding up, if you tried to create a 33 vcpu guest on that > > machine (or a machine with <= 32 cpus) it may not work... > > > It does: > > max_cpus = libxl_get_max_cpus(ctx); > if (max_cpus == 0) > return ERROR_FAIL; > > sz = (max_cpus + 7) / 8; > > So in my case it should be 16 + 7 = 23 / 8 = 2 ... Right?Yeah, it seems we do this at byte rather than word granularity like I first thought.> Then we have: > > cpumap->map = calloc(sz, sizeof(*cpumap->map)); > > Which make me thinking I''m getting a 2 elements uint8_t array for > storing the cpumap (please correct me if I''m wrong, I frequently am when > it comes to math! :-P). That''s way I wasn''t expecting to be able to > exceed 16 VCPUs. > > Anyway, I just tried 25 and 33 and 65, and creation of the domain worked > without raising any errors! Then I double-checked, and saw that, in the > ''above 16'' cases, Xen deliberately paused a lot of VCPUs. Also, if I log > into the guest /proc/cpuinfo reports only CPUs 0 and 32 (and 64 in the > 65 VCPUs case).All 64 online? It might be that the issue being fixed here only manifests on HVM. I''m not really sure how 64 would work otherwise since cur_vcpus in the IDL is definitely an int, which is what needs fixing! 
libxl__build_post is also probably buggy with max_vcpus > nr-bits-in(curr_vcpus) and from the looks of it it just overflows off the end(!), which is also fixed here...> To conclude, I''m not sure what''s going on, but I don''t think is > something we would want... :-/ > > > > Maybe it is that *_max_cpus() logic that needs some attention? :-O > > > > max_cpus returns the max number of physical cpus, and I think it does so > > correctly (perhaps with some slop at the top end). > > > As we also saw in another thread, it seems to return the max_cpu_id+1, > which is different from the number of physical CPUs (at least in my > case). And in fact, I''m sure it returns 64 on my box. However, that does > not appear to be the main issue here, as creation seem to succeed no > matter how much VCPUs I ask for, but then a number of them are off. :-O > > If that is a known/documented behaviour, fine, I just haven''t found it. > Otherwise, perhaps I can investigate a bit what''s going on, if that is > considered interesting... > > > In some cases we want > > to talk about virtual cpus and this change lets us size cpumap''s of > > virtual cpus more appropriately (be that larger or smaller than the > > number of physical cpus). > > > I have no argument against this. As I tried to explain, I thought > > /* get max. number of cpus supported by hypervisor */ > int libxl_get_max_cpus(libxl_ctx *ctx); > > "max. number of cpus supported by hypervisor" to be different from the > actual number of physical processors, and I was sort-of mislead by the > machine I use to test Xen every day (where that is actually happening!). > If it is not like that, I guess I can agree with you on this change.It''s certainly supposed to be "get max. number of physical cpus", quite how that relates to the actual number of physical cpus I''m not sure. It''s definitely not something to do with virtual cpus (for which there is a limit, but not this one...) Ian.
Dario Faggioli
2012-Jun-01 10:47 UTC
Re: [PATCH v3] libxl: allow to set more than 31 vcpus
On Fri, 2012-06-01 at 11:38 +0100, Ian Campbell wrote:> > Anyway, I just tried 25 and 33 and 65, and creation of the domain worked > > without raising any errors! Then I double-checked, and saw that, in the > > ''above 16'' cases, Xen deliberately paused a lot of VCPUs. Also, if I log > > into the guest /proc/cpuinfo reports only CPUs 0 and 32 (and 64 in the > > 65 VCPUs case). > > All 64 online? >I see all of them from the host (xl vcpu-list) but a kind of random number of them from the guest...> It might be that the issue being fixed here only manifests on HVM. I''m > not really sure how 64 would work otherwise since cur_vcpus in the IDL > is definitely an int, which is what needs fixing! >and in fact, I agreed with that part of the patch. Also, yes, I''m trying PV but can try HVM as well and see what happens.> libxl__build_post is also probably buggy with max_vcpus > > nr-bits-in(curr_vcpus) and from the looks of it it just overflows off > the end(!), which is also fixed here... >What I can do is trying again with the patch applied, and if it still behaves weird I''ll go seeing what might be still missing.> > "max. number of cpus supported by hypervisor" to be different from the > > actual number of physical processors, and I was sort-of mislead by the > > machine I use to test Xen every day (where that is actually happening!). > > If it is not like that, I guess I can agree with you on this change. > > It''s certainly supposed to be "get max. number of physical cpus", quite > how that relates to the actual number of physical cpus I''m not sure. > > It''s definitely not something to do with virtual cpus (for which there > is a limit, but not this one...) >Ok, with that "physical" you''re putting there, I see and agree it could be useful untie it from the "virtual world". 
:-) Thanks and Regards, Dario -- <<This happens because I choose it to happen!>> (Raistlin Majere) ----------------------------------------------------------------- Dario Faggioli, Ph.D, http://retis.sssup.it/people/faggioli Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK) _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
On Fri, 2012-06-01 at 11:47 +0100, Dario Faggioli wrote:> On Fri, 2012-06-01 at 11:38 +0100, Ian Campbell wrote: > > > Anyway, I just tried 25 and 33 and 65, and creation of the domain worked > > > without raising any errors! Then I double-checked, and saw that, in the > > > ''above 16'' cases, Xen deliberately paused a lot of VCPUs. Also, if I log > > > into the guest /proc/cpuinfo reports only CPUs 0 and 32 (and 64 in the > > > 65 VCPUs case). > > > > All 64 online? > > > I see all of them from the host (xl vcpu-list) but a kind of random > number of them from the guest...Which is probably the overflow I mentioned giving you a random set of online cpus after #32...> > libxl__build_post is also probably buggy with max_vcpus > > > nr-bits-in(curr_vcpus) and from the looks of it it just overflows off > > the end(!), which is also fixed here... > > > What I can do is trying again with the patch applied, and if it still > behaves weird I''ll go seeing what might be still missing. > > > > "max. number of cpus supported by hypervisor" to be different from the > > > actual number of physical processors, and I was sort-of mislead by the > > > machine I use to test Xen every day (where that is actually happening!). > > > If it is not like that, I guess I can agree with you on this change. > > > > It''s certainly supposed to be "get max. number of physical cpus", quite > > how that relates to the actual number of physical cpus I''m not sure. > > > > It''s definitely not something to do with virtual cpus (for which there > > is a limit, but not this one...) > > > Ok, with that "physical" you''re putting there, I see and agree it could > be useful untie it from the "virtual world". :-)Is that an ACK for this and/or the previous patch?> > Thanks and Regards, > Dario >
Dario Faggioli
2012-Jun-01 11:04 UTC
Re: [PATCH v3] libxl: allow to set more than 31 vcpus
On Fri, 2012-06-01 at 11:50 +0100, Ian Campbell wrote:> > Ok, with that "physical" you''re putting there, I see and agree it could > > be useful untie it from the "virtual world". :-) > > Is that an ACK for this and/or the previous patch? >Oh, yes, sorry for being a bit cryptic. For what my opinion counts, it is. :-) Thanks and Regards, Dario -- <<This happens because I choose it to happen!>> (Raistlin Majere) ----------------------------------------------------------------- Dario Faggioli, Ph.D, http://retis.sssup.it/people/faggioli Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK) _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
On Fri, 2012-06-01 at 12:04 +0100, Dario Faggioli wrote:> On Fri, 2012-06-01 at 11:50 +0100, Ian Campbell wrote: > > > Ok, with that "physical" you''re putting there, I see and agree it could > > > be useful untie it from the "virtual world". :-) > > > > Is that an ACK for this and/or the previous patch? > > > Oh, yes, sorry for being a bit cryptic. For what my opinion counts, it > is. :-)Everyone''s Ack is worthwhile. I''ll add: Acked-by: Dario Faggioli <Dario.Faggioli@citrix.com> Please do supply the actual string (for formalities sake) next time. Ian.
On Fri, 2012-06-01 at 03:48 +0100, Zhang, Yang Z wrote:> Change from v2: > Add function libxl_cpumap_to_hex_string to covert cpumap to hex string. > According to Ian''s comments, modified some codes to make the logic more reasonable. > > In current implementation, it uses integer to record current avail cpus and this only allows user to specify 31 vcpus. > In following patch, it uses cpumap instead integer which make more sense than before. Also there is no limit to the max vcpus. > > Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com> > > diff -r 3b0eed731020 tools/libxl/libxl_create.c > --- a/tools/libxl/libxl_create.c Fri Jun 01 09:27:17 2012 +0800 > +++ b/tools/libxl/libxl_create.c Fri Jun 01 10:34:13 2012 +0800 > @@ -146,8 +146,11 @@ int libxl__domain_build_info_setdefault( > > if (!b_info->max_vcpus) > b_info->max_vcpus = 1; > - if (!b_info->cur_vcpus) > - b_info->cur_vcpus = 1; > + if (!b_info->avail_vcpus.size) { > + if (libxl_cpumap_alloc(CTX, &b_info->avail_vcpus, 1)) > + return ERROR_FAIL; > + libxl_cpumap_set(&b_info->avail_vcpus, 0); > + } > > if (!b_info->cpumap.size) { > if (libxl_cpumap_alloc(CTX, &b_info->cpumap, 0)) > diff -r 3b0eed731020 tools/libxl/libxl_dm.c > --- a/tools/libxl/libxl_dm.c Fri Jun 01 09:27:17 2012 +0800 > +++ b/tools/libxl/libxl_dm.c Fri Jun 01 10:34:13 2012 +0800 > @@ -160,6 +160,8 @@ static char ** libxl__build_device_model > } > if (b_info->type == LIBXL_DOMAIN_TYPE_HVM) { > int ioemu_vifs = 0; > + int nr_set_cpus = 0; > + char *s; > > if (b_info->u.hvm.serial) { > flexarray_vappend(dm_args, "-serial", b_info->u.hvm.serial, NULL); > @@ -200,11 +202,14 @@ static char ** libxl__build_device_model > libxl__sprintf(gc, "%d", b_info->max_vcpus), > NULL); > } > - if (b_info->cur_vcpus) { > - flexarray_vappend(dm_args, "-vcpu_avail", > - libxl__sprintf(gc, "0x%x", b_info->cur_vcpus), > - NULL); > - } > + > + nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus); > + s = libxl_cpumap_to_hex_string(&b_info->avail_vcpus); > + 
flexarray_vappend(dm_args, "-vcpu_avail", > + libxl__sprintf(gc, "0x%s", s),You might as well make to_hex_string include the 0x?> + NULL); > + free(s); > + > for (i = 0; i < num_vifs; i++) { > if (vifs[i].nictype == LIBXL_NIC_TYPE_IOEMU) { > char *smac = libxl__sprintf(gc, > diff -r 3b0eed731020 tools/libxl/libxl_dom.c > --- a/tools/libxl/libxl_dom.c Fri Jun 01 09:27:17 2012 +0800 > +++ b/tools/libxl/libxl_dom.c Fri Jun 01 10:34:13 2012 +0800 > @@ -195,7 +195,7 @@ int libxl__build_post(libxl__gc *gc, uin > ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn); > for (i = 0; i < info->max_vcpus; i++) { > ents[12+(i*2)] = libxl__sprintf(gc, "cpu/%d/availability", i); > - ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 << i))) > + ents[12+(i*2)+1] = (i && !libxl_cpumap_test(&info->avail_vcpus, i)) > ? "offline" : "online";Weren''t you going to drop the "i &&" and invert the ternary?> } > > @@ -350,7 +350,7 @@ static int hvm_build_set_params(xc_inter > va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET); > va_hvm->apic_mode = libxl_defbool_val(info->u.hvm.apic); > va_hvm->nr_vcpus = info->max_vcpus; > - memcpy(va_hvm->vcpu_online, &info->cur_vcpus, sizeof(info->cur_vcpus)); > + memcpy(va_hvm->vcpu_online, info->avail_vcpus.map, info->avail_vcpus.size);This needs some sort of limit check, probably in terms of HVM_MAX_VCPUS. otherwise a particularly large vcpus= in the config will cause mayhem... 
I guess this should probably be done in libxl__domain_build_info_setdefaults.> diff -r 3b0eed731020 tools/libxl/libxl_utils.c > --- a/tools/libxl/libxl_utils.c Fri Jun 01 09:27:17 2012 +0800 > +++ b/tools/libxl/libxl_utils.c Fri Jun 01 10:34:13 2012 +0800 > @@ -533,6 +533,28 @@ void libxl_cpumap_reset(libxl_cpumap *cp > cpumap->map[cpu / 8] &= ~(1 << (cpu & 7)); > } > > +int libxl_cpumap_count_set(const libxl_cpumap *cpumap) > +{ > + int i, nr_set_cpus = 0; > + libxl_for_each_set_cpu(i, *((libxl_cpumap *)cpumap))Please stop this habit of yours of casting away the const to make the warning go away, it is almost always wrong and on the odd occasion that it is right it should have a comment explaining why... In this case please make libxl_cpumap_test const correct instead.> + nr_set_cpus++; > + > + return nr_set_cpus; > +} > + > +char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap) > +{ > + int i = cpumap->size; > + char *p = libxl__zalloc(NULL, cpumap->size * 2 + 1); > + char *q = p; > + while(--i >= 0) { > + sprintf((char *)p, "%02x", cpumap->map[i]);Why this cast?> + p += 2; > + } > + *p = ''\0''; > + return q; > +} > + > int libxl_get_max_cpus(libxl_ctx *ctx) > { > return xc_get_max_cpus(ctx->xch); > diff -r 3b0eed731020 tools/libxl/libxl_utils.h > --- a/tools/libxl/libxl_utils.h Fri Jun 01 09:27:17 2012 +0800 > +++ b/tools/libxl/libxl_utils.h Fri Jun 01 10:34:13 2012 +0800 > @@ -67,6 +67,8 @@ int libxl_cpumap_alloc(libxl_ctx *ctx, l > int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu); > void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu); > void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu); > +int libxl_cpumap_count_set(const libxl_cpumap *cpumap);Please add a comment describing this function, it should remind the caller that they are responsible for freeing the result.> +char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap); > static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap) > { > memset(cpumap->map, -1, cpumap->size); > diff 
-r 3b0eed731020 tools/libxl/xl_cmdimpl.c > --- a/tools/libxl/xl_cmdimpl.c Fri Jun 01 09:27:17 2012 +0800 > +++ b/tools/libxl/xl_cmdimpl.c Fri Jun 01 10:34:13 2012 +0800 > @@ -650,7 +650,14 @@ static void parse_config_data(const char > > if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) { > b_info->max_vcpus = l; > - b_info->cur_vcpus = (1 << l) - 1; > + > + if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) { > + fprintf(stderr, "Unable to allocate cpumap\n"); > + exit(1); > + } > + libxl_cpumap_set_none(&b_info->avail_vcpus); > + while (l-- > 0) > + libxl_cpumap_set((&b_info->avail_vcpus), l);This while loop is == libxl_cpumap_set_any.> } > > if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0))
On Fri, 2012-06-01 at 12:44 +0100, Ian Campbell wrote:> On Fri, 2012-06-01 at 03:48 +0100, Zhang, Yang Z wrote: > > Change from v2: > > Add function libxl_cpumap_to_hex_string to covert cpumap to hex string. > > According to Ian''s comments, modified some codes to make the logic more reasonable. > > > > In current implementation, it uses integer to record current avail cpus and this only allows user to specify 31 vcpus. > > In following patch, it uses cpumap instead integer which make more sense than before. Also there is no limit to the max vcpus. > > > > Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>Hi Yang, Have I missed a follow up posting of this patch somewhere? Or is it still pending? Thanks, Ian.> > > > diff -r 3b0eed731020 tools/libxl/libxl_create.c > > --- a/tools/libxl/libxl_create.c Fri Jun 01 09:27:17 2012 +0800 > > +++ b/tools/libxl/libxl_create.c Fri Jun 01 10:34:13 2012 +0800 > > @@ -146,8 +146,11 @@ int libxl__domain_build_info_setdefault( > > > > if (!b_info->max_vcpus) > > b_info->max_vcpus = 1; > > - if (!b_info->cur_vcpus) > > - b_info->cur_vcpus = 1; > > + if (!b_info->avail_vcpus.size) { > > + if (libxl_cpumap_alloc(CTX, &b_info->avail_vcpus, 1)) > > + return ERROR_FAIL; > > + libxl_cpumap_set(&b_info->avail_vcpus, 0); > > + } > > > > if (!b_info->cpumap.size) { > > if (libxl_cpumap_alloc(CTX, &b_info->cpumap, 0)) > > diff -r 3b0eed731020 tools/libxl/libxl_dm.c > > --- a/tools/libxl/libxl_dm.c Fri Jun 01 09:27:17 2012 +0800 > > +++ b/tools/libxl/libxl_dm.c Fri Jun 01 10:34:13 2012 +0800 > > @@ -160,6 +160,8 @@ static char ** libxl__build_device_model > > } > > if (b_info->type == LIBXL_DOMAIN_TYPE_HVM) { > > int ioemu_vifs = 0; > > + int nr_set_cpus = 0; > > + char *s; > > > > if (b_info->u.hvm.serial) { > > flexarray_vappend(dm_args, "-serial", b_info->u.hvm.serial, NULL); > > @@ -200,11 +202,14 @@ static char ** libxl__build_device_model > > libxl__sprintf(gc, "%d", b_info->max_vcpus), > > NULL); > > } > > - if (b_info->cur_vcpus) { > > 
- flexarray_vappend(dm_args, "-vcpu_avail", > > - libxl__sprintf(gc, "0x%x", b_info->cur_vcpus), > > - NULL); > > - } > > + > > + nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus); > > + s = libxl_cpumap_to_hex_string(&b_info->avail_vcpus); > > + flexarray_vappend(dm_args, "-vcpu_avail", > > + libxl__sprintf(gc, "0x%s", s), > > You might as well make to_hex_string include the 0x? > > > + NULL); > > + free(s); > > + > > for (i = 0; i < num_vifs; i++) { > > if (vifs[i].nictype == LIBXL_NIC_TYPE_IOEMU) { > > char *smac = libxl__sprintf(gc, > > diff -r 3b0eed731020 tools/libxl/libxl_dom.c > > --- a/tools/libxl/libxl_dom.c Fri Jun 01 09:27:17 2012 +0800 > > +++ b/tools/libxl/libxl_dom.c Fri Jun 01 10:34:13 2012 +0800 > > @@ -195,7 +195,7 @@ int libxl__build_post(libxl__gc *gc, uin > > ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn); > > for (i = 0; i < info->max_vcpus; i++) { > > ents[12+(i*2)] = libxl__sprintf(gc, "cpu/%d/availability", i); > > - ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & (1 << i))) > > + ents[12+(i*2)+1] = (i && !libxl_cpumap_test(&info->avail_vcpus, i)) > > ? "offline" : "online"; > > Weren''t you going to drop the "i &&" and invert the ternary? > > > } > > > > @@ -350,7 +350,7 @@ static int hvm_build_set_params(xc_inter > > va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET); > > va_hvm->apic_mode = libxl_defbool_val(info->u.hvm.apic); > > va_hvm->nr_vcpus = info->max_vcpus; > > - memcpy(va_hvm->vcpu_online, &info->cur_vcpus, sizeof(info->cur_vcpus)); > > + memcpy(va_hvm->vcpu_online, info->avail_vcpus.map, info->avail_vcpus.size); > > This needs some sort of limit check, probably in terms of HVM_MAX_VCPUS. > otherwise a particularly large vcpus= in the config will cause mayhem... > > I guess this should probably be done in > libxl__domain_build_info_setdefaults. 
> > > diff -r 3b0eed731020 tools/libxl/libxl_utils.c > > --- a/tools/libxl/libxl_utils.c Fri Jun 01 09:27:17 2012 +0800 > > +++ b/tools/libxl/libxl_utils.c Fri Jun 01 10:34:13 2012 +0800 > > @@ -533,6 +533,28 @@ void libxl_cpumap_reset(libxl_cpumap *cp > > cpumap->map[cpu / 8] &= ~(1 << (cpu & 7)); > > } > > > > +int libxl_cpumap_count_set(const libxl_cpumap *cpumap) > > +{ > > + int i, nr_set_cpus = 0; > > + libxl_for_each_set_cpu(i, *((libxl_cpumap *)cpumap)) > > Please stop this habit of yours of casting away the const to make the > warning go away, it is almost always wrong and on the odd occasion that > it is right it should have a comment explaining why... > > In this case please make libxl_cpumap_test const correct instead. > > > + nr_set_cpus++; > > + > > + return nr_set_cpus; > > +} > > + > > +char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap) > > +{ > > + int i = cpumap->size; > > + char *p = libxl__zalloc(NULL, cpumap->size * 2 + 1); > > + char *q = p; > > + while(--i >= 0) { > > + sprintf((char *)p, "%02x", cpumap->map[i]); > > Why this cast? > > > + p += 2; > > + } > > + *p = ''\0''; > > + return q; > > +} > > + > > int libxl_get_max_cpus(libxl_ctx *ctx) > > { > > return xc_get_max_cpus(ctx->xch); > > diff -r 3b0eed731020 tools/libxl/libxl_utils.h > > --- a/tools/libxl/libxl_utils.h Fri Jun 01 09:27:17 2012 +0800 > > +++ b/tools/libxl/libxl_utils.h Fri Jun 01 10:34:13 2012 +0800 > > @@ -67,6 +67,8 @@ int libxl_cpumap_alloc(libxl_ctx *ctx, l > > int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu); > > void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu); > > void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu); > > +int libxl_cpumap_count_set(const libxl_cpumap *cpumap); > > Please add a comment describing this function, it should remind the > caller that they are responsible for freeing the result. 
> > > +char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap); > > static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap) > > { > > memset(cpumap->map, -1, cpumap->size); > > diff -r 3b0eed731020 tools/libxl/xl_cmdimpl.c > > --- a/tools/libxl/xl_cmdimpl.c Fri Jun 01 09:27:17 2012 +0800 > > +++ b/tools/libxl/xl_cmdimpl.c Fri Jun 01 10:34:13 2012 +0800 > > @@ -650,7 +650,14 @@ static void parse_config_data(const char > > > > if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) { > > b_info->max_vcpus = l; > > - b_info->cur_vcpus = (1 << l) - 1; > > + > > + if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) { > > + fprintf(stderr, "Unable to allocate cpumap\n"); > > + exit(1); > > + } > > + libxl_cpumap_set_none(&b_info->avail_vcpus); > > + while (l-- > 0) > > + libxl_cpumap_set((&b_info->avail_vcpus), l); > > This while loop is == libxl_cpumap_set_any. > > > } > > > > if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0)) > > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > http://lists.xen.org/xen-devel
Sorry, I was on vacation on past two weeks and just backing to work now. :) I will give you a modified patch ASAP. best regards yang> -----Original Message----- > From: Ian Campbell [mailto:Ian.Campbell@citrix.com] > Sent: Friday, June 22, 2012 8:11 PM > To: Zhang, Yang Z > Cc: xen-devel@lists.xensource.com > Subject: Re: [Xen-devel] [PATCH v3 ]libxl: allow to set more than 31 vcpus > > On Fri, 2012-06-01 at 12:44 +0100, Ian Campbell wrote: > > On Fri, 2012-06-01 at 03:48 +0100, Zhang, Yang Z wrote: > > > Change from v2: > > > Add function libxl_cpumap_to_hex_string to covert cpumap to hex string. > > > According to Ian''s comments, modified some codes to make the logic more > reasonable. > > > > > > In current implementation, it uses integer to record current avail cpus and > this only allows user to specify 31 vcpus. > > > In following patch, it uses cpumap instead integer which make more sense > than before. Also there is no limit to the max vcpus. > > > > > > Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com> > > Hi Yang, > > Have I missed a follow up posting of this patch somewhere? Or is it > still pending? > > Thanks, > Ian. 
> > > > > > > diff -r 3b0eed731020 tools/libxl/libxl_create.c > > > --- a/tools/libxl/libxl_create.c Fri Jun 01 09:27:17 2012 +0800 > > > +++ b/tools/libxl/libxl_create.c Fri Jun 01 10:34:13 2012 +0800 > > > @@ -146,8 +146,11 @@ int libxl__domain_build_info_setdefault( > > > > > > if (!b_info->max_vcpus) > > > b_info->max_vcpus = 1; > > > - if (!b_info->cur_vcpus) > > > - b_info->cur_vcpus = 1; > > > + if (!b_info->avail_vcpus.size) { > > > + if (libxl_cpumap_alloc(CTX, &b_info->avail_vcpus, 1)) > > > + return ERROR_FAIL; > > > + libxl_cpumap_set(&b_info->avail_vcpus, 0); > > > + } > > > > > > if (!b_info->cpumap.size) { > > > if (libxl_cpumap_alloc(CTX, &b_info->cpumap, 0)) > > > diff -r 3b0eed731020 tools/libxl/libxl_dm.c > > > --- a/tools/libxl/libxl_dm.c Fri Jun 01 09:27:17 2012 +0800 > > > +++ b/tools/libxl/libxl_dm.c Fri Jun 01 10:34:13 2012 +0800 > > > @@ -160,6 +160,8 @@ static char ** libxl__build_device_model > > > } > > > if (b_info->type == LIBXL_DOMAIN_TYPE_HVM) { > > > int ioemu_vifs = 0; > > > + int nr_set_cpus = 0; > > > + char *s; > > > > > > if (b_info->u.hvm.serial) { > > > flexarray_vappend(dm_args, "-serial", > b_info->u.hvm.serial, NULL); > > > @@ -200,11 +202,14 @@ static char ** libxl__build_device_model > > > libxl__sprintf(gc, "%d", > b_info->max_vcpus), > > > NULL); > > > } > > > - if (b_info->cur_vcpus) { > > > - flexarray_vappend(dm_args, "-vcpu_avail", > > > - libxl__sprintf(gc, "0x%x", > b_info->cur_vcpus), > > > - NULL); > > > - } > > > + > > > + nr_set_cpus = libxl_cpumap_count_set(&b_info->avail_vcpus); > > > + s = libxl_cpumap_to_hex_string(&b_info->avail_vcpus); > > > + flexarray_vappend(dm_args, "-vcpu_avail", > > > + libxl__sprintf(gc, "0x%s", s), > > > > You might as well make to_hex_string include the 0x? 
> > > > > + NULL); > > > + free(s); > > > + > > > for (i = 0; i < num_vifs; i++) { > > > if (vifs[i].nictype == LIBXL_NIC_TYPE_IOEMU) { > > > char *smac = libxl__sprintf(gc, > > > diff -r 3b0eed731020 tools/libxl/libxl_dom.c > > > --- a/tools/libxl/libxl_dom.c Fri Jun 01 09:27:17 2012 +0800 > > > +++ b/tools/libxl/libxl_dom.c Fri Jun 01 10:34:13 2012 +0800 > > > @@ -195,7 +195,7 @@ int libxl__build_post(libxl__gc *gc, uin > > > ents[11] = libxl__sprintf(gc, "%lu", state->store_mfn); > > > for (i = 0; i < info->max_vcpus; i++) { > > > ents[12+(i*2)] = libxl__sprintf(gc, "cpu/%d/availability", i); > > > - ents[12+(i*2)+1] = (i && info->cur_vcpus && !(info->cur_vcpus & > (1 << i))) > > > + ents[12+(i*2)+1] = (i && !libxl_cpumap_test(&info->avail_vcpus, > i)) > > > ? "offline" : "online"; > > > > Weren''t you going to drop the "i &&" and invert the ternary? > > > > > } > > > > > > @@ -350,7 +350,7 @@ static int hvm_build_set_params(xc_inter > > > va_hvm = (struct hvm_info_table *)(va_map + HVM_INFO_OFFSET); > > > va_hvm->apic_mode = libxl_defbool_val(info->u.hvm.apic); > > > va_hvm->nr_vcpus = info->max_vcpus; > > > - memcpy(va_hvm->vcpu_online, &info->cur_vcpus, > sizeof(info->cur_vcpus)); > > > + memcpy(va_hvm->vcpu_online, info->avail_vcpus.map, > info->avail_vcpus.size); > > > > This needs some sort of limit check, probably in terms of HVM_MAX_VCPUS. > > otherwise a particularly large vcpus= in the config will cause mayhem... > > > > I guess this should probably be done in > > libxl__domain_build_info_setdefaults. 
> > > > > diff -r 3b0eed731020 tools/libxl/libxl_utils.c > > > --- a/tools/libxl/libxl_utils.c Fri Jun 01 09:27:17 2012 +0800 > > > +++ b/tools/libxl/libxl_utils.c Fri Jun 01 10:34:13 2012 +0800 > > > @@ -533,6 +533,28 @@ void libxl_cpumap_reset(libxl_cpumap *cp > > > cpumap->map[cpu / 8] &= ~(1 << (cpu & 7)); > > > } > > > > > > +int libxl_cpumap_count_set(const libxl_cpumap *cpumap) > > > +{ > > > + int i, nr_set_cpus = 0; > > > + libxl_for_each_set_cpu(i, *((libxl_cpumap *)cpumap)) > > > > Please stop this habit of yours of casting away the const to make the > > warning go away, it is almost always wrong and on the odd occasion that > > it is right it should have a comment explaining why... > > > > In this case please make libxl_cpumap_test const correct instead. > > > > > + nr_set_cpus++; > > > + > > > + return nr_set_cpus; > > > +} > > > + > > > +char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap) > > > +{ > > > + int i = cpumap->size; > > > + char *p = libxl__zalloc(NULL, cpumap->size * 2 + 1); > > > + char *q = p; > > > + while(--i >= 0) { > > > + sprintf((char *)p, "%02x", cpumap->map[i]); > > > > Why this cast? > > > > > + p += 2; > > > + } > > > + *p = ''\0''; > > > + return q; > > > +} > > > + > > > int libxl_get_max_cpus(libxl_ctx *ctx) > > > { > > > return xc_get_max_cpus(ctx->xch); > > > diff -r 3b0eed731020 tools/libxl/libxl_utils.h > > > --- a/tools/libxl/libxl_utils.h Fri Jun 01 09:27:17 2012 +0800 > > > +++ b/tools/libxl/libxl_utils.h Fri Jun 01 10:34:13 2012 +0800 > > > @@ -67,6 +67,8 @@ int libxl_cpumap_alloc(libxl_ctx *ctx, l > > > int libxl_cpumap_test(libxl_cpumap *cpumap, int cpu); > > > void libxl_cpumap_set(libxl_cpumap *cpumap, int cpu); > > > void libxl_cpumap_reset(libxl_cpumap *cpumap, int cpu); > > > +int libxl_cpumap_count_set(const libxl_cpumap *cpumap); > > > > Please add a comment describing this function, it should remind the > > caller that they are responsible for freeing the result. 
> > > > > +char *libxl_cpumap_to_hex_string(const libxl_cpumap *cpumap); > > > static inline void libxl_cpumap_set_any(libxl_cpumap *cpumap) > > > { > > > memset(cpumap->map, -1, cpumap->size); > > > diff -r 3b0eed731020 tools/libxl/xl_cmdimpl.c > > > --- a/tools/libxl/xl_cmdimpl.c Fri Jun 01 09:27:17 2012 +0800 > > > +++ b/tools/libxl/xl_cmdimpl.c Fri Jun 01 10:34:13 2012 +0800 > > > @@ -650,7 +650,14 @@ static void parse_config_data(const char > > > > > > if (!xlu_cfg_get_long (config, "vcpus", &l, 0)) { > > > b_info->max_vcpus = l; > > > - b_info->cur_vcpus = (1 << l) - 1; > > > + > > > + if (libxl_cpumap_alloc(ctx, &b_info->avail_vcpus, l)) { > > > + fprintf(stderr, "Unable to allocate cpumap\n"); > > > + exit(1); > > > + } > > > + libxl_cpumap_set_none(&b_info->avail_vcpus); > > > + while (l-- > 0) > > > + libxl_cpumap_set((&b_info->avail_vcpus), l); > > > > This while loop is == libxl_cpumap_set_any. > > > > > } > > > > > > if (!xlu_cfg_get_long (config, "maxvcpus", &l, 0)) > > > > > > > > _______________________________________________ > > Xen-devel mailing list > > Xen-devel@lists.xen.org > > http://lists.xen.org/xen-devel >