Ryan Harper
2005-Apr-14 15:29 UTC
[Xen-devel] [PATCH] xen, tools/python/xen: pincpu support vcpus, add vcpu to cpu map
The following patch updates the dom0 pincpu operation to read the VCPU
value from the xend interface rather than hard-coding the exec_domain
to 0.  That hard-coding prevented pinning any VCPU other than 0 to a
particular cpu.  I added the number of VCPUS to the main xm list output
and also included a new sub-option to xm list to display the
VCPU-to-CPU mapping.  While working on the pincpu code, I fixed an
out-of-bounds indexing error in the pincpu operation that wasn't
previously exposed since the vcpu/exec_domain value was hard-coded
to 0.

Here is some sample output of what the new sub-option looks like:

(hungerforce) root # xm list
Name              Id  Mem(MB)  CPU  VCPU(s)  State  Time(s)  Console
Domain-0           0      507    0        2  r----     30.4
debian_sarge_1     3      128    1        1  -b---     10.3  9603
debian_sarge_2     2      128    1        4  -b---      2.4  9602

(hungerforce) root # xm list --vcpus
Name              Id  VCPU  CPU
Domain-0           0     0    0
Domain-0           0     1    1
debian_sarge_1     3     0    1
debian_sarge_2     2     0    1
debian_sarge_2     2     1    0
debian_sarge_2     2     2    1
debian_sarge_2     2     3    0

(hungerforce) root # xm list -v 2
Name              Id  VCPU  CPU
debian_sarge_2     2     0    1
debian_sarge_2     2     1    0
debian_sarge_2     2     2    1
debian_sarge_2     2     3    0

(hungerforce) root # xm list -v 3
Name              Id  VCPU  CPU
debian_sarge_1     3     0    1

(hungerforce) root # xm pincpu 3 0 0
(hungerforce) root # xm list -v 3
Name              Id  VCPU  CPU
debian_sarge_1     3     0    0

(hungerforce) root # xm pincpu 3 1 1
Error: (22, 'Invalid argument')
(hungerforce) root # xm pincpu 3 33 0
Error: (22, 'Invalid argument')

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com

diffstat output:
 tools/libxc/xc.h                          |    3 +
 tools/libxc/xc_domain.c                   |    6 ++-
 tools/python/xen/lowlevel/xc/xc.c         |   56 +++++++++++++++++-------------
 tools/python/xen/xend/XendClient.py       |    3 +
 tools/python/xen/xend/XendDomain.py       |   11 +++--
 tools/python/xen/xend/XendDomainInfo.py   |    3 +
 tools/python/xen/xend/server/SrvDomain.py |    1
 tools/python/xen/xend/server/SrvUsbif.py  |    1
 tools/python/xen/xm/main.py               |   45 ++++++++++++++++++------
 xen/common/dom0_ops.c                     |   12 ++++++
 xen/include/public/dom0_ops.h             |    2 +
 11 files changed, 101 insertions(+), 42 deletions(-)

Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
---
diff -urN a/tools/libxc/xc_domain.c c/tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c	2005-04-12 22:09:12.000000000 -0500
+++ c/tools/libxc/xc_domain.c	2005-04-13 12:21:01.000000000 -0500
@@ -64,12 +64,13 @@
 
 int xc_domain_pincpu(int xc_handle,
                      u32 domid,
+                     int vcpu,
                      int cpu)
 {
     dom0_op_t op;
     op.cmd = DOM0_PINCPUDOMAIN;
     op.u.pincpudomain.domain = (domid_t)domid;
-    op.u.pincpudomain.exec_domain = 0;
+    op.u.pincpudomain.exec_domain = vcpu;
     op.u.pincpudomain.cpu = cpu;
     return do_dom0_op(xc_handle, &op);
 }
@@ -112,6 +113,9 @@
         info->max_memkb = op.u.getdomaininfo.max_pages<<(PAGE_SHIFT);
         info->shared_info_frame = op.u.getdomaininfo.shared_info_frame;
         info->cpu_time = op.u.getdomaininfo.cpu_time;
+        info->vcpus = op.u.getdomaininfo.n_vcpu;
+        memcpy(info->vcpu_to_cpu, &op.u.getdomaininfo.vcpu_to_cpu,
+               MAX_VIRT_CPUS*sizeof(u32));
 
         next_domid = (u16)op.u.getdomaininfo.domain + 1;
         info++;
diff -urN a/tools/libxc/xc.h c/tools/libxc/xc.h
--- a/tools/libxc/xc.h	2005-04-12 22:09:09.000000000 -0500
+++ c/tools/libxc/xc.h	2005-04-13 12:21:01.000000000 -0500
@@ -78,6 +78,7 @@
 typedef struct {
     u32           domid;
     unsigned int  cpu;
+    unsigned int  vcpus;
     unsigned int  dying:1, crashed:1, shutdown:1,
                   paused:1, blocked:1, running:1;
     unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
@@ -85,6 +86,7 @@
     unsigned long shared_info_frame;
     u64           cpu_time;
     unsigned long max_memkb;
+    u32           vcpu_to_cpu[MAX_VIRT_CPUS];
 } xc_dominfo_t;
 typedef dom0_getdomaininfo_t xc_domaininfo_t;
@@ -128,6 +130,7 @@
                          u32 domid);
 int xc_domain_pincpu(int xc_handle,
                      u32 domid,
+                     int vcpu,
                      int cpu);
 
 /**
  * This function will return information about one or more domains.
diff -urN a/tools/python/xen/lowlevel/xc/xc.c c/tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c	2005-04-12 22:09:06.000000000 -0500
+++ c/tools/python/xen/lowlevel/xc/xc.c	2005-04-13 12:21:01.000000000 -0500
@@ -128,15 +128,16 @@
     XcObject *xc = (XcObject *)self;
 
     u32 dom;
+    int vcpu = 0;
     int cpu = -1;
 
-    static char *kwd_list[] = { "dom", "cpu", NULL };
+    static char *kwd_list[] = { "dom", "vcpu", "cpu", NULL };
 
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
-                                      &dom, &cpu) )
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|ii", kwd_list,
+                                      &dom, &vcpu, &cpu) )
         return NULL;
 
-    if ( xc_domain_pincpu(xc->xc_handle, dom, cpu) != 0 )
+    if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, cpu) != 0 )
         return PyErr_SetFromErrno(xc_error);
 
     Py_INCREF(zero);
@@ -148,10 +149,10 @@
                                 PyObject *kwds)
 {
     XcObject *xc = (XcObject *)self;
-    PyObject *list;
+    PyObject *list, *vcpu_list, *info_dict;
 
     u32 first_dom = 0;
-    int max_doms = 1024, nr_doms, i;
+    int max_doms = 1024, nr_doms, i, j;
     xc_dominfo_t *info;
 
     static char *kwd_list[] = { "first_dom", "max_doms", NULL };
@@ -168,23 +169,28 @@
     list = PyList_New(nr_doms);
     for ( i = 0 ; i < nr_doms; i++ )
     {
-        PyList_SetItem(
-            list, i,
-            Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
-                          ",s:l,s:L,s:l,s:i}",
-                          "dom",       info[i].domid,
-                          "cpu",       info[i].cpu,
-                          "dying",     info[i].dying,
-                          "crashed",   info[i].crashed,
-                          "shutdown",  info[i].shutdown,
-                          "paused",    info[i].paused,
-                          "blocked",   info[i].blocked,
-                          "running",   info[i].running,
-                          "mem_kb",    info[i].nr_pages*4,
-                          "cpu_time",  info[i].cpu_time,
-                          "maxmem_kb", info[i].max_memkb,
-                          "shutdown_reason", info[i].shutdown_reason
-                ));
+        vcpu_list = PyList_New(MAX_VIRT_CPUS);
+        for ( j = 0; j < MAX_VIRT_CPUS; j++ )
+            PyList_SetItem( vcpu_list, j,
+                            Py_BuildValue("i", info[i].vcpu_to_cpu[j]));
+
+        info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
+                                  ",s:l,s:L,s:l,s:i}",
+                                  "dom",       info[i].domid,
+                                  "cpu",       info[i].cpu,
+                                  "vcpus",     info[i].vcpus,
+                                  "dying",     info[i].dying,
+                                  "crashed",   info[i].crashed,
+                                  "shutdown",  info[i].shutdown,
+                                  "paused",    info[i].paused,
+                                  "blocked",   info[i].blocked,
+                                  "running",   info[i].running,
+                                  "mem_kb",    info[i].nr_pages*4,
+                                  "cpu_time",  info[i].cpu_time,
+                                  "maxmem_kb", info[i].max_memkb,
+                                  "shutdown_reason", info[i].shutdown_reason);
+        PyDict_SetItemString( info_dict, "vcpu_to_cpu", vcpu_list );
+        PyList_SetItem( list, i, info_dict);
+    }
 
     free(info);
@@ -895,6 +901,7 @@
       "          domain-id space was reached.\n"
       " dom      [int]: Identifier of domain to which this info pertains\n"
       " cpu      [int]: CPU to which this domain is bound\n"
+      " vcpus    [int]: Number of Virtual CPUS in this domain\n"
       " dying    [int]: Bool - is the domain dying?\n"
       " crashed  [int]: Bool - has the domain crashed?\n"
       " shutdown [int]: Bool - has the domain shut itself down?\n"
@@ -905,7 +912,8 @@
       " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
       " cpu_time [long]: CPU time consumed, in nanoseconds\n"
       " shutdown_reason [int]: Numeric code from guest OS, explaining "
-      "reason why it shut itself down.\n" },
+      "reason why it shut itself down.\n"
+      " vcpu_to_cpu [[int]]: List that maps VCPUS to CPUS\n" },
 
     { "linux_save",
       (PyCFunction)pyxc_linux_save,
diff -urN a/tools/python/xen/xend/server/SrvDomain.py c/tools/python/xen/xend/server/SrvDomain.py
--- a/tools/python/xen/xend/server/SrvDomain.py	2005-04-12 22:09:10.000000000 -0500
+++ c/tools/python/xen/xend/server/SrvDomain.py	2005-04-13 12:21:01.000000000 -0500
@@ -92,6 +92,7 @@
     def op_pincpu(self, op, req):
         fn = FormFn(self.xd.domain_pincpu,
                     [['dom', 'str'],
+                     ['vcpu', 'int'],
                      ['cpu', 'int']])
         val = fn(req.args, {'dom': self.dom.id})
         return val
diff -urN a/tools/python/xen/xend/server/SrvUsbif.py c/tools/python/xen/xend/server/SrvUsbif.py
--- a/tools/python/xen/xend/server/SrvUsbif.py	2005-04-12 22:09:14.000000000 -0500
+++ c/tools/python/xen/xend/server/SrvUsbif.py	2005-04-13 12:21:01.000000000 -0500
@@ -107,6 +107,7 @@
     def op_pincpu(self, op, req):
         fn = FormFn(self.xd.domain_pincpu,
                     [['dom', 'str'],
+                     ['vcpu', 'int'],
                      ['cpu', 'int']])
         val = fn(req.args, {'dom': self.dom.id})
         return val
diff -urN a/tools/python/xen/xend/XendClient.py c/tools/python/xen/xend/XendClient.py
--- a/tools/python/xen/xend/XendClient.py	2005-04-12 22:09:04.000000000 -0500
+++ c/tools/python/xen/xend/XendClient.py	2005-04-13 12:21:01.000000000 -0500
@@ -246,9 +246,10 @@
                               'live'     : live,
                               'resource' : resource })
 
-    def xend_domain_pincpu(self, id, cpu):
+    def xend_domain_pincpu(self, id, vcpu, cpu):
         return self.xendPost(self.domainurl(id),
                              {'op'   : 'pincpu',
+                              'vcpu' : vcpu,
                               'cpu'  : cpu })
 
     def xend_domain_cpu_bvt_set(self, id, mcuadv, warpback, warpvalue, warpl, warpu):
diff -urN a/tools/python/xen/xend/XendDomainInfo.py c/tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py	2005-04-12 22:09:06.000000000 -0500
+++ c/tools/python/xen/xend/XendDomainInfo.py	2005-04-13 12:21:01.000000000 -0500
@@ -372,6 +372,9 @@
             sxpr.append(['shutdown_reason', reason])
         sxpr.append(['cpu', self.info['cpu']])
         sxpr.append(['cpu_time', self.info['cpu_time']/1e9])
+        sxpr.append(['vcpus', self.info['vcpus']])
+        sxpr.append(['vcpu_to_cpu', ''.join(map(lambda x: str(x),
+                    self.info['vcpu_to_cpu'][0:self.info['vcpus']]))])
 
         if self.start_time:
             up_time =  time.time() - self.start_time
diff -urN a/tools/python/xen/xend/XendDomain.py c/tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py	2005-04-12 22:09:06.000000000 -0500
+++ c/tools/python/xen/xend/XendDomain.py	2005-04-13 12:21:01.000000000 -0500
@@ -610,15 +610,16 @@
         xmigrate = XendMigrate.instance()
         return xmigrate.save_begin(dominfo, dst)
 
-    def domain_pincpu(self, id, cpu):
-        """Pin a domain to a cpu.
+    def domain_pincpu(self, id, vcpu, cpu):
+        """Pin a vcpu in a domain to a cpu.
 
-        @param id: domain
-        @param cpu: cpu number
+        @param id:   domain
+        @param vcpu: vcpu number
+        @param cpu:  cpu number
         """
         dominfo = self.domain_lookup(id)
         try:
-            return xc.domain_pincpu(int(dominfo.id), cpu)
+            return xc.domain_pincpu(int(dominfo.id), vcpu, cpu)
         except Exception, ex:
             raise XendError(str(ex))
diff -urN a/tools/python/xen/xm/main.py c/tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py	2005-04-12 22:09:11.000000000 -0500
+++ c/tools/python/xen/xm/main.py	2005-04-14 10:08:25.171199816 -0500
@@ -340,8 +340,8 @@
     name = "list"
     info = """List information about domains."""
 
-    short_options = 'l'
-    long_options = ['long']
+    short_options = 'lv'
+    long_options = ['long','vcpus']
 
     def help(self, args):
         if help:
@@ -350,11 +350,13 @@
          Either all domains or the domains given.
 
          -l, --long   Get more detailed information.
+         -v, --vcpus  Show VCPU to CPU mapping.
         """
         return
 
     def main(self, args):
         use_long = 0
+        show_vcpus = 0
         (options, params) = getopt(args[1:],
                                    self.short_options,
                                    self.long_options)
@@ -362,6 +364,8 @@
         for (k, v) in options:
             if k in ['-l', '--long']:
                 use_long = 1
+            if k in ['-v', '--vcpus']:
+                show_vcpus = 1
 
         if n == 0:
             doms = server.xend_domains()
@@ -371,11 +375,13 @@
 
         if use_long:
             self.long_list(doms)
+        elif show_vcpus:
+            self.show_vcpus(doms)
         else:
             self.brief_list(doms)
 
     def brief_list(self, doms):
-        print 'Name              Id  Mem(MB)  CPU  State  Time(s)  Console'
+        print 'Name              Id  Mem(MB)  CPU  VCPU(s)  State  Time(s)  Console'
         for dom in doms:
             info = server.xend_domain(dom)
             d = {}
@@ -383,6 +389,7 @@
             d['name'] = sxp.child_value(info, 'name', '??')
             d['mem'] = int(sxp.child_value(info, 'memory', '0'))
             d['cpu'] = int(sxp.child_value(info, 'cpu', '0'))
+            d['vcpus'] = int(sxp.child_value(info, 'vcpus', '0'))
             d['state'] = sxp.child_value(info, 'state', '??')
             d['cpu_time'] = float(sxp.child_value(info, 'cpu_time', '0'))
             console = sxp.child(info, 'console')
@@ -390,9 +397,24 @@
                 d['port'] = sxp.child_value(console, 'console_port')
             else:
                 d['port'] = ''
-            print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(state)5s %(cpu_time)7.1f %(port)4s"
+            print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(vcpus)5d %(state)5s %(cpu_time)7.1f %(port)4s"
                    % d)
 
+    def show_vcpus(self, doms):
+        print 'Name              Id  VCPU  CPU'
+        for dom in doms:
+            info = server.xend_domain(dom)
+            vcpu_to_cpu = sxp.child_value(info, 'vcpu_to_cpu', '?').replace('-','')
+            count = 0
+            for cpu in vcpu_to_cpu:
+                d = {}
+                d['name'] = sxp.child_value(info, 'name', '??')
+                d['dom'] = int(sxp.child_value(info, 'id', '-1'))
+                d['vcpu'] = int(count)
+                d['cpu'] = int(cpu)
+                count = count + 1
+                print ("%(name)-16s %(dom)3d %(vcpu)4d %(cpu)3d" % d)
+
     def long_list(self, doms):
         for dom in doms:
             info = server.xend_domain(dom)
@@ -474,17 +496,18 @@
 class ProgPincpu(Prog):
     group = 'domain'
     name = "pincpu"
-    info = """Pin a domain to a cpu. """
+    info = """Pin a vcpu to a cpu. """
 
     def help(self, args):
-        print args[0],'DOM CPU'
-        print '\nPin domain DOM to cpu CPU.'
+        print args[0],'DOM VCPU CPU'
+        print '\nPin vcpu VCPU in domain DOM to cpu CPU.'
 
     def main(self, args):
-        if len(args) != 3: self.err("%s: Invalid argument(s)" % args[0])
-        dom = args[1]
-        cpu = int(args[2])
-        server.xend_domain_pincpu(dom, cpu)
+        if len(args) != 4: self.err("%s: Invalid argument(s)" % args[0])
+        dom = args[1]
+        vcpu = int(args[2])
+        cpu = int(args[3])
+        server.xend_domain_pincpu(dom, vcpu, cpu)
 
 xm.prog(ProgPincpu)
diff -urN a/xen/common/dom0_ops.c c/xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c	2005-04-12 22:09:06.000000000 -0500
+++ c/xen/common/dom0_ops.c	2005-04-13 13:22:42.000000000 -0500
@@ -242,6 +242,13 @@
             break;
         }
 
+        if ( (op->u.pincpudomain.exec_domain >= MAX_VIRT_CPUS) ||
+             !d->exec_domain[op->u.pincpudomain.exec_domain] )
+        {
+            ret = -EINVAL;
+            break;
+        }
+
         ed = d->exec_domain[op->u.pincpudomain.exec_domain];
         if ( ed == NULL )
         {
@@ -321,6 +328,10 @@
             break;
         }
 
+        memset(&op->u.getdomaininfo.vcpu_to_cpu,-1,MAX_VIRT_CPUS*sizeof(u8));
+        for_each_exec_domain ( d, ed )
+            op->u.getdomaininfo.vcpu_to_cpu[ed->eid] = ed->processor;
+
         ed = d->exec_domain[op->u.getdomaininfo.exec_domain];
 
         op->u.getdomaininfo.flags
@@ -338,6 +349,7 @@
         op->u.getdomaininfo.tot_pages = d->tot_pages;
         op->u.getdomaininfo.max_pages = d->max_pages;
         op->u.getdomaininfo.cpu_time  = ed->cpu_time;
+        op->u.getdomaininfo.n_vcpu    = d->shared_info->n_vcpu;
         op->u.getdomaininfo.shared_info_frame =
             __pa(d->shared_info) >> PAGE_SHIFT;
diff -urN a/xen/include/public/dom0_ops.h c/xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h	2005-04-12 22:09:12.000000000 -0500
+++ c/xen/include/public/dom0_ops.h	2005-04-13 12:21:01.000000000 -0500
@@ -92,6 +92,8 @@
     memory_t max_pages;
     memory_t shared_info_frame;  /* MFN of shared_info struct */
     u64      cpu_time;
+    u32      n_vcpu;
+    u32      vcpu_to_cpu[MAX_VIRT_CPUS];
 } dom0_getdomaininfo_t;
 
 #define DOM0_SETDOMAININFO 13
Ian Pratt
2005-Apr-14 15:49 UTC
RE: [Xen-devel] [PATCH] xen, tools/python/xen: pincpu support vcpus, add vcpu to cpu map
> The following patch updates the dom0 pincpu operation to read
> the VCPU value from the xend interface rather than
> hard-coding the exec_domain to 0.  This prevented pinning
> VCPUS other than 0 to a particular cpu.  I added the number
> of VCPUS to the main xm list output and also included a new
> sub-option to xm list to display the VCPU to CPU mapping.
> While working on the pincpu code, I fixed an out-of-bounds
> indexing error in the pincpu operation that wasn't previously
> exposed since the vcpu/exec_domain value was hard-coded to 0.

Ryan, good progress, but I'd like to propose a couple of extensions:

It would be useful if you could update it so that pincpu enabled you to
specify a set of physical CPUs for each VCPU, e.g.

"xm pincpu mydom 1 2,4-6" which would allow VCPU 1 of mydom to run on
CPUs 2, 4, 5 and 6 but no others.  -1 would still mean "run anywhere".
Having this functionality is really important before we can implement
any kind of CPU load balancer.

Secondly, I think it would be really good if we could have some
hierarchy in CPU names.  Imagine a 4 socket system with dual core
hyperthreaded CPUs.  It would be nice to be able to specify the 3rd
socket, 1st core, 2nd hyperthread as CPU "2.0.1".

Where we're on a system without one of the levels of hierarchy, we just
miss it off.  E.g. a current SMP Xeon box would be "x.y".  This would
be much less confusing than the current scalar representation.

What do you think?

Thanks,
Ian
Ryan Harper
2005-Apr-14 16:24 UTC
Re: [Xen-devel] [PATCH] xen, tools/python/xen: pincpu support vcpus, add vcpu to cpu map
* Ian Pratt <m+Ian.Pratt@cl.cam.ac.uk> [2005-04-14 10:50]:
> > The following patch updates the dom0 pincpu operation to read
> > the VCPU value from the xend interface rather than
> > hard-coding the exec_domain to 0.  This prevented pinning
> > VCPUS other than 0 to a particular cpu.  I added the number
> > of VCPUS to the main xm list output and also included a new
> > sub-option to xm list to display the VCPU to CPU mapping.
> > While working on the pincpu code, I fixed an out-of-bounds
> > indexing error in the pincpu operation that wasn't previously
> > exposed since the vcpu/exec_domain value was hard-coded to 0.
>
> Ryan, good progress, but I'd like to propose a couple of extensions:
>
> It would be useful if you could update it so that pincpu enabled you to
> specify a set of physical CPUs for each VCPU, e.g.
>
> "xm pincpu mydom 1 2,4-6" which would allow VCPU 1 of mydom to run on
> CPUs 2, 4, 5 and 6 but no others.  -1 would still mean "run anywhere".
> Having this functionality is really important before we can implement
> any kind of CPU load balancer.

Interesting idea.  I don't see anything in the schedulers that would
take advantage of that sort of definition.  AFAIK, exec_domains are
never migrated unless told to do so via pincpu.  Does the new scheduler
do this?  Or is this more about setting up the rules that the load
balancer would query to find out where it can migrate vcpus?

> Secondly, I think it would be really good if we could have some
> hierarchy in CPU names.  Imagine a 4 socket system with dual core
> hyperthreaded CPUs.  It would be nice to be able to specify the 3rd
> socket, 1st core, 2nd hyperthread as CPU "2.0.1".
>
> Where we're on a system without one of the levels of hierarchy, we just
> miss it off.  E.g. a current SMP Xeon box would be "x.y".  This would
> be much less confusing than the current scalar representation.

I like the idea of being able to specify "where" the vcpu runs more
explicitly than 'cpu 0', which does not give any indication of physical
cpu characteristics.  We would probably need to still provide a simple
mapping, but allow the pincpu interface to support a more specific
target as well as the more generic.

2-way hyperthreaded box:

CPU  SOCKET.CORE.THREAD
 0        0.0.0
 1        0.0.1
 2        1.0.0
 3        1.0.1

That look sane?

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com
Ian Pratt
2005-Apr-14 16:57 UTC
RE: [Xen-devel] [PATCH] xen, tools/python/xen: pincpu support vcpus, add vcpu to cpu map
> > "xm pincpu mydom 1 2,4-6" which would allow VCPU 1 of mydom > to run on > > CPUs 2,4 and 5 but no others. -1 would still mean "run anywhere". > > Having this functionality is really important before we can > implement > > any kind of CPU load ballancer. > > Interesting idea. I don''t see anything in the schedulers > that would take advantage of that sort of definition. AFAIK, > exec_domains are never migrated unless told to do so via > pincpu. Does the new scheduler do this? Or is this more of > setting up the rules that the load balancer would query to > find out where it can migrate vcpus?I see having this as a pre-requisite for any fancy new scheduler (or as a first step, CPU load ballancer). Without it, I think it''ll be scheduling anarchy :-)> > Secondly, I think it would be really good if we could have some > > hierarchy in CPU names. Imagine a 4 socket system with dual > core hyper > > threaded CPUs. It would be nice to be able to specify the > 3rd socket, > > 1st core, 2nd hyperthread as CPU "2.0.1". > > > > Where we''re on a system without one of the levels of hierarchy, we > > just miss it off. E.g. a current SMP Xeon box would be "x.y". This > > would be much less confusing than the current scalar representation. > > I like the idea of being able to specify "where" the vcpu > runs more explicitly than ''cpu 0'', which does not give any > indication of physical cpu characteristics. We would > probably need to still provide a simple mapping, but allow > the pincpu interface to support a more specific target as > well as the more generic. > > 2-way hyperthreaded box: > CPU SOCKET.CORE.THREAD > 0 0.0.0 > 1 0.0.1 > 2 1.0.0 > 3 1.0.1 > > That look sane?Yep, that''s what I''m thinking. I think its probably worth squeezing out unsused levels of hierarchy, e.g. just having SOCKET.THREAD in the above example. Keeping it pretty generic makes sense too. E.g. imagine a big ccNUMA system with a ''node'' level above that of the actual CPU socket. Best, Ian _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Ryan Harper
2005-Apr-14 17:41 UTC
Re: [Xen-devel] [PATCH] xen, tools/python/xen: pincpu support vcpus, add vcpu to cpu map
* Ian Pratt <m+Ian.Pratt@cl.cam.ac.uk> [2005-04-14 11:58]:
> > > "xm pincpu mydom 1 2,4-6" which would allow VCPU 1 of mydom to run
> > > on CPUs 2, 4, 5 and 6 but no others.  -1 would still mean "run
> > > anywhere".  Having this functionality is really important before
> > > we can implement any kind of CPU load balancer.
> >
> > Interesting idea.  I don't see anything in the schedulers
> > that would take advantage of that sort of definition.  AFAIK,
> > exec_domains are never migrated unless told to do so via
> > pincpu.  Does the new scheduler do this?  Or is this more about
> > setting up the rules that the load balancer would query to
> > find out where it can migrate vcpus?
>
> I see having this as a prerequisite for any fancy new scheduler (or as
> a first step, CPU load balancer).  Without it, I think it'll be
> scheduling anarchy :-)

OK, makes sense; that sounds like a separate patch.  I was thinking a
u32 bitmap, but that doesn't give us the -1, run-anywhere case.  Maybe
EDF_USEPINMAP and a u32 bitmap: if EDF_USEPINMAP is set, then the
balancer/scheduler looks at the bitmap to see on which cpus the vcpu
can run; if it is not set, the vcpu can run anywhere.

> > > Secondly, I think it would be really good if we could have some
> > > hierarchy in CPU names.  Imagine a 4 socket system with dual core
> > > hyperthreaded CPUs.  It would be nice to be able to specify the
> > > 3rd socket, 1st core, 2nd hyperthread as CPU "2.0.1".
> > >
> > > Where we're on a system without one of the levels of hierarchy, we
> > > just miss it off.  E.g. a current SMP Xeon box would be "x.y".
> > > This would be much less confusing than the current scalar
> > > representation.
> >
> > I like the idea of being able to specify "where" the vcpu
> > runs more explicitly than 'cpu 0', which does not give any
> > indication of physical cpu characteristics.  We would
> > probably need to still provide a simple mapping, but allow
> > the pincpu interface to support a more specific target as
> > well as the more generic.
> >
> > 2-way hyperthreaded box:
> > CPU  SOCKET.CORE.THREAD
> >  0        0.0.0
> >  1        0.0.1
> >  2        1.0.0
> >  3        1.0.1
> >
> > That look sane?
>
> Yep, that's what I'm thinking.  I think it's probably worth squeezing
> out unused levels of hierarchy, e.g. just having SOCKET.THREAD in the
> above

OK.  I'll see how the implementation looks when I'm done.  It sounds
nice though.

> example.  Keeping it pretty generic makes sense too.  E.g. imagine a
> big ccNUMA system with a 'node' level above that of the actual CPU
> socket.

Sure, I'll look at the Linux cpu groups stuff and the Linux topology
code to see if there is anything like this there.

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com
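In rough pseudocode (Python here for brevity; the real check would sit
in the hypervisor's scheduler path, in C), the flag-plus-bitmap
semantics proposed above come down to the following.  EDF_USEPINMAP is
a proposed flag, not one that exists in the tree yet, and its bit
position is made up:

    # Sketch of the proposed flag-plus-bitmap check; EDF_USEPINMAP
    # and its bit position are hypothetical.
    EDF_USEPINMAP = 1 << 5

    def vcpu_may_run_on(ed_flags, cpumap, cpu):
        if not (ed_flags & EDF_USEPINMAP):
            return True                      # no pin map: run anywhere
        return bool(cpumap & (1 << cpu))     # else honour the bitmap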
Ian Pratt
2005-Apr-14 18:28 UTC
RE: [Xen-devel] [PATCH] xen, tools/python/xen: pincpu support vcpus, add vcpu to cpu map
> > I see having this as a prerequisite for any fancy new scheduler (or
> > as a first step, CPU load balancer).  Without it, I think it'll be
> > scheduling anarchy :-)
>
> OK, makes sense; that sounds like a separate patch.  I was thinking a
> u32 bitmap, but that doesn't give us the -1, run-anywhere case.
> Maybe EDF_USEPINMAP and a u32 bitmap: if EDF_USEPINMAP is
> set, then the balancer/scheduler looks at the bitmap to see
> on which cpus the vcpu can run; if it is not set, the vcpu
> can run anywhere.

We need to start incrementally introducing a cpu_set_t type that is a
suitably sized bit array.  We can probably get away with 64 bits for
3.0, but it might be more prudent to go for 256.  We'll need to change
the hypercall interface to reflect this.  '-1' just sets all the bits
to indicate that it can run anywhere.

(If you wanted to be really flash, you could enable an exclusion list
to be specified instead, e.g. ^3,4,5 -- I like writing regexps so I'd
probably get carried away allowing wildcards to be specified etc. :-)

> > Yep, that's what I'm thinking.  I think it's probably worth
> > squeezing out unused levels of hierarchy, e.g. just having
> > SOCKET.THREAD in the above
>
> OK.  I'll see how the implementation looks when I'm done.  It
> sounds nice though.
>
> > example.  Keeping it pretty generic makes sense too.  E.g. imagine
> > a big ccNUMA system with a 'node' level above that of the actual
> > CPU socket.
>
> Sure, I'll look at the Linux cpu groups stuff and the Linux
> topology code to see if there is anything like this there.

Thanks.  I think the correct thing to do is to pass a string from xm,
and do the conversion in xend.  Xend should know the #hyperthreads,
#cores, #sockets, #nodes by reading it out of Xen.

I think we can get away for the moment with assuming that all CPUs are
identical.

Ian
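As a sketch of the string handling that could sit in xend under this
proposal -- ranges, an all-bits value for -1, and the ^-prefixed
exclusion form -- here is one possible parser over a configurable map
width.  Only the plain 'a,b-c' form exists in the patches in this
thread; the rest is Ian's proposal:

    # Hypothetical parser for the proposed syntax over an nbits-wide
    # map (64 or 256 rather than a fixed u32).
    def parse_cpuset(spec, nbits=64):
        allbits = (1 << nbits) - 1
        if spec == '-1':
            return allbits                   # run anywhere
        exclude = spec.startswith('^')
        if exclude:
            spec = spec[1:]
        mask = 0
        for chunk in spec.split(','):
            if '-' in chunk:
                (lo, hi) = chunk.split('-')
                for cpu in range(int(lo), int(hi) + 1):
                    mask = mask | (1 << cpu)
            else:
                mask = mask | (1 << int(chunk))
        if exclude:
            mask = allbits & ~mask           # all but the listed CPUs
        return mask

For example, parse_cpuset('2,4-6') gives 0x74 (bits 2, 4, 5 and 6),
and parse_cpuset('^3,4,5', 8) gives 0xc7 (every CPU except 3, 4 and 5).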
* Ian Pratt <m+Ian.Pratt@cl.cam.ac.uk> [2005-04-14 10:50]:
> > The following patch updates the dom0 pincpu operation to read
> > the VCPU value from the xend interface rather than
> > hard-coding the exec_domain to 0.  This prevented pinning
> > VCPUS other than 0 to a particular cpu.  I added the number
> > of VCPUS to the main xm list output and also included a new
> > sub-option to xm list to display the VCPU to CPU mapping.
> > While working on the pincpu code, I fixed an out-of-bounds
> > indexing error in the pincpu operation that wasn't previously
> > exposed since the vcpu/exec_domain value was hard-coded to 0.
>
> Ryan, good progress, but I'd like to propose a couple of extensions:
>
> It would be useful if you could update it so that pincpu enabled you to
> specify a set of physical CPUs for each VCPU, e.g.
>
> "xm pincpu mydom 1 2,4-6" which would allow VCPU 1 of mydom to run on
> CPUs 2, 4, 5 and 6 but no others.  -1 would still mean "run anywhere".
> Having this functionality is really important before we can implement
> any kind of CPU load balancer.

Attached is a patch that depends on the previous [1]patch, replacing
the pincpu cpu argument with cpumap, a u32 bitmap representing which
CPUs a VCPU can use.  xm pincpu now takes a comma-separated list to
describe the bitmap, just as you listed in your example.

(hungerforce) root # xm pincpu debian_sarge_2 1 2,4-6
(hungerforce) root # xm list
Name              Id  Mem(MB)  CPU  VCPU(s)  State  Time(s)  Console
Domain-0           0      507    0        2  r----     12.8
debian_sarge_2     1      128    1        4  -b---      2.2  9601
(hungerforce) root # xm list -v debian_sarge_2
Name              Id  VCPU  CPU  CPUMAP
debian_sarge_2     1     0    1  0xffffffff
debian_sarge_2     1     1    0  0x00000074
debian_sarge_2     1     2    1  0xffffffff
debian_sarge_2     1     3    0  0xffffffff

The hypervisor now updates the exec_domain's cpumap value and then, for
now, just picks the first set bit from the new cpumap passed.  So folks
who want simple one-to-one pinning can still do things like:

    xm pincpu mydom 1 0

which pins vcpu 1 in mydom to cpu 0.

Please apply.

1. http://lists.xensource.com/archives/html/xen-devel/2005-04/msg00412.html

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com

diffstat output:
 tools/libxc/xc.h                          |    3 +-
 tools/libxc/xc_domain.c                   |    6 ++--
 tools/python/xen/lowlevel/xc/xc.c         |   27 +++++++++++-------
 tools/python/xen/xend/XendClient.py       |    4 +-
 tools/python/xen/xend/XendDomain.py       |    8 ++---
 tools/python/xen/xend/XendDomainInfo.py   |    3 +-
 tools/python/xen/xend/server/SrvDomain.py |    2 -
 tools/python/xen/xm/main.py               |   43 ++++++++++++++++++--------
 xen/arch/x86/domain.c                     |    1
 xen/common/dom0_ops.c                     |   18 +++++++++---
 xen/common/domain.c                       |    1
 xen/include/public/dom0_ops.h             |    7 ++--
 xen/include/xen/sched.h                   |    4 ++
 13 files changed, 87 insertions(+), 40 deletions(-)

Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
---
diff -urN b/tools/libxc/xc_domain.c c/tools/libxc/xc_domain.c
--- b/tools/libxc/xc_domain.c	2005-04-15 11:05:54.000000000 -0500
+++ c/tools/libxc/xc_domain.c	2005-04-15 14:37:16.000000000 -0500
@@ -65,13 +65,13 @@
 int xc_domain_pincpu(int xc_handle,
                      u32 domid,
                      int vcpu,
-                     int cpu)
+                     u32 cpumap)
 {
     dom0_op_t op;
     op.cmd = DOM0_PINCPUDOMAIN;
     op.u.pincpudomain.domain = (domid_t)domid;
     op.u.pincpudomain.exec_domain = vcpu;
-    op.u.pincpudomain.cpu = cpu;
+    op.u.pincpudomain.cpumap = cpumap;
     return do_dom0_op(xc_handle, &op);
 }
@@ -116,6 +116,8 @@
         info->vcpus = op.u.getdomaininfo.n_vcpu;
         memcpy(info->vcpu_to_cpu, &op.u.getdomaininfo.vcpu_to_cpu,
                MAX_VIRT_CPUS*sizeof(u32));
+        memcpy(info->cpumap, &op.u.getdomaininfo.cpumap,
+               MAX_VIRT_CPUS*sizeof(u32));
 
         next_domid = (u16)op.u.getdomaininfo.domain + 1;
         info++;
diff -urN b/tools/libxc/xc.h c/tools/libxc/xc.h
--- b/tools/libxc/xc.h	2005-04-15 11:05:54.000000000 -0500
+++ c/tools/libxc/xc.h	2005-04-15 14:18:44.000000000 -0500
@@ -87,6 +87,7 @@
     u64           cpu_time;
     unsigned long max_memkb;
     u32           vcpu_to_cpu[MAX_VIRT_CPUS];
+    u32           cpumap[MAX_VIRT_CPUS];
 } xc_dominfo_t;
 typedef dom0_getdomaininfo_t xc_domaininfo_t;
@@ -131,7 +132,7 @@
 int xc_domain_pincpu(int xc_handle,
                      u32 domid,
                      int vcpu,
-                     int cpu);
+                     u32 cpumap);
 
 /**
  * This function will return information about one or more domains.
  *
diff -urN b/tools/python/xen/lowlevel/xc/xc.c c/tools/python/xen/lowlevel/xc/xc.c
--- b/tools/python/xen/lowlevel/xc/xc.c	2005-04-15 11:05:54.000000000 -0500
+++ c/tools/python/xen/lowlevel/xc/xc.c	2005-04-15 14:38:28.000000000 -0500
@@ -129,15 +129,15 @@
     u32 dom;
     int vcpu = 0;
-    int cpu = -1;
+    unsigned long cpumap = 0xFFFFFFFF;
 
-    static char *kwd_list[] = { "dom", "vcpu", "cpu", NULL };
+    static char *kwd_list[] = { "dom", "vcpu", "cpumap", NULL };
 
     if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|ii", kwd_list,
-                                      &dom, &vcpu, &cpu) )
+                                      &dom, &vcpu, &cpumap) )
         return NULL;
 
-    if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, cpu) != 0 )
+    if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, cpumap) != 0 )
        return PyErr_SetFromErrno(xc_error);
 
     Py_INCREF(zero);
@@ -149,7 +149,7 @@
                                 PyObject *kwds)
 {
     XcObject *xc = (XcObject *)self;
-    PyObject *list, *vcpu_list, *info_dict;
+    PyObject *list, *vcpu_list, *cpumap_list, *info_dict;
 
     u32 first_dom = 0;
     int max_doms = 1024, nr_doms, i, j;
@@ -170,8 +170,13 @@
         vcpu_list = PyList_New(MAX_VIRT_CPUS);
-        for ( j = 0; j < MAX_VIRT_CPUS; j++ )
-            PyList_SetItem( vcpu_list, j, Py_BuildValue("i", info[i].vcpu_to_cpu[j]));
+        cpumap_list = PyList_New(MAX_VIRT_CPUS);
+        for ( j = 0; j < MAX_VIRT_CPUS; j++ ) {
+            PyList_SetItem( vcpu_list, j,
+                            Py_BuildValue("i", info[i].vcpu_to_cpu[j]));
+            PyList_SetItem( cpumap_list, j,
+                            Py_BuildValue("i", info[i].cpumap[j]));
+        }
 
         info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
                                   ",s:l,s:L,s:l,s:i}",
@@ -189,6 +194,7 @@
                                   "maxmem_kb",  info[i].max_memkb,
                                   "shutdown_reason", info[i].shutdown_reason);
         PyDict_SetItemString( info_dict, "vcpu_to_cpu", vcpu_list );
+        PyDict_SetItemString( info_dict, "cpumap", cpumap_list );
         PyList_SetItem( list, i, info_dict);
     }
@@ -884,9 +890,10 @@
     { "domain_pincpu",
      (PyCFunction)pyxc_domain_pincpu,
      METH_VARARGS | METH_KEYWORDS, "\n"
-      "Pin a domain to a specified CPU.\n"
-      " dom [int]:     Identifier of domain to be pinned.\n"
-      " cpu [int, -1]: CPU to pin to, or -1 to unpin\n\n"
+      "Pin a VCPU to a specified set of CPUs.\n"
+      " dom [int]:        Identifier of domain to which VCPU belongs.\n"
+      " vcpu [int, 0]:    VCPU being pinned.\n"
+      " cpumap [int, -1]: Bitmap of usable CPUs.\n\n"
      "Returns: [int] 0 on success; -1 on error.\n" },
 
     { "domain_getinfo",
diff -urN b/tools/python/xen/xend/server/SrvDomain.py c/tools/python/xen/xend/server/SrvDomain.py
--- b/tools/python/xen/xend/server/SrvDomain.py	2005-04-15 11:05:54.000000000 -0500
+++ c/tools/python/xen/xend/server/SrvDomain.py	2005-04-15 14:32:58.000000000 -0500
@@ -93,7 +93,7 @@
         fn = FormFn(self.xd.domain_pincpu,
                     [['dom', 'str'],
                      ['vcpu', 'int'],
-                     ['cpu', 'int']])
+                     ['cpumap', 'int']])
         val = fn(req.args, {'dom': self.dom.id})
         return val
diff -urN b/tools/python/xen/xend/XendClient.py c/tools/python/xen/xend/XendClient.py
--- b/tools/python/xen/xend/XendClient.py	2005-04-15 11:05:54.000000000 -0500
+++ c/tools/python/xen/xend/XendClient.py	2005-04-15 11:31:05.000000000 -0500
@@ -246,11 +246,11 @@
                               'live'     : live,
                               'resource' : resource })
 
-    def xend_domain_pincpu(self, id, vcpu, cpu):
+    def xend_domain_pincpu(self, id, vcpu, cpumap):
         return self.xendPost(self.domainurl(id),
                              {'op'     : 'pincpu',
                               'vcpu'   : vcpu,
-                              'cpu'    : cpu })
+                              'cpumap' : cpumap })
 
     def xend_domain_cpu_bvt_set(self, id, mcuadv, warpback, warpvalue, warpl, warpu):
         return self.xendPost(self.domainurl(id),
diff -urN b/tools/python/xen/xend/XendDomainInfo.py c/tools/python/xen/xend/XendDomainInfo.py
--- b/tools/python/xen/xend/XendDomainInfo.py	2005-04-15 11:05:54.000000000 -0500
+++ c/tools/python/xen/xend/XendDomainInfo.py	2005-04-15 11:35:05.000000000 -0500
@@ -373,6 +373,7 @@
         sxpr.append(['cpu', self.info['cpu']])
         sxpr.append(['cpu_time', self.info['cpu_time']/1e9])
         sxpr.append(['vcpus', self.info['vcpus']])
+        sxpr.append(['cpumap', self.info['cpumap']])
         sxpr.append(['vcpu_to_cpu', ''.join(map(lambda x: str(x),
                     self.info['vcpu_to_cpu'][0:self.info['vcpus']]))])
@@ -452,7 +453,7 @@
             raise VmError('missing memory size')
         cpu = sxp.child_value(config, 'cpu')
         if self.recreate and self.dom and cpu is not None:
-            xc.domain_pincpu(self.dom, int(cpu))
+            xc.domain_pincpu(self.dom, 0, 1<<int(cpu))
         try:
             image = sxp.child_value(self.config, 'image')
             self.vcpus = int(sxp.child_value(image, 'vcpus'))
diff -urN b/tools/python/xen/xend/XendDomain.py c/tools/python/xen/xend/XendDomain.py
--- b/tools/python/xen/xend/XendDomain.py	2005-04-15 11:05:54.000000000 -0500
+++ c/tools/python/xen/xend/XendDomain.py	2005-04-15 11:33:21.000000000 -0500
@@ -610,16 +610,16 @@
         xmigrate = XendMigrate.instance()
         return xmigrate.save_begin(dominfo, dst)
 
-    def domain_pincpu(self, id, vcpu, cpu):
-        """Pin a vcpu in a domain to a cpu.
+    def domain_pincpu(self, id, vcpu, cpumap):
+        """Set which cpus vcpu can use
 
         @param id:   domain
         @param vcpu: vcpu number
-        @param cpu:  cpu number
+        @param cpumap: bitmap of usable cpus
         """
         dominfo = self.domain_lookup(id)
         try:
-            return xc.domain_pincpu(int(dominfo.id), vcpu, cpu)
+            return xc.domain_pincpu(int(dominfo.id), vcpu, cpumap)
         except Exception, ex:
             raise XendError(str(ex))
diff -urN b/tools/python/xen/xm/main.py c/tools/python/xen/xm/main.py
--- b/tools/python/xen/xm/main.py	2005-04-15 11:05:54.000000000 -0500
+++ c/tools/python/xen/xm/main.py	2005-04-15 15:16:19.137184088 -0500
@@ -6,6 +6,8 @@
 import sys
 from getopt import getopt
 import socket
+import warnings
+warnings.filterwarnings('ignore', category=FutureWarning)
 
 from xen.xend import PrettyPrint
 from xen.xend import sxp
@@ -401,19 +403,21 @@
                % d)
 
     def show_vcpus(self, doms):
-        print 'Name              Id  VCPU  CPU'
+        print 'Name              Id  VCPU  CPU  CPUMAP'
         for dom in doms:
             info = server.xend_domain(dom)
             vcpu_to_cpu = sxp.child_value(info, 'vcpu_to_cpu', '?').replace('-','')
+            cpumap = sxp.child_value(info, 'cpumap', [])
             count = 0
             for cpu in vcpu_to_cpu:
                 d = {}
-                d['name'] = sxp.child_value(info, 'name', '??')
-                d['dom'] = int(sxp.child_value(info, 'id', '-1'))
-                d['vcpu'] = int(count)
-                d['cpu'] = int(cpu)
+                d['name']   = sxp.child_value(info, 'name', '??')
+                d['dom']    = int(sxp.child_value(info, 'id', '-1'))
+                d['vcpu']   = int(count)
+                d['cpu']    = int(cpu)
+                d['cpumap'] = int(cpumap[count])
                 count = count + 1
-                print ("%(name)-16s %(dom)3d %(vcpu)4d %(cpu)3d" % d)
+                print ("%(name)-16s %(dom)3d %(vcpu)4d %(cpu)3d 0x%(cpumap)08x" % d)
 
     def long_list(self, doms):
         for dom in doms:
@@ -496,18 +500,35 @@
 class ProgPincpu(Prog):
     group = 'domain'
     name = "pincpu"
-    info = """Pin a vcpu to a cpu. """
+    info = """Set which cpus a VCPU can use. """
 
     def help(self, args):
-        print args[0],'DOM VCPU CPU'
-        print '\nPin vcpu VCPU in domain DOM to cpu CPU.'
+        print args[0],'DOM VCPU CPUS'
+        print '\nSet which cpus VCPU in domain DOM can use.'
+
+    # convert list of cpus to bitmap integer value
+    def make_map(self, cpulist):
+        cpus = []
+        cpumap = 0
+        for c in cpulist.split(','):
+            if len(c) > 1:
+                (x,y) = c.split('-')
+                for i in range(int(x),int(y)+1):
+                    cpus.append(int(i))
+            else:
+                cpus.append(int(c))
+        cpus.sort()
+        for c in cpus:
+            cpumap = cpumap | 1<<c
+
+        return cpumap
 
     def main(self, args):
         if len(args) != 4: self.err("%s: Invalid argument(s)" % args[0])
         dom = args[1]
         vcpu = int(args[2])
-        cpu = int(args[3])
-        server.xend_domain_pincpu(dom, vcpu, cpu)
+        cpumap = self.make_map(args[3]);
+        server.xend_domain_pincpu(dom, vcpu, cpumap)
 
 xm.prog(ProgPincpu)
diff -urN b/xen/arch/x86/domain.c c/xen/arch/x86/domain.c
--- b/xen/arch/x86/domain.c	2005-04-14 22:08:58.000000000 -0500
+++ c/xen/arch/x86/domain.c	2005-04-15 13:38:08.000000000 -0500
@@ -240,6 +240,7 @@
     d->shared_info = (void *)alloc_xenheap_page();
     memset(d->shared_info, 0, PAGE_SIZE);
     ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
+    ed->cpumap = CPUMAP_RUNANYWHERE;
     SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
     machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
                             PAGE_SHIFT] = INVALID_M2P_ENTRY;
diff -urN b/xen/common/dom0_ops.c c/xen/common/dom0_ops.c
--- b/xen/common/dom0_ops.c	2005-04-15 11:05:54.000000000 -0500
+++ c/xen/common/dom0_ops.c	2005-04-15 14:17:01.000000000 -0500
@@ -234,7 +234,7 @@
         domid_t dom = op->u.pincpudomain.domain;
         struct domain *d = find_domain_by_id(dom);
         struct exec_domain *ed;
-        int cpu = op->u.pincpudomain.cpu;
+        u32 cpumap = op->u.pincpudomain.cpumap;
 
         if ( d == NULL )
         {
@@ -264,17 +264,23 @@
             break;
         }
 
-        if ( cpu == -1 )
+        /* update cpumap for this ed */
+        ed->cpumap = cpumap;
+
+        if ( cpumap == CPUMAP_RUNANYWHERE )
         {
             clear_bit(EDF_CPUPINNED, &ed->ed_flags);
         }
         else
         {
+            /* pick a new cpu from the usable map */
+            int new_cpu = (int)find_first_set_bit(cpumap) % smp_num_cpus;
+
             exec_domain_pause(ed);
-            if ( ed->processor != (cpu % smp_num_cpus) )
+            if ( ed->processor != new_cpu )
                 set_bit(EDF_MIGRATED, &ed->ed_flags);
             set_bit(EDF_CPUPINNED, &ed->ed_flags);
-            ed->processor = cpu % smp_num_cpus;
+            ed->processor = new_cpu;
             exec_domain_unpause(ed);
         }
 
@@ -329,8 +335,10 @@
         }
 
         memset(&op->u.getdomaininfo.vcpu_to_cpu,-1,MAX_VIRT_CPUS*sizeof(u8));
-        for_each_exec_domain ( d, ed )
+        for_each_exec_domain ( d, ed ) {
             op->u.getdomaininfo.vcpu_to_cpu[ed->eid] = ed->processor;
+            op->u.getdomaininfo.cpumap[ed->eid] = ed->cpumap;
+        }
 
         ed = d->exec_domain[op->u.getdomaininfo.exec_domain];
diff -urN b/xen/common/domain.c c/xen/common/domain.c
--- b/xen/common/domain.c	2005-04-14 22:08:58.000000000 -0500
+++ c/xen/common/domain.c	2005-04-15 15:00:02.000000000 -0500
@@ -320,6 +320,7 @@
 
     ed = d->exec_domain[vcpu];
     atomic_set(&ed->pausecnt, 0);
+    ed->cpumap = CPUMAP_RUNANYWHERE;
 
     memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
diff -urN b/xen/include/public/dom0_ops.h c/xen/include/public/dom0_ops.h
--- b/xen/include/public/dom0_ops.h	2005-04-15 11:05:54.000000000 -0500
+++ c/xen/include/public/dom0_ops.h	2005-04-15 14:14:48.000000000 -0500
@@ -93,7 +93,8 @@
     memory_t shared_info_frame;  /* MFN of shared_info struct */
     u64      cpu_time;
     u32      n_vcpu;
-    u32      vcpu_to_cpu[MAX_VIRT_CPUS];
+    u32      vcpu_to_cpu[MAX_VIRT_CPUS];  /* current mapping   */
+    u32      cpumap[MAX_VIRT_CPUS];       /* allowable mapping */
 } dom0_getdomaininfo_t;
 
 #define DOM0_SETDOMAININFO 13
@@ -176,14 +177,14 @@
 } dom0_readconsole_t;
 
 /*
- * Pin Domain to a particular CPU  (use -1 to unpin)
+ * Set which cpus an exec_domain can use
 */
#define DOM0_PINCPUDOMAIN     20
typedef struct {
    /* IN variables. */
    domid_t      domain;
    u16          exec_domain;
-    s32          cpu;             /* -1 implies unpin */
+    u32          cpumap;
} dom0_pincpudomain_t;

/* Get trace buffers machine base address */
diff -urN b/xen/include/xen/sched.h c/xen/include/xen/sched.h
--- b/xen/include/xen/sched.h	2005-04-14 22:08:56.000000000 -0500
+++ c/xen/include/xen/sched.h	2005-04-15 11:52:18.000000000 -0500
@@ -58,6 +58,8 @@
 void destroy_event_channels(struct domain *d);
 int  init_exec_domain_event_channels(struct exec_domain *ed);
 
+#define CPUMAP_RUNANYWHERE 0xFFFFFFFF
+
 struct exec_domain
 {
     u32 processor;
@@ -83,6 +85,8 @@
 
     atomic_t pausecnt;
 
+    u32 cpumap;  /* which cpus this domain can run on */
+
     struct arch_exec_domain arch;
 };
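As a sanity check of the bitmap arithmetic in the session above,
'2,4-6' expands to CPUs 2, 4, 5 and 6, and setting those four bits
yields exactly the 0x00000074 shown for VCPU 1 in the xm list -v
output:

    # Worked example of the make_map() conversion from the patch above.
    cpumap = 0
    for c in [2, 4, 5, 6]:          # "2,4-6" expanded
        cpumap = cpumap | (1 << c)
    print hex(cpumap)               # 0x74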