Ryan Harper
2005-Apr-26 22:31 UTC
[Xen-devel] [PATCH] xen,tools: pincpu use vcpu and cpumap_t
Here is an update to the previous pincpu patches. This patch updates
the pincpu dom0op to use a cpumap_t pointer, rather than specifying a
size in the interface. cpumap_t is currently typedef'ed to an unsigned
long. xm list -v now truncates the cpumap field to show only the bits
of the map that are relevant. That is, if you have four VCPUs, you will
only see the first four bits of the cpumap.

This patch compiles against the current unstable tree, but I could not
test it there, as the current unstable tree crashes dom0 when booting
domU. I back-ported the patch to the 20050420 snapshot and it worked
there.

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com

diffstat output:
 tools/libxc/xc.h                          |    6 ++
 tools/libxc/xc_domain.c                   |   16 +++++-
 tools/python/xen/lowlevel/xc/xc.c         |   71 ++++++++++++++++++------------
 tools/python/xen/xend/XendClient.py       |    5 +-
 tools/python/xen/xend/XendDomain.py       |   11 ++--
 tools/python/xen/xend/XendDomainInfo.py   |    6 ++
 tools/python/xen/xend/server/SrvDomain.py |    3 -
 tools/python/xen/xend/server/SrvUsbif.py  |    1
 tools/python/xen/xm/main.py               |   67 +++++++++++++++++++++++-----
 xen/arch/x86/domain.c                     |    1
 xen/common/dom0_ops.c                     |   28 ++++++++++-
 xen/common/domain.c                       |    1
 xen/include/public/dom0_ops.h             |    7 ++
 xen/include/public/xen.h                  |    2
 xen/include/xen/sched.h                   |    4 +
 15 files changed, 170 insertions(+), 59 deletions(-)

Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
---
diff -urN a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c 2005-04-25 22:14:43.000000000 -0500
+++ b/tools/libxc/xc_domain.c 2005-04-26 11:05:48.183936000 -0500
@@ -16,6 +16,8 @@
 {
     int err, errno_saved;
     dom0_op_t op;
+    u32 vcpu = 0; /* FIXME, hard coded initial pin to vcpu 0 */
+    cpumap_t cpumap = 1<<cpu;

     op.cmd = DOM0_CREATEDOMAIN;
     op.u.createdomain.domain = (domid_t)*pdomid;
@@ -25,7 +27,7 @@
     *pdomid = (u16)op.u.createdomain.domain;

     if ( (cpu != -1) &&
-         ((err = xc_domain_pincpu(xc_handle, *pdomid, cpu)) != 0) )
+         ((err = xc_domain_pincpu(xc_handle, *pdomid, vcpu, &cpumap)) != 0) )
         goto fail;

     if ( (err = xc_domain_setcpuweight(xc_handle, *pdomid, cpu_weight)) != 0 )
@@ -84,13 +86,14 @@

 int xc_domain_pincpu(int xc_handle,
                      u32 domid,
-                     int cpu)
+                     int vcpu,
+                     cpumap_t *cpumap)
 {
     dom0_op_t op;
     op.cmd = DOM0_PINCPUDOMAIN;
     op.u.pincpudomain.domain = (domid_t)domid;
-    op.u.pincpudomain.exec_domain = 0;
-    op.u.pincpudomain.cpu = cpu;
+    op.u.pincpudomain.exec_domain = vcpu;
+    op.u.pincpudomain.cpumap = cpumap;
     return do_dom0_op(xc_handle, &op);
 }
@@ -132,6 +135,11 @@
         info->max_memkb = op.u.getdomaininfo.max_pages<<(PAGE_SHIFT);
         info->shared_info_frame = op.u.getdomaininfo.shared_info_frame;
         info->cpu_time = op.u.getdomaininfo.cpu_time;
+        info->vcpus = op.u.getdomaininfo.n_vcpu;
+        memcpy(info->vcpu_to_cpu, &op.u.getdomaininfo.vcpu_to_cpu,
+               MAX_VIRT_CPUS*sizeof(u32));
+        memcpy(info->cpumap, &op.u.getdomaininfo.cpumap,
+               MAX_VIRT_CPUS*sizeof(cpumap_t));

         next_domid = (u16)op.u.getdomaininfo.domain + 1;
         info++;
diff -urN a/tools/libxc/xc.h b/tools/libxc/xc.h
--- a/tools/libxc/xc.h 2005-04-25 22:14:41.000000000 -0500
+++ b/tools/libxc/xc.h 2005-04-26 10:19:05.894948992 -0500
@@ -78,6 +78,7 @@
 typedef struct {
     u32 domid;
     unsigned int cpu;
+    unsigned int vcpus;
     unsigned int dying:1, crashed:1, shutdown:1,
                  paused:1, blocked:1, running:1;
     unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
@@ -85,6 +86,8 @@
     unsigned long shared_info_frame;
     u64 cpu_time;
     unsigned long max_memkb;
+    u32 vcpu_to_cpu[MAX_VIRT_CPUS];
+    cpumap_t cpumap[MAX_VIRT_CPUS];
 } xc_dominfo_t;

 typedef dom0_getdomaininfo_t xc_domaininfo_t;
@@ -128,7 +131,8 @@
                        u32 domid);
 int xc_domain_pincpu(int xc_handle,
                      u32 domid,
-                     int cpu);
+                     int vcpu,
+                     cpumap_t *cpumap);
 /**
  * This function will return information about one or more domains.
  *
diff -urN a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c 2005-04-25 22:14:38.000000000 -0500
+++ b/tools/python/xen/lowlevel/xc/xc.c 2005-04-26 10:22:51.113710536 -0500
@@ -128,15 +128,16 @@
     XcObject *xc = (XcObject *)self;

     u32 dom;
-    int cpu = -1;
+    int vcpu = 0;
+    cpumap_t cpumap = 0xFFFFFFFF;

-    static char *kwd_list[] = { "dom", "cpu", NULL };
+    static char *kwd_list[] = { "dom", "vcpu", "cpumap", NULL };

-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
-                                      &dom, &cpu) )
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|ii", kwd_list,
+                                      &dom, &vcpu, &cpumap) )
         return NULL;

-    if ( xc_domain_pincpu(xc->xc_handle, dom, cpu) != 0 )
+    if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, &cpumap) != 0 )
         return PyErr_SetFromErrno(xc_error);

     Py_INCREF(zero);
@@ -148,10 +149,10 @@
                                     PyObject *kwds)
 {
     XcObject *xc = (XcObject *)self;
-    PyObject *list;
+    PyObject *list, *vcpu_list, *cpumap_list, *info_dict;

     u32 first_dom = 0;
-    int max_doms = 1024, nr_doms, i;
+    int max_doms = 1024, nr_doms, i, j;
     xc_dominfo_t *info;

     static char *kwd_list[] = { "first_dom", "max_doms", NULL };
@@ -168,23 +169,34 @@
     list = PyList_New(nr_doms);
     for ( i = 0 ; i < nr_doms; i++ )
     {
-        PyList_SetItem(
-            list, i,
-            Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
-                          ",s:l,s:L,s:l,s:i}",
-                          "dom", info[i].domid,
-                          "cpu", info[i].cpu,
-                          "dying", info[i].dying,
-                          "crashed", info[i].crashed,
-                          "shutdown", info[i].shutdown,
-                          "paused", info[i].paused,
-                          "blocked", info[i].blocked,
-                          "running", info[i].running,
-                          "mem_kb", info[i].nr_pages*4,
-                          "cpu_time", info[i].cpu_time,
-                          "maxmem_kb", info[i].max_memkb,
-                          "shutdown_reason", info[i].shutdown_reason
-                ));
+        vcpu_list = PyList_New(MAX_VIRT_CPUS);
+        cpumap_list = PyList_New(MAX_VIRT_CPUS);
+        for ( j = 0; j < MAX_VIRT_CPUS; j++ ) {
+            PyList_SetItem( vcpu_list, j,
+                            Py_BuildValue("i", info[i].vcpu_to_cpu[j]));
+            PyList_SetItem( cpumap_list, j,
+                            Py_BuildValue("i", info[i].cpumap[j]));
+        }
+
+        info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
+                                  ",s:l,s:L,s:l,s:i}",
+                                  "dom", info[i].domid,
+                                  "cpu", info[i].cpu,
+                                  "vcpus", info[i].vcpus,
+                                  "dying", info[i].dying,
+                                  "crashed", info[i].crashed,
+                                  "shutdown", info[i].shutdown,
+                                  "paused", info[i].paused,
+                                  "blocked", info[i].blocked,
+                                  "running", info[i].running,
+                                  "mem_kb", info[i].nr_pages*4,
+                                  "cpu_time", info[i].cpu_time,
+                                  "maxmem_kb", info[i].max_memkb,
+                                  "shutdown_reason", info[i].shutdown_reason);
+        PyDict_SetItemString( info_dict, "vcpu_to_cpu", vcpu_list );
+        PyDict_SetItemString( info_dict, "cpumap", cpumap_list );
+        PyList_SetItem( list, i, info_dict);
+    }

     free(info);
@@ -878,9 +890,10 @@
     { "domain_pincpu",
       (PyCFunction)pyxc_domain_pincpu,
       METH_VARARGS | METH_KEYWORDS, "\n"
-      "Pin a domain to a specified CPU.\n"
-      " dom [int]: Identifier of domain to be pinned.\n"
-      " cpu [int, -1]: CPU to pin to, or -1 to unpin\n\n"
+      "Pin a VCPU to a specified set CPUs.\n"
+      " dom [int]: Identifier of domain to which VCPU belongs.\n"
+      " vcpu [int, 0]: VCPU being pinned.\n"
+      " cpumap [int, -1]: Bitmap of usable CPUs.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },

     { "domain_getinfo",
@@ -895,6 +908,7 @@
       " domain-id space was reached.\n"
       " dom [int]: Identifier of domain to which this info pertains\n"
       " cpu [int]: CPU to which this domain is bound\n"
+      " vcpus [int]: Number of Virtual CPUS in this domain\n"
       " dying [int]: Bool - is the domain dying?\n"
       " crashed [int]: Bool - has the domain crashed?\n"
       " shutdown [int]: Bool - has the domain shut itself down?\n"
@@ -905,7 +919,8 @@
       " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
       " cpu_time [long]: CPU time consumed, in nanoseconds\n"
       " shutdown_reason [int]: Numeric code from guest OS, explaining "
-      "reason why it shut itself down.\n" },
+      "reason why it shut itself down.\n"
+      " vcpu_to_cpu [[int]]: List that maps VCPUS to CPUS\n" },

     { "linux_save",
       (PyCFunction)pyxc_linux_save,
diff -urN a/tools/python/xen/xend/server/SrvDomain.py b/tools/python/xen/xend/server/SrvDomain.py
--- a/tools/python/xen/xend/server/SrvDomain.py 2005-04-25 22:14:42.000000000 -0500
+++ b/tools/python/xen/xend/server/SrvDomain.py 2005-04-26 10:12:31.130962240 -0500
@@ -92,7 +92,8 @@
     def op_pincpu(self, op, req):
         fn = FormFn(self.xd.domain_pincpu,
                     [['dom', 'str'],
-                     ['cpu', 'int']])
+                     ['vcpu', 'int'],
+                     ['cpumap', 'int']])
         val = fn(req.args, {'dom': self.dom.id})
         return val
diff -urN a/tools/python/xen/xend/server/SrvUsbif.py b/tools/python/xen/xend/server/SrvUsbif.py
--- a/tools/python/xen/xend/server/SrvUsbif.py 2005-04-25 22:14:46.000000000 -0500
+++ b/tools/python/xen/xend/server/SrvUsbif.py 2005-04-26 10:11:54.490532432 -0500
@@ -107,6 +107,7 @@
     def op_pincpu(self, op, req):
         fn = FormFn(self.xd.domain_pincpu,
                     [['dom', 'str'],
+                     ['vcpu', 'int'],
                      ['cpu', 'int']])
         val = fn(req.args, {'dom': self.dom.id})
         return val
diff -urN a/tools/python/xen/xend/XendClient.py b/tools/python/xen/xend/XendClient.py
--- a/tools/python/xen/xend/XendClient.py 2005-04-25 22:14:37.000000000 -0500
+++ b/tools/python/xen/xend/XendClient.py 2005-04-26 10:12:31.140960720 -0500
@@ -246,10 +246,11 @@
                               'live'     : live,
                               'resource' : resource })

-    def xend_domain_pincpu(self, id, cpu):
+    def xend_domain_pincpu(self, id, vcpu, cpumap):
         return self.xendPost(self.domainurl(id),
                              {'op'     : 'pincpu',
-                              'cpu'    : cpu })
+                              'vcpu'   : vcpu,
+                              'cpumap' : cpumap })

     def xend_domain_cpu_bvt_set(self, id, mcuadv, warpback,
                                 warpvalue, warpl, warpu):
         return self.xendPost(self.domainurl(id),
diff -urN a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py 2005-04-25 22:14:38.000000000 -0500
+++ b/tools/python/xen/xend/XendDomainInfo.py 2005-04-26 10:12:31.142960416 -0500
@@ -372,6 +372,10 @@
             sxpr.append(['shutdown_reason', reason])
         sxpr.append(['cpu', self.info['cpu']])
         sxpr.append(['cpu_time', self.info['cpu_time']/1e9])
+        sxpr.append(['vcpus', self.info['vcpus']])
+        sxpr.append(['cpumap', self.info['cpumap']])
+        sxpr.append(['vcpu_to_cpu', ''.join(map(lambda x: str(x),
+                    self.info['vcpu_to_cpu'][0:self.info['vcpus']]))])

         if self.start_time:
             up_time = time.time() - self.start_time
@@ -449,7 +453,7 @@
             raise VmError('missing memory size')
         cpu = sxp.child_value(config, 'cpu')
         if self.recreate and self.dom and cpu is not None:
-            xc.domain_pincpu(self.dom, int(cpu))
+            xc.domain_pincpu(self.dom, 0, 1<<int(cpu))
         try:
             image = sxp.child_value(self.config, 'image')
             self.vcpus = int(sxp.child_value(image, 'vcpus'))
diff -urN a/tools/python/xen/xend/XendDomain.py b/tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py 2005-04-25 22:14:38.000000000 -0500
+++ b/tools/python/xen/xend/XendDomain.py 2005-04-26 10:12:31.151959048 -0500
@@ -610,15 +610,16 @@
         xmigrate = XendMigrate.instance()
         return xmigrate.save_begin(dominfo, dst)

-    def domain_pincpu(self, id, cpu):
-        """Pin a domain to a cpu.
+    def domain_pincpu(self, id, vcpu, cpumap):
+        """Set which cpus vcpu can use

-        @param id: domain
-        @param cpu: cpu number
+        @param id: domain
+        @param vcpu: vcpu number
+        @param cpumap: bitmap of usbale cpus
         """
         dominfo = self.domain_lookup(id)
         try:
-            return xc.domain_pincpu(int(dominfo.id), cpu)
+            return xc.domain_pincpu(int(dominfo.id), vcpu, cpumap)
         except Exception, ex:
             raise XendError(str(ex))
diff -urN a/tools/python/xen/xm/main.py b/tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py 2005-04-25 22:14:43.000000000 -0500
+++ b/tools/python/xen/xm/main.py 2005-04-26 10:50:57.373360104 -0500
@@ -6,6 +6,8 @@
 import sys
 from getopt import getopt
 import socket
+import warnings
+warnings.filterwarnings('ignore', category=FutureWarning)

 from xen.xend import PrettyPrint
 from xen.xend import sxp
@@ -340,8 +342,8 @@
     name = "list"
     info = """List information about domains."""

-    short_options = 'l'
-    long_options = ['long']
+    short_options = 'lv'
+    long_options = ['long','vcpus']

     def help(self, args):
         if help:
@@ -350,11 +352,13 @@
         Either all domains or the domains given.

         -l, --long   Get more detailed information.
+        -v, --vcpus  Show VCPU to CPU mapping.
        """
        return

     def main(self, args):
         use_long = 0
+        show_vcpus = 0
         (options, params) = getopt(args[1:],
                                    self.short_options,
                                    self.long_options)
@@ -362,6 +366,8 @@
         for (k, v) in options:
             if k in ['-l', '--long']:
                 use_long = 1
+            if k in ['-v', '--vcpus']:
+                show_vcpus = 1

         if n == 0:
             doms = server.xend_domains()
@@ -371,11 +377,13 @@

         if use_long:
             self.long_list(doms)
+        elif show_vcpus:
+            self.show_vcpus(doms)
         else:
             self.brief_list(doms)

     def brief_list(self, doms):
-        print 'Name              Id  Mem(MB)  CPU  State  Time(s)  Console'
+        print 'Name              Id  Mem(MB)  CPU  VCPU(s)  State  Time(s)  Console'
         for dom in doms:
             info = server.xend_domain(dom)
             d = {}
@@ -383,6 +391,7 @@
             d['name'] = sxp.child_value(info, 'name', '??')
             d['mem'] = int(sxp.child_value(info, 'memory', '0'))
             d['cpu'] = int(sxp.child_value(info, 'cpu', '0'))
+            d['vcpus'] = int(sxp.child_value(info, 'vcpus', '0'))
             d['state'] = sxp.child_value(info, 'state', '??')
             d['cpu_time'] = float(sxp.child_value(info, 'cpu_time', '0'))
             console = sxp.child(info, 'console')
@@ -390,9 +399,27 @@
                 d['port'] = sxp.child_value(console, 'console_port')
             else:
                 d['port'] = ''
-            print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(state)5s %(cpu_time)7.1f %(port)4s"
+            print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(vcpus)5d %(state)5s %(cpu_time)7.1f %(port)4s"
                    % d)

+    def show_vcpus(self, doms):
+        print 'Name              Id  VCPU  CPU  CPUMAP'
+        for dom in doms:
+            info = server.xend_domain(dom)
+            vcpu_to_cpu = sxp.child_value(info, 'vcpu_to_cpu', '?').replace('-','')
+            cpumap = sxp.child_value(info, 'cpumap', [])
+            mask = ((int(sxp.child_value(info, 'vcpus', '0')))**2) - 1
+            count = 0
+            for cpu in vcpu_to_cpu:
+                d = {}
+                d['name'] = sxp.child_value(info, 'name', '??')
+                d['dom'] = int(sxp.child_value(info, 'id', '-1'))
+                d['vcpu'] = int(count)
+                d['cpu'] = int(cpu)
+                d['cpumap'] = int(cpumap[count])&mask
+                count = count + 1
+                print ("%(name)-16s %(dom)3d %(vcpu)4d %(cpu)3d 0x%(cpumap)x"
+                       % d)
+
     def long_list(self, doms):
         for dom in doms:
             info = server.xend_domain(dom)
@@ -474,17 +501,35 @@
 class ProgPincpu(Prog):
     group = 'domain'
     name = "pincpu"
-    info = """Pin a domain to a cpu. """
+    info = """Set which cpus a VCPU can use. """

     def help(self, args):
-        print args[0],'DOM CPU'
-        print '\nPin domain DOM to cpu CPU.'
+        print args[0],'DOM VCPU CPUS'
+        print '\nSet which cpus VCPU in domain DOM can use.'
+
+    # convert list of cpus to bitmap integer value
+    def make_map(self, cpulist):
+        cpus = []
+        cpumap = 0
+        for c in cpulist.split(','):
+            if len(c) > 1:
+                (x,y) = c.split('-')
+                for i in range(int(x),int(y)+1):
+                    cpus.append(int(i))
+            else:
+                cpus.append(int(c))
+        cpus.sort()
+        for c in cpus:
+            cpumap = cpumap | 1<<c
+
+        return cpumap

     def main(self, args):
-        if len(args) != 3: self.err("%s: Invalid argument(s)" % args[0])
-        dom = args[1]
-        cpu = int(args[2])
-        server.xend_domain_pincpu(dom, cpu)
+        if len(args) != 4: self.err("%s: Invalid argument(s)" % args[0])
+        dom = args[1]
+        vcpu = int(args[2])
+        cpumap = self.make_map(args[3]);
+        server.xend_domain_pincpu(dom, vcpu, cpumap)

 xm.prog(ProgPincpu)
diff -urN a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-04-25 22:14:40.000000000 -0500
+++ b/xen/arch/x86/domain.c 2005-04-26 10:12:31.164957072 -0500
@@ -248,6 +248,7 @@
         d->shared_info = (void *)alloc_xenheap_page();
         memset(d->shared_info, 0, PAGE_SIZE);
         ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
+        ed->cpumap = CPUMAP_RUNANYWHERE;
         SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
         machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
                                 PAGE_SHIFT] = INVALID_M2P_ENTRY;
diff -urN a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c 2005-04-25 22:14:38.000000000 -0500
+++ b/xen/common/dom0_ops.c 2005-04-26 10:59:36.036511320 -0500
@@ -222,7 +222,7 @@
         domid_t dom = op->u.pincpudomain.domain;
         struct domain *d = find_domain_by_id(dom);
         struct exec_domain *ed;
-        int cpu = op->u.pincpudomain.cpu;
+        cpumap_t *cpumap = op->u.pincpudomain.cpumap;

         if ( d == NULL )
         {
@@ -230,6 +230,13 @@
             break;
         }

+        if ( (op->u.pincpudomain.exec_domain >= MAX_VIRT_CPUS) ||
+             !d->exec_domain[op->u.pincpudomain.exec_domain] )
+        {
+            ret = -EINVAL;
+            break;
+        }
+
         ed = d->exec_domain[op->u.pincpudomain.exec_domain];
         if ( ed == NULL )
         {
@@ -245,17 +252,23 @@
             break;
         }

-        if ( cpu == -1 )
+        /* update cpumap for this ed */
+        ed->cpumap = *(cpumap);
+
+        if ( *(cpumap) == CPUMAP_RUNANYWHERE )
         {
             clear_bit(EDF_CPUPINNED, &ed->ed_flags);
         }
         else
         {
+            /* pick a new cpu from the usable map */
+            int new_cpu = (int)find_first_set_bit(*(cpumap)) % smp_num_cpus;
+
             exec_domain_pause(ed);
-            if ( ed->processor != (cpu % smp_num_cpus) )
+            if ( ed->processor != new_cpu )
                 set_bit(EDF_MIGRATED, &ed->ed_flags);
             set_bit(EDF_CPUPINNED, &ed->ed_flags);
-            ed->processor = cpu % smp_num_cpus;
+            ed->processor = new_cpu;
             exec_domain_unpause(ed);
         }
@@ -309,6 +322,12 @@
             break;
         }

+        memset(&op->u.getdomaininfo.vcpu_to_cpu,-1,MAX_VIRT_CPUS*sizeof(u8));
+        for_each_exec_domain ( d, ed ) {
+            op->u.getdomaininfo.vcpu_to_cpu[ed->eid] = ed->processor;
+            op->u.getdomaininfo.cpumap[ed->eid] = ed->cpumap;
+        }
+
         ed = d->exec_domain[op->u.getdomaininfo.exec_domain];

         op->u.getdomaininfo.flags
@@ -326,6 +345,7 @@
         op->u.getdomaininfo.tot_pages = d->tot_pages;
         op->u.getdomaininfo.max_pages = d->max_pages;
         op->u.getdomaininfo.cpu_time  = ed->cpu_time;
+        op->u.getdomaininfo.n_vcpu    = d->shared_info->n_vcpu;
         op->u.getdomaininfo.shared_info_frame =
             __pa(d->shared_info) >> PAGE_SHIFT;
diff -urN a/xen/common/domain.c b/xen/common/domain.c
--- a/xen/common/domain.c 2005-04-25 22:14:41.000000000 -0500
+++ b/xen/common/domain.c 2005-04-26 10:12:31.308935184 -0500
@@ -295,6 +295,7 @@

     ed = d->exec_domain[vcpu];
     atomic_set(&ed->pausecnt, 0);
+    ed->cpumap = CPUMAP_RUNANYWHERE;

     memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
diff -urN a/xen/include/public/dom0_ops.h b/xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h 2005-04-25 22:14:44.000000000 -0500
+++ b/xen/include/public/dom0_ops.h 2005-04-26 10:17:26.558050496 -0500
@@ -88,6 +88,9 @@
     memory_t max_pages;
     memory_t shared_info_frame;  /* MFN of shared_info struct */
     u64      cpu_time;
+    u32      n_vcpu;
+    u32      vcpu_to_cpu[MAX_VIRT_CPUS];  /* current mapping   */
+    cpumap_t cpumap[MAX_VIRT_CPUS];       /* allowable mapping */
 } dom0_getdomaininfo_t;

 #define DOM0_SETDOMAININFO      13
@@ -170,14 +173,14 @@
 } dom0_readconsole_t;

 /*
- * Pin Domain to a particular CPU  (use -1 to unpin)
+ * Set which cpus an exec_domain can use
 */
 #define DOM0_PINCPUDOMAIN     20
 typedef struct {
     /* IN variables. */
     domid_t      domain;
     u16          exec_domain;
-    s32          cpu;             /* -1 implies unpin */
+    cpumap_t     *cpumap;
 } dom0_pincpudomain_t;

 /* Get trace buffers machine base address */
diff -urN a/xen/include/public/xen.h b/xen/include/public/xen.h
--- a/xen/include/public/xen.h 2005-04-25 22:14:40.000000000 -0500
+++ b/xen/include/public/xen.h 2005-04-26 10:54:26.318595600 -0500
@@ -473,6 +473,8 @@
 /* For use in guest OSes. */
 extern shared_info_t *HYPERVISOR_shared_info;

+typedef unsigned long cpumap_t;
+
 #endif /* !__ASSEMBLY__ */

 #endif /* __XEN_PUBLIC_XEN_H__ */
diff -urN a/xen/include/xen/sched.h b/xen/include/xen/sched.h
--- a/xen/include/xen/sched.h 2005-04-25 22:14:39.000000000 -0500
+++ b/xen/include/xen/sched.h 2005-04-26 10:57:19.902206880 -0500
@@ -58,6 +58,8 @@
 void destroy_event_channels(struct domain *d);
 int  init_exec_domain_event_channels(struct exec_domain *ed);
+
+#define CPUMAP_RUNANYWHERE 0xFFFFFFFF

 struct exec_domain
 {
     u32 processor;
@@ -83,6 +85,8 @@

     atomic_t pausecnt;

+    cpumap_t cpumap;           /* which cpus this domain can run on */
+
     struct arch_exec_domain arch;
 };
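A quick Python sketch of the truncation described above: the map is
masked down to one bit per VCPU before display. The helper is
hypothetical, not part of the patch; note that the usual n-bit mask is
(1 << vcpus) - 1, whereas the patch's show_vcpus computes
vcpus**2 - 1, which coincides with that value only for two or four
VCPUs.

    # Hypothetical helper: keep only the cpumap bits for `vcpus` VCPUs.
    def truncate_cpumap(cpumap, vcpus):
        mask = (1 << vcpus) - 1          # four VCPUs -> 0xf
        return cpumap & mask

    # With four VCPUs, only the first four bits of the map are shown:
    assert truncate_cpumap(0xFFFFFFFF, 4) == 0xF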
Lars Rasmusson
2005-Apr-26 23:14 UTC
Re: [Xen-devel] [PATCH] xen,tools: pincpu use vcpu and cpumap_t
(off topic)

I've also had the same crash, and I got it to run again by following
Keir's suggestion in the thread "BUG: xend oopses on munmap of
/proc/xen/privcmd", i.e. to check out everything except patch 1.1367,
with the command

    bk cset -x 1.1367

/Lars

On Tue, 2005-04-26 at 17:31 -0500, Ryan Harper wrote:
> I could not test it as the current unstable tree crashes dom0 when
> booting domU.
Ian Pratt
2005-Apr-27 00:51 UTC
[Xen-devel] RE: [PATCH] xen,tools: pincpu use vcpu and cpumap_t
>     int err, errno_saved;
>     dom0_op_t op;
> +   u32 vcpu = 0; /* FIXME, hard coded initial pin to vcpu 0 */
> +   cpumap_t cpumap = 1<<cpu;

Ryan,

I haven't looked at the whole patch yet, but this comment worried me, as
it reminded me of a slightly wider change that I think we need to
address at the same time.

We should remove the initial CPU allocation algorithm from Xen
altogether, and leave it to xend (implementing the same ht-aware
algorithm), setting an appropriate pin map for each vcpu. The whole
pinning stuff should be removed from xc_domain_create too, as it
doesn't belong there.

I'd be inclined to go for something bigger than a long for the size of
the bitmap in the xc interface, even if we only look at the first 32/64
bits within Xen.

Best,
Ian
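For illustration only, an xend-side placement loop along the lines Ian
describes might look like the following minimal Python sketch; nothing
here is code from this thread, and the helper name and the load metric
are assumptions.

    # Hypothetical sketch: give each VCPU a one-bit pin map pointing at
    # the least-loaded physical CPU, instead of letting Xen choose.
    def initial_pin_maps(nr_vcpus, nr_cpus, load):
        maps = []
        for v in range(nr_vcpus):
            target = min(range(nr_cpus), key=lambda c: load[c])
            load[target] += 1            # crude balance metric
            maps.append(1 << target)     # cpumap with a single bit set
        return maps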
Ryan Harper
2005-Apr-27 14:04 UTC
[Xen-devel] Re: [PATCH] xen,tools: pincpu use vcpu and cpumap_t
* Ian Pratt <m+Ian.Pratt@cl.cam.ac.uk> [2005-04-27 08:55]:
> >     int err, errno_saved;
> >     dom0_op_t op;
> > +   u32 vcpu = 0; /* FIXME, hard coded initial pin to vcpu 0 */
> > +   cpumap_t cpumap = 1<<cpu;
>
> Ryan,
> I haven't looked at the whole patch yet, but this comment worried me, as
> it reminded me of a slightly wider change that I think we need to
> address at the same time.
>
> We should remove the initial CPU allocation algorithm from Xen
> altogether, and leave it to xend (implementing the same ht-aware
> algorithm), setting an appropriate pin map for each vcpu. The whole

This patch is pretty big as it is; do you want me to move the cpu
allocation out in this patch, or can I follow this patch up with
another that moves the allocation out into xend?

> pinning stuff should be removed from xc_domain_create too, as it
> doesn't belong there.

Hrm, I believe this is a recent change from Keir:

http://lists.xensource.com/archives/html/xen-changelog/2005-04/msg00279.html

> I'd be inclined to go for something bigger than a long for the size of
> the bitmap in the xc interface, even if we only look at the first 32/64
> bits within Xen.

OK. I chose an unsigned long as that was what you had indicated we
would go with in Xen 3.0. How many bits would you like to see the xc
interface use?

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com
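The width question matters because unsigned long is 32 bits on x86 and
64 bits on x86_64, so a long-sized cpumap changes size across
architectures, while a fixed u64 (what the thread later settles on)
keeps one layout everywhere. A minimal sketch of the fixed-width view,
using Python's struct module:

    import struct

    cpumap = (1 << 0) | (1 << 2)     # CPUs 0 and 2 usable
    # A u64 cpumap is always 8 bytes, regardless of the host's long:
    assert struct.pack("<Q", cpumap) == b"\x05" + b"\x00" * 7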
Mike Wray
2005-May-03 16:51 UTC
Re: [Xen-devel] [PATCH] xen,tools: pincpu use vcpu and cpumap_t
Ryan Harper wrote:
> Here is an update to the previous pincpu patches. This patch updates
> the pincpu dom0op to use a cpumap_t pointer, rather than specifying a
> size in the interface. cpumap_t is currently typedef'ed to an unsigned
> long. xm list -v now truncates the cpumap field to show only the bits
> of the map that are relevant. That is, if you have four VCPUs, you will
> only see the first four bits of the cpumap.
>
> This patch compiles against the current unstable tree, but I could not
> test it there, as the current unstable tree crashes dom0 when booting
> domU. I back-ported the patch to the 20050420 snapshot and it worked
> there.

This patch causes the dom0 crash I reported on the current unstable
(attached), so it needs fixing before applying.

Mike
Ryan Harper
2005-May-03 23:58 UTC
[Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
* Mike Wray <mike.wray@hp.com> [2005-05-03 12:01]:
> Ryan Harper wrote:
> > Here is an update to the previous pincpu patches. This patch updates
> > the pincpu dom0op to use a cpumap_t pointer, rather than specifying a
> > size in the interface. cpumap_t is currently typedef'ed to an unsigned
> > long. xm list -v now truncates the cpumap field to show only the bits
> > of the map that are relevant. That is, if you have four VCPUs, you will
> > only see the first four bits of the cpumap.
> >
> > This patch compiles against the current unstable tree, but I could not
> > test it there, as the current unstable tree crashes dom0 when booting
> > domU. I back-ported the patch to the 20050420 snapshot and it worked
> > there.
>
> This patch causes the dom0 crash I reported on the current unstable
> (attached), so it needs fixing before applying.

I was not able to recreate the dom0 crash with the previous patch
against the current nightly snapshot (20050503). Nonetheless,
addressing some of your feedback.

Attached is an updated patch.
 o bumped cpumap_t up to u64, per Ian and Keir
 o fixed deref of user-space pointer without copy_from_user(), Mike Wray
 o added put_domain() in ed > MAX_VIRT_CPUS error path, Mike Wray

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com

diffstat output:
 tools/libxc/xc.h                          |    6 ++
 tools/libxc/xc_domain.c                   |   16 +++++-
 tools/python/xen/lowlevel/xc/xc.c         |   71 ++++++++++++++++++------------
 tools/python/xen/xend/XendClient.py       |    5 +-
 tools/python/xen/xend/XendDomain.py       |   11 ++--
 tools/python/xen/xend/XendDomainInfo.py   |    6 ++
 tools/python/xen/xend/server/SrvDomain.py |    3 -
 tools/python/xen/xend/server/SrvUsbif.py  |    1
 tools/python/xen/xm/main.py               |   67 +++++++++++++++++++++++-----
 xen/arch/x86/domain.c                     |    1
 xen/common/dom0_ops.c                     |   38 +++++++++++++---
 xen/common/domain.c                       |    1
 xen/include/public/dom0_ops.h             |    7 ++
 xen/include/public/xen.h                  |    2
 xen/include/xen/sched.h                   |    4 +
 15 files changed, 179 insertions(+), 60 deletions(-)

Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
---
diff -urN a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c 2005-05-02 22:11:47.000000000 -0500
+++ b/tools/libxc/xc_domain.c 2005-05-03 17:15:27.000000000 -0500
@@ -16,6 +16,8 @@
 {
     int err, errno_saved;
     dom0_op_t op;
+    u32 vcpu = 0; /* FIXME, hard coded initial pin to vcpu 0 */
+    cpumap_t cpumap = 1<<cpu;

     op.cmd = DOM0_CREATEDOMAIN;
     op.u.createdomain.domain = (domid_t)*pdomid;
@@ -25,7 +27,7 @@
     *pdomid = (u16)op.u.createdomain.domain;

     if ( (cpu != -1) &&
-         ((err = xc_domain_pincpu(xc_handle, *pdomid, cpu)) != 0) )
+         ((err = xc_domain_pincpu(xc_handle, *pdomid, vcpu, &cpumap)) != 0) )
         goto fail;

     if ( (err = xc_domain_setcpuweight(xc_handle, *pdomid, cpu_weight)) != 0 )
@@ -84,13 +86,14 @@

 int xc_domain_pincpu(int xc_handle,
                      u32 domid,
-                     int cpu)
+                     int vcpu,
+                     cpumap_t *cpumap)
 {
     dom0_op_t op;
     op.cmd = DOM0_PINCPUDOMAIN;
     op.u.pincpudomain.domain = (domid_t)domid;
-    op.u.pincpudomain.exec_domain = 0;
-    op.u.pincpudomain.cpu = cpu;
+    op.u.pincpudomain.exec_domain = vcpu;
+    op.u.pincpudomain.cpumap = cpumap;
     return do_dom0_op(xc_handle, &op);
 }
@@ -132,6 +135,11 @@
         info->max_memkb = op.u.getdomaininfo.max_pages<<(PAGE_SHIFT);
         info->shared_info_frame = op.u.getdomaininfo.shared_info_frame;
         info->cpu_time = op.u.getdomaininfo.cpu_time;
+        info->vcpus = op.u.getdomaininfo.n_vcpu;
+        memcpy(info->vcpu_to_cpu, &op.u.getdomaininfo.vcpu_to_cpu,
+               MAX_VIRT_CPUS*sizeof(u32));
+        memcpy(info->cpumap, &op.u.getdomaininfo.cpumap,
+               MAX_VIRT_CPUS*sizeof(cpumap_t));

         next_domid = (u16)op.u.getdomaininfo.domain + 1;
         info++;
diff -urN a/tools/libxc/xc.h b/tools/libxc/xc.h
--- a/tools/libxc/xc.h 2005-05-02 22:11:44.000000000 -0500
+++ b/tools/libxc/xc.h 2005-05-03 17:15:27.000000000 -0500
@@ -111,6 +111,7 @@
 typedef struct {
     u32 domid;
     unsigned int cpu;
+    unsigned int vcpus;
     unsigned int dying:1, crashed:1, shutdown:1,
                  paused:1, blocked:1, running:1;
     unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
@@ -118,6 +119,8 @@
     unsigned long shared_info_frame;
     u64 cpu_time;
     unsigned long max_memkb;
+    u32 vcpu_to_cpu[MAX_VIRT_CPUS];
+    cpumap_t cpumap[MAX_VIRT_CPUS];
 } xc_dominfo_t;

 typedef dom0_getdomaininfo_t xc_domaininfo_t;
@@ -167,7 +170,8 @@
                        u32 domid);
 int xc_domain_pincpu(int xc_handle,
                      u32 domid,
-                     int cpu);
+                     int vcpu,
+                     cpumap_t *cpumap);
 /**
  * This function will return information about one or more domains.
  *
diff -urN a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c 2005-05-02 22:11:41.000000000 -0500
+++ b/tools/python/xen/lowlevel/xc/xc.c 2005-05-03 17:15:27.000000000 -0500
@@ -155,15 +155,16 @@
     XcObject *xc = (XcObject *)self;

     u32 dom;
-    int cpu = -1;
+    int vcpu = 0;
+    cpumap_t cpumap = 0xFFFFFFFF;

-    static char *kwd_list[] = { "dom", "cpu", NULL };
+    static char *kwd_list[] = { "dom", "vcpu", "cpumap", NULL };

-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list,
-                                      &dom, &cpu) )
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|ii", kwd_list,
+                                      &dom, &vcpu, &cpumap) )
         return NULL;

-    if ( xc_domain_pincpu(xc->xc_handle, dom, cpu) != 0 )
+    if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, &cpumap) != 0 )
         return PyErr_SetFromErrno(xc_error);

     Py_INCREF(zero);
@@ -175,10 +176,10 @@
                                     PyObject *kwds)
 {
     XcObject *xc = (XcObject *)self;
-    PyObject *list;
+    PyObject *list, *vcpu_list, *cpumap_list, *info_dict;

     u32 first_dom = 0;
-    int max_doms = 1024, nr_doms, i;
+    int max_doms = 1024, nr_doms, i, j;
     xc_dominfo_t *info;

     static char *kwd_list[] = { "first_dom", "max_doms", NULL };
@@ -195,23 +196,34 @@
     list = PyList_New(nr_doms);
     for ( i = 0 ; i < nr_doms; i++ )
     {
-        PyList_SetItem(
-            list, i,
-            Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
-                          ",s:l,s:L,s:l,s:i}",
-                          "dom", info[i].domid,
-                          "cpu", info[i].cpu,
-                          "dying", info[i].dying,
-                          "crashed", info[i].crashed,
-                          "shutdown", info[i].shutdown,
-                          "paused", info[i].paused,
-                          "blocked", info[i].blocked,
-                          "running", info[i].running,
-                          "mem_kb", info[i].nr_pages*4,
-                          "cpu_time", info[i].cpu_time,
-                          "maxmem_kb", info[i].max_memkb,
-                          "shutdown_reason", info[i].shutdown_reason
-                ));
+        vcpu_list = PyList_New(MAX_VIRT_CPUS);
+        cpumap_list = PyList_New(MAX_VIRT_CPUS);
+        for ( j = 0; j < MAX_VIRT_CPUS; j++ ) {
+            PyList_SetItem( vcpu_list, j,
+                            Py_BuildValue("i", info[i].vcpu_to_cpu[j]));
+            PyList_SetItem( cpumap_list, j,
+                            Py_BuildValue("i", info[i].cpumap[j]));
+        }
+
+        info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i"
+                                  ",s:l,s:L,s:l,s:i}",
+                                  "dom", info[i].domid,
+                                  "cpu", info[i].cpu,
+                                  "vcpus", info[i].vcpus,
+                                  "dying", info[i].dying,
+                                  "crashed", info[i].crashed,
+                                  "shutdown", info[i].shutdown,
+                                  "paused", info[i].paused,
+                                  "blocked", info[i].blocked,
+                                  "running", info[i].running,
+                                  "mem_kb", info[i].nr_pages*4,
+                                  "cpu_time", info[i].cpu_time,
+                                  "maxmem_kb", info[i].max_memkb,
+                                  "shutdown_reason", info[i].shutdown_reason);
+        PyDict_SetItemString( info_dict, "vcpu_to_cpu", vcpu_list );
+        PyDict_SetItemString( info_dict, "cpumap", cpumap_list );
+        PyList_SetItem( list, i, info_dict);
+    }

     free(info);
@@ -913,9 +925,10 @@
     { "domain_pincpu",
       (PyCFunction)pyxc_domain_pincpu,
       METH_VARARGS | METH_KEYWORDS, "\n"
-      "Pin a domain to a specified CPU.\n"
-      " dom [int]: Identifier of domain to be pinned.\n"
-      " cpu [int, -1]: CPU to pin to, or -1 to unpin\n\n"
+      "Pin a VCPU to a specified set CPUs.\n"
+      " dom [int]: Identifier of domain to which VCPU belongs.\n"
+      " vcpu [int, 0]: VCPU being pinned.\n"
+      " cpumap [int, -1]: Bitmap of usable CPUs.\n\n"
       "Returns: [int] 0 on success; -1 on error.\n" },

     { "domain_getinfo",
@@ -930,6 +943,7 @@
       " domain-id space was reached.\n"
       " dom [int]: Identifier of domain to which this info pertains\n"
       " cpu [int]: CPU to which this domain is bound\n"
+      " vcpus [int]: Number of Virtual CPUS in this domain\n"
       " dying [int]: Bool - is the domain dying?\n"
       " crashed [int]: Bool - has the domain crashed?\n"
       " shutdown [int]: Bool - has the domain shut itself down?\n"
@@ -940,7 +954,8 @@
       " maxmem_kb [int]: Maximum memory limit, in kilobytes\n"
       " cpu_time [long]: CPU time consumed, in nanoseconds\n"
       " shutdown_reason [int]: Numeric code from guest OS, explaining "
-      "reason why it shut itself down.\n" },
+      "reason why it shut itself down.\n"
+      " vcpu_to_cpu [[int]]: List that maps VCPUS to CPUS\n" },

     { "linux_save",
       (PyCFunction)pyxc_linux_save,
diff -urN a/tools/python/xen/xend/server/SrvDomain.py b/tools/python/xen/xend/server/SrvDomain.py
--- a/tools/python/xen/xend/server/SrvDomain.py 2005-05-02 22:11:46.000000000 -0500
+++ b/tools/python/xen/xend/server/SrvDomain.py 2005-05-03 17:15:27.000000000 -0500
@@ -92,7 +92,8 @@
     def op_pincpu(self, op, req):
         fn = FormFn(self.xd.domain_pincpu,
                     [['dom', 'str'],
-                     ['cpu', 'int']])
+                     ['vcpu', 'int'],
+                     ['cpumap', 'int']])
         val = fn(req.args, {'dom': self.dom.id})
         return val
diff -urN a/tools/python/xen/xend/server/SrvUsbif.py b/tools/python/xen/xend/server/SrvUsbif.py
--- a/tools/python/xen/xend/server/SrvUsbif.py 2005-05-02 22:11:49.000000000 -0500
+++ b/tools/python/xen/xend/server/SrvUsbif.py 2005-05-03 17:15:27.000000000 -0500
@@ -107,6 +107,7 @@
     def op_pincpu(self, op, req):
         fn = FormFn(self.xd.domain_pincpu,
                     [['dom', 'str'],
+                     ['vcpu', 'int'],
                      ['cpu', 'int']])
         val = fn(req.args, {'dom': self.dom.id})
         return val
diff -urN a/tools/python/xen/xend/XendClient.py b/tools/python/xen/xend/XendClient.py
--- a/tools/python/xen/xend/XendClient.py 2005-05-02 22:11:40.000000000 -0500
+++ b/tools/python/xen/xend/XendClient.py 2005-05-03 17:15:27.000000000 -0500
@@ -246,10 +246,11 @@
                               'live'     : live,
                               'resource' : resource })

-    def xend_domain_pincpu(self, id, cpu):
+    def xend_domain_pincpu(self, id, vcpu, cpumap):
         return self.xendPost(self.domainurl(id),
                              {'op'     : 'pincpu',
-                              'cpu'    : cpu })
+                              'vcpu'   : vcpu,
+                              'cpumap' : cpumap })

     def xend_domain_cpu_bvt_set(self, id, mcuadv, warpback,
                                 warpvalue, warpl, warpu):
         return self.xendPost(self.domainurl(id),
diff -urN a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py 2005-05-02 22:11:41.000000000 -0500
+++ b/tools/python/xen/xend/XendDomainInfo.py 2005-05-03 17:15:27.000000000 -0500
@@ -372,6 +372,10 @@
             sxpr.append(['shutdown_reason', reason])
         sxpr.append(['cpu', self.info['cpu']])
         sxpr.append(['cpu_time', self.info['cpu_time']/1e9])
+        sxpr.append(['vcpus', self.info['vcpus']])
+        sxpr.append(['cpumap', self.info['cpumap']])
+        sxpr.append(['vcpu_to_cpu', ''.join(map(lambda x: str(x),
+                    self.info['vcpu_to_cpu'][0:self.info['vcpus']]))])

         if self.start_time:
             up_time = time.time() - self.start_time
@@ -449,7 +453,7 @@
             raise VmError('missing memory size')
         cpu = sxp.child_value(config, 'cpu')
         if self.recreate and self.dom and cpu is not None:
-            xc.domain_pincpu(self.dom, int(cpu))
+            xc.domain_pincpu(self.dom, 0, 1<<int(cpu))
         try:
             image = sxp.child_value(self.config, 'image')
             self.vcpus = int(sxp.child_value(image, 'vcpus'))
diff -urN a/tools/python/xen/xend/XendDomain.py b/tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py 2005-05-02 22:11:41.000000000 -0500
+++ b/tools/python/xen/xend/XendDomain.py 2005-05-03 17:15:27.000000000 -0500
@@ -612,15 +612,16 @@
         xmigrate = XendMigrate.instance()
         return xmigrate.save_begin(dominfo, dst)

-    def domain_pincpu(self, id, cpu):
-        """Pin a domain to a cpu.
+    def domain_pincpu(self, id, vcpu, cpumap):
+        """Set which cpus vcpu can use

-        @param id: domain
-        @param cpu: cpu number
+        @param id: domain
+        @param vcpu: vcpu number
+        @param cpumap: bitmap of usbale cpus
         """
         dominfo = self.domain_lookup(id)
         try:
-            return xc.domain_pincpu(int(dominfo.id), cpu)
+            return xc.domain_pincpu(int(dominfo.id), vcpu, cpumap)
         except Exception, ex:
             raise XendError(str(ex))
diff -urN a/tools/python/xen/xm/main.py b/tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py 2005-05-02 22:11:47.000000000 -0500
+++ b/tools/python/xen/xm/main.py 2005-05-03 17:15:27.000000000 -0500
@@ -6,6 +6,8 @@
 import sys
 from getopt import getopt
 import socket
+import warnings
+warnings.filterwarnings('ignore', category=FutureWarning)

 from xen.xend import PrettyPrint
 from xen.xend import sxp
@@ -340,8 +342,8 @@
     name = "list"
     info = """List information about domains."""

-    short_options = 'l'
-    long_options = ['long']
+    short_options = 'lv'
+    long_options = ['long','vcpus']

     def help(self, args):
         if help:
@@ -350,11 +352,13 @@
         Either all domains or the domains given.

         -l, --long   Get more detailed information.
+        -v, --vcpus  Show VCPU to CPU mapping.
        """
        return

     def main(self, args):
         use_long = 0
+        show_vcpus = 0
         (options, params) = getopt(args[1:],
                                    self.short_options,
                                    self.long_options)
@@ -362,6 +366,8 @@
         for (k, v) in options:
             if k in ['-l', '--long']:
                 use_long = 1
+            if k in ['-v', '--vcpus']:
+                show_vcpus = 1

         if n == 0:
             doms = server.xend_domains()
@@ -371,11 +377,13 @@

         if use_long:
             self.long_list(doms)
+        elif show_vcpus:
+            self.show_vcpus(doms)
         else:
             self.brief_list(doms)

     def brief_list(self, doms):
-        print 'Name              Id  Mem(MB)  CPU  State  Time(s)  Console'
+        print 'Name              Id  Mem(MB)  CPU  VCPU(s)  State  Time(s)  Console'
         for dom in doms:
             info = server.xend_domain(dom)
             d = {}
@@ -383,6 +391,7 @@
             d['name'] = sxp.child_value(info, 'name', '??')
             d['mem'] = int(sxp.child_value(info, 'memory', '0'))
             d['cpu'] = int(sxp.child_value(info, 'cpu', '0'))
+            d['vcpus'] = int(sxp.child_value(info, 'vcpus', '0'))
             d['state'] = sxp.child_value(info, 'state', '??')
             d['cpu_time'] = float(sxp.child_value(info, 'cpu_time', '0'))
             console = sxp.child(info, 'console')
@@ -390,9 +399,27 @@
                 d['port'] = sxp.child_value(console, 'console_port')
             else:
                 d['port'] = ''
-            print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(state)5s %(cpu_time)7.1f %(port)4s"
+            print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(vcpus)5d %(state)5s %(cpu_time)7.1f %(port)4s"
                    % d)

+    def show_vcpus(self, doms):
+        print 'Name              Id  VCPU  CPU  CPUMAP'
+        for dom in doms:
+            info = server.xend_domain(dom)
+            vcpu_to_cpu = sxp.child_value(info, 'vcpu_to_cpu', '?').replace('-','')
+            cpumap = sxp.child_value(info, 'cpumap', [])
+            mask = ((int(sxp.child_value(info, 'vcpus', '0')))**2) - 1
+            count = 0
+            for cpu in vcpu_to_cpu:
+                d = {}
+                d['name'] = sxp.child_value(info, 'name', '??')
+                d['dom'] = int(sxp.child_value(info, 'id', '-1'))
+                d['vcpu'] = int(count)
+                d['cpu'] = int(cpu)
+                d['cpumap'] = int(cpumap[count])&mask
+                count = count + 1
+                print ("%(name)-16s %(dom)3d %(vcpu)4d %(cpu)3d 0x%(cpumap)x"
+                       % d)
+
     def long_list(self, doms):
         for dom in doms:
             info = server.xend_domain(dom)
@@ -474,17 +501,35 @@
 class ProgPincpu(Prog):
     group = 'domain'
     name = "pincpu"
-    info = """Pin a domain to a cpu. """
+    info = """Set which cpus a VCPU can use. """

     def help(self, args):
-        print args[0],'DOM CPU'
-        print '\nPin domain DOM to cpu CPU.'
+        print args[0],'DOM VCPU CPUS'
+        print '\nSet which cpus VCPU in domain DOM can use.'
+
+    # convert list of cpus to bitmap integer value
+    def make_map(self, cpulist):
+        cpus = []
+        cpumap = 0
+        for c in cpulist.split(','):
+            if len(c) > 1:
+                (x,y) = c.split('-')
+                for i in range(int(x),int(y)+1):
+                    cpus.append(int(i))
+            else:
+                cpus.append(int(c))
+        cpus.sort()
+        for c in cpus:
+            cpumap = cpumap | 1<<c
+
+        return cpumap

     def main(self, args):
-        if len(args) != 3: self.err("%s: Invalid argument(s)" % args[0])
-        dom = args[1]
-        cpu = int(args[2])
-        server.xend_domain_pincpu(dom, cpu)
+        if len(args) != 4: self.err("%s: Invalid argument(s)" % args[0])
+        dom = args[1]
+        vcpu = int(args[2])
+        cpumap = self.make_map(args[3]);
+        server.xend_domain_pincpu(dom, vcpu, cpumap)

 xm.prog(ProgPincpu)
diff -urN a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c 2005-05-02 22:11:43.000000000 -0500
+++ b/xen/arch/x86/domain.c 2005-05-03 17:15:27.000000000 -0500
@@ -252,6 +252,7 @@
         d->shared_info = (void *)alloc_xenheap_page();
         memset(d->shared_info, 0, PAGE_SIZE);
         ed->vcpu_info = &d->shared_info->vcpu_data[ed->eid];
+        ed->cpumap = CPUMAP_RUNANYWHERE;
         SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d);
         machine_to_phys_mapping[virt_to_phys(d->shared_info) >>
                                 PAGE_SHIFT] = INVALID_M2P_ENTRY;
diff -urN a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
--- a/xen/common/dom0_ops.c 2005-05-02 22:11:41.000000000 -0500
+++ b/xen/common/dom0_ops.c 2005-05-03 17:15:27.000000000 -0500
@@ -222,7 +222,8 @@
         domid_t dom = op->u.pincpudomain.domain;
         struct domain *d = find_domain_by_id(dom);
         struct exec_domain *ed;
-        int cpu = op->u.pincpudomain.cpu;
+        cpumap_t curmap, *cpumap = &curmap;
+

         if ( d == NULL )
         {
@@ -230,6 +231,14 @@
             break;
         }

+        if ( (op->u.pincpudomain.exec_domain >= MAX_VIRT_CPUS) ||
+             !d->exec_domain[op->u.pincpudomain.exec_domain] )
+        {
+            ret = -EINVAL;
+            put_domain(d);
+            break;
+        }
+
         ed = d->exec_domain[op->u.pincpudomain.exec_domain];
         if ( ed == NULL )
         {
@@ -245,20 +254,32 @@
             break;
         }

-        if ( cpu == -1 )
+        if ( copy_from_user(cpumap,
+                            op->u.pincpudomain.cpumap, sizeof(*cpumap)) )
+        {
+            ret = -EFAULT;
+            put_domain(d);
+            break;
+        }
+
+        /* update cpumap for this ed */
+        ed->cpumap = *(cpumap);
+
+        if ( *(cpumap) == CPUMAP_RUNANYWHERE )
         {
             clear_bit(EDF_CPUPINNED, &ed->ed_flags);
         }
         else
         {
+            /* pick a new cpu from the usable map */
+            int new_cpu = (int)find_first_set_bit(*(cpumap)) % smp_num_cpus;
+
             exec_domain_pause(ed);
-            if ( ed->processor != (cpu % smp_num_cpus) )
+            if ( ed->processor != new_cpu )
                 set_bit(EDF_MIGRATED, &ed->ed_flags);
             set_bit(EDF_CPUPINNED, &ed->ed_flags);
-            ed->processor = cpu % smp_num_cpus;
+            ed->processor = new_cpu;
             exec_domain_unpause(ed);
         }
-
         put_domain(d);
     }
     break;
@@ -309,6 +330,12 @@
             break;
         }

+        memset(&op->u.getdomaininfo.vcpu_to_cpu,-1,MAX_VIRT_CPUS*sizeof(u8));
+        for_each_exec_domain ( d, ed ) {
+            op->u.getdomaininfo.vcpu_to_cpu[ed->eid] = ed->processor;
+            op->u.getdomaininfo.cpumap[ed->eid] = ed->cpumap;
+        }
+
         ed = d->exec_domain[op->u.getdomaininfo.exec_domain];

         op->u.getdomaininfo.flags
@@ -326,6 +353,7 @@
         op->u.getdomaininfo.tot_pages = d->tot_pages;
         op->u.getdomaininfo.max_pages = d->max_pages;
         op->u.getdomaininfo.cpu_time  = ed->cpu_time;
+        op->u.getdomaininfo.n_vcpu    = d->shared_info->n_vcpu;
         op->u.getdomaininfo.shared_info_frame =
             __pa(d->shared_info) >> PAGE_SHIFT;
diff -urN a/xen/common/domain.c b/xen/common/domain.c
--- a/xen/common/domain.c 2005-05-02 22:11:44.000000000 -0500
+++ b/xen/common/domain.c 2005-05-03 17:15:27.000000000 -0500
@@ -294,6 +294,7 @@

     ed = d->exec_domain[vcpu];
     atomic_set(&ed->pausecnt, 0);
+    ed->cpumap = CPUMAP_RUNANYWHERE;

     memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));
diff -urN a/xen/include/public/dom0_ops.h b/xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h 2005-05-02 22:11:48.000000000 -0500
+++ b/xen/include/public/dom0_ops.h 2005-05-03 17:15:27.000000000 -0500
@@ -88,6 +88,9 @@
     memory_t max_pages;
     memory_t shared_info_frame;  /* MFN of shared_info struct */
     u64      cpu_time;
+    u32      n_vcpu;
+    u32      vcpu_to_cpu[MAX_VIRT_CPUS];  /* current mapping   */
+    cpumap_t cpumap[MAX_VIRT_CPUS];       /* allowable mapping */
 } dom0_getdomaininfo_t;

 #define DOM0_SETDOMAININFO      13
@@ -170,14 +173,14 @@
 } dom0_readconsole_t;

 /*
- * Pin Domain to a particular CPU  (use -1 to unpin)
+ * Set which cpus an exec_domain can use
 */
 #define DOM0_PINCPUDOMAIN     20
 typedef struct {
     /* IN variables. */
     domid_t      domain;
     u16          exec_domain;
-    s32          cpu;             /* -1 implies unpin */
+    cpumap_t     *cpumap;
 } dom0_pincpudomain_t;

 /* Get trace buffers machine base address */
diff -urN a/xen/include/public/xen.h b/xen/include/public/xen.h
--- a/xen/include/public/xen.h 2005-05-02 22:11:42.000000000 -0500
+++ b/xen/include/public/xen.h 2005-05-03 18:04:28.934634128 -0500
@@ -473,6 +473,8 @@
 /* For use in guest OSes. */
 extern shared_info_t *HYPERVISOR_shared_info;

+typedef u64 cpumap_t;
+
 #endif /* !__ASSEMBLY__ */

 #endif /* __XEN_PUBLIC_XEN_H__ */
diff -urN a/xen/include/xen/sched.h b/xen/include/xen/sched.h
--- a/xen/include/xen/sched.h 2005-05-02 22:11:41.000000000 -0500
+++ b/xen/include/xen/sched.h 2005-05-03 17:15:27.000000000 -0500
@@ -58,6 +58,8 @@
 void destroy_event_channels(struct domain *d);
 int  init_exec_domain_event_channels(struct exec_domain *ed);
+
+#define CPUMAP_RUNANYWHERE 0xFFFFFFFF

 struct exec_domain
 {
     u32 processor;
@@ -83,6 +85,8 @@

     atomic_t pausecnt;

+    cpumap_t cpumap;           /* which cpus this domain can run on */
+
     struct arch_exec_domain arch;
 };
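For reference, the cpu-list syntax that the new xm pincpu accepts,
traced through a standalone version of the patch's make_map. The logic
is the same except for the range test: the patch uses len(c) > 1, which
would also treat a two-digit CPU number such as "10" as a range, so
this sketch tests for '-' instead.

    # Standalone sketch of make_map: "0,2-3" -> bitmap of usable CPUs.
    def make_map(cpulist):
        cpus = []
        for c in cpulist.split(','):
            if '-' in c:                         # a range like "2-3"
                (x, y) = c.split('-')
                cpus.extend(range(int(x), int(y) + 1))
            else:
                cpus.append(int(c))
        cpumap = 0
        for c in cpus:
            cpumap |= 1 << c                     # bit n => CPU n usable
        return cpumap

    assert make_map("0,2-3") == 0b1101           # CPUs 0, 2 and 3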
Mike Wray
2005-May-05 15:49 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
Ryan Harper wrote:
> * Mike Wray <mike.wray@hp.com> [2005-05-03 12:01]:
>> Ryan Harper wrote:
>>> Here is an update to the previous pincpu patches. This patch updates
>>> the pincpu dom0op to use a cpumap_t pointer, rather than specifying a
>>> size in the interface. cpumap_t is currently typedef'ed to an unsigned
>>> long. xm list -v now truncates the cpumap field to show only the bits
>>> of the map that are relevant. That is, if you have four VCPUs, you will
>>> only see the first four bits of the cpumap.
>>>
>>> This patch compiles against the current unstable tree, but I could not
>>> test it there, as the current unstable tree crashes dom0 when booting
>>> domU. I back-ported the patch to the 20050420 snapshot and it worked
>>> there.
>>
>> This patch causes the dom0 crash I reported on the current unstable
>> (attached), so it needs fixing before applying.
>
> I was not able to recreate the dom0 crash with the previous patch
> against the current nightly snapshot (20050503). Nonetheless,
> addressing some of your feedback.
>
> Attached is an updated patch.
>  o bumped cpumap_t up to u64, per Ian and Keir
>  o fixed deref of user-space pointer without copy_from_user(), Mike Wray
>  o added put_domain() in ed > MAX_VIRT_CPUS error path, Mike Wray

OK, I'll give it another whirl.

Mike
Mike Wray
2005-May-10 13:59 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
Ryan Harper wrote:
> * Mike Wray <mike.wray@hp.com> [2005-05-03 12:01]:
>> This patch causes the dom0 crash I reported on the current unstable
>> (attached), so it needs fixing before applying.
>
> I was not able to recreate the dom0 crash with the previous patch
> against the current nightly snapshot (20050503). Nonetheless,
> addressing some of your feedback.
>
> Attached is an updated patch.
>  o bumped cpumap_t up to u64, per Ian and Keir
>  o fixed deref of user-space pointer without copy_from_user(), Mike Wray
>  o added put_domain() in ed > MAX_VIRT_CPUS error path, Mike Wray

Ryan,

I just applied your patch and tried to compile it. I got the following
error:

dom0_ops.c: In function `do_dom0_op':
dom0_ops.c:336: structure has no member named `eid'
dom0_ops.c:337: structure has no member named `eid'
make[3]: *** [dom0_ops.o] Error 1
make[3]: Leaving directory `/home/mjw/repos-bk/xeno-unstable-xen.patch/xeno-unstable.bk-xen.pin/xen/common'
make[2]: *** [/home/mjw/repos-bk/xeno-unstable-xen.patch/xeno-unstable.bk-xen.pin/xen/xen] Error 2
make[2]: Leaving directory `/home/mjw/repos-bk/xeno-unstable-xen.patch/xeno-unstable.bk-xen.pin/xen'
make[1]: *** [xen] Error 2
make[1]: Leaving directory `/home/mjw/repos-bk/xeno-unstable-xen.patch/xeno-unstable.bk-xen.pin'
make: *** [world] Error 2

So it looks like we need to try again. Apologies if this is because of
changes after you submitted the patch.

I also had two failed hunks, which I fixed manually:

1 out of 1 hunk FAILED -- saving rejects to file xen/arch/x86/domain.c.rej
1 out of 5 hunks FAILED -- saving rejects to file xen/common/dom0_ops.c.rej

Mike
Ryan Harper
2005-May-10 14:59 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
* Mike Wray <mike.wray@hp.com> [2005-05-10 09:08]:
> So it looks like we need to try again. Apologies if this is because of
> changes after you submitted the patch.
>
> I also had two failed hunks, which I fixed manually:
>
> 1 out of 1 hunk FAILED -- saving rejects to file xen/arch/x86/domain.c.rej
> 1 out of 5 hunks FAILED -- saving rejects to file xen/common/dom0_ops.c.rej

I'll update it against current unstable and resubmit. Thanks.

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com
Ryan Harper
2005-May-10 15:55 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
* Ryan Harper <ryanh@us.ibm.com> [2005-05-10 10:01]:> * Mike Wray <mike.wray@hp.com> [2005-05-10 09:08]: > > So it looks like we need to try again. > > Apologies if this is because of changes after > > you submitted the patch. > > > > I also had two failed hunks, which I fixed manually: > > > > 1 out of 1 hunk FAILED -- saving rejects to file xen/arch/x86/domain.c.rej > > 1 out of 5 hunks FAILED -- saving rejects to file xen/common/dom0_ops.c.rej > > I''ll update it against current unstable and resubmit. Thanks.Updated against 2005-05-10 nightly unstable snapshot. -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx (512) 838-9253 T/L: 678-9253 ryanh@us.ibm.com diffstat output: tools/libxc/xc.h | 6 ++ tools/libxc/xc_domain.c | 16 +++++- tools/python/xen/lowlevel/xc/xc.c | 71 ++++++++++++++++++------------ tools/python/xen/xend/XendClient.py | 5 +- tools/python/xen/xend/XendDomain.py | 11 ++-- tools/python/xen/xend/XendDomainInfo.py | 6 ++ tools/python/xen/xend/server/SrvDomain.py | 3 - tools/python/xen/xend/server/SrvUsbif.py | 1 tools/python/xen/xm/main.py | 67 +++++++++++++++++++++++----- xen/arch/x86/domain.c | 1 xen/common/dom0_ops.c | 38 +++++++++++++--- xen/common/domain.c | 1 xen/include/public/dom0_ops.h | 7 ++ xen/include/public/xen.h | 2 xen/include/xen/sched.h | 4 + 15 files changed, 179 insertions(+), 60 deletions(-) Signed-off-by: Ryan Harper <ryanh@us.ibm.com> --- diff -urN a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c --- a/tools/libxc/xc_domain.c 2005-05-09 22:11:33.000000000 -0500 +++ b/tools/libxc/xc_domain.c 2005-05-10 10:13:18.886383808 -0500 @@ -16,6 +16,8 @@ { int err, errno_saved; dom0_op_t op; + u32 vcpu = 0; /* FIXME, hard coded initial pin to vcpu 0 */ + cpumap_t cpumap = 1<<cpu; op.cmd = DOM0_CREATEDOMAIN; op.u.createdomain.domain = (domid_t)*pdomid; @@ -25,7 +27,7 @@ *pdomid = (u16)op.u.createdomain.domain; if ( (cpu != -1) && - ((err = xc_domain_pincpu(xc_handle, *pdomid, cpu)) != 0) ) + ((err = xc_domain_pincpu(xc_handle, *pdomid, vcpu, &cpumap)) != 0) ) goto fail; if ( (err = xc_domain_setcpuweight(xc_handle, *pdomid, cpu_weight)) != 0 ) @@ -84,13 +86,14 @@ int xc_domain_pincpu(int xc_handle, u32 domid, - int cpu) + int vcpu, + cpumap_t *cpumap) { dom0_op_t op; op.cmd = DOM0_PINCPUDOMAIN; op.u.pincpudomain.domain = (domid_t)domid; - op.u.pincpudomain.exec_domain = 0; - op.u.pincpudomain.cpu = cpu; + op.u.pincpudomain.exec_domain = vcpu; + op.u.pincpudomain.cpumap = cpumap; return do_dom0_op(xc_handle, &op); } @@ -133,6 +136,11 @@ info->max_memkb = op.u.getdomaininfo.max_pages<<(PAGE_SHIFT); info->shared_info_frame = op.u.getdomaininfo.shared_info_frame; info->cpu_time = op.u.getdomaininfo.cpu_time; + info->vcpus = op.u.getdomaininfo.n_vcpu; + memcpy(info->vcpu_to_cpu, &op.u.getdomaininfo.vcpu_to_cpu, + MAX_VIRT_CPUS*sizeof(u32)); + memcpy(info->cpumap, &op.u.getdomaininfo.cpumap, + MAX_VIRT_CPUS*sizeof(cpumap_t)); next_domid = (u16)op.u.getdomaininfo.domain + 1; info++; diff -urN a/tools/libxc/xc.h b/tools/libxc/xc.h --- a/tools/libxc/xc.h 2005-05-09 22:11:29.000000000 -0500 +++ b/tools/libxc/xc.h 2005-05-10 10:13:18.895382440 -0500 @@ -111,6 +111,7 @@ typedef struct { u32 domid; unsigned int cpu; + unsigned int vcpus; unsigned int dying:1, crashed:1, shutdown:1, paused:1, blocked:1, running:1; unsigned int shutdown_reason; /* only meaningful if shutdown==1 */ @@ -118,6 +119,8 @@ unsigned long shared_info_frame; u64 cpu_time; unsigned long max_memkb; + u32 vcpu_to_cpu[MAX_VIRT_CPUS]; + cpumap_t cpumap[MAX_VIRT_CPUS]; } 
xc_dominfo_t; typedef dom0_getdomaininfo_t xc_domaininfo_t; @@ -167,7 +170,8 @@ u32 domid); int xc_domain_pincpu(int xc_handle, u32 domid, - int cpu); + int vcpu, + cpumap_t *cpumap); /** * This function will return information about one or more domains. * diff -urN a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c --- a/tools/python/xen/lowlevel/xc/xc.c 2005-05-09 22:11:27.000000000 -0500 +++ b/tools/python/xen/lowlevel/xc/xc.c 2005-05-10 10:13:18.922378336 -0500 @@ -155,15 +155,16 @@ XcObject *xc = (XcObject *)self; u32 dom; - int cpu = -1; + int vcpu = 0; + cpumap_t cpumap = 0xFFFFFFFF; - static char *kwd_list[] = { "dom", "cpu", NULL }; + static char *kwd_list[] = { "dom", "vcpu", "cpumap", NULL }; - if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|i", kwd_list, - &dom, &cpu) ) + if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|ii", kwd_list, + &dom, &vcpu, &cpumap) ) return NULL; - if ( xc_domain_pincpu(xc->xc_handle, dom, cpu) != 0 ) + if ( xc_domain_pincpu(xc->xc_handle, dom, vcpu, &cpumap) != 0 ) return PyErr_SetFromErrno(xc_error); Py_INCREF(zero); @@ -175,10 +176,10 @@ PyObject *kwds) { XcObject *xc = (XcObject *)self; - PyObject *list; + PyObject *list, *vcpu_list, *cpumap_list, *info_dict; u32 first_dom = 0; - int max_doms = 1024, nr_doms, i; + int max_doms = 1024, nr_doms, i, j; xc_dominfo_t *info; static char *kwd_list[] = { "first_dom", "max_doms", NULL }; @@ -195,23 +196,34 @@ list = PyList_New(nr_doms); for ( i = 0 ; i < nr_doms; i++ ) { - PyList_SetItem( - list, i, - Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i" - ",s:l,s:L,s:l,s:i}", - "dom", info[i].domid, - "cpu", info[i].cpu, - "dying", info[i].dying, - "crashed", info[i].crashed, - "shutdown", info[i].shutdown, - "paused", info[i].paused, - "blocked", info[i].blocked, - "running", info[i].running, - "mem_kb", info[i].nr_pages*4, - "cpu_time", info[i].cpu_time, - "maxmem_kb", info[i].max_memkb, - "shutdown_reason", info[i].shutdown_reason - )); + vcpu_list = PyList_New(MAX_VIRT_CPUS); + cpumap_list = PyList_New(MAX_VIRT_CPUS); + for ( j = 0; j < MAX_VIRT_CPUS; j++ ) { + PyList_SetItem( vcpu_list, j, + Py_BuildValue("i", info[i].vcpu_to_cpu[j])); + PyList_SetItem( cpumap_list, j, + Py_BuildValue("i", info[i].cpumap[j])); + } + + info_dict = Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i" + ",s:l,s:L,s:l,s:i}", + "dom", info[i].domid, + "cpu", info[i].cpu, + "vcpus", info[i].vcpus, + "dying", info[i].dying, + "crashed", info[i].crashed, + "shutdown", info[i].shutdown, + "paused", info[i].paused, + "blocked", info[i].blocked, + "running", info[i].running, + "mem_kb", info[i].nr_pages*4, + "cpu_time", info[i].cpu_time, + "maxmem_kb", info[i].max_memkb, + "shutdown_reason", info[i].shutdown_reason); + PyDict_SetItemString( info_dict, "vcpu_to_cpu", vcpu_list ); + PyDict_SetItemString( info_dict, "cpumap", cpumap_list ); + PyList_SetItem( list, i, info_dict); + } free(info); @@ -959,9 +971,10 @@ { "domain_pincpu", (PyCFunction)pyxc_domain_pincpu, METH_VARARGS | METH_KEYWORDS, "\n" - "Pin a domain to a specified CPU.\n" - " dom [int]: Identifier of domain to be pinned.\n" - " cpu [int, -1]: CPU to pin to, or -1 to unpin\n\n" + "Pin a VCPU to a specified set CPUs.\n" + " dom [int]: Identifier of domain to which VCPU belongs.\n" + " vcpu [int, 0]: VCPU being pinned.\n" + " cpumap [int, -1]: Bitmap of usable CPUs.\n\n" "Returns: [int] 0 on success; -1 on error.\n" }, { "domain_getinfo", @@ -976,6 +989,7 @@ " domain-id space was reached.\n" " dom [int]: Identifier of domain to which this info 
pertains\n" " cpu [int]: CPU to which this domain is bound\n" + " vcpus [int]: Number of Virtual CPUS in this domain\n" " dying [int]: Bool - is the domain dying?\n" " crashed [int]: Bool - has the domain crashed?\n" " shutdown [int]: Bool - has the domain shut itself down?\n" @@ -986,7 +1000,8 @@ " maxmem_kb [int]: Maximum memory limit, in kilobytes\n" " cpu_time [long]: CPU time consumed, in nanoseconds\n" " shutdown_reason [int]: Numeric code from guest OS, explaining " - "reason why it shut itself down.\n" }, + "reason why it shut itself down.\n" + " vcpu_to_cpu [[int]]: List that maps VCPUS to CPUS\n" }, { "linux_save", (PyCFunction)pyxc_linux_save, diff -urN a/tools/python/xen/xend/server/SrvDomain.py b/tools/python/xen/xend/server/SrvDomain.py --- a/tools/python/xen/xend/server/SrvDomain.py 2005-05-09 22:11:31.000000000 -0500 +++ b/tools/python/xen/xend/server/SrvDomain.py 2005-05-10 10:13:18.931376968 -0500 @@ -92,7 +92,8 @@ def op_pincpu(self, op, req): fn = FormFn(self.xd.domain_pincpu, [[''dom'', ''str''], - [''cpu'', ''int'']]) + [''vcpu'', ''int''], + [''cpumap'', ''int'']]) val = fn(req.args, {''dom'': self.dom.id}) return val diff -urN a/tools/python/xen/xend/server/SrvUsbif.py b/tools/python/xen/xend/server/SrvUsbif.py --- a/tools/python/xen/xend/server/SrvUsbif.py 2005-05-09 22:11:36.000000000 -0500 +++ b/tools/python/xen/xend/server/SrvUsbif.py 2005-05-10 10:13:18.932376816 -0500 @@ -107,6 +107,7 @@ def op_pincpu(self, op, req): fn = FormFn(self.xd.domain_pincpu, [[''dom'', ''str''], + [''vcpu'', ''int''], [''cpu'', ''int'']]) val = fn(req.args, {''dom'': self.dom.id}) return val diff -urN a/tools/python/xen/xend/XendClient.py b/tools/python/xen/xend/XendClient.py --- a/tools/python/xen/xend/XendClient.py 2005-05-09 22:11:25.000000000 -0500 +++ b/tools/python/xen/xend/XendClient.py 2005-05-10 10:13:18.947374536 -0500 @@ -246,10 +246,11 @@ ''live'' : live, ''resource'' : resource }) - def xend_domain_pincpu(self, id, cpu): + def xend_domain_pincpu(self, id, vcpu, cpumap): return self.xendPost(self.domainurl(id), {''op'' : ''pincpu'', - ''cpu'' : cpu }) + ''vcpu'' : vcpu, + ''cpumap'' : cpumap }) def xend_domain_cpu_bvt_set(self, id, mcuadv, warpback, warpvalue, warpl, warpu): return self.xendPost(self.domainurl(id), diff -urN a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py --- a/tools/python/xen/xend/XendDomainInfo.py 2005-05-09 22:11:27.000000000 -0500 +++ b/tools/python/xen/xend/XendDomainInfo.py 2005-05-10 10:13:18.958372864 -0500 @@ -372,6 +372,10 @@ sxpr.append([''shutdown_reason'', reason]) sxpr.append([''cpu'', self.info[''cpu'']]) sxpr.append([''cpu_time'', self.info[''cpu_time'']/1e9]) + sxpr.append([''vcpus'', self.info[''vcpus'']]) + sxpr.append([''cpumap'', self.info[''cpumap'']]) + sxpr.append([''vcpu_to_cpu'', ''''.join(map(lambda x: str(x), + self.info[''vcpu_to_cpu''][0:self.info[''vcpus'']]))]) if self.start_time: up_time = time.time() - self.start_time @@ -449,7 +453,7 @@ raise VmError(''missing memory size'') cpu = sxp.child_value(config, ''cpu'') if self.recreate and self.dom and cpu is not None: - xc.domain_pincpu(self.dom, int(cpu)) + xc.domain_pincpu(self.dom, 0, 1<<int(cpu)) try: image = sxp.child_value(self.config, ''image'') self.vcpus = int(sxp.child_value(image, ''vcpus'')) diff -urN a/tools/python/xen/xend/XendDomain.py b/tools/python/xen/xend/XendDomain.py --- a/tools/python/xen/xend/XendDomain.py 2005-05-09 22:11:33.000000000 -0500 +++ b/tools/python/xen/xend/XendDomain.py 2005-05-10 10:13:18.960372560 
diff -urN a/tools/python/xen/xend/XendDomain.py b/tools/python/xen/xend/XendDomain.py
--- a/tools/python/xen/xend/XendDomain.py	2005-05-09 22:11:33.000000000 -0500
+++ b/tools/python/xen/xend/XendDomain.py	2005-05-10 10:13:18.960372560 -0500
@@ -612,15 +612,16 @@
         xmigrate = XendMigrate.instance()
         return xmigrate.save_begin(dominfo, dst)

-    def domain_pincpu(self, id, cpu):
-        """Pin a domain to a cpu.
+    def domain_pincpu(self, id, vcpu, cpumap):
+        """Set which cpus vcpu can use

-        @param id: domain
-        @param cpu: cpu number
+        @param id:     domain
+        @param vcpu:   vcpu number
+        @param cpumap: bitmap of usable cpus
         """
         dominfo = self.domain_lookup(id)
         try:
-            return xc.domain_pincpu(int(dominfo.id), cpu)
+            return xc.domain_pincpu(int(dominfo.id), vcpu, cpumap)
         except Exception, ex:
             raise XendError(str(ex))

diff -urN a/tools/python/xen/xm/main.py b/tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py	2005-05-09 22:11:32.000000000 -0500
+++ b/tools/python/xen/xm/main.py	2005-05-10 10:13:18.970371040 -0500
@@ -6,6 +6,8 @@
 import sys
 from getopt import getopt
 import socket
+import warnings
+warnings.filterwarnings('ignore', category=FutureWarning)

 from xen.xend import PrettyPrint
 from xen.xend import sxp
@@ -340,8 +342,8 @@
     name = "list"
     info = """List information about domains."""

-    short_options = 'l'
-    long_options = ['long']
+    short_options = 'lv'
+    long_options = ['long','vcpus']

     def help(self, args):
         if help:
@@ -350,11 +352,13 @@
         Either all domains or the domains given.

         -l, --long   Get more detailed information.
+        -v, --vcpus  Show VCPU to CPU mapping.
         """
         return

     def main(self, args):
         use_long = 0
+        show_vcpus = 0
         (options, params) = getopt(args[1:],
                                    self.short_options,
                                    self.long_options)
@@ -362,6 +366,8 @@
         for (k, v) in options:
             if k in ['-l', '--long']:
                 use_long = 1
+            if k in ['-v', '--vcpus']:
+                show_vcpus = 1

         if n == 0:
             doms = server.xend_domains()
@@ -371,11 +377,13 @@

         if use_long:
             self.long_list(doms)
+        elif show_vcpus:
+            self.show_vcpus(doms)
         else:
             self.brief_list(doms)

     def brief_list(self, doms):
-        print 'Name              Id  Mem(MB)  CPU  State  Time(s)  Console'
+        print 'Name              Id  Mem(MB)  CPU  VCPU(s)  State  Time(s)  Console'
         for dom in doms:
             info = server.xend_domain(dom)
             d = {}
@@ -383,6 +391,7 @@
             d['name'] = sxp.child_value(info, 'name', '??')
             d['mem'] = int(sxp.child_value(info, 'memory', '0'))
             d['cpu'] = int(sxp.child_value(info, 'cpu', '0'))
+            d['vcpus'] = int(sxp.child_value(info, 'vcpus', '0'))
             d['state'] = sxp.child_value(info, 'state', '??')
             d['cpu_time'] = float(sxp.child_value(info, 'cpu_time', '0'))
             console = sxp.child(info, 'console')
@@ -390,9 +399,27 @@
                 d['port'] = sxp.child_value(console, 'console_port')
             else:
                 d['port'] = ''
-            print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(state)5s %(cpu_time)7.1f %(port)4s"
+            print ("%(name)-16s %(dom)3d %(mem)7d %(cpu)3d %(vcpus)5d %(state)5s %(cpu_time)7.1f %(port)4s"
                    % d)

+    def show_vcpus(self, doms):
+        print 'Name              Id  VCPU  CPU  CPUMAP'
+        for dom in doms:
+            info = server.xend_domain(dom)
+            vcpu_to_cpu = sxp.child_value(info, 'vcpu_to_cpu', '?').replace('-','')
+            cpumap = sxp.child_value(info, 'cpumap', [])
+            mask = (2**int(sxp.child_value(info, 'vcpus', '0'))) - 1
+            count = 0
+            for cpu in vcpu_to_cpu:
+                d = {}
+                d['name'] = sxp.child_value(info, 'name', '??')
+                d['dom'] = int(sxp.child_value(info, 'id', '-1'))
+                d['vcpu'] = int(count)
+                d['cpu'] = int(cpu)
+                d['cpumap'] = int(cpumap[count])&mask
+                count = count + 1
+                print ("%(name)-16s %(dom)3d %(vcpu)4d %(cpu)3d  0x%(cpumap)x" % d)
+
     def long_list(self, doms):
         for dom in doms:
             info = server.xend_domain(dom)
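The mask in show_vcpus keeps only the low `vcpus` bits of each map, which is
why `xm list -v` shows exactly one bit per existing VCPU. A standalone check
of that truncation (illustrative, not part of the patch):

    def truncate_cpumap(cpumap, vcpus):
        """Keep only the low `vcpus` bits, as show_vcpus does."""
        mask = (2 ** vcpus) - 1        # equivalently (1 << vcpus) - 1
        return cpumap & mask

    print(hex(truncate_cpumap(0xFFFFFFFF, 4)))   # 0xf: four VCPUs, four bits shown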
""" + info = """Set which cpus a VCPU can use. """ def help(self, args): - print args[0],''DOM CPU'' - print ''\nPin domain DOM to cpu CPU.'' + print args[0],''DOM VCPU CPUS'' + print ''\nSet which cpus VCPU in domain DOM can use.'' + + # convert list of cpus to bitmap integer value + def make_map(self, cpulist): + cpus = [] + cpumap = 0 + for c in cpulist.split('',''): + if len(c) > 1: + (x,y) = c.split(''-'') + for i in range(int(x),int(y)+1): + cpus.append(int(i)) + else: + cpus.append(int(c)) + cpus.sort() + for c in cpus: + cpumap = cpumap | 1<<c + + return cpumap def main(self, args): - if len(args) != 3: self.err("%s: Invalid argument(s)" % args[0]) - dom = args[1] - cpu = int(args[2]) - server.xend_domain_pincpu(dom, cpu) + if len(args) != 4: self.err("%s: Invalid argument(s)" % args[0]) + dom = args[1] + vcpu = int(args[2]) + cpumap = self.make_map(args[3]); + server.xend_domain_pincpu(dom, vcpu, cpumap) xm.prog(ProgPincpu) diff -urN a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c --- a/xen/arch/x86/domain.c 2005-05-09 22:11:29.000000000 -0500 +++ b/xen/arch/x86/domain.c 2005-05-10 10:14:32.764152680 -0500 @@ -253,6 +253,7 @@ d->shared_info = (void *)alloc_xenheap_page(); memset(d->shared_info, 0, PAGE_SIZE); ed->vcpu_info = &d->shared_info->vcpu_data[ed->id]; + ed->cpumap = CPUMAP_RUNANYWHERE; SHARE_PFN_WITH_DOMAIN(virt_to_page(d->shared_info), d); machine_to_phys_mapping[virt_to_phys(d->shared_info) >> PAGE_SHIFT] = INVALID_M2P_ENTRY; diff -urN a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c --- a/xen/common/dom0_ops.c 2005-05-09 22:11:33.000000000 -0500 +++ b/xen/common/dom0_ops.c 2005-05-10 10:32:11.615182856 -0500 @@ -221,7 +221,8 @@ domid_t dom = op->u.pincpudomain.domain; struct domain *d = find_domain_by_id(dom); struct exec_domain *ed; - int cpu = op->u.pincpudomain.cpu; + cpumap_t curmap, *cpumap = &curmap; + if ( d == NULL ) { @@ -229,6 +230,14 @@ break; } + if ( (op->u.pincpudomain.exec_domain >= MAX_VIRT_CPUS) || + !d->exec_domain[op->u.pincpudomain.exec_domain] ) + { + ret = -EINVAL; + put_domain(d); + break; + } + ed = d->exec_domain[op->u.pincpudomain.exec_domain]; if ( ed == NULL ) { @@ -244,17 +253,29 @@ break; } - if ( cpu == -1 ) + if ( copy_from_user(cpumap, + op->u.pincpudomain.cpumap, sizeof(*cpumap)) ) { - clear_bit(EDF_CPUPINNED, &ed->flags); + ret = -EFAULT; + put_domain(d); + break; } + + /* update cpumap for this ed */ + ed->cpumap = *(cpumap); + + if ( *(cpumap) == CPUMAP_RUNANYWHERE ) + clear_bit(EDF_CPUPINNED, &ed->flags); else { + /* pick a new cpu from the usable map */ + int new_cpu = (int)find_first_set_bit(*(cpumap)) % smp_num_cpus; + exec_domain_pause(ed); - if ( ed->processor != (cpu % smp_num_cpus) ) + if ( ed->processor != new_cpu ) set_bit(EDF_MIGRATED, &ed->flags); set_bit(EDF_CPUPINNED, &ed->flags); - ed->processor = cpu % smp_num_cpus; + ed->processor = new_cpu; exec_domain_unpause(ed); } @@ -308,6 +329,12 @@ break; } + memset(&op->u.getdomaininfo.vcpu_to_cpu,-1,MAX_VIRT_CPUS*sizeof(u8)); + for_each_exec_domain ( d, ed ) { + op->u.getdomaininfo.vcpu_to_cpu[ed->id] = ed->processor; + op->u.getdomaininfo.cpumap[ed->id] = ed->cpumap; + } + ed = d->exec_domain[op->u.getdomaininfo.exec_domain]; op->u.getdomaininfo.flags @@ -325,6 +352,7 @@ op->u.getdomaininfo.tot_pages = d->tot_pages; op->u.getdomaininfo.max_pages = d->max_pages; op->u.getdomaininfo.cpu_time = ed->cpu_time; + op->u.getdomaininfo.n_vcpu = d->shared_info->n_vcpu; op->u.getdomaininfo.shared_info_frame = __pa(d->shared_info) >> PAGE_SHIFT; diff -urN a/xen/common/domain.c 
diff -urN a/xen/common/domain.c b/xen/common/domain.c
--- a/xen/common/domain.c	2005-05-09 22:11:29.000000000 -0500
+++ b/xen/common/domain.c	2005-05-10 10:13:19.022363136 -0500
@@ -283,6 +283,7 @@

     ed = d->exec_domain[vcpu];
     atomic_set(&ed->pausecnt, 0);
+    ed->cpumap = CPUMAP_RUNANYWHERE;

     memcpy(&ed->arch, &idle0_exec_domain.arch, sizeof(ed->arch));

diff -urN a/xen/include/public/dom0_ops.h b/xen/include/public/dom0_ops.h
--- a/xen/include/public/dom0_ops.h	2005-05-09 22:11:34.000000000 -0500
+++ b/xen/include/public/dom0_ops.h	2005-05-10 10:13:19.031361768 -0500
@@ -88,6 +88,9 @@
     memory_t max_pages;
     memory_t shared_info_frame;  /* MFN of shared_info struct */
     u64      cpu_time;
+    u32      n_vcpu;
+    u32      vcpu_to_cpu[MAX_VIRT_CPUS];  /* current mapping   */
+    cpumap_t cpumap[MAX_VIRT_CPUS];       /* allowable mapping */
 } dom0_getdomaininfo_t;

 #define DOM0_SETDOMAININFO      13
@@ -170,14 +173,14 @@
 } dom0_readconsole_t;

 /*
- * Pin Domain to a particular CPU (use -1 to unpin)
+ * Set which cpus an exec_domain can use
  */
 #define DOM0_PINCPUDOMAIN     20
 typedef struct {
     /* IN variables. */
     domid_t      domain;
     u16          exec_domain;
-    s32          cpu;   /* -1 implies unpin */
+    cpumap_t     *cpumap;
 } dom0_pincpudomain_t;

 /* Get trace buffers machine base address */
diff -urN a/xen/include/public/xen.h b/xen/include/public/xen.h
--- a/xen/include/public/xen.h	2005-05-09 22:11:28.000000000 -0500
+++ b/xen/include/public/xen.h	2005-05-10 10:13:19.041360248 -0500
@@ -473,6 +473,8 @@
 /* For use in guest OSes. */
 extern shared_info_t *HYPERVISOR_shared_info;

+typedef u64 cpumap_t;
+
 #endif /* !__ASSEMBLY__ */

 #endif /* __XEN_PUBLIC_XEN_H__ */
diff -urN a/xen/include/xen/sched.h b/xen/include/xen/sched.h
--- a/xen/include/xen/sched.h	2005-05-09 22:11:27.000000000 -0500
+++ b/xen/include/xen/sched.h	2005-05-10 10:13:19.064356752 -0500
@@ -58,6 +58,8 @@
 void destroy_event_channels(struct domain *d);
 int  init_exec_domain_event_channels(struct exec_domain *ed);
+
+#define CPUMAP_RUNANYWHERE 0xFFFFFFFF

 struct exec_domain
 {
     int   id;
@@ -84,6 +86,8 @@

     atomic_t pausecnt;

+    cpumap_t cpumap; /* which cpus this domain can run on */
+
     struct arch_exec_domain arch;
 };
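Taking the whole series together: `xm pincpu DOM VCPU CPUS` accepts
comma-separated CPU numbers and dash ranges, which ProgPincpu.make_map folds
into the bitmask sent down the stack. A standalone check of that syntax
(same logic as make_map above, illustrative values):

    def make_map(cpulist):
        """Fold a cpu-list string like '0,2-4' into a cpumap bitmask."""
        cpus = []
        for c in cpulist.split(','):
            if '-' in c:
                (lo, hi) = c.split('-')
                cpus.extend(range(int(lo), int(hi) + 1))
            else:
                cpus.append(int(c))
        cpumap = 0
        for c in cpus:
            cpumap |= 1 << c
        return cpumap

    print(hex(make_map('0,2-4')))   # 0x1d: CPUs 0, 2, 3 and 4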
Mike Wray
2005-May-11 15:16 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
Ryan Harper wrote:
> * Ryan Harper <ryanh@us.ibm.com> [2005-05-10 10:01]:
>
>> * Mike Wray <mike.wray@hp.com> [2005-05-10 09:08]:
>>
>>> So it looks like we need to try again.
>>> Apologies if this is because of changes after
>>> you submitted the patch.
>>>
>>> I also had two failed hunks, which I fixed manually:
>>>
>>> 1 out of 1 hunk FAILED -- saving rejects to file xen/arch/x86/domain.c.rej
>>> 1 out of 5 hunks FAILED -- saving rejects to file xen/common/dom0_ops.c.rej
>>
>> I'll update it against current unstable and resubmit. Thanks.
>
> Updated against 2005-05-10 nightly unstable snapshot.

Thanks, I'll apply it.

Mike
Christian Limpach
2005-May-11 20:30 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
On 5/11/05, Mike Wray <mike.wray@hp.com> wrote:
> Ryan Harper wrote:
> > * Ryan Harper <ryanh@us.ibm.com> [2005-05-10 10:01]:
> >
> >> * Mike Wray <mike.wray@hp.com> [2005-05-10 09:08]:
> >>
> >>> So it looks like we need to try again.
> >>> Apologies if this is because of changes after
> >>> you submitted the patch.
> >>>
> >>> I also had two failed hunks, which I fixed manually:
> >>>
> >>> 1 out of 1 hunk FAILED -- saving rejects to file xen/arch/x86/domain.c.rej
> >>> 1 out of 5 hunks FAILED -- saving rejects to file xen/common/dom0_ops.c.rej
> >>
> >> I'll update it against current unstable and resubmit. Thanks.
> >
> > Updated against 2005-05-10 nightly unstable snapshot.
>
> Thanks, I'll apply it.

I've applied this already -- but it needs some more changes, like
moving the vcpu_to_cpu and cpumap arrays out of getdomaininfo into the
new getvcpucontext, allowing this to scale beyond MAX_VIRT_CPUS.
Also, I think sparse vcpu allocations aren't handled right for these
two arrays, which moving them to getvcpucontext will also fix.
Patches for this would be highly appreciated!

christian
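The shape Christian is suggesting would replace the two fixed-size arrays
with one query per VCPU. A hypothetical sketch of that consumer side
(function and field names are invented for illustration; the real
getvcpucontext interface is defined by the separate GETVCPUCONTEXT patch,
not here):

    def collect_placement(query_vcpu, dom, nr_vcpus):
        """Gather (cpu, cpumap) per VCPU; sparse allocations are simply skipped."""
        placement = {}
        for v in range(nr_vcpus):
            info = query_vcpu(dom, v)    # hypothetical per-VCPU hypercall wrapper
            if info is not None:         # an unallocated VCPU returns nothing
                placement[v] = (info['cpu'], info['cpumap'])
        return placement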
Kip Macy
2005-May-11 20:41 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
Yes, this patch obviously conflicts with mine. The GETVCPUCONTEXT
patch has not gone in yet, nor have I received feedback. Should I
resolve the conflicts and re-submit?

On 5/11/05, Christian Limpach <christian.limpach@gmail.com> wrote:
> On 5/11/05, Mike Wray <mike.wray@hp.com> wrote:
> > Ryan Harper wrote:
> > > * Ryan Harper <ryanh@us.ibm.com> [2005-05-10 10:01]:
> > >
> > >> * Mike Wray <mike.wray@hp.com> [2005-05-10 09:08]:
> > >>
> > >>> So it looks like we need to try again.
> > >>> Apologies if this is because of changes after
> > >>> you submitted the patch.
> > >>>
> > >>> I also had two failed hunks, which I fixed manually:
> > >>>
> > >>> 1 out of 1 hunk FAILED -- saving rejects to file xen/arch/x86/domain.c.rej
> > >>> 1 out of 5 hunks FAILED -- saving rejects to file xen/common/dom0_ops.c.rej
> > >>
> > >> I'll update it against current unstable and resubmit. Thanks.
> > >
> > > Updated against 2005-05-10 nightly unstable snapshot.
> >
> > Thanks, I'll apply it.
>
> I've applied this already -- but it needs some more changes, like
> moving the vcpu_to_cpu and cpumap arrays out of getdomaininfo into the
> new getvcpucontext, allowing this to scale beyond MAX_VIRT_CPUS.
> Also, I think sparse vcpu allocations aren't handled right for these
> two arrays, which moving them to getvcpucontext will also fix.
> Patches for this would be highly appreciated!
>
> christian
Christian Limpach
2005-May-11 21:17 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
On 5/11/05, Kip Macy <kip.macy@gmail.com> wrote:
> Yes, this patch obviously conflicts with mine. The GETVCPUCONTEXT
> patch has not gone in yet, nor have I received feedback. Should I
> resolve the conflicts and re-submit?

I've also applied your patch and resolved some of the conflicts. I've
pushed the changesets out to bkbits now. What remains to be done is
to move the two arrays in getdomaininfo to getvcpucontext to get rid
of the fixed-size arrays.

christian
Kip Macy
2005-May-11 21:18 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
Great, I really appreciate it.

            -Kip

On 5/11/05, Christian Limpach <christian.limpach@gmail.com> wrote:
> On 5/11/05, Kip Macy <kip.macy@gmail.com> wrote:
> > Yes, this patch obviously conflicts with mine. The GETVCPUCONTEXT
> > patch has not gone in yet, nor have I received feedback. Should I
> > resolve the conflicts and re-submit?
>
> I've also applied your patch and resolved some of the conflicts. I've
> pushed the changesets out to bkbits now. What remains to be done is
> to move the two arrays in getdomaininfo to getvcpucontext to get rid
> of the fixed-size arrays.
>
> christian
Mike Wray
2005-May-12 15:16 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
Ryan Harper wrote:
> * Ryan Harper <ryanh@us.ibm.com> [2005-05-10 10:01]:
>
>> * Mike Wray <mike.wray@hp.com> [2005-05-10 09:08]:
>>
>>> So it looks like we need to try again.
>>> Apologies if this is because of changes after
>>> you submitted the patch.
>>>
>>> I also had two failed hunks, which I fixed manually:
>>>
>>> 1 out of 1 hunk FAILED -- saving rejects to file xen/arch/x86/domain.c.rej
>>> 1 out of 5 hunks FAILED -- saving rejects to file xen/common/dom0_ops.c.rej
>>
>> I'll update it against current unstable and resubmit. Thanks.
>
> Updated against 2005-05-10 nightly unstable snapshot.

Applied the patch, which was clean this time, but then
dom0 crashed at boot. Reverting to unstable, I had the same problem.
So I pulled unstable again, and now unstable doesn't compile,
so it doesn't look like I'll get this committed today.
Sorry about the delay,

Mike
Mike Wray
2005-May-12 16:06 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
Mike Wray wrote:
> Ryan Harper wrote:
>
>> * Ryan Harper <ryanh@us.ibm.com> [2005-05-10 10:01]:
>>
>>> * Mike Wray <mike.wray@hp.com> [2005-05-10 09:08]:
>>>
>>>> So it looks like we need to try again.
>>>> Apologies if this is because of changes after
>>>> you submitted the patch.
>>>>
>>>> I also had two failed hunks, which I fixed manually:
>>>>
>>>> 1 out of 1 hunk FAILED -- saving rejects to file xen/arch/x86/domain.c.rej
>>>> 1 out of 5 hunks FAILED -- saving rejects to file xen/common/dom0_ops.c.rej
>>>
>>> I'll update it against current unstable and resubmit. Thanks.
>>
>> Updated against 2005-05-10 nightly unstable snapshot.
>
> Applied the patch, which was clean this time, but then
> dom0 crashed at boot. Reverting to unstable, I had the same problem.
> So I pulled unstable again, and now unstable doesn't compile,
> so it doesn't look like I'll get this committed today.
> Sorry about the delay,

Looks like Keir has already committed the patch - so
consider it done.

Mike
Ryan Harper
2005-May-12 16:17 UTC
Re: [Xen-devel] [RESUBMIT] [PATCH] xen, tools: pincpu use vcpu and cpumap_t
* Mike Wray <mike.wray@hp.com> [2005-05-12 11:14]:
> Mike Wray wrote:
> > Ryan Harper wrote:
> >
> >> * Ryan Harper <ryanh@us.ibm.com> [2005-05-10 10:01]:
> >>
> >>> * Mike Wray <mike.wray@hp.com> [2005-05-10 09:08]:
> >>>
> >>>> So it looks like we need to try again.
> >>>> Apologies if this is because of changes after
> >>>> you submitted the patch.
> >>>>
> >>>> I also had two failed hunks, which I fixed manually:
> >>>>
> >>>> 1 out of 1 hunk FAILED -- saving rejects to file xen/arch/x86/domain.c.rej
> >>>> 1 out of 5 hunks FAILED -- saving rejects to file xen/common/dom0_ops.c.rej
> >>>
> >>> I'll update it against current unstable and resubmit. Thanks.
> >>
> >> Updated against 2005-05-10 nightly unstable snapshot.
> >
> > Applied the patch, which was clean this time, but then
> > dom0 crashed at boot. Reverting to unstable, I had the same problem.
> > So I pulled unstable again, and now unstable doesn't compile,
> > so it doesn't look like I'll get this committed today.
> > Sorry about the delay,
>
> Looks like Keir has already committed the patch - so
> consider it done.

Great! Thanks for the help with review and testing.

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com