This patch adds new control messages for vcpu hotplug events. Via the xm vcpu_hotplug sub-program, VCPUS in domains can be enabled/disabled when CONFIG_HOTPLUG_CPU is enabled in the target domain''s kernel. Currently there is nothing that tracks whether a VCPU is up or down. My previous [1]patch added a new per-VCPU flag (VCPUF_down) which could be used to keep track of which VCPUS are up and down. Right now, there isn''t a hypercall that returns the per-VCPU flag status (something equivalent to the per-domain flags in get_dominfo ). Have we thought about a get_vcpu_info hypercall? I''d like to get that implemented so I can report VCPU state in the xm list --vcpus output. That would also make it easier to skip sending control messages that don''t change the VCPU''s state (e.g. sending a down message to a vcpu that is already down). Please apply. 1. http://lists.xensource.com/archives/html/xen-devel/2005-06/msg00192.html -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx (512) 838-9253 T/L: 678-9253 ryanh@us.ibm.com diffstat output: linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c | 81 +++++++++++++++++ tools/python/xen/lowlevel/xu/xu.c | 16 +++ tools/python/xen/xend/XendClient.py | 6 + tools/python/xen/xend/XendDomain.py | 4 tools/python/xen/xend/XendDomainInfo.py | 12 ++ tools/python/xen/xend/server/SrvDomain.py | 8 + tools/python/xen/xend/server/messages.py | 18 +++ tools/python/xen/xm/main.py | 21 ++++ xen/include/public/io/domain_controller.h | 20 ++++ 9 files changed, 186 insertions(+) Signed-off-by: Ryan Harper <ryanh@us.ibm.com> --- diff -urN a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c 2005-06-06 22:05:33.000000000 -0500 +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c 2005-06-07 16:53:49.362987126 -0500 @@ -85,6 +85,13 @@ /* Set when the idlers are all forked */ int 
smp_threads_ready; +#ifdef CONFIG_HOTPLUG_CPU +struct vcpu_hotplug_handler_t { + void (*fn)(); + u32 vcpu; +}; +#endif + #if 0 /* * Trampoline 80x86 program as an array. @@ -1297,6 +1304,9 @@ } #ifdef CONFIG_HOTPLUG_CPU +#include <asm-xen/ctrl_if.h> +/* hotplug down/up funtion pointer and target vcpu */ +struct vcpu_hotplug_handler_t vcpu_hotplug_handler; /* must be called with the cpucontrol mutex held */ static int __devinit cpu_enable(unsigned int cpu) @@ -1357,6 +1367,77 @@ } printk(KERN_ERR "CPU %u didn''t die...\n", cpu); } + +static int vcpu_hotplug_cpu_process(void *unused) +{ + struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler; + + if ( handler->fn ) { + (*(handler->fn))(handler->vcpu); + handler->fn = NULL; + } + return 0; +} + +static void __vcpu_hotplug_handler(void *unused) +{ + int err; + + err = kernel_thread(vcpu_hotplug_cpu_process, + NULL, CLONE_FS | CLONE_FILES); + if ( err < 0 ) + printk(KERN_ALERT "Error creating hotplug_cpu process!\n"); + +} + +static void vcpu_hotplug_event_handler(ctrl_msg_t *msg, unsigned long id) +{ + static DECLARE_WORK(vcpu_hotplug_work, __vcpu_hotplug_handler, NULL); + vcpu_hotplug_t *req = (vcpu_hotplug_t *)&msg->msg[0]; + struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler; + ssize_t ret; + + if ( msg->length != sizeof(vcpu_hotplug_t) ) + goto parse_error; + + /* grab target vcpu from msg */ + handler->vcpu = req->vcpu; + + /* determine which function to call based on msg subtype */ + switch ( msg->subtype ) { + case CMSG_VCPU_HOTPLUG_OFF: + handler->fn = (void *)&cpu_down; + ret = schedule_work(&vcpu_hotplug_work); + req->status = (u32) ret; + break; + case CMSG_VCPU_HOTPLUG_ON: + handler->fn = (void *)&cpu_up; + ret = schedule_work(&vcpu_hotplug_work); + req->status = (u32) ret; + break; + default: + goto parse_error; + } + + ctrl_if_send_response(msg); + return; + parse_error: + msg->length = 0; + ctrl_if_send_response(msg); +} + +static int __init setup_vcpu_hotplug_event(void) +{ + struct 
vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler; + + handler->fn = NULL; + ctrl_if_register_receiver(CMSG_VCPU_HOTPLUG, vcpu_hotplug_event_handler, 0); + + return 0; +} + +__initcall(setup_vcpu_hotplug_event); + #else /* ... !CONFIG_HOTPLUG_CPU */ int __cpu_disable(void) { diff -urN a/tools/python/xen/lowlevel/xu/xu.c b/tools/python/xen/lowlevel/xu/xu.c --- a/tools/python/xen/lowlevel/xu/xu.c 2005-06-06 22:05:28.000000000 -0500 +++ b/tools/python/xen/lowlevel/xu/xu.c 2005-06-07 16:51:43.084342691 -0500 @@ -744,6 +744,14 @@ C2P(mem_request_t, target, Int, Long); C2P(mem_request_t, status, Int, Long); return dict; + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF): + C2P(vcpu_hotplug_t, vcpu, Int, Long); + C2P(vcpu_hotplug_t, status, Int, Long); + return dict; + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON): + C2P(vcpu_hotplug_t, vcpu, Int, Long); + C2P(vcpu_hotplug_t, status, Int, Long); + return dict; } return PyString_FromStringAndSize((char *)xum->msg.msg, xum->msg.length); @@ -910,6 +918,14 @@ P2C(mem_request_t, target, u32); P2C(mem_request_t, status, u32); break; + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF): + P2C(vcpu_hotplug_t, vcpu, u32); + P2C(vcpu_hotplug_t, status, u32); + break; + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON): + P2C(vcpu_hotplug_t, vcpu, u32); + P2C(vcpu_hotplug_t, status, u32); + break; case TYPE(CMSG_USBIF_FE, CMSG_USBIF_FE_INTERFACE_STATUS_CHANGED): P2C(usbif_fe_interface_status_changed_t, status, u32); P2C(usbif_fe_interface_status_changed_t, evtchn, u16); diff -urN a/tools/python/xen/xend/XendClient.py b/tools/python/xen/xend/XendClient.py --- a/tools/python/xen/xend/XendClient.py 2005-06-06 22:05:27.000000000 -0500 +++ b/tools/python/xen/xend/XendClient.py 2005-06-07 16:51:43.086342400 -0500 @@ -271,6 +271,12 @@ ''target'' : mem_target }) return val + def xend_domain_vcpu_hotplug(self, id, vcpu, state): + return self.xendPost(self.domainurl(id), + {''op'' : ''vcpu_hotplug'', + ''vcpu'' : vcpu, + 
''state'' : state }) + def xend_domain_vif_limit(self, id, vif, credit, period): return self.xendPost(self.domainurl(id), { ''op'' : ''vif_limit_set'', diff -urN a/tools/python/xen/xend/XendDomain.py b/tools/python/xen/xend/XendDomain.py --- a/tools/python/xen/xend/XendDomain.py 2005-06-06 22:05:32.000000000 -0500 +++ b/tools/python/xen/xend/XendDomain.py 2005-06-07 16:51:43.090341819 -0500 @@ -703,6 +703,10 @@ dominfo = self.domain_lookup(id) return dominfo.mem_target_set(target) + def domain_vcpu_hotplug(self, id, vcpu, state): + dominfo = self.domain_lookup(id) + return dominfo.vcpu_hotplug(vcpu, state) + def instance(): diff -urN a/tools/python/xen/xend/XendDomainInfo.py b/tools/python/xen/xend/XendDomainInfo.py --- a/tools/python/xen/xend/XendDomainInfo.py 2005-06-06 22:05:33.000000000 -0500 +++ b/tools/python/xen/xend/XendDomainInfo.py 2005-06-07 16:51:43.092341528 -0500 @@ -1071,6 +1071,18 @@ msg = messages.packMsg(''mem_request_t'', { ''target'' : target * (1 << 8)} ) self.channel.writeRequest(msg) + def vcpu_hotplug(self, vcpu, state): + """Disable or enable VCPU in domain. 
+ """ + if self.channel: + if int(state) == 0: + msg = messages.packMsg(''vcpu_hotplug_off_t'', { ''vcpu'' : vcpu} ) + else: + msg = messages.packMsg(''vcpu_hotplug_on_t'', { ''vcpu'' : vcpu} ) + + self.channel.writeRequest(msg) + + def shutdown(self, reason, key=0): msgtype = shutdown_messages.get(reason) if not msgtype: diff -urN a/tools/python/xen/xend/server/SrvDomain.py b/tools/python/xen/xend/server/SrvDomain.py --- a/tools/python/xen/xend/server/SrvDomain.py 2005-06-06 22:05:31.000000000 -0500 +++ b/tools/python/xen/xend/server/SrvDomain.py 2005-06-07 16:51:43.095341092 -0500 @@ -180,6 +180,14 @@ val = fn(req.args, {''dom'': self.dom.id}) return val + def op_vcpu_hotplug(self, op, req): + fn = FormFn(self.xd.domain_vcpu_hotplug, + [[''dom'', ''str''], + [''vcpu'', ''int''], + [''state'', ''int'']]) + val = fn(req.args, {''dom'': self.dom.id}) + return val + def render_POST(self, req): return self.perform(req) diff -urN a/tools/python/xen/xend/server/messages.py b/tools/python/xen/xend/server/messages.py --- a/tools/python/xen/xend/server/messages.py 2005-06-06 22:05:28.000000000 -0500 +++ b/tools/python/xen/xend/server/messages.py 2005-06-07 16:51:43.125336730 -0500 @@ -309,6 +309,24 @@ msg_formats.update(mem_request_formats) #===========================================================================+# Domain vcpu hotplug message. 
+#===========================================================================+ +CMSG_VCPU_HOTPLUG = 10 +CMSG_VCPU_HOTPLUG_OFF = 0 +CMSG_VCPU_HOTPLUG_ON = 1 + +vcpu_hotplug_formats = { + ''vcpu_hotplug_off_t'': + (CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF), + + ''vcpu_hotplug_on_t'': + (CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON) + } + +msg_formats.update(vcpu_hotplug_formats) + +#=========================================================================== class Msg: pass diff -urN a/tools/python/xen/xm/main.py b/tools/python/xen/xm/main.py --- a/tools/python/xen/xm/main.py 2005-06-06 22:05:31.000000000 -0500 +++ b/tools/python/xen/xm/main.py 2005-06-07 16:51:43.127336439 -0500 @@ -568,6 +568,27 @@ xm.prog(ProgBalloon) + +class ProgVcpuhotplug(Prog): + group = ''domain'' + name = ''vcpu_hotplug'' + info = """Enable or disable a VCPU in a domain.""" + + def help(self, args): + print args[0], "DOM VCPU [0|1]" + print """\nRequest virtual processor VCPU to be disabled or enabled in +domain DOM""" + + def main(self, args): + if len(args) != 4: self.err("%s: Invalid arguments(s)" % args[0]) + dom = args[1] + vcpu = args[2] + state = args[3] + server.xend_domain_vcpu_hotplug(dom, vcpu, state) + +xm.prog(ProgVcpuhotplug) + + class ProgDomid(Prog): group = ''domain'' name = ''domid'' diff -urN a/xen/include/public/io/domain_controller.h b/xen/include/public/io/domain_controller.h --- a/xen/include/public/io/domain_controller.h 2005-06-06 22:05:31.000000000 -0500 +++ b/xen/include/public/io/domain_controller.h 2005-06-07 16:51:43.139334694 -0500 @@ -61,6 +61,7 @@ #define CMSG_MEM_REQUEST 7 /* Memory reservation reqs */ #define CMSG_USBIF_BE 8 /* USB controller backend */ #define CMSG_USBIF_FE 9 /* USB controller frontend */ +#define CMSG_VCPU_HOTPLUG 10 /* Hotplug VCPU messages */ /****************************************************************************** * CONSOLE DEFINITIONS @@ -758,6 +759,25 @@ } PACKED shutdown_sysrq_t; /* 4 bytes */ 
/****************************************************************************** + * VCPU HOTPLUG CONTROLS + */ + +/* + * Subtypes for shutdown messages. + */ +#define CMSG_VCPU_HOTPLUG_OFF 0 /* turn vcpu off */ +#define CMSG_VCPU_HOTPLUG_ON 1 /* turn vcpu on */ + +/* + * CMSG_VCPU_HOTPLUG: + * Indicate which vcpu''s state should change + */ +typedef struct { + u32 vcpu; /* 0: VCPU''s whose state will change */ + u32 status; /* 4: Return code indicates success or failure. */ +} PACKED vcpu_hotplug_t; + +/****************************************************************************** * MEMORY CONTROLS */ _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
> -----Original Message----- > From: xen-devel-bounces@lists.xensource.com > [mailto:xen-devel-bounces@lists.xensource.com] On Behalf Of > Ryan Harper > Sent: 07 June 2005 23:03 > To: xen-devel@lists.xensource.com > Subject: [Xen-devel] [PATCH] add dom0 vcpu hotplug control > > This patch adds new control messages for vcpu hotplug events. > Via the xm vcpu_hotplug sub-program, VCPUS in domains can be > enabled/disabled when CONFIG_HOTPLUG_CPU is enabled in the > target domain''s kernel. > > Currently there is nothing that tracks whether a VCPU is up or down. > My previous [1]patch added a new per-VCPU flag (VCPUF_down) > which could be used to keep track of which VCPUS are up and > down. Right now, there isn''t a hypercall that returns the > per-VCPU flag status (something equivalent to the per-domain > flags in get_dominfo ). Have we thought about a > get_vcpu_info hypercall? I''d like to get that implemented so > I can report VCPU state in the xm list --vcpus output. That > would also make it easier to skip sending control messages > that don''t change the VCPU''s state (e.g. sending a down > message to a vcpu that is already down).I haven''t looked at the code, but can you not use DOM0_GETVCPUCONTEXT to iterate over all the VCPUs for a domain? (e.g. if you ask for VCPU N and it doesn''t exist (isn''t up) you''ll get the state for the next highest numbered one, or ESRCH if there isn''t one) That''s the behaviour I''d expect. Ian> > Please apply. > > 1. 
> http://lists.xensource.com/archives/html/xen-devel/2005-06/msg > 00192.html > > -- > Ryan Harper > Software Engineer; Linux Technology Center IBM Corp., Austin, Tx > (512) 838-9253 T/L: 678-9253 > ryanh@us.ibm.com > > diffstat output: > linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c | > 81 +++++++++++++++++ > tools/python/xen/lowlevel/xu/xu.c | 16 +++ > tools/python/xen/xend/XendClient.py | 6 + > tools/python/xen/xend/XendDomain.py | 4 > tools/python/xen/xend/XendDomainInfo.py | 12 ++ > tools/python/xen/xend/server/SrvDomain.py | 8 + > tools/python/xen/xend/server/messages.py | 18 +++ > tools/python/xen/xm/main.py | 21 ++++ > xen/include/public/io/domain_controller.h | 20 ++++ > 9 files changed, 186 insertions(+) > > Signed-off-by: Ryan Harper <ryanh@us.ibm.com> > --- > diff -urN > a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c > b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c > --- a/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c > 2005-06-06 22:05:33.000000000 -0500 > +++ b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c > 2005-06-07 16:53:49.362987126 -0500 > @@ -85,6 +85,13 @@ > /* Set when the idlers are all forked */ int smp_threads_ready; > > +#ifdef CONFIG_HOTPLUG_CPU > +struct vcpu_hotplug_handler_t { > + void (*fn)(); > + u32 vcpu; > +}; > +#endif > + > #if 0 > /* > * Trampoline 80x86 program as an array. 
> @@ -1297,6 +1304,9 @@ > } > > #ifdef CONFIG_HOTPLUG_CPU > +#include <asm-xen/ctrl_if.h> > +/* hotplug down/up funtion pointer and target vcpu */ struct > +vcpu_hotplug_handler_t vcpu_hotplug_handler; > > /* must be called with the cpucontrol mutex held */ static > int __devinit cpu_enable(unsigned int cpu) @@ -1357,6 +1367,77 @@ > } > printk(KERN_ERR "CPU %u didn''t die...\n", cpu); } > + > +static int vcpu_hotplug_cpu_process(void *unused) { > + struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler; > + > + if ( handler->fn ) { > + (*(handler->fn))(handler->vcpu); > + handler->fn = NULL; > + } > + return 0; > +} > + > +static void __vcpu_hotplug_handler(void *unused) { > + int err; > + > + err = kernel_thread(vcpu_hotplug_cpu_process, > + NULL, CLONE_FS | > CLONE_FILES); > + if ( err < 0 ) > + printk(KERN_ALERT "Error creating hotplug_cpu process!\n"); > + > +} > + > +static void vcpu_hotplug_event_handler(ctrl_msg_t *msg, > unsigned long > +id) { > + static DECLARE_WORK(vcpu_hotplug_work, > __vcpu_hotplug_handler, NULL); > + vcpu_hotplug_t *req = (vcpu_hotplug_t *)&msg->msg[0]; > + struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler; > + ssize_t ret; > + > + if ( msg->length != sizeof(vcpu_hotplug_t) ) > + goto parse_error; > + > + /* grab target vcpu from msg */ > + handler->vcpu = req->vcpu; > + > + /* determine which function to call based on msg subtype */ > + switch ( msg->subtype ) { > + case CMSG_VCPU_HOTPLUG_OFF: > + handler->fn = (void *)&cpu_down; > + ret = schedule_work(&vcpu_hotplug_work); > + req->status = (u32) ret; > + break; > + case CMSG_VCPU_HOTPLUG_ON: > + handler->fn = (void *)&cpu_up; > + ret = schedule_work(&vcpu_hotplug_work); > + req->status = (u32) ret; > + break; > + default: > + goto parse_error; > + } > + > + ctrl_if_send_response(msg); > + return; > + parse_error: > + msg->length = 0; > + ctrl_if_send_response(msg); > +} > + > +static int __init setup_vcpu_hotplug_event(void) { > + struct vcpu_hotplug_handler_t 
*handler = &vcpu_hotplug_handler; > + > + handler->fn = NULL; > + ctrl_if_register_receiver(CMSG_VCPU_HOTPLUG, > + vcpu_hotplug_event_handler, 0); > + > + return 0; > +} > + > +__initcall(setup_vcpu_hotplug_event); > + > #else /* ... !CONFIG_HOTPLUG_CPU */ > int __cpu_disable(void) > { > diff -urN a/tools/python/xen/lowlevel/xu/xu.c > b/tools/python/xen/lowlevel/xu/xu.c > --- a/tools/python/xen/lowlevel/xu/xu.c 2005-06-06 > 22:05:28.000000000 -0500 > +++ b/tools/python/xen/lowlevel/xu/xu.c 2005-06-07 > 16:51:43.084342691 -0500 > @@ -744,6 +744,14 @@ > C2P(mem_request_t, target, Int, Long); > C2P(mem_request_t, status, Int, Long); > return dict; > + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF): > + C2P(vcpu_hotplug_t, vcpu, Int, Long); > + C2P(vcpu_hotplug_t, status, Int, Long); > + return dict; > + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON): > + C2P(vcpu_hotplug_t, vcpu, Int, Long); > + C2P(vcpu_hotplug_t, status, Int, Long); > + return dict; > } > > return PyString_FromStringAndSize((char *)xum->msg.msg, > xum->msg.length); @@ -910,6 +918,14 @@ > P2C(mem_request_t, target, u32); > P2C(mem_request_t, status, u32); > break; > + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF): > + P2C(vcpu_hotplug_t, vcpu, u32); > + P2C(vcpu_hotplug_t, status, u32); > + break; > + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON): > + P2C(vcpu_hotplug_t, vcpu, u32); > + P2C(vcpu_hotplug_t, status, u32); > + break; > case TYPE(CMSG_USBIF_FE, CMSG_USBIF_FE_INTERFACE_STATUS_CHANGED): > P2C(usbif_fe_interface_status_changed_t, status, u32); > P2C(usbif_fe_interface_status_changed_t, evtchn, > u16); diff -urN a/tools/python/xen/xend/XendClient.py > b/tools/python/xen/xend/XendClient.py > --- a/tools/python/xen/xend/XendClient.py 2005-06-06 > 22:05:27.000000000 -0500 > +++ b/tools/python/xen/xend/XendClient.py 2005-06-07 > 16:51:43.086342400 -0500 > @@ -271,6 +271,12 @@ > ''target'' : mem_target }) > return val > > + def xend_domain_vcpu_hotplug(self, id, vcpu, state): 
> + return self.xendPost(self.domainurl(id), > + {''op'' : ''vcpu_hotplug'', > + ''vcpu'' : vcpu, > + ''state'' : state }) > + > def xend_domain_vif_limit(self, id, vif, credit, period): > return self.xendPost(self.domainurl(id), > { ''op'' : ''vif_limit_set'', > diff -urN a/tools/python/xen/xend/XendDomain.py > b/tools/python/xen/xend/XendDomain.py > --- a/tools/python/xen/xend/XendDomain.py 2005-06-06 > 22:05:32.000000000 -0500 > +++ b/tools/python/xen/xend/XendDomain.py 2005-06-07 > 16:51:43.090341819 -0500 > @@ -703,6 +703,10 @@ > dominfo = self.domain_lookup(id) > return dominfo.mem_target_set(target) > > + def domain_vcpu_hotplug(self, id, vcpu, state): > + dominfo = self.domain_lookup(id) > + return dominfo.vcpu_hotplug(vcpu, state) > + > > > def instance(): > diff -urN a/tools/python/xen/xend/XendDomainInfo.py > b/tools/python/xen/xend/XendDomainInfo.py > --- a/tools/python/xen/xend/XendDomainInfo.py 2005-06-06 > 22:05:33.000000000 -0500 > +++ b/tools/python/xen/xend/XendDomainInfo.py 2005-06-07 > 16:51:43.092341528 -0500 > @@ -1071,6 +1071,18 @@ > msg = messages.packMsg(''mem_request_t'', { > ''target'' : target * (1 << 8)} ) > self.channel.writeRequest(msg) > > + def vcpu_hotplug(self, vcpu, state): > + """Disable or enable VCPU in domain. 
> + """ > + if self.channel: > + if int(state) == 0: > + msg = messages.packMsg(''vcpu_hotplug_off_t'', > { ''vcpu'' : vcpu} ) > + else: > + msg = messages.packMsg(''vcpu_hotplug_on_t'', > { ''vcpu'' : > + vcpu} ) > + > + self.channel.writeRequest(msg) > + > + > def shutdown(self, reason, key=0): > msgtype = shutdown_messages.get(reason) > if not msgtype: > diff -urN a/tools/python/xen/xend/server/SrvDomain.py > b/tools/python/xen/xend/server/SrvDomain.py > --- a/tools/python/xen/xend/server/SrvDomain.py > 2005-06-06 22:05:31.000000000 -0500 > +++ b/tools/python/xen/xend/server/SrvDomain.py > 2005-06-07 16:51:43.095341092 -0500 > @@ -180,6 +180,14 @@ > val = fn(req.args, {''dom'': self.dom.id}) > return val > > + def op_vcpu_hotplug(self, op, req): > + fn = FormFn(self.xd.domain_vcpu_hotplug, > + [[''dom'', ''str''], > + [''vcpu'', ''int''], > + [''state'', ''int'']]) > + val = fn(req.args, {''dom'': self.dom.id}) > + return val > + > def render_POST(self, req): > return self.perform(req) > > diff -urN a/tools/python/xen/xend/server/messages.py > b/tools/python/xen/xend/server/messages.py > --- a/tools/python/xen/xend/server/messages.py > 2005-06-06 22:05:28.000000000 -0500 > +++ b/tools/python/xen/xend/server/messages.py > 2005-06-07 16:51:43.125336730 -0500 > @@ -309,6 +309,24 @@ > msg_formats.update(mem_request_formats) > > > #============================================================> ==============> +# Domain vcpu hotplug message. 
> +#===========================================================> =========> +=====> + > +CMSG_VCPU_HOTPLUG = 10 > +CMSG_VCPU_HOTPLUG_OFF = 0 > +CMSG_VCPU_HOTPLUG_ON = 1 > + > +vcpu_hotplug_formats = { > + ''vcpu_hotplug_off_t'': > + (CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF), > + > + ''vcpu_hotplug_on_t'': > + (CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON) > + } > + > +msg_formats.update(vcpu_hotplug_formats) > + > +#===========================================================> =========> +=====> class Msg: > pass > > diff -urN a/tools/python/xen/xm/main.py b/tools/python/xen/xm/main.py > --- a/tools/python/xen/xm/main.py 2005-06-06 > 22:05:31.000000000 -0500 > +++ b/tools/python/xen/xm/main.py 2005-06-07 > 16:51:43.127336439 -0500 > @@ -568,6 +568,27 @@ > > xm.prog(ProgBalloon) > > + > +class ProgVcpuhotplug(Prog): > + group = ''domain'' > + name = ''vcpu_hotplug'' > + info = """Enable or disable a VCPU in a domain.""" > + > + def help(self, args): > + print args[0], "DOM VCPU [0|1]" > + print """\nRequest virtual processor VCPU to be disabled or > +enabled in domain DOM""" > + > + def main(self, args): > + if len(args) != 4: self.err("%s: Invalid > arguments(s)" % args[0]) > + dom = args[1] > + vcpu = args[2] > + state = args[3] > + server.xend_domain_vcpu_hotplug(dom, vcpu, state) > + > +xm.prog(ProgVcpuhotplug) > + > + > class ProgDomid(Prog): > group = ''domain'' > name = ''domid'' > diff -urN a/xen/include/public/io/domain_controller.h > b/xen/include/public/io/domain_controller.h > --- a/xen/include/public/io/domain_controller.h > 2005-06-06 22:05:31.000000000 -0500 > +++ b/xen/include/public/io/domain_controller.h > 2005-06-07 16:51:43.139334694 -0500 > @@ -61,6 +61,7 @@ > #define CMSG_MEM_REQUEST 7 /* Memory reservation reqs */ > #define CMSG_USBIF_BE 8 /* USB controller backend */ > #define CMSG_USBIF_FE 9 /* USB controller frontend */ > +#define CMSG_VCPU_HOTPLUG 10 /* Hotplug VCPU messages */ > > > 
/************************************************************* > ***************** > * CONSOLE DEFINITIONS > @@ -758,6 +759,25 @@ > } PACKED shutdown_sysrq_t; /* 4 bytes */ > > > /************************************************************* > ***************** > + * VCPU HOTPLUG CONTROLS > + */ > + > +/* > + * Subtypes for shutdown messages. > + */ > +#define CMSG_VCPU_HOTPLUG_OFF 0 /* turn vcpu off */ > +#define CMSG_VCPU_HOTPLUG_ON 1 /* turn vcpu on */ > + > +/* > + * CMSG_VCPU_HOTPLUG: > + * Indicate which vcpu''s state should change */ typedef struct { > + u32 vcpu; /* 0: VCPU''s whose state will change */ > + u32 status; /* 4: Return code indicates success or > failure. */ > +} PACKED vcpu_hotplug_t; > + > +/************************************************************ > ********** > +******** > * MEMORY CONTROLS > */ > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xensource.com > http://lists.xensource.com/xen-devel >_______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
* Ian Pratt <m+Ian.Pratt@cl.cam.ac.uk> [2005-06-07 17:27]:> > > > -----Original Message----- > > From: xen-devel-bounces@lists.xensource.com > > [mailto:xen-devel-bounces@lists.xensource.com] On Behalf Of > > Ryan Harper > > Sent: 07 June 2005 23:03 > > To: xen-devel@lists.xensource.com > > Subject: [Xen-devel] [PATCH] add dom0 vcpu hotplug control > > > > This patch adds new control messages for vcpu hotplug events. > > Via the xm vcpu_hotplug sub-program, VCPUS in domains can be > > enabled/disabled when CONFIG_HOTPLUG_CPU is enabled in the > > target domain''s kernel. > > > > Currently there is nothing that tracks whether a VCPU is up or down. > > My previous [1]patch added a new per-VCPU flag (VCPUF_down) > > which could be used to keep track of which VCPUS are up and > > down. Right now, there isn''t a hypercall that returns the > > per-VCPU flag status (something equivalent to the per-domain > > flags in get_dominfo ). Have we thought about a > > get_vcpu_info hypercall? I''d like to get that implemented so > > I can report VCPU state in the xm list --vcpus output. That > > would also make it easier to skip sending control messages > > that don''t change the VCPU''s state (e.g. sending a down > > message to a vcpu that is already down). > > I haven''t looked at the code, but can you not use DOM0_GETVCPUCONTEXT to > iterate over all the VCPUs for a domain? (e.g. if you ask for VCPU N and > it doesn''t exist (isn''t up) you''ll get the state for the next highest > numbered one, or ESRCH if there isn''t one) That''s the behaviour I''d > expect.That would work. Moving forward, there may be more per-VCPU info that we want to collect and that it would make sense to factor out per-VCPU info from DOM0_GETDOMINFO and create a DOM0_GETVCPUINFO call. If that is not desirable at this point, I can work with DOM0_GETVCPUCONTEXT. 
-- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx (512) 838-9253 T/L: 678-9253 ryanh@us.ibm.com _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
> That would work. Moving forward, there may be more per-VCPU > info that we want to collect and that it would make sense to > factor out per-VCPU info from DOM0_GETDOMINFO and create a > DOM0_GETVCPUINFO call. If that is not desirable at this > point, I can work with DOM0_GETVCPUCONTEXT.I think we can live with the summary provided by DOM0_GETDOMINFO for the moment. I presume the n_cpu and vcpu_to_cpu[] fields are updated to show down''ed VCPUs? Thanks, Ian> -- > Ryan Harper > Software Engineer; Linux Technology Center IBM Corp., Austin, Tx > (512) 838-9253 T/L: 678-9253 > ryanh@us.ibm.com >_______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
* Ian Pratt <m+Ian.Pratt@cl.cam.ac.uk> [2005-06-07 17:54]:> > > That would work. Moving forward, there may be more per-VCPU > > info that we want to collect and that it would make sense to > > factor out per-VCPU info from DOM0_GETDOMINFO and create a > > DOM0_GETVCPUINFO call. If that is not desirable at this > > point, I can work with DOM0_GETVCPUCONTEXT. > > I think we can live with the summary provided by DOM0_GETDOMINFO for the > moment.OK.> I presume the n_cpu and vcpu_to_cpu[] fields are updated to show down''ed > VCPUs?Not at the moment, but they should. I think if those are updated, I can extract state info from those variables without having to issue DOM0_VCPUCONTEXT ops. DOM0_VCPUCONTEXT should still skip vcpu''s that are not up. Thanks for the review. I''ll resubmit with those changes. -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx (512) 838-9253 T/L: 678-9253 ryanh@us.ibm.com _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Ryan Harper
2005-Jun-14 16:37 UTC
[Xen-devel] [PATCH][RESUBMIT] add dom0 vcpu hotplug control
* Ryan Harper <ryanh@us.ibm.com> [2005-06-07 17:05]:> This patch adds new control messages for vcpu hotplug events. Via the > xm vcpu_hotplug sub-program, VCPUS in domains can be enabled/disabled > when CONFIG_HOTPLUG_CPU is enabled in the target domain''s kernel. >Updated this patch to use vcpu_to_cpu up/down info to control whether state changes are sent when hotplugging vcpus. Built and tested with vcpu_down patch against 20050614 nightly unstable snapshot. -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx (512) 838-9253 T/L: 678-9253 ryanh@us.ibm.com diffstat output: linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c | 81 +++++++++++++++++ tools/python/xen/lowlevel/xu/xu.c | 16 +++ tools/python/xen/xend/XendClient.py | 6 + tools/python/xen/xend/XendDomain.py | 12 ++ tools/python/xen/xend/XendDomainInfo.py | 12 ++ tools/python/xen/xend/server/SrvDomain.py | 8 + tools/python/xen/xend/server/messages.py | 18 +++ tools/python/xen/xm/main.py | 30 ++++++ xen/include/public/io/domain_controller.h | 20 ++++ 9 files changed, 203 insertions(+) Signed-off-by: Ryan Harper <ryanh@us.ibm.com> --- diff -urN vcpu_down/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c vcpu_cntl/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c --- vcpu_down/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c 2005-06-13 10:41:56.000000000 -0500 +++ vcpu_cntl/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c 2005-06-13 13:41:55.000000000 -0500 @@ -85,6 +85,13 @@ /* Set when the idlers are all forked */ int smp_threads_ready; +#ifdef CONFIG_HOTPLUG_CPU +struct vcpu_hotplug_handler_t { + void (*fn)(); + u32 vcpu; +}; +#endif + #if 0 /* * Trampoline 80x86 program as an array. 
@@ -1297,6 +1304,9 @@ } #ifdef CONFIG_HOTPLUG_CPU +#include <asm-xen/ctrl_if.h> +/* hotplug down/up funtion pointer and target vcpu */ +struct vcpu_hotplug_handler_t vcpu_hotplug_handler; /* must be called with the cpucontrol mutex held */ static int __devinit cpu_enable(unsigned int cpu) @@ -1357,6 +1367,77 @@ } printk(KERN_ERR "CPU %u didn''t die...\n", cpu); } + +static int vcpu_hotplug_cpu_process(void *unused) +{ + struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler; + + if ( handler->fn ) { + (*(handler->fn))(handler->vcpu); + handler->fn = NULL; + } + return 0; +} + +static void __vcpu_hotplug_handler(void *unused) +{ + int err; + + err = kernel_thread(vcpu_hotplug_cpu_process, + NULL, CLONE_FS | CLONE_FILES); + if ( err < 0 ) + printk(KERN_ALERT "Error creating hotplug_cpu process!\n"); + +} + +static void vcpu_hotplug_event_handler(ctrl_msg_t *msg, unsigned long id) +{ + static DECLARE_WORK(vcpu_hotplug_work, __vcpu_hotplug_handler, NULL); + vcpu_hotplug_t *req = (vcpu_hotplug_t *)&msg->msg[0]; + struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler; + ssize_t ret; + + if ( msg->length != sizeof(vcpu_hotplug_t) ) + goto parse_error; + + /* grab target vcpu from msg */ + handler->vcpu = req->vcpu; + + /* determine which function to call based on msg subtype */ + switch ( msg->subtype ) { + case CMSG_VCPU_HOTPLUG_OFF: + handler->fn = (void *)&cpu_down; + ret = schedule_work(&vcpu_hotplug_work); + req->status = (u32) ret; + break; + case CMSG_VCPU_HOTPLUG_ON: + handler->fn = (void *)&cpu_up; + ret = schedule_work(&vcpu_hotplug_work); + req->status = (u32) ret; + break; + default: + goto parse_error; + } + + ctrl_if_send_response(msg); + return; + parse_error: + msg->length = 0; + ctrl_if_send_response(msg); +} + +static int __init setup_vcpu_hotplug_event(void) +{ + struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler; + + handler->fn = NULL; + ctrl_if_register_receiver(CMSG_VCPU_HOTPLUG, vcpu_hotplug_event_handler, 0); + + 
return 0; +} + +__initcall(setup_vcpu_hotplug_event); + #else /* ... !CONFIG_HOTPLUG_CPU */ int __cpu_disable(void) { diff -urN vcpu_down/tools/python/xen/lowlevel/xu/xu.c vcpu_cntl/tools/python/xen/lowlevel/xu/xu.c --- vcpu_down/tools/python/xen/lowlevel/xu/xu.c 2005-06-12 22:13:37.000000000 -0500 +++ vcpu_cntl/tools/python/xen/lowlevel/xu/xu.c 2005-06-13 13:41:55.000000000 -0500 @@ -744,6 +744,14 @@ C2P(mem_request_t, target, Int, Long); C2P(mem_request_t, status, Int, Long); return dict; + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF): + C2P(vcpu_hotplug_t, vcpu, Int, Long); + C2P(vcpu_hotplug_t, status, Int, Long); + return dict; + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON): + C2P(vcpu_hotplug_t, vcpu, Int, Long); + C2P(vcpu_hotplug_t, status, Int, Long); + return dict; } return PyString_FromStringAndSize((char *)xum->msg.msg, xum->msg.length); @@ -909,6 +917,14 @@ case TYPE(CMSG_MEM_REQUEST, CMSG_MEM_REQUEST_SET): P2C(mem_request_t, target, u32); break; + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF): + P2C(vcpu_hotplug_t, vcpu, u32); + P2C(vcpu_hotplug_t, status, u32); + break; + case TYPE(CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON): + P2C(vcpu_hotplug_t, vcpu, u32); + P2C(vcpu_hotplug_t, status, u32); + break; case TYPE(CMSG_USBIF_FE, CMSG_USBIF_FE_INTERFACE_STATUS_CHANGED): P2C(usbif_fe_interface_status_changed_t, status, u32); P2C(usbif_fe_interface_status_changed_t, evtchn, u16); diff -urN vcpu_down/tools/python/xen/xend/server/messages.py vcpu_cntl/tools/python/xen/xend/server/messages.py --- vcpu_down/tools/python/xen/xend/server/messages.py 2005-06-12 22:13:37.000000000 -0500 +++ vcpu_cntl/tools/python/xen/xend/server/messages.py 2005-06-13 13:41:55.000000000 -0500 @@ -309,6 +309,24 @@ msg_formats.update(mem_request_formats) #===========================================================================+# Domain vcpu hotplug message. 
+#===========================================================================+ +CMSG_VCPU_HOTPLUG = 10 +CMSG_VCPU_HOTPLUG_OFF = 0 +CMSG_VCPU_HOTPLUG_ON = 1 + +vcpu_hotplug_formats = { + ''vcpu_hotplug_off_t'': + (CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_OFF), + + ''vcpu_hotplug_on_t'': + (CMSG_VCPU_HOTPLUG, CMSG_VCPU_HOTPLUG_ON) + } + +msg_formats.update(vcpu_hotplug_formats) + +#=========================================================================== class Msg: pass diff -urN vcpu_down/tools/python/xen/xend/server/SrvDomain.py vcpu_cntl/tools/python/xen/xend/server/SrvDomain.py --- vcpu_down/tools/python/xen/xend/server/SrvDomain.py 2005-06-12 22:13:41.000000000 -0500 +++ vcpu_cntl/tools/python/xen/xend/server/SrvDomain.py 2005-06-13 16:53:32.265760766 -0500 @@ -180,6 +180,14 @@ val = fn(req.args, {''dom'': self.dom.id}) return val + def op_vcpu_hotplug(self, op, req): + fn = FormFn(self.xd.domain_vcpu_hotplug, + [[''dom'', ''int''], + [''vcpu'', ''int''], + [''state'', ''int'']]) + val = fn(req.args, {''dom'': self.dom.id}) + return val + def render_POST(self, req): return self.perform(req) diff -urN vcpu_down/tools/python/xen/xend/XendClient.py vcpu_cntl/tools/python/xen/xend/XendClient.py --- vcpu_down/tools/python/xen/xend/XendClient.py 2005-06-12 22:13:36.000000000 -0500 +++ vcpu_cntl/tools/python/xen/xend/XendClient.py 2005-06-13 13:41:55.000000000 -0500 @@ -271,6 +271,12 @@ ''target'' : mem_target }) return val + def xend_domain_vcpu_hotplug(self, id, vcpu, state): + return self.xendPost(self.domainurl(id), + {''op'' : ''vcpu_hotplug'', + ''vcpu'' : vcpu, + ''state'' : state }) + def xend_domain_vif_limit(self, id, vif, credit, period): return self.xendPost(self.domainurl(id), { ''op'' : ''vif_limit_set'', diff -urN vcpu_down/tools/python/xen/xend/XendDomainInfo.py vcpu_cntl/tools/python/xen/xend/XendDomainInfo.py --- vcpu_down/tools/python/xen/xend/XendDomainInfo.py 2005-06-12 22:13:43.000000000 -0500 +++ vcpu_cntl/tools/python/xen/xend/XendDomainInfo.py 
2005-06-13 13:41:55.000000000 -0500 @@ -949,6 +949,18 @@ msg = messages.packMsg(''mem_request_t'', { ''target'' : target * (1 << 8)} ) self.channel.writeRequest(msg) + def vcpu_hotplug(self, vcpu, state): + """Disable or enable VCPU in domain. + """ + if self.channel: + if int(state) == 0: + msg = messages.packMsg(''vcpu_hotplug_off_t'', { ''vcpu'' : vcpu} ) + else: + msg = messages.packMsg(''vcpu_hotplug_on_t'', { ''vcpu'' : vcpu} ) + + self.channel.writeRequest(msg) + + def shutdown(self, reason, key=0): msgtype = shutdown_messages.get(reason) if not msgtype: diff -urN vcpu_down/tools/python/xen/xend/XendDomain.py vcpu_cntl/tools/python/xen/xend/XendDomain.py --- vcpu_down/tools/python/xen/xend/XendDomain.py 2005-06-12 22:13:43.000000000 -0500 +++ vcpu_cntl/tools/python/xen/xend/XendDomain.py 2005-06-13 13:45:29.000000000 -0500 @@ -710,6 +710,18 @@ dominfo = self.domain_lookup(id) return dominfo.mem_target_set(mem) + def domain_vcpu_hotplug(self, id, vcpu, state): + """Enable or disable VCPU vcpu in DOM id + + @param id: domain + @param vcpu: target VCPU in domain + @param state: which state VCPU will become + @return: 0 on success, -1 on error + """ + + dominfo = self.domain_lookup(id) + return dominfo.vcpu_hotplug(vcpu, state) + def domain_dumpcore(self, id): """Save a core dump for a crashed domain. 
diff -urN vcpu_down/tools/python/xen/xm/main.py vcpu_cntl/tools/python/xen/xm/main.py --- vcpu_down/tools/python/xen/xm/main.py 2005-06-13 14:46:16.000000000 -0500 +++ vcpu_cntl/tools/python/xen/xm/main.py 2005-06-13 17:28:55.159285868 -0500 @@ -571,6 +571,36 @@ xm.prog(ProgBalloon) + +class ProgVcpuhotplug(Prog): + group = ''domain'' + name = ''vcpu_hotplug'' + info = """Enable or disable a VCPU in a domain.""" + + def help(self, args): + print args[0], "DOM VCPU [0|1]" + print """\nRequest virtual processor VCPU to be disabled or enabled in +domain DOM""" + + def main(self, args): + if len(args) != 4: self.err("%s: Invalid arguments(s)" % args[0]) + dom = args[1] + vcpu = args[2] + state = args[3] + info = server.xend_domain(dom) + vcpu_to_cpu = sxp.child_value(info, ''vcpu_to_cpu'', ''-1'').replace(''-1'',''#'') + # only send state change if states differ + try: + # (down going up) or (up going down) + if (vcpu_to_cpu[int(vcpu)] == "#" and state == "1") or \ + (vcpu_to_cpu[int(vcpu)] != "#" and state == "0"): + server.xend_domain_vcpu_hotplug(int(dom), int(vcpu), int(state)) + except IndexError: + print "Invalid VCPU(%s)"%(vcpu) + +xm.prog(ProgVcpuhotplug) + + class ProgDomid(Prog): group = ''domain'' name = ''domid'' diff -urN vcpu_down/xen/include/public/io/domain_controller.h vcpu_cntl/xen/include/public/io/domain_controller.h --- vcpu_down/xen/include/public/io/domain_controller.h 2005-06-12 22:13:41.000000000 -0500 +++ vcpu_cntl/xen/include/public/io/domain_controller.h 2005-06-13 13:41:55.000000000 -0500 @@ -61,6 +61,7 @@ #define CMSG_MEM_REQUEST 7 /* Memory reservation reqs */ #define CMSG_USBIF_BE 8 /* USB controller backend */ #define CMSG_USBIF_FE 9 /* USB controller frontend */ +#define CMSG_VCPU_HOTPLUG 10 /* Hotplug VCPU messages */ /****************************************************************************** * CONSOLE DEFINITIONS @@ -758,6 +759,25 @@ } PACKED shutdown_sysrq_t; /* 4 bytes */ 
/****************************************************************************** + * VCPU HOTPLUG CONTROLS + */ + +/* + * Subtypes for shutdown messages. + */ +#define CMSG_VCPU_HOTPLUG_OFF 0 /* turn vcpu off */ +#define CMSG_VCPU_HOTPLUG_ON 1 /* turn vcpu on */ + +/* + * CMSG_VCPU_HOTPLUG: + * Indicate which vcpu''s state should change + */ +typedef struct { + u32 vcpu; /* 0: VCPU''s whose state will change */ + u32 status; /* 4: Return code indicates success or failure. */ +} PACKED vcpu_hotplug_t; + +/****************************************************************************** * MEMORY CONTROLS */ _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Christian Limpach
2005-Jun-21 11:02 UTC
Re: [Xen-devel] [PATCH][RESUBMIT] add dom0 vcpu hotplug control
On 6/14/05, Ryan Harper <ryanh@us.ibm.com> wrote:> * Ryan Harper <ryanh@us.ibm.com> [2005-06-07 17:05]: > > This patch adds new control messages for vcpu hotplug events. Via the > > xm vcpu_hotplug sub-program, VCPUS in domains can be enabled/disabled > > when CONFIG_HOTPLUG_CPU is enabled in the target domain's kernel. > > > > Updated this patch to use vcpu_to_cpu up/down info to control whether > state changes are sent when hotplugging vcpus. I've applied both patches. I think there's a flaw in the way how vcpu_to_cpu gets passed up to python, the -1 -> # hack is kind of gross and it won't work for cpus > 9? Also the whole hotplug stuff is still missing interrupt re-routing when a vcpu is taken down. To do this, we need an evtchn operation to change the vcpu affinity of a port by changing notify_vcpu_id. christian _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Ryan Harper
2005-Jun-21 14:14 UTC
Re: [Xen-devel] [PATCH][RESUBMIT] add dom0 vcpu hotplug control
* Christian Limpach <christian.limpach@gmail.com> [2005-06-21 09:11]:> On 6/14/05, Ryan Harper <ryanh@us.ibm.com> wrote: > > * Ryan Harper <ryanh@us.ibm.com> [2005-06-07 17:05]: > > > This patch adds new control messages for vcpu hotplug events. Via the > > > xm vcpu_hotplug sub-program, VCPUS in domains can be enabled/disabled > > > when CONFIG_HOTPLUG_CPU is enabled in the target domain's kernel. > > > > > > > Updated this patch to use vcpu_to_cpu up/down info to control whether > > state changes are sent when hotplugging vcpus. > > I've applied both patches. I think there's a flaw in the way how > vcpu_to_cpu gets passed up to python, the -1 -> # hack is kind of > gross and it won't work for cpus > 9? > > Also the whole hotplug stuff is still missing interrupt re-routing > when a vcpu is taken down. To do this, we need an evtchn operation to > change the vcpu affinity of a port by changing notify_vcpu_id. OK, I'll take a look and address the issues. Thanks. -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx (512) 838-9253 T/L: 678-9253 ryanh@us.ibm.com _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Ryan Harper
2005-Jun-22 21:01 UTC
Re: [Xen-devel] [PATCH][RESUBMIT] add dom0 vcpu hotplug control
* Christian Limpach <christian.limpach@gmail.com> [2005-06-21 09:11]:> On 6/14/05, Ryan Harper <ryanh@us.ibm.com> wrote: > > * Ryan Harper <ryanh@us.ibm.com> [2005-06-07 17:05]: > > > This patch adds new control messages for vcpu hotplug events. Via the > > > xm vcpu_hotplug sub-program, VCPUS in domains can be enabled/disabled > > > when CONFIG_HOTPLUG_CPU is enabled in the target domain's kernel. > > > > > > > Updated this patch to use vcpu_to_cpu up/down info to control whether > > state changes are sent when hotplugging vcpus. > > I've applied both patches. I think there's a flaw in the way how > vcpu_to_cpu gets passed up to python, the -1 -> # hack is kind of > gross and it won't work for cpus > 9? I updated the vcpu_to_cpu string creation to include a field separator, which gets rid of the -1 -> # hack and works for cpus > 9. I ran into some issues with stale vcpu_to_cpu lists when running the hotplug subprogram. I would take a vcpu offline, and then issue the command to bring it back and the vcpu_to_cpu list would not have changed to indicate the vcpu actually went down. If I injected a xm list -v (which always showed the correct mapping) then subsequent hotplug commands would see the state change and fire off the hotplug request. I don't know that not sending the event when not changing state saves that much work so I took the state check out and now just send the hotplug event directly.> Also the whole hotplug stuff is still missing interrupt re-routing > when a vcpu is taken down. To do this, we need an evtchn operation to > change the vcpu affinity of a port by changing notify_vcpu_id. I don't fully understand all of the mappings that are happening, so this part of the patch might be way off. In any case, I've added a new evtchn op to set the notify_vcpu_id field of a channel. I updated the HOTPLUG_CPU code to use the new routines when bringing cpus up and down.
When taking down a cpu, I route the IPI irq channels to CPU 0, and when the cpu comes up, it re-routes the channels back to the awakened CPU. -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx (512) 838-9253 T/L: 678-9253 ryanh@us.ibm.com diffstat output: linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c | 19 +++++++--- linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c | 32 +++++++++++++++++ tools/python/xen/xend/XendDomainInfo.py | 6 ++- tools/python/xen/xm/main.py | 18 +-------- xen/common/event_channel.c | 29 +++++++++++++++ xen/include/public/event_channel.h | 10 ++++- 6 files changed, 91 insertions(+), 23 deletions(-) Signed-off-by: Ryan Harper <ryanh@us.ibm.com> --- diff -urN b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c c/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c --- b/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c 2005-06-21 22:10:55.000000000 -0500 +++ c/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/smpboot.c 2005-06-22 14:31:48.765503988 -0500 @@ -103,6 +103,11 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 }; #endif +static DEFINE_PER_CPU(int, resched_irq); +static DEFINE_PER_CPU(int, callfunc_irq); +static char resched_name[NR_CPUS][15]; +static char callfunc_name[NR_CPUS][15]; + #if 0 /* * Currently trivial. 
Write the real->protected mode @@ -1328,6 +1333,10 @@ while (!cpu_online(cpu)) cpu_relax(); + /* re-route bound IRQs 0 to cpu */ + rebind_evtchn_from_irq(0, cpu, per_cpu(resched_irq, cpu)); + rebind_evtchn_from_irq(0, cpu, per_cpu(callfunc_irq, cpu)); + fixup_irqs(cpu_online_map); /* counter the disable in fixup_irqs() */ local_irq_enable(); @@ -1357,6 +1366,11 @@ cpu_clear(cpu, map); fixup_irqs(map); + + /* re-route IRQs from dead vcpu to another */ + rebind_evtchn_from_irq(cpu, 0, per_cpu(resched_irq, cpu)); + rebind_evtchn_from_irq(cpu, 0, per_cpu(callfunc_irq, cpu)); + /* It''s now safe to remove this processor from the online map */ cpu_clear(cpu, cpu_online_map); @@ -1514,11 +1528,6 @@ extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *); extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *); -static DEFINE_PER_CPU(int, resched_irq); -static DEFINE_PER_CPU(int, callfunc_irq); -static char resched_name[NR_CPUS][15]; -static char callfunc_name[NR_CPUS][15]; - void __init smp_intr_init(void) { int cpu = smp_processor_id(); diff -urN b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c c/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c --- b/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c 2005-06-21 22:10:53.000000000 -0500 +++ c/linux-2.6.11-xen-sparse/arch/xen/kernel/evtchn.c 2005-06-22 14:31:48.767503708 -0500 @@ -271,6 +271,38 @@ return irq; } +void rebind_evtchn_from_ipi(int cpu, int newcpu, int ipi) +{ + evtchn_op_t op; + int evtchn = per_cpu(ipi_to_evtchn, cpu)[ipi]; + + spin_lock(&irq_mapping_update_lock); + + op.cmd = EVTCHNOP_rebind; + op.u.rebind.port = evtchn; + op.u.rebind.vcpu = newcpu; + if ( HYPERVISOR_event_channel_op(&op) != 0 ) + printk(KERN_INFO "Failed to rebind IPI%d to CPU%d\n",ipi,newcpu); + + spin_unlock(&irq_mapping_update_lock); +} + +void rebind_evtchn_from_irq(int cpu, int newcpu, int irq) +{ + evtchn_op_t op; + int evtchn = irq_to_evtchn[irq]; + + spin_lock(&irq_mapping_update_lock); + + 
op.cmd = EVTCHNOP_rebind; + op.u.rebind.port = evtchn; + op.u.rebind.vcpu = newcpu; + if ( HYPERVISOR_event_channel_op(&op) != 0 ) + printk(KERN_INFO "Failed to rebind IRQ%d to CPU%d\n",irq,newcpu); + + spin_unlock(&irq_mapping_update_lock); +} + void unbind_ipi_on_cpu_from_irq(int cpu, int ipi) { evtchn_op_t op; diff -urN b/tools/python/xen/xend/XendDomainInfo.py c/tools/python/xen/xend/XendDomainInfo.py --- b/tools/python/xen/xend/XendDomainInfo.py 2005-06-21 22:10:54.000000000 -0500 +++ c/tools/python/xen/xend/XendDomainInfo.py 2005-06-22 14:31:48.000000000 -0500 @@ -423,8 +423,10 @@ sxpr.append([''cpu_time'', self.info[''cpu_time'']/1e9]) sxpr.append([''vcpus'', self.info[''vcpus'']]) sxpr.append([''cpumap'', self.info[''cpumap'']]) - sxpr.append([''vcpu_to_cpu'', ''''.join(map(lambda x: str(x), - self.info[''vcpu_to_cpu''][0:self.info[''vcpus'']]))]) + # build a string, using ''|'' to seperate items, show only up + # to number of vcpus in domain, and trim the trailing ''|'' + sxpr.append([''vcpu_to_cpu'', ''''.join(map(lambda x: str(x)+''|'', + self.info[''vcpu_to_cpu''][0:self.info[''vcpus'']]))[:-1]]) if self.start_time: up_time = time.time() - self.start_time diff -urN b/tools/python/xen/xm/main.py c/tools/python/xen/xm/main.py --- b/tools/python/xen/xm/main.py 2005-06-21 22:10:53.000000000 -0500 +++ c/tools/python/xen/xm/main.py 2005-06-22 14:54:50.810258798 -0500 @@ -410,8 +410,7 @@ print ''Name Id VCPU CPU CPUMAP'' for dom in doms: info = server.xend_domain(dom) - # XXX this is quite broken for cpu''s > 9 - vcpu_to_cpu = sxp.child_value(info, ''vcpu_to_cpu'', ''?'').replace(''-1'',''#'') + vcpu_to_cpu = sxp.child_value(info, ''vcpu_to_cpu'', ''-1'').split(''|'') cpumap = sxp.child_value(info, ''cpumap'', []) mask = ((int(sxp.child_value(info, ''vcpus'', ''0'')))**2) - 1 count = 0 @@ -420,10 +419,7 @@ d[''name''] = sxp.child_value(info, ''name'', ''??'') d[''dom''] = int(sxp.child_value(info, ''id'', ''-1'')) d[''vcpu''] = int(count) - if cpu == "#": - 
d[''cpu''] = int("-1") - else: - d[''cpu''] = int(cpu) + d[''cpu''] = int(cpu) d[''cpumap''] = int(cpumap[count])&mask count = count + 1 print ("%(name)-16s %(dom)3d %(vcpu)4d %(cpu)3d 0x%(cpumap)x" % d) @@ -593,15 +589,7 @@ state = int(args[3]) dom = server.xend_domain(name) id = sxp.child_value(dom, ''id'') - vcpu_to_cpu = sxp.child_value(dom, ''vcpu_to_cpu'', ''-1'') - # only send state change if states differ - try: - # (down going up) or (up going down) - if (vcpu_to_cpu[vcpu] == "-1" and state == 1) or \ - (vcpu_to_cpu[vcpu] != "-1" and state == 0): - server.xend_domain_vcpu_hotplug(id, vcpu, state) - except IndexError: - print "Invalid VCPU(%d)"%(vcpu) + server.xend_domain_vcpu_hotplug(id, vcpu, state) xm.prog(ProgVcpuhotplug) diff -urN b/xen/common/event_channel.c c/xen/common/event_channel.c --- b/xen/common/event_channel.c 2005-06-21 22:10:55.000000000 -0500 +++ c/xen/common/event_channel.c 2005-06-22 14:55:20.936237917 -0500 @@ -579,6 +579,29 @@ return rc; } +static long evtchn_rebind(evtchn_rebind_t *bind) +{ + struct domain *d = current->domain; + int port = bind->port; + int vcpu = bind->vcpu; + struct evtchn *chn; + long rc = 0; + + spin_lock(&d->evtchn_lock); + + if ( !port_is_valid(d, port) ) + { + rc = -EINVAL; + goto out; + } + + chn = evtchn_from_port(d, port); + chn->notify_vcpu_id = vcpu; + + out: + spin_unlock(&d->evtchn_lock); + return rc; +} long do_event_channel_op(evtchn_op_t *uop) { @@ -637,6 +660,12 @@ rc = -EFAULT; break; + case EVTCHNOP_rebind: + rc = evtchn_rebind(&op.u.rebind); + if ( (rc == 0) && (copy_to_user(uop, &op, sizeof(op)) != 0) ) + rc = -EFAULT; + break; + default: rc = -ENOSYS; break; diff -urN b/xen/include/public/event_channel.h c/xen/include/public/event_channel.h --- b/xen/include/public/event_channel.h 2005-06-21 22:10:55.000000000 -0500 +++ c/xen/include/public/event_channel.h 2005-06-22 14:31:48.801498950 -0500 @@ -162,6 +162,13 @@ } PACKED u; } PACKED evtchn_status_t; /* 20 bytes */ +#define EVTCHNOP_rebind 8 
+typedef struct { + /* IN parameters. */ + u32 port; /* 0 */ + u32 vcpu; /* 4 */ +} PACKED evtchn_rebind_t; /* 8 bytes */ + typedef struct { u32 cmd; /* EVTCHNOP_* */ /* 0 */ u32 __reserved; /* 4 */ @@ -174,7 +181,8 @@ evtchn_close_t close; evtchn_send_t send; evtchn_status_t status; - u8 __dummy[24]; + evtchn_rebind_t rebind; + u8 __dummy[16]; } PACKED u; } PACKED evtchn_op_t; /* 32 bytes */ _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel