Ryan Harper
2005-Aug-05 19:21 UTC
[Xen-devel] [PATCH] xend,sparse: move vcpu-hotplug to xenbus/store
This patch changes the vcpu-hotplug handler from using control messages to using xenstore writes as triggers. Dropping the control messages also fixes the issue of not being able to use xm vcpu-hotplug on dom0. This patch depends on Dan Smith's fix [1].

1. http://lists.xensource.com/archives/html/xen-devel/2005-08/msg00262.html

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com

diffstat output:
 linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c |  195 ++++++++++++--------
 tools/python/xen/xend/XendDomainInfo.py             |   33 ++-
 2 files changed, 148 insertions(+), 80 deletions(-)

Signed-off-by: Ryan Harper <ryanh@us.ibm.com>
---
diff -r b63577ff53a3 linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c      Fri Aug  5 14:57:43 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/kernel/smpboot.c      Fri Aug  5 13:03:31 2005
@@ -1324,14 +1324,132 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-#include <asm-xen/ctrl_if.h>
-
+#include <asm-xen/xenbus.h>
 /* hotplug down/up funtion pointer and target vcpu */
 struct vcpu_hotplug_handler_t {
-        void (*fn)(int vcpu);
+        void (*fn) (int vcpu);
         u32 vcpu;
 };
 static struct vcpu_hotplug_handler_t vcpu_hotplug_handler;
+
+static int vcpu_hotplug_cpu_process(void *unused)
+{
+        struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
+
+        if (handler->fn) {
+                (*(handler->fn)) (handler->vcpu);
+                handler->fn = NULL;
+        }
+        return 0;
+}
+
+static void __vcpu_hotplug_handler(void *unused)
+{
+        int err;
+
+        err = kernel_thread(vcpu_hotplug_cpu_process,
+                            NULL, CLONE_FS | CLONE_FILES);
+        if (err < 0)
+                printk(KERN_ALERT "Error creating hotplug_cpu process!\n");
+}
+
+static void handle_cpus_watch(struct xenbus_watch *, const char *);
+static struct notifier_block xsn_cpus;
+
+/* xenbus watch struct */
+static struct xenbus_watch cpus_watch = {
+        .node = "cpus",
+        .callback = handle_cpus_watch,
+};
+
+static int setup_cpus_watcher(struct notifier_block *notifier,
+                              unsigned long event, void *data)
+{
+        int err = 0;
+
+        down(&xenbus_lock);
+        err = register_xenbus_watch(&cpus_watch);
+        up(&xenbus_lock);
+
+        if (err) {
+                printk("Failed to set cpus watcher\n");
+        }
+        return NOTIFY_DONE;
+}
+
+static void handle_cpus_watch(struct xenbus_watch *watch, const char *node)
+{
+        static DECLARE_WORK(vcpu_hotplug_work, __vcpu_hotplug_handler, NULL);
+        struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
+        ssize_t ret;
+        int err, cpu, state;
+        char dir[32];
+        char *cpustr;
+
+        /* get a pointer to start of cpus/cpu string */
+        if ((cpustr = strstr(node, "cpus/cpu")) != NULL) {
+
+                /* find which cpu state changed, note vcpu for handler */
+                sscanf(cpustr, "cpus/cpu%d", &cpu);
+                handler->vcpu = cpu;
+
+                /* calc the dir for xenbus read */
+                sprintf(dir, "cpus/cpu%d", cpu);
+
+                /* make sure watch that was triggered is changes to the online key */
+                if ((strcmp(node + strlen(dir), "/online")) != 0)
+                        return;
+
+                /* get the state value */
+                xenbus_transaction_start("cpus");
+                err = xenbus_scanf(dir, "online", "%d", &state);
+                xenbus_transaction_end(0);
+
+                if (err != 1) {
+                        printk(KERN_ERR
+                               "XENBUS: Unable to read cpu online state\n");
+                        return;
+                }
+
+                /* if we detect a state change, take action */
+                switch (state) {
+                        /* offline -> online */
+                case 1:
+                        if (!cpu_isset(cpu, cpu_online_map)) {
+                                handler->fn = (void *)&cpu_up;
+                                ret = schedule_work(&vcpu_hotplug_work);
+                        }
+                        break;
+                        /* online -> offline */
+                case 0:
+                        if (cpu_isset(cpu, cpu_online_map)) {
+                                handler->fn = (void *)&cpu_down;
+                                ret = schedule_work(&vcpu_hotplug_work);
+                        }
+                        break;
+                default:
+                        printk(KERN_ERR
+                               "XENBUS: unknown state(%d) on node(%s)\n", state,
+                               node);
+                }
+        }
+        return;
+}
+
+static int __init setup_vcpu_hotplug_event(void)
+{
+        xsn_cpus.notifier_call = setup_cpus_watcher;
+
+        if (xen_start_info.store_evtchn) {
+                setup_cpus_watcher(&xsn_cpus, 0, NULL);
+        } else {
+                register_xenstore_notifier(&xsn_cpus);
+        }
+
+        return 0;
+}
+
+subsys_initcall(setup_vcpu_hotplug_event);
 
 /* must be called with the cpucontrol mutex held */
 static int __devinit cpu_enable(unsigned int cpu)
@@ -1401,77 +1519,6 @@
         printk(KERN_ERR "CPU %u didn't die...\n", cpu);
 }
 
-static int vcpu_hotplug_cpu_process(void *unused)
-{
-        struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
-
-        if (handler->fn) {
-                (*(handler->fn))(handler->vcpu);
-                handler->fn = NULL;
-        }
-        return 0;
-}
-
-static void __vcpu_hotplug_handler(void *unused)
-{
-        int err;
-
-        err = kernel_thread(vcpu_hotplug_cpu_process,
-                            NULL, CLONE_FS | CLONE_FILES);
-        if (err < 0)
-                printk(KERN_ALERT "Error creating hotplug_cpu process!\n");
-
-}
-
-static void vcpu_hotplug_event_handler(ctrl_msg_t *msg, unsigned long id)
-{
-        static DECLARE_WORK(vcpu_hotplug_work, __vcpu_hotplug_handler, NULL);
-        vcpu_hotplug_t *req = (vcpu_hotplug_t *)&msg->msg[0];
-        struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
-        ssize_t ret;
-
-        if (msg->length != sizeof(vcpu_hotplug_t))
-                goto parse_error;
-
-        /* grab target vcpu from msg */
-        handler->vcpu = req->vcpu;
-
-        /* determine which function to call based on msg subtype */
-        switch (msg->subtype) {
-        case CMSG_VCPU_HOTPLUG_OFF:
-                handler->fn = (void *)&cpu_down;
-                ret = schedule_work(&vcpu_hotplug_work);
-                req->status = (u32) ret;
-                break;
-        case CMSG_VCPU_HOTPLUG_ON:
-                handler->fn = (void *)&cpu_up;
-                ret = schedule_work(&vcpu_hotplug_work);
-                req->status = (u32) ret;
-                break;
-        default:
-                goto parse_error;
-        }
-
-        ctrl_if_send_response(msg);
-        return;
- parse_error:
-        msg->length = 0;
-        ctrl_if_send_response(msg);
-}
-
-static int __init setup_vcpu_hotplug_event(void)
-{
-        struct vcpu_hotplug_handler_t *handler = &vcpu_hotplug_handler;
-
-        handler->fn = NULL;
-        ctrl_if_register_receiver(CMSG_VCPU_HOTPLUG,
-                                  vcpu_hotplug_event_handler, 0);
-
-        return 0;
-}
-
-__initcall(setup_vcpu_hotplug_event);
-
 #else /* ... !CONFIG_HOTPLUG_CPU */
 int __cpu_disable(void)
 {
diff -r b63577ff53a3 tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py  Fri Aug  5 14:57:43 2005
+++ b/tools/python/xen/xend/XendDomainInfo.py  Fri Aug  5 13:03:31 2005
@@ -266,6 +266,7 @@
 
         self.restart_count = 0
         self.vcpus = 1
+        self.vcpusdb = {}
         self.bootloader = None
 
     def setDB(self, db):
@@ -547,6 +548,16 @@
         except:
             raise VmError('invalid vcpus value')
 
+    def exportVCPUSToDB(self, vcpus):
+        for v in range(0,vcpus):
+            path = "/cpus/cpu%d"%(v)
+            if not self.vcpusdb.has_key(path):
+                self.vcpusdb[path] = self.db.addChild(path)
+            db = self.vcpusdb[path]
+            log.debug("writing key online=1 to path %s in store"%(path))
+            db['online'] = "1"
+            db.saveDB(save=True)
+
     def init_image(self):
         """Create boot image handler for the domain.
         """
@@ -565,6 +576,8 @@
 
         self.db.introduceDomain(self.id,
                                 self.store_mfn,
                                 self.store_channel)
+        # get the configured value of vcpus and update store
+        self.exportVCPUSToDB(self.vcpus)
 
     def delete(self):
         """Delete the vm's db.
@@ -929,14 +942,20 @@
     def vcpu_hotplug(self, vcpu, state):
         """Disable or enable VCPU in domain.
         """
-        log.error("Holly Shit! %d %d\n" % (vcpu, state))
-        if self.channel:
+        db = ""
+        try:
+            db = self.vcpusdb['/cpus/cpu%d'%(vcpu)]
+        except:
+            log.error("Invalid VCPU")
+            return
+
+        if self.store_channel:
             if int(state) == 0:
-                msg = messages.packMsg('vcpu_hotplug_off_t', { 'vcpu' : vcpu} )
+                db['online'] = "0"
             else:
-                msg = messages.packMsg('vcpu_hotplug_on_t', { 'vcpu' : vcpu} )
-
-            self.channel.writeRequest(msg)
+                db['online'] = "1"
+
+        db.saveDB(save=True)
 
     def shutdown(self, reason):
         reasonid = shutdown_codes.get(reason)
@@ -966,6 +985,8 @@
         self.db.introduceDomain(self.id,
                                 self.store_mfn,
                                 self.store_channel)
         self.exportToDB(save=True, sync=True)
+        # get run-time value of vcpus and update store
+        self.exportVCPUSToDB(dom_get(self.id)['vcpus'])
 
 def vm_field_ignore(vm, config, val, index):
     """Dummy config field handler used for fields with built-in handling.
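For readers tracing the new flow, here is a minimal standalone sketch of the xend side of the trigger. It is not the patch code itself: a plain dict stands in for the per-vcpu db nodes that xend persists via saveDB(), and only the key name "online" and the path layout come from the patch.

# Standalone sketch of the xend-side trigger, not the actual xend code:
# a plain dict stands in for each per-vcpu db node written via saveDB().
def vcpu_hotplug(vcpusdb, vcpu, state):
    path = "/cpus/cpu%d" % vcpu
    if path not in vcpusdb:
        print("Invalid VCPU")
        return
    # Writing "0" or "1" to the online key is the whole trigger; the
    # guest kernel's watch on "cpus" fires and calls cpu_up/cpu_down.
    vcpusdb[path]["online"] = "1" if int(state) else "0"

vcpus = dict(("/cpus/cpu%d" % v, {"online": "1"}) for v in range(4))
vcpu_hotplug(vcpus, 1, 0)
print(vcpus["/cpus/cpu1"])   # {'online': '0'}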
Rusty Russell
2005-Aug-06 08:39 UTC
Re: [Xen-devel] [PATCH] xend,sparse: move vcpu-hotplug to xenbus/store
On Fri, 2005-08-05 at 14:21 -0500, Ryan Harper wrote:
> +/* xenbus watch struct */
> +static struct xenbus_watch cpus_watch = {
> +        .node = "cpus",
> +        .callback = handle_cpus_watch,
> +};

Everywhere else we've used singular names: /domain, /domain/<uuid>/device, /tool, /domain/<uuid>/backend. So I'd suggest either using "cpu" or consider putting this under "device" and actually using the xenbus system, although I'm not sure that's all that useful for you. Perhaps revisit after Christian has done the latest merge.

> +static void handle_cpus_watch(struct xenbus_watch *watch, const char *node)

Just a preference, but I prefer the watch callback to be something like "handle_XXX_event", which seems clearer to me.

> +        /* get a pointer to start of cpus/cpu string */
> +        if ((cpustr = strstr(node, "cpus/cpu")) != NULL) {
> +
> +                /* find which cpu state changed, note vcpu for handler */
> +                sscanf(cpustr, "cpus/cpu%d", &cpu);

Please don't repeat "cpu"; just use "cpu/0", "cpu/1", which matches what the block interfaces are doing.

The guidelines I've been working on for store layout are as follows (no doubt we'll all come up with more as we use this thing):

1) Redundant information in the store is bad. Don't put information in two places and then insist they be the same.

2) Multiple entries should be in directory names, unless the hierarchy is getting completely out of control.

3) Directory names have to be unique, obviously. If there is a useful handle to use, do so; otherwise simply use numbers starting from 0, which makes it fairly clear to the reader that the tags are arbitrary.

I'm still not sure about binary flags. For the block devices, Christian and I chose "read-only": if it exists, the device is read-only; if not, it's read-write. That's preferable to a "writability" flag which contains either "read-only" or "read-write", since the default is fairly clear.

In the case of online vs. offline cpus, you could either have an "offline" entry (not existing meaning "online", which is probably the clearer default), or an "availability" entry which contains the string "online" or "offline". Please do not have a node called "online" which contains 0 to mean "NOT"!

Thanks!
Rusty.
--
A bad analogy is like a leaky screwdriver -- Richard Braakman
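To make the two flag styles concrete, here is a small sketch. Plain dicts stand in for store directories; the key names are Rusty's examples, and the code itself is purely illustrative.

# Presence-style flag: the key existing at all is the signal.
blockdev = {"read-only": ""}                  # device is read-only
writable = "read-only" not in blockdev        # False

# Value-style flag: the key always exists and carries a string.
cpu = {"availability": "offline"}
online = cpu.get("availability") == "online"  # False

print(writable, online)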
Ryan Harper
2005-Aug-08 14:14 UTC
Re: [Xen-devel] [PATCH] xend, sparse: move vcpu-hotplug to xenbus/store
* Rusty Russell <rusty@rustcorp.com.au> [2005-08-08 08:58]:
> On Fri, 2005-08-05 at 14:21 -0500, Ryan Harper wrote:
> > +/* xenbus watch struct */
> > +static struct xenbus_watch cpus_watch = {
> > +        .node = "cpus",
> > +        .callback = handle_cpus_watch,
> > +};
>
> Everywhere else we've used singular names: /domain,
> /domain/<uuid>/device, /tool, /domain/<uuid>/backend. So I'd suggest
> either using "cpu" or consider putting this under "device" and
> actually using the xenbus system, although I'm not sure that's all
> that useful for you. Perhaps revisit after Christian has done the
> latest merge.

OK.

> > +static void handle_cpus_watch(struct xenbus_watch *watch, const char *node)
>
> Just a preference, but I prefer the watch callback to be something
> like "handle_XXX_event", which seems clearer to me.

Sure.

> > +        /* get a pointer to start of cpus/cpu string */
> > +        if ((cpustr = strstr(node, "cpus/cpu")) != NULL) {
> > +
> > +                /* find which cpu state changed, note vcpu for handler */
> > +                sscanf(cpustr, "cpus/cpu%d", &cpu);
>
> Please don't repeat "cpu"; just use "cpu/0", "cpu/1", which matches
> what the block interfaces are doing.
>
> The guidelines I've been working on for store layout are as follows
> (no doubt we'll all come up with more as we use this thing):
>
> 1) Redundant information in the store is bad. Don't put information in
> two places and then insist they be the same.
>
> 2) Multiple entries should be in directory names, unless the hierarchy
> is getting completely out of control.
>
> 3) Directory names have to be unique, obviously. If there is a useful
> handle to use, do so; otherwise simply use numbers starting from 0,
> which makes it fairly clear to the reader that the tags are arbitrary.
>
> I'm still not sure about binary flags. For the block devices, Christian
> and I chose "read-only": if it exists, the device is read-only; if not,
> it's read-write. That's preferable to a "writability" flag which
> contains either "read-only" or "read-write", since the default is
> fairly clear.

Good to know.

> In the case of online vs. offline cpus, you could either have an
> "offline" entry (not existing meaning "online", which is probably the
> clearer default), or an "availability" entry which contains the string
> "online" or "offline". Please do not have a node called "online" which
> contains 0 to mean "NOT"!

In this case, I was just following (with the exception of cpus) the sysfs example which already exists:

/sys/devices/system/cpu/cpu0/online

If we don't care that the xenbus entries _do_ the exact same thing but aren't similarly named, I'm fine with that.

Just to be clear, it sounds like you want:

/domain/uuid/cpu/0/online   # cpu 0 is online
/domain/uuid/cpu/1/offline  # cpu 1 is offline
/domain/uuid/cpu/2/online   # etc.
/domain/uuid/cpu/3/online   # etc.

I don't like that. What about this?

/domain/uuid/cpu/0/state

and the values can be "online|offline". I rather like having a single target whose value changes rather than two entries, one of which I must remove when the state changes. In this case, I can keep a single entry but toss the online=0 stuff you dislike.

--
Ryan Harper
Software Engineer; Linux Technology Center
IBM Corp., Austin, Tx
(512) 838-9253   T/L: 678-9253
ryanh@us.ibm.com
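The trade-off Ryan describes, sketched with plain dicts (illustrative only, not store API calls):

# Single key whose value flips: one write per transition.
cpu = {"state": "online"}
cpu["state"] = "offline"

# Mutually exclusive nodes: every transition must delete one key and
# create the other, leaving a window where neither (or both) exists
# unless the two operations are wrapped in a transaction.
cpu2 = {"online": ""}
del cpu2["online"]
cpu2["offline"] = ""

print(cpu, cpu2)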
Rusty Russell
2005-Aug-09 02:52 UTC
Re: [Xen-devel] [PATCH] xend, sparse: move vcpu-hotplug to xenbus/store
On Mon, 2005-08-08 at 09:14 -0500, Ryan Harper wrote:
> /domain/uuid/cpu/0/online   # cpu 0 is online
> /domain/uuid/cpu/1/offline  # cpu 1 is offline
> /domain/uuid/cpu/2/online   # etc.
> /domain/uuid/cpu/3/online   # etc.
>
> I don't like that. What about this?

Actually, I meant that if "offline" doesn't exist, it's online, rather than two mutually exclusive nodes.

> /domain/uuid/cpu/0/state
>
> and the values can be "online|offline"

"state" is a little vague, hence I suggested "availability". Not a great name, but clear about what it contains.

Rusty.
--
A bad analogy is like a leaky screwdriver -- Richard Braakman
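For comparison, a rough Python 3 sketch of how the guest-side dispatch might look under the "availability" naming and singular "cpu" directory discussed above. The read_store/cpu_up/cpu_down stubs are hypothetical stand-ins for the kernel's xenbus_scanf and CPU hotplug calls, and the handler name follows Rusty's "handle_XXX_event" preference.

def handle_vcpu_event(read_store, cpu_up, cpu_down, node):
    # node looks like "cpu/3/availability"; ignore all other keys
    parts = node.split("/")
    if len(parts) != 3 or parts[0] != "cpu" or parts[2] != "availability":
        return
    cpu = int(parts[1])
    state = read_store("cpu/%d" % cpu, "availability")
    if state == "online":
        cpu_up(cpu)
    elif state == "offline":
        cpu_down(cpu)

# toy usage with stubbed store and callbacks
handle_vcpu_event(lambda d, k: "offline",
                  lambda c: print("cpu_up(%d)" % c),
                  lambda c: print("cpu_down(%d)" % c),
                  "cpu/3/availability")   # prints cpu_down(3)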