Elena Ufimtseva
2013-Nov-14 03:26 UTC
[PATCH v2 2/7] libxc: Plumb Xen with vNUMA topology for domain.
Per-domain vNUMA topology initialization. A domctl hypercall is used to set
the vNUMA topology per domU during domain build time.

Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>
---
 tools/libxc/xc_dom.h    |    9 +++++++
 tools/libxc/xc_domain.c |   61 +++++++++++++++++++++++++++++++++++++++++++++++
 tools/libxc/xenctrl.h   |    9 +++++++
 3 files changed, 79 insertions(+)

diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index 86e23ee..42a16c9 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -114,6 +114,15 @@ struct xc_dom_image {
     struct xc_dom_phys *phys_pages;
     int realmodearea_log;
 
+    /*
+     * vNUMA topology and memory allocation structure.
+     * Defines the way memory is allocated on the physical
+     * NUMA nodes specified by vnode_numamap.
+     */
+    uint32_t nr_vnodes;
+    uint64_t *vnuma_memszs;
+    unsigned int *vnode_numamap;
+
     /* malloc memory pool */
     struct xc_dom_mem *memblocks;
 
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 1ccafc5..afa8667 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1776,6 +1776,67 @@ int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid,
     return do_domctl(xch, &domctl);
 }
 
+/* Plumbs Xen with vNUMA topology */
+int xc_domain_setvnodes(xc_interface *xch,
+                        uint32_t domid,
+                        uint16_t nr_vnodes,
+                        uint16_t nr_vcpus,
+                        vmemrange_t *vmemrange,
+                        unsigned int *vdistance,
+                        unsigned int *vcpu_to_vnode,
+                        unsigned int *vnode_numamap)
+{
+    int rc;
+    DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
+                             nr_vnodes * nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    DECLARE_HYPERCALL_BOUNCE(vnode_numamap, sizeof(*vnode_numamap) *
+                             nr_vnodes,
+                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+
+    if ( nr_vnodes == 0 )
+        return -EINVAL;
+
+    if ( vdistance == NULL || vcpu_to_vnode == NULL ||
+         vmemrange == NULL || vnode_numamap == NULL ) {
+        PERROR("Incorrect parameters for XEN_DOMCTL_setvnumainfo.\n");
+        return -EINVAL;
+    }
+
+    rc = -EFAULT;
+
+    if ( xc_hypercall_bounce_pre(xch, vmemrange) ||
+         xc_hypercall_bounce_pre(xch, vdistance) ||
+         xc_hypercall_bounce_pre(xch, vcpu_to_vnode) ||
+         xc_hypercall_bounce_pre(xch, vnode_numamap) ) {
+        PERROR("Could not bounce buffer for xc_domain_setvnodes.\n");
+        return rc;
+    }
+
+    set_xen_guest_handle(domctl.u.vnuma.vmemrange, vmemrange);
+    set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
+    set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
+    set_xen_guest_handle(domctl.u.vnuma.vnode_numamap, vnode_numamap);
+
+    domctl.cmd = XEN_DOMCTL_setvnumainfo;
+    domctl.domain = (domid_t)domid;
+    domctl.u.vnuma.nr_vnodes = nr_vnodes;
+    domctl.u.vnuma.__pad = 0;
+
+    rc = do_domctl(xch, &domctl);
+
+    xc_hypercall_bounce_post(xch, vmemrange);
+    xc_hypercall_bounce_post(xch, vdistance);
+    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
+    xc_hypercall_bounce_post(xch, vnode_numamap);
+
+    return rc;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 8cf3f3b..451660e 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1108,6 +1108,15 @@ int xc_domain_set_memmap_limit(xc_interface *xch,
                                uint32_t domid,
                                unsigned long map_limitkb);
 
+int xc_domain_setvnodes(xc_interface *xch,
+                        uint32_t domid,
+                        uint16_t nr_vnodes,
+                        uint16_t nr_vcpus,
+                        vmemrange_t *vmemrange,
+                        unsigned int *vdistance,
+                        unsigned int *vcpu_to_vnode,
+                        unsigned int *vnode_numamap);
+
 #if defined(__i386__) || defined(__x86_64__)
 /*
  * PC BIOS standard E820 types and structure.
-- 
1.7.10.4
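For illustration (not part of the posted series), a caller on the
domain-building path could feed a two-vnode topology to the new call along
the lines below. The vmemrange_t field names (start/end), the distance
values and the vnode-to-pnode placement are assumptions made for the
example, not taken from the patches:

#include <xenctrl.h>

/* Hypothetical helper: describe a 2-vnode guest and push it to Xen. */
static int example_set_vnuma(xc_interface *xch, uint32_t domid,
                             uint64_t mem_bytes, uint16_t nr_vcpus)
{
    vmemrange_t vmemrange[2];
    /* 2x2 distance matrix, flat row-major, SLIT-style example values. */
    unsigned int vdistance[4] = { 10, 20, 20, 10 };
    unsigned int vcpu_to_vnode[nr_vcpus];
    unsigned int vnode_numamap[2] = { 0, 1 };   /* vnode -> pnode, example */
    unsigned int i;

    /*
     * Split guest memory evenly across the two virtual nodes
     * (assumes vmemrange_t exposes start/end fields as defined in 1/7).
     */
    vmemrange[0].start = 0;
    vmemrange[0].end   = mem_bytes / 2;
    vmemrange[1].start = mem_bytes / 2;
    vmemrange[1].end   = mem_bytes;

    /* Spread vcpus over the vnodes round-robin. */
    for ( i = 0; i < nr_vcpus; i++ )
        vcpu_to_vnode[i] = i % 2;

    return xc_domain_setvnodes(xch, domid, 2, nr_vcpus,
                               vmemrange, vdistance,
                               vcpu_to_vnode, vnode_numamap);
}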
Dario Faggioli
2013-Nov-14 11:40 UTC
Re: [PATCH v2 2/7] libxc: Plumb Xen with vNUMA topology for domain.
On Wed, 2013-11-13 at 22:26 -0500, Elena Ufimtseva wrote:
> diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
> @@ -114,6 +114,15 @@ struct xc_dom_image {
>      struct xc_dom_phys *phys_pages;
>      int realmodearea_log;
>  
> +    /*
> +     * vNUMA topology and memory allocation structure.
> +     * Defines the way memory is allocated on the physical
> +     * NUMA nodes specified by vnode_numamap.
> +     */
> +    uint32_t nr_vnodes;
> +    uint64_t *vnuma_memszs;
> +    unsigned int *vnode_numamap;
> +
>
This vnode_numamap is, basically, a vnode-to-pnode mapping, isn't it? If
yes, considering you already have vcpu_to_vnode, what about actually
calling it vnode_to_pnode (or something similar)? Here in Xen (either
hypervisor or toolstack), hinting at 'map' and 'mapping', especially when
there is memory involved, tends to have a completely different meaning...

> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
>
> +/* Plumbs Xen with vNUMA topology */
> +int xc_domain_setvnodes(xc_interface *xch,
> +                        uint32_t domid,
> +                        uint16_t nr_vnodes,
> +                        uint16_t nr_vcpus,
> +                        vmemrange_t *vmemrange,
> +                        unsigned int *vdistance,
> +                        unsigned int *vcpu_to_vnode,
> +                        unsigned int *vnode_numamap)
> +{
> +    int rc;
> +    DECLARE_DOMCTL;
> +    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * nr_vnodes,
> +                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
>
These should all be XC_HYPERCALL_BUFFER_BOUNCE_IN, I think. You don't
really do any copy_to_guest() in 1/7 for any of these parameters, and, at
the same time, you really don't expect, here, to get something back from
the hypervisor in them, do you?

> +    DECLARE_HYPERCALL_BOUNCE(vdistance, sizeof(*vdistance) *
> +                             nr_vnodes * nr_vnodes,
> +                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
> +    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode, sizeof(*vcpu_to_vnode) * nr_vcpus,
> +                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
> +    DECLARE_HYPERCALL_BOUNCE(vnode_numamap, sizeof(*vnode_numamap) *
> +                             nr_vnodes,
> +                             XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
> +    if ( nr_vnodes == 0 )
> +        return -EINVAL;
> +
>
Oh, how funny, we were just talking about this with Konrad the other
day. :-)

So, my impression is that, in libxc, we return 0 on success and, in case
of error, we return -1 and set errno. So, whenever it is another xc call
that fails, you can rely on it having set errno properly and just return
-1. If it's you, like in this case, you should be the one setting errno
(and returning -1).

Again, this is what I've always done and what I find the most common in
libxc. Perhaps we need a word from one of the maintainers to enlighten us
about what the best practice is here.

If I'm right, pretty much all the error handling in this patch should be
changed, and I'll avoid pointing at all the occurrences here.

> +    if ( vdistance == NULL || vcpu_to_vnode == NULL ||
> +         vmemrange == NULL || vnode_numamap == NULL ) {
>
Mmm... didn't we say in the previous patch (1/7, the one implementing
this DOMCTL in Xen) that it is fine for vnode_numamap to be NULL? If we
error out like this here, how will Xen ever see it being NULL?

> +        PERROR("Incorrect parameters for XEN_DOMCTL_setvnumainfo.\n");
> +        return -EINVAL;
> +    }
> +
> +    rc = -EFAULT;

> diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
> index 8cf3f3b..451660e 100644
> --- a/tools/libxc/xenctrl.h
> +++ b/tools/libxc/xenctrl.h
> @@ -1108,6 +1108,15 @@ int xc_domain_set_memmap_limit(xc_interface *xch,
>                                 uint32_t domid,
>                                 unsigned long map_limitkb);
>  
> +int xc_domain_setvnodes(xc_interface *xch,
> +                        uint32_t domid,
> +                        uint16_t nr_vnodes,
> +                        uint16_t nr_vcpus,
> +                        vmemrange_t *vmemrange,
> +                        unsigned int *vdistance,
> +                        unsigned int *vcpu_to_vnode,
> +                        unsigned int *vnode_numamap);
> +
>
This is a minor thing, but I wonder whether xc_domain_setvnuma() wouldn't
make a better name for this. Thoughts?

Regards,
Dario

-- 
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
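For illustration (again, not code from the series), the review points about
bounce direction and error handling would translate to roughly the sketch
below: input-only bounce buffers and the libxc convention of returning -1
with errno set. It reuses the private libxc helpers and the domctl layout
from patch 1/7, keeps the structure of the posted function, and leaves the
question of allowing vnode_numamap == NULL aside; treat it as an assumption
about a possible follow-up, not the actual v3:

/* Hypothetical rework of xc_domain_setvnodes() along the review comments. */
int xc_domain_setvnodes(xc_interface *xch,
                        uint32_t domid,
                        uint16_t nr_vnodes,
                        uint16_t nr_vcpus,
                        vmemrange_t *vmemrange,
                        unsigned int *vdistance,
                        unsigned int *vcpu_to_vnode,
                        unsigned int *vnode_numamap)
{
    int rc;
    DECLARE_DOMCTL;
    /* Input-only: the hypervisor never writes these buffers back. */
    DECLARE_HYPERCALL_BOUNCE(vmemrange, sizeof(*vmemrange) * nr_vnodes,
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(vdistance,
                             sizeof(*vdistance) * nr_vnodes * nr_vnodes,
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(vcpu_to_vnode,
                             sizeof(*vcpu_to_vnode) * nr_vcpus,
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
    DECLARE_HYPERCALL_BOUNCE(vnode_numamap,
                             sizeof(*vnode_numamap) * nr_vnodes,
                             XC_HYPERCALL_BUFFER_BOUNCE_IN);

    /* libxc convention: 0 on success, -1 on error with errno set. */
    if ( nr_vnodes == 0 || vmemrange == NULL || vdistance == NULL ||
         vcpu_to_vnode == NULL || vnode_numamap == NULL )
    {
        errno = EINVAL;
        return -1;
    }

    if ( xc_hypercall_bounce_pre(xch, vmemrange) ||
         xc_hypercall_bounce_pre(xch, vdistance) ||
         xc_hypercall_bounce_pre(xch, vcpu_to_vnode) ||
         xc_hypercall_bounce_pre(xch, vnode_numamap) )
    {
        PERROR("Could not bounce buffers for xc_domain_setvnodes");
        return -1;   /* rely on the failing call having set errno */
    }

    set_xen_guest_handle(domctl.u.vnuma.vmemrange, vmemrange);
    set_xen_guest_handle(domctl.u.vnuma.vdistance, vdistance);
    set_xen_guest_handle(domctl.u.vnuma.vcpu_to_vnode, vcpu_to_vnode);
    set_xen_guest_handle(domctl.u.vnuma.vnode_numamap, vnode_numamap);

    domctl.cmd = XEN_DOMCTL_setvnumainfo;
    domctl.domain = (domid_t)domid;
    domctl.u.vnuma.nr_vnodes = nr_vnodes;
    domctl.u.vnuma.__pad = 0;

    /* do_domctl() itself returns 0 or -1 and sets errno on failure. */
    rc = do_domctl(xch, &domctl);

    xc_hypercall_bounce_post(xch, vmemrange);
    xc_hypercall_bounce_post(xch, vdistance);
    xc_hypercall_bounce_post(xch, vcpu_to_vnode);
    xc_hypercall_bounce_post(xch, vnode_numamap);

    return rc;
}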