James (song wei)
2010-Mar-17 08:56 UTC
[Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus
Hi, There are more than 64 cpus on new intel platform especially on NUMA system, so that we need break the pcpu limit (that is 64) when set affinity of a VCPU. -James (Song Wei) Signed-off-by: James (song wei) <jsong@novell.com> diff -r c1f272c3a441 tools/libxc/xc_domain.c --- a/tools/libxc/xc_domain.c Mon Mar 15 17:08:29 2010 +0000 +++ b/tools/libxc/xc_domain.c Wed Mar 17 16:51:07 2010 +0800 @@ -98,23 +98,28 @@ int xc_vcpu_setaffinity(int xc_handle, uint32_t domid, int vcpu, - uint64_t cpumap) + uint64_t *cpumap, int cpusize) { DECLARE_DOMCTL; int ret = -1; - uint8_t local[sizeof (cpumap)]; + uint8_t *local = malloc(cpusize); + if(local == NULL) + { + PERROR("Could not alloc memory for Xen hypercall"); + goto out; + } domctl.cmd = XEN_DOMCTL_setvcpuaffinity; domctl.domain = (domid_t)domid; domctl.u.vcpuaffinity.vcpu = vcpu; - bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); + bitmap_64_to_byte(local, cpumap, cpusize * 8); set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); - domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; + domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; - if ( lock_pages(local, sizeof(local)) != 0 ) + if ( lock_pages(local, cpusize) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; @@ -122,9 +127,10 @@ ret = do_domctl(xc_handle, &domctl); - unlock_pages(local, sizeof(local)); + unlock_pages(local, cpusize); out: + free(local); return ret; } @@ -132,18 +138,26 @@ int xc_vcpu_getaffinity(int xc_handle, uint32_t domid, int vcpu, - uint64_t *cpumap) + uint64_t *cpumap, + int cpusize) { DECLARE_DOMCTL; int ret = -1; - uint8_t local[sizeof (cpumap)]; + uint8_t * local = malloc(cpusize); + + if(local == NULL) + { + PERROR("Could not alloc memory for Xen hypercall"); + goto out; + } domctl.cmd = XEN_DOMCTL_getvcpuaffinity; domctl.domain = (domid_t)domid; domctl.u.vcpuaffinity.vcpu = vcpu; + set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); - domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; + domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; if ( lock_pages(local, sizeof(local)) != 0 ) { @@ -154,8 +168,9 @@ ret = do_domctl(xc_handle, &domctl); unlock_pages(local, sizeof (local)); - bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); - out: + bitmap_byte_to_64(cpumap, local, cpusize * 8); +out: + free(local); return ret; } diff -r c1f272c3a441 tools/libxc/xenctrl.h --- a/tools/libxc/xenctrl.h Mon Mar 15 17:08:29 2010 +0000 +++ b/tools/libxc/xenctrl.h Wed Mar 17 16:51:07 2010 +0800 @@ -309,11 +309,13 @@ int xc_vcpu_setaffinity(int xc_handle, uint32_t domid, int vcpu, - uint64_t cpumap); + uint64_t *cpumap, + int cpusize); int xc_vcpu_getaffinity(int xc_handle, uint32_t domid, int vcpu, - uint64_t *cpumap); + uint64_t *cpumap, + int cpusize); /** * This function will return information about one or more domains. 
It is diff -r c1f272c3a441 tools/python/xen/lowlevel/xc/xc.c --- a/tools/python/xen/lowlevel/xc/xc.c Mon Mar 15 17:08:29 2010 +0000 +++ b/tools/python/xen/lowlevel/xc/xc.c Wed Mar 17 16:51:07 2010 +0800 @@ -215,35 +215,54 @@ { uint32_t dom; int vcpu = 0, i; - uint64_t cpumap = ~0ULL; + uint64_t *cpumap; PyObject *cpulist = NULL; + int nr_cpus, size; + xc_physinfo_t info; + xc_cpu_to_node_t map[1]; + uint64_t cpumap_size = sizeof(cpumap); static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL }; + if ( !PyArg_ParseTupleAndKeywords(args, kwds, "i|iO", kwd_list, &dom, &vcpu, &cpulist) ) return NULL; + set_xen_guest_handle(info.cpu_to_node, map); + info.max_cpu_id = 1; + if ( xc_physinfo(self->xc_handle, &info) != 0 ) + return pyxc_error_to_exception(); + + nr_cpus = info.nr_cpus; + + size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); + cpumap = malloc(cpumap_size * size); + if(cpumap == NULL) + return pyxc_error_to_exception(); + + if ( (cpulist != NULL) && PyList_Check(cpulist) ) { - cpumap = 0ULL; + for ( i = 0; i < size; i++) + { + cpumap[i] = 0ULL; + } for ( i = 0; i < PyList_Size(cpulist); i++ ) { long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i)); - if ( cpu >= 64 ) - { - errno = EINVAL; - PyErr_SetFromErrno(xc_error_obj); - return NULL; - } - cpumap |= (uint64_t)1 << cpu; + *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8)); } } - if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 ) + if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 ) + { + free(cpumap); return pyxc_error_to_exception(); - + } + Py_INCREF(zero); + free(cpumap); return zero; } @@ -362,7 +381,11 @@ uint32_t dom, vcpu = 0; xc_vcpuinfo_t info; int rc, i; - uint64_t cpumap; + uint64_t *cpumap; + int nr_cpus, size; + xc_physinfo_t pinfo = { 0 }; + xc_cpu_to_node_t map[1]; + uint64_t cpumap_size = sizeof(cpumap); static char *kwd_list[] = { "domid", "vcpu", NULL }; @@ -370,12 +393,25 @@ &dom, &vcpu) ) return NULL; + set_xen_guest_handle(pinfo.cpu_to_node, map); + pinfo.max_cpu_id = 1; + if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) + return pyxc_error_to_exception(); + nr_cpus = pinfo.nr_cpus; rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info); if ( rc < 0 ) return pyxc_error_to_exception(); - rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap); + size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); + + if((cpumap = malloc(cpumap_size * size)) == NULL) + return pyxc_error_to_exception(); + + rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size); if ( rc < 0 ) + { + free(cpumap); return pyxc_error_to_exception(); + } info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}", "online", info.online, @@ -385,17 +421,18 @@ "cpu", info.cpu); cpulist = PyList_New(0); - for ( i = 0; cpumap != 0; i++ ) + for ( i = 0; i < size * cpumap_size * 8; i++ ) { - if ( cpumap & 1 ) { + if (*(cpumap + i / (cpumap_size * 8)) & 1 ) { PyObject *pyint = PyInt_FromLong(i); PyList_Append(cpulist, pyint); Py_DECREF(pyint); } - cpumap >>= 1; + *(cpumap + i / (cpumap_size * 8)) >>= 1; } PyDict_SetItemString(info_dict, "cpumap", cpulist); Py_DECREF(cpulist); + free(cpumap); return info_dict; } http://old.nabble.com/file/p27928229/adjust_vcpuaffinity_more_cpu.patch adjust_vcpuaffinity_more_cpu.patch -- View this message in context: http://old.nabble.com/-Patch--adjust-the-cpu-affinity-to-more-than-64-cpus-tp27928229p27928229.html Sent from the Xen - Dev mailing list archive at Nabble.com. 
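For illustration, a minimal caller-side sketch of how the revised libxc interface above might be used on a host with more than 64 physical CPUs. The helper name pin_vcpu_to_cpu and its "pin to a single CPU" policy are hypothetical; the xc_vcpu_setaffinity prototype is the one the patch adds to xenctrl.h, with cpusize given in bytes, matching how the Python binding passes size * cpumap_size:

#include <stdint.h>
#include <stdlib.h>

/* Prototype as changed by the patch (tools/libxc/xenctrl.h). */
int xc_vcpu_setaffinity(int xc_handle, uint32_t domid, int vcpu,
                        uint64_t *cpumap, int cpusize);

/* Hypothetical helper: pin one VCPU of a domain to a single physical CPU
 * on a host with nr_cpus physical CPUs (possibly more than 64). */
int pin_vcpu_to_cpu(int xc_handle, uint32_t domid, int vcpu,
                    int cpu, int nr_cpus)
{
    /* Round the bitmap up to whole 64-bit words, as the patch does. */
    int words = (nr_cpus + 63) / 64;
    uint64_t *cpumap = calloc(words, sizeof(*cpumap));
    int rc;

    if (cpumap == NULL)
        return -1;

    cpumap[cpu / 64] |= (uint64_t)1 << (cpu % 64);   /* set exactly one bit */
    rc = xc_vcpu_setaffinity(xc_handle, domid, vcpu,
                             cpumap, (int)(words * sizeof(*cpumap)));
    free(cpumap);
    return rc;
}

The nr_cpus value would normally come from xc_physinfo(), which is how the Python binding in the patch obtains it.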
Jan Beulich
2010-Mar-17 09:25 UTC
Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus
>>> "James (song wei)" <jsong@novell.com> 17.03.10 09:56 >>>
>--- a/tools/python/xen/lowlevel/xc/xc.c Mon Mar 15 17:08:29 2010 +0000
>+++ b/tools/python/xen/lowlevel/xc/xc.c Wed Mar 17 16:51:07 2010 +0800
>@@ -215,35 +215,54 @@
> {
>     uint32_t dom;
>     int vcpu = 0, i;
>-    uint64_t cpumap = ~0ULL;
>+    uint64_t *cpumap;
>     PyObject *cpulist = NULL;
>+    int nr_cpus, size;
>+    xc_physinfo_t info;
>+    xc_cpu_to_node_t map[1];
>+    uint64_t cpumap_size = sizeof(cpumap);

Perhaps sizeof(*cpumap)?

>...
>+    *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));

Using [] here and in similar places further down would likely make these
constructs a little bit more legible.

>@@ -362,7 +381,11 @@
>     uint32_t dom, vcpu = 0;
>     xc_vcpuinfo_t info;
>     int rc, i;
>-    uint64_t cpumap;
>+    uint64_t *cpumap;
>+    int nr_cpus, size;
>+    xc_physinfo_t pinfo = { 0 };
>+    xc_cpu_to_node_t map[1];
>+    uint64_t cpumap_size = sizeof(cpumap);

Same as above.

>@@ -385,17 +421,18 @@
>                               "cpu", info.cpu);
>
>     cpulist = PyList_New(0);
>-    for ( i = 0; cpumap != 0; i++ )
>+    for ( i = 0; i < size * cpumap_size * 8; i++ )

Why not simply use nr_cpus here?

Jan
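For readers skimming the archive, a small self-contained illustration of the two review points above (sizeof(*cpumap) versus sizeof(cpumap), and [] indexing versus pointer arithmetic). The CPU number 70 and the two-word bitmap are made-up values; this is not part of the patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t words[2] = { 0, 0 };            /* enough bits for 128 CPUs */
    uint64_t *cpumap = words;
    uint64_t cpumap_size = sizeof(*cpumap);  /* 8: one bitmap word; sizeof(cpumap) would be the pointer size */
    int cpu = 70;                            /* an example CPU above the old 64-CPU limit */

    /* pointer-arithmetic form used in the first version of the patch ... */
    *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));

    /* ... and the equivalent, more legible indexed form suggested above */
    cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8));

    printf("word 1 = %#llx\n", (unsigned long long)cpumap[1]);  /* prints 0x40: bit 6 of word 1 */
    return 0;
}

Had cpumap_size been initialised from sizeof(cpumap), it would be 4 on a 32-bit build and 8 on a 64-bit build, silently changing the word size the bit arithmetic assumes.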
James (song wei)
2010-Mar-18 02:26 UTC
Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus
Jan Beulich wrote:
>
>>>> "James (song wei)" <jsong@novell.com> 17.03.10 09:56 >>>
>>--- a/tools/python/xen/lowlevel/xc/xc.c Mon Mar 15 17:08:29 2010 +0000
>>+++ b/tools/python/xen/lowlevel/xc/xc.c Wed Mar 17 16:51:07 2010 +0800
>>@@ -215,35 +215,54 @@
>> {
>>     uint32_t dom;
>>     int vcpu = 0, i;
>>-    uint64_t cpumap = ~0ULL;
>>+    uint64_t *cpumap;
>>     PyObject *cpulist = NULL;
>>+    int nr_cpus, size;
>>+    xc_physinfo_t info;
>>+    xc_cpu_to_node_t map[1];
>>+    uint64_t cpumap_size = sizeof(cpumap);
>
> Perhaps sizeof(*cpumap)?

-- Yeah, you are right.

>>...
>>+    *(cpumap + cpu / (cpumap_size * 8)) |= (uint64_t)1 << (cpu % (cpumap_size * 8));
>
> Using [] here and in similar places further down would likely make these
> constructs a little bit more legible.

-- Yes.

>>@@ -362,7 +381,11 @@
>>     uint32_t dom, vcpu = 0;
>>     xc_vcpuinfo_t info;
>>     int rc, i;
>>-    uint64_t cpumap;
>>+    uint64_t *cpumap;
>>+    int nr_cpus, size;
>>+    xc_physinfo_t pinfo = { 0 };
>>+    xc_cpu_to_node_t map[1];
>>+    uint64_t cpumap_size = sizeof(cpumap);
>
> Same as above.
>
>>@@ -385,17 +421,18 @@
>>                               "cpu", info.cpu);
>>
>>     cpulist = PyList_New(0);
>>-    for ( i = 0; cpumap != 0; i++ )
>>+    for ( i = 0; i < size * cpumap_size * 8; i++ )
>
> Why not simply use nr_cpus here?

-- Yes, copying nr_cpus bits is enough here.

Jan, thank you very much! I'll post the new patch here soon.

-James (Song Wei)
James (song wei)
2010-Mar-18 03:41 UTC
Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus
Keir, could you take a look at this issue. New Patch for this issue: Singed-off-by: James (Song Wei) <jsong@novell.com> diff -r 8b269215464b tools/libxc/xc_domain.c --- a/tools/libxc/xc_domain.c Wed Mar 10 14:01:32 2010 +0800 +++ b/tools/libxc/xc_domain.c Thu Mar 18 11:37:55 2010 +0800 @@ -105,23 +105,28 @@ int xc_vcpu_setaffinity(int xc_handle, uint32_t domid, int vcpu, - uint64_t cpumap) + uint64_t *cpumap, int cpusize) { DECLARE_DOMCTL; int ret = -1; - uint8_t local[sizeof (cpumap)]; + uint8_t *local = malloc(cpusize); + if(local == NULL) + { + PERROR("Could not alloc memory for Xen hypercall"); + goto out; + } domctl.cmd = XEN_DOMCTL_setvcpuaffinity; domctl.domain = (domid_t)domid; domctl.u.vcpuaffinity.vcpu = vcpu; - bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); + bitmap_64_to_byte(local, cpumap, cpusize * 8); set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); - domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; + domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; - if ( lock_pages(local, sizeof(local)) != 0 ) + if ( lock_pages(local, cpusize) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; @@ -129,9 +134,10 @@ ret = do_domctl(xc_handle, &domctl); - unlock_pages(local, sizeof(local)); + unlock_pages(local, cpusize); out: + free(local); return ret; } @@ -139,18 +145,25 @@ int xc_vcpu_getaffinity(int xc_handle, uint32_t domid, int vcpu, - uint64_t *cpumap) + uint64_t *cpumap, int cpusize) { DECLARE_DOMCTL; int ret = -1; - uint8_t local[sizeof (cpumap)]; + uint8_t * local = malloc(cpusize); + + if(local == NULL) + { + PERROR("Could not alloc memory for Xen hypercall"); + goto out; + } domctl.cmd = XEN_DOMCTL_getvcpuaffinity; domctl.domain = (domid_t)domid; domctl.u.vcpuaffinity.vcpu = vcpu; + set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); - domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; + domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; if ( lock_pages(local, sizeof(local)) != 0 ) { @@ -161,8 +174,9 @@ ret = do_domctl(xc_handle, &domctl); unlock_pages(local, sizeof (local)); - bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); - out: + bitmap_byte_to_64(cpumap, local, cpusize * 8); +out: + free(local); return ret; } diff -r 8b269215464b tools/libxc/xenctrl.h --- a/tools/libxc/xenctrl.h Wed Mar 10 14:01:32 2010 +0800 +++ b/tools/libxc/xenctrl.h Thu Mar 18 11:37:55 2010 +0800 @@ -310,11 +310,13 @@ int xc_vcpu_setaffinity(int xc_handle, uint32_t domid, int vcpu, - uint64_t cpumap); + uint64_t *cpumap, + int cpusize); int xc_vcpu_getaffinity(int xc_handle, uint32_t domid, int vcpu, - uint64_t *cpumap); + uint64_t *cpumap, + int cpusize); /** * This function will return information about one or more domains. 
It is diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c --- a/tools/python/xen/lowlevel/xc/xc.c Wed Mar 10 14:01:32 2010 +0800 +++ b/tools/python/xen/lowlevel/xc/xc.c Thu Mar 18 11:37:55 2010 +0800 @@ -217,8 +217,12 @@ { uint32_t dom; int vcpu = 0, i; - uint64_t cpumap = ~0ULL; + uint64_t *cpumap; PyObject *cpulist = NULL; + int nr_cpus, size; + xc_physinfo_t info; + xc_cpu_to_node_t map[1]; + uint64_t cpumap_size = sizeof(*cpumap); static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL }; @@ -226,26 +230,38 @@ &dom, &vcpu, &cpulist) ) return NULL; + set_xen_guest_handle(info.cpu_to_node, map); + info.max_cpu_id = 1; + if ( xc_physinfo(self->xc_handle, &info) != 0 ) + return pyxc_error_to_exception(); + + nr_cpus = info.nr_cpus; + + size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); + cpumap = malloc(cpumap_size * size); + if(cpumap == NULL) + return pyxc_error_to_exception(); + if ( (cpulist != NULL) && PyList_Check(cpulist) ) { - cpumap = 0ULL; + for ( i = 0; i < size; i++) + { + cpumap[i] = 0ULL; + } for ( i = 0; i < PyList_Size(cpulist); i++ ) { long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i)); - if ( cpu >= 64 ) - { - errno = EINVAL; - PyErr_SetFromErrno(xc_error_obj); - return NULL; - } - cpumap |= (uint64_t)1 << cpu; + cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % (cpumap_size * 8)); } } - if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 ) + if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * cpumap_size) != 0 ) + { + free(cpumap); return pyxc_error_to_exception(); - + } Py_INCREF(zero); + free(cpumap); return zero; } @@ -365,7 +381,11 @@ uint32_t dom, vcpu = 0; xc_vcpuinfo_t info; int rc, i; - uint64_t cpumap; + uint64_t *cpumap; + int nr_cpus, size; + xc_physinfo_t pinfo = { 0 }; + xc_cpu_to_node_t map[1]; + uint64_t cpumap_size = sizeof(*cpumap); static char *kwd_list[] = { "domid", "vcpu", NULL }; @@ -373,12 +393,25 @@ &dom, &vcpu) ) return NULL; + set_xen_guest_handle(pinfo.cpu_to_node, map); + pinfo.max_cpu_id = 1; + if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) + return pyxc_error_to_exception(); + nr_cpus = pinfo.nr_cpus; rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info); if ( rc < 0 ) return pyxc_error_to_exception(); - rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap); + size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); + + if((cpumap = malloc(cpumap_size * size)) == NULL) + return pyxc_error_to_exception(); + + rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, cpumap_size * size); if ( rc < 0 ) + { + free(cpumap); return pyxc_error_to_exception(); + } info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}", "online", info.online, @@ -386,19 +419,19 @@ "running", info.running, "cpu_time", info.cpu_time, "cpu", info.cpu); - cpulist = PyList_New(0); - for ( i = 0; cpumap != 0; i++ ) + for ( i = 0; i < nr_cpus; i++ ) { - if ( cpumap & 1 ) { + if (*(cpumap + i / (cpumap_size * 8)) & 1 ) { PyObject *pyint = PyInt_FromLong(i); PyList_Append(cpulist, pyint); Py_DECREF(pyint); } - cpumap >>= 1; + cpumap[i / (cpumap_size * 8)] >>= 1; } PyDict_SetItemString(info_dict, "cpumap", cpulist); Py_DECREF(cpulist); + free(cpumap); return info_dict; } http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch adjust_vcpuaffinity_more_cpu.patch -- View this message in context: http://old.nabble.com/-Patch--adjust-the-cpu-affinity-to-more-than-64-cpus-tp27928229p27941371.html Sent from the Xen - Dev mailing list archive at Nabble.com. 
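To make the sizing arithmetic in the updated patch easier to follow, a quick worked example with a hypothetical 80-CPU host: cpumap_size = sizeof(*cpumap) is 8, so size = (nr_cpus + cpumap_size * 8 - 1) / (cpumap_size * 8) = (80 + 63) / 64 = 2 bitmap words. The binding therefore allocates 2 * 8 = 16 bytes, covering CPUs 0-127, and passes cpusize = 16 down to xc_vcpu_setaffinity()/xc_vcpu_getaffinity(), which in turn advertise cpusize * 8 = 128 bits to the hypervisor.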
Masaki Kanno
2010-Mar-19 03:14 UTC
Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus
Hi James, I tested xm vcpu-pin command with xen-unstable changeset 21044. The command failed as follows. Also Time(s) values by xm vcpu-list command were strange. The values were same in all VCPUs. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 1 r-- 116.5 any cpu Domain-0 0 1 1 r-- 116.5 any cpu # xm vcpu-pin Domain-0 0 0 Error: Cannot pin vcpu: 0 to cpu: [0] - (22, ''Invalid argument'') Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all> Set which CPUs a VCPU can use. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 1 r-- 117.0 any cpu Domain-0 0 1 1 r-- 117.0 any cpu I reverted changeset 21044 and 21040, and then I tested xm vcpu-pin command again. The command succeeded as follows. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 60.8 any cpu Domain-0 0 1 1 -b- 42.8 any cpu # xm vcpu-pin Domain-0 0 0 # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 61.6 0 Domain-0 0 1 1 -b- 43.2 any cpu Best regards, Kan Wed, 17 Mar 2010 20:41:22 -0700 (PDT), "James (song wei)" wrote:> >Keir, could you take a look at this issue. >New Patch for this issue: >Singed-off-by: James (Song Wei) <jsong@novell.com> > >diff -r 8b269215464b tools/libxc/xc_domain.c >--- a/tools/libxc/xc_domain.c Wed Mar 10 14:01:32 2010 +0800 >+++ b/tools/libxc/xc_domain.c Thu Mar 18 11:37:55 2010 +0800 >@@ -105,23 +105,28 @@ > int xc_vcpu_setaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t cpumap) >+ uint64_t *cpumap, int cpusize) > { > DECLARE_DOMCTL; > int ret = -1; >- uint8_t local[sizeof (cpumap)]; >+ uint8_t *local = malloc(cpusize); > >+ if(local == NULL) >+ { >+ PERROR("Could not alloc memory for Xen hypercall"); >+ goto out; >+ } > domctl.cmd = XEN_DOMCTL_setvcpuaffinity; > domctl.domain = (domid_t)domid; > domctl.u.vcpuaffinity.vcpu = vcpu; > >- bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); >+ bitmap_64_to_byte(local, cpumap, cpusize * 8); > > set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); > >- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; > >- if ( lock_pages(local, sizeof(local)) != 0 ) >+ if ( lock_pages(local, cpusize) != 0 ) > { > PERROR("Could not lock memory for Xen hypercall"); > goto out; >@@ -129,9 +134,10 @@ > > ret = do_domctl(xc_handle, &domctl); > >- unlock_pages(local, sizeof(local)); >+ unlock_pages(local, cpusize); > > out: >+ free(local); > return ret; > } > >@@ -139,18 +145,25 @@ > int xc_vcpu_getaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t *cpumap) >+ uint64_t *cpumap, int cpusize) > { > DECLARE_DOMCTL; > int ret = -1; >- uint8_t local[sizeof (cpumap)]; >+ uint8_t * local = malloc(cpusize); >+ >+ if(local == NULL) >+ { >+ PERROR("Could not alloc memory for Xen hypercall"); >+ goto out; >+ } > > domctl.cmd = XEN_DOMCTL_getvcpuaffinity; > domctl.domain = (domid_t)domid; > domctl.u.vcpuaffinity.vcpu = vcpu; > >+ > set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); >- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; > > if ( lock_pages(local, sizeof(local)) != 0 ) > { >@@ -161,8 +174,9 @@ > ret = do_domctl(xc_handle, &domctl); > > unlock_pages(local, sizeof (local)); >- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); >- out: >+ bitmap_byte_to_64(cpumap, local, cpusize * 8); >+out: >+ free(local); > return ret; > } > >diff -r 8b269215464b tools/libxc/xenctrl.h >--- a/tools/libxc/xenctrl.h 
Wed Mar 10 14:01:32 2010 +0800 >+++ b/tools/libxc/xenctrl.h Thu Mar 18 11:37:55 2010 +0800 >@@ -310,11 +310,13 @@ > int xc_vcpu_setaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t cpumap); >+ uint64_t *cpumap, >+ int cpusize); > int xc_vcpu_getaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t *cpumap); >+ uint64_t *cpumap, >+ int cpusize); > > /** > * This function will return information about one or more domains. It is >diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c >--- a/tools/python/xen/lowlevel/xc/xc.c Wed Mar 10 14:01:32 2010 +0800 >+++ b/tools/python/xen/lowlevel/xc/xc.c Thu Mar 18 11:37:55 2010 +0800 >@@ -217,8 +217,12 @@ > { > uint32_t dom; > int vcpu = 0, i; >- uint64_t cpumap = ~0ULL; >+ uint64_t *cpumap; > PyObject *cpulist = NULL; >+ int nr_cpus, size; >+ xc_physinfo_t info; >+ xc_cpu_to_node_t map[1]; >+ uint64_t cpumap_size = sizeof(*cpumap); > > static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL }; > >@@ -226,26 +230,38 @@ > &dom, &vcpu, &cpulist) ) > return NULL; > >+ set_xen_guest_handle(info.cpu_to_node, map); >+ info.max_cpu_id = 1; >+ if ( xc_physinfo(self->xc_handle, &info) != 0 ) >+ return pyxc_error_to_exception(); >+ >+ nr_cpus = info.nr_cpus; >+ >+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >+ cpumap = malloc(cpumap_size * size); >+ if(cpumap == NULL) >+ return pyxc_error_to_exception(); >+ > if ( (cpulist != NULL) && PyList_Check(cpulist) ) > { >- cpumap = 0ULL; >+ for ( i = 0; i < size; i++) >+ { >+ cpumap[i] = 0ULL; >+ } > for ( i = 0; i < PyList_Size(cpulist); i++ ) > { > long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i)); >- if ( cpu >= 64 ) >- { >- errno = EINVAL; >- PyErr_SetFromErrno(xc_error_obj); >- return NULL; >- } >- cpumap |= (uint64_t)1 << cpu; >+ cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % >(cpumap_size * 8)); > } > } > >- if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 ) >+ if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * >cpumap_size) != 0 ) >+ { >+ free(cpumap); > return pyxc_error_to_exception(); >- >+ } > Py_INCREF(zero); >+ free(cpumap); > return zero; > } > >@@ -365,7 +381,11 @@ > uint32_t dom, vcpu = 0; > xc_vcpuinfo_t info; > int rc, i; >- uint64_t cpumap; >+ uint64_t *cpumap; >+ int nr_cpus, size; >+ xc_physinfo_t pinfo = { 0 }; >+ xc_cpu_to_node_t map[1]; >+ uint64_t cpumap_size = sizeof(*cpumap); > > static char *kwd_list[] = { "domid", "vcpu", NULL }; > >@@ -373,12 +393,25 @@ > &dom, &vcpu) ) > return NULL; > >+ set_xen_guest_handle(pinfo.cpu_to_node, map); >+ pinfo.max_cpu_id = 1; >+ if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) >+ return pyxc_error_to_exception(); >+ nr_cpus = pinfo.nr_cpus; > rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info); > if ( rc < 0 ) > return pyxc_error_to_exception(); >- rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap); >+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >+ >+ if((cpumap = malloc(cpumap_size * size)) == NULL) >+ return pyxc_error_to_exception(); >+ >+ rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, >cpumap_size * size); > if ( rc < 0 ) >+ { >+ free(cpumap); > return pyxc_error_to_exception(); >+ } > > info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}", > "online", info.online, >@@ -386,19 +419,19 @@ > "running", info.running, > "cpu_time", info.cpu_time, > "cpu", info.cpu); >- > cpulist = PyList_New(0); >- for ( i = 0; cpumap != 0; i++ ) >+ for ( i = 0; i < nr_cpus; i++ ) > { >- if ( cpumap & 1 ) { >+ if (*(cpumap + i / 
(cpumap_size * 8)) & 1 ) { > PyObject *pyint = PyInt_FromLong(i); > PyList_Append(cpulist, pyint); > Py_DECREF(pyint); > } >- cpumap >>= 1; >+ cpumap[i / (cpumap_size * 8)] >>= 1; > } > PyDict_SetItemString(info_dict, "cpumap", cpulist); > Py_DECREF(cpulist); >+ free(cpumap); > return info_dict; > } > > http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch >adjust_vcpuaffinity_more_cpu.patch > > > >-- >View this message in context: http://old.nabble.com/-Patch--adjust-the-cpu- >affinity-to-more-than-64-cpus-tp27928229p27941371.html >Sent from the Xen - Dev mailing list archive at Nabble.com. > > >_______________________________________________ >Xen-devel mailing list >Xen-devel@lists.xensource.com >http://lists.xensource.com/xen-devel_______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
James Song
2010-Mar-19 09:09 UTC
Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus
I update upstream code to c/s 20145. This patch works fine and I haven''t meet this problem. linux-2j72:/home # xm vcpu-pin 0 1 0 linux-2j72:/home # xm vcpu-list 0 Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 --- 23.4 0 Domain-0 0 1 0 r-- 19.2 0 B.T.W, there are something wrong when compile c/s 21046! -James>>> Masaki Kanno <kanno.masaki@jp.fujitsu.com> 2010-3-19 11:14 >>>Hi James, I tested xm vcpu-pin command with xen-unstable changeset 21044. The command failed as follows. Also Time(s) values by xm vcpu-list command were strange. The values were same in all VCPUs. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 1 r-- 116.5 any cpu Domain-0 0 1 1 r-- 116.5 any cpu # xm vcpu-pin Domain-0 0 0 Error: Cannot pin vcpu: 0 to cpu: [0] - (22, ''Invalid argument'') Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all> Set which CPUs a VCPU can use. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 1 r-- 117.0 any cpu Domain-0 0 1 1 r-- 117.0 any cpu I reverted changeset 21044 and 21040, and then I tested xm vcpu-pin command again. The command succeeded as follows. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 60.8 any cpu Domain-0 0 1 1 -b- 42.8 any cpu # xm vcpu-pin Domain-0 0 0 # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 61.6 0 Domain-0 0 1 1 -b- 43.2 any cpu Best regards, Kan Wed, 17 Mar 2010 20:41:22 -0700 (PDT), "James (song wei)" wrote:> >Keir, could you take a look at this issue. >New Patch for this issue: >Singed-off-by: James (Song Wei) <jsong@novell.com> > >diff -r 8b269215464b tools/libxc/xc_domain.c >--- a/tools/libxc/xc_domain.cWed Mar 10 14:01:32 2010 +0800 >+++ b/tools/libxc/xc_domain.cThu Mar 18 11:37:55 2010 +0800 >@@ -105,23 +105,28 @@ > int xc_vcpu_setaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t cpumap) >+ uint64_t *cpumap, int cpusize) > { > DECLARE_DOMCTL; > int ret = -1; >- uint8_t local[sizeof (cpumap)]; >+ uint8_t *local = malloc(cpusize); > >+ if(local == NULL) >+ { >+ PERROR("Could not alloc memory for Xen hypercall"); >+ goto out; >+ } > domctl.cmd = XEN_DOMCTL_setvcpuaffinity; > domctl.domain = (domid_t)domid; > domctl.u.vcpuaffinity.vcpu = vcpu; > >- bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); >+ bitmap_64_to_byte(local, cpumap, cpusize * 8); > > set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); > >- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; > >- if ( lock_pages(local, sizeof(local)) != 0 ) >+ if ( lock_pages(local, cpusize) != 0 ) > { > PERROR("Could not lock memory for Xen hypercall"); > goto out; >@@ -129,9 +134,10 @@ > > ret = do_domctl(xc_handle, &domctl); > >- unlock_pages(local, sizeof(local)); >+ unlock_pages(local, cpusize); > > out: >+ free(local); > return ret; > } > >@@ -139,18 +145,25 @@ > int xc_vcpu_getaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t *cpumap) >+ uint64_t *cpumap, int cpusize) > { > DECLARE_DOMCTL; > int ret = -1; >- uint8_t local[sizeof (cpumap)]; >+ uint8_t * local = malloc(cpusize); >+ >+ if(local == NULL) >+ { >+ PERROR("Could not alloc memory for Xen hypercall"); >+ goto out; >+ } > > domctl.cmd = XEN_DOMCTL_getvcpuaffinity; > domctl.domain = (domid_t)domid; > domctl.u.vcpuaffinity.vcpu = vcpu; > >+ > set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); >- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 
8; > > if ( lock_pages(local, sizeof(local)) != 0 ) > { >@@ -161,8 +174,9 @@ > ret = do_domctl(xc_handle, &domctl); > > unlock_pages(local, sizeof (local)); >- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); >- out: >+ bitmap_byte_to_64(cpumap, local, cpusize * 8); >+out: >+ free(local); > return ret; > } > >diff -r 8b269215464b tools/libxc/xenctrl.h >--- a/tools/libxc/xenctrl.hWed Mar 10 14:01:32 2010 +0800 >+++ b/tools/libxc/xenctrl.hThu Mar 18 11:37:55 2010 +0800 >@@ -310,11 +310,13 @@ > int xc_vcpu_setaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t cpumap); >+ uint64_t *cpumap, >+ int cpusize); > int xc_vcpu_getaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t *cpumap); >+ uint64_t *cpumap, >+ int cpusize); > > /** > * This function will return information about one or more domains. It is >diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c >--- a/tools/python/xen/lowlevel/xc/xc.cWed Mar 10 14:01:32 2010 +0800 >+++ b/tools/python/xen/lowlevel/xc/xc.cThu Mar 18 11:37:55 2010 +0800 >@@ -217,8 +217,12 @@ > { > uint32_t dom; > int vcpu = 0, i; >- uint64_t cpumap = ~0ULL; >+ uint64_t *cpumap; > PyObject *cpulist = NULL; >+ int nr_cpus, size; >+ xc_physinfo_t info; >+ xc_cpu_to_node_t map[1]; >+ uint64_t cpumap_size = sizeof(*cpumap); > > static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL }; > >@@ -226,26 +230,38 @@ > &dom, &vcpu, &cpulist) ) > return NULL; > >+ set_xen_guest_handle(info.cpu_to_node, map); >+ info.max_cpu_id = 1; >+ if ( xc_physinfo(self->xc_handle, &info) != 0 ) >+ return pyxc_error_to_exception(); >+ >+ nr_cpus = info.nr_cpus; >+ >+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >+ cpumap = malloc(cpumap_size * size); >+ if(cpumap == NULL) >+ return pyxc_error_to_exception(); >+ > if ( (cpulist != NULL) && PyList_Check(cpulist) ) > { >- cpumap = 0ULL; >+ for ( i = 0; i < size; i++) >+ { >+ cpumap[i] = 0ULL; >+ } > for ( i = 0; i < PyList_Size(cpulist); i++ ) > { > long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i)); >- if ( cpu >= 64 ) >- { >- errno = EINVAL; >- PyErr_SetFromErrno(xc_error_obj); >- return NULL; >- } >- cpumap |= (uint64_t)1 << cpu; >+ cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % >(cpumap_size * 8)); > } > } > >- if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 ) >+ if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * >cpumap_size) != 0 ) >+ { >+ free(cpumap); > return pyxc_error_to_exception(); >- >+ } > Py_INCREF(zero); >+ free(cpumap); > return zero; > } > >@@ -365,7 +381,11 @@ > uint32_t dom, vcpu = 0; > xc_vcpuinfo_t info; > int rc, i; >- uint64_t cpumap; >+ uint64_t *cpumap; >+ int nr_cpus, size; >+ xc_physinfo_t pinfo = { 0 }; >+ xc_cpu_to_node_t map[1]; >+ uint64_t cpumap_size = sizeof(*cpumap); > > static char *kwd_list[] = { "domid", "vcpu", NULL }; > >@@ -373,12 +393,25 @@ > &dom, &vcpu) ) > return NULL; > >+ set_xen_guest_handle(pinfo.cpu_to_node, map); >+ pinfo.max_cpu_id = 1; >+ if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) >+ return pyxc_error_to_exception(); >+ nr_cpus = pinfo.nr_cpus; > rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info); > if ( rc < 0 ) > return pyxc_error_to_exception(); >- rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap); >+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >+ >+ if((cpumap = malloc(cpumap_size * size)) == NULL) >+ return pyxc_error_to_exception(); >+ >+ rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, >cpumap_size * size); > if ( rc < 0 ) >+ { >+ 
free(cpumap); > return pyxc_error_to_exception(); >+ } > > info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}", > "online", info.online, >@@ -386,19 +419,19 @@ > "running", info.running, > "cpu_time", info.cpu_time, > "cpu", info.cpu); >- > cpulist = PyList_New(0); >- for ( i = 0; cpumap != 0; i++ ) >+ for ( i = 0; i < nr_cpus; i++ ) > { >- if ( cpumap & 1 ) { >+ if (*(cpumap + i / (cpumap_size * 8)) & 1 ) { > PyObject *pyint = PyInt_FromLong(i); > PyList_Append(cpulist, pyint); > Py_DECREF(pyint); > } >- cpumap >>= 1; >+ cpumap[i / (cpumap_size * 8)] >>= 1; > } > PyDict_SetItemString(info_dict, "cpumap", cpulist); > Py_DECREF(cpulist); >+ free(cpumap); > return info_dict; > } > > http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch >adjust_vcpuaffinity_more_cpu.patch > > > >-- >View this message in context: http://old.nabble.com/-Patch--adjust-the-cpu- >affinity-to-more-than-64-cpus-tp27928229p27941371.html >Sent from the Xen - Dev mailing list archive at Nabble.com. > > >_______________________________________________ >Xen-devel mailing list >Xen-devel@lists.xensource.com >http://lists.xensource.com/xen-devel_______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Jan Beulich
2010-Mar-19 09:39 UTC
issue with c/s 21046 (was Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus)
>>> "James Song" <jsong@novell.com> 19.03.10 10:09 >>>
>B.T.W, there are something wrong when compile c/s 21046!

Mind stating what it is that is wrong with this?

Jan
Masaki Kanno
2010-Mar-19 10:28 UTC
Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than64 cpus
Hi James, I tried changeset 21045, but I still meet the problem. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 31.9 any cpu Domain-0 0 1 0 r-- 31.9 any cpu # xm vcpu-pin Domain-0 0 0 Error: Cannot pin vcpu: 0 to cpu: [0] - (22, ''Invalid argument'') Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all> Set which CPUs a VCPU can use. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 32.7 any cpu Domain-0 0 1 0 r-- 32.7 any cpu FYI, I tried changeset 21045 on IPF(ia64) server too. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 17.8 any cpu Domain-0 0 1 0 r-- 17.8 any cpu Domain-0 0 2 0 r-- 17.8 any cpu Domain-0 0 3 0 r-- 17.8 any cpu # xm vcpu-pin Domain-0 0 0 # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 18.0 0 Domain-0 0 1 0 r-- 18.0 0 Domain-0 0 2 0 r-- 18.0 0 Domain-0 0 3 0 r-- 18.0 0 Best regards, Kan Fri, 19 Mar 2010 03:09:53 -0600, "James Song" wrote:>I update upstream code to c/s 20145. This patch works fine and I haven''t >meet this problem. > >linux-2j72:/home # xm vcpu-pin 0 1 0 >linux-2j72:/home # xm vcpu-list 0 >Name ID VCPU CPU State Time(s) CPU >Affinity >Domain-0 0 0 0 --- 23.4 0 >Domain-0 0 1 0 r-- 19.2 0 > > >B.T.W, there are something wrong when compile c/s 21046! > >-James > >>>> Masaki Kanno <kanno.masaki@jp.fujitsu.com> 2010-3-19 11:14 >>> >Hi James, > >I tested xm vcpu-pin command with xen-unstable changeset 21044. >The command failed as follows. Also Time(s) values by xm vcpu-list >command were strange. The values were same in all VCPUs. > ># xm vcpu-list >Name ID VCPU CPU State Time(s) CPU >Affinity >Domain-0 0 0 1 r-- 116.5 any cpu >Domain-0 0 1 1 r-- 116.5 any cpu ># xm vcpu-pin Domain-0 0 0 >Error: Cannot pin vcpu: 0 to cpu: [0] - (22, ''Invalid argument'') >Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all> > >Set which CPUs a VCPU can use. ># xm vcpu-list >Name ID VCPU CPU State Time(s) CPU >Affinity >Domain-0 0 0 1 r-- 117.0 any cpu >Domain-0 0 1 1 r-- 117.0 any cpu > > >I reverted changeset 21044 and 21040, and then I tested xm vcpu-pin >command again. The command succeeded as follows. > ># xm vcpu-list >Name ID VCPU CPU State Time(s) CPU >Affinity >Domain-0 0 0 0 r-- 60.8 any cpu >Domain-0 0 1 1 -b- 42.8 any cpu ># xm vcpu-pin Domain-0 0 0 ># xm vcpu-list >Name ID VCPU CPU State Time(s) CPU >Affinity >Domain-0 0 0 0 r-- 61.6 0 >Domain-0 0 1 1 -b- 43.2 any cpu > >Best regards, >Kan > >Wed, 17 Mar 2010 20:41:22 -0700 (PDT), "James (song wei)" wrote: > >> >>Keir, could you take a look at this issue. 
>>New Patch for this issue: >>Singed-off-by: James (Song Wei) <jsong@novell.com> >> >>diff -r 8b269215464b tools/libxc/xc_domain.c >>--- a/tools/libxc/xc_domain.cWed Mar 10 14:01:32 2010 +0800 >>+++ b/tools/libxc/xc_domain.cThu Mar 18 11:37:55 2010 +0800 >>@@ -105,23 +105,28 @@ >> int xc_vcpu_setaffinity(int xc_handle, >> uint32_t domid, >> int vcpu, >>- uint64_t cpumap) >>+ uint64_t *cpumap, int cpusize) >> { >> DECLARE_DOMCTL; >> int ret = -1; >>- uint8_t local[sizeof (cpumap)]; >>+ uint8_t *local = malloc(cpusize); >> >>+ if(local == NULL) >>+ { >>+ PERROR("Could not alloc memory for Xen hypercall"); >>+ goto out; >>+ } >> domctl.cmd = XEN_DOMCTL_setvcpuaffinity; >> domctl.domain = (domid_t)domid; >> domctl.u.vcpuaffinity.vcpu = vcpu; >> >>- bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); >>+ bitmap_64_to_byte(local, cpumap, cpusize * 8); >> >> set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); >> >>- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >>+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; >> >>- if ( lock_pages(local, sizeof(local)) != 0 ) >>+ if ( lock_pages(local, cpusize) != 0 ) >> { >> PERROR("Could not lock memory for Xen hypercall"); >> goto out; >>@@ -129,9 +134,10 @@ >> >> ret = do_domctl(xc_handle, &domctl); >> >>- unlock_pages(local, sizeof(local)); >>+ unlock_pages(local, cpusize); >> >> out: >>+ free(local); >> return ret; >> } >> >>@@ -139,18 +145,25 @@ >> int xc_vcpu_getaffinity(int xc_handle, >> uint32_t domid, >> int vcpu, >>- uint64_t *cpumap) >>+ uint64_t *cpumap, int cpusize) >> { >> DECLARE_DOMCTL; >> int ret = -1; >>- uint8_t local[sizeof (cpumap)]; >>+ uint8_t * local = malloc(cpusize); >>+ >>+ if(local == NULL) >>+ { >>+ PERROR("Could not alloc memory for Xen hypercall"); >>+ goto out; >>+ } >> >> domctl.cmd = XEN_DOMCTL_getvcpuaffinity; >> domctl.domain = (domid_t)domid; >> domctl.u.vcpuaffinity.vcpu = vcpu; >> >>+ >> set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); >>- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >>+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; >> >> if ( lock_pages(local, sizeof(local)) != 0 ) >> { >>@@ -161,8 +174,9 @@ >> ret = do_domctl(xc_handle, &domctl); >> >> unlock_pages(local, sizeof (local)); >>- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); >>- out: >>+ bitmap_byte_to_64(cpumap, local, cpusize * 8); >>+out: >>+ free(local); >> return ret; >> } >> >>diff -r 8b269215464b tools/libxc/xenctrl.h >>--- a/tools/libxc/xenctrl.hWed Mar 10 14:01:32 2010 +0800 >>+++ b/tools/libxc/xenctrl.hThu Mar 18 11:37:55 2010 +0800 >>@@ -310,11 +310,13 @@ >> int xc_vcpu_setaffinity(int xc_handle, >> uint32_t domid, >> int vcpu, >>- uint64_t cpumap); >>+ uint64_t *cpumap, >>+ int cpusize); >> int xc_vcpu_getaffinity(int xc_handle, >> uint32_t domid, >> int vcpu, >>- uint64_t *cpumap); >>+ uint64_t *cpumap, >>+ int cpusize); >> >> /** >> * This function will return information about one or more domains. 
It is >>diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c >>--- a/tools/python/xen/lowlevel/xc/xc.cWed Mar 10 14:01:32 2010 +0800 >>+++ b/tools/python/xen/lowlevel/xc/xc.cThu Mar 18 11:37:55 2010 +0800 >>@@ -217,8 +217,12 @@ >> { >> uint32_t dom; >> int vcpu = 0, i; >>- uint64_t cpumap = ~0ULL; >>+ uint64_t *cpumap; >> PyObject *cpulist = NULL; >>+ int nr_cpus, size; >>+ xc_physinfo_t info; >>+ xc_cpu_to_node_t map[1]; >>+ uint64_t cpumap_size = sizeof(*cpumap); >> >> static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL }; >> >>@@ -226,26 +230,38 @@ >> &dom, &vcpu, &cpulist) ) >> return NULL; >> >>+ set_xen_guest_handle(info.cpu_to_node, map); >>+ info.max_cpu_id = 1; >>+ if ( xc_physinfo(self->xc_handle, &info) != 0 ) >>+ return pyxc_error_to_exception(); >>+ >>+ nr_cpus = info.nr_cpus; >>+ >>+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >>+ cpumap = malloc(cpumap_size * size); >>+ if(cpumap == NULL) >>+ return pyxc_error_to_exception(); >>+ >> if ( (cpulist != NULL) && PyList_Check(cpulist) ) >> { >>- cpumap = 0ULL; >>+ for ( i = 0; i < size; i++) >>+ { >>+ cpumap[i] = 0ULL; >>+ } >> for ( i = 0; i < PyList_Size(cpulist); i++ ) >> { >> long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i)); >>- if ( cpu >= 64 ) >>- { >>- errno = EINVAL; >>- PyErr_SetFromErrno(xc_error_obj); >>- return NULL; >>- } >>- cpumap |= (uint64_t)1 << cpu; >>+ cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % >>(cpumap_size * 8)); >> } >> } >> >>- if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 ) >>+ if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * >>cpumap_size) != 0 ) >>+ { >>+ free(cpumap); >> return pyxc_error_to_exception(); >>- >>+ } >> Py_INCREF(zero); >>+ free(cpumap); >> return zero; >> } >> >>@@ -365,7 +381,11 @@ >> uint32_t dom, vcpu = 0; >> xc_vcpuinfo_t info; >> int rc, i; >>- uint64_t cpumap; >>+ uint64_t *cpumap; >>+ int nr_cpus, size; >>+ xc_physinfo_t pinfo = { 0 }; >>+ xc_cpu_to_node_t map[1]; >>+ uint64_t cpumap_size = sizeof(*cpumap); >> >> static char *kwd_list[] = { "domid", "vcpu", NULL }; >> >>@@ -373,12 +393,25 @@ >> &dom, &vcpu) ) >> return NULL; >> >>+ set_xen_guest_handle(pinfo.cpu_to_node, map); >>+ pinfo.max_cpu_id = 1; >>+ if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) >>+ return pyxc_error_to_exception(); >>+ nr_cpus = pinfo.nr_cpus; >> rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info); >> if ( rc < 0 ) >> return pyxc_error_to_exception(); >>- rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap); >>+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >>+ >>+ if((cpumap = malloc(cpumap_size * size)) == NULL) >>+ return pyxc_error_to_exception(); >>+ >>+ rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, >>cpumap_size * size); >> if ( rc < 0 ) >>+ { >>+ free(cpumap); >> return pyxc_error_to_exception(); >>+ } >> >> info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}", >> "online", info.online, >>@@ -386,19 +419,19 @@ >> "running", info.running, >> "cpu_time", info.cpu_time, >> "cpu", info.cpu); >>- >> cpulist = PyList_New(0); >>- for ( i = 0; cpumap != 0; i++ ) >>+ for ( i = 0; i < nr_cpus; i++ ) >> { >>- if ( cpumap & 1 ) { >>+ if (*(cpumap + i / (cpumap_size * 8)) & 1 ) { >> PyObject *pyint = PyInt_FromLong(i); >> PyList_Append(cpulist, pyint); >> Py_DECREF(pyint); >> } >>- cpumap >>= 1; >>+ cpumap[i / (cpumap_size * 8)] >>= 1; >> } >> PyDict_SetItemString(info_dict, "cpumap", cpulist); >> Py_DECREF(cpulist); >>+ free(cpumap); >> return info_dict; >> } >> >> 
http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch >>adjust_vcpuaffinity_more_cpu.patch >> >> >> >>-- >>View this message in context: http://old.nabble.com/-Patch--adjust-the-cpu- >>affinity-to-more-than-64-cpus-tp27928229p27941371.html >>Sent from the Xen - Dev mailing list archive at Nabble.com. >> >> >>_______________________________________________ >>Xen-devel mailing list >>Xen-devel@lists.xensource.com >>http://lists.xensource.com/xen-devel_______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
James Song
2010-Mar-19 10:47 UTC
issue with c/s 21046 (was Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus)
Sorry, I forgot to post the error message. Below is the error message:

helper2.c: In function 'cpu_x86_init':
helper2.c:142: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c: In function 'sp_info':
helper2.c:219: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c:224: warning: format '%lx' expects type 'long unsigned int', but argument 6 has type 'uint32_t'
helper2.c:224: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:226: error: 'ioreq_t' has no member named 'io_count'
helper2.c: In function '__cpu_get_ioreq':
helper2.c:235: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c:242: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:242: warning: format '%lx' expects type 'long unsigned int', but argument 8 has type 'uint32_t'
helper2.c: In function 'cpu_handle_ioreq':
helper2.c:517: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:517: warning: format '%lx' expects type 'long unsigned int', but argument 8 has type 'uint32_t'

>>> Jan Beulich 2010-3-19 17:39 >>>
>>> "James Song" <jsong@novell.com> 19.03.10 10:09 >>>
>B.T.W, there are something wrong when compile c/s 21046!

Mind stating what it is that is wrong with this?

Jan
Jan Beulich
2010-Mar-19 11:10 UTC
issue with c/s 21046 (was Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus)
That's in the qemu tree, so it has nothing to do with c/s 21046.

Jan

>>> James Song 19.03.10 11:47 >>>
sorry, I forgot to post the error message, belowing is the error message:

helper2.c: In function 'cpu_x86_init':
helper2.c:142: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c: In function 'sp_info':
helper2.c:219: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c:224: warning: format '%lx' expects type 'long unsigned int', but argument 6 has type 'uint32_t'
helper2.c:224: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:226: error: 'ioreq_t' has no member named 'io_count'
helper2.c: In function '__cpu_get_ioreq':
helper2.c:235: error: 'shared_iopage_t' has no member named 'vcpu_iodata'
helper2.c:242: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:242: warning: format '%lx' expects type 'long unsigned int', but argument 8 has type 'uint32_t'
helper2.c: In function 'cpu_handle_ioreq':
helper2.c:517: warning: format '%lx' expects type 'long unsigned int', but argument 7 has type 'uint32_t'
helper2.c:517: warning: format '%lx' expects type 'long unsigned int', but argument 8 has type 'uint32_t'

>>> Jan Beulich 2010-3-19 17:39 >>>
>>> "James Song" <jsong@novell.com> 19.03.10 10:09 >>>
>B.T.W, there are something wrong when compile c/s 21046!
Mind stating what it is that is wrong with this?

Jan
Liu, Jinsong
2010-Mar-30 18:23 UTC
RE: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus
James and Masaki,

The issue Masaki reported does exist; it appears under ia32pae. A patch (c/s 21087) fixes the bug.

Thanks,
Jinsong

________________________________
From: xen-devel-bounces@lists.xensource.com [mailto:xen-devel-bounces@lists.xensource.com] On Behalf Of James Song
Sent: Friday, March 19, 2010 5:10 PM
To: Masaki Kanno; xen-devel@lists.xensource.com
Subject: Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus

I update upstream code to c/s 20145. This patch works fine and I haven't meet this problem.

linux-2j72:/home # xm vcpu-pin 0 1 0
linux-2j72:/home # xm vcpu-list 0
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   ---      23.4 0
Domain-0                             0     1     0   r--      19.2 0

B.T.W, there are something wrong when compile c/s 21046!

-James

>>> Masaki Kanno <kanno.masaki@jp.fujitsu.com> 2010-3-19 11:14 >>>
Hi James,

I tested xm vcpu-pin command with xen-unstable changeset 21044.
The command failed as follows. Also Time(s) values by xm vcpu-list
command were strange. The values were same in all VCPUs.

# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     1   r--     116.5 any cpu
Domain-0                             0     1     1   r--     116.5 any cpu
# xm vcpu-pin Domain-0 0 0
Error: Cannot pin vcpu: 0 to cpu: [0] - (22, 'Invalid argument')
Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all>

Set which CPUs a VCPU can use.
# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     1   r--     117.0 any cpu
Domain-0                             0     1     1   r--     117.0 any cpu

I reverted changeset 21044 and 21040, and then I tested xm vcpu-pin
command again. The command succeeded as follows.

# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      60.8 any cpu
Domain-0                             0     1     1   -b-      42.8 any cpu
# xm vcpu-pin Domain-0 0 0
# xm vcpu-list
Name                                ID  VCPU   CPU State   Time(s) CPU Affinity
Domain-0                             0     0     0   r--      61.6 0
Domain-0                             0     1     1   -b-      43.2 any cpu

Best regards,
Kan

Wed, 17 Mar 2010 20:41:22 -0700 (PDT), "James (song wei)" wrote:
>
>Keir, could you take a look at this issue.
>New Patch for this issue: >Singed-off-by: James (Song Wei) <jsong@novell.com> > >diff -r 8b269215464b tools/libxc/xc_domain.c >--- a/tools/libxc/xc_domain.cWed Mar 10 14:01:32 2010 +0800 >+++ b/tools/libxc/xc_domain.cThu Mar 18 11:37:55 2010 +0800 >@@ -105,23 +105,28 @@ > int xc_vcpu_setaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t cpumap) >+ uint64_t *cpumap, int cpusize) > { > DECLARE_DOMCTL; > int ret = -1; >- uint8_t local[sizeof (cpumap)]; >+ uint8_t *local = malloc(cpusize); > >+ if(local == NULL) >+ { >+ PERROR("Could not alloc memory for Xen hypercall"); >+ goto out; >+ } > domctl.cmd = XEN_DOMCTL_setvcpuaffinity; > domctl.domain = (domid_t)domid; > domctl.u.vcpuaffinity.vcpu = vcpu; > >- bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); >+ bitmap_64_to_byte(local, cpumap, cpusize * 8); > > set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); > >- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; > >- if ( lock_pages(local, sizeof(local)) != 0 ) >+ if ( lock_pages(local, cpusize) != 0 ) > { > PERROR("Could not lock memory for Xen hypercall"); > goto out; >@@ -129,9 +134,10 @@ > > ret = do_domctl(xc_handle, &domctl); > >- unlock_pages(local, sizeof(local)); >+ unlock_pages(local, cpusize); > > out: >+ free(local); > return ret; > } > >@@ -139,18 +145,25 @@ > int xc_vcpu_getaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t *cpumap) >+ uint64_t *cpumap, int cpusize) > { > DECLARE_DOMCTL; > int ret = -1; >- uint8_t local[sizeof (cpumap)]; >+ uint8_t * local = malloc(cpusize); >+ >+ if(local == NULL) >+ { >+ PERROR("Could not alloc memory for Xen hypercall"); >+ goto out; >+ } > > domctl.cmd = XEN_DOMCTL_getvcpuaffinity; > domctl.domain = (domid_t)domid; > domctl.u.vcpuaffinity.vcpu = vcpu; > >+ > set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); >- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; > > if ( lock_pages(local, sizeof(local)) != 0 ) > { >@@ -161,8 +174,9 @@ > ret = do_domctl(xc_handle, &domctl); > > unlock_pages(local, sizeof (local)); >- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); >- out: >+ bitmap_byte_to_64(cpumap, local, cpusize * 8); >+out: >+ free(local); > return ret; > } > >diff -r 8b269215464b tools/libxc/xenctrl.h >--- a/tools/libxc/xenctrl.hWed Mar 10 14:01:32 2010 +0800 >+++ b/tools/libxc/xenctrl.hThu Mar 18 11:37:55 2010 +0800 >@@ -310,11 +310,13 @@ > int xc_vcpu_setaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t cpumap); >+ uint64_t *cpumap, >+ int cpusize); > int xc_vcpu_getaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t *cpumap); >+ uint64_t *cpumap, >+ int cpusize); > > /** > * This function will return information about one or more domains. 
It is >diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c >--- a/tools/python/xen/lowlevel/xc/xc.cWed Mar 10 14:01:32 2010 +0800 >+++ b/tools/python/xen/lowlevel/xc/xc.cThu Mar 18 11:37:55 2010 +0800 >@@ -217,8 +217,12 @@ > { > uint32_t dom; > int vcpu = 0, i; >- uint64_t cpumap = ~0ULL; >+ uint64_t *cpumap; > PyObject *cpulist = NULL; >+ int nr_cpus, size; >+ xc_physinfo_t info; >+ xc_cpu_to_node_t map[1]; >+ uint64_t cpumap_size = sizeof(*cpumap); > > static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL }; > >@@ -226,26 +230,38 @@ > &dom, &vcpu, &cpulist) ) > return NULL; > >+ set_xen_guest_handle(info.cpu_to_node, map); >+ info.max_cpu_id = 1; >+ if ( xc_physinfo(self->xc_handle, &info) != 0 ) >+ return pyxc_error_to_exception(); >+ >+ nr_cpus = info.nr_cpus; >+ >+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >+ cpumap = malloc(cpumap_size * size); >+ if(cpumap == NULL) >+ return pyxc_error_to_exception(); >+ > if ( (cpulist != NULL) && PyList_Check(cpulist) ) > { >- cpumap = 0ULL; >+ for ( i = 0; i < size; i++) >+ { >+ cpumap[i] = 0ULL; >+ } > for ( i = 0; i < PyList_Size(cpulist); i++ ) > { > long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i)); >- if ( cpu >= 64 ) >- { >- errno = EINVAL; >- PyErr_SetFromErrno(xc_error_obj); >- return NULL; >- } >- cpumap |= (uint64_t)1 << cpu; >+ cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % >(cpumap_size * 8)); > } > } > >- if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 ) >+ if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * >cpumap_size) != 0 ) >+ { >+ free(cpumap); > return pyxc_error_to_exception(); >- >+ } > Py_INCREF(zero); >+ free(cpumap); > return zero; > } > >@@ -365,7 +381,11 @@ > uint32_t dom, vcpu = 0; > xc_vcpuinfo_t info; > int rc, i; >- uint64_t cpumap; >+ uint64_t *cpumap; >+ int nr_cpus, size; >+ xc_physinfo_t pinfo = { 0 }; >+ xc_cpu_to_node_t map[1]; >+ uint64_t cpumap_size = sizeof(*cpumap); > > static char *kwd_list[] = { "domid", "vcpu", NULL }; > >@@ -373,12 +393,25 @@ > &dom, &vcpu) ) > return NULL; > >+ set_xen_guest_handle(pinfo.cpu_to_node, map); >+ pinfo.max_cpu_id = 1; >+ if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) >+ return pyxc_error_to_exception(); >+ nr_cpus = pinfo.nr_cpus; > rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info); > if ( rc < 0 ) > return pyxc_error_to_exception(); >- rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap); >+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >+ >+ if((cpumap = malloc(cpumap_size * size)) == NULL) >+ return pyxc_error_to_exception(); >+ >+ rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, >cpumap_size * size); > if ( rc < 0 ) >+ { >+ free(cpumap); > return pyxc_error_to_exception(); >+ } > > info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}", > "online", info.online, >@@ -386,19 +419,19 @@ > "running", info.running, > "cpu_time", info.cpu_time, > "cpu", info.cpu); >- > cpulist = PyList_New(0); >- for ( i = 0; cpumap != 0; i++ ) >+ for ( i = 0; i < nr_cpus; i++ ) > { >- if ( cpumap & 1 ) { >+ if (*(cpumap + i / (cpumap_size * 8)) & 1 ) { > PyObject *pyint = PyInt_FromLong(i); > PyList_Append(cpulist, pyint); > Py_DECREF(pyint); > } >- cpumap >>= 1; >+ cpumap[i / (cpumap_size * 8)] >>= 1; > } > PyDict_SetItemString(info_dict, "cpumap", cpulist); > Py_DECREF(cpulist); >+ free(cpumap); > return info_dict; > } > > http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch >adjust_vcpuaffinity_more_cpu.patch > > > >-- >View this message in context: 
http://old.nabble.com/-Patch--adjust-the-cpu- >affinity-to-more-than-64-cpus-tp27928229p27941371.html >Sent from the Xen - Dev mailing list archive at Nabble.com. > > >_______________________________________________ >Xen-devel mailing list >Xen-devel@lists.xensource.com >http://lists.xensource.com/xen-devel_______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
James Song
2010-Mar-31 02:12 UTC
RE: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus
Yeah, perhaps, I haven''t tested this patch on IA32PAE box. Thanks for fixed it. -Song Wei (James)>>> "Liu, Jinsong" <jinsong.liu@intel.com> 2010-3-31 2:23 >>>James and Masaki, The issue Masaki said does exist, it appear under ia32pae. A patch (c/s 21087) is to fix the bug. Thanks, Jinsong From: xen-devel-bounces@lists.xensource.com [mailto:xen-devel-bounces@lists.xensource.com] On Behalf Of James Song Sent: Friday, March 19, 2010 5:10 PM To: Masaki Kanno; xen-devel@lists.xensource.com Subject: Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus I update upstream code to c/s 20145. This patch works fine and I haven''t meet this problem. linux-2j72:/home # xm vcpu-pin 0 1 0 linux-2j72:/home # xm vcpu-list 0 Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 --- 23.4 0 Domain-0 0 1 0 r-- 19.2 0 B.T.W, there are something wrong when compile c/s 21046! -James>>> Masaki Kanno <kanno.masaki@jp.fujitsu.com> 2010-3-19 11:14 >>>Hi James, I tested xm vcpu-pin command with xen-unstable changeset 21044. The command failed as follows. Also Time(s) values by xm vcpu-list command were strange. The values were same in all VCPUs. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 1 r-- 116.5 any cpu Domain-0 0 1 1 r-- 116.5 any cpu # xm vcpu-pin Domain-0 0 0 Error: Cannot pin vcpu: 0 to cpu: [0] - (22, ''Invalid argument'') Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all> Set which CPUs a VCPU can use. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 1 r-- 117.0 any cpu Domain-0 0 1 1 r-- 117.0 any cpu I reverted changeset 21044 and 21040, and then I tested xm vcpu-pin command again. The command succeeded as follows. # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 60.8 any cpu Domain-0 0 1 1 -b- 42.8 any cpu # xm vcpu-pin Domain-0 0 0 # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 61.6 0 Domain-0 0 1 1 -b- 43.2 any cpu Best regards, Kan Wed, 17 Mar 2010 20:41:22 -0700 (PDT), "James (song wei)" wrote:> >Keir, could you take a look at this issue. 
>New Patch for this issue: >Singed-off-by: James (Song Wei) <jsong@novell.com> > >diff -r 8b269215464b tools/libxc/xc_domain.c >--- a/tools/libxc/xc_domain.cWed Mar 10 14:01:32 2010 +0800 >+++ b/tools/libxc/xc_domain.cThu Mar 18 11:37:55 2010 +0800 >@@ -105,23 +105,28 @@ > int xc_vcpu_setaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t cpumap) >+ uint64_t *cpumap, int cpusize) > { > DECLARE_DOMCTL; > int ret = -1; >- uint8_t local[sizeof (cpumap)]; >+ uint8_t *local = malloc(cpusize); > >+ if(local == NULL) >+ { >+ PERROR("Could not alloc memory for Xen hypercall"); >+ goto out; >+ } > domctl.cmd = XEN_DOMCTL_setvcpuaffinity; > domctl.domain = (domid_t)domid; > domctl.u.vcpuaffinity.vcpu = vcpu; > >- bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); >+ bitmap_64_to_byte(local, cpumap, cpusize * 8); > > set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); > >- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; > >- if ( lock_pages(local, sizeof(local)) != 0 ) >+ if ( lock_pages(local, cpusize) != 0 ) > { > PERROR("Could not lock memory for Xen hypercall"); > goto out; >@@ -129,9 +134,10 @@ > > ret = do_domctl(xc_handle, &domctl); > >- unlock_pages(local, sizeof(local)); >+ unlock_pages(local, cpusize); > > out: >+ free(local); > return ret; > } > >@@ -139,18 +145,25 @@ > int xc_vcpu_getaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t *cpumap) >+ uint64_t *cpumap, int cpusize) > { > DECLARE_DOMCTL; > int ret = -1; >- uint8_t local[sizeof (cpumap)]; >+ uint8_t * local = malloc(cpusize); >+ >+ if(local == NULL) >+ { >+ PERROR("Could not alloc memory for Xen hypercall"); >+ goto out; >+ } > > domctl.cmd = XEN_DOMCTL_getvcpuaffinity; > domctl.domain = (domid_t)domid; > domctl.u.vcpuaffinity.vcpu = vcpu; > >+ > set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); >- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; > > if ( lock_pages(local, sizeof(local)) != 0 ) > { >@@ -161,8 +174,9 @@ > ret = do_domctl(xc_handle, &domctl); > > unlock_pages(local, sizeof (local)); >- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); >- out: >+ bitmap_byte_to_64(cpumap, local, cpusize * 8); >+out: >+ free(local); > return ret; > } > >diff -r 8b269215464b tools/libxc/xenctrl.h >--- a/tools/libxc/xenctrl.hWed Mar 10 14:01:32 2010 +0800 >+++ b/tools/libxc/xenctrl.hThu Mar 18 11:37:55 2010 +0800 >@@ -310,11 +310,13 @@ > int xc_vcpu_setaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t cpumap); >+ uint64_t *cpumap, >+ int cpusize); > int xc_vcpu_getaffinity(int xc_handle, > uint32_t domid, > int vcpu, >- uint64_t *cpumap); >+ uint64_t *cpumap, >+ int cpusize); > > /** > * This function will return information about one or more domains. 
It is >diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c >--- a/tools/python/xen/lowlevel/xc/xc.cWed Mar 10 14:01:32 2010 +0800 >+++ b/tools/python/xen/lowlevel/xc/xc.cThu Mar 18 11:37:55 2010 +0800 >@@ -217,8 +217,12 @@ > { > uint32_t dom; > int vcpu = 0, i; >- uint64_t cpumap = ~0ULL; >+ uint64_t *cpumap; > PyObject *cpulist = NULL; >+ int nr_cpus, size; >+ xc_physinfo_t info; >+ xc_cpu_to_node_t map[1]; >+ uint64_t cpumap_size = sizeof(*cpumap); > > static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL }; > >@@ -226,26 +230,38 @@ > &dom, &vcpu, &cpulist) ) > return NULL; > >+ set_xen_guest_handle(info.cpu_to_node, map); >+ info.max_cpu_id = 1; >+ if ( xc_physinfo(self->xc_handle, &info) != 0 ) >+ return pyxc_error_to_exception(); >+ >+ nr_cpus = info.nr_cpus; >+ >+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >+ cpumap = malloc(cpumap_size * size); >+ if(cpumap == NULL) >+ return pyxc_error_to_exception(); >+ > if ( (cpulist != NULL) && PyList_Check(cpulist) ) > { >- cpumap = 0ULL; >+ for ( i = 0; i < size; i++) >+ { >+ cpumap[i] = 0ULL; >+ } > for ( i = 0; i < PyList_Size(cpulist); i++ ) > { > long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i)); >- if ( cpu >= 64 ) >- { >- errno = EINVAL; >- PyErr_SetFromErrno(xc_error_obj); >- return NULL; >- } >- cpumap |= (uint64_t)1 << cpu; >+ cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % >(cpumap_size * 8)); > } > } > >- if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 ) >+ if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * >cpumap_size) != 0 ) >+ { >+ free(cpumap); > return pyxc_error_to_exception(); >- >+ } > Py_INCREF(zero); >+ free(cpumap); > return zero; > } > >@@ -365,7 +381,11 @@ > uint32_t dom, vcpu = 0; > xc_vcpuinfo_t info; > int rc, i; >- uint64_t cpumap; >+ uint64_t *cpumap; >+ int nr_cpus, size; >+ xc_physinfo_t pinfo = { 0 }; >+ xc_cpu_to_node_t map[1]; >+ uint64_t cpumap_size = sizeof(*cpumap); > > static char *kwd_list[] = { "domid", "vcpu", NULL }; > >@@ -373,12 +393,25 @@ > &dom, &vcpu) ) > return NULL; > >+ set_xen_guest_handle(pinfo.cpu_to_node, map); >+ pinfo.max_cpu_id = 1; >+ if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) >+ return pyxc_error_to_exception(); >+ nr_cpus = pinfo.nr_cpus; > rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info); > if ( rc < 0 ) > return pyxc_error_to_exception(); >- rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap); >+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >+ >+ if((cpumap = malloc(cpumap_size * size)) == NULL) >+ return pyxc_error_to_exception(); >+ >+ rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, >cpumap_size * size); > if ( rc < 0 ) >+ { >+ free(cpumap); > return pyxc_error_to_exception(); >+ } > > info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}", > "online", info.online, >@@ -386,19 +419,19 @@ > "running", info.running, > "cpu_time", info.cpu_time, > "cpu", info.cpu); >- > cpulist = PyList_New(0); >- for ( i = 0; cpumap != 0; i++ ) >+ for ( i = 0; i < nr_cpus; i++ ) > { >- if ( cpumap & 1 ) { >+ if (*(cpumap + i / (cpumap_size * 8)) & 1 ) { > PyObject *pyint = PyInt_FromLong(i); > PyList_Append(cpulist, pyint); > Py_DECREF(pyint); > } >- cpumap >>= 1; >+ cpumap[i / (cpumap_size * 8)] >>= 1; > } > PyDict_SetItemString(info_dict, "cpumap", cpulist); > Py_DECREF(cpulist); >+ free(cpumap); > return info_dict; > } > > http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch >adjust_vcpuaffinity_more_cpu.patch > > > >-- >View this message in context: 
http://old.nabble.com/-Patch--adjust-the-cpu-affinity-to-more-than-64-cpus-tp27928229p27941371.html
Sent from the Xen - Dev mailing list archive at Nabble.com.

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
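To make the widened libxc interface in the patch above concrete: a caller now sizes the map itself and passes the byte count explicitly. The following is a hedged sketch of such a caller, written against the signatures posted in this thread (the pre-4.1 int xc_handle style); pin_vcpu is an invented helper name and error handling is kept minimal.

#include <stdint.h>
#include <stdlib.h>
#include "xenctrl.h"

/* Pin one VCPU of a domain to a single physical CPU using the new
 * xc_vcpu_setaffinity(handle, dom, vcpu, cpumap, cpusize) signature. */
static int pin_vcpu(int xc_handle, uint32_t domid, int vcpu, int cpu)
{
    xc_physinfo_t info = { 0 };
    xc_cpu_to_node_t node_map[1];
    uint64_t *cpumap;
    int words, cpusize, rc;

    set_xen_guest_handle(info.cpu_to_node, node_map);
    info.max_cpu_id = 1;
    if ( xc_physinfo(xc_handle, &info) != 0 )
        return -1;

    if ( cpu < 0 || cpu >= (int)info.nr_cpus )
        return -1;

    words = (info.nr_cpus + 63) / 64;      /* whole 64-bit words */
    cpusize = words * sizeof(uint64_t);    /* byte count passed to libxc */

    if ( (cpumap = calloc(words, sizeof(uint64_t))) == NULL )
        return -1;

    cpumap[cpu / 64] |= (uint64_t)1 << (cpu % 64);

    rc = xc_vcpu_setaffinity(xc_handle, domid, vcpu, cpumap, cpusize);
    free(cpumap);
    return rc;
}

The same sizing applies to the read side: with this interface, xc_vcpu_getaffinity() expects a buffer of cpusize bytes and fills it with the current affinity.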
Masaki Kanno
2010-Mar-31 06:29 UTC
RE: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus
Hi Jinsong, Thank you for fixing the issue. I confirmed that your patch works on both ia32pae and ia64. ia32pae: # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 1 r-- 33.9 any cpu Domain-0 0 1 0 -b- 22.5 any cpu # xm vcpu-pin Domain-0 0 0 # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 -b- 34.4 0 Domain-0 0 1 1 r-- 23.2 any cpu ia64: # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 r-- 23.7 any cpu Domain-0 0 1 1 -b- 10.6 any cpu Domain-0 0 2 2 -b- 6.8 any cpu Domain-0 0 3 3 -b- 9.7 any cpu # xm vcpu-pin Domain-0 0 0 # xm vcpu-list Name ID VCPU CPU State Time(s) CPU Affinity Domain-0 0 0 0 -b- 24.0 0 Domain-0 0 1 1 -b- 10.8 any cpu Domain-0 0 2 2 r-- 6.8 any cpu Domain-0 0 3 3 -b- 9.8 any cpu Best regards, Kan Wed, 31 Mar 2010 02:23:47 +0800, "Liu, Jinsong" wrote:>James and Masaki, > >The issue Masaki said does exist, it appear under ia32pae. >A patch (c/s 21087) is to fix the bug. > >Thanks, >Jinsong > >________________________________ >From: xen-devel-bounces@lists.xensource.com [mailto:xen-devel-bounces@lists >.xensource.com] On Behalf Of James Song >Sent: Friday, March 19, 2010 5:10 PM >To: Masaki Kanno; xen-devel@lists.xensource.com >Subject: Re: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus > >I update upstream code to c/s 20145. This patch works fine and I haven''t >meet this problem. > >linux-2j72:/home # xm vcpu-pin 0 1 0 >linux-2j72:/home # xm vcpu-list 0 >Name ID VCPU CPU State Time(s) CPU >Affinity >Domain-0 0 0 0 --- 23.4 0 >Domain-0 0 1 0 r-- 19.2 0 > > >B.T.W, there are something wrong when compile c/s 21046! > >-James > >>>> Masaki Kanno <kanno.masaki@jp.fujitsu.com> 2010-3-19 11:14 >>> >Hi James, > >I tested xm vcpu-pin command with xen-unstable changeset 21044. >The command failed as follows. Also Time(s) values by xm vcpu-list >command were strange. The values were same in all VCPUs. > ># xm vcpu-list >Name ID VCPU CPU State Time(s) CPU >Affinity >Domain-0 0 0 1 r-- 116.5 any cpu >Domain-0 0 1 1 r-- 116.5 any cpu ># xm vcpu-pin Domain-0 0 0 >Error: Cannot pin vcpu: 0 to cpu: [0] - (22, ''Invalid argument'') >Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all> > >Set which CPUs a VCPU can use. ># xm vcpu-list >Name ID VCPU CPU State Time(s) CPU >Affinity >Domain-0 0 0 1 r-- 117.0 any cpu >Domain-0 0 1 1 r-- 117.0 any cpu > > >I reverted changeset 21044 and 21040, and then I tested xm vcpu-pin >command again. The command succeeded as follows. > ># xm vcpu-list >Name ID VCPU CPU State Time(s) CPU >Affinity >Domain-0 0 0 0 r-- 60.8 any cpu >Domain-0 0 1 1 -b- 42.8 any cpu ># xm vcpu-pin Domain-0 0 0 ># xm vcpu-list >Name ID VCPU CPU State Time(s) CPU >Affinity >Domain-0 0 0 0 r-- 61.6 0 >Domain-0 0 1 1 -b- 43.2 any cpu > >Best regards, >Kan > >Wed, 17 Mar 2010 20:41:22 -0700 (PDT), "James (song wei)" wrote: > >> >>Keir, could you take a look at this issue. 
>>New Patch for this issue: >>Singed-off-by: James (Song Wei) <jsong@novell.com> >> >>diff -r 8b269215464b tools/libxc/xc_domain.c >>--- a/tools/libxc/xc_domain.cWed Mar 10 14:01:32 2010 +0800 >>+++ b/tools/libxc/xc_domain.cThu Mar 18 11:37:55 2010 +0800 >>@@ -105,23 +105,28 @@ >> int xc_vcpu_setaffinity(int xc_handle, >> uint32_t domid, >> int vcpu, >>- uint64_t cpumap) >>+ uint64_t *cpumap, int cpusize) >> { >> DECLARE_DOMCTL; >> int ret = -1; >>- uint8_t local[sizeof (cpumap)]; >>+ uint8_t *local = malloc(cpusize); >> >>+ if(local == NULL) >>+ { >>+ PERROR("Could not alloc memory for Xen hypercall"); >>+ goto out; >>+ } >> domctl.cmd = XEN_DOMCTL_setvcpuaffinity; >> domctl.domain = (domid_t)domid; >> domctl.u.vcpuaffinity.vcpu = vcpu; >> >>- bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); >>+ bitmap_64_to_byte(local, cpumap, cpusize * 8); >> >> set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); >> >>- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >>+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; >> >>- if ( lock_pages(local, sizeof(local)) != 0 ) >>+ if ( lock_pages(local, cpusize) != 0 ) >> { >> PERROR("Could not lock memory for Xen hypercall"); >> goto out; >>@@ -129,9 +134,10 @@ >> >> ret = do_domctl(xc_handle, &domctl); >> >>- unlock_pages(local, sizeof(local)); >>+ unlock_pages(local, cpusize); >> >> out: >>+ free(local); >> return ret; >> } >> >>@@ -139,18 +145,25 @@ >> int xc_vcpu_getaffinity(int xc_handle, >> uint32_t domid, >> int vcpu, >>- uint64_t *cpumap) >>+ uint64_t *cpumap, int cpusize) >> { >> DECLARE_DOMCTL; >> int ret = -1; >>- uint8_t local[sizeof (cpumap)]; >>+ uint8_t * local = malloc(cpusize); >>+ >>+ if(local == NULL) >>+ { >>+ PERROR("Could not alloc memory for Xen hypercall"); >>+ goto out; >>+ } >> >> domctl.cmd = XEN_DOMCTL_getvcpuaffinity; >> domctl.domain = (domid_t)domid; >> domctl.u.vcpuaffinity.vcpu = vcpu; >> >>+ >> set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); >>- domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >>+ domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; >> >> if ( lock_pages(local, sizeof(local)) != 0 ) >> { >>@@ -161,8 +174,9 @@ >> ret = do_domctl(xc_handle, &domctl); >> >> unlock_pages(local, sizeof (local)); >>- bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); >>- out: >>+ bitmap_byte_to_64(cpumap, local, cpusize * 8); >>+out: >>+ free(local); >> return ret; >> } >> >>diff -r 8b269215464b tools/libxc/xenctrl.h >>--- a/tools/libxc/xenctrl.hWed Mar 10 14:01:32 2010 +0800 >>+++ b/tools/libxc/xenctrl.hThu Mar 18 11:37:55 2010 +0800 >>@@ -310,11 +310,13 @@ >> int xc_vcpu_setaffinity(int xc_handle, >> uint32_t domid, >> int vcpu, >>- uint64_t cpumap); >>+ uint64_t *cpumap, >>+ int cpusize); >> int xc_vcpu_getaffinity(int xc_handle, >> uint32_t domid, >> int vcpu, >>- uint64_t *cpumap); >>+ uint64_t *cpumap, >>+ int cpusize); >> >> /** >> * This function will return information about one or more domains. 
It is >>diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c >>--- a/tools/python/xen/lowlevel/xc/xc.cWed Mar 10 14:01:32 2010 +0800 >>+++ b/tools/python/xen/lowlevel/xc/xc.cThu Mar 18 11:37:55 2010 +0800 >>@@ -217,8 +217,12 @@ >> { >> uint32_t dom; >> int vcpu = 0, i; >>- uint64_t cpumap = ~0ULL; >>+ uint64_t *cpumap; >> PyObject *cpulist = NULL; >>+ int nr_cpus, size; >>+ xc_physinfo_t info; >>+ xc_cpu_to_node_t map[1]; >>+ uint64_t cpumap_size = sizeof(*cpumap); >> >> static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL }; >> >>@@ -226,26 +230,38 @@ >> &dom, &vcpu, &cpulist) ) >> return NULL; >> >>+ set_xen_guest_handle(info.cpu_to_node, map); >>+ info.max_cpu_id = 1; >>+ if ( xc_physinfo(self->xc_handle, &info) != 0 ) >>+ return pyxc_error_to_exception(); >>+ >>+ nr_cpus = info.nr_cpus; >>+ >>+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >>+ cpumap = malloc(cpumap_size * size); >>+ if(cpumap == NULL) >>+ return pyxc_error_to_exception(); >>+ >> if ( (cpulist != NULL) && PyList_Check(cpulist) ) >> { >>- cpumap = 0ULL; >>+ for ( i = 0; i < size; i++) >>+ { >>+ cpumap[i] = 0ULL; >>+ } >> for ( i = 0; i < PyList_Size(cpulist); i++ ) >> { >> long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i)); >>- if ( cpu >= 64 ) >>- { >>- errno = EINVAL; >>- PyErr_SetFromErrno(xc_error_obj); >>- return NULL; >>- } >>- cpumap |= (uint64_t)1 << cpu; >>+ cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu % >>(cpumap_size * 8)); >> } >> } >> >>- if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 ) >>+ if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap, size * >>cpumap_size) != 0 ) >>+ { >>+ free(cpumap); >> return pyxc_error_to_exception(); >>- >>+ } >> Py_INCREF(zero); >>+ free(cpumap); >> return zero; >> } >> >>@@ -365,7 +381,11 @@ >> uint32_t dom, vcpu = 0; >> xc_vcpuinfo_t info; >> int rc, i; >>- uint64_t cpumap; >>+ uint64_t *cpumap; >>+ int nr_cpus, size; >>+ xc_physinfo_t pinfo = { 0 }; >>+ xc_cpu_to_node_t map[1]; >>+ uint64_t cpumap_size = sizeof(*cpumap); >> >> static char *kwd_list[] = { "domid", "vcpu", NULL }; >> >>@@ -373,12 +393,25 @@ >> &dom, &vcpu) ) >> return NULL; >> >>+ set_xen_guest_handle(pinfo.cpu_to_node, map); >>+ pinfo.max_cpu_id = 1; >>+ if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) >>+ return pyxc_error_to_exception(); >>+ nr_cpus = pinfo.nr_cpus; >> rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info); >> if ( rc < 0 ) >> return pyxc_error_to_exception(); >>- rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap); >>+ size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >>+ >>+ if((cpumap = malloc(cpumap_size * size)) == NULL) >>+ return pyxc_error_to_exception(); >>+ >>+ rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, >>cpumap_size * size); >> if ( rc < 0 ) >>+ { >>+ free(cpumap); >> return pyxc_error_to_exception(); >>+ } >> >> info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}", >> "online", info.online, >>@@ -386,19 +419,19 @@ >> "running", info.running, >> "cpu_time", info.cpu_time, >> "cpu", info.cpu); >>- >> cpulist = PyList_New(0); >>- for ( i = 0; cpumap != 0; i++ ) >>+ for ( i = 0; i < nr_cpus; i++ ) >> { >>- if ( cpumap & 1 ) { >>+ if (*(cpumap + i / (cpumap_size * 8)) & 1 ) { >> PyObject *pyint = PyInt_FromLong(i); >> PyList_Append(cpulist, pyint); >> Py_DECREF(pyint); >> } >>- cpumap >>= 1; >>+ cpumap[i / (cpumap_size * 8)] >>= 1; >> } >> PyDict_SetItemString(info_dict, "cpumap", cpulist); >> Py_DECREF(cpulist); >>+ free(cpumap); >> return info_dict; >> } >> >> 
http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch
>>adjust_vcpuaffinity_more_cpu.patch
>>
>>--
>>View this message in context: http://old.nabble.com/-Patch--adjust-the-cpu-affinity-to-more-than-64-cpus-tp27928229p27941371.html
>>Sent from the Xen - Dev mailing list archive at Nabble.com.

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
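One detail worth spelling out from the libxc side of the patch being tested here: the domctl takes a byte-granular bitmap, while the tools keep the map as an array of uint64_t, so xc_vcpu_setaffinity()/xc_vcpu_getaffinity() repack the bits with the existing bitmap_64_to_byte()/bitmap_byte_to_64() helpers. The two functions below are only a sketch of that repacking under the assumption of a little-endian bit layout; they are stand-ins with invented names, not the real libxc helpers.

#include <stdint.h>

/* uint64_t words -> byte bitmap (the xc_vcpu_setaffinity direction).
 * The caller must have zeroed bp beforehand. */
static void words_to_bytes(uint8_t *bp, const uint64_t *lp, int nbits)
{
    int bit;

    for (bit = 0; bit < nbits; bit++)
        if (lp[bit / 64] & ((uint64_t)1 << (bit % 64)))
            bp[bit / 8] |= (uint8_t)(1 << (bit % 8));
}

/* byte bitmap -> uint64_t words (the xc_vcpu_getaffinity direction).
 * The caller must have zeroed lp beforehand. */
static void bytes_to_words(uint64_t *lp, const uint8_t *bp, int nbits)
{
    int bit;

    for (bit = 0; bit < nbits; bit++)
        if (bp[bit / 8] & (1 << (bit % 8)))
            lp[bit / 64] |= (uint64_t)1 << (bit % 64);
}

This is also why the patch passes cpusize * 8 as the bit count: cpusize is a byte count, while the domctl's nr_cpus field counts bits.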
Liu, Jinsong
2010-Mar-31 08:26 UTC
RE: [Xen-devel] [Patch] adjust the cpu-affinity to more than 64 cpus
Yeah, now cpu affinity and pin are fine under all IA platform, with no 64 limit :) Thanks James for original patch, and thanks Masaki confirm it! Jinsong Masaki Kanno wrote:> Hi Jinsong, > > Thank you for fixing the issue. > I confirmed that your patch works on both ia32pae and ia64. > > ia32pae: > # xm vcpu-list > Name ID VCPU CPU State Time(s) > CPU Affinity Domain-0 0 0 1 > r-- 33.9 any cpu Domain-0 0 1 > 0 -b- 22.5 any cpu # xm vcpu-pin Domain-0 0 0 > # xm vcpu-list > Name ID VCPU CPU State Time(s) > CPU Affinity Domain-0 0 0 0 > -b- 34.4 0 Domain-0 0 1 1 > r-- 23.2 any cpu > > ia64: > # xm vcpu-list > Name ID VCPU CPU State Time(s) > CPU Affinity Domain-0 0 0 0 > r-- 23.7 any cpu Domain-0 0 1 > 1 -b- 10.6 any cpu Domain-0 0 > 2 2 -b- 6.8 any cpu Domain-0 > 0 3 3 -b- 9.7 any cpu # xm vcpu-pin Domain-0 0 0 > # xm vcpu-list > Name ID VCPU CPU State Time(s) > CPU Affinity Domain-0 0 0 0 > -b- 24.0 0 Domain-0 0 1 1 > -b- 10.8 any cpu Domain-0 0 2 > 2 r-- 6.8 any cpu Domain-0 0 > 3 3 -b- 9.8 any cpu > > Best regards, > Kan > > Wed, 31 Mar 2010 02:23:47 +0800, "Liu, Jinsong" wrote: > >> James and Masaki, >> >> The issue Masaki said does exist, it appear under ia32pae. >> A patch (c/s 21087) is to fix the bug. >> >> Thanks, >> Jinsong >> >> ________________________________ >> From: xen-devel-bounces@lists.xensource.com >> [mailto:xen-devel-bounces@lists .xensource.com] On Behalf Of James >> Song >> Sent: Friday, March 19, 2010 5:10 PM >> To: Masaki Kanno; xen-devel@lists.xensource.com >> Subject: Re: [Xen-devel] [Patch] adjust the cpu-affinity to more >> than 64 cpus >> >> I update upstream code to c/s 20145. This patch works fine and I >> haven''t meet this problem. >> >> linux-2j72:/home # xm vcpu-pin 0 1 0 >> linux-2j72:/home # xm vcpu-list 0 >> Name ID VCPU CPU State Time(s) >> CPU Affinity Domain-0 0 0 0 >> --- 23.4 0 Domain-0 0 1 0 >> r-- 19.2 0 >> >> >> B.T.W, there are something wrong when compile c/s 21046! >> >> -James >> >>>>> Masaki Kanno <kanno.masaki@jp.fujitsu.com> 2010-3-19 11:14 >>> Hi >>>>> James, >> >> I tested xm vcpu-pin command with xen-unstable changeset 21044. >> The command failed as follows. Also Time(s) values by xm vcpu-list >> command were strange. The values were same in all VCPUs. >> >> # xm vcpu-list >> Name ID VCPU CPU State Time(s) >> CPU Affinity Domain-0 0 0 1 >> r-- 116.5 any cpu Domain-0 0 1 >> 1 r-- 116.5 any cpu # xm vcpu-pin Domain-0 0 0 Error: Cannot >> pin vcpu: 0 to cpu: [0] - (22, ''Invalid argument'') >> Usage: xm vcpu-pin <Domain> <VCPU|all> <CPUs|all> >> >> Set which CPUs a VCPU can use. >> # xm vcpu-list >> Name ID VCPU CPU State Time(s) >> CPU Affinity Domain-0 0 0 1 >> r-- 117.0 any cpu Domain-0 0 1 >> 1 r-- 117.0 any cpu >> >> >> I reverted changeset 21044 and 21040, and then I tested xm vcpu-pin >> command again. The command succeeded as follows. >> >> # xm vcpu-list >> Name ID VCPU CPU State Time(s) >> CPU Affinity Domain-0 0 0 0 >> r-- 60.8 any cpu Domain-0 0 1 >> 1 -b- 42.8 any cpu # xm vcpu-pin Domain-0 0 0 # xm vcpu-list >> Name ID VCPU CPU State Time(s) >> CPU Affinity Domain-0 0 0 0 >> r-- 61.6 0 Domain-0 0 1 1 >> -b- 43.2 any cpu >> >> Best regards, >> Kan >> >> Wed, 17 Mar 2010 20:41:22 -0700 (PDT), "James (song wei)" wrote: >> >>> >>> Keir, could you take a look at this issue. 
>>> New Patch for this issue: >>> Singed-off-by: James (Song Wei) <jsong@novell.com> >>> >>> diff -r 8b269215464b tools/libxc/xc_domain.c >>> --- a/tools/libxc/xc_domain.cWed Mar 10 14:01:32 2010 +0800 >>> +++ b/tools/libxc/xc_domain.cThu Mar 18 11:37:55 2010 +0800 @@ >>> -105,23 +105,28 @@ int xc_vcpu_setaffinity(int xc_handle, >>> uint32_t domid, >>> int vcpu, >>> - uint64_t cpumap) >>> + uint64_t *cpumap, int cpusize) { >>> DECLARE_DOMCTL; >>> int ret = -1; >>> - uint8_t local[sizeof (cpumap)]; >>> + uint8_t *local = malloc(cpusize); >>> >>> + if(local == NULL) >>> + { >>> + PERROR("Could not alloc memory for Xen hypercall"); + >>> goto out; + } >>> domctl.cmd = XEN_DOMCTL_setvcpuaffinity; >>> domctl.domain = (domid_t)domid; >>> domctl.u.vcpuaffinity.vcpu = vcpu; >>> >>> - bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); >>> + bitmap_64_to_byte(local, cpumap, cpusize * 8); >>> >>> set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, >>> local); >>> >>> - domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; >>> + domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; >>> >>> - if ( lock_pages(local, sizeof(local)) != 0 ) >>> + if ( lock_pages(local, cpusize) != 0 ) >>> { >>> PERROR("Could not lock memory for Xen hypercall"); >>> goto out; @@ -129,9 +134,10 @@ >>> >>> ret = do_domctl(xc_handle, &domctl); >>> >>> - unlock_pages(local, sizeof(local)); >>> + unlock_pages(local, cpusize); >>> >>> out: >>> + free(local); >>> return ret; >>> } >>> >>> @@ -139,18 +145,25 @@ >>> int xc_vcpu_getaffinity(int xc_handle, >>> uint32_t domid, >>> int vcpu, >>> - uint64_t *cpumap) >>> + uint64_t *cpumap, int cpusize) { >>> DECLARE_DOMCTL; >>> int ret = -1; >>> - uint8_t local[sizeof (cpumap)]; >>> + uint8_t * local = malloc(cpusize); >>> + >>> + if(local == NULL) >>> + { >>> + PERROR("Could not alloc memory for Xen hypercall"); + >>> goto out; + } >>> >>> domctl.cmd = XEN_DOMCTL_getvcpuaffinity; >>> domctl.domain = (domid_t)domid; >>> domctl.u.vcpuaffinity.vcpu = vcpu; >>> >>> + >>> set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, >>> local); - domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) >>> * 8; + domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8; >>> >>> if ( lock_pages(local, sizeof(local)) != 0 ) >>> { >>> @@ -161,8 +174,9 @@ >>> ret = do_domctl(xc_handle, &domctl); >>> >>> unlock_pages(local, sizeof (local)); >>> - bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); >>> - out: >>> + bitmap_byte_to_64(cpumap, local, cpusize * 8); +out: >>> + free(local); >>> return ret; >>> } >>> >>> diff -r 8b269215464b tools/libxc/xenctrl.h >>> --- a/tools/libxc/xenctrl.hWed Mar 10 14:01:32 2010 +0800 >>> +++ b/tools/libxc/xenctrl.hThu Mar 18 11:37:55 2010 +0800 @@ >>> -310,11 +310,13 @@ int xc_vcpu_setaffinity(int xc_handle, >>> uint32_t domid, >>> int vcpu, >>> - uint64_t cpumap); >>> + uint64_t *cpumap, >>> + int cpusize); >>> int xc_vcpu_getaffinity(int xc_handle, >>> uint32_t domid, >>> int vcpu, >>> - uint64_t *cpumap); >>> + uint64_t *cpumap, >>> + int cpusize); >>> >>> /** >>> * This function will return information about one or more domains. 
>>> It is diff -r 8b269215464b tools/python/xen/lowlevel/xc/xc.c >>> --- a/tools/python/xen/lowlevel/xc/xc.cWed Mar 10 14:01:32 2010 >>> +0800 +++ b/tools/python/xen/lowlevel/xc/xc.cThu Mar 18 11:37:55 >>> 2010 +0800 @@ -217,8 +217,12 @@ { >>> uint32_t dom; >>> int vcpu = 0, i; >>> - uint64_t cpumap = ~0ULL; >>> + uint64_t *cpumap; >>> PyObject *cpulist = NULL; >>> + int nr_cpus, size; >>> + xc_physinfo_t info; >>> + xc_cpu_to_node_t map[1]; >>> + uint64_t cpumap_size = sizeof(*cpumap); >>> >>> static char *kwd_list[] = { "domid", "vcpu", "cpumap", NULL }; >>> >>> @@ -226,26 +230,38 @@ >>> &dom, &vcpu, &cpulist) ) >>> return NULL; >>> >>> + set_xen_guest_handle(info.cpu_to_node, map); >>> + info.max_cpu_id = 1; >>> + if ( xc_physinfo(self->xc_handle, &info) != 0 ) >>> + return pyxc_error_to_exception(); >>> + >>> + nr_cpus = info.nr_cpus; >>> + >>> + size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); >>> + cpumap = malloc(cpumap_size * size); >>> + if(cpumap == NULL) >>> + return pyxc_error_to_exception(); >>> + >>> if ( (cpulist != NULL) && PyList_Check(cpulist) ) { >>> - cpumap = 0ULL; >>> + for ( i = 0; i < size; i++) >>> + { >>> + cpumap[i] = 0ULL; >>> + } >>> for ( i = 0; i < PyList_Size(cpulist); i++ ) { >>> long cpu = PyInt_AsLong(PyList_GetItem(cpulist, i)); >>> - if ( cpu >= 64 ) >>> - { >>> - errno = EINVAL; >>> - PyErr_SetFromErrno(xc_error_obj); >>> - return NULL; >>> - } >>> - cpumap |= (uint64_t)1 << cpu; >>> + cpumap[cpu / (cpumap_size * 8)] |= (uint64_t)1 << (cpu >>> % (cpumap_size * 8)); } >>> } >>> >>> - if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) >>> != 0 ) + if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, >>> cpumap, size * cpumap_size) != 0 ) + { >>> + free(cpumap); >>> return pyxc_error_to_exception(); >>> - >>> + } >>> Py_INCREF(zero); >>> + free(cpumap); >>> return zero; >>> } >>> >>> @@ -365,7 +381,11 @@ >>> uint32_t dom, vcpu = 0; >>> xc_vcpuinfo_t info; >>> int rc, i; >>> - uint64_t cpumap; >>> + uint64_t *cpumap; >>> + int nr_cpus, size; >>> + xc_physinfo_t pinfo = { 0 }; >>> + xc_cpu_to_node_t map[1]; >>> + uint64_t cpumap_size = sizeof(*cpumap); >>> >>> static char *kwd_list[] = { "domid", "vcpu", NULL }; >>> >>> @@ -373,12 +393,25 @@ >>> &dom, &vcpu) ) return >>> NULL; >>> >>> + set_xen_guest_handle(pinfo.cpu_to_node, map); >>> + pinfo.max_cpu_id = 1; >>> + if ( xc_physinfo(self->xc_handle, &pinfo) != 0 ) >>> + return pyxc_error_to_exception(); >>> + nr_cpus = pinfo.nr_cpus; >>> rc = xc_vcpu_getinfo(self->xc_handle, dom, vcpu, &info); if >>> ( rc < 0 ) return pyxc_error_to_exception(); >>> - rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, &cpumap); >>> + size = (nr_cpus + cpumap_size * 8 - 1)/ (cpumap_size * 8); + >>> + if((cpumap = malloc(cpumap_size * size)) == NULL) >>> + return pyxc_error_to_exception(); >>> + >>> + rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap, >>> cpumap_size * size); if ( rc < 0 ) >>> + { >>> + free(cpumap); >>> return pyxc_error_to_exception(); >>> + } >>> >>> info_dict = Py_BuildValue("{s:i,s:i,s:i,s:L,s:i}", >>> "online", info.online, @@ -386,19 >>> +419,19 @@ "running", info.running, >>> "cpu_time", info.cpu_time, >>> "cpu", info.cpu); - >>> cpulist = PyList_New(0); >>> - for ( i = 0; cpumap != 0; i++ ) >>> + for ( i = 0; i < nr_cpus; i++ ) >>> { >>> - if ( cpumap & 1 ) { >>> + if (*(cpumap + i / (cpumap_size * 8)) & 1 ) { >>> PyObject *pyint = PyInt_FromLong(i); >>> PyList_Append(cpulist, pyint); >>> Py_DECREF(pyint); >>> } >>> - cpumap >>= 1; >>> + cpumap[i / (cpumap_size * 8)] >>= 
1; >>> } >>> PyDict_SetItemString(info_dict, "cpumap", cpulist); >>> Py_DECREF(cpulist); + free(cpumap); >>> return info_dict; >>> } >>>
>>> http://old.nabble.com/file/p27941371/adjust_vcpuaffinity_more_cpu.patch
>>> adjust_vcpuaffinity_more_cpu.patch
>>>
>>> --
>>> View this message in context: http://old.nabble.com/-Patch--adjust-the-cpu-affinity-to-more-than-64-cpus-tp27928229p27941371.html
>>> Sent from the Xen - Dev mailing list archive at Nabble.com.

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
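Finally, on the read side of the interface exercised by these tests: after xc_vcpu_getaffinity() fills the word array, turning it back into a list of CPU numbers is a scan over nr_cpus bits, which is what the reworked pyxc_vcpu_getinfo() loop does before building the Python list. The sketch below is illustrative only (print_affinity is an invented name); it tests bits in place rather than shifting each word, whereas the destructive shift in the patch is harmless there only because the map is freed right afterwards.

#include <stdint.h>
#include <stdio.h>

/* Print every CPU whose bit is set in a multi-word affinity map. */
static void print_affinity(const uint64_t *cpumap, int nr_cpus)
{
    int cpu;

    for (cpu = 0; cpu < nr_cpus; cpu++)
        if (cpumap[cpu / 64] & ((uint64_t)1 << (cpu % 64)))
            printf("cpu %d\n", cpu);
}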