This patch modifies the increase_reservation and populate_physmap hypercalls used to allocate memory to a domain. With numa support enabled we balance the allocation by using the domain's vcpu placement as a method of distributing the pages locally to the physical cpu the vcpus will run upon. -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx (512) 838-9253 T/L: 678-9253 ryanh@us.ibm.com diffstat output: memory.c | 32 ++++++++++++++++++++++++++++++++ 1 files changed, 32 insertions(+) Signed-off-by: Ryan Harper <ryanh@us.ibm.com> Signed-off-by: Ryan Grimm <grimm@us.ibm.com> --- # HG changeset patch # User Ryan Harper <ryanh@us.ibm.com> # Node ID eda82207d4bf72df7acd43bfb937fcc39313bd0d # Parent e258ba216530fc45a74da2383d07e60f97974bdc This patch modifies the increase_reservation and populate_physmap hypercalls used to allocate memory to a domain. With numa support enabled we balance the allocation by using the domain's vcpu placement as a method of distributing the pages locally to the physical cpu the vcpus will run upon. 
Signed-off-by: Ryan Harper <ryanh@us.ibm.com> Signed-off-by: Ryan Grimm <grimm@us.ibm.com diff -r e258ba216530 -r eda82207d4bf xen/common/memory.c --- a/xen/common/memory.c Mon May 1 21:40:13 2006 +++ b/xen/common/memory.c Mon May 1 21:42:00 2006 @@ -40,6 +40,14 @@ struct page_info *page; unsigned long i, mfn; +#ifdef CONFIG_NUMA + int max_vcpu_id = 0; + struct vcpu *v; + + for_each_vcpu (d, v) + if ( v->vcpu_id > max_vcpu_id ) + max_vcpu_id = v->vcpu_id; +#endif if ( !guest_handle_is_null(extent_list) && !guest_handle_okay(extent_list, nr_extents) ) return 0; @@ -56,8 +64,16 @@ return i; } +#ifdef CONFIG_NUMA + /* spread each allocation across the total number of + * vcpus allocated to this domain */ + if ( unlikely((page = __alloc_domheap_pages( d, + (d->vcpu[i % (max_vcpu_id+1)])->processor, + extent_order, flags )) == NULL) ) +#else if ( unlikely((page = alloc_domheap_pages( d, extent_order, flags)) == NULL) ) +#endif { DPRINTK("Could not allocate order=%d extent: " "id=%d flags=%x (%ld of %d)\n", @@ -89,6 +105,14 @@ struct page_info *page; unsigned long i, j, gpfn, mfn; +#ifdef CONFIG_NUMA + int max_vcpu_id = 0; + struct vcpu *v; + + for_each_vcpu (d, v) + if ( v->vcpu_id > max_vcpu_id ) + max_vcpu_id = v->vcpu_id; +#endif if ( !guest_handle_okay(extent_list, nr_extents) ) return 0; @@ -107,8 +131,16 @@ if ( unlikely(__copy_from_guest_offset(&gpfn, extent_list, i, 1)) ) goto out; +#ifdef CONFIG_NUMA + /* spread each allocation across the total number of + * vcpus allocated to this domain */ + if ( unlikely((page = __alloc_domheap_pages( d, + (d->vcpu[i % (max_vcpu_id+1)])->processor, + extent_order, flags )) == NULL) ) +#else if ( unlikely((page = alloc_domheap_pages( d, extent_order, flags)) == NULL) ) +#endif { DPRINTK("Could not allocate order=%d extent: " "id=%d flags=%x (%ld of %d)\n", _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Ryan Harper
2006-May-12 15:12 UTC
[Xen-devel] [PATCH 3/6][RESEND] xen: Add NUMA support to Xen
* Ryan Harper <ryanh@us.ibm.com> [2006-05-01 17:00]:
> This patch modifies the increase_reservation and populate_physmap > hypercalls used to allocate memory to a domain. With numa support > enabled we balance the allocation by using the domain's vcpu placement > as a method of distributing the pages locally to the physical cpu the > vcpus will run upon.

Updated to remove CONFIG_NUMA ifdefs. -- Ryan Harper Software Engineer; Linux Technology Center IBM Corp., Austin, Tx (512) 838-9253 T/L: 678-9253 ryanh@us.ibm.com diffstat output: memory.c | 26 ++++++++++++++++++++++---- 1 files changed, 22 insertions(+), 4 deletions(-) Signed-off-by: Ryan Harper <ryanh@us.ibm.com> Signed-off-by: Ryan Grimm <grimm@us.ibm.com> --- # HG changeset patch # User Ryan Harper <ryanh@us.ibm.com> # Node ID b92d38d9be2808b73dd87e0f3d61858540dc8f69 # Parent 15fba7ca6975a8aef6cb5d290767aeacc7304cd8 This patch modifies the increase_reservation and populate_physmap hypercalls used to allocate memory to a domain. We balance the allocation by using the domain's vcpu placement as a method of distributing the pages locally to the physical cpu the vcpus will run upon. 
diff -r 15fba7ca6975 -r b92d38d9be28 xen/common/memory.c --- a/xen/common/memory.c Thu May 11 20:48:10 2006 +++ b/xen/common/memory.c Thu May 11 20:49:50 2006 @@ -39,6 +39,12 @@ { struct page_info *page; unsigned long i, mfn; + int max_vcpu_id = 0; + struct vcpu *v; + + for_each_vcpu (d, v) + if ( v->vcpu_id > max_vcpu_id ) + max_vcpu_id = v->vcpu_id; if ( !guest_handle_is_null(extent_list) && !guest_handle_okay(extent_list, nr_extents) ) @@ -56,8 +62,11 @@ return i; } - if ( unlikely((page = alloc_domheap_pages( - d, extent_order, flags)) == NULL) ) + /* spread each allocation across the total number of + * vcpus allocated to this domain */ + if ( unlikely((page = __alloc_domheap_pages( d, + (d->vcpu[i % (max_vcpu_id+1)])->processor, + extent_order, flags )) == NULL) ) { DPRINTK("Could not allocate order=%d extent: " "id=%d flags=%x (%ld of %d)\n", @@ -88,6 +97,12 @@ { struct page_info *page; unsigned long i, j, gpfn, mfn; + int max_vcpu_id = 0; + struct vcpu *v; + + for_each_vcpu (d, v) + if ( v->vcpu_id > max_vcpu_id ) + max_vcpu_id = v->vcpu_id; if ( !guest_handle_okay(extent_list, nr_extents) ) return 0; @@ -107,8 +122,11 @@ if ( unlikely(__copy_from_guest_offset(&gpfn, extent_list, i, 1)) ) goto out; - if ( unlikely((page = alloc_domheap_pages( - d, extent_order, flags)) == NULL) ) + /* spread each allocation across the total number of + * vcpus allocated to this domain */ + if ( unlikely((page = __alloc_domheap_pages( d, + (d->vcpu[i % (max_vcpu_id+1)])->processor, + extent_order, flags )) == NULL) ) { DPRINTK("Could not allocate order=%d extent: " "id=%d flags=%x (%ld of %d)\n", _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel