Dan Magenheimer
2012-Nov-28 21:50 UTC
[PATCH] xen: centralize accounting for domain tot_pages
xen: centralize accounting for domain tot_pages
Provide and use a common function for all adjustments to a
domain's tot_pages counter in anticipation of future and/or
out-of-tree patches that must adjust related counters
atomically.
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
 arch/x86/mm.c             |  4 ++--
 arch/x86/mm/mem_sharing.c |  4 ++--
 common/grant_table.c      |  2 +-
 common/memory.c           |  2 +-
 common/page_alloc.c       | 10 ++++++++--
 include/xen/mm.h          |  2 ++
 6 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index ab94b02..3887ca6 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3842,7 +3842,7 @@ int donate_page(
{
if ( d->tot_pages >= d->max_pages )
goto fail;
- d->tot_pages++;
+ domain_adjust_tot_pages(d, 1);
}
page->count_info = PGC_allocated | 1;
@@ -3892,7 +3892,7 @@ int steal_page(
} while ( (y = cmpxchg(&page->count_info, x, x | 1)) != x );
/* Unlink from original owner. */
- if ( !(memflags & MEMF_no_refcount) && !--d->tot_pages )
+ if ( !(memflags & MEMF_no_refcount) && !domain_adjust_tot_pages(d, -1) )
drop_dom_ref = 1;
page_list_del(page, &d->page_list);
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 5103285..e91aac5 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -639,7 +639,7 @@ static int page_make_sharable(struct domain *d,
}
page_set_owner(page, dom_cow);
- d->tot_pages--;
+ domain_adjust_tot_pages(d, -1);
drop_dom_ref = (d->tot_pages == 0);
page_list_del(page, &d->page_list);
spin_unlock(&d->page_alloc_lock);
@@ -680,7 +680,7 @@ static int page_make_private(struct domain *d, struct page_info *page)
ASSERT(page_get_owner(page) == dom_cow);
page_set_owner(page, d);
- if ( d->tot_pages++ == 0 )
+ if ( domain_adjust_tot_pages(d, 1) == 1 )
get_domain(d);
page_list_add_tail(page, &d->page_list);
spin_unlock(&d->page_alloc_lock);
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 7912769..ca8d861 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1656,7 +1656,7 @@ gnttab_transfer(
}
/* Okay, add the page to 'e'. */
- if ( unlikely(e->tot_pages++ == 0) )
+ if ( unlikely(domain_adjust_tot_pages(e, 1) == 1) )
get_knownalive_domain(e);
page_list_add_tail(page, &e->page_list);
page_set_owner(page, e);
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 83e2666..9842ea9 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -454,7 +454,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
(j * (1UL << exch.out.extent_order)));
spin_lock(&d->page_alloc_lock);
- d->tot_pages -= dec_count;
+ domain_adjust_tot_pages(d, -dec_count);
drop_dom_ref = (dec_count && !d->tot_pages);
spin_unlock(&d->page_alloc_lock);
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 15ebc66..e273bb7 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -239,6 +239,12 @@ static long midsize_alloc_zone_pages;
static DEFINE_SPINLOCK(heap_lock);
+unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
+{
+ ASSERT(spin_is_locked(&d->page_alloc_lock));
+ return d->tot_pages += pages;
+}
+
static unsigned long init_node_heap(int node, unsigned long mfn,
unsigned long nr, bool_t *use_tail)
{
@@ -1291,7 +1297,7 @@ int assign_pages(
if ( unlikely(d->tot_pages == 0) )
get_knownalive_domain(d);
- d->tot_pages += 1 << order;
+ domain_adjust_tot_pages(d, 1 << order);
}
for ( i = 0; i < (1 << order); i++ )
@@ -1375,7 +1381,7 @@ void free_domheap_pages(struct page_info *pg, unsigned int order)
page_list_del2(&pg[i], &d->page_list, &d->arch.relmem_list);
}
- d->tot_pages -= 1 << order;
+ domain_adjust_tot_pages(d, -(1 << order));
drop_dom_ref = (d->tot_pages == 0);
spin_unlock_recursive(&d->page_alloc_lock);
diff --git a/xen/include/xen/mm.h b/xen/include/xen/mm.h
index 64a0cc1..00b1915 100644
--- a/xen/include/xen/mm.h
+++ b/xen/include/xen/mm.h
@@ -48,6 +48,8 @@ void free_xenheap_pages(void *v, unsigned int order);
#define alloc_xenheap_page() (alloc_xenheap_pages(0,0))
#define free_xenheap_page(v) (free_xenheap_pages(v,0))
+unsigned long domain_adjust_tot_pages(struct domain *d, long pages);
+
/* Domain suballocator. These functions are *not* interrupt-safe.*/
void init_domheap_pages(paddr_t ps, paddr_t pe);
struct page_info *alloc_domheap_pages(
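
To make the stated motivation concrete: once every tot_pages update funnels
through domain_adjust_tot_pages(), a related per-domain counter can be
adjusted under the same page_alloc_lock. The sketch below only illustrates
the idea; the outstanding_claims field is hypothetical and is not part of
this patch:

    /* Sketch only: "outstanding_claims" is a hypothetical field used
     * to illustrate a related counter kept in step with tot_pages. */
    unsigned long domain_adjust_tot_pages(struct domain *d, long pages)
    {
        ASSERT(spin_is_locked(&d->page_alloc_lock));

        /* When the domain gains pages, consume any outstanding claim
         * by the same amount, so both counters move atomically with
         * respect to d->page_alloc_lock. */
        if ( pages > 0 && d->outstanding_claims )
        {
            unsigned long consume = (unsigned long)pages;

            if ( consume > d->outstanding_claims )
                consume = d->outstanding_claims;
            d->outstanding_claims -= consume;
        }

        return d->tot_pages += pages;
    }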
Dan Magenheimer
2012-Dec-06 17:24 UTC
Re: [PATCH] xen: centralize accounting for domain tot_pages
ping?

> -----Original Message-----
> From: Dan Magenheimer
> Sent: Wednesday, November 28, 2012 2:50 PM
> To: Keir Fraser; Jan Beulich
> Cc: xen-devel@lists.xen.org; Konrad Wilk; Zhigang Wang
> Subject: [PATCH] xen: centralize accounting for domain tot_pages
>
> [snip: full patch quoted above]
Keir Fraser
2012-Dec-06 18:27 UTC
Re: [PATCH] xen: centralize accounting for domain tot_pages
I will check these two patches in.

 -- Keir

On 06/12/2012 17:24, "Dan Magenheimer" <dan.magenheimer@oracle.com> wrote:

> ping?
>
> [snip: full patch quoted above]
Keir Fraser
2012-Dec-07 15:13 UTC
Re: [PATCH] xen: centralize accounting for domain tot_pages
On 28/11/2012 21:50, "Dan Magenheimer" <dan.magenheimer@oracle.com> wrote:

> xen: centralize accounting for domain tot_pages
>
> Provide and use a common function for all adjustments to a
> domain's tot_pages counter in anticipation of future and/or
> out-of-tree patches that must adjust related counters
> atomically.
>
> Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>

Applied.

 -- Keir

> [snip: remainder of patch quoted above]
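
For reference, the call-site pattern the helper standardizes, condensed into
a standalone sketch (drawn from the steal_page and free_domheap_pages hunks
above, not verbatim from either): the new total returned by the helper
decides whether the final domain reference should be dropped, and the drop
happens only after the lock is released.

    int drop_dom_ref = 0;

    spin_lock(&d->page_alloc_lock);
    /* The helper returns the new tot_pages; zero means the domain
     * just lost its last page. */
    if ( !domain_adjust_tot_pages(d, -1) )
        drop_dom_ref = 1;
    spin_unlock(&d->page_alloc_lock);

    if ( drop_dom_ref )
        put_domain(d);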