These patches begin to address error handling when the shadow/hap pool can't allocate any more p2m pages.

Patch 1 is a bit of preliminary tidying.
Patch 2 prints a warning on error so it can be diagnosed.
Patches 3-5 add checks to direct callers of the p2m allocation functions.

With these patches applied, pool allocation errors will be propagated as far as the set_p2m_entry() interface.  Unfortunately there are many callers of that interface that don't check the return code -- PoD and mem_paging are the major culprits, but there are other cases where the error is discarded further up the stack (e.g. some callers of set_mmio_p2m_entry()).  Chasing those up is work for another day.

Cheers,

Tim.
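To make that deferred follow-up concrete, below is a minimal sketch of a caller that propagates a failed set_p2m_entry() instead of discarding it.  It assumes only the return convention visible in the patches in this thread (non-zero on success, 0 on failure); the wrapper function, its name, its arguments and the choice of -ENOMEM are illustrative and are not code from this series.

    /* Illustrative sketch only -- not part of this series.  Assumes the
     * convention used throughout these patches: set_p2m_entry() returns
     * non-zero on success and 0 on failure (e.g. when the shadow/hap
     * pool has no p2m pages left). */
    static int example_map_one_page(struct domain *d, unsigned long gfn,
                                    mfn_t mfn)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);
        int ok;

        gfn_lock(p2m, gfn, 0);
        ok = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
                           p2m_ram_rw, p2m->default_access);
        gfn_unlock(p2m, gfn, 0);

        /* Propagate the pool-exhaustion error instead of discarding it. */
        return ok ? 0 : -ENOMEM;
    }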
Tim Deegan
2013-Mar-07 14:52 UTC
[PATCH 1/5] x86/mm: use bool_t for flags in shadow-pagetable structs
and reshuffle the domain struct to pack a little better.

Signed-off-by: Tim Deegan <tim@xen.org>
---
 xen/include/asm-x86/domain.h | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 97e09ca..fd9fa0f 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -98,23 +98,23 @@ struct shadow_domain {
     /* 1-to-1 map for use when HVM vcpus have paging disabled */
     pagetable_t unpaged_pagetable;
 
+    /* reflect guest table dirty status, incremented by write
+     * emulation and remove write permission */
+    atomic_t gtable_dirty_version;
+
     /* Shadow hashtable */
     struct page_info **hash_table;
-    int hash_walking;  /* Some function is walking the hash table */
+    bool_t hash_walking;  /* Some function is walking the hash table */
 
     /* Fast MMIO path heuristic */
-    int has_fast_mmio_entries;
-
-    /* reflect guest table dirty status, incremented by write
-     * emulation and remove write permission
-     */
-    atomic_t gtable_dirty_version;
+    bool_t has_fast_mmio_entries;
 
     /* OOS */
-    int oos_active;
-    int oos_off;
+    bool_t oos_active;
+    bool_t oos_off;
 
-    int pagetable_dying_op;
+    /* Has this domain ever used HVMOP_pagetable_dying? */
+    bool_t pagetable_dying_op;
 };
 
 struct shadow_vcpu {
@@ -142,7 +142,7 @@ struct shadow_vcpu {
         unsigned long off[SHADOW_OOS_FIXUPS];
     } oos_fixup[SHADOW_OOS_PAGES];
 
-    int pagetable_dying;
+    bool_t pagetable_dying;
 };
 
 /************************************************/
--
1.7.10.4
Tim Deegan
2013-Mar-07 14:53 UTC
[PATCH 2/5] x86/mm: warn if we ever run out of shadow/hap pool for p2m/lgd ops.
Even if the error propagates up through the p2m ops to the caller,
it'll look like ENOMEM, which won't obviously be a shadow-pool problem.

Warn on the console, once per domain.

Signed-off-by: Tim Deegan <tim@xen.org>
---
 xen/arch/x86/mm/hap/hap.c       | 6 ++++++
 xen/arch/x86/mm/shadow/common.c | 8 +++++++-
 xen/include/asm-x86/domain.h    | 2 ++
 3 files changed, 15 insertions(+), 1 deletion(-)

diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 055833d..bff05d9 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -258,6 +258,12 @@ static struct page_info *hap_alloc_p2m_page(struct domain *d)
         page_set_owner(pg, d);
         pg->count_info |= 1;
     }
+    else if ( !d->arch.paging.p2m_alloc_failed )
+    {
+        d->arch.paging.p2m_alloc_failed = 1;
+        dprintk(XENLOG_ERR, "d%i failed to allocate from HAP pool",
+                d->domain_id);
+    }
 
     paging_unlock(d);
     return pg;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 691776c..4b576ac 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -1591,10 +1591,16 @@ shadow_alloc_p2m_page(struct domain *d)
     if ( d->arch.paging.shadow.total_pages
          < shadow_min_acceptable_pages(d) + 1 )
     {
+        if ( !d->arch.paging.p2m_alloc_failed )
+        {
+            d->arch.paging.p2m_alloc_failed = 1;
+            dprintk(XENLOG_ERR, "d%i failed to allocate from shadow pool",
+                    d->domain_id);
+        }
         paging_unlock(d);
         return NULL;
     }
-
+
     shadow_prealloc(d, SH_type_p2m_table, 1);
     pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
     d->arch.paging.shadow.p2m_pages++;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index fd9fa0f..6f9744a 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -190,6 +190,8 @@ struct paging_domain {
      * (used by p2m and log-dirty code for their tries) */
     struct page_info * (*alloc_page)(struct domain *d);
     void (*free_page)(struct domain *d, struct page_info *pg);
+    /* Has that pool ever run out of memory? */
+    bool_t p2m_alloc_failed;
 };
 
 struct paging_vcpu {
--
1.7.10.4
Tim Deegan
2013-Mar-07 14:53 UTC
[PATCH 3/5] x86/ept: check for errors in a few callers of ept_set_entry.
AFAICT in all these cases we have the p2m lock and have just checked
that the p2m trie is populated, so the call should succeed.  Make it
explicit with ASSERT() rather than just ignoring the result.

Signed-off-by: Tim Deegan <tim@xen.org>
Cc: Jun Nakajima <jun.nakajima@intel.com>
Cc: Eddie Dong <eddie.dong@intel.com>
---
 xen/arch/x86/mm/p2m-ept.c | 20 +++++++++++++++-----
 1 file changed, 15 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index a2d1591..595c6e7 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -401,8 +401,9 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
 
     /* then move to the level we want to make real changes */
     for ( ; i > target; i-- )
-        ept_next_level(p2m, 0, &table, &gfn_remainder, i);
-
+        if ( !ept_next_level(p2m, 0, &table, &gfn_remainder, i) )
+            break;
+    /* We just installed the pages we need. */
     ASSERT(i == target);
 
     index = gfn_remainder >> (i * EPT_TABLE_ORDER);
@@ -704,6 +705,7 @@ void ept_change_entry_emt_with_range(struct domain *d,
     mfn_t mfn;
     int order = 0;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    int rc;
 
     p2m_lock(p2m);
     for ( gfn = start_gfn; gfn <= end_gfn; gfn++ )
@@ -732,7 +734,11 @@ void ept_change_entry_emt_with_range(struct domain *d,
                 order = level * EPT_TABLE_ORDER;
                 if ( need_modify_ept_entry(p2m, gfn, mfn,
                                            e.ipat, e.emt, e.sa_p2mt) )
-                    ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
+                {
+                    rc = ept_set_entry(p2m, gfn, mfn, order,
+                                       e.sa_p2mt, e.access);
+                    ASSERT(rc);
+                }
                 gfn += trunk;
                 break;
             }
@@ -741,8 +747,12 @@ void ept_change_entry_emt_with_range(struct domain *d,
         }
         else /* gfn assigned with 4k */
         {
-            if ( need_modify_ept_entry(p2m, gfn, mfn, e.ipat, e.emt, e.sa_p2mt) )
-                ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
+            if ( need_modify_ept_entry(p2m, gfn, mfn,
+                                       e.ipat, e.emt, e.sa_p2mt) )
+            {
+                rc = ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
+                ASSERT(rc);
+            }
         }
     }
     p2m_unlock(p2m);
--
1.7.10.4
Tim Deegan
2013-Mar-07 14:53 UTC
[PATCH 4/5] x86/mem_sharing: check for errors in p2m->set_entry().
This call ought always to succeed.  Assert that it does rather than
ignoring the return value.

Signed-off-by: Tim Deegan <tim@xen.org>
Cc: Andres Lagar-Cavilla <andres@lagarcavilla.org>
---
 xen/arch/x86/mm/mem_sharing.c | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 1caa900..0364bb0 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1273,6 +1273,8 @@ int relinquish_shared_pages(struct domain *d)
         p2m_access_t a;
         p2m_type_t t;
         mfn_t mfn;
+        int set_rc;
+
         if ( atomic_read(&d->shr_pages) == 0 )
             break;
         mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
@@ -1281,10 +1283,12 @@ int relinquish_shared_pages(struct domain *d)
             /* Does not fail with ENOMEM given the DESTROY flag */
             BUG_ON(__mem_sharing_unshare_page(d, gfn,
                                               MEM_SHARING_DESTROY_GFN));
-            /* Clear out the p2m entry so no one else may try to
-             * unshare */
-            p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
-                            p2m_invalid, p2m_access_rwx);
+            /* Clear out the p2m entry so no one else may try to
+             * unshare. Must succeed: we just read the old entry and
+             * we hold the p2m lock. */
+            set_rc = p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
+                                    p2m_invalid, p2m_access_rwx);
+            ASSERT(set_rc != 0);
             count++;
         }
 
--
1.7.10.4
Tim Deegan
2013-Mar-07 14:53 UTC
[PATCH 5/5] x86/mem_access: check for errors in p2m->set_entry().
These calls ought always to succeed.  Assert that they do rather than
ignoring the return value.

Signed-off-by: Tim Deegan <tim@xen.org>
Cc: Aravindh Puthiyaparambil <aravindh@virtuata.com>
---
 xen/arch/x86/mm/p2m.c | 25 ++++++++++++++++++-------
 1 file changed, 18 insertions(+), 7 deletions(-)

diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4837de3..f5ddd20 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1262,21 +1262,27 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
     p2m_type_t p2mt;
     p2m_access_t p2ma;
     mem_event_request_t *req;
+    int rc;
 
-    /* First, handle rx2rw conversion automatically */
+    /* First, handle rx2rw conversion automatically.
+     * These calls to p2m->set_entry() must succeed: we have the gfn
+     * locked and just did a successful get_entry(). */
     gfn_lock(p2m, gfn, 0);
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
 
     if ( access_w && p2ma == p2m_access_rx2rw )
     {
-        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
+        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
+        ASSERT(rc);
         gfn_unlock(p2m, gfn, 0);
         return 1;
     }
     else if ( p2ma == p2m_access_n2rwx )
     {
         ASSERT(access_w || access_r || access_x);
-        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
+        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+                            p2mt, p2m_access_rwx);
+        ASSERT(rc);
     }
     gfn_unlock(p2m, gfn, 0);
 
@@ -1294,13 +1300,18 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
         }
         else
         {
+            gfn_lock(p2m, gfn, 0);
+            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
             if ( p2ma != p2m_access_n2rwx )
            {
-                /* A listener is not required, so clear the access restrictions */
-                gfn_lock(p2m, gfn, 0);
-                p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
-                gfn_unlock(p2m, gfn, 0);
+                /* A listener is not required, so clear the access
+                 * restrictions. This set must succeed: we have the
+                 * gfn locked and just did a successful get_entry(). */
+                rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+                                    p2mt, p2m_access_rwx);
+                ASSERT(rc);
             }
+            gfn_unlock(p2m, gfn, 0);
             return 1;
         }
     }
--
1.7.10.4
Andres Lagar-Cavilla
2013-Mar-07 15:07 UTC
Re: [PATCH 4/5] x86/mem_sharing: check for errors in p2m->set_entry().
On Mar 7, 2013, at 9:53 AM, Tim Deegan <tim@xen.org> wrote:
> This call ought always to succeed.  Assert that it does rather than
> ignoring the return value.
>
> Signed-off-by: Tim Deegan <tim@xen.org>
> Cc: Andres Lagar-Cavilla <andres@lagarcavilla.org>
> ---
> xen/arch/x86/mm/mem_sharing.c | 12 ++++++++----
> 1 file changed, 8 insertions(+), 4 deletions(-)
>
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index 1caa900..0364bb0 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -1273,6 +1273,8 @@ int relinquish_shared_pages(struct domain *d)
>         p2m_access_t a;
>         p2m_type_t t;
>         mfn_t mfn;
> +        int set_rc;
> +
>         if ( atomic_read(&d->shr_pages) == 0 )
>             break;
>         mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
> @@ -1281,10 +1283,12 @@ int relinquish_shared_pages(struct domain *d)
>             /* Does not fail with ENOMEM given the DESTROY flag */
>             BUG_ON(__mem_sharing_unshare_page(d, gfn,
>                                               MEM_SHARING_DESTROY_GFN));
> -            /* Clear out the p2m entry so no one else may try to
> -             * unshare */
> -            p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
> -                            p2m_invalid, p2m_access_rwx);
> +            /* Clear out the p2m entry so no one else may try to
> +             * unshare. Must succeed: we just read the old entry and
> +             * we hold the p2m lock. */
> +            set_rc = p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
> +                                    p2m_invalid, p2m_access_rwx);
> +            ASSERT(set_rc != 0);
Acked-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>

Wouldn't it be slightly cleaner to BUG_ON(p2m->set_entry(..) != 1)?

Andres

>             count++;
>         }
>
> --
> 1.7.10.4
>
Andrew Cooper
2013-Mar-07 15:22 UTC
Re: [PATCH 2/5] x86/mm: warn if we ever run out of shadow/hap pool for p2m/lgd ops.
On 07/03/13 14:53, Tim Deegan wrote:
> Even if the error propagates up through the p2m ops to the caller,
> it'll look like ENOMEM, which won't obviously be a shadow-pool problem.
>
> Warn on the console, once per domain.
>
> Signed-off-by: Tim Deegan <tim@xen.org>
> ---
>  xen/arch/x86/mm/hap/hap.c       | 6 ++++++
>  xen/arch/x86/mm/shadow/common.c | 8 +++++++-
>  xen/include/asm-x86/domain.h    | 2 ++
>  3 files changed, 15 insertions(+), 1 deletion(-)
>
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index 055833d..bff05d9 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -258,6 +258,12 @@ static struct page_info *hap_alloc_p2m_page(struct domain *d)
>          page_set_owner(pg, d);
>          pg->count_info |= 1;
>      }
> +    else if ( !d->arch.paging.p2m_alloc_failed )
> +    {
> +        d->arch.paging.p2m_alloc_failed = 1;
> +        dprintk(XENLOG_ERR, "d%i failed to allocate from HAP pool",
> +                d->domain_id);
> +    }
>
>      paging_unlock(d);
>      return pg;
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index 691776c..4b576ac 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -1591,10 +1591,16 @@ shadow_alloc_p2m_page(struct domain *d)
>      if ( d->arch.paging.shadow.total_pages
>           < shadow_min_acceptable_pages(d) + 1 )
>      {
> +        if ( !d->arch.paging.p2m_alloc_failed )
> +        {
> +            d->arch.paging.p2m_alloc_failed = 1;
> +            dprintk(XENLOG_ERR, "d%i failed to allocate from shadow pool",
> +                    d->domain_id);

domain_id is a uint16_t, so %i is the wrong signedness and width.

Do we not have a PRIdomid?  I seem to remember we have PRI macros for
some Xen types, but can't find an appropriate one for domids.  It seems
like a sensible one to introduce if it doesn't already exist.

~Andrew

> +        }
>          paging_unlock(d);
>          return NULL;
>      }
> -
> +
>      shadow_prealloc(d, SH_type_p2m_table, 1);
>      pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
>      d->arch.paging.shadow.p2m_pages++;
> diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
> index fd9fa0f..6f9744a 100644
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -190,6 +190,8 @@ struct paging_domain {
>       * (used by p2m and log-dirty code for their tries) */
>      struct page_info * (*alloc_page)(struct domain *d);
>      void (*free_page)(struct domain *d, struct page_info *pg);
> +    /* Has that pool ever run out of memory? */
> +    bool_t p2m_alloc_failed;
>  };
>
>  struct paging_vcpu {
Jan Beulich
2013-Mar-07 15:34 UTC
Re: [PATCH 2/5] x86/mm: warn if we ever run out of shadow/hap pool for p2m/lgd ops.
>>> On 07.03.13 at 16:22, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
> On 07/03/13 14:53, Tim Deegan wrote:
>> --- a/xen/arch/x86/mm/shadow/common.c
>> +++ b/xen/arch/x86/mm/shadow/common.c
>> @@ -1591,10 +1591,16 @@ shadow_alloc_p2m_page(struct domain *d)
>>      if ( d->arch.paging.shadow.total_pages
>>           < shadow_min_acceptable_pages(d) + 1 )
>>      {
>> +        if ( !d->arch.paging.p2m_alloc_failed )
>> +        {
>> +            d->arch.paging.p2m_alloc_failed = 1;
>> +            dprintk(XENLOG_ERR, "d%i failed to allocate from shadow pool",
>> +                    d->domain_id);
>
> domain_id is a uint16_t, so %i is the wrong signedness and width.

How that?  Everything smaller than an int gets promoted to int first.

Jan
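For reference, a small self-contained illustration of the default argument promotion Jan describes (plain standard C, not Xen code): an unsigned 16-bit value passed through a variadic call is promoted to int, so a %i/%d conversion matches it.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t domid = 32752;   /* same width as a domid_t */

        /* In the variadic printf() call the uint16_t argument undergoes the
         * default argument promotions and arrives as an int, so "%i" is a
         * matching conversion specifier. */
        printf("d%i failed to allocate from shadow pool\n", domid);
        return 0;
    }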
>>> On 07.03.13 at 15:52, Tim Deegan <tim@xen.org> wrote:
> These patches begin to address error handling when the shadow/hap
> pool can't allocate any more p2m pages.
>
> Patch 1 is a bit of preliminary tidying.
> Patch 2 prints a warning on error so it can be diagnosed.
> Patches 3-5 add checks to direct callers of the p2m allocation functions.

Thanks for doing this!

Acked-by: Jan Beulich <jbeulich@suse.com>
(apart from the last one, where I don't feel qualified)

> With these patches applied, pool allocation errors will be
> propagated as far as the set_p2m_entry() interface.
> Unfortunately there are many callers of that interface that don't
> check the return code -- PoD and mem_paging are the major culprits,
> but there are other cases where the error is discarded further up
> the stack (e.g. some callers of set_mmio_p2m_entry()). Chasing those
> up is work for another day.
>
> Cheers,
>
> Tim.
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> http://lists.xen.org/xen-devel
Tim Deegan
2013-Mar-07 16:10 UTC
Re: [PATCH 4/5] x86/mem_sharing: check for errors in p2m->set_entry().
At 10:07 -0500 on 07 Mar (1362650835), Andres Lagar-Cavilla wrote:
> On Mar 7, 2013, at 9:53 AM, Tim Deegan <tim@xen.org> wrote:
>
> > This call ought always to succeed.  Assert that it does rather than
> > ignoring the return value.
> >
> > Signed-off-by: Tim Deegan <tim@xen.org>
> > Cc: Andres Lagar-Cavilla <andres@lagarcavilla.org>
> > ---
> > xen/arch/x86/mm/mem_sharing.c | 12 ++++++++----
> > 1 file changed, 8 insertions(+), 4 deletions(-)
> >
> > diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> > index 1caa900..0364bb0 100644
> > --- a/xen/arch/x86/mm/mem_sharing.c
> > +++ b/xen/arch/x86/mm/mem_sharing.c
> > @@ -1273,6 +1273,8 @@ int relinquish_shared_pages(struct domain *d)
> >         p2m_access_t a;
> >         p2m_type_t t;
> >         mfn_t mfn;
> > +        int set_rc;
> > +
> >         if ( atomic_read(&d->shr_pages) == 0 )
> >             break;
> >         mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
> > @@ -1281,10 +1283,12 @@ int relinquish_shared_pages(struct domain *d)
> >             /* Does not fail with ENOMEM given the DESTROY flag */
> >             BUG_ON(__mem_sharing_unshare_page(d, gfn,
> >                                               MEM_SHARING_DESTROY_GFN));
> > -            /* Clear out the p2m entry so no one else may try to
> > -             * unshare */
> > -            p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
> > -                            p2m_invalid, p2m_access_rwx);
> > +            /* Clear out the p2m entry so no one else may try to
> > +             * unshare. Must succeed: we just read the old entry and
> > +             * we hold the p2m lock. */
> > +            set_rc = p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
> > +                                    p2m_invalid, p2m_access_rwx);
> > +            ASSERT(set_rc != 0);
> Acked-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>

Thanks.

> Wouldn't it be slightly cleaner to BUG_ON(p2m->set_entry(..) != 1)?

I dislike BUG_ON(something_with_side_effects()).  When scanning through
code, my eye skips over ASSERT()s and BUG_ON()s, assuming they're just
testing invariants.  Besides, that sort of thinking leads to the much
more pernicious ASSERT(thing_with_side_effects). :)

Tim.
Andres Lagar-Cavilla
2013-Mar-07 16:49 UTC
Re: [PATCH 4/5] x86/mem_sharing: check for errors in p2m->set_entry().
On Mar 7, 2013, at 11:10 AM, Tim Deegan <tim@xen.org> wrote:
> At 10:07 -0500 on 07 Mar (1362650835), Andres Lagar-Cavilla wrote:
>> On Mar 7, 2013, at 9:53 AM, Tim Deegan <tim@xen.org> wrote:
>>
>>> This call ought always to succeed.  Assert that it does rather than
>>> ignoring the return value.
>>>
>>> Signed-off-by: Tim Deegan <tim@xen.org>
>>> Cc: Andres Lagar-Cavilla <andres@lagarcavilla.org>
>>> ---
>>> xen/arch/x86/mm/mem_sharing.c | 12 ++++++++----
>>> 1 file changed, 8 insertions(+), 4 deletions(-)
>>>
>>> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
>>> index 1caa900..0364bb0 100644
>>> --- a/xen/arch/x86/mm/mem_sharing.c
>>> +++ b/xen/arch/x86/mm/mem_sharing.c
>>> @@ -1273,6 +1273,8 @@ int relinquish_shared_pages(struct domain *d)
>>>         p2m_access_t a;
>>>         p2m_type_t t;
>>>         mfn_t mfn;
>>> +        int set_rc;
>>> +
>>>         if ( atomic_read(&d->shr_pages) == 0 )
>>>             break;
>>>         mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);
>>> @@ -1281,10 +1283,12 @@ int relinquish_shared_pages(struct domain *d)
>>>             /* Does not fail with ENOMEM given the DESTROY flag */
>>>             BUG_ON(__mem_sharing_unshare_page(d, gfn,
>>>                                               MEM_SHARING_DESTROY_GFN));
>>> -            /* Clear out the p2m entry so no one else may try to
>>> -             * unshare */
>>> -            p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
>>> -                            p2m_invalid, p2m_access_rwx);
>>> +            /* Clear out the p2m entry so no one else may try to
>>> +             * unshare. Must succeed: we just read the old entry and
>>> +             * we hold the p2m lock. */
>>> +            set_rc = p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
>>> +                                    p2m_invalid, p2m_access_rwx);
>>> +            ASSERT(set_rc != 0);
>> Acked-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
>
> Thanks.
>
>> Wouldn't it be slightly cleaner to BUG_ON(p2m->set_entry(..) != 1)?
>
> I dislike BUG_ON(something_with_side_effects()).  When scanning through
> code, my eye skips over ASSERT()s and BUG_ON()s, assuming they're just
> testing invariants.  Besides, that sort of thinking leads to the much
> more pernicious ASSERT(thing_with_side_effects). :)

Yeah ok.  My point was that BUG_ON doesn't get compiled away on
non-debug builds, but there is a point to avoiding the pattern.

Andres

>
> Tim.
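A small, self-contained sketch of the trade-off discussed above, using standard assert() as a stand-in for Xen's ASSERT (both skip evaluating their argument in a non-debug/NDEBUG build, whereas BUG_ON always evaluates its condition); the fake_set_entry() helper is purely illustrative.

    #include <assert.h>
    #include <stdio.h>

    static int set_entry_calls;

    /* Stand-in for p2m->set_entry(): returns non-zero on success. */
    static int fake_set_entry(void)
    {
        set_entry_calls++;
        return 1;
    }

    int main(void)
    {
        /* The pattern used in this series: the call always happens, and only
         * the check on its result disappears in an NDEBUG build. */
        int rc = fake_set_entry();
        assert(rc != 0);

        /* The pattern being avoided: with NDEBUG defined the whole expression,
         * including the call itself, is compiled away, so the entry would
         * never be written at all. */
        assert(fake_set_entry() != 0);

        printf("fake_set_entry() called %d time(s)\n", set_entry_calls);
        return 0;
    }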
Aravindh Puthiyaparambil
2013-Mar-07 21:50 UTC
Re: [PATCH 5/5] x86/mem_access: check for errors in p2m->set_entry().
On Thu, Mar 7, 2013 at 6:53 AM, Tim Deegan <tim@xen.org> wrote:
> These calls ought always to succeed.  Assert that they do rather than
> ignoring the return value.
>
> Signed-off-by: Tim Deegan <tim@xen.org>
> Cc: Aravindh Puthiyaparambil <aravindh@virtuata.com>
> ---
>  xen/arch/x86/mm/p2m.c | 25 ++++++++++++++++++-------
>  1 file changed, 18 insertions(+), 7 deletions(-)
>
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 4837de3..f5ddd20 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1262,21 +1262,27 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
>      p2m_type_t p2mt;
>      p2m_access_t p2ma;
>      mem_event_request_t *req;
> +    int rc;
>
> -    /* First, handle rx2rw conversion automatically */
> +    /* First, handle rx2rw conversion automatically.
> +     * These calls to p2m->set_entry() must succeed: we have the gfn
> +     * locked and just did a successful get_entry(). */
>      gfn_lock(p2m, gfn, 0);
>      mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
>
>      if ( access_w && p2ma == p2m_access_rx2rw )
>      {
> -        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
> +        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
> +        ASSERT(rc);
>          gfn_unlock(p2m, gfn, 0);
>          return 1;
>      }
>      else if ( p2ma == p2m_access_n2rwx )
>      {
>          ASSERT(access_w || access_r || access_x);
> -        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
> +        rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
> +                            p2mt, p2m_access_rwx);
> +        ASSERT(rc);
>      }
>      gfn_unlock(p2m, gfn, 0);
>
> @@ -1294,13 +1300,18 @@ bool_t p2m_mem_access_check(paddr_t gpa, bool_t gla_valid, unsigned long gla,
>          }
>          else
>          {
> +            gfn_lock(p2m, gfn, 0);
> +            mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL);
>              if ( p2ma != p2m_access_n2rwx )
>              {
> -                /* A listener is not required, so clear the access restrictions */
> -                gfn_lock(p2m, gfn, 0);
> -                p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
> -                gfn_unlock(p2m, gfn, 0);
> +                /* A listener is not required, so clear the access
> +                 * restrictions. This set must succeed: we have the
> +                 * gfn locked and just did a successful get_entry(). */
> +                rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
> +                                    p2mt, p2m_access_rwx);
> +                ASSERT(rc);
>              }
> +            gfn_unlock(p2m, gfn, 0);
>              return 1;
>          }
>      }
> --
> 1.7.10.4
>

Acked-by: Aravindh Puthiyaparambil <aravindh@virtuata.com>