Stephen Smalley
2011-Feb-02 18:56 UTC
[Xen-devel] [RFC][PATCH] xen/xsm: Fix xsm_mmu_* and xsm_update_va_mapping hooks
This isn't targeted at 4.1 given the freeze, but is instead just an RFC. This is an attempt to properly fix the hypervisor crash previously described in http://marc.info/?l=xen-devel&m=128396289707362&w=2 In looking into this issue, I think the proper fix is to move the xsm_mmu_* and xsm_update_va_mapping hook calls later in the callers, after more validation has been performed and the page_info struct is readily available, and pass the page_info to the hooks. This patch moves the xsm_mmu_normal_update, xsm_mmu_machphys_update and xsm_update_va_mapping hook calls accordingly, and updates their interfaces and hook function implementations. This appears to resolve the crashes for me. However, as I am not overly familiar with this area of code, I'd appreciate any feedback or suggestions for improvement. Thanks. --- xen/arch/x86/mm.c | 30 ++++++++++++++++++------------ xen/include/xsm/xsm.h | 28 +++++++++++++++------------- xen/xsm/dummy.c | 11 ++++++----- xen/xsm/flask/hooks.c | 41 ++++++++--------------------------------- 4 files changed, 47 insertions(+), 63 deletions(-) diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -3416,10 +3416,6 @@ { p2m_type_t p2mt; - rc = xsm_mmu_normal_update(d, pg_owner, req.val); - if ( rc ) - break; - req.ptr -= cmd; gmfn = req.ptr >> PAGE_SHIFT; mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(pt_owner), gmfn, &p2mt)); @@ -3445,6 +3441,13 @@ (unsigned long)(req.ptr & ~PAGE_MASK)); page = mfn_to_page(mfn); + rc = xsm_mmu_normal_update(d, req.val, page); + if ( rc ) { + unmap_domain_page_with_cache(va, &mapcache); + put_page(page); + break; + } + if ( page_lock(page) ) { switch ( page->u.inuse.type_info & PGT_type_mask ) @@ -3612,10 +3615,6 @@ mfn = req.ptr >> PAGE_SHIFT; gpfn = req.val; - rc = xsm_mmu_machphys_update(d, mfn); - if ( rc ) - break; - if ( unlikely(!get_page_from_pagenr(mfn, pg_owner)) ) { MEM_LOG("Could not get page for mach->phys update"); break; } @@ -3628,6 +3627,10 @@ break; } + rc = 
xsm_mmu_machphys_update(d, mfn_to_page(mfn)); + if ( rc ) + break; + set_gpfn_from_mfn(mfn, gpfn); okay = 1; @@ -4247,10 +4250,6 @@ perfc_incr(calls_to_update_va); - rc = xsm_update_va_mapping(d, pg_owner, val); - if ( rc ) - return rc; - rc = -EINVAL; pl1e = guest_map_l1e(v, va, &gl1mfn); if ( unlikely(!pl1e || !get_page_from_pagenr(gl1mfn, d)) ) @@ -4270,6 +4269,13 @@ goto out; } + rc = xsm_update_va_mapping(d, val, gl1pg); + if ( rc ) { + page_unlock(gl1pg); + put_page(gl1pg); + goto out; + } + rc = mod_l1_entry(pl1e, val, gl1mfn, 0, v, pg_owner) ? 0 : -EINVAL; page_unlock(gl1pg); diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h --- a/xen/include/xsm/xsm.h +++ b/xen/include/xsm/xsm.h @@ -136,11 +136,12 @@ int (*getidletime) (void); int (*machine_memory_map) (void); int (*domain_memory_map) (struct domain *d); - int (*mmu_normal_update) (struct domain *d, struct domain *f, - intpte_t fpte); - int (*mmu_machphys_update) (struct domain *d, unsigned long mfn); - int (*update_va_mapping) (struct domain *d, struct domain *f, - l1_pgentry_t pte); + int (*mmu_normal_update) (struct domain *d, + intpte_t fpte, struct page_info *page); + int (*mmu_machphys_update) (struct domain *d, struct page_info *page); + int (*update_va_mapping) (struct domain *d, + l1_pgentry_t pte, + struct page_info *page); int (*add_to_physmap) (struct domain *d1, struct domain *d2); int (*sendtrigger) (struct domain *d); int (*test_assign_device) (uint32_t machine_bdf); @@ -567,21 +568,22 @@ return xsm_call(domain_memory_map(d)); } -static inline int xsm_mmu_normal_update (struct domain *d, struct domain *f, - intpte_t fpte) +static inline int xsm_mmu_normal_update (struct domain *d, + intpte_t fpte, struct page_info *page) { - return xsm_call(mmu_normal_update(d, f, fpte)); + return xsm_call(mmu_normal_update(d, fpte, page)); } -static inline int xsm_mmu_machphys_update (struct domain *d, unsigned long mfn) +static inline int xsm_mmu_machphys_update (struct domain *d, struct page_info 
*page) { - return xsm_call(mmu_machphys_update(d, mfn)); + return xsm_call(mmu_machphys_update(d, page)); } -static inline int xsm_update_va_mapping(struct domain *d, struct domain *f, - l1_pgentry_t pte) +static inline int xsm_update_va_mapping(struct domain *d, + l1_pgentry_t pte, + struct page_info *page) { - return xsm_call(update_va_mapping(d, f, pte)); + return xsm_call(update_va_mapping(d, pte, page)); } static inline int xsm_add_to_physmap(struct domain *d1, struct domain *d2) diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c --- a/xen/xsm/dummy.c +++ b/xen/xsm/dummy.c @@ -405,19 +405,20 @@ return 0; } -static int dummy_mmu_normal_update (struct domain *d, struct domain *f, - intpte_t fpte) +static int dummy_mmu_normal_update (struct domain *d, + intpte_t fpte, struct page_info *page) { return 0; } -static int dummy_mmu_machphys_update (struct domain *d, unsigned long mfn) +static int dummy_mmu_machphys_update (struct domain *d, struct page_info *page) { return 0; } -static int dummy_update_va_mapping (struct domain *d, struct domain *f, - l1_pgentry_t pte) +static int dummy_update_va_mapping (struct domain *d, + l1_pgentry_t pte, + struct page_info *page) { return 0; } diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c --- a/xen/xsm/flask/hooks.c +++ b/xen/xsm/flask/hooks.c @@ -347,26 +347,6 @@ return rc; } -static int get_mfn_sid(unsigned long mfn, u32 *sid) -{ - int rc = 0; - struct page_info *page; - - if ( mfn_valid(mfn) ) - { - /*mfn is valid if this is a page that Xen is tracking!*/ - page = mfn_to_page(mfn); - rc = get_page_sid(page, sid); - } - else - { - /*Possibly an untracked IO page?*/ - rc = security_iomem_sid(mfn, sid); - } - - return rc; -} - static int flask_memory_adjust_reservation(struct domain *d1, struct domain *d2) { return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__ADJUST); @@ -1006,12 +986,11 @@ return domain_has_perm(current->domain, d, SECCLASS_MMU, MMU__MEMORYMAP); } -static int flask_mmu_normal_update(struct domain *d, 
struct domain *f, - intpte_t fpte) +static int flask_mmu_normal_update(struct domain *d, + intpte_t fpte, struct page_info *page) { int rc = 0; u32 map_perms = MMU__MAP_READ; - unsigned long fmfn; struct domain_security_struct *dsec; u32 fsid; @@ -1020,42 +999,38 @@ if ( l1e_get_flags(l1e_from_intpte(fpte)) & _PAGE_RW ) map_perms |= MMU__MAP_WRITE; - fmfn = gmfn_to_mfn(f, l1e_get_pfn(l1e_from_intpte(fpte))); - - rc = get_mfn_sid(fmfn, &fsid); + rc = get_page_sid(page, &fsid); if ( rc ) return rc; return avc_has_perm(dsec->sid, fsid, SECCLASS_MMU, map_perms, NULL); } -static int flask_mmu_machphys_update(struct domain *d, unsigned long mfn) +static int flask_mmu_machphys_update(struct domain *d, struct page_info *page) { int rc = 0; u32 psid; struct domain_security_struct *dsec; dsec = d->ssid; - rc = get_mfn_sid(mfn, &psid); + rc = get_page_sid(page, &psid); if ( rc ) return rc; return avc_has_perm(dsec->sid, psid, SECCLASS_MMU, MMU__UPDATEMP, NULL); } -static int flask_update_va_mapping(struct domain *d, struct domain *f, - l1_pgentry_t pte) +static int flask_update_va_mapping(struct domain *d, + l1_pgentry_t pte, struct page_info *page) { int rc = 0; u32 psid; u32 map_perms = MMU__MAP_READ; - unsigned long mfn; struct domain_security_struct *dsec; dsec = d->ssid; - mfn = gmfn_to_mfn(f, l1e_get_pfn(pte)); - rc = get_mfn_sid(mfn, &psid); + rc = get_page_sid(page, &psid); if ( rc ) return rc; -- Stephen Smalley National Security Agency _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel