The ept code by default sets rwx to 1 instead of 0. There are also some
places where unsigned long is used instead of mfn_t. Finally, the code
formatting is inconsistent both with itself and the rest of Xen. This series
of patches fixes up the ept code to address these issues.


Patrick
Patrick Colp
2009-Aug-05 14:00 UTC
[Xen-devel] [PATCH 1 of 4] ept p2m: set rwx flags to 0 for invalid and mmio_dm types
# HG changeset patch
# User Patrick Colp <Patrick.Colp@citrix.com>
# Date 1249480778 -3600
# Node ID 86486ebbb5f0bd4f94b42492b7e00110971a4078
# Parent 68e8b8379244e293c55875e7dc3692fc81d3d212
ept p2m: set rwx flags to 0 for invalid and mmio_dm types.

Read/write/execute flags are set to 1 before calling the type_to_flags
function, which sets them to their appropriate values depending on the p2m
type. However, the invalid, mmio_dm, and default/unknown cases in
type_to_flags just fall through, unsafely leaving full access to these
pages.

Signed-off-by: Patrick Colp <Patrick.Colp@citrix.com>

diff -r 68e8b8379244 -r 86486ebbb5f0 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c     Sun Aug 02 13:43:15 2009 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c     Wed Aug 05 14:59:38 2009 +0100
@@ -36,6 +36,7 @@
         case p2m_invalid:
         case p2m_mmio_dm:
         default:
+            entry->r = entry->w = entry->x = 0;
             return;
         case p2m_ram_rw:
         case p2m_mmio_direct:
@@ -196,7 +197,6 @@
             ept_entry->rsvd = 0;
             ept_entry->avail2 = 0;
             /* last step */
-            ept_entry->r = ept_entry->w = ept_entry->x = 1;
             ept_p2m_type_to_flags(ept_entry, p2mt);
         }
         else
@@ -242,7 +242,6 @@
             split_ept_entry->rsvd = 0;
             split_ept_entry->avail2 = 0;
             /* last step */
-            split_ept_entry->r = split_ept_entry->w = split_ept_entry->x = 1;
             ept_p2m_type_to_flags(split_ept_entry, split_p2mt);
         }
 
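The effect of the change is easiest to see in isolation. Below is a minimal,
self-contained sketch of the fail-closed behaviour the patch introduces; the
enum values, the bitfield struct, and the function name are simplified
stand-ins for illustration, not the actual Xen definitions:

    #include <stdio.h>

    /* Simplified stand-ins for p2m_type_t and the permission bits of an
     * EPT entry; the real definitions live in the Xen tree. */
    typedef enum { p2m_invalid, p2m_mmio_dm, p2m_ram_rw, p2m_ram_ro } p2m_type_t;
    typedef struct { unsigned int r:1, w:1, x:1; } ept_flags_t;

    static void type_to_flags(ept_flags_t *entry, p2m_type_t type)
    {
        switch ( type )
        {
        case p2m_ram_rw:
            entry->r = entry->w = entry->x = 1;
            return;
        case p2m_ram_ro:
            entry->r = entry->x = 1;
            entry->w = 0;
            return;
        case p2m_invalid:
        case p2m_mmio_dm:
        default:
            /* Fail closed: types we do not handle get no access at all,
             * instead of inheriting whatever the caller left in the entry. */
            entry->r = entry->w = entry->x = 0;
            return;
        }
    }

    int main(void)
    {
        ept_flags_t e = { .r = 1, .w = 1, .x = 1 };   /* stale permissive bits */

        type_to_flags(&e, p2m_mmio_dm);
        printf("r=%u w=%u x=%u\n", e.r, e.w, e.x);    /* prints r=0 w=0 x=0 */
        return 0;
    }

Before the patch, the caller pre-set r/w/x to 1 and the invalid/mmio_dm/default
cases returned without touching them; with the explicit clear in those cases,
an unknown or emulated-MMIO type can no longer retain full access by accident.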
Patrick Colp
2009-Aug-05 14:00 UTC
[Xen-devel] [PATCH 2 of 4] ept p2m: replace unsigned long with mfn_t for mfns
# HG changeset patch
# User Patrick Colp <Patrick.Colp@citrix.com>
# Date 1249480778 -3600
# Node ID a77221fe874c17b6a4ac3dc0ca40ab91949d8afa
# Parent 86486ebbb5f0bd4f94b42492b7e00110971a4078
ept p2m: replace unsigned long with mfn_t for mfns.

Signed-off-by: Patrick Colp <Patrick.Colp@citrix.com>

diff -r 86486ebbb5f0 -r a77221fe874c xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c     Wed Aug 05 14:59:38 2009 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c     Wed Aug 05 14:59:38 2009 +0100
@@ -402,11 +402,11 @@
  * return 1 to not to reset ept entry.
  */
 static int need_modify_ept_entry(struct domain *d, unsigned long gfn,
-                                 unsigned long mfn, uint8_t o_igmt,
+                                 mfn_t mfn, uint8_t o_igmt,
                                  uint8_t o_emt, p2m_type_t p2mt)
 {
     uint8_t igmt, emt;
-    emt = epte_get_entry_emt(d, gfn, mfn, &igmt,
+    emt = epte_get_entry_emt(d, gfn, mfn_x(mfn), &igmt,
                              (p2mt == p2m_mmio_direct));
     if ( (emt == o_emt) && (igmt == o_igmt) )
         return 0;
@@ -420,7 +420,7 @@
     p2m_type_t p2mt;
     uint64_t epte;
     int order = 0;
-    unsigned long mfn;
+    mfn_t mfn;
     uint8_t o_igmt, o_emt;
 
     for ( gfn = start_gfn; gfn <= end_gfn; gfn++ )
@@ -428,8 +428,8 @@
         epte = ept_get_entry_content(d, gfn);
         if ( epte == 0 )
             continue;
-        mfn = (epte & EPTE_MFN_MASK) >> PAGE_SHIFT;
-        if ( !mfn_valid(mfn) )
+        mfn = _mfn((epte & EPTE_MFN_MASK) >> PAGE_SHIFT);
+        if ( !mfn_valid(mfn_x(mfn)) )
             continue;
         p2mt = (epte & EPTE_AVAIL1_MASK) >> EPTE_AVAIL1_SHIFT;
         o_igmt = (epte & EPTE_IGMT_MASK) >> EPTE_IGMT_SHIFT;
@@ -447,7 +447,7 @@
                 order = EPT_TABLE_ORDER;
                 if ( need_modify_ept_entry(d, gfn, mfn,
                                            o_igmt, o_emt, p2mt) )
-                    ept_set_entry(d, gfn, _mfn(mfn), order, p2mt);
+                    ept_set_entry(d, gfn, mfn, order, p2mt);
                 gfn += 0x1FF;
             }
             else
@@ -455,7 +455,7 @@
                 /* change emt for partial entries of the 2m area. */
                 if ( need_modify_ept_entry(d, gfn, mfn,
                                            o_igmt, o_emt, p2mt) )
-                    ept_set_entry(d, gfn, _mfn(mfn), order, p2mt);
+                    ept_set_entry(d, gfn, mfn, order, p2mt);
                 gfn = ((gfn >> EPT_TABLE_ORDER) << EPT_TABLE_ORDER) + 0x1FF;
             }
         }
@@ -463,7 +463,7 @@
         {
             if ( need_modify_ept_entry(d, gfn, mfn,
                                        o_igmt, o_emt, p2mt) )
-                ept_set_entry(d, gfn, _mfn(mfn), order, p2mt);
+                ept_set_entry(d, gfn, mfn, order, p2mt);
         }
     }
 }
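For readers who have not met it before, mfn_t is Xen's type-safe wrapper around
a raw machine frame number: values go in through _mfn() and come back out
through mfn_x(), exactly as the hunks above do at the call sites they touch.
The following stand-alone sketch shows why the wrapper is worth the churn; the
struct and helpers here are simplified illustrations, not the real definitions
from the Xen headers (which come from the TYPE_SAFE machinery):

    #include <stdio.h>

    /* Simplified stand-in for Xen's mfn_t wrapper. */
    typedef struct { unsigned long m; } mfn_t;
    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.m; }

    /* Hypothetical consumer: the signature now states which argument is the
     * machine frame, so a gfn can no longer be passed in its place. */
    static void set_entry_sketch(unsigned long gfn, mfn_t mfn)
    {
        printf("map gfn %#lx -> mfn %#lx\n", gfn, mfn_x(mfn));
    }

    int main(void)
    {
        unsigned long gfn = 0x100;
        mfn_t mfn = _mfn(0x1234);

        set_entry_sketch(gfn, mfn);
        /* set_entry_sketch(mfn, gfn); -- rejected by the compiler: the two
         * arguments can no longer be swapped silently. */
        return 0;
    }

The visible trade-off in the patch is that call sites which genuinely need the
raw number (mfn_valid(), the shift in the EPTE decoding) must now unwrap it
with mfn_x(), which is exactly the small amount of noise the hunks add.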
Patrick Colp
2009-Aug-05 14:00 UTC
[Xen-devel] [PATCH 3 of 4] ept mtrr: replace unsigned long with mfn_t for mfns
# HG changeset patch
# User Patrick Colp <Patrick.Colp@citrix.com>
# Date 1249480778 -3600
# Node ID 9b2a92187e3cfac7753da4e4b1944e774cb01e6f
# Parent a77221fe874c17b6a4ac3dc0ca40ab91949d8afa
ept mtrr: replace unsigned long with mfn_t for mfns.

Signed-off-by: Patrick Colp <Patrick.Colp@citrix.com>

diff -r a77221fe874c -r 9b2a92187e3c xen/arch/x86/hvm/mtrr.c
--- a/xen/arch/x86/hvm/mtrr.c   Wed Aug 05 14:59:38 2009 +0100
+++ b/xen/arch/x86/hvm/mtrr.c   Wed Aug 05 14:59:38 2009 +0100
@@ -20,6 +20,7 @@
 #include <public/hvm/e820.h>
 #include <xen/types.h>
 #include <asm/e820.h>
+#include <asm/mm.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
 #include <xen/domain_page.h>
@@ -712,7 +713,7 @@
 
 uint8_t epte_get_entry_emt(
     struct domain *d, unsigned long gfn,
-    unsigned long mfn, uint8_t *igmt, int direct_mmio)
+    mfn_t mfn, uint8_t *igmt, int direct_mmio)
 {
     uint8_t gmtrr_mtype, hmtrr_mtype;
     uint32_t type;
@@ -730,7 +731,7 @@
     if ( (v == current) && v->domain->arch.hvm_domain.is_in_uc_mode )
         return MTRR_TYPE_UNCACHABLE;
 
-    if ( !mfn_valid(mfn) )
+    if ( !mfn_valid(mfn_x(mfn)) )
         return MTRR_TYPE_UNCACHABLE;
 
     if ( hvm_get_mem_pinned_cacheattr(d, gfn, &type) )
@@ -752,6 +753,6 @@
     }
 
     gmtrr_mtype = get_mtrr_type(&v->arch.hvm_vcpu.mtrr, (gfn << PAGE_SHIFT));
-    hmtrr_mtype = get_mtrr_type(&mtrr_state, (mfn << PAGE_SHIFT));
+    hmtrr_mtype = get_mtrr_type(&mtrr_state, (mfn_x(mfn) << PAGE_SHIFT));
     return ((gmtrr_mtype <= hmtrr_mtype) ? gmtrr_mtype : hmtrr_mtype);
 }
diff -r a77221fe874c -r 9b2a92187e3c xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c     Wed Aug 05 14:59:38 2009 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c     Wed Aug 05 14:59:38 2009 +0100
@@ -168,7 +168,7 @@
     {
         if ( mfn_valid(mfn_x(mfn)) || (p2mt == p2m_mmio_direct) )
         {
-            ept_entry->emt = epte_get_entry_emt(d, gfn, mfn_x(mfn),
+            ept_entry->emt = epte_get_entry_emt(d, gfn, mfn,
                                 &igmt, direct_mmio);
             ept_entry->igmt = igmt;
             ept_entry->sp_avail = walk_level ? 1 : 0;
@@ -230,7 +230,7 @@
         {
             split_ept_entry = split_table + i;
             split_ept_entry->emt = epte_get_entry_emt(d,
-                                        gfn-offset+i, split_mfn+i,
+                                        gfn-offset+i, _mfn(split_mfn+i),
                                         &igmt, direct_mmio);
             split_ept_entry->igmt = igmt;
 
@@ -247,7 +247,7 @@
 
         /* Set the destinated 4k page as normal */
         split_ept_entry = split_table + offset;
-        split_ept_entry->emt = epte_get_entry_emt(d, gfn, mfn_x(mfn),
+        split_ept_entry->emt = epte_get_entry_emt(d, gfn, mfn,
                                     &igmt, direct_mmio);
         split_ept_entry->igmt = igmt;
 
@@ -406,7 +406,7 @@
                                  uint8_t o_emt, p2m_type_t p2mt)
 {
     uint8_t igmt, emt;
-    emt = epte_get_entry_emt(d, gfn, mfn_x(mfn), &igmt,
+    emt = epte_get_entry_emt(d, gfn, mfn, &igmt,
                              (p2mt == p2m_mmio_direct));
     if ( (emt == o_emt) && (igmt == o_igmt) )
         return 0;
diff -r a77221fe874c -r 9b2a92187e3c xen/include/asm-x86/mtrr.h
--- a/xen/include/asm-x86/mtrr.h        Wed Aug 05 14:59:38 2009 +0100
+++ b/xen/include/asm-x86/mtrr.h        Wed Aug 05 14:59:38 2009 +0100
@@ -2,6 +2,7 @@
 #define __ASM_X86_MTRR_H__
 
 #include <xen/config.h>
+#include <asm/mm.h>
 
 /* These are the region types. They match the architectural specification. */
 #define MTRR_TYPE_UNCACHABLE 0
@@ -66,7 +67,7 @@
 extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr,
                          paddr_t spaddr, uint8_t gmtrr_mtype);
 extern uint8_t epte_get_entry_emt(
-    struct domain *d, unsigned long gfn, unsigned long mfn,
+    struct domain *d, unsigned long gfn, mfn_t mfn,
     uint8_t *igmt, int direct_mmio);
 extern void ept_change_entry_emt_with_range(
     struct domain *d, unsigned long start_gfn, unsigned long end_gfn);
Patrick Colp
2009-Aug-05 14:00 UTC
[Xen-devel] [PATCH 4 of 4] ept: code clean up and formatting
# HG changeset patch
# User Patrick Colp <Patrick.Colp@citrix.com>
# Date 1249480778 -3600
# Node ID 91a30cffa1c4e0d52e36574159ef3a2c4f0bcfee
# Parent 9b2a92187e3cfac7753da4e4b1944e774cb01e6f
ept: code clean up and formatting.

Fix alignment and comments and add and remove spaces and lines where
appropriate.

Signed-off-by: Patrick Colp <Patrick.Colp@citrix.com>

diff -r 9b2a92187e3c -r 91a30cffa1c4 xen/arch/x86/hvm/mtrr.c
--- a/xen/arch/x86/hvm/mtrr.c   Wed Aug 05 14:59:38 2009 +0100
+++ b/xen/arch/x86/hvm/mtrr.c   Wed Aug 05 14:59:38 2009 +0100
@@ -711,9 +711,8 @@
 HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr,
                           1, HVMSR_PER_VCPU);
 
-uint8_t epte_get_entry_emt(
-    struct domain *d, unsigned long gfn,
-    mfn_t mfn, uint8_t *igmt, int direct_mmio)
+uint8_t epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
+                           uint8_t *igmt, int direct_mmio)
 {
     uint8_t gmtrr_mtype, hmtrr_mtype;
     uint32_t type;
diff -r 9b2a92187e3c -r 91a30cffa1c4 xen/arch/x86/mm/hap/p2m-ept.c
--- a/xen/arch/x86/mm/hap/p2m-ept.c     Wed Aug 05 14:59:38 2009 +0100
+++ b/xen/arch/x86/mm/hap/p2m-ept.c     Wed Aug 05 14:59:38 2009 +0100
@@ -75,7 +75,7 @@
     ept_entry->mfn = page_to_mfn(pg);
     ept_entry->rsvd = 0;
     ept_entry->avail2 = 0;
-    /* last step */
+
     ept_entry->r = ept_entry->w = ept_entry->x = 1;
 
     return 1;
@@ -85,7 +85,8 @@
                           ept_entry_t **table, unsigned long *gfn_remainder,
                           u32 shift, int order)
 {
-    ept_entry_t *ept_entry, *next;
+    ept_entry_t *ept_entry;
+    ept_entry_t *next;
     u32 index;
 
     index = *gfn_remainder >> shift;
@@ -127,17 +128,19 @@
                          unsigned int order, p2m_type_t p2mt)
 {
     ept_entry_t *table = NULL;
-    unsigned long gfn_remainder = gfn, offset = 0;
+    unsigned long gfn_remainder = gfn;
+    unsigned long offset = 0;
     ept_entry_t *ept_entry = NULL;
     u32 index;
-    int i, rv = 0, ret = 0;
+    int i;
+    int rv = 0;
+    int ret = 0;
     int walk_level = order / EPT_TABLE_ORDER;
     int direct_mmio = (p2mt == p2m_mmio_direct);
     uint8_t igmt = 0;
     int need_modify_vtd_table = 1;
 
-    /* we only support 4k and 2m pages now */
-
+    /* We only support 4k and 2m pages now */
     BUG_ON(order && order != EPT_TABLE_ORDER);
 
     if ( order != 0 )
@@ -166,10 +169,10 @@
 
     if ( ret != GUEST_TABLE_SPLIT_PAGE )
     {
-        if ( mfn_valid(mfn_x(mfn)) || (p2mt == p2m_mmio_direct) )
+        if ( mfn_valid(mfn_x(mfn)) || direct_mmio )
         {
-            ept_entry->emt = epte_get_entry_emt(d, gfn, mfn,
-                                &igmt, direct_mmio);
+            ept_entry->emt = epte_get_entry_emt(d, gfn, mfn, &igmt,
+                                                direct_mmio);
             ept_entry->igmt = igmt;
             ept_entry->sp_avail = walk_level ? 1 : 0;
 
@@ -180,10 +183,10 @@
             else
                 ept_entry->mfn = mfn_x(mfn) - offset;
 
-            if ( ept_entry->avail1 == p2m_ram_logdirty &&
-                 p2mt == p2m_ram_rw )
+            if ( (ept_entry->avail1 == p2m_ram_logdirty) &&
+                 (p2mt == p2m_ram_rw) )
                 for ( i = 0; i < 512; i++ )
-                    paging_mark_dirty(d, mfn_x(mfn)-offset+i);
+                    paging_mark_dirty(d, mfn_x(mfn) - offset + i);
 
         }
         else
@@ -196,7 +199,7 @@
             ept_entry->avail1 = p2mt;
             ept_entry->rsvd = 0;
             ept_entry->avail2 = 0;
-            /* last step */
+
             ept_p2m_type_to_flags(ept_entry, p2mt);
         }
         else
@@ -204,57 +207,54 @@
     }
     else
     {
-        /* It's super page before, now set one of the 4k pages, so
+        /*
+         * It's super page before, now set one of the 4k pages, so
          * we should split the 2m page to 4k pages now.
          */
-
         ept_entry_t *split_table = NULL;
         ept_entry_t *split_ept_entry = NULL;
         unsigned long split_mfn = ept_entry->mfn;
         p2m_type_t split_p2mt = ept_entry->avail1;
         ept_entry_t new_ept_entry;
 
-        /* alloc new page for new ept middle level entry which is
+        /*
+         * Allocate new page for new ept middle level entry which is
          * before a leaf super entry
          */
-
         if ( !ept_set_middle_entry(d, &new_ept_entry) )
             goto out;
 
-        /* split the super page before to 4k pages */
-
+        /* Split the super page before to 4k pages */
        split_table = map_domain_page(new_ept_entry.mfn);
         offset = gfn & ((1 << EPT_TABLE_ORDER) - 1);
 
         for ( i = 0; i < 512; i++ )
         {
             split_ept_entry = split_table + i;
-            split_ept_entry->emt = epte_get_entry_emt(d,
-                                        gfn-offset+i, _mfn(split_mfn+i),
-                                        &igmt, direct_mmio);
+            split_ept_entry->emt = epte_get_entry_emt(d, gfn - offset + i,
+                                                      _mfn(split_mfn + i),
+                                                      &igmt, direct_mmio);
             split_ept_entry->igmt = igmt;
-
             split_ept_entry->sp_avail = 0;
-
-            split_ept_entry->mfn = split_mfn+i;
-
+            split_ept_entry->mfn = split_mfn + i;
             split_ept_entry->avail1 = split_p2mt;
             split_ept_entry->rsvd = 0;
             split_ept_entry->avail2 = 0;
-            /* last step */
+
             ept_p2m_type_to_flags(split_ept_entry, split_p2mt);
         }
 
         /* Set the destinated 4k page as normal */
         split_ept_entry = split_table + offset;
-        split_ept_entry->emt = epte_get_entry_emt(d, gfn, mfn,
-                                    &igmt, direct_mmio);
+        split_ept_entry->emt = epte_get_entry_emt(d, gfn, mfn, &igmt,
+                                                  direct_mmio);
         split_ept_entry->igmt = igmt;
 
         if ( split_ept_entry->mfn == mfn_x(mfn) )
             need_modify_vtd_table = 0;
         else
             split_ept_entry->mfn = mfn_x(mfn);
+
         split_ept_entry->avail1 = p2mt;
         ept_p2m_type_to_flags(split_ept_entry, p2mt);
 
@@ -276,16 +276,14 @@
     ept_sync_domain(d);
 
     /* Now the p2m table is not shared with vt-d page table */
-
-    if ( iommu_enabled && is_hvm_domain(d)
-        && need_modify_vtd_table )
+    if ( iommu_enabled && is_hvm_domain(d) && need_modify_vtd_table )
     {
         if ( p2mt == p2m_ram_rw )
         {
             if ( order == EPT_TABLE_ORDER )
             {
-                for ( i = 0; i < ( 1 << order ); i++ )
-                    iommu_map_page(d, gfn-offset+i, mfn_x(mfn)-offset+i);
+                for ( i = 0; i < (1 << order); i++ )
+                    iommu_map_page(d, gfn - offset + i, mfn_x(mfn) - offset + i);
             }
             else if ( !order )
                 iommu_map_page(d, gfn, mfn_x(mfn));
@@ -294,8 +292,8 @@
         {
             if ( order == EPT_TABLE_ORDER )
             {
-                for ( i = 0; i < ( 1 << order ); i++ )
-                    iommu_unmap_page(d, gfn-offset+i);
+                for ( i = 0; i < (1 << order); i++ )
+                    iommu_unmap_page(d, gfn - offset + i);
             }
             else if ( !order )
                 iommu_unmap_page(d, gfn);
@@ -307,14 +305,15 @@
 
 /* Read ept p2m entries */
 static mfn_t ept_get_entry(struct domain *d, unsigned long gfn, p2m_type_t *t,
-                            p2m_query_t q)
+                           p2m_query_t q)
 {
     ept_entry_t *table =
         map_domain_page(mfn_x(pagetable_get_mfn(d->arch.phys_table)));
     unsigned long gfn_remainder = gfn;
     ept_entry_t *ept_entry;
     u32 index;
-    int i, ret=0;
+    int i;
+    int ret = 0;
     mfn_t mfn = _mfn(INVALID_MFN);
 
     *t = p2m_mmio_dm;
@@ -335,7 +334,7 @@
             break;
     }
 
-    index = gfn_remainder >> ( i * EPT_TABLE_ORDER);
+    index = gfn_remainder >> (i * EPT_TABLE_ORDER);
     ept_entry = table + index;
 
     if ( ept_entry->avail1 != p2m_invalid )
@@ -344,11 +343,13 @@
         mfn = _mfn(ept_entry->mfn);
         if ( i )
         {
-            /* we may meet super pages, and to split into 4k pages
+            /*
+             * We may meet super pages, and to split into 4k pages
              * to emulate p2m table
              */
-            unsigned long split_mfn =
-                mfn_x(mfn) + (gfn_remainder & ( ((1 << (i*EPT_TABLE_ORDER)) - 1 )));
+            unsigned long split_mfn = mfn_x(mfn) +
+                                      (gfn_remainder &
+                                       ((1 << (i * EPT_TABLE_ORDER)) - 1));
             mfn = _mfn(split_mfn);
         }
     }
@@ -365,9 +366,9 @@
     unsigned long gfn_remainder = gfn;
     ept_entry_t *ept_entry;
     uint64_t content = 0;
-
     u32 index;
-    int i, ret=0;
+    int i;
+    int ret=0;
 
     /* This pfn is higher than the highest the p2m map currently holds */
     if ( gfn > d->arch.p2m->max_mapped_pfn )
@@ -383,7 +384,7 @@
             break;
     }
 
-    index = gfn_remainder >> ( i * EPT_TABLE_ORDER);
+    index = gfn_remainder >> (i * EPT_TABLE_ORDER);
     ept_entry = table + index;
     content = ept_entry->epte;
 
@@ -398,39 +399,47 @@
     return ept_get_entry(current->domain, gfn, t, q);
 }
 
-/* To test if the new emt type is the same with old,
+/*
+ * To test if the new emt type is the same with old,
  * return 1 to not to reset ept entry.
  */
 static int need_modify_ept_entry(struct domain *d, unsigned long gfn,
-                                 mfn_t mfn, uint8_t o_igmt,
-                                 uint8_t o_emt, p2m_type_t p2mt)
+                                 mfn_t mfn, uint8_t o_igmt, uint8_t o_emt,
+                                 p2m_type_t p2mt)
 {
-    uint8_t igmt, emt;
-    emt = epte_get_entry_emt(d, gfn, mfn, &igmt,
-                             (p2mt == p2m_mmio_direct));
+    uint8_t igmt;
+    uint8_t emt;
+    int direct_mmio = (p2mt == p2m_mmio_direct);
+
+    emt = epte_get_entry_emt(d, gfn, mfn, &igmt, direct_mmio);
+
     if ( (emt == o_emt) && (igmt == o_igmt) )
         return 0;
+
     return 1;
 }
 
 void ept_change_entry_emt_with_range(struct domain *d, unsigned long start_gfn,
-                                      unsigned long end_gfn)
+                                     unsigned long end_gfn)
 {
     unsigned long gfn;
+    uint64_t epte;
     p2m_type_t p2mt;
-    uint64_t epte;
     int order = 0;
     mfn_t mfn;
-    uint8_t o_igmt, o_emt;
+    uint8_t o_igmt;
+    uint8_t o_emt;
 
     for ( gfn = start_gfn; gfn <= end_gfn; gfn++ )
     {
         epte = ept_get_entry_content(d, gfn);
         if ( epte == 0 )
             continue;
+
         mfn = _mfn((epte & EPTE_MFN_MASK) >> PAGE_SHIFT);
         if ( !mfn_valid(mfn_x(mfn)) )
             continue;
+
         p2mt = (epte & EPTE_AVAIL1_MASK) >> EPTE_AVAIL1_SHIFT;
         o_igmt = (epte & EPTE_IGMT_MASK) >> EPTE_IGMT_SHIFT;
         o_emt = (epte & EPTE_EMT_MASK) >> EPTE_EMT_SHIFT;
@@ -438,45 +447,50 @@
         if ( epte & EPTE_SUPER_PAGE_MASK )
         {
-            if ( !(gfn & ( (1 << EPT_TABLE_ORDER) - 1)) &&
-                 ((gfn + 0x1FF) <= end_gfn) )
+            if ( !(gfn & ((1 << EPT_TABLE_ORDER) - 1)) &&
+                 ((gfn + 0x1FF) <= end_gfn) )
             {
-                /* gfn assigned with 2M, and the end covers more than 2m areas.
+                /*
+                 * gfn assigned with 2M, and the end covers more than 2m areas.
                  * Set emt for super page.
                  */
 
                 order = EPT_TABLE_ORDER;
-                if ( need_modify_ept_entry(d, gfn, mfn,
-                                           o_igmt, o_emt, p2mt) )
+                if ( need_modify_ept_entry(d, gfn, mfn, o_igmt, o_emt, p2mt) )
                     ept_set_entry(d, gfn, mfn, order, p2mt);
                 gfn += 0x1FF;
             }
             else
             {
-                /* change emt for partial entries of the 2m area. */
-                if ( need_modify_ept_entry(d, gfn, mfn,
-                                           o_igmt, o_emt, p2mt) )
+                /* Change emt for partial entries of the 2m area. */
+                if ( need_modify_ept_entry(d, gfn, mfn, o_igmt, o_emt, p2mt) )
                     ept_set_entry(d, gfn, mfn, order, p2mt);
                 gfn = ((gfn >> EPT_TABLE_ORDER) << EPT_TABLE_ORDER) + 0x1FF;
             }
         }
         else /* gfn assigned with 4k */
         {
-            if ( need_modify_ept_entry(d, gfn, mfn,
-                                       o_igmt, o_emt, p2mt) )
+            if ( need_modify_ept_entry(d, gfn, mfn, o_igmt, o_emt, p2mt) )
                 ept_set_entry(d, gfn, mfn, order, p2mt);
         }
     }
 }
 
-/* Walk the whole p2m table, changing any entries of the old type
+/*
+ * Walk the whole p2m table, changing any entries of the old type
  * to the new type.  This is used in hardware-assisted paging to
- * quickly enable or diable log-dirty tracking */
-
-static void ept_change_entry_type_global(struct domain *d,
-                                         p2m_type_t ot, p2m_type_t nt)
+ * quickly enable or diable log-dirty tracking
+ */
+static void ept_change_entry_type_global(struct domain *d, p2m_type_t ot,
+                                         p2m_type_t nt)
 {
-    ept_entry_t *l4e, *l3e, *l2e, *l1e;
-    int i4, i3, i2, i1;
+    ept_entry_t *l4e;
+    ept_entry_t *l3e;
+    ept_entry_t *l2e;
+    ept_entry_t *l1e;
+    int i4;
+    int i3;
+    int i2;
+    int i1;
 
     if ( pagetable_get_pfn(d->arch.phys_table) == 0 )
         return;
@@ -488,6 +502,7 @@
     {
         if ( !l4e[i4].epte )
             continue;
+
         if ( !l4e[i4].sp_avail )
         {
             l3e = map_domain_page(l4e[i4].mfn);
@@ -495,6 +510,7 @@
         {
             if ( !l3e[i3].epte )
                 continue;
+
             if ( !l3e[i3].sp_avail )
             {
                 l2e = map_domain_page(l3e[i3].mfn);
@@ -502,18 +518,22 @@
                 {
                     if ( !l2e[i2].epte )
                         continue;
+
                     if ( !l2e[i2].sp_avail )
                     {
                         l1e = map_domain_page(l2e[i2].mfn);
+
                         for ( i1 = 0; i1 < EPT_PAGETABLE_ENTRIES; i1++ )
                         {
                             if ( !l1e[i1].epte )
                                 continue;
+
                             if ( l1e[i1].avail1 != ot )
                                 continue;
                             l1e[i1].avail1 = nt;
                             ept_p2m_type_to_flags(l1e+i1, nt);
                         }
+
                         unmap_domain_page(l1e);
                     }
                     else
@@ -524,6 +544,7 @@
                         ept_p2m_type_to_flags(l2e+i2, nt);
                     }
                 }
+
                 unmap_domain_page(l2e);
             }
             else
@@ -534,6 +555,7 @@
                     ept_p2m_type_to_flags(l3e+i3, nt);
                 }
             }
+
             unmap_domain_page(l3e);
         }
         else
@@ -544,6 +566,7 @@
                 ept_p2m_type_to_flags(l4e+i4, nt);
             }
         }
+
         unmap_domain_page(l4e);
 
         ept_sync_domain(d);
diff -r 9b2a92187e3c -r 91a30cffa1c4 xen/include/asm-x86/mtrr.h
--- a/xen/include/asm-x86/mtrr.h        Wed Aug 05 14:59:38 2009 +0100
+++ b/xen/include/asm-x86/mtrr.h        Wed Aug 05 14:59:38 2009 +0100
@@ -66,9 +66,8 @@
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
 extern u32 get_pat_flags(struct vcpu *v, u32 gl1e_flags, paddr_t gpaddr,
                          paddr_t spaddr, uint8_t gmtrr_mtype);
-extern uint8_t epte_get_entry_emt(
-    struct domain *d, unsigned long gfn, mfn_t mfn,
-    uint8_t *igmt, int direct_mmio);
+extern uint8_t epte_get_entry_emt(struct domain *d, unsigned long gfn,
+                                  mfn_t mfn, uint8_t *igmt, int direct_mmio);
 extern void ept_change_entry_emt_with_range(
     struct domain *d, unsigned long start_gfn, unsigned long end_gfn);
 extern unsigned char pat_type_2_pte_flags(unsigned char pat_type);
Keir Fraser
2009-Aug-06 08:18 UTC
Re: [Xen-devel] [PATCH 4 of 4] ept: code clean up and formatting
On 05/08/2009 15:00, "Patrick Colp" <Patrick.Colp@citrix.com> wrote:

> # HG changeset patch
> # User Patrick Colp <Patrick.Colp@citrix.com>
> # Date 1249480778 -3600
> # Node ID 91a30cffa1c4e0d52e36574159ef3a2c4f0bcfee
> # Parent 9b2a92187e3cfac7753da4e4b1944e774cb01e6f
> ept: code clean up and formatting.
>
> Fix alignment and comments and add and remove spaces and lines where
> appropriate.

This one didn't apply cleanly. Can you refresh against xen-unstable tip and
maybe send as an attachment (in case it's a garbling problem)?

Also is patch 1/4 by itself safe for 3.4 and 3.3 branches?

Thanks,
Keir
Patrick Colp
2009-Aug-06 08:34 UTC
Re: [Xen-devel] [PATCH 4 of 4] ept: code clean up and formatting
I just did a pull and there were no changes since I sent the patches. I'll
attach it in case it got garbled.

It should be safe to apply 1/4 to 3.3 and 3.4 (I just checked and it applied
fine with some offsetting in both cases).


Patrick


Keir Fraser wrote:
> On 05/08/2009 15:00, "Patrick Colp" <Patrick.Colp@citrix.com> wrote:
>
>> # HG changeset patch
>> # User Patrick Colp <Patrick.Colp@citrix.com>
>> # Date 1249480778 -3600
>> # Node ID 91a30cffa1c4e0d52e36574159ef3a2c4f0bcfee
>> # Parent 9b2a92187e3cfac7753da4e4b1944e774cb01e6f
>> ept: code clean up and formatting.
>>
>> Fix alignment and comments and add and remove spaces and lines where
>> appropriate.
>
> This one didn't apply cleanly. Can you refresh against xen-unstable tip and
> maybe send as an attachment (in case it's a garbling problem)?
>
> Also is patch 1/4 by itself safe for 3.4 and 3.3 branches?
>
> Thanks,
> Keir