RFC, expanding the monster patch with more call sites and bugfixes.

Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>

 xen/arch/x86/hvm/svm/svm.c |  20 +++++++-----------
 xen/arch/x86/mm.c          |  48 ++++++++++++++++++---------------------
 xen/common/memory.c        |   9 +++++++-
 xen/common/tmem_xen.c      |  26 +++++++++---------------
 xen/include/asm-x86/p2m.h  |  11 ----------
 xen/xsm/flask/hooks.c      |  19 ++++++++++++++---
 6 files changed, 60 insertions(+), 73 deletions(-)
Andres Lagar-Cavilla
2012-Apr-27 14:36 UTC
[PATCH 1 of 2] Use get page from gfn also in svm code
xen/arch/x86/hvm/svm/svm.c | 20 ++++++++------------
1 files changed, 8 insertions(+), 12 deletions(-)
And clean up some unnecessary uses of the locked get_gfn() interface.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
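For reviewers, the conversion applied at each call site boils down to the pattern sketched below. This is illustrative only, not code from the tree; d, gfn, p2mt and page stand for whatever the call site already uses.

    /* Old style: typed lookup plus an explicit page reference, with a
     * put_gfn() required on every exit path while the p2m entry is held. */
    mfn = mfn_x(get_gfn(d, gfn, &p2mt));
    if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), d) )
    {
        put_gfn(d, gfn);
        return -EINVAL;
    }
    /* ... use the frame ... */
    put_page(mfn_to_page(mfn));
    put_gfn(d, gfn);

    /* New style: get_page_from_gfn() hands back a referenced page (or
     * NULL) and does not leave the gfn locked, so only put_page() remains. */
    page = get_page_from_gfn(d, gfn, &p2mt, P2M_ALLOC);
    if ( !page || !p2m_is_ram(p2mt) )
    {
        if ( page )
            put_page(page);
        return -EINVAL;
    }
    /* ... use the frame ... */
    put_page(page);

Call sites that do not care about the p2m type pass NULL instead of &p2mt, as most of the hunks below do.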
diff -r 310e84676db3 -r 42634eca923f xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -232,8 +232,7 @@ static int svm_vmcb_save(struct vcpu *v,
static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
{
- unsigned long mfn = 0;
- p2m_type_t p2mt;
+ struct page_info *page = NULL;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
@@ -250,10 +249,10 @@ static int svm_vmcb_restore(struct vcpu
{
if ( c->cr0 & X86_CR0_PG )
{
- mfn = mfn_x(get_gfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt));
- if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
+ page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
+ if ( !page )
{
- put_gfn(v->domain, c->cr3 >> PAGE_SHIFT);
gdprintk(XENLOG_ERR, "Invalid CR3
value=0x%"PRIx64"\n",
c->cr3);
return -EINVAL;
@@ -263,9 +262,8 @@ static int svm_vmcb_restore(struct vcpu
if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
put_page(pagetable_get_page(v->arch.guest_table));
- v->arch.guest_table = pagetable_from_pfn(mfn);
- if ( c->cr0 & X86_CR0_PG )
- put_gfn(v->domain, c->cr3 >> PAGE_SHIFT);
+ v->arch.guest_table =
+ page ? pagetable_from_page(page) : pagetable_null();
}
v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
@@ -1321,8 +1319,7 @@ static void svm_do_nested_pgfault(struct
p2m = p2m_get_p2m(v);
_d.gpa = gpa;
_d.qualification = 0;
- mfn = get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL);
- __put_gfn(p2m, gfn);
+ mfn = __get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL,
0);
_d.mfn = mfn_x(mfn);
__trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
@@ -1343,8 +1340,7 @@ static void svm_do_nested_pgfault(struct
if ( p2m == NULL )
p2m = p2m_get_p2m(v);
/* Everything else is an error. */
- mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
- __put_gfn(p2m, gfn);
+ mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0);
gdprintk(XENLOG_ERR,
"SVM violation gpa %#"PRIpaddr", mfn %#lx, type
%i\n",
gpa, mfn_x(mfn), p2mt);
xen/arch/x86/mm.c | 48 ++++++++++++++++++----------------------------
xen/common/memory.c | 9 +++++++-
xen/common/tmem_xen.c | 26 +++++++++---------------
xen/include/asm-x86/p2m.h | 11 ----------
xen/xsm/flask/hooks.c | 19 ++++++++++++++---
5 files changed, 52 insertions(+), 61 deletions(-)
Cover more users in common/memory.c, arch/x86/mm.c, xsm and tmem.
Fix bugs in xsm for get_gfn_untyped and get_page_from_gfn.
Eliminate get_gfn_untyped altogether.
Add appropriate ifdef'ery in common code so that ARM doesn't whine.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
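The common-code and xsm changes all follow the shape below (again only a sketch, with placeholder names f, gfn, psid and rc roughly as in flask_update_va_mapping(); the real hunks follow):

    struct page_info *page = NULL;
    unsigned long mfn;

#ifdef CONFIG_X86
    /* x86: hold a reference on the backing page across the lookup. */
    page = get_page_from_gfn(f, gfn, NULL, P2M_ALLOC);
    mfn = page ? page_to_mfn(page) : INVALID_MFN;
#else
    /* Non-x86: plain translation, no reference taken or dropped. */
    mfn = gmfn_to_mfn(f, gfn);
#endif

    rc = get_mfn_sid(mfn, &psid);
    if ( page )
        put_page(page);

The XENMEM_remove_from_physmap path in common/memory.c uses the same #ifdef but keeps a get_gfn() lookup on x86, as shown in its hunk below.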
diff -r 42634eca923f -r a840e45febc2 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3731,18 +3731,17 @@ static int create_grant_pte_mapping(
adjust_guest_l1e(nl1e, d);
gmfn = pte_addr >> PAGE_SHIFT;
- mfn = get_gfn_untyped(d, gmfn);
-
- if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
+ page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+
+ if ( unlikely(!page) )
{
- put_gfn(d, gmfn);
MEM_LOG("Could not get page for normal update");
return GNTST_general_error;
}
+ mfn = page_to_mfn(page);
va = map_domain_page(mfn);
va = (void *)((unsigned long)va + ((unsigned long)pte_addr & ~PAGE_MASK));
- page = mfn_to_page(mfn);
if ( !page_lock(page) )
{
@@ -3773,7 +3772,6 @@ static int create_grant_pte_mapping(
failed:
unmap_domain_page(va);
put_page(page);
- put_gfn(d, gmfn);
return rc;
}
@@ -3788,18 +3786,17 @@ static int destroy_grant_pte_mapping(
l1_pgentry_t ol1e;
gmfn = addr >> PAGE_SHIFT;
- mfn = get_gfn_untyped(d, gmfn);
-
- if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
+ page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+
+ if ( unlikely(!page) )
{
- put_gfn(d, gmfn);
MEM_LOG("Could not get page for normal update");
return GNTST_general_error;
}
+ mfn = page_to_mfn(page);
va = map_domain_page(mfn);
va = (void *)((unsigned long)va + ((unsigned long)addr & ~PAGE_MASK));
- page = mfn_to_page(mfn);
if ( !page_lock(page) )
{
@@ -3844,7 +3841,6 @@ static int destroy_grant_pte_mapping(
failed:
unmap_domain_page(va);
put_page(page);
- put_gfn(d, gmfn);
return rc;
}
@@ -4367,11 +4363,12 @@ long set_gdt(struct vcpu *v,
/* Check the pages in the new GDT. */
for ( i = 0; i < nr_pages; i++ )
{
+ struct page_info *page;
pfns[i] = frames[i];
- mfn = frames[i] = get_gfn_untyped(d, frames[i]);
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), d, PGT_seg_desc_page) )
+ page = get_page_from_gfn(d, frames[i], NULL, P2M_ALLOC);
+ if ( !page || !get_page_type(page, PGT_seg_desc_page) )
+ {
+ if ( page )
+ put_page(page);
goto fail;
+ }
+ mfn = frames[i] = page_to_mfn(page);
}
/* Tear down the old GDT. */
@@ -4384,7 +4381,6 @@ long set_gdt(struct vcpu *v,
v->arch.pv_vcpu.gdt_frames[i] = frames[i];
l1e_write(&v->arch.perdomain_ptes[i],
l1e_from_pfn(frames[i], __PAGE_HYPERVISOR));
- put_gfn(d, pfns[i]);
}
xfree(pfns);
@@ -4394,7 +4390,6 @@ long set_gdt(struct vcpu *v,
while ( i-- > 0 )
{
put_page_and_type(mfn_to_page(frames[i]));
- put_gfn(d, pfns[i]);
}
xfree(pfns);
return -EINVAL;
@@ -4440,21 +4435,16 @@ long do_update_descriptor(u64 pa, u64 de
*(u64 *)&d = desc;
- mfn = get_gfn_untyped(dom, gmfn);
+ page = get_page_from_gfn(dom, gmfn, NULL, P2M_ALLOC);
if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
- !mfn_valid(mfn) ||
+ !page ||
!check_descriptor(dom, &d) )
{
- put_gfn(dom, gmfn);
+ if ( page )
+ put_page(page);
return -EINVAL;
}
-
- page = mfn_to_page(mfn);
- if ( unlikely(!get_page(page, dom)) )
- {
- put_gfn(dom, gmfn);
- return -EINVAL;
- }
+ mfn = page_to_mfn(page);
/* Check if the given frame is in use in an unsafe context. */
switch ( page->u.inuse.type_info & PGT_type_mask )
@@ -4482,7 +4472,6 @@ long do_update_descriptor(u64 pa, u64 de
out:
put_page(page);
- put_gfn(dom, gmfn);
return ret;
}
@@ -4529,6 +4518,7 @@ static int xenmem_add_to_physmap_once(
unsigned long gfn = 0; /* gcc ... */
unsigned long prev_mfn, mfn = 0, gpfn, idx;
int rc;
+ p2m_type_t p2mt;
switch ( xatp->space )
{
@@ -4617,7 +4607,7 @@ static int xenmem_add_to_physmap_once(
put_page(page);
/* Remove previously mapped page if it was present. */
- prev_mfn = get_gfn_untyped(d, xatp->gpfn);
+ prev_mfn = mfn_x(get_gfn(d, xatp->gpfn, &p2mt));
if ( mfn_valid(prev_mfn) )
{
if ( is_xen_heap_mfn(prev_mfn) )
diff -r 42634eca923f -r a840e45febc2 xen/common/memory.c
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -694,7 +694,14 @@ long do_memory_op(unsigned long cmd, XEN
domain_lock(d);
- mfn = get_gfn_untyped(d, xrfp.gpfn);
+#ifdef CONFIG_X86
+ {
+ p2m_type_t p2mt;
+ mfn = mfn_x(get_gfn(d, xrfp.gpfn, &p2mt));
+ }
+#else
+ mfn = gmfn_to_mfn(d, xrfp.gpfn);
+#endif
if ( mfn_valid(mfn) )
guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
diff -r 42634eca923f -r a840e45febc2 xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -107,30 +107,25 @@ static inline void cli_put_page(tmem_cli
static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
pfp_t **pcli_pfp, bool_t cli_write)
{
- unsigned long cli_mfn;
p2m_type_t t;
struct page_info *page;
- int ret;
- cli_mfn = mfn_x(get_gfn(current->domain, cmfn, &t));
- if ( t != p2m_ram_rw || !mfn_valid(cli_mfn) )
+ page = get_page_from_gfn(current->domain, cmfn, &t, P2M_ALLOC);
+ if ( !page || t != p2m_ram_rw )
{
- put_gfn(current->domain, (unsigned long) cmfn);
- return NULL;
+ if ( page )
+ put_page(page);
+ return NULL;
}
- page = mfn_to_page(cli_mfn);
- if ( cli_write )
- ret = get_page_and_type(page, current->domain, PGT_writable_page);
- else
- ret = get_page(page, current->domain);
- if ( !ret )
+
+ if ( cli_write && !get_page_type(page, PGT_writable_page) )
{
- put_gfn(current->domain, (unsigned long) cmfn);
+ put_page(page);
return NULL;
}
- *pcli_mfn = cli_mfn;
+
+ *pcli_mfn = page_to_mfn(page);
*pcli_pfp = (pfp_t *)page;
- return map_domain_page(cli_mfn);
+ return map_domain_page(*pcli_mfn);
}
static inline void cli_put_page(tmem_cli_mfn_t cmfn, void *cli_va, pfp_t *cli_pfp,
@@ -144,7 +139,6 @@ static inline void cli_put_page(tmem_cli
else
put_page((struct page_info *)cli_pfp);
unmap_domain_page(cli_va);
- put_gfn(current->domain, (unsigned long) cmfn);
}
#endif
diff -r 42634eca923f -r a840e45febc2 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -342,17 +342,6 @@ static inline mfn_t get_gfn_type(struct
#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), \
P2M_ALLOC | P2M_UNSHARE)
-/* Compatibility function exporting the old untyped interface */
-static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
-{
- mfn_t mfn;
- p2m_type_t t;
- mfn = get_gfn(d, gpfn, &t);
- if ( p2m_is_valid(t) )
- return mfn_x(mfn);
- return INVALID_MFN;
-}
-
/* Will release the p2m_lock for this gfn entry. */
void __put_gfn(struct p2m_domain *p2m, unsigned long gfn);
diff -r 42634eca923f -r a840e45febc2 xen/xsm/flask/hooks.c
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1318,7 +1318,7 @@ static int flask_mmu_normal_update(struc
struct domain_security_struct *dsec;
u32 fsid;
struct avc_audit_data ad;
- struct page_info *page;
+ struct page_info *page = NULL;
if (d != t)
rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__REMOTE_REMAP);
@@ -1334,9 +1334,12 @@ static int flask_mmu_normal_update(struc
map_perms |= MMU__MAP_WRITE;
AVC_AUDIT_DATA_INIT(&ad, MEMORY);
- page = get_page_from_gfn(f, l1e_get_pfn(l1e_from_intpte(fpte)), P2M_ALLOC);
+#ifdef CONFIG_X86
+ page = get_page_from_gfn(f, l1e_get_pfn(l1e_from_intpte(fpte)), NULL, P2M_ALLOC);
mfn = page ? page_to_mfn(page) : INVALID_MFN;
-
+#else
+ mfn = gmfn_to_mfn(f, l1e_get_pfn(l1e_from_intpte(fpte)));
+#endif
ad.sdom = d;
ad.tdom = f;
ad.memory.pte = fpte;
@@ -1373,6 +1376,7 @@ static int flask_update_va_mapping(struc
int rc = 0;
u32 psid;
u32 map_perms = MMU__MAP_READ;
+ struct page_info *page = NULL;
unsigned long mfn;
struct domain_security_struct *dsec;
@@ -1384,8 +1388,15 @@ static int flask_update_va_mapping(struc
dsec = d->ssid;
- mfn = get_gfn_untyped(f, l1e_get_pfn(pte));
+#ifdef CONFIG_X86
+ page = get_page_from_gfn(f, l1e_get_pfn(pte), NULL, P2M_ALLOC);
+ mfn = (page) ? page_to_mfn(page) : INVALID_MFN;
+#else
+ mfn = gmfn_to_mfn(f, l1e_get_pfn(pte));
+#endif
rc = get_mfn_sid(mfn, &psid);
+ if ( page )
+ put_page(page);
if ( rc )
return rc;