Andres Lagar-Cavilla
2012-Apr-27 21:16 UTC
[PATCH 0 of 4] RFC top up to experimental p2m rwlock patch
Extending the core idea from Tim's patch, and bug fixing galore.

Our testing works quite smoothly with this in place.

Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla>

 xen/arch/x86/mm.c               |    2 +-
 xen/arch/x86/mm/shadow/common.c |    5 -
 xen/include/asm-x86/mm.h        |    1 -
 xen/common/grant_table.c        |  231 ++++++++++++++++-----------------
 xen/arch/x86/hvm/svm/svm.c      |   20 +--
 xen/arch/x86/mm.c               |   48 +++-----
 xen/common/memory.c             |    9 +-
 xen/common/tmem_xen.c           |   26 +--
 xen/include/asm-x86/p2m.h       |   11 -
 xen/xsm/flask/hooks.c           |   19 ++-
 10 files changed, 155 insertions(+), 217 deletions(-)
Andres Lagar-Cavilla
2012-Apr-27 21:16 UTC
[PATCH 1 of 4] x86/mm: Eliminate _shadow_mode_refcounts
xen/arch/x86/mm.c | 2 +-
xen/arch/x86/mm/shadow/common.c | 5 -----
xen/include/asm-x86/mm.h | 1 -
3 files changed, 1 insertions(+), 7 deletions(-)
Replace it with the proper paging_mode_refcounts. This was causing spurious
and abundant verbiage from Xen for the
!get_page(page, d) && !get_page(page, dom_cow)
sequence in get_page_from_gfn.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
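For context, the sequence in question looks roughly like this (an illustrative sketch only, not lifted verbatim from get_page_from_gfn; the helper name is made up):

/* Illustrative sketch, not verbatim Xen code.  A shared page is owned by
 * dom_cow rather than the guest, so the first get_page() against d can
 * fail legitimately and the second attempt against dom_cow picks up the
 * reference.  get_page() prints its "Error pfn" message on failure, which
 * is why that message must be gated on paging_mode_refcounts(d) rather
 * than the shadow-only predicate. */
static struct page_info *example_get_ram_page(struct domain *d, unsigned long mfn)
{
    struct page_info *page = mfn_to_page(mfn);

    if ( !get_page(page, d) && !get_page(page, dom_cow) )
        return NULL;    /* neither the guest nor dom_cow owns the page */

    return page;        /* caller drops the reference with put_page() */
}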
diff -r 40938dc16dfa -r 00034349414e xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2167,7 +2167,7 @@ int get_page(struct page_info *page, str
if ( owner != NULL )
put_page(page);
- if ( !_shadow_mode_refcounts(domain) && !domain->is_dying )
+ if ( !paging_mode_refcounts(domain) && !domain->is_dying )
gdprintk(XENLOG_INFO,
"Error pfn %lx: rd=%p, od=%p, caf=%08lx, taf=%"
PRtype_info "\n",
diff -r 40938dc16dfa -r 00034349414e xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -105,11 +105,6 @@ static int __init shadow_audit_key_init(
__initcall(shadow_audit_key_init);
#endif /* SHADOW_AUDIT */
-int _shadow_mode_refcounts(struct domain *d)
-{
- return shadow_mode_refcounts(d);
-}
-
/**************************************************************************/
/* x86 emulator support for the shadow code
diff -r 40938dc16dfa -r 00034349414e xen/include/asm-x86/mm.h
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -331,7 +331,6 @@ static inline void *__page_to_virt(const
int free_page_type(struct page_info *page, unsigned long type,
int preemptible);
-int _shadow_mode_refcounts(struct domain *d);
int is_iomem_page(unsigned long mfn);
Andres Lagar-Cavilla
2012-Apr-27 21:16 UTC
[PATCH 2 of 4] Grant table: Adopt get_page_from_gfn
xen/common/grant_table.c | 231 +++++++++++++++++++---------------------------
1 files changed, 94 insertions(+), 137 deletions(-)
This requires some careful re-engineering of __get_paged_frame and its callers.
Functions that previously returned gfns to be put now return pages to be put.
Tested with Win7 + Citrix PV drivers guest, using speedtest for networking
(yes!) plus the loginVSI framework to constantly hit disk.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
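For reviewers, a minimal sketch of the new calling convention (the caller below is hypothetical; only the __get_paged_frame signature and the put_page() requirement come from the patch):

/* Hypothetical caller, for illustration only.  On GNTST_okay,
 * __get_paged_frame() now stores a referenced struct page_info in *page;
 * the caller releases it with put_page() instead of the old put_gfn().
 * On any error, *page is NULL and no reference is held. */
static int example_use_granted_frame(struct domain *rd, unsigned long gfn,
                                     int readonly)
{
    unsigned long frame;
    struct page_info *page;
    int rc = __get_paged_frame(gfn, &frame, &page, readonly, rd);

    if ( rc != GNTST_okay )
        return rc;          /* nothing to release on the error path */

    /* ... map/copy using frame here ... */

    put_page(page);         /* drop the reference __get_paged_frame took */
    return GNTST_okay;
}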
diff -r 00034349414e -r ea8642853601 xen/common/grant_table.c
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -107,18 +107,6 @@ static unsigned inline int max_nr_maptra
return (max_nr_grant_frames * MAX_MAPTRACK_TO_GRANTS_RATIO);
}
-#ifdef CONFIG_X86
-#define gfn_to_mfn_private(_d, _gfn) ({ \
- p2m_type_t __p2mt; \
- unsigned long __x; \
- __x = mfn_x(get_gfn_unshare((_d), (_gfn), &__p2mt)); \
- if ( p2m_is_shared(__p2mt) || !p2m_is_valid(__p2mt) ) \
- __x = INVALID_MFN; \
- __x; })
-#else
-#define gfn_to_mfn_private(_d, _gfn) gmfn_to_mfn(_d, _gfn)
-#endif
-
#define SHGNT_PER_PAGE_V1 (PAGE_SIZE / sizeof(grant_entry_v1_t))
#define shared_entry_v1(t, e) \
((t)->shared_v1[(e)/SHGNT_PER_PAGE_V1][(e)%SHGNT_PER_PAGE_V1])
@@ -141,41 +129,41 @@ shared_entry_header(struct grant_table *
#define active_entry(t, e) \
((t)->active[(e)/ACGNT_PER_PAGE][(e)%ACGNT_PER_PAGE])
-/* Check if the page has been paged out. If rc == GNTST_okay, caller must do put_gfn(rd, gfn) */
-static int __get_paged_frame(unsigned long gfn, unsigned long *frame, int readonly, struct domain *rd)
+/* Check if the page has been paged out, or needs unsharing.
+ If rc == GNTST_okay, *page contains the page struct with a ref taken.
+ Caller must do put_page(*page).
+ If any error, *page = NULL, *frame = INVALID_MFN, no ref taken. */
+static int __get_paged_frame(unsigned long gfn, unsigned long *frame, struct page_info **page,
+ int readonly, struct domain *rd)
{
int rc = GNTST_okay;
#if defined(P2M_PAGED_TYPES) || defined(P2M_SHARED_TYPES)
p2m_type_t p2mt;
- mfn_t mfn;
-
- if ( readonly )
- mfn = get_gfn(rd, gfn, &p2mt);
- else
+
+ *page = get_page_from_gfn(rd, gfn, &p2mt,
+ (readonly) ? P2M_ALLOC : P2M_UNSHARE);
+ if ( !(*page) )
{
- mfn = get_gfn_unshare(rd, gfn, &p2mt);
+ *frame = INVALID_MFN;
if ( p2m_is_shared(p2mt) )
+ return GNTST_eagain;
+ if ( p2m_is_paging(p2mt) )
{
- put_gfn(rd, gfn);
+ p2m_mem_paging_populate(rd, gfn);
return GNTST_eagain;
}
+ return GNTST_bad_page;
}
-
- if ( p2m_is_valid(p2mt) ) {
- *frame = mfn_x(mfn);
- if ( p2m_is_paging(p2mt) )
- {
- put_gfn(rd, gfn);
- p2m_mem_paging_populate(rd, gfn);
- rc = GNTST_eagain;
- }
- } else {
- put_gfn(rd, gfn);
- *frame = INVALID_MFN;
- rc = GNTST_bad_page;
+ *frame = page_to_mfn(*page);
+#else
+ *frame = gmfn_to_mfn(rd, gfn);
+ *page = mfn_valid(*frame) ? mfn_to_page(*frame) : NULL;
+ if ( (!(*page)) || (!get_page(*page, rd)) )
+ {
+ *frame = INVALID_MFN;
+ *page = NULL;
+ rc = GNTST_bad_page;
}
-#else
- *frame = readonly ? gmfn_to_mfn(rd, gfn) : gfn_to_mfn_private(rd, gfn);
#endif
return rc;
@@ -470,12 +458,11 @@ static void
__gnttab_map_grant_ref(
struct gnttab_map_grant_ref *op)
{
- struct domain *ld, *rd, *owner;
+ struct domain *ld, *rd, *owner = NULL;
struct vcpu *led;
int handle;
- unsigned long gfn = INVALID_GFN;
unsigned long frame = 0, nr_gets = 0;
- struct page_info *pg;
+ struct page_info *pg = NULL;
int rc = GNTST_okay;
u32 old_pin;
u32 act_pin;
@@ -573,13 +560,11 @@ __gnttab_map_grant_ref(
{
unsigned long frame;
- gfn = sha1 ? sha1->frame : sha2->full_page.frame;
- rc = __get_paged_frame(gfn, &frame, !!(op->flags & GNTMAP_readonly), rd);
+ unsigned long gfn = sha1 ? sha1->frame : sha2->full_page.frame;
+ rc = __get_paged_frame(gfn, &frame, &pg,
+ !!(op->flags & GNTMAP_readonly), rd);
if ( rc != GNTST_okay )
- {
- gfn = INVALID_GFN;
goto unlock_out_clear;
- }
act->gfn = gfn;
act->domid = ld->domain_id;
act->frame = frame;
@@ -606,9 +591,17 @@ __gnttab_map_grant_ref(
spin_unlock(&rd->grant_table->lock);
- pg = mfn_valid(frame) ? mfn_to_page(frame) : NULL;
-
- if ( !pg || (owner = page_get_owner_and_reference(pg)) == dom_io )
+ /* pg may be set, with a refcount included, from __get_paged_frame */
+ if ( !pg )
+ {
+ pg = mfn_valid(frame) ? mfn_to_page(frame) : NULL;
+ if ( pg )
+ owner = page_get_owner_and_reference(pg);
+ }
+ else
+ owner = page_get_owner(pg);
+
+ if ( !pg || (owner == dom_io) )
{
/* Only needed the reference to confirm dom_io ownership. */
if ( pg )
@@ -708,8 +701,6 @@ __gnttab_map_grant_ref(
op->handle = handle;
op->status = GNTST_okay;
- if ( gfn != INVALID_GFN )
- put_gfn(rd, gfn);
rcu_unlock_domain(rd);
return;
@@ -748,8 +739,6 @@ __gnttab_map_grant_ref(
gnttab_clear_flag(_GTF_reading, status);
unlock_out:
- if ( gfn != INVALID_GFN )
- put_gfn(rd, gfn);
spin_unlock(&rd->grant_table->lock);
op->status = rc;
put_maptrack_handle(ld->grant_table, handle);
@@ -1479,7 +1468,16 @@ gnttab_transfer(
return -EFAULT;
}
- mfn = gfn_to_mfn_private(d, gop.mfn);
+#ifdef CONFIG_X86
+ {
+ p2m_type_t __p2mt;
+ mfn = mfn_x(get_gfn_unshare(d, gop.mfn, &__p2mt));
+ if ( p2m_is_shared(__p2mt) || !p2m_is_valid(__p2mt) )
+ mfn = INVALID_MFN;
+ }
+#else
+ mfn = gmfn_to_mfn(d, gop.mfn)
+#endif
/* Check the passed page frame for basic validity. */
if ( unlikely(!mfn_valid(mfn)) )
@@ -1723,15 +1721,14 @@ static void __fixup_status_for_pin(const
}
/* Grab a frame number from a grant entry and update the flags and pin
- count as appropriate. Note that this does *not* update the page
- type or reference counts, and does not check that the mfn is
- actually valid. If *gfn != INVALID_GFN, and rc == GNTST_okay, then
- we leave this function holding the p2m entry for *gfn in *owning_domain */
+ count as appropriate. If rc == GNTST_okay, note that this *does*
+ take one ref count on the target page, stored in *page.
+ If there is any error, *page = NULL, no ref taken. */
static int
__acquire_grant_for_copy(
struct domain *rd, unsigned long gref, struct domain *ld, int readonly,
- unsigned long *frame, unsigned long *gfn, unsigned *page_off, unsigned *length,
- unsigned allow_transitive, struct domain **owning_domain)
+ unsigned long *frame, struct page_info **page,
+ unsigned *page_off, unsigned *length, unsigned allow_transitive)
{
grant_entry_v1_t *sha1;
grant_entry_v2_t *sha2;
@@ -1746,11 +1743,9 @@ __acquire_grant_for_copy(
unsigned trans_page_off;
unsigned trans_length;
int is_sub_page;
- struct domain *ignore;
s16 rc = GNTST_okay;
- *owning_domain = NULL;
- *gfn = INVALID_GFN;
+ *page = NULL;
spin_lock(&rd->grant_table->lock);
@@ -1827,14 +1822,13 @@ __acquire_grant_for_copy(
spin_unlock(&rd->grant_table->lock);
rc = __acquire_grant_for_copy(td, trans_gref, rd,
- readonly, &grant_frame, gfn,
- &trans_page_off, &trans_length,
- 0, &ignore);
+ readonly, &grant_frame, page,
+ &trans_page_off, &trans_length, 0);
spin_lock(&rd->grant_table->lock);
if ( rc != GNTST_okay ) {
__fixup_status_for_pin(act, status);
- rcu_unlock_domain(td);
+ rcu_unlock_domain(td);
spin_unlock(&rd->grant_table->lock);
return rc;
}
@@ -1846,56 +1840,49 @@ __acquire_grant_for_copy(
if ( act->pin != old_pin )
{
__fixup_status_for_pin(act, status);
- rcu_unlock_domain(td);
+ rcu_unlock_domain(td);
spin_unlock(&rd->grant_table->lock);
+ put_page(*page);
return __acquire_grant_for_copy(rd, gref, ld, readonly,
- frame, gfn, page_off, length,
- allow_transitive,
- owning_domain);
+ frame, page, page_off, length,
+ allow_transitive);
}
/* The actual remote remote grant may or may not be a
sub-page, but we always treat it as one because that
blocks mappings of transitive grants. */
is_sub_page = 1;
- *owning_domain = td;
act->gfn = -1ul;
}
else if ( sha1 )
{
- *gfn = sha1->frame;
- rc = __get_paged_frame(*gfn, &grant_frame, readonly, rd);
+ rc = __get_paged_frame(sha1->frame, &grant_frame, page, readonly, rd);
if ( rc != GNTST_okay )
goto unlock_out;
- act->gfn = *gfn;
+ act->gfn = sha1->frame;
is_sub_page = 0;
trans_page_off = 0;
trans_length = PAGE_SIZE;
- *owning_domain = rd;
}
else if ( !(sha2->hdr.flags & GTF_sub_page) )
{
- *gfn = sha2->full_page.frame;
- rc = __get_paged_frame(*gfn, &grant_frame, readonly, rd);
+ rc = __get_paged_frame(sha2->full_page.frame, &grant_frame, page, readonly, rd);
if ( rc != GNTST_okay )
goto unlock_out;
- act->gfn = *gfn;
+ act->gfn = sha2->full_page.frame;
is_sub_page = 0;
trans_page_off = 0;
trans_length = PAGE_SIZE;
- *owning_domain = rd;
}
else
{
- *gfn = sha2->sub_page.frame;
- rc = __get_paged_frame(*gfn, &grant_frame, readonly, rd);
+ rc = __get_paged_frame(sha2->sub_page.frame, &grant_frame, page, readonly, rd);
if ( rc != GNTST_okay )
goto unlock_out;
- act->gfn = *gfn;
+ act->gfn = sha2->sub_page.frame;
is_sub_page = 1;
trans_page_off = sha2->sub_page.page_off;
trans_length = sha2->sub_page.length;
- *owning_domain = rd;
}
if ( !act->pin )
@@ -1911,7 +1898,9 @@ __acquire_grant_for_copy(
}
else
{
- *owning_domain = rd;
+ ASSERT(mfn_valid(act->frame));
+ *page = mfn_to_page(act->frame);
+ (void)page_get_owner_and_reference(*page);
}
act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;
@@ -1930,11 +1919,11 @@ __gnttab_copy(
struct gnttab_copy *op)
{
struct domain *sd = NULL, *dd = NULL;
- struct domain *source_domain = NULL, *dest_domain = NULL;
- unsigned long s_frame, d_frame, s_gfn = INVALID_GFN, d_gfn = INVALID_GFN;
+ unsigned long s_frame, d_frame;
+ struct page_info *s_pg = NULL, *d_pg = NULL;
char *sp, *dp;
s16 rc = GNTST_okay;
- int have_d_grant = 0, have_s_grant = 0, have_s_ref = 0;
+ int have_d_grant = 0, have_s_grant = 0;
int src_is_gref, dest_is_gref;
if ( ((op->source.offset + op->len) > PAGE_SIZE) ||
@@ -1972,82 +1961,54 @@ __gnttab_copy(
{
unsigned source_off, source_len;
rc = __acquire_grant_for_copy(sd, op->source.u.ref, current->domain, 1,
- &s_frame, &s_gfn, &source_off, &source_len, 1,
- &source_domain);
+ &s_frame, &s_pg, &source_off, &source_len, 1);
if ( rc != GNTST_okay )
goto error_out;
have_s_grant = 1;
if ( op->source.offset < source_off ||
op->len > source_len )
- PIN_FAIL(error_put_s_gfn, GNTST_general_error,
+ PIN_FAIL(error_out, GNTST_general_error,
"copy source out of bounds: %d < %d || %d >
%d\n",
op->source.offset, source_off,
op->len, source_len);
}
else
{
-#ifdef CONFIG_X86
- s_gfn = op->source.u.gmfn;
- rc = __get_paged_frame(op->source.u.gmfn, &s_frame, 1, sd);
+ rc = __get_paged_frame(op->source.u.gmfn, &s_frame, &s_pg, 1, sd);
if ( rc != GNTST_okay )
- goto error_out;
-#else
- s_frame = gmfn_to_mfn(sd, op->source.u.gmfn);
-#endif
- source_domain = sd;
+ PIN_FAIL(error_out, rc,
+ "source frame %lx invalid.\n", s_frame);
}
- if ( unlikely(!mfn_valid(s_frame)) )
- PIN_FAIL(error_put_s_gfn, GNTST_general_error,
- "source frame %lx invalid.\n", s_frame);
- /* For the source frame, the page could still be shared, so
- * don't assume ownership by source_domain */
- if ( !page_get_owner_and_reference(mfn_to_page(s_frame)) )
- {
- if ( !sd->is_dying )
- gdprintk(XENLOG_WARNING, "Could not get src frame %lx\n", s_frame);
- rc = GNTST_general_error;
- goto error_put_s_gfn;
- }
- have_s_ref = 1;
if ( dest_is_gref )
{
unsigned dest_off, dest_len;
rc = __acquire_grant_for_copy(dd, op->dest.u.ref, current->domain, 0,
- &d_frame, &d_gfn, &dest_off, &dest_len, 1,
- &dest_domain);
+ &d_frame, &d_pg, &dest_off, &dest_len, 1);
if ( rc != GNTST_okay )
- goto error_put_s_gfn;
+ goto error_out;
have_d_grant = 1;
if ( op->dest.offset < dest_off ||
op->len > dest_len )
- PIN_FAIL(error_put_d_gfn, GNTST_general_error,
+ PIN_FAIL(error_out, GNTST_general_error,
"copy dest out of bounds: %d < %d || %d >
%d\n",
op->dest.offset, dest_off,
op->len, dest_len);
}
else
{
-#ifdef CONFIG_X86
- d_gfn = op->dest.u.gmfn;
- rc = __get_paged_frame(op->dest.u.gmfn, &d_frame, 0, dd);
+ rc = __get_paged_frame(op->dest.u.gmfn, &d_frame, &d_pg, 0, dd);
if ( rc != GNTST_okay )
- goto error_put_s_gfn;
-#else
- d_frame = gmfn_to_mfn(dd, op->dest.u.gmfn);
-#endif
- dest_domain = dd;
+ PIN_FAIL(error_out, rc,
+ "destination frame %lx invalid.\n", d_frame);
}
- if ( unlikely(!mfn_valid(d_frame)) )
- PIN_FAIL(error_put_d_gfn, GNTST_general_error,
- "destination frame %lx invalid.\n", d_frame);
- if ( !get_page_and_type(mfn_to_page(d_frame), dest_domain,
- PGT_writable_page) )
+
+ if ( !get_page_type(d_pg, PGT_writable_page) )
{
if ( !dd->is_dying )
gdprintk(XENLOG_WARNING, "Could not get dst frame %lx\n", d_frame);
rc = GNTST_general_error;
- goto error_put_d_gfn;
+ goto error_out;
}
sp = map_domain_page(s_frame);
@@ -2060,16 +2021,12 @@ __gnttab_copy(
gnttab_mark_dirty(dd, d_frame);
- put_page_and_type(mfn_to_page(d_frame));
- error_put_d_gfn:
- if ( (d_gfn != INVALID_GFN) && (dest_domain) )
- put_gfn(dest_domain, d_gfn);
- error_put_s_gfn:
- if ( (s_gfn != INVALID_GFN) && (source_domain) )
- put_gfn(source_domain, s_gfn);
+ put_page_type(d_pg);
error_out:
- if ( have_s_ref )
- put_page(mfn_to_page(s_frame));
+ if ( d_pg )
+ put_page(d_pg);
+ if ( s_pg )
+ put_page(s_pg);
if ( have_s_grant )
__release_grant_for_copy(sd, op->source.u.ref, 1);
if ( have_d_grant )
Andres Lagar-Cavilla
2012-Apr-27 21:16 UTC
[PATCH 3 of 4] Use get_page_from_gfn also in SVM code
xen/arch/x86/hvm/svm/svm.c | 20 ++++++++------------
1 files changed, 8 insertions(+), 12 deletions(-)
And clean up some unnecessary locked uses of get_gfn.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
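Roughly, the pattern the VMCB-restore path moves to is the following (a sketch assuming the 4.2-era get_page_from_gfn() API; the helper name is invented):

/* Hypothetical helper, for illustration only.  get_page_from_gfn() either
 * returns a page with a reference already taken or NULL, so there is no
 * separate put_gfn() step; the reference is dropped with put_page() when
 * the guest table is eventually torn down. */
static int example_restore_cr3(struct vcpu *v, uint64_t cr3)
{
    struct page_info *page =
        get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, NULL, P2M_ALLOC);

    if ( !page )
        return -EINVAL;     /* not RAM, or no reference could be taken */

    v->arch.guest_table = pagetable_from_page(page);
    return 0;
}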
diff -r ea8642853601 -r 07fda1825c29 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -232,8 +232,7 @@ static int svm_vmcb_save(struct vcpu *v,
static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
{
- unsigned long mfn = 0;
- p2m_type_t p2mt;
+ struct page_info *page = NULL;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
@@ -250,10 +249,10 @@ static int svm_vmcb_restore(struct vcpu
{
if ( c->cr0 & X86_CR0_PG )
{
- mfn = mfn_x(get_gfn(v->domain, c->cr3 >> PAGE_SHIFT, &p2mt));
- if ( !p2m_is_ram(p2mt) || !get_page(mfn_to_page(mfn), v->domain) )
+ page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT,
+ NULL, P2M_ALLOC);
+ if ( !page )
{
- put_gfn(v->domain, c->cr3 >> PAGE_SHIFT);
gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n",
c->cr3);
return -EINVAL;
@@ -263,9 +262,8 @@ static int svm_vmcb_restore(struct vcpu
if ( v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG )
put_page(pagetable_get_page(v->arch.guest_table));
- v->arch.guest_table = pagetable_from_pfn(mfn);
- if ( c->cr0 & X86_CR0_PG )
- put_gfn(v->domain, c->cr3 >> PAGE_SHIFT);
+ v->arch.guest_table =
+ page ? pagetable_from_page(page) : pagetable_null();
}
v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
@@ -1321,8 +1319,7 @@ static void svm_do_nested_pgfault(struct
p2m = p2m_get_p2m(v);
_d.gpa = gpa;
_d.qualification = 0;
- mfn = get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL);
- __put_gfn(p2m, gfn);
+ mfn = __get_gfn_type_access(p2m, gfn, &_d.p2mt, &p2ma, 0, NULL, 0);
_d.mfn = mfn_x(mfn);
__trace_var(TRC_HVM_NPF, 0, sizeof(_d), &_d);
@@ -1343,8 +1340,7 @@ static void svm_do_nested_pgfault(struct
if ( p2m == NULL )
p2m = p2m_get_p2m(v);
/* Everything else is an error. */
- mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL);
- __put_gfn(p2m, gfn);
+ mfn = __get_gfn_type_access(p2m, gfn, &p2mt, &p2ma, 0, NULL, 0);
gdprintk(XENLOG_ERR,
"SVM violation gpa %#"PRIpaddr", mfn %#lx, type
%i\n",
gpa, mfn_x(mfn), p2mt);
xen/arch/x86/mm.c | 48 ++++++++++++++++++----------------------------
xen/common/memory.c | 9 +++++++-
xen/common/tmem_xen.c | 26 +++++++++---------------
xen/include/asm-x86/p2m.h | 11 ----------
xen/xsm/flask/hooks.c | 19 ++++++++++++++---
5 files changed, 52 insertions(+), 61 deletions(-)
Replace get_gfn* calls in common/memory.c, arch/x86/mm.c, xsm, and tmem.
Fix bugs on xsm for get_gfn_untyped and get_page_from_gfn.
Eliminate altogether get_gfn_untyped.
Add appropriate ifdef'ery in common code so that ARM isn't trapped.
Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>
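For reference, the idiom that replaces the removed get_gfn_untyped() looks roughly like this (a sketch; the helper name and the non-x86 fallback are assumptions modelled on the hunks below):

/* Hypothetical helper, for illustration only.  On x86 the lookup takes a
 * page reference that must be dropped with put_page(); elsewhere the old
 * gmfn_to_mfn() lookup is kept behind an ifdef, mirroring the flask and
 * memory.c hunks in this patch. */
static unsigned long example_lookup_mfn(struct domain *d, unsigned long gfn)
{
    unsigned long mfn;
#ifdef CONFIG_X86
    struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC);

    mfn = page ? page_to_mfn(page) : INVALID_MFN;
    if ( page )
        put_page(page);     /* only the mfn value is needed here */
#else
    mfn = gmfn_to_mfn(d, gfn);
#endif
    return mfn;
}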
diff -r 07fda1825c29 -r 8674ecae829c xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3731,18 +3731,17 @@ static int create_grant_pte_mapping(
adjust_guest_l1e(nl1e, d);
gmfn = pte_addr >> PAGE_SHIFT;
- mfn = get_gfn_untyped(d, gmfn);
-
- if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
+ page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+
+ if ( unlikely(!page) )
{
- put_gfn(d, gmfn);
MEM_LOG("Could not get page for normal update");
return GNTST_general_error;
}
+ mfn = page_to_mfn(page);
va = map_domain_page(mfn);
va = (void *)((unsigned long)va + ((unsigned long)pte_addr & ~PAGE_MASK));
- page = mfn_to_page(mfn);
if ( !page_lock(page) )
{
@@ -3773,7 +3772,6 @@ static int create_grant_pte_mapping(
failed:
unmap_domain_page(va);
put_page(page);
- put_gfn(d, gmfn);
return rc;
}
@@ -3788,18 +3786,17 @@ static int destroy_grant_pte_mapping(
l1_pgentry_t ol1e;
gmfn = addr >> PAGE_SHIFT;
- mfn = get_gfn_untyped(d, gmfn);
-
- if ( unlikely(!get_page_from_pagenr(mfn, current->domain)) )
+ page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+
+ if ( unlikely(!page) )
{
- put_gfn(d, gmfn);
MEM_LOG("Could not get page for normal update");
return GNTST_general_error;
}
+ mfn = page_to_mfn(page);
va = map_domain_page(mfn);
va = (void *)((unsigned long)va + ((unsigned long)addr & ~PAGE_MASK));
- page = mfn_to_page(mfn);
if ( !page_lock(page) )
{
@@ -3844,7 +3841,6 @@ static int destroy_grant_pte_mapping(
failed:
unmap_domain_page(va);
put_page(page);
- put_gfn(d, gmfn);
return rc;
}
@@ -4367,11 +4363,12 @@ long set_gdt(struct vcpu *v,
/* Check the pages in the new GDT. */
for ( i = 0; i < nr_pages; i++ )
{
+ struct page_info *page;
pfns[i] = frames[i];
- mfn = frames[i] = get_gfn_untyped(d, frames[i]);
- if ( !mfn_valid(mfn) ||
- !get_page_and_type(mfn_to_page(mfn), d, PGT_seg_desc_page) )
+ page = get_page_from_gfn(d, frames[i], NULL, P2M_ALLOC);
+ if ( !page || !get_page_type(page, PGT_seg_desc_page) )
goto fail;
+ mfn = frames[i] = page_to_mfn(page);
}
/* Tear down the old GDT. */
@@ -4384,7 +4381,6 @@ long set_gdt(struct vcpu *v,
v->arch.pv_vcpu.gdt_frames[i] = frames[i];
l1e_write(&v->arch.perdomain_ptes[i],
l1e_from_pfn(frames[i], __PAGE_HYPERVISOR));
- put_gfn(d, pfns[i]);
}
xfree(pfns);
@@ -4394,7 +4390,6 @@ long set_gdt(struct vcpu *v,
while ( i-- > 0 )
{
put_page_and_type(mfn_to_page(frames[i]));
- put_gfn(d, pfns[i]);
}
xfree(pfns);
return -EINVAL;
@@ -4440,21 +4435,16 @@ long do_update_descriptor(u64 pa, u64 de
*(u64 *)&d = desc;
- mfn = get_gfn_untyped(dom, gmfn);
+ page = get_page_from_gfn(dom, gmfn, NULL, P2M_ALLOC);
if ( (((unsigned int)pa % sizeof(struct desc_struct)) != 0) ||
- !mfn_valid(mfn) ||
+ !page ||
!check_descriptor(dom, &d) )
{
- put_gfn(dom, gmfn);
+ if ( page )
+ put_page(page);
return -EINVAL;
}
-
- page = mfn_to_page(mfn);
- if ( unlikely(!get_page(page, dom)) )
- {
- put_gfn(dom, gmfn);
- return -EINVAL;
- }
+ mfn = page_to_mfn(page);
/* Check if the given frame is in use in an unsafe context. */
switch ( page->u.inuse.type_info & PGT_type_mask )
@@ -4482,7 +4472,6 @@ long do_update_descriptor(u64 pa, u64 de
out:
put_page(page);
- put_gfn(dom, gmfn);
return ret;
}
@@ -4529,6 +4518,7 @@ static int xenmem_add_to_physmap_once(
unsigned long gfn = 0; /* gcc ... */
unsigned long prev_mfn, mfn = 0, gpfn, idx;
int rc;
+ p2m_type_t p2mt;
switch ( xatp->space )
{
@@ -4617,7 +4607,7 @@ static int xenmem_add_to_physmap_once(
put_page(page);
/* Remove previously mapped page if it was present. */
- prev_mfn = get_gfn_untyped(d, xatp->gpfn);
+ prev_mfn = mfn_x(get_gfn(d, xatp->gpfn, &p2mt));
if ( mfn_valid(prev_mfn) )
{
if ( is_xen_heap_mfn(prev_mfn) )
diff -r 07fda1825c29 -r 8674ecae829c xen/common/memory.c
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -694,7 +694,14 @@ long do_memory_op(unsigned long cmd, XEN
domain_lock(d);
- mfn = get_gfn_untyped(d, xrfp.gpfn);
+#ifdef CONFIG_X86
+ {
+ p2m_type_t p2mt;
+ mfn = mfn_x(get_gfn(d, xrfp.gpfn, &p2mt));
+ }
+#else
+ mfn = gmfn_to_mfn(d, xrfp.gpfn);
+#endif
if ( mfn_valid(mfn) )
guest_physmap_remove_page(d, xrfp.gpfn, mfn, 0);
diff -r 07fda1825c29 -r 8674ecae829c xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -107,30 +107,25 @@ static inline void cli_put_page(tmem_cli
static inline void *cli_get_page(tmem_cli_mfn_t cmfn, unsigned long *pcli_mfn,
pfp_t **pcli_pfp, bool_t cli_write)
{
- unsigned long cli_mfn;
p2m_type_t t;
struct page_info *page;
- int ret;
- cli_mfn = mfn_x(get_gfn(current->domain, cmfn, &t));
- if ( t != p2m_ram_rw || !mfn_valid(cli_mfn) )
+ page = get_page_from_gfn(current->domain, cmfn, &t, P2M_ALLOC);
+ if ( !page || t != p2m_ram_rw )
{
- put_gfn(current->domain, (unsigned long) cmfn);
- return NULL;
+ if ( page )
+ put_page(page);
}
- page = mfn_to_page(cli_mfn);
- if ( cli_write )
- ret = get_page_and_type(page, current->domain, PGT_writable_page);
- else
- ret = get_page(page, current->domain);
- if ( !ret )
+
+ if ( cli_write && !get_page_type(page, PGT_writable_page) )
{
- put_gfn(current->domain, (unsigned long) cmfn);
+ put_page(page);
return NULL;
}
- *pcli_mfn = cli_mfn;
+
+ *pcli_mfn = page_to_mfn(page);
*pcli_pfp = (pfp_t *)page;
- return map_domain_page(cli_mfn);
+ return map_domain_page(*pcli_mfn);
}
static inline void cli_put_page(tmem_cli_mfn_t cmfn, void *cli_va, pfp_t *cli_pfp,
@@ -144,7 +139,6 @@ static inline void cli_put_page(tmem_cli
else
put_page((struct page_info *)cli_pfp);
unmap_domain_page(cli_va);
- put_gfn(current->domain, (unsigned long) cmfn);
}
#endif
diff -r 07fda1825c29 -r 8674ecae829c xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -350,17 +350,6 @@ static inline mfn_t get_gfn_type(struct
#define get_gfn_unshare(d, g, t) get_gfn_type((d), (g), (t), \
P2M_ALLOC | P2M_UNSHARE)
-/* Compatibility function exporting the old untyped interface */
-static inline unsigned long get_gfn_untyped(struct domain *d, unsigned long gpfn)
-{
- mfn_t mfn;
- p2m_type_t t;
- mfn = get_gfn(d, gpfn, &t);
- if ( p2m_is_valid(t) )
- return mfn_x(mfn);
- return INVALID_MFN;
-}
-
/* Will release the p2m_lock for this gfn entry. */
void __put_gfn(struct p2m_domain *p2m, unsigned long gfn);
diff -r 07fda1825c29 -r 8674ecae829c xen/xsm/flask/hooks.c
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1318,7 +1318,7 @@ static int flask_mmu_normal_update(struc
struct domain_security_struct *dsec;
u32 fsid;
struct avc_audit_data ad;
- struct page_info *page;
+ struct page_info *page = NULL;
if (d != t)
rc = domain_has_perm(d, t, SECCLASS_MMU, MMU__REMOTE_REMAP);
@@ -1334,9 +1334,12 @@ static int flask_mmu_normal_update(struc
map_perms |= MMU__MAP_WRITE;
AVC_AUDIT_DATA_INIT(&ad, MEMORY);
- page = get_page_from_gfn(f, l1e_get_pfn(l1e_from_intpte(fpte)), P2M_ALLOC);
+#if CONFIG_X86
+ page = get_page_from_gfn(f, l1e_get_pfn(l1e_from_intpte(fpte)), NULL, P2M_ALLOC);
mfn = page ? page_to_mfn(page) : INVALID_MFN;
-
+#else
+ mfn = gmfn_to_mfn(f, l1e_get_pfn(l1e_from_intpte(fpte)));
+#endif
ad.sdom = d;
ad.tdom = f;
ad.memory.pte = fpte;
@@ -1373,6 +1376,7 @@ static int flask_update_va_mapping(struc
int rc = 0;
u32 psid;
u32 map_perms = MMU__MAP_READ;
+ struct page_info *page = NULL;
unsigned long mfn;
struct domain_security_struct *dsec;
@@ -1384,8 +1388,15 @@ static int flask_update_va_mapping(struc
dsec = d->ssid;
- mfn = get_gfn_untyped(f, l1e_get_pfn(pte));
+#if CONFIG_X86
+ page = get_page_from_gfn(f, l1e_get_pfn(pte), NULL, P2M_ALLOC);
+ mfn = (page) ? page_to_mfn(page) : INVALID_MFN;
+#else
+ mfn = gmfn_to_mfn(f, l1e_get_pfn(pte));
+#endif
rc = get_mfn_sid(mfn, &psid);
+ if ( page )
+ put_page(page);
if ( rc )
return rc;
Tim Deegan
2012-May-03 16:19 UTC
Re: [PATCH 0 of 4] RFC top up to experimental p2m rwlock patch
At 17:16 -0400 on 27 Apr (1335546977), Andres Lagar-Cavilla wrote:
> Extending the core idea from Tim's patch, and bug fixing galore.
>
> Our testing works quite smoothly with this in place.
>
> Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla>

Thanks very much for that. I had intended to merge this into a
cleaned-up series based on the previous patch today, but other things
came up, so I'm afraid it will have to wait until next week.

Cheers,

Tim.
Tim Deegan
2012-May-03 16:24 UTC
Re: [PATCH 1 of 4] x86/mm: Eliminate _shadow_mode_refcounts
At 17:16 -0400 on 27 Apr (1335546978), Andres Lagar-Cavilla wrote:
>  xen/arch/x86/mm.c               |  2 +-
>  xen/arch/x86/mm/shadow/common.c |  5 -----
>  xen/include/asm-x86/mm.h        |  1 -
>  3 files changed, 1 insertions(+), 7 deletions(-)
>
> Replace it with the proper paging_mode_refcounts. This was causing spurious
> and abundant verbiage from Xen for the
>
> !get_page(page, d) && !get_page(page, dom_cow)
>
> sequence in get_page_from_gfn.
>
> Signed-off-by: Andres Lagar-Cavilla <andres@lagarcavilla.org>

Applied, thanks.

Tim.