In this patch, I introduce a new type, p2m_map_foreign, for pages that
dom0 maps from the foreign domains it is creating. Also, add
set_foreign_p2m_entry() to map p2m_map_foreign type pages, plus other
miscellaneous p2m changes.
Changes in V2:
- Make guest_physmap_add_entry() same for PVH in terms of overwriting old
entry.
- In set_foreign_p2m_entry() do locked get_gfn and not unlocked.
- Replace ASSERT with return -EINVAL in do_physdev_op.
- Remove unnecessary check for PVH in do_physdev_op().
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
---
xen/arch/x86/domctl.c | 19 +++++++++++++------
xen/arch/x86/mm/p2m-ept.c | 3 ++-
xen/arch/x86/mm/p2m-pt.c | 3 ++-
xen/arch/x86/mm/p2m.c | 30 +++++++++++++++++++++++++++++-
xen/arch/x86/physdev.c | 8 ++++++++
xen/include/asm-x86/p2m.h | 4 ++++
6 files changed, 58 insertions(+), 9 deletions(-)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index ecc8240..da49d6d 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -64,9 +64,10 @@ long domctl_memory_mapping(struct domain *d, unsigned long gfn,
if ( add_map )
{
- printk(XENLOG_G_INFO
- "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
- d->domain_id, gfn, mfn, nr_mfns);
+ if ( !is_pvh_domain(d) ) /* PVH maps lots and lots */
+ printk(XENLOG_G_INFO
+ "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
+ d->domain_id, gfn, mfn, nr_mfns);
ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
if ( !ret && paging_mode_translate(d) )
@@ -89,9 +90,10 @@ long domctl_memory_mapping(struct domain *d, unsigned long gfn,
}
}
} else {
- printk(XENLOG_G_INFO
- "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
- d->domain_id, gfn, mfn, nr_mfns);
+ if ( !is_pvh_domain(d) ) /* PVH unmaps lots and lots */
+ printk(XENLOG_G_INFO
+                   "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
+ d->domain_id, gfn, mfn, nr_mfns);
if ( paging_mode_translate(d) )
for ( i = 0; i < nr_mfns; i++ )
@@ -1307,6 +1309,11 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
c.nat->gs_base_kernel = hvm_get_shadow_gs_base(v);
}
}
+ else if ( is_pvh_vcpu(v) )
+ {
+ /* fixme: punt it to phase II */
+ printk("PVH: fixme: arch_get_info_guest()\n");
+ }
else
{
c(ldt_base = v->arch.pv_vcpu.ldt_base);
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index a2d1591..38ea9ec 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -75,6 +75,7 @@ static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_acces
entry->w = 0;
break;
case p2m_grant_map_rw:
+ case p2m_map_foreign:
entry->r = entry->w = 1;
entry->x = 0;
break;
@@ -430,7 +431,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
}
/* Track the highest gfn for which we have ever had a valid mapping */
- if ( p2mt != p2m_invalid &&
+ if ( p2mt != p2m_invalid && p2mt != p2m_mmio_dm &&
(gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 302b621..3f46418 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -89,6 +89,7 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
case p2m_ram_rw:
return flags | P2M_BASE_FLAGS | _PAGE_RW;
case p2m_grant_map_rw:
+ case p2m_map_foreign:
return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
case p2m_mmio_direct:
if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
@@ -429,7 +430,7 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
}
/* Track the highest gfn for which we have ever had a valid mapping */
- if ( p2mt != p2m_invalid
+ if ( p2mt != p2m_invalid && p2mt != p2m_mmio_dm
         && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 4837de3..6888cf1 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -523,7 +523,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
for ( i = 0; i < (1UL << page_order); i++ )
{
            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
- if ( !p2m_is_grant(t) && !p2m_is_shared(t) )
+        if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
}
@@ -754,7 +754,35 @@ void p2m_change_type_range(struct domain *d,
p2m_unlock(p2m);
}
+/* Returns: True for success. 0 for failure */
+int set_foreign_p2m_entry(struct domain *dp, unsigned long gfn, mfn_t mfn)
+{
+ int rc = 0;
+ p2m_type_t ot;
+ mfn_t omfn;
+ struct p2m_domain *p2m = p2m_get_hostp2m(dp);
+
+ if ( !paging_mode_translate(dp) )
+ return 0;
+
+ omfn = get_gfn_query(dp, gfn, &ot);
+ if (mfn_valid(omfn)) {
+ gdprintk(XENLOG_ERR, "Already mapped mfn %lx at gfn:%lx\n",
+ mfn_x(omfn), gfn);
+ set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
+ }
+ put_gfn(dp, gfn);
+ P2M_DEBUG("set foreign %lx %lx\n", gfn, mfn_x(mfn));
+ p2m_lock(p2m);
+    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_map_foreign, p2m->default_access);
+ p2m_unlock(p2m);
+ if ( rc == 0 )
+ gdprintk(XENLOG_ERR,
+                 "set_foreign_p2m_entry: set_p2m_entry failed! gfn:%lx mfn=%08lx\n",
+ gfn, mfn_x(get_gfn_query(dp, gfn, &ot)));
+ return rc;
+}
int
set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index 876ac9d..ca66c1c 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -475,6 +475,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case PHYSDEVOP_set_iopl: {
struct physdev_set_iopl set_iopl;
+
+ if ( is_pvh_vcpu(current) ) {
+ ret = -EINVAL;
+ break;
+ }
+
ret = -EFAULT;
if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
break;
@@ -488,6 +494,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
case PHYSDEVOP_set_iobitmap: {
struct physdev_set_iobitmap set_iobitmap;
+
+ ASSERT( !is_pvh_vcpu(current) );
ret = -EFAULT;
if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
break;
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 43583b2..b76dc33 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -70,6 +70,7 @@ typedef enum {
p2m_ram_paging_in = 11, /* Memory that is being paged in */
p2m_ram_shared = 12, /* Shared or sharable memory */
p2m_ram_broken = 13, /* Broken page, access cause domain crash */
+ p2m_map_foreign = 14, /* ram pages from foreign domain */
} p2m_type_t;
/*
@@ -180,6 +181,7 @@ typedef unsigned int p2m_query_t;
#define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
#define p2m_is_shared(_t) (p2m_to_mask(_t) & P2M_SHARED_TYPES)
#define p2m_is_broken(_t) (p2m_to_mask(_t) & P2M_BROKEN_TYPES)
+#define p2m_is_foreign(_t)  (p2m_to_mask(_t) & p2m_to_mask(p2m_map_foreign))
/* Per-p2m-table state */
struct p2m_domain {
@@ -510,6 +512,8 @@ p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
+/* Set foreign mfn in the current guest's p2m table (for pvh dom0) */
+int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);
/*
* Populate-on-demand
--
1.7.2.3
Jan Beulich
2013-Mar-18 12:32 UTC
Re: [PATCH 13/18 V2]: PVH xen: introduce p2m_map_foreign
>>> On 16.03.13 at 01:51, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> @@ -1307,6 +1309,11 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
>                 c.nat->gs_base_kernel = hvm_get_shadow_gs_base(v);
>             }
>         }
> +    else if ( is_pvh_vcpu(v) )
> +    {
> +        /* fixme: punt it to phase II */
> +        printk("PVH: fixme: arch_get_info_guest()\n");
> +    }

This has hardly anything to do with the patch at hand.

> @@ -430,7 +431,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
>     }
>
>     /* Track the highest gfn for which we have ever had a valid mapping */
> -    if ( p2mt != p2m_invalid &&
> +    if ( p2mt != p2m_invalid && p2mt != p2m_mmio_dm &&

Considering the subject of the patch, was this perhaps meant to be
p2m_map_foreign?

> @@ -429,7 +430,7 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
>     }
>
>     /* Track the highest gfn for which we have ever had a valid mapping */
> -    if ( p2mt != p2m_invalid
> +    if ( p2mt != p2m_invalid && p2mt != p2m_mmio_dm

Same here.

> --- a/xen/arch/x86/physdev.c
> +++ b/xen/arch/x86/physdev.c
> @@ -475,6 +475,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>
>     case PHYSDEVOP_set_iopl: {
>         struct physdev_set_iopl set_iopl;
> +
> +        if ( is_pvh_vcpu(current) ) {
> +            ret = -EINVAL;
> +            break;
> +        }
> +

Again unrelated to the subject of the patch.

> @@ -488,6 +494,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>
>     case PHYSDEVOP_set_iobitmap: {
>         struct physdev_set_iobitmap set_iobitmap;
> +
> +        ASSERT( !is_pvh_vcpu(current) );

Again.

Jan
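The condition Jan is querying is the max_mapped_pfn tracking check in
ept_set_entry() (and its p2m-pt.c counterpart). A minimal sketch of the
variant he raises, excluding the new foreign type rather than p2m_mmio_dm,
is shown below purely to illustrate the review comment; it is not code from
the posted patch, and whether this is the intended exclusion is exactly the
open question:

    /* Track the highest gfn for which we have ever had a valid mapping */
    if ( p2mt != p2m_invalid && p2mt != p2m_map_foreign &&
         (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
        p2m->max_mapped_pfn = gfn + (1UL << order) - 1;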
Jan Beulich
2013-Mar-18 12:35 UTC
Re: [PATCH 13/18 V2]: PVH xen: introduce p2m_map_foreign
>>> On 16.03.13 at 01:51, Mukesh Rathor <mukesh.rathor@oracle.com> wrote:
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -64,9 +64,10 @@ long domctl_memory_mapping(struct domain *d, unsigned long gfn,
>
>     if ( add_map )
>     {
> -        printk(XENLOG_G_INFO
> -               "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> -               d->domain_id, gfn, mfn, nr_mfns);
> +        if ( !is_pvh_domain(d) ) /* PVH maps lots and lots */
> +            printk(XENLOG_G_INFO
> +                   "memory_map:add: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> +                   d->domain_id, gfn, mfn, nr_mfns);
>
>         ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
>         if ( !ret && paging_mode_translate(d) )
> @@ -89,9 +90,10 @@ long domctl_memory_mapping(struct domain *d, unsigned long gfn,
>             }
>         }
>     } else {
> -        printk(XENLOG_G_INFO
> -               "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> -               d->domain_id, gfn, mfn, nr_mfns);
> +        if ( !is_pvh_domain(d) ) /* PVH unmaps lots and lots */
> +            printk(XENLOG_G_INFO
> +                   "memory_map:remove: dom%d gfn=%lx mfn=%lx nr=%lx\n",
> +                   d->domain_id, gfn, mfn, nr_mfns);
>
>         if ( paging_mode_translate(d) )
>             for ( i = 0; i < nr_mfns; i++ )

While these changes looked related to the subject at a first glance, with
the patch not adding the promised new caller of the function I don't see
the relation of the changes above to the rest of the patch.

Jan
Konrad Rzeszutek Wilk
2013-Mar-18 17:54 UTC
Re: [PATCH 13/18 V2]: PVH xen: introduce p2m_map_foreign
On Fri, Mar 15, 2013 at 05:51:09PM -0700, Mukesh Rathor wrote:
> In this patch, I introduce a new type, p2m_map_foreign, for pages that
> dom0 maps from the foreign domains it is creating. Also, add
> set_foreign_p2m_entry() to map p2m_map_foreign type pages, plus other
> miscellaneous p2m changes.
>
> Changes in V2:
>   - Make guest_physmap_add_entry() same for PVH in terms of overwriting old
>     entry.
>   - In set_foreign_p2m_entry() do locked get_gfn and not unlocked.
>   - Replace ASSERT with return -EINVAL in do_physdev_op.
>   - Remove unnecessary check for PVH in do_physdev_op().
>

You should really CC Tim on this patch. Doing it for you.

[...]

> +/* Returns: True for success. 0 for failure */

You mean 1 for success? If so, why not just use a bool?

[...]

> --- a/xen/arch/x86/physdev.c
> +++ b/xen/arch/x86/physdev.c
> @@ -475,6 +475,12 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>
>     case PHYSDEVOP_set_iopl: {
>         struct physdev_set_iopl set_iopl;
> +
> +        if ( is_pvh_vcpu(current) ) {
> +            ret = -EINVAL;

That looks like it belongs to a different patch - one that filters which
of the hypercalls should NOT be called when doing PVH. And perhaps
-ENOSYS?

> +            break;
> +        }
> +
>         ret = -EFAULT;
>         if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
>             break;
> @@ -488,6 +494,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>
>     case PHYSDEVOP_set_iobitmap: {
>         struct physdev_set_iobitmap set_iobitmap;
> +
> +        ASSERT( !is_pvh_vcpu(current) );

Ouch! Why not the same treatment as previously? return -ENOSYS.

[...]
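A separate filtering patch of the kind Konrad describes could, for example,
centralise the PVH check rather than open-coding it per sub-op. The sketch
below is hypothetical and based only on his comment; the sub-op names and
is_pvh_vcpu() come from the patch above, and the -ENOSYS value is his
suggestion, not an actual posted change:

    /* Hypothetical sketch: reject io-permission sub-ops for PVH vcpus
     * in one place, before the main switch in do_physdev_op(). */
    switch ( cmd )
    {
    case PHYSDEVOP_set_iopl:
    case PHYSDEVOP_set_iobitmap:
        if ( is_pvh_vcpu(current) )
            return -ENOSYS;
        break;
    default:
        break;
    }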
Tim Deegan
Re: [PATCH 13/18 V2]: PVH xen: introduce p2m_map_foreign
At 17:51 -0700 on 15 Mar (1363369869), Mukesh Rathor wrote:
> In this patch, I introduce a new type, p2m_map_foreign, for pages that
> dom0 maps from the foreign domains it is creating. Also, add
> set_foreign_p2m_entry() to map p2m_map_foreign type pages, plus other
> miscellaneous p2m changes.

First of all, as Jan and Konrad have pointed out, there seem to be a lot
of unrelated changes in here, which need to be separated out.

> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index a2d1591..38ea9ec 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -75,6 +75,7 @@ static void ept_p2m_type_to_flags(ept_entry_t *entry, p2m_type_t type, p2m_acces
>             entry->w = 0;
>             break;
>         case p2m_grant_map_rw:
> +        case p2m_map_foreign:
>             entry->r = entry->w = 1;
>             entry->x = 0;
>             break;
> @@ -430,7 +431,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
>     }
>
>     /* Track the highest gfn for which we have ever had a valid mapping */
> -    if ( p2mt != p2m_invalid &&
> +    if ( p2mt != p2m_invalid && p2mt != p2m_mmio_dm &&
>          (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
>         p2m->max_mapped_pfn = gfn + (1UL << order) - 1;

Is this relevant to the p2m_map_foreign stuff?

> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index 302b621..3f46418 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -89,6 +89,7 @@ static unsigned long p2m_type_to_flags(p2m_type_t t, mfn_t mfn)
>     case p2m_ram_rw:
>         return flags | P2M_BASE_FLAGS | _PAGE_RW;
>     case p2m_grant_map_rw:
> +    case p2m_map_foreign:
>         return flags | P2M_BASE_FLAGS | _PAGE_RW | _PAGE_NX_BIT;
>     case p2m_mmio_direct:
>         if ( !rangeset_contains_singleton(mmio_ro_ranges, mfn_x(mfn)) )
> @@ -429,7 +430,7 @@ p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
>     }
>
>     /* Track the highest gfn for which we have ever had a valid mapping */
> -    if ( p2mt != p2m_invalid
> +    if ( p2mt != p2m_invalid && p2mt != p2m_mmio_dm

Likewise.

>          && (gfn + (1UL << page_order) - 1 > p2m->max_mapped_pfn) )
>         p2m->max_mapped_pfn = gfn + (1UL << page_order) - 1;
>
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 4837de3..6888cf1 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -523,7 +523,7 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
>         for ( i = 0; i < (1UL << page_order); i++ )
>         {
>             mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
> -            if ( !p2m_is_grant(t) && !p2m_is_shared(t) )
> +            if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
>                 set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
>             ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
>         }
> @@ -754,7 +754,35 @@ void p2m_change_type_range(struct domain *d,
>     p2m_unlock(p2m);
>  }
>
> +/* Returns: True for success. 0 for failure */
> +int set_foreign_p2m_entry(struct domain *dp, unsigned long gfn, mfn_t mfn)
> +{
> +    int rc = 0;
> +    p2m_type_t ot;
> +    mfn_t omfn;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(dp);
> +
> +    if ( !paging_mode_translate(dp) )
> +        return 0;
> +
> +    omfn = get_gfn_query(dp, gfn, &ot);
> +    if (mfn_valid(omfn)) {
> +        gdprintk(XENLOG_ERR, "Already mapped mfn %lx at gfn:%lx\n",
> +                 mfn_x(omfn), gfn);
> +        set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);

That needs to be gated on 'ot' -- you should only be doing this for
plain RAM owned by the caller.

> +    }
> +    put_gfn(dp, gfn);

Also, this put_gfn is too early. Now there's a race where another CPU
could populate that GFN again before your set_p2m_entry().

AFAICT once you've done that this function will look so similar to
set_mmio_p2m_entry that they should be refactored to extract the
common parts.

> +    P2M_DEBUG("set foreign %lx %lx\n", gfn, mfn_x(mfn));
> +    p2m_lock(p2m);
> +    rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_map_foreign, p2m->default_access);
> +    p2m_unlock(p2m);
> +    if ( rc == 0 )
> +        gdprintk(XENLOG_ERR,
> +                 "set_foreign_p2m_entry: set_p2m_entry failed! gfn:%lx mfn=%08lx\n",
> +                 gfn, mfn_x(get_gfn_query(dp, gfn, &ot)));
> +    return rc;
> +}

> @@ -510,6 +512,8 @@ p2m_type_t p2m_change_type(struct domain *d, unsigned long gfn,
>  int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn);
>  int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn);
>
> +/* Set foreign mfn in the current guest's p2m table (for pvh dom0) */
> +int set_foreign_p2m_entry(struct domain *domp, unsigned long gfn, mfn_t mfn);

I don't think it's specific to dom 0. Certainly it doesn't have any
access controls in it.

Cheers,

Tim.
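Taken together, Tim's comments point at a version of set_foreign_p2m_entry()
that keeps the gfn locked across the whole update, only clears the M2P entry
for plain RAM owned by the caller, and shares its shape with
set_mmio_p2m_entry(). The sketch below is a reconstruction from the review
comments, not the actual follow-up patch: the gfn_lock()/gfn_unlock() usage
and the p2m_is_ram()/p2m_is_grant() gating are assumptions modeled on how
set_mmio_p2m_entry() was structured at the time.

    /* Sketch only: returns nonzero on success, 0 on failure, mirroring
     * the set_p2m_entry() convention used by the patch. */
    int set_foreign_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
    {
        int rc;
        p2m_type_t ot;
        p2m_access_t a;
        mfn_t omfn;
        struct p2m_domain *p2m = p2m_get_hostp2m(d);

        if ( !paging_mode_translate(d) )
            return 0;

        /* Hold the gfn lock across both the query and the update, so no
         * other CPU can repopulate the gfn in between (the race Tim
         * points out with the early put_gfn()). */
        gfn_lock(p2m, gfn, 0);
        omfn = p2m->get_entry(p2m, gfn, &ot, &a, 0, NULL);

        /* Refuse to clobber grant or foreign mappings; only tear down the
         * M2P entry for plain RAM owned by the caller. */
        if ( p2m_is_grant(ot) || p2m_is_foreign(ot) )
        {
            gfn_unlock(p2m, gfn, 0);
            return 0;
        }
        if ( p2m_is_ram(ot) )
        {
            ASSERT(mfn_valid(omfn));
            set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
        }

        P2M_DEBUG("set foreign %lx %lx\n", gfn, mfn_x(mfn));
        rc = set_p2m_entry(p2m, gfn, mfn, 0, p2m_map_foreign, p2m->default_access);
        gfn_unlock(p2m, gfn, 0);

        if ( rc == 0 )
            gdprintk(XENLOG_ERR, "set foreign p2m entry failed: gfn=%lx mfn=%lx\n",
                     gfn, mfn_x(mfn));
        return rc;
    }

Extracting this body into a helper shared with set_mmio_p2m_entry(),
parameterised on the p2m type, would address the refactoring point.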