The following series makes mapping a gfn from foreign domains more robust.
Now that xc_mem_paging_load() takes care of the page-in path, adjust also the
page-out path. This moves the pager out of the way, and all places where a
p2mt check is done can now return -ENOENT right away.

Olaf

Changes:
 xenpaging: extend xc_mem_paging_enable() to handle interface version
 xenpaging: map gfn before nomination
 xenpaging: add need_populate and paged_no_mfn checks
 xenpaging: restore p2mt if gfn is needed before evict
 xenpaging: improve evict error handling

 tools/libxc/xc_mem_paging.c      |    3 +-
 tools/libxc/xenctrl.h            |    1
 tools/xenpaging/xenpaging.c      |   49 ++++++++++++++++++++++++----------
 xen/arch/x86/hvm/emulate.c       |    3 +-
 xen/arch/x86/hvm/hvm.c           |   17 +++++++-----
 xen/arch/x86/mm.c                |   55 +++++++++++----------------------------
 xen/arch/x86/mm/guest_walk.c     |    3 +-
 xen/arch/x86/mm/hap/guest_walk.c |    6 ++--
 xen/arch/x86/mm/mem_event.c      |    9 ++++++
 xen/arch/x86/mm/p2m-ept.c        |    3 --
 xen/arch/x86/mm/p2m.c            |   33 ++++++++++++-----------
 xen/common/grant_table.c         |    3 +-
 xen/include/asm-x86/p2m.h        |    9 ++++--
 xen/include/public/mem_event.h   |    2 +
 14 files changed, 110 insertions(+), 86 deletions(-)
Olaf Hering
2011-Dec-06 17:07 UTC
[PATCH 1 of 5] xenpaging: extend xc_mem_paging_enable() to handle interface version
# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1323189139 -3600
# Node ID b733498b351a8650b2d952aa56725f63d49c1889
# Parent a4d7c27ec1f190ecbb9a909609f6ef0eca250c00
xenpaging: extend xc_mem_paging_enable() to handle interface version
Since upcoming patches will change the way paging works internally, extend
xc_mem_paging_enable() with an interface version argument to make sure the
pager is not out of date. This is similar to XEN_DOMCTL_INTERFACE_VERSION in
do_domctl(), where the tools have to match the running hypervisor.
Signed-off-by: Olaf Hering <olaf@aepfle.de>
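For illustration only (not part of the series): a pager built against this
header passes its compile-time MEM_EVENT_PAGING_AGE and treats ENOEXEC as a
build mismatch. A minimal sketch, assuming xch, domain_id and the two event
pages are already set up as in xenpaging_init() below:

    #include <errno.h>
    #include <stdio.h>
    #include <xenctrl.h>

    static int enable_paging(xc_interface *xch, domid_t domain_id,
                             void *shared_page, void *ring_page)
    {
        /* The hypervisor compares the passed age against its own
         * MEM_EVENT_PAGING_AGE and fails the domctl with -ENOEXEC
         * on a mismatch. */
        int rc = xc_mem_paging_enable(xch, domain_id, MEM_EVENT_PAGING_AGE,
                                      shared_page, ring_page);
        if ( rc != 0 && errno == ENOEXEC )
            fprintf(stderr, "xenpaging version mismatch\n");
        return rc;
    }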
diff -r a4d7c27ec1f1 -r b733498b351a tools/libxc/xc_mem_paging.c
--- a/tools/libxc/xc_mem_paging.c
+++ b/tools/libxc/xc_mem_paging.c
@@ -25,12 +25,13 @@
int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
+ unsigned long interface_age,
void *shared_page, void *ring_page)
{
return xc_mem_event_control(xch, domain_id,
XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE,
XEN_DOMCTL_MEM_EVENT_OP_PAGING,
- shared_page, ring_page, INVALID_MFN);
+ shared_page, ring_page, interface_age);
}
int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id)
diff -r a4d7c27ec1f1 -r b733498b351a tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1859,6 +1859,7 @@ int xc_mem_event_control(xc_interface *x
void *ring_page, unsigned long gfn);
int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
+ unsigned long interface_age,
void *shared_page, void *ring_page);
int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id);
int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id,
diff -r a4d7c27ec1f1 -r b733498b351a tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -366,6 +366,7 @@ static xenpaging_t *xenpaging_init(int a
/* Initialise Xen */
rc = xc_mem_paging_enable(xch, paging->mem_event.domain_id,
+ MEM_EVENT_PAGING_AGE,
paging->mem_event.shared_page,
paging->mem_event.ring_page);
if ( rc != 0 )
@@ -380,6 +381,9 @@ static xenpaging_t *xenpaging_init(int a
case EXDEV:
ERROR("xenpaging not supported in a PoD guest");
break;
+ case ENOEXEC:
+ ERROR("xenpaging version mismatch");
+ break;
default:
PERROR("Error initialising shared page");
break;
diff -r a4d7c27ec1f1 -r b733498b351a xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -287,6 +287,15 @@ int mem_event_domctl(struct domain *d, x
if ( p2m->pod.entry_count )
break;
+ rc = -ENOEXEC;
+ /* Reject old pager */
+ if ( mec->gfn != MEM_EVENT_PAGING_AGE )
+ {
+ gdprintk(XENLOG_INFO, "Expected paging age %lx, got %lx\n",
+ MEM_EVENT_PAGING_AGE, mec->gfn);
+ break;
+ }
+
rc = mem_event_enable(d, mec, med);
}
break;
diff -r a4d7c27ec1f1 -r b733498b351a xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -49,6 +49,8 @@
#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */
#define MEM_EVENT_REASON_SINGLESTEP 6 /* single step was invoked: gla/gfn are RIP */
+#define MEM_EVENT_PAGING_AGE 1UL /* Number to distinguish the mem_paging <-> pager interface */
+
typedef struct mem_event_shared_page {
uint32_t port;
} mem_event_shared_page_t;
Olaf Hering
2011-Dec-06 17:07 UTC
[PATCH 2 of 5] xenpaging: map gfn before nomination
# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1323189147 -3600
# Node ID 96d3292797d861592a7d2d3840f371ec719775a9
# Parent b733498b351a8650b2d952aa56725f63d49c1889
xenpaging: map gfn before nomination
If the gfn is mapped before nomination, all the special cases for paged gfns
in do_mmu_update() can be removed. If a gfn is actually in any of the paging
states, the caller has to try again.
Bump interface age.
Signed-off-by: Olaf Hering <olaf@aepfle.de>
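The reordered page-out sequence then becomes map, nominate, copy, unmap,
evict. A condensed sketch of the flow (illustration only; xch, domain_id,
gfn, fd and i as in xenpaging_evict_page() below, error handling trimmed):

    /* 1. Map the gfn first, so the pager holds a reference across nomination;
     *    nominate now expects exactly two users: guest plus pager. */
    page = xc_map_foreign_pages(xch, domain_id, PROT_READ | PROT_WRITE, &gfn, 1);

    /* 2. Nominate: the page becomes read-only for the guest. */
    xc_mem_paging_nominate(xch, domain_id, gfn);

    /* 3. Copy the page contents to the paging file. */
    write_page(fd, page, i);

    /* 4. Unmap, so the mfn is back to exactly one user (the guest). */
    munmap(page, PAGE_SIZE);

    /* 5. Evict: the mfn is freed and the gfn becomes p2m_ram_paged. */
    xc_mem_paging_evict(xch, domain_id, gfn);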
diff -r b733498b351a -r 96d3292797d8 tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -576,7 +576,7 @@ static int xenpaging_evict_page(xenpagin
DECLARE_DOMCTL;
- /* Map page */
+ /* Map page to get a handle */
gfn = victim->gfn;
ret = -EFAULT;
page = xc_map_foreign_pages(xch, paging->mem_event.domain_id,
@@ -587,16 +587,21 @@ static int xenpaging_evict_page(xenpagin
goto out;
}
+ /* Nominate the page */
+ ret = xc_mem_paging_nominate(xch, paging->mem_event.domain_id, gfn);
+ if ( ret != 0 )
+ goto out;
+
/* Copy page */
ret = write_page(fd, page, i);
if ( ret != 0 )
{
PERROR("Error copying page %lx", victim->gfn);
- munmap(page, PAGE_SIZE);
goto out;
}
munmap(page, PAGE_SIZE);
+ page = NULL;
/* Tell Xen to evict page */
ret = xc_mem_paging_evict(xch, paging->mem_event.domain_id,
@@ -615,6 +620,8 @@ static int xenpaging_evict_page(xenpagin
paging->num_paged_out++;
out:
+ if (page)
+ munmap(page, PAGE_SIZE);
return ret;
}
@@ -738,14 +745,11 @@ static int evict_victim(xenpaging_t *pag
ret = -EINTR;
goto out;
}
- ret = xc_mem_paging_nominate(xch, paging->mem_event.domain_id, victim->gfn);
- if ( ret == 0 )
- ret = xenpaging_evict_page(paging, victim, fd, i);
- else
+ ret = xenpaging_evict_page(paging, victim, fd, i);
+ if ( ret && j++ % 1000 == 0 )
{
- if ( j++ % 1000 == 0 )
- if ( xenpaging_mem_paging_flush_ioemu_cache(paging) )
- PERROR("Error flushing ioemu cache");
+ if ( xenpaging_mem_paging_flush_ioemu_cache(paging) )
+ PERROR("Error flushing ioemu cache");
}
}
while ( ret );
diff -r b733498b351a -r 96d3292797d8 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -723,7 +723,7 @@ set_shared_p2m_entry(struct domain *d, u
* - the gfn is backed by a mfn
* - the p2mt of the gfn is pageable
* - the mfn is not used for IO
- * - the mfn has exactly one user and has no special meaning
+ * - the mfn has exactly two users (guest+pager) and has no special meaning
*
* Once the p2mt is changed the page is readonly for the guest. On success the
* pager can write the page contents to disk and later evict the page.
@@ -758,7 +758,7 @@ int p2m_mem_paging_nominate(struct domai
/* Check page count and type */
page = mfn_to_page(mfn);
if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
- (1 | PGC_allocated) )
+ (2 | PGC_allocated) )
goto out;
if ( (page->u.inuse.type_info & PGT_type_mask) != PGT_none )
@@ -785,7 +785,7 @@ int p2m_mem_paging_nominate(struct domai
* freed:
* - the gfn is backed by a mfn
* - the gfn was nominated
- * - the mfn has still exactly one user and has no special meaning
+ * - the mfn has still exactly one user (the guest) and has no special meaning
*
* After successful nomination some other process could have mapped the page. In
* this case eviction can not be done. If the gfn was populated before the pager
diff -r b733498b351a -r 96d3292797d8 xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -49,7 +49,7 @@
#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */
#define MEM_EVENT_REASON_SINGLESTEP 6 /* single step was invoked: gla/gfn are RIP */
-#define MEM_EVENT_PAGING_AGE 1UL /* Number to distinguish the mem_paging <-> pager interface */
+#define MEM_EVENT_PAGING_AGE 2UL /* Number to distinguish the mem_paging <-> pager interface */
typedef struct mem_event_shared_page {
uint32_t port;
Olaf Hering
2011-Dec-06 17:07 UTC
[PATCH 3 of 5] xenpaging: add need_populate and paged_no_mfn checks
# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1323189148 -3600
# Node ID 7f2cfd9bd113c40a49cc7e036fd07eb706a22f15
# Parent 96d3292797d861592a7d2d3840f371ec719775a9
xenpaging: add need_populate and paged_no_mfn checks
There is currently a mix of p2mt checks for the various paging types.
Some mean the p2mt needs to be populated, others mean a gfn without an mfn.
Add a new p2m_do_populate() helper which covers the p2m_ram_paged and
p2m_ram_paging_out types. If a gfn is no longer in one of these states,
another populate request for the pager is not needed. This avoids a call to
p2m_mem_paging_populate(), which in turn reduces the pressure on the ring
buffer because no temporary slot needs to be claimed. As such, this helper is
an optimization.
Modify the existing p2m_is_paged() helper so that it also covers
p2m_ram_paging_in_start in addition to the current p2m_ram_paged type. A gfn
in either of these two states is not backed by an mfn.
Signed-off-by: Olaf Hering <olaf@aepfle.de>
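Unwrapped for readability, the two masks and the caller pattern this patch
establishes (the pattern matches the hvm.c and grant_table.c hunks below):

    #define P2M_POPULATE_TYPES     (p2m_to_mask(p2m_ram_paged)     \
                                  | p2m_to_mask(p2m_ram_paging_out))

    #define P2M_PAGED_NO_MFN_TYPES (p2m_to_mask(p2m_ram_paged)     \
                                  | p2m_to_mask(p2m_ram_paging_in_start))

    /* Typical caller after this patch: */
    if ( p2m_is_paging(p2mt) )           /* gfn is somewhere in the paging path */
    {
        if ( p2m_do_populate(p2mt) )     /* kick the pager only when needed */
            p2m_mem_paging_populate(d, gfn);
        put_gfn(d, gfn);
        return -ENOENT;                  /* caller has to retry */
    }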
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -67,7 +67,8 @@ static int hvmemul_do_io(
ram_mfn = get_gfn_unshare(curr->domain, ram_gfn, &p2mt);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(curr->domain, ram_gfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(curr->domain, ram_gfn);
put_gfn(curr->domain, ram_gfn);
return X86EMUL_RETRY;
}
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -363,7 +363,8 @@ static int hvm_set_ioreq_page(
}
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(d, gmfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(d, gmfn);
put_gfn(d, gmfn);
return -ENOENT;
}
@@ -1298,7 +1299,7 @@ int hvm_hap_nested_page_fault(unsigned l
#ifdef __x86_64__
/* Check if the page has been paged out */
- if ( p2m_is_paged(p2mt) || (p2mt == p2m_ram_paging_out) )
+ if ( p2m_do_populate(p2mt) )
p2m_mem_paging_populate(v->domain, gfn);
/* Mem sharing: unshare the page and try again */
@@ -1844,7 +1845,8 @@ static void *__hvm_map_guest_frame(unsig
}
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(d, gfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(d, gfn);
put_gfn(d, gfn);
return NULL;
}
@@ -2320,7 +2322,8 @@ static enum hvm_copy_result __hvm_copy(
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(curr->domain, gfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(curr->domain, gfn);
put_gfn(curr->domain, gfn);
return HVMCOPY_gfn_paged_out;
}
@@ -3808,7 +3811,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
mfn_t mfn = get_gfn_unshare(d, pfn, &t);
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(d, pfn);
+ if ( p2m_do_populate(t) )
+ p2m_mem_paging_populate(d, pfn);
put_gfn(d, pfn);
rc = -EINVAL;
goto param_fail3;
@@ -3912,7 +3916,8 @@ long do_hvm_op(unsigned long op, XEN_GUE
mfn = get_gfn_unshare(d, pfn, &t);
if ( p2m_is_paging(t) )
{
- p2m_mem_paging_populate(d, pfn);
+ if ( p2m_do_populate(t) )
+ p2m_mem_paging_populate(d, pfn);
put_gfn(d, pfn);
rc = -EINVAL;
goto param_fail4;
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3528,9 +3528,10 @@ int do_mmu_update(
if ( !p2m_is_valid(p2mt) )
mfn = INVALID_MFN;
- if ( p2m_is_paged(p2mt) )
+ if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(pg_owner, gmfn);
+ if ( p2m_is_paged(p2mt) )
+ p2m_mem_paging_populate(pg_owner, gmfn);
put_gfn(pt_owner, gmfn);
rc = -ENOENT;
break;
@@ -3560,21 +3561,15 @@ int do_mmu_update(
l1emfn = mfn_x(get_gfn(pg_owner, l1egfn, &l1e_p2mt));
- if ( p2m_is_paged(l1e_p2mt) )
+#ifdef __x86_64__
+ if ( p2m_is_paging(l1e_p2mt) )
{
- p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
+ if ( p2m_is_paged(l1e_p2mt) )
+ p2m_mem_paging_populate(pg_owner, l1e_get_pfn(l1e));
put_gfn(pg_owner, l1egfn);
rc = -ENOENT;
break;
}
- else if ( p2m_ram_paging_in_start == l1e_p2mt &&
- !mfn_valid(l1emfn) )
- {
- put_gfn(pg_owner, l1egfn);
- rc = -ENOENT;
- break;
- }
-#ifdef __x86_64__
/* XXX: Ugly: pull all the checks into a separate function.
* Don't want to do it now, not to interfere with mem_paging
* patches */
@@ -3609,16 +3604,10 @@ int do_mmu_update(
l2emfn = mfn_x(get_gfn(pg_owner, l2egfn, &l2e_p2mt));
- if ( p2m_is_paged(l2e_p2mt) )
+ if ( p2m_is_paging(l2e_p2mt) )
{
- p2m_mem_paging_populate(pg_owner, l2egfn);
- put_gfn(pg_owner, l2egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_paging_in_start == l2e_p2mt &&
- !mfn_valid(l2emfn) )
- {
+ if ( p2m_is_paged(l2e_p2mt) )
+ p2m_mem_paging_populate(pg_owner, l2egfn);
put_gfn(pg_owner, l2egfn);
rc = -ENOENT;
break;
@@ -3644,16 +3633,10 @@ int do_mmu_update(
l3emfn = mfn_x(get_gfn(pg_owner, l3egfn, &l3e_p2mt));
- if ( p2m_is_paged(l3e_p2mt) )
+ if ( p2m_is_paging(l3e_p2mt) )
{
- p2m_mem_paging_populate(pg_owner, l3egfn);
- put_gfn(pg_owner, l3egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_paging_in_start == l3e_p2mt &&
- !mfn_valid(l3emfn) )
- {
+ if ( p2m_is_paged(l3e_p2mt) )
+ p2m_mem_paging_populate(pg_owner, l3egfn);
put_gfn(pg_owner, l3egfn);
rc = -ENOENT;
break;
@@ -3679,16 +3662,10 @@ int do_mmu_update(
l4emfn = mfn_x(get_gfn(pg_owner, l4egfn, &l4e_p2mt));
- if ( p2m_is_paged(l4e_p2mt) )
+ if ( p2m_is_paging(l4e_p2mt) )
{
- p2m_mem_paging_populate(pg_owner, l4egfn);
- put_gfn(pg_owner, l4egfn);
- rc = -ENOENT;
- break;
- }
- else if ( p2m_ram_paging_in_start == l4e_p2mt &&
- !mfn_valid(l4emfn) )
- {
+ if ( p2m_is_paged(l4e_p2mt) )
+ p2m_mem_paging_populate(pg_owner, l4egfn);
put_gfn(pg_owner, l4egfn);
rc = -ENOENT;
break;
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/mm/guest_walk.c
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -102,7 +102,8 @@ static inline void *map_domain_gfn(struc
if ( p2m_is_paging(*p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
- p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
+ if ( p2m_do_populate(*p2mt) )
+ p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
__put_gfn(p2m, gfn_x(gfn));
*rc = _PAGE_PAGED;
return NULL;
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/mm/hap/guest_walk.c
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -64,7 +64,8 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
if ( p2m_is_paging(p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
- p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
pfec[0] = PFEC_page_paged;
__put_gfn(p2m, top_gfn);
@@ -101,7 +102,8 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
if ( p2m_is_paging(p2mt) )
{
ASSERT(!p2m_is_nestedp2m(p2m));
- p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
pfec[0] = PFEC_page_paged;
__put_gfn(p2m, gfn_x(gfn));
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -375,8 +375,7 @@ ept_set_entry(struct p2m_domain *p2m, un
* Read-then-write is OK because we hold the p2m lock. */
old_entry = *ept_entry;
- if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) ||
- (p2mt == p2m_ram_paging_in_start) )
+ if ( mfn_valid(mfn_x(mfn)) || direct_mmio || p2m_is_paged(p2mt) )
{
/* Construct the new entry, and then write it once */
new_entry.emt = epte_get_entry_emt(p2m->domain, gfn, mfn, &ipat,
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -918,7 +918,7 @@ void p2m_mem_paging_populate(struct doma
p2m_lock(p2m);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
/* Allow only nominated or evicted pages to enter page-in path */
- if ( p2mt == p2m_ram_paging_out || p2mt == p2m_ram_paged )
+ if ( p2m_do_populate(p2mt) )
{
/* Evict will fail now, tag this request for pager */
if ( p2mt == p2m_ram_paging_out )
@@ -935,7 +935,7 @@ void p2m_mem_paging_populate(struct doma
req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
}
/* No need to inform pager if the gfn is not in the page-out path */
- else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged )
+ else if ( !p2m_do_populate(p2mt) )
{
/* gfn is already on its way back and vcpu is not paused */
mem_event_put_req_producers(&d->mem_event->paging);
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/common/grant_table.c
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -163,7 +163,8 @@ static int __get_paged_frame(unsigned lo
*frame = mfn_x(mfn);
if ( p2m_is_paging(p2mt) )
{
- p2m_mem_paging_populate(rd, gfn);
+ if ( p2m_do_populate(p2mt) )
+ p2m_mem_paging_populate(rd, gfn);
put_gfn(rd, gfn);
rc = GNTST_eagain;
}
diff -r 96d3292797d8 -r 7f2cfd9bd113 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -158,7 +158,11 @@ typedef enum {
| p2m_to_mask(p2m_ram_paging_in_start) \
| p2m_to_mask(p2m_ram_paging_in))
-#define P2M_PAGED_TYPES (p2m_to_mask(p2m_ram_paged))
+#define P2M_POPULATE_TYPES (p2m_to_mask(p2m_ram_paged) \
+ | p2m_to_mask(p2m_ram_paging_out) )
+
+#define P2M_PAGED_NO_MFN_TYPES (p2m_to_mask(p2m_ram_paged) \
+ | p2m_to_mask(p2m_ram_paging_in_start) )
/* Shared types */
/* XXX: Sharable types could include p2m_ram_ro too, but we would need to
@@ -184,7 +188,8 @@ typedef enum {
#define p2m_has_emt(_t) (p2m_to_mask(_t) & (P2M_RAM_TYPES | p2m_to_mask(p2m_mmio_direct)))
#define p2m_is_pageable(_t) (p2m_to_mask(_t) & P2M_PAGEABLE_TYPES)
#define p2m_is_paging(_t) (p2m_to_mask(_t) & P2M_PAGING_TYPES)
-#define p2m_is_paged(_t) (p2m_to_mask(_t) & P2M_PAGED_TYPES)
+#define p2m_is_paged(_t) (p2m_to_mask(_t) & P2M_PAGED_NO_MFN_TYPES)
+#define p2m_do_populate(_t) (p2m_to_mask(_t) & P2M_POPULATE_TYPES)
#define p2m_is_sharable(_t) (p2m_to_mask(_t) & P2M_SHARABLE_TYPES)
#define p2m_is_shared(_t) (p2m_to_mask(_t) & P2M_SHARED_TYPES)
#define p2m_is_broken(_t) (p2m_to_mask(_t) & P2M_BROKEN_TYPES)
Olaf Hering
2011-Dec-06 17:07 UTC
[PATCH 4 of 5] xenpaging: restore p2mt if gfn is needed before evict
# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1323189150 -3600
# Node ID 4e52b9c9309815a4b05c7b970a95e318d59fb71b
# Parent 7f2cfd9bd113c40a49cc7e036fd07eb706a22f15
xenpaging: restore p2mt if gfn is needed before evict
In the rare case that a gfn is needed by a guest or a foreign domain
between nominate and evict, restore the p2mt and skip sending a request.
A request is not needed because the pager will notice the evict failure.
Signed-off-by: Olaf Hering <olaf@aepfle.de>
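The race being handled, sketched as a timeline (illustration only):

    /*
     * pager: nominate(gfn)  -> p2mt = p2m_ram_paging_out, mfn still valid
     * guest: accesses gfn   -> p2m_mem_paging_populate() runs before evict;
     *                          Xen restores p2m_ram_rw (or p2m_ram_logdirty),
     *                          pauses no vcpu and puts no request on the ring
     * pager: evict(gfn)     -> fails; the pager notices and picks another victim
     */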
diff -r 7f2cfd9bd113 -r 4e52b9c93098 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -905,6 +905,7 @@ void p2m_mem_paging_populate(struct doma
p2m_type_t p2mt;
p2m_access_t a;
mfn_t mfn;
+ int restored = 0;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
/* Check that there's space on the ring for this request */
@@ -920,22 +921,25 @@ void p2m_mem_paging_populate(struct doma
/* Allow only nominated or evicted pages to enter page-in path */
if ( p2m_do_populate(p2mt) )
{
- /* Evict will fail now, tag this request for pager */
- if ( p2mt == p2m_ram_paging_out )
- req.flags |= MEM_EVENT_FLAG_EVICT_FAIL;
-
- set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in_start, a);
+ /* Restore page state if gfn was requested before evict */
+ if ( p2mt == p2m_ram_paging_out && mfn_valid(mfn) ) {
+ set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K,
+ paging_mode_log_dirty(d) ? p2m_ram_logdirty : p2m_ram_rw, a);
+ restored = 1;
+ } else {
+ set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_paging_in_start, a);
+ }
}
p2m_unlock(p2m);
/* Pause domain if request came from guest and gfn has paging type */
- if ( p2m_is_paging(p2mt) && v->domain == d )
+ if ( !restored && p2m_is_paging(p2mt) && v->domain == d )
{
vcpu_pause_nosync(v);
req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
}
/* No need to inform pager if the gfn is not in the page-out path */
- else if ( !p2m_do_populate(p2mt) )
+ else if ( restored || !p2m_do_populate(p2mt) )
{
/* gfn is already on its way back and vcpu is not paused */
mem_event_put_req_producers(&d->mem_event->paging);
Olaf Hering
2011-Dec-06 17:07 UTC
[PATCH 5 of 5] xenpaging: improve evict error handling
# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1323189154 -3600
# Node ID 344e337be85eee244036f7cd158b64fe9d09aa00
# Parent 4e52b9c9309815a4b05c7b970a95e318d59fb71b
xenpaging: improve evict error handling
Adjust the return codes in Xen and handle errors in evict_victim() properly.
p2m_mem_paging_nominate() now returns -EAGAIN and p2m_mem_paging_evict()
returns -EBUSY for transient failures. Other errors indicate guest failures,
which xenpaging_evict_page() can now catch properly. write() failures are
also fatal.
Without this change, evict_victim() may spin forever if the guest is killed,
because this function does not get a signal.
Signed-off-by: Olaf Hering <olaf@aepfle.de>
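The resulting error contract between Xen and the pager, summarized (values as
introduced by the hunks below):

    /*
     * xc_mem_paging_nominate(): errno EAGAIN -> transient, try another gfn
     * xc_mem_paging_evict():    errno EBUSY  -> gfn was populated meanwhile
     * any other error                        -> fatal, e.g. the guest is gone
     *
     * xenpaging_evict_page() maps this to: ret == 0 page evicted,
     * ret > 0 transient (retry with another gfn), ret < 0 fatal;
     * evict_victim() turns ret < 0 into -EINTR and stops retrying.
     */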
diff -r 4e52b9c93098 -r 344e337be85e tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -576,9 +576,11 @@ static int xenpaging_evict_page(xenpagin
DECLARE_DOMCTL;
+ /* Errors are fatal */
+ ret = -1;
+
/* Map page to get a handle */
gfn = victim->gfn;
- ret = -EFAULT;
page = xc_map_foreign_pages(xch, paging->mem_event.domain_id,
PROT_READ | PROT_WRITE, &gfn, 1);
if ( page == NULL )
@@ -588,13 +590,15 @@ static int xenpaging_evict_page(xenpagin
}
/* Nominate the page */
- ret = xc_mem_paging_nominate(xch, paging->mem_event.domain_id, gfn);
- if ( ret != 0 )
+ if ( xc_mem_paging_nominate(xch, paging->mem_event.domain_id, gfn) )
+ {
+ if ( errno == EAGAIN )
+ ret = 1;
goto out;
+ }
/* Copy page */
- ret = write_page(fd, page, i);
- if ( ret != 0 )
+ if ( write_page(fd, page, i) )
{
PERROR("Error copying page %lx", victim->gfn);
goto out;
@@ -604,10 +608,10 @@ static int xenpaging_evict_page(xenpagin
page = NULL;
/* Tell Xen to evict page */
- ret = xc_mem_paging_evict(xch, paging->mem_event.domain_id,
- victim->gfn);
- if ( ret != 0 )
+ if ( xc_mem_paging_evict(xch, paging->mem_event.domain_id, victim->gfn) )
{
+ if ( errno == EBUSY )
+ ret = 1;
PERROR("Error evicting page %lx", victim->gfn);
goto out;
}
@@ -619,6 +623,8 @@ static int xenpaging_evict_page(xenpagin
/* Record number of evicted pages */
paging->num_paged_out++;
+ ret = 0;
+
out:
if (page)
munmap(page, PAGE_SIZE);
@@ -746,7 +752,12 @@ static int evict_victim(xenpaging_t *pag
goto out;
}
ret = xenpaging_evict_page(paging, victim, fd, i);
- if ( ret && j++ % 1000 == 0 )
+ if ( ret < 0 )
+ {
+ ret = -EINTR;
+ goto out;
+ }
+ if ( ret > 0 && j++ % 1000 == 0 )
{
if ( xenpaging_mem_paging_flush_ioemu_cache(paging) )
PERROR("Error flushing ioemu cache");
diff -r 4e52b9c93098 -r 344e337be85e xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -735,19 +735,17 @@ int p2m_mem_paging_nominate(struct domai
p2m_type_t p2mt;
p2m_access_t a;
mfn_t mfn;
- int ret;
+ int ret = -EAGAIN;
p2m_lock(p2m);
mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
/* Check if mfn is valid */
- ret = -EINVAL;
if ( !mfn_valid(mfn) )
goto out;
/* Check p2m type */
- ret = -EAGAIN;
if ( !p2m_is_pageable(p2mt) )
goto out;
@@ -799,7 +797,7 @@ int p2m_mem_paging_evict(struct domain *
p2m_access_t a;
mfn_t mfn;
struct p2m_domain *p2m = p2m_get_hostp2m(d);
- int ret = -EINVAL;
+ int ret = -EBUSY;
p2m_lock(p2m);
@@ -812,7 +810,6 @@ int p2m_mem_paging_evict(struct domain *
if ( p2mt != p2m_ram_paging_out )
goto out;
- ret = -EBUSY;
/* Get the page so it doesn't get modified under Xen's feet */
page = mfn_to_page(mfn);
if ( unlikely(!get_page(page, d)) )
diff -r 4e52b9c93098 -r 344e337be85e xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -49,7 +49,7 @@
#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */
#define MEM_EVENT_REASON_SINGLESTEP 6 /* single step was invoked: gla/gfn are RIP */
-#define MEM_EVENT_PAGING_AGE 2UL /* Number to distinguish the mem_paging <-> pager interface */
+#define MEM_EVENT_PAGING_AGE 3UL /* Number to distinguish the mem_paging <-> pager interface */
typedef struct mem_event_shared_page {
uint32_t port;