Joe Epstein
2011-Jan-06 03:53 UTC
[Xen-devel] [PATCH 4 of 7] REDO2: mem_access & mem_access 2: HVMOPs for setting mem access
* Creates HVMOPs for setting and getting memory access. The hypercalls
can set individual pages or the default access for new/refreshed
pages.
* Added functions to libxc to access these hypercalls (usage sketch below).
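
For illustration only (an editorial sketch, not part of the patch): how a
privileged toolstack might drive the new libxc wrappers, assuming the headers
from this series are installed. The domain id and pfn range below are made-up
values.

/* Editorial sketch: domid 7 and the pfn range are hypothetical. */
#include <stdio.h>
#include <stdint.h>
#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    domid_t domid = 7;            /* hypothetical HVM target domain */
    hvmmem_access_t access;

    if ( xch == NULL )
        return 1;

    /* Restrict gfns 0x1000..0x10ff to read/execute; subsequent writes are
     * reported to the domain's mem_event access listener (set up by the
     * other patches in this series). */
    if ( xc_hvm_set_mem_access(xch, domid, HVMMEM_access_rx, 0x1000, 0x100) )
        fprintf(stderr, "set_mem_access failed\n");

    /* first_pfn == ~0ull selects the default access applied to new pages;
     * nr is ignored in that case. */
    if ( xc_hvm_set_mem_access(xch, domid, HVMMEM_access_rwx, ~0ull, 0) )
        fprintf(stderr, "setting default access failed\n");

    /* Read back the access type of a single page. */
    if ( xc_hvm_get_mem_access(xch, domid, 0x1000, &access) == 0 )
        printf("gfn 0x1000 access type: %d\n", (int)access);

    xc_interface_close(xch);
    return 0;
}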
Signed-off-by: Joe Epstein <jepstein98@gmail.com>
diff -r 281e5488ee97 -r 98c86c674ba3 tools/libxc/Makefile
--- a/tools/libxc/Makefile Wed Jan 05 18:50:54 2011 -0800
+++ b/tools/libxc/Makefile Wed Jan 05 18:53:22 2011 -0800
@@ -28,6 +28,7 @@ CTRL_SRCS-y += xc_resume.c
CTRL_SRCS-y += xc_tmem.c
CTRL_SRCS-y += xc_mem_event.c
CTRL_SRCS-y += xc_mem_paging.c
+CTRL_SRCS-y += xc_mem_access.c
CTRL_SRCS-y += xc_memshr.c
CTRL_SRCS-y += xc_hcall_buf.c
CTRL_SRCS-y += xc_foreign_memory.c
diff -r 281e5488ee97 -r 98c86c674ba3 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c Wed Jan 05 18:50:54 2011 -0800
+++ b/tools/libxc/xc_domain.c Wed Jan 05 18:53:22 2011 -0800
@@ -1442,6 +1442,17 @@ int xc_domain_debug_control(xc_interface
return do_domctl(xc, &domctl);
}
+int xc_domain_set_access_required(xc_interface *xch,
+ uint32_t domid,
+ unsigned int required)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_set_access_required;
+ domctl.domain = domid;
+ domctl.u.access_required.access_required = required;
+ return do_domctl(xch, &domctl);
+}
/*
* Local variables:
diff -r 281e5488ee97 -r 98c86c674ba3 tools/libxc/xc_mem_access.c
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/libxc/xc_mem_access.c Wed Jan 05 18:53:22 2011 -0800
@@ -0,0 +1,42 @@
+/******************************************************************************
+ *
+ * tools/libxc/xc_mem_access.c
+ *
+ * Interface to low-level memory access mode functionality
+ *
+ * Copyright (c) 2011 Virtuata, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "xc_private.h"
+
+
+int xc_mem_access_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn)
+{
+ return xc_mem_event_control(xch, domain_id,
+ XEN_DOMCTL_MEM_EVENT_OP_ACCESS_RESUME,
+ XEN_DOMCTL_MEM_EVENT_OP_ACCESS, NULL, NULL,
+ gfn);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff -r 281e5488ee97 -r 98c86c674ba3 tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c Wed Jan 05 18:50:54 2011 -0800
+++ b/tools/libxc/xc_misc.c Wed Jan 05 18:53:22 2011 -0800
@@ -511,6 +511,66 @@ int xc_hvm_set_mem_type(
return rc;
}
+int xc_hvm_set_mem_access(
+ xc_interface *xch, domid_t dom, hvmmem_access_t mem_access, uint64_t first_pfn, uint64_t nr)
+{
+ DECLARE_HYPERCALL;
+ DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_mem_access, arg);
+ int rc;
+
+ arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+ if ( arg == NULL )
+ {
+ PERROR("Could not allocate memory for xc_hvm_set_mem_access
hypercall");
+ return -1;
+ }
+
+ arg->domid = dom;
+ arg->hvmmem_access = mem_access;
+ arg->first_pfn = first_pfn;
+ arg->nr = nr;
+
+ hypercall.op = __HYPERVISOR_hvm_op;
+ hypercall.arg[0] = HVMOP_set_mem_access;
+ hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+
+ rc = do_xen_hypercall(xch, &hypercall);
+
+ xc_hypercall_buffer_free(xch, arg);
+
+ return rc;
+}
+
+int xc_hvm_get_mem_access(
+ xc_interface *xch, domid_t dom, uint64_t pfn, hvmmem_access_t* mem_access)
+{
+ DECLARE_HYPERCALL;
+ DECLARE_HYPERCALL_BUFFER(struct xen_hvm_get_mem_access, arg);
+ int rc;
+
+ arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+ if ( arg == NULL )
+ {
+ PERROR("Could not allocate memory for xc_hvm_get_mem_access
hypercall");
+ return -1;
+ }
+
+ arg->domid = dom;
+ arg->pfn = pfn;
+
+ hypercall.op = __HYPERVISOR_hvm_op;
+ hypercall.arg[0] = HVMOP_get_mem_access;
+ hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+
+ rc = do_xen_hypercall(xch, &hypercall);
+
+ if ( !rc )
+ *mem_access = arg->hvmmem_access;
+
+ xc_hypercall_buffer_free(xch, arg);
+
+ return rc;
+}
/*
* Local variables:
diff -r 281e5488ee97 -r 98c86c674ba3 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h Wed Jan 05 18:50:54 2011 -0800
+++ b/tools/libxc/xenctrl.h Wed Jan 05 18:53:22 2011 -0800
@@ -701,6 +701,19 @@ int xc_domain_setdebugging(xc_interface
uint32_t domid,
unsigned int enable);
+/**
+ * This function sets or clears the requirement that a memory access
+ * event listener be present for the domain.
+ *
+ * @parm xch a handle to an open hypervisor interface
+ * @parm domid the domain id on which to set the requirement
+ * @parm required non-zero to require a listener
+ * return 0 on success, -1 on failure
+ */
+int xc_domain_set_access_required(xc_interface *xch,
+ uint32_t domid,
+ unsigned int required);
+
/*
* CPUPOOL MANAGEMENT FUNCTIONS
*/
@@ -1398,6 +1411,19 @@ int xc_hvm_modified_memory(
int xc_hvm_set_mem_type(
xc_interface *xch, domid_t dom, hvmmem_type_t memtype, uint64_t first_pfn, uint64_t nr);
+/*
+ * Set a range of memory to a specific access.
+ * Allowed types are HVMMEM_access_default, HVMMEM_access_n, any combination of
+ * HVM_access_ + (rwx), and HVM_access_rx2rw
+ */
+int xc_hvm_set_mem_access(
+ xc_interface *xch, domid_t dom, hvmmem_access_t memaccess, uint64_t first_pfn, uint64_t nr);
+
+/*
+ * Gets the mem access for the given page (returned in memaccess on success)
+ */
+int xc_hvm_get_mem_access(
+ xc_interface *xch, domid_t dom, uint64_t pfn, hvmmem_access_t* memaccess);
/*
* LOGGING AND ERROR REPORTING
@@ -1704,6 +1730,8 @@ int xc_mem_paging_evict(xc_interface *xc
int xc_mem_paging_prep(xc_interface *xch, domid_t domain_id, unsigned long gfn);
int xc_mem_paging_resume(xc_interface *xch, domid_t domain_id,
unsigned long gfn);
+int xc_mem_access_resume(xc_interface *xch, domid_t domain_id,
+ unsigned long gfn);
/**
* memshr operations
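
As an aside on the xc_domain_set_access_required and xc_mem_access_resume
declarations added to xenctrl.h above: a rough, hypothetical sketch of the
control-plane calls a monitor might make, requiring a listener up front and
resuming the guest once an access event has been handled. The mem_event ring
setup and the event handling itself belong to other patches in this series
and are elided.

#include <xenctrl.h>

/* Editorial sketch only; error handling and the mem_event ring are omitted. */
static int monitor_control_sketch(xc_interface *xch, domid_t domid,
                                  unsigned long faulting_gfn)
{
    /* Require that a memory access event listener be present for the
     * domain (see xc_domain_set_access_required above). */
    if ( xc_domain_set_access_required(xch, domid, 1) )
        return -1;

    /* ... an access event for faulting_gfn arrives on the mem_event ring
     * and is handled by the monitor (not shown) ... */

    /* Tell Xen the event has been dealt with so the guest can continue. */
    return xc_mem_access_resume(xch, domid, faulting_gfn);
}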
diff -r 281e5488ee97 -r 98c86c674ba3 xen/arch/ia64/vmx/vmx_hypercall.c
--- a/xen/arch/ia64/vmx/vmx_hypercall.c Wed Jan 05 18:50:54 2011 -0800
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c Wed Jan 05 18:53:22 2011 -0800
@@ -218,6 +218,9 @@ do_hvm_op(unsigned long op, XEN_GUEST_HA
}
case HVMOP_set_mem_type:
+ case HVMOP_set_mem_access:
+ case HVMOP_get_mem_access:
+
rc = -ENOSYS;
break;
diff -r 281e5488ee97 -r 98c86c674ba3 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Wed Jan 05 18:50:54 2011 -0800
+++ b/xen/arch/x86/hvm/hvm.c Wed Jan 05 18:53:22 2011 -0800
@@ -3466,6 +3466,149 @@ long do_hvm_op(unsigned long op, XEN_GUE
break;
}
+ case HVMOP_set_mem_access:
+ {
+ struct xen_hvm_set_mem_access a;
+ struct domain *d;
+ struct p2m_domain *p2m;
+ unsigned long pfn;
+
+ p2m_access_t memaccess[] = {
+ p2m_access_n,
+ p2m_access_r,
+ p2m_access_w,
+ p2m_access_rw,
+ p2m_access_x,
+ p2m_access_rx,
+ p2m_access_wx,
+ p2m_access_rwx,
+ p2m_access_rx2rw,
+ 0, /* HVMMEM_access_default -- will get set below */
+ };
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ if ( current->domain->domain_id == a.domid )
+ return -EPERM;
+
+ rc = rcu_lock_target_domain_by_id(a.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ rc = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto param_fail5;
+
+ p2m = p2m_get_hostp2m(d);
+ memaccess[HVMMEM_access_default] = p2m->default_access;
+
+ /* If request to set default access */
+ if ( a.first_pfn == ~0ull )
+ {
+ rc = 0;
+ p2m->default_access = memaccess[a.hvmmem_access];
+ goto param_fail5;
+ }
+
+ rc = -EINVAL;
+ if ( (a.first_pfn > domain_get_maximum_gpfn(d)) ||
+ ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+ ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
+ goto param_fail5;
+
+ if ( a.hvmmem_access >= ARRAY_SIZE(memaccess) )
+ goto param_fail5;
+
+ for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
+ {
+ p2m_type_t t;
+ mfn_t mfn;
+ int success;
+
+ mfn = gfn_to_mfn_unshare(p2m, pfn, &t, 0);
+
+ p2m_lock(p2m);
+ success = p2m->set_entry(p2m, pfn, mfn, 0, t, memaccess[a.hvmmem_access]);
+ p2m_unlock(p2m);
+ if ( !success )
+ goto param_fail5;
+ }
+
+ rc = 0;
+
+ param_fail5:
+ rcu_unlock_domain(d);
+ break;
+ }
+
+ case HVMOP_get_mem_access:
+ {
+ struct xen_hvm_get_mem_access a;
+ struct domain *d;
+ struct p2m_domain *p2m;
+ p2m_type_t t;
+ p2m_access_t ac;
+ mfn_t mfn;
+
+ /* Map internal p2m access types to interface hvmmem_access_t values */
+ hvmmem_access_t memaccess[] = {
+ HVMMEM_access_n,
+ HVMMEM_access_r,
+ HVMMEM_access_w,
+ HVMMEM_access_rw,
+ HVMMEM_access_x,
+ HVMMEM_access_rx,
+ HVMMEM_access_wx,
+ HVMMEM_access_rwx,
+ HVMMEM_access_rx2rw
+ };
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ if ( current->domain->domain_id == a.domid )
+ return -EPERM;
+
+ rc = rcu_lock_target_domain_by_id(a.domid, &d);
+ if ( rc != 0 )
+ return rc;
+
+ rc = -EINVAL;
+ if ( !is_hvm_domain(d) )
+ goto param_fail6;
+
+ p2m = p2m_get_hostp2m(d);
+
+ if ( a.pfn == ~0ull )
+ {
+ a.hvmmem_access = memaccess[p2m->default_access];
+ }
+ else {
+ rc = -EINVAL;
+ if ( (a.pfn > domain_get_maximum_gpfn(d)) )
+ goto param_fail6;
+
+ rc = -ESRCH;
+ mfn = p2m->get_entry(p2m, a.pfn, &t, &ac, p2m_query);
+
+ if ( mfn_x(mfn) == INVALID_MFN )
+ goto param_fail6;
+
+ rc = -ERANGE;
+ if ( ac >= ARRAY_SIZE(memaccess) )
+ goto param_fail6;
+
+ a.hvmmem_access = memaccess[ac];
+ }
+
+ rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+
+ param_fail6:
+ rcu_unlock_domain(d);
+ break;
+ }
+
case HVMOP_pagetable_dying:
{
struct xen_hvm_pagetable_dying a;
@@ -3480,12 +3623,12 @@ long do_hvm_op(unsigned long op, XEN_GUE
rc = -EINVAL;
if ( !is_hvm_domain(d) || !paging_mode_shadow(d) )
- goto param_fail5;
+ goto param_fail7;
rc = 0;
pagetable_dying(d, a.gpa);
- param_fail5:
+ param_fail7:
rcu_unlock_domain(d);
break;
}
diff -r 281e5488ee97 -r 98c86c674ba3 xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c Wed Jan 05 18:50:54 2011 -0800
+++ b/xen/arch/x86/mm/mem_sharing.c Wed Jan 05 18:53:22 2011 -0800
@@ -304,6 +304,8 @@ static struct page_info* mem_sharing_all
if(page != NULL) return page;
memset(&req, 0, sizeof(req));
+ req.type = MEM_EVENT_TYPE_SHARED;
+
if(must_succeed)
{
/* We do not support 'must_succeed' any more. External operations such
diff -r 281e5488ee97 -r 98c86c674ba3 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c Wed Jan 05 18:50:54 2011 -0800
+++ b/xen/arch/x86/mm/p2m.c Wed Jan 05 18:53:22 2011 -0800
@@ -2781,6 +2781,7 @@ void p2m_mem_paging_populate(struct p2m_
return;
memset(&req, 0, sizeof(req));
+ req.type = MEM_EVENT_TYPE_PAGING;
/* Fix p2m mapping */
/* XXX: It seems inefficient to have this here, as it's only needed
diff -r 281e5488ee97 -r 98c86c674ba3 xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h Wed Jan 05 18:50:54 2011 -0800
+++ b/xen/include/public/hvm/hvm_op.h Wed Jan 05 18:53:22 2011 -0800
@@ -158,4 +158,46 @@ struct xen_hvm_xentrace {
typedef struct xen_hvm_xentrace xen_hvm_xentrace_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
+#define HVMOP_set_mem_access 12
+typedef enum {
+ HVMMEM_access_n,
+ HVMMEM_access_r,
+ HVMMEM_access_w,
+ HVMMEM_access_rw,
+ HVMMEM_access_x,
+ HVMMEM_access_rx,
+ HVMMEM_access_wx,
+ HVMMEM_access_rwx,
+ HVMMEM_access_rx2rw, /* Page starts off as read-execute, but automatically changes
+ * to read-write on a write */
+ HVMMEM_access_default /* Take the domain default */
+} hvmmem_access_t;
+/* Notify that a region of memory is to have specific access types */
+struct xen_hvm_set_mem_access {
+ /* Domain to be updated. */
+ domid_t domid;
+ uint16_t pad[3]; /* align next field on 8-byte boundary */
+ /* Memory access type */
+ uint64_t hvmmem_access; /* hvmmem_access_t */
+ /* First pfn, or ~0ull to set the default access for new pages */
+ uint64_t first_pfn;
+ /* Number of pages, ignored on setting default access */
+ uint64_t nr;
+};
+typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
+
+#define HVMOP_get_mem_access 13
+/* Get the specific access type for a page of memory */
+struct xen_hvm_get_mem_access {
+ /* Domain to be queried. */
+ domid_t domid;
+ uint16_t pad[3]; /* align next field on 8-byte boundary */
+ /* Memory access type: OUT */
+ uint64_t hvmmem_access; /* hvmmem_access_t */
+ /* pfn, or ~0ull for default access for new pages. IN */
+ uint64_t pfn;
+};
+typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
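
One possible companion to the padding comments above (an editorial sketch, not
part of the patch): a standalone compile-time check that the pad[3] fields
really do place hvmmem_access on an 8-byte boundary, assuming the build uses
libxc's include flags so the Xen public headers resolve.

#include <stddef.h>
#include <xenctrl.h>          /* defines __XEN_TOOLS__, pulls in public headers */
#include <xen/hvm/hvm_op.h>   /* explicit, in case xenctrl.h does not already */

/* A negative array size makes the build fail, so these typedefs act as
 * compile-time layout assertions. */
typedef char set_mem_access_layout_ok[
    offsetof(struct xen_hvm_set_mem_access, hvmmem_access) == 8 ? 1 : -1];
typedef char get_mem_access_layout_ok[
    offsetof(struct xen_hvm_get_mem_access, hvmmem_access) == 8 ? 1 : -1];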
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
Ian Campbell
2011-Jan-06 09:54 UTC
Re: [Xen-devel] [PATCH 4 of 7] REDO2: mem_access & mem_access 2: HVMOPs for setting mem access
On Thu, 2011-01-06 at 03:53 +0000, Joe Epstein wrote:
> * Creates HVMOPs for setting and getting memory access. The hypercalls
>   can set individual pages or the default access for new/refreshed
>   pages.
>
> * Added functions to libxc to access these hypercalls.

The tools/libxc/* bits look good to me, except that
xc_domain_set_access_required perhaps belongs in the previous patch?

Reviewed-by: Ian Campbell <ian.campbell@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel