John Levon
2006-Oct-20 03:42 UTC
[Xen-devel] [PATCH] don't use mlock() with Solaris tools
# HG changeset patch # User john.levon@sun.com # Date 1161315500 25200 # Node ID ad404ee927926b8cc4a6498b180e78f0939eb2eb # Parent 3685871c6f7930c7d35baea7cee4f51d00415796 on solaris: mlock requires a page aligned address and mlock doesn''t ensure the pages won''t minor page fault; so don''t use it on solaris. Signed-off-by: Mark Johnson <mark.johnson@sun.com> diff --git a/tools/libxc/xc_acm.c b/tools/libxc/xc_acm.c --- a/tools/libxc/xc_acm.c +++ b/tools/libxc/xc_acm.c @@ -24,12 +24,12 @@ int xc_acm_op(int xc_handle, int cmd, vo hypercall.arg[0] = cmd; hypercall.arg[1] = (unsigned long) arg; - if (mlock(arg, arg_size) != 0) { - PERROR("xc_acm_op: arg mlock failed"); + if (lock_pages(arg, arg_size) != 0) { + PERROR("xc_acm_op: arg lock failed"); goto out; } ret = do_xen_hypercall(xc_handle, &hypercall); - safe_munlock(arg, arg_size); + unlock_pages(arg, arg_size); out: return ret; } diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c --- a/tools/libxc/xc_domain.c +++ b/tools/libxc/xc_domain.c @@ -72,7 +72,7 @@ int xc_domain_shutdown(int xc_handle, arg.domain_id = domid; arg.reason = reason; - if ( mlock(&arg, sizeof(arg)) != 0 ) + if ( lock_pages(&arg, sizeof(arg)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out1; @@ -80,7 +80,7 @@ int xc_domain_shutdown(int xc_handle, ret = do_xen_hypercall(xc_handle, &hypercall); - safe_munlock(&arg, sizeof(arg)); + unlock_pages(&arg, sizeof(arg)); out1: return ret; @@ -103,7 +103,7 @@ int xc_vcpu_setaffinity(int xc_handle, (uint8_t *)&cpumap); domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; - if ( mlock(&cpumap, sizeof(cpumap)) != 0 ) + if ( lock_pages(&cpumap, sizeof(cpumap)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; @@ -111,7 +111,7 @@ int xc_vcpu_setaffinity(int xc_handle, ret = do_domctl(xc_handle, &domctl); - safe_munlock(&cpumap, sizeof(cpumap)); + unlock_pages(&cpumap, sizeof(cpumap)); out: return ret; @@ -134,7 +134,7 @@ int xc_vcpu_getaffinity(int xc_handle, (uint8_t *)cpumap); domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(*cpumap) * 8; - if ( mlock(cpumap, sizeof(*cpumap)) != 0 ) + if ( lock_pages(cpumap, sizeof(*cpumap)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; @@ -142,7 +142,7 @@ int xc_vcpu_getaffinity(int xc_handle, ret = do_domctl(xc_handle, &domctl); - safe_munlock(cpumap, sizeof(*cpumap)); + unlock_pages(cpumap, sizeof(*cpumap)); out: return ret; @@ -213,7 +213,7 @@ int xc_domain_getinfolist(int xc_handle, int ret = 0; DECLARE_SYSCTL; - if ( mlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 ) + if ( lock_pages(info, max_domains*sizeof(xc_domaininfo_t)) != 0 ) return -1; sysctl.cmd = XEN_SYSCTL_getdomaininfolist; @@ -226,8 +226,7 @@ int xc_domain_getinfolist(int xc_handle, else ret = sysctl.u.getdomaininfolist.num_domains; - if ( munlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 ) - ret = -1; + unlock_pages(info, max_domains*sizeof(xc_domaininfo_t)); return ret; } @@ -245,12 +244,12 @@ int xc_vcpu_getcontext(int xc_handle, domctl.u.vcpucontext.vcpu = (uint16_t)vcpu; set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt); - if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 ) + if ( (rc = lock_pages(ctxt, sizeof(*ctxt))) != 0 ) return rc; rc = do_domctl(xc_handle, &domctl); - safe_munlock(ctxt, sizeof(*ctxt)); + unlock_pages(ctxt, sizeof(*ctxt)); return rc; } @@ -512,12 +511,12 @@ int xc_vcpu_setcontext(int xc_handle, domctl.u.vcpucontext.vcpu = vcpu; set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt); - if ( (rc = mlock(ctxt, 
sizeof(*ctxt))) != 0 ) + if ( (rc = lock_pages(ctxt, sizeof(*ctxt))) != 0 ) return rc; rc = do_domctl(xc_handle, &domctl); - safe_munlock(ctxt, sizeof(*ctxt)); + unlock_pages(ctxt, sizeof(*ctxt)); return rc; diff --git a/tools/libxc/xc_evtchn.c b/tools/libxc/xc_evtchn.c --- a/tools/libxc/xc_evtchn.c +++ b/tools/libxc/xc_evtchn.c @@ -18,16 +18,16 @@ static int do_evtchn_op(int xc_handle, i hypercall.arg[0] = cmd; hypercall.arg[1] = (unsigned long)arg; - if ( mlock(arg, arg_size) != 0 ) + if ( lock_pages(arg, arg_size) != 0 ) { - PERROR("do_evtchn_op: arg mlock failed"); + PERROR("do_evtchn_op: arg lock failed"); goto out; } if ((ret = do_xen_hypercall(xc_handle, &hypercall)) < 0) ERROR("do_evtchn_op: HYPERVISOR_event_channel_op failed: %d", ret); - safe_munlock(arg, arg_size); + unlock_pages(arg, arg_size); out: return ret; } diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c --- a/tools/libxc/xc_hvm_build.c +++ b/tools/libxc/xc_hvm_build.c @@ -38,13 +38,13 @@ static void xc_set_hvm_param(int handle, arg.domid = dom; arg.index = param; arg.value = value; - if ( mlock(&arg, sizeof(arg)) != 0 ) + if ( lock_pages(&arg, sizeof(arg)) != 0 ) { PERROR("Could not lock memory for set parameter"); return; } rc = do_xen_hypercall(handle, &hypercall); - safe_munlock(&arg, sizeof(arg)); + unlock_pages(&arg, sizeof(arg)); if (rc < 0) PERROR("set HVM parameter failed (%d)", rc); } @@ -403,7 +403,7 @@ static int xc_hvm_build_internal(int xc_ goto error_out; } - if ( mlock(&st_ctxt, sizeof(st_ctxt) ) ) + if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) ) { PERROR("%s: ctxt mlock failed", __func__); return 1; diff --git a/tools/libxc/xc_linux_build.c b/tools/libxc/xc_linux_build.c --- a/tools/libxc/xc_linux_build.c +++ b/tools/libxc/xc_linux_build.c @@ -1140,9 +1140,9 @@ static int xc_linux_build_internal(int x memset(&st_ctxt, 0, sizeof(st_ctxt)); #endif - if ( mlock(&st_ctxt, sizeof(st_ctxt) ) ) - { - PERROR("%s: ctxt mlock failed", __func__); + if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) ) + { + PERROR("%s: ctxt lock failed", __func__); return 1; } diff --git a/tools/libxc/xc_linux_restore.c b/tools/libxc/xc_linux_restore.c --- a/tools/libxc/xc_linux_restore.c +++ b/tools/libxc/xc_linux_restore.c @@ -162,9 +162,9 @@ int xc_linux_restore(int xc_handle, int return 1; } - if (mlock(&ctxt, sizeof(ctxt))) { + if (lock_pages(&ctxt, sizeof(ctxt))) { /* needed for build domctl, but might as well do early */ - ERROR("Unable to mlock ctxt"); + ERROR("Unable to lock ctxt"); return 1; } @@ -251,8 +251,8 @@ int xc_linux_restore(int xc_handle, int goto out; } - if (mlock(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) { - ERROR("Could not mlock region_mfn"); + if (lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) { + ERROR("Could not lock region_mfn"); goto out; } diff --git a/tools/libxc/xc_linux_save.c b/tools/libxc/xc_linux_save.c --- a/tools/libxc/xc_linux_save.c +++ b/tools/libxc/xc_linux_save.c @@ -628,8 +628,8 @@ int xc_linux_save(int xc_handle, int io_ return 1; } - if (mlock(&ctxt, sizeof(ctxt))) { - ERROR("Unable to mlock ctxt"); + if (lock_pages(&ctxt, sizeof(ctxt))) { + ERROR("Unable to lock ctxt"); return 1; } @@ -767,14 +767,14 @@ int xc_linux_save(int xc_handle, int io_ memset(to_send, 0xff, BITMAP_SIZE); - if (mlock(to_send, BITMAP_SIZE)) { - ERROR("Unable to mlock to_send"); + if (lock_pages(to_send, BITMAP_SIZE)) { + ERROR("Unable to lock to_send"); return 1; } /* (to fix is local only) */ - if (mlock(to_skip, BITMAP_SIZE)) { - ERROR("Unable to mlock to_skip"); + if 
(lock_pages(to_skip, BITMAP_SIZE)) { + ERROR("Unable to lock to_skip"); return 1; } @@ -790,8 +790,8 @@ int xc_linux_save(int xc_handle, int io_ goto out; } - if (mlock(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) { - ERROR("Unable to mlock"); + if (lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) { + ERROR("Unable to lock"); goto out; } diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c --- a/tools/libxc/xc_misc.c +++ b/tools/libxc/xc_misc.c @@ -21,13 +21,13 @@ int xc_readconsolering(int xc_handle, sysctl.u.readconsole.count = nr_chars; sysctl.u.readconsole.clear = clear; - if ( (ret = mlock(buffer, nr_chars)) != 0 ) + if ( (ret = lock_pages(buffer, nr_chars)) != 0 ) return ret; if ( (ret = do_sysctl(xc_handle, &sysctl)) == 0 ) *pnr_chars = sysctl.u.readconsole.count; - safe_munlock(buffer, nr_chars); + unlock_pages(buffer, nr_chars); return ret; } diff --git a/tools/libxc/xc_private.c b/tools/libxc/xc_private.c --- a/tools/libxc/xc_private.c +++ b/tools/libxc/xc_private.c @@ -7,7 +7,23 @@ #include <inttypes.h> #include "xc_private.h" -/* NB: arr must be mlock''ed */ +int lock_pages(void *addr, size_t len) +{ + int e = 0; +#ifndef __sun__ + e = mlock(addr, len); +#endif + return (e); +} + +void unlock_pages(void *addr, size_t len) +{ +#ifndef __sun__ + safe_munlock(addr, len); +#endif +} + +/* NB: arr must be locked */ int xc_get_pfn_type_batch(int xc_handle, uint32_t dom, int num, unsigned long *arr) { @@ -51,7 +67,7 @@ int xc_mmuext_op( hypercall.arg[2] = (unsigned long)0; hypercall.arg[3] = (unsigned long)dom; - if ( mlock(op, nr_ops*sizeof(*op)) != 0 ) + if ( lock_pages(op, nr_ops*sizeof(*op)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out1; @@ -59,7 +75,7 @@ int xc_mmuext_op( ret = do_xen_hypercall(xc_handle, &hypercall); - safe_munlock(op, nr_ops*sizeof(*op)); + unlock_pages(op, nr_ops*sizeof(*op)); out1: return ret; @@ -79,9 +95,9 @@ static int flush_mmu_updates(int xc_hand hypercall.arg[2] = 0; hypercall.arg[3] = mmu->subject; - if ( mlock(mmu->updates, sizeof(mmu->updates)) != 0 ) - { - PERROR("flush_mmu_updates: mmu updates mlock failed"); + if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 ) + { + PERROR("flush_mmu_updates: mmu updates lock_pages failed"); err = 1; goto out; } @@ -94,7 +110,7 @@ static int flush_mmu_updates(int xc_hand mmu->idx = 0; - safe_munlock(mmu->updates, sizeof(mmu->updates)); + unlock_pages(mmu->updates, sizeof(mmu->updates)); out: return err; @@ -149,62 +165,62 @@ int xc_memory_op(int xc_handle, case XENMEM_increase_reservation: case XENMEM_decrease_reservation: case XENMEM_populate_physmap: - if ( mlock(reservation, sizeof(*reservation)) != 0 ) - { - PERROR("Could not mlock"); + if ( lock_pages(reservation, sizeof(*reservation)) != 0 ) + { + PERROR("Could not lock"); goto out1; } get_xen_guest_handle(extent_start, reservation->extent_start); if ( (extent_start != NULL) && - (mlock(extent_start, + (lock_pages(extent_start, reservation->nr_extents * sizeof(xen_pfn_t)) != 0) ) { - PERROR("Could not mlock"); - safe_munlock(reservation, sizeof(*reservation)); + PERROR("Could not lock"); + unlock_pages(reservation, sizeof(*reservation)); goto out1; } break; case XENMEM_machphys_mfn_list: - if ( mlock(xmml, sizeof(*xmml)) != 0 ) - { - PERROR("Could not mlock"); + if ( lock_pages(xmml, sizeof(*xmml)) != 0 ) + { + PERROR("Could not lock"); goto out1; } get_xen_guest_handle(extent_start, xmml->extent_start); - if ( mlock(extent_start, + if ( lock_pages(extent_start, xmml->max_extents * sizeof(xen_pfn_t)) != 0 ) { 
- PERROR("Could not mlock"); - safe_munlock(xmml, sizeof(*xmml)); + PERROR("Could not lock"); + unlock_pages(xmml, sizeof(*xmml)); goto out1; } break; case XENMEM_add_to_physmap: - if ( mlock(arg, sizeof(struct xen_add_to_physmap)) ) - { - PERROR("Could not mlock"); + if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) ) + { + PERROR("Could not lock"); goto out1; } break; case XENMEM_translate_gpfn_list: - if ( mlock(trans, sizeof(*trans)) != 0 ) - { - PERROR("Could not mlock"); + if ( lock_pages(trans, sizeof(*trans)) != 0 ) + { + PERROR("Could not lock"); goto out1; } get_xen_guest_handle(gpfn_list, trans->gpfn_list); - if ( mlock(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 ) - { - PERROR("Could not mlock"); - safe_munlock(trans, sizeof(*trans)); + if ( lock_pages(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 ) + { + PERROR("Could not lock"); + unlock_pages(trans, sizeof(*trans)); goto out1; } get_xen_guest_handle(mfn_list, trans->mfn_list); - if ( mlock(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 ) - { - PERROR("Could not mlock"); - safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); - safe_munlock(trans, sizeof(*trans)); + if ( lock_pages(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 ) + { + PERROR("Could not lock"); + unlock_pages(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); + unlock_pages(trans, sizeof(*trans)); goto out1; } break; @@ -217,27 +233,27 @@ int xc_memory_op(int xc_handle, case XENMEM_increase_reservation: case XENMEM_decrease_reservation: case XENMEM_populate_physmap: - safe_munlock(reservation, sizeof(*reservation)); + unlock_pages(reservation, sizeof(*reservation)); get_xen_guest_handle(extent_start, reservation->extent_start); if ( extent_start != NULL ) - safe_munlock(extent_start, + unlock_pages(extent_start, reservation->nr_extents * sizeof(xen_pfn_t)); break; case XENMEM_machphys_mfn_list: - safe_munlock(xmml, sizeof(*xmml)); + unlock_pages(xmml, sizeof(*xmml)); get_xen_guest_handle(extent_start, xmml->extent_start); - safe_munlock(extent_start, + unlock_pages(extent_start, xmml->max_extents * sizeof(xen_pfn_t)); break; case XENMEM_add_to_physmap: - safe_munlock(arg, sizeof(struct xen_add_to_physmap)); + unlock_pages(arg, sizeof(struct xen_add_to_physmap)); break; case XENMEM_translate_gpfn_list: get_xen_guest_handle(mfn_list, trans->mfn_list); - safe_munlock(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); + unlock_pages(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); get_xen_guest_handle(gpfn_list, trans->gpfn_list); - safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); - safe_munlock(trans, sizeof(*trans)); + unlock_pages(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); + unlock_pages(trans, sizeof(*trans)); break; } @@ -279,15 +295,15 @@ int xc_get_pfn_list(int xc_handle, memset(pfn_buf, 0, max_pfns * sizeof(xen_pfn_t)); #endif - if ( mlock(pfn_buf, max_pfns * sizeof(xen_pfn_t)) != 0 ) - { - PERROR("xc_get_pfn_list: pfn_buf mlock failed"); + if ( lock_pages(pfn_buf, max_pfns * sizeof(xen_pfn_t)) != 0 ) + { + PERROR("xc_get_pfn_list: pfn_buf lock failed"); return -1; } ret = do_domctl(xc_handle, &domctl); - safe_munlock(pfn_buf, max_pfns * sizeof(xen_pfn_t)); + unlock_pages(pfn_buf, max_pfns * sizeof(xen_pfn_t)); #if 0 #ifdef DEBUG @@ -419,7 +435,7 @@ int xc_version(int xc_handle, int cmd, v break; } - if ( (argsize != 0) && (mlock(arg, argsize) != 0) ) + if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) ) { PERROR("Could not lock memory for version hypercall"); return -ENOMEM; @@ -433,7 +449,7 @@ 
int xc_version(int xc_handle, int cmd, v rc = do_xen_version(xc_handle, cmd, arg); if ( argsize != 0 ) - safe_munlock(arg, argsize); + unlock_pages(arg, argsize); return rc; } diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h --- a/tools/libxc/xc_private.h +++ b/tools/libxc/xc_private.h @@ -66,6 +66,9 @@ do { errno = __saved_errno; \ } while (0) +int lock_pages(void *addr, size_t len); +void unlock_pages(void *addr, size_t len); + #define PERROR(_m, _a...) \ do { \ int __saved_errno = errno; \ @@ -104,7 +107,7 @@ static inline int do_domctl(int xc_handl hypercall.op = __HYPERVISOR_domctl; hypercall.arg[0] = (unsigned long)domctl; - if ( mlock(domctl, sizeof(*domctl)) != 0 ) + if ( lock_pages(domctl, sizeof(*domctl)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out1; @@ -117,7 +120,7 @@ static inline int do_domctl(int xc_handl " rebuild the user-space tool set?\n"); } - safe_munlock(domctl, sizeof(*domctl)); + unlock_pages(domctl, sizeof(*domctl)); out1: return ret; @@ -133,7 +136,7 @@ static inline int do_sysctl(int xc_handl hypercall.op = __HYPERVISOR_sysctl; hypercall.arg[0] = (unsigned long)sysctl; - if ( mlock(sysctl, sizeof(*sysctl)) != 0 ) + if ( lock_pages(sysctl, sizeof(*sysctl)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out1; @@ -146,7 +149,7 @@ static inline int do_sysctl(int xc_handl " rebuild the user-space tool set?\n"); } - safe_munlock(sysctl, sizeof(*sysctl)); + unlock_pages(sysctl, sizeof(*sysctl)); out1: return ret; diff --git a/tools/libxc/xc_tbuf.c b/tools/libxc/xc_tbuf.c --- a/tools/libxc/xc_tbuf.c +++ b/tools/libxc/xc_tbuf.c @@ -104,7 +104,7 @@ int xc_tbuf_set_cpu_mask(int xc_handle, set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, (uint8_t *)&mask); sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(mask) * 8; - if ( mlock(&mask, sizeof(mask)) != 0 ) + if ( lock_pages(&mask, sizeof(mask)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; @@ -112,7 +112,7 @@ int xc_tbuf_set_cpu_mask(int xc_handle, ret = do_sysctl(xc_handle, &sysctl); - safe_munlock(&mask, sizeof(mask)); + unlock_pages(&mask, sizeof(mask)); out: return ret; diff --git a/tools/libxc/xg_private.c b/tools/libxc/xg_private.c --- a/tools/libxc/xg_private.c +++ b/tools/libxc/xg_private.c @@ -10,6 +10,22 @@ #include <strings.h> #include "xg_private.h" + +int lock_pages(void *addr, size_t len) +{ + int e = 0; +#ifndef __sun__ + e = mlock(addr, len); +#endif + return (e); +} + +void unlock_pages(void *addr, size_t len) +{ +#ifndef __sun__ + safe_munlock(addr, len); +#endif +} char *xc_read_image(const char *filename, unsigned long *size) { diff --git a/tools/misc/xenperf.c b/tools/misc/xenperf.c --- a/tools/misc/xenperf.c +++ b/tools/misc/xenperf.c @@ -17,6 +17,22 @@ #include <sys/mman.h> #include <errno.h> #include <string.h> + +int lock_pages(void *addr, size_t len) +{ + int e = 0; +#ifndef __sun__ + e = mlock(addr, len); +#endif + return (e); +} + +void unlock_pages(void *addr, size_t len) +{ +#ifndef __sun__ + munlock(addr, len); +#endif +} int main(int argc, char *argv[]) { @@ -87,11 +103,11 @@ int main(int argc, char *argv[]) pcv = malloc(sizeof(*pcv) * num_val); if ( pcd == NULL - || mlock(pcd, sizeof(*pcd) * num_desc) != 0 + || lock_pages(pcd, sizeof(*pcd) * num_desc) != 0 || pcv == NULL - || mlock(pcd, sizeof(*pcv) * num_val) != 0) + || lock_pages(pcd, sizeof(*pcv) * num_val) != 0) { - fprintf(stderr, "Could not alloc or mlock buffers: %d (%s)\n", + fprintf(stderr, "Could not alloc or lock buffers: %d (%s)\n", errno, 
strerror(errno)); exit(-1); } @@ -104,8 +120,8 @@ int main(int argc, char *argv[]) return 1; } - munlock(pcd, sizeof(*pcd) * num_desc); - munlock(pcv, sizeof(*pcv) * num_val); + unlock_pages(pcd, sizeof(*pcd) * num_desc); + unlock_pages(pcv, sizeof(*pcv) * num_val); val = pcv; for ( i = 0; i < num_desc; i++ ) _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
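Every call site the patch touches ends up following the same pattern. A condensed sketch is below; the wrapper function and the hypercall op chosen here are illustrative only, while lock_pages(), unlock_pages(), do_xen_hypercall() and PERROR() are the libxc helpers used in the patch (this assumes libxc's xc_private.h is included):

/* Illustrative sketch of the converted call-site pattern, not a real
 * libxc function.  lock_pages()/unlock_pages() are the new wrappers
 * from the patch; on Solaris they are no-ops and the privcmd driver
 * pins the memory instead. */
static int example_hypercall(int xc_handle, int cmd, void *arg, size_t arg_size)
{
    DECLARE_HYPERCALL;
    int ret = -1;

    hypercall.op     = __HYPERVISOR_xen_version;  /* illustrative op only */
    hypercall.arg[0] = cmd;
    hypercall.arg[1] = (unsigned long)arg;

    if ( lock_pages(arg, arg_size) != 0 )
    {
        PERROR("example_hypercall: arg lock failed");
        return ret;
    }

    ret = do_xen_hypercall(xc_handle, &hypercall);

    unlock_pages(arg, arg_size);
    return ret;
}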
Daniel Veillard
2006-Oct-20 22:35 UTC
Re: [Xen-devel] [PATCH] don't use mlock() with Solaris tools
On Fri, Oct 20, 2006 at 04:42:47AM +0100, John Levon wrote:
> on solaris: mlock requires a page aligned address and mlock doesn't ensure the pages won't minor page fault; so don't use it on solaris.
[...]
> -/* NB: arr must be mlock'ed */
> +int lock_pages(void *addr, size_t len)
> +{
> +    int e = 0;
> +#ifndef __sun__
> +    e = mlock(addr, len);
> +#endif
> +    return (e);
> +}
> +
> +void unlock_pages(void *addr, size_t len)
> +{
> +#ifndef __sun__
> +    safe_munlock(addr, len);
> +#endif
> +}
> +

So on Solaris you don't pin down the memory used for hypercall parameters and results at all ? Is there really no risk associated with this strategy ?

  surprised,

Daniel

--
Red Hat Virtualization group  http://redhat.com/virtualization/
Daniel Veillard      | virtualization library  http://libvirt.org/
veillard@redhat.com  | libxml GNOME XML XSLT toolkit  http://xmlsoft.org/
http://veillard.com/ | Rpmfind RPM search engine  http://rpmfind.net/
Mark Johnson
2006-Oct-20 23:11 UTC
Re: [Xen-devel] [PATCH] don't use mlock() with Solaris tools
On 10/20/06, Daniel Veillard <veillard@redhat.com> wrote:
> On Fri, Oct 20, 2006 at 04:42:47AM +0100, John Levon wrote:
> > on solaris: mlock requires a page aligned address and mlock doesn't ensure the pages won't minor page fault; so don't use it on solaris.
> [...]
> > -/* NB: arr must be mlock'ed */
> > +int lock_pages(void *addr, size_t len)
> > +{
> > +    int e = 0;
> > +#ifndef __sun__
> > +    e = mlock(addr, len);
> > +#endif
> > +    return (e);
> > +}
> > +
> > +void unlock_pages(void *addr, size_t len)
> > +{
> > +#ifndef __sun__
> > +    safe_munlock(addr, len);
> > +#endif
> > +}
> > +
>
> So on Solaris you don't pin down the memory used for hypercall parameters
> and results at all ? Is there really no risk associated with this strategy ?
>
>   surprised,

We have to pin it down in the privcmd driver. There is no way to pin the memory from a user app.


MRJ
John Levon
2006-Oct-22 18:02 UTC
Re: [Xen-devel] [PATCH] don't use mlock() with Solaris tools
On Fri, Oct 20, 2006 at 04:42:47AM +0100, John Levon wrote:> # HG changeset patch > # User john.levon@sun.com > # Date 1161315500 25200 > # Node ID ad404ee927926b8cc4a6498b180e78f0939eb2eb > # Parent 3685871c6f7930c7d35baea7cee4f51d00415796 > on solaris: mlock requires a page aligned address and mlock doesn''t ensure the pages won''t minor page fault; so don''t use it on solaris.This isn''t in unstable yet; I don''t know if it was missed or if there''s some better way we could do this? thanks, john # HG changeset patch # User john.levon@sun.com # Date 1161315500 25200 # Node ID ad404ee927926b8cc4a6498b180e78f0939eb2eb # Parent 3685871c6f7930c7d35baea7cee4f51d00415796 on solaris: mlock requires a page aligned address and mlock doesn''t ensure the pages won''t minor page fault; so don''t use it on solaris. Signed-off-by: Mark Johnson <mark.johnson@sun.com> diff --git a/tools/libxc/xc_acm.c b/tools/libxc/xc_acm.c --- a/tools/libxc/xc_acm.c +++ b/tools/libxc/xc_acm.c @@ -24,12 +24,12 @@ int xc_acm_op(int xc_handle, int cmd, vo hypercall.arg[0] = cmd; hypercall.arg[1] = (unsigned long) arg; - if (mlock(arg, arg_size) != 0) { - PERROR("xc_acm_op: arg mlock failed"); + if (lock_pages(arg, arg_size) != 0) { + PERROR("xc_acm_op: arg lock failed"); goto out; } ret = do_xen_hypercall(xc_handle, &hypercall); - safe_munlock(arg, arg_size); + unlock_pages(arg, arg_size); out: return ret; } diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c --- a/tools/libxc/xc_domain.c +++ b/tools/libxc/xc_domain.c @@ -72,7 +72,7 @@ int xc_domain_shutdown(int xc_handle, arg.domain_id = domid; arg.reason = reason; - if ( mlock(&arg, sizeof(arg)) != 0 ) + if ( lock_pages(&arg, sizeof(arg)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out1; @@ -80,7 +80,7 @@ int xc_domain_shutdown(int xc_handle, ret = do_xen_hypercall(xc_handle, &hypercall); - safe_munlock(&arg, sizeof(arg)); + unlock_pages(&arg, sizeof(arg)); out1: return ret; @@ -103,7 +103,7 @@ int xc_vcpu_setaffinity(int xc_handle, (uint8_t *)&cpumap); domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; - if ( mlock(&cpumap, sizeof(cpumap)) != 0 ) + if ( lock_pages(&cpumap, sizeof(cpumap)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; @@ -111,7 +111,7 @@ int xc_vcpu_setaffinity(int xc_handle, ret = do_domctl(xc_handle, &domctl); - safe_munlock(&cpumap, sizeof(cpumap)); + unlock_pages(&cpumap, sizeof(cpumap)); out: return ret; @@ -134,7 +134,7 @@ int xc_vcpu_getaffinity(int xc_handle, (uint8_t *)cpumap); domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(*cpumap) * 8; - if ( mlock(cpumap, sizeof(*cpumap)) != 0 ) + if ( lock_pages(cpumap, sizeof(*cpumap)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; @@ -142,7 +142,7 @@ int xc_vcpu_getaffinity(int xc_handle, ret = do_domctl(xc_handle, &domctl); - safe_munlock(cpumap, sizeof(*cpumap)); + unlock_pages(cpumap, sizeof(*cpumap)); out: return ret; @@ -213,7 +213,7 @@ int xc_domain_getinfolist(int xc_handle, int ret = 0; DECLARE_SYSCTL; - if ( mlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 ) + if ( lock_pages(info, max_domains*sizeof(xc_domaininfo_t)) != 0 ) return -1; sysctl.cmd = XEN_SYSCTL_getdomaininfolist; @@ -226,8 +226,7 @@ int xc_domain_getinfolist(int xc_handle, else ret = sysctl.u.getdomaininfolist.num_domains; - if ( munlock(info, max_domains*sizeof(xc_domaininfo_t)) != 0 ) - ret = -1; + unlock_pages(info, max_domains*sizeof(xc_domaininfo_t)); return ret; } @@ -245,12 +244,12 @@ int xc_vcpu_getcontext(int xc_handle, 
domctl.u.vcpucontext.vcpu = (uint16_t)vcpu; set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt); - if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 ) + if ( (rc = lock_pages(ctxt, sizeof(*ctxt))) != 0 ) return rc; rc = do_domctl(xc_handle, &domctl); - safe_munlock(ctxt, sizeof(*ctxt)); + unlock_pages(ctxt, sizeof(*ctxt)); return rc; } @@ -512,12 +511,12 @@ int xc_vcpu_setcontext(int xc_handle, domctl.u.vcpucontext.vcpu = vcpu; set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt); - if ( (rc = mlock(ctxt, sizeof(*ctxt))) != 0 ) + if ( (rc = lock_pages(ctxt, sizeof(*ctxt))) != 0 ) return rc; rc = do_domctl(xc_handle, &domctl); - safe_munlock(ctxt, sizeof(*ctxt)); + unlock_pages(ctxt, sizeof(*ctxt)); return rc; diff --git a/tools/libxc/xc_evtchn.c b/tools/libxc/xc_evtchn.c --- a/tools/libxc/xc_evtchn.c +++ b/tools/libxc/xc_evtchn.c @@ -18,16 +18,16 @@ static int do_evtchn_op(int xc_handle, i hypercall.arg[0] = cmd; hypercall.arg[1] = (unsigned long)arg; - if ( mlock(arg, arg_size) != 0 ) + if ( lock_pages(arg, arg_size) != 0 ) { - PERROR("do_evtchn_op: arg mlock failed"); + PERROR("do_evtchn_op: arg lock failed"); goto out; } if ((ret = do_xen_hypercall(xc_handle, &hypercall)) < 0) ERROR("do_evtchn_op: HYPERVISOR_event_channel_op failed: %d", ret); - safe_munlock(arg, arg_size); + unlock_pages(arg, arg_size); out: return ret; } diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c --- a/tools/libxc/xc_hvm_build.c +++ b/tools/libxc/xc_hvm_build.c @@ -38,13 +38,13 @@ static void xc_set_hvm_param(int handle, arg.domid = dom; arg.index = param; arg.value = value; - if ( mlock(&arg, sizeof(arg)) != 0 ) + if ( lock_pages(&arg, sizeof(arg)) != 0 ) { PERROR("Could not lock memory for set parameter"); return; } rc = do_xen_hypercall(handle, &hypercall); - safe_munlock(&arg, sizeof(arg)); + unlock_pages(&arg, sizeof(arg)); if (rc < 0) PERROR("set HVM parameter failed (%d)", rc); } @@ -403,7 +403,7 @@ static int xc_hvm_build_internal(int xc_ goto error_out; } - if ( mlock(&st_ctxt, sizeof(st_ctxt) ) ) + if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) ) { PERROR("%s: ctxt mlock failed", __func__); return 1; diff --git a/tools/libxc/xc_linux_build.c b/tools/libxc/xc_linux_build.c --- a/tools/libxc/xc_linux_build.c +++ b/tools/libxc/xc_linux_build.c @@ -1140,9 +1140,9 @@ static int xc_linux_build_internal(int x memset(&st_ctxt, 0, sizeof(st_ctxt)); #endif - if ( mlock(&st_ctxt, sizeof(st_ctxt) ) ) - { - PERROR("%s: ctxt mlock failed", __func__); + if ( lock_pages(&st_ctxt, sizeof(st_ctxt) ) ) + { + PERROR("%s: ctxt lock failed", __func__); return 1; } diff --git a/tools/libxc/xc_linux_restore.c b/tools/libxc/xc_linux_restore.c --- a/tools/libxc/xc_linux_restore.c +++ b/tools/libxc/xc_linux_restore.c @@ -162,9 +162,9 @@ int xc_linux_restore(int xc_handle, int return 1; } - if (mlock(&ctxt, sizeof(ctxt))) { + if (lock_pages(&ctxt, sizeof(ctxt))) { /* needed for build domctl, but might as well do early */ - ERROR("Unable to mlock ctxt"); + ERROR("Unable to lock ctxt"); return 1; } @@ -251,8 +251,8 @@ int xc_linux_restore(int xc_handle, int goto out; } - if (mlock(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) { - ERROR("Could not mlock region_mfn"); + if (lock_pages(region_mfn, sizeof(xen_pfn_t) * MAX_BATCH_SIZE)) { + ERROR("Could not lock region_mfn"); goto out; } diff --git a/tools/libxc/xc_linux_save.c b/tools/libxc/xc_linux_save.c --- a/tools/libxc/xc_linux_save.c +++ b/tools/libxc/xc_linux_save.c @@ -628,8 +628,8 @@ int xc_linux_save(int xc_handle, int io_ return 1; } - if (mlock(&ctxt, 
sizeof(ctxt))) { - ERROR("Unable to mlock ctxt"); + if (lock_pages(&ctxt, sizeof(ctxt))) { + ERROR("Unable to lock ctxt"); return 1; } @@ -767,14 +767,14 @@ int xc_linux_save(int xc_handle, int io_ memset(to_send, 0xff, BITMAP_SIZE); - if (mlock(to_send, BITMAP_SIZE)) { - ERROR("Unable to mlock to_send"); + if (lock_pages(to_send, BITMAP_SIZE)) { + ERROR("Unable to lock to_send"); return 1; } /* (to fix is local only) */ - if (mlock(to_skip, BITMAP_SIZE)) { - ERROR("Unable to mlock to_skip"); + if (lock_pages(to_skip, BITMAP_SIZE)) { + ERROR("Unable to lock to_skip"); return 1; } @@ -790,8 +790,8 @@ int xc_linux_save(int xc_handle, int io_ goto out; } - if (mlock(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) { - ERROR("Unable to mlock"); + if (lock_pages(pfn_type, MAX_BATCH_SIZE * sizeof(*pfn_type))) { + ERROR("Unable to lock"); goto out; } diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c --- a/tools/libxc/xc_misc.c +++ b/tools/libxc/xc_misc.c @@ -21,13 +21,13 @@ int xc_readconsolering(int xc_handle, sysctl.u.readconsole.count = nr_chars; sysctl.u.readconsole.clear = clear; - if ( (ret = mlock(buffer, nr_chars)) != 0 ) + if ( (ret = lock_pages(buffer, nr_chars)) != 0 ) return ret; if ( (ret = do_sysctl(xc_handle, &sysctl)) == 0 ) *pnr_chars = sysctl.u.readconsole.count; - safe_munlock(buffer, nr_chars); + unlock_pages(buffer, nr_chars); return ret; } diff --git a/tools/libxc/xc_private.c b/tools/libxc/xc_private.c --- a/tools/libxc/xc_private.c +++ b/tools/libxc/xc_private.c @@ -7,7 +7,23 @@ #include <inttypes.h> #include "xc_private.h" -/* NB: arr must be mlock''ed */ +int lock_pages(void *addr, size_t len) +{ + int e = 0; +#ifndef __sun__ + e = mlock(addr, len); +#endif + return (e); +} + +void unlock_pages(void *addr, size_t len) +{ +#ifndef __sun__ + safe_munlock(addr, len); +#endif +} + +/* NB: arr must be locked */ int xc_get_pfn_type_batch(int xc_handle, uint32_t dom, int num, unsigned long *arr) { @@ -51,7 +67,7 @@ int xc_mmuext_op( hypercall.arg[2] = (unsigned long)0; hypercall.arg[3] = (unsigned long)dom; - if ( mlock(op, nr_ops*sizeof(*op)) != 0 ) + if ( lock_pages(op, nr_ops*sizeof(*op)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out1; @@ -59,7 +75,7 @@ int xc_mmuext_op( ret = do_xen_hypercall(xc_handle, &hypercall); - safe_munlock(op, nr_ops*sizeof(*op)); + unlock_pages(op, nr_ops*sizeof(*op)); out1: return ret; @@ -79,9 +95,9 @@ static int flush_mmu_updates(int xc_hand hypercall.arg[2] = 0; hypercall.arg[3] = mmu->subject; - if ( mlock(mmu->updates, sizeof(mmu->updates)) != 0 ) - { - PERROR("flush_mmu_updates: mmu updates mlock failed"); + if ( lock_pages(mmu->updates, sizeof(mmu->updates)) != 0 ) + { + PERROR("flush_mmu_updates: mmu updates lock_pages failed"); err = 1; goto out; } @@ -94,7 +110,7 @@ static int flush_mmu_updates(int xc_hand mmu->idx = 0; - safe_munlock(mmu->updates, sizeof(mmu->updates)); + unlock_pages(mmu->updates, sizeof(mmu->updates)); out: return err; @@ -149,62 +165,62 @@ int xc_memory_op(int xc_handle, case XENMEM_increase_reservation: case XENMEM_decrease_reservation: case XENMEM_populate_physmap: - if ( mlock(reservation, sizeof(*reservation)) != 0 ) - { - PERROR("Could not mlock"); + if ( lock_pages(reservation, sizeof(*reservation)) != 0 ) + { + PERROR("Could not lock"); goto out1; } get_xen_guest_handle(extent_start, reservation->extent_start); if ( (extent_start != NULL) && - (mlock(extent_start, + (lock_pages(extent_start, reservation->nr_extents * sizeof(xen_pfn_t)) != 0) ) { - PERROR("Could not mlock"); - 
safe_munlock(reservation, sizeof(*reservation)); + PERROR("Could not lock"); + unlock_pages(reservation, sizeof(*reservation)); goto out1; } break; case XENMEM_machphys_mfn_list: - if ( mlock(xmml, sizeof(*xmml)) != 0 ) - { - PERROR("Could not mlock"); + if ( lock_pages(xmml, sizeof(*xmml)) != 0 ) + { + PERROR("Could not lock"); goto out1; } get_xen_guest_handle(extent_start, xmml->extent_start); - if ( mlock(extent_start, + if ( lock_pages(extent_start, xmml->max_extents * sizeof(xen_pfn_t)) != 0 ) { - PERROR("Could not mlock"); - safe_munlock(xmml, sizeof(*xmml)); + PERROR("Could not lock"); + unlock_pages(xmml, sizeof(*xmml)); goto out1; } break; case XENMEM_add_to_physmap: - if ( mlock(arg, sizeof(struct xen_add_to_physmap)) ) - { - PERROR("Could not mlock"); + if ( lock_pages(arg, sizeof(struct xen_add_to_physmap)) ) + { + PERROR("Could not lock"); goto out1; } break; case XENMEM_translate_gpfn_list: - if ( mlock(trans, sizeof(*trans)) != 0 ) - { - PERROR("Could not mlock"); + if ( lock_pages(trans, sizeof(*trans)) != 0 ) + { + PERROR("Could not lock"); goto out1; } get_xen_guest_handle(gpfn_list, trans->gpfn_list); - if ( mlock(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 ) - { - PERROR("Could not mlock"); - safe_munlock(trans, sizeof(*trans)); + if ( lock_pages(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 ) + { + PERROR("Could not lock"); + unlock_pages(trans, sizeof(*trans)); goto out1; } get_xen_guest_handle(mfn_list, trans->mfn_list); - if ( mlock(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 ) - { - PERROR("Could not mlock"); - safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); - safe_munlock(trans, sizeof(*trans)); + if ( lock_pages(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)) != 0 ) + { + PERROR("Could not lock"); + unlock_pages(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); + unlock_pages(trans, sizeof(*trans)); goto out1; } break; @@ -217,27 +233,27 @@ int xc_memory_op(int xc_handle, case XENMEM_increase_reservation: case XENMEM_decrease_reservation: case XENMEM_populate_physmap: - safe_munlock(reservation, sizeof(*reservation)); + unlock_pages(reservation, sizeof(*reservation)); get_xen_guest_handle(extent_start, reservation->extent_start); if ( extent_start != NULL ) - safe_munlock(extent_start, + unlock_pages(extent_start, reservation->nr_extents * sizeof(xen_pfn_t)); break; case XENMEM_machphys_mfn_list: - safe_munlock(xmml, sizeof(*xmml)); + unlock_pages(xmml, sizeof(*xmml)); get_xen_guest_handle(extent_start, xmml->extent_start); - safe_munlock(extent_start, + unlock_pages(extent_start, xmml->max_extents * sizeof(xen_pfn_t)); break; case XENMEM_add_to_physmap: - safe_munlock(arg, sizeof(struct xen_add_to_physmap)); + unlock_pages(arg, sizeof(struct xen_add_to_physmap)); break; case XENMEM_translate_gpfn_list: get_xen_guest_handle(mfn_list, trans->mfn_list); - safe_munlock(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); + unlock_pages(mfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); get_xen_guest_handle(gpfn_list, trans->gpfn_list); - safe_munlock(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); - safe_munlock(trans, sizeof(*trans)); + unlock_pages(gpfn_list, trans->nr_gpfns * sizeof(xen_pfn_t)); + unlock_pages(trans, sizeof(*trans)); break; } @@ -279,15 +295,15 @@ int xc_get_pfn_list(int xc_handle, memset(pfn_buf, 0, max_pfns * sizeof(xen_pfn_t)); #endif - if ( mlock(pfn_buf, max_pfns * sizeof(xen_pfn_t)) != 0 ) - { - PERROR("xc_get_pfn_list: pfn_buf mlock failed"); + if ( lock_pages(pfn_buf, max_pfns * sizeof(xen_pfn_t)) != 0 ) + { 
+ PERROR("xc_get_pfn_list: pfn_buf lock failed"); return -1; } ret = do_domctl(xc_handle, &domctl); - safe_munlock(pfn_buf, max_pfns * sizeof(xen_pfn_t)); + unlock_pages(pfn_buf, max_pfns * sizeof(xen_pfn_t)); #if 0 #ifdef DEBUG @@ -419,7 +435,7 @@ int xc_version(int xc_handle, int cmd, v break; } - if ( (argsize != 0) && (mlock(arg, argsize) != 0) ) + if ( (argsize != 0) && (lock_pages(arg, argsize) != 0) ) { PERROR("Could not lock memory for version hypercall"); return -ENOMEM; @@ -433,7 +449,7 @@ int xc_version(int xc_handle, int cmd, v rc = do_xen_version(xc_handle, cmd, arg); if ( argsize != 0 ) - safe_munlock(arg, argsize); + unlock_pages(arg, argsize); return rc; } diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h --- a/tools/libxc/xc_private.h +++ b/tools/libxc/xc_private.h @@ -66,6 +66,9 @@ do { errno = __saved_errno; \ } while (0) +int lock_pages(void *addr, size_t len); +void unlock_pages(void *addr, size_t len); + #define PERROR(_m, _a...) \ do { \ int __saved_errno = errno; \ @@ -104,7 +107,7 @@ static inline int do_domctl(int xc_handl hypercall.op = __HYPERVISOR_domctl; hypercall.arg[0] = (unsigned long)domctl; - if ( mlock(domctl, sizeof(*domctl)) != 0 ) + if ( lock_pages(domctl, sizeof(*domctl)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out1; @@ -117,7 +120,7 @@ static inline int do_domctl(int xc_handl " rebuild the user-space tool set?\n"); } - safe_munlock(domctl, sizeof(*domctl)); + unlock_pages(domctl, sizeof(*domctl)); out1: return ret; @@ -133,7 +136,7 @@ static inline int do_sysctl(int xc_handl hypercall.op = __HYPERVISOR_sysctl; hypercall.arg[0] = (unsigned long)sysctl; - if ( mlock(sysctl, sizeof(*sysctl)) != 0 ) + if ( lock_pages(sysctl, sizeof(*sysctl)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out1; @@ -146,7 +149,7 @@ static inline int do_sysctl(int xc_handl " rebuild the user-space tool set?\n"); } - safe_munlock(sysctl, sizeof(*sysctl)); + unlock_pages(sysctl, sizeof(*sysctl)); out1: return ret; diff --git a/tools/libxc/xc_tbuf.c b/tools/libxc/xc_tbuf.c --- a/tools/libxc/xc_tbuf.c +++ b/tools/libxc/xc_tbuf.c @@ -104,7 +104,7 @@ int xc_tbuf_set_cpu_mask(int xc_handle, set_xen_guest_handle(sysctl.u.tbuf_op.cpu_mask.bitmap, (uint8_t *)&mask); sysctl.u.tbuf_op.cpu_mask.nr_cpus = sizeof(mask) * 8; - if ( mlock(&mask, sizeof(mask)) != 0 ) + if ( lock_pages(&mask, sizeof(mask)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; @@ -112,7 +112,7 @@ int xc_tbuf_set_cpu_mask(int xc_handle, ret = do_sysctl(xc_handle, &sysctl); - safe_munlock(&mask, sizeof(mask)); + unlock_pages(&mask, sizeof(mask)); out: return ret; diff --git a/tools/libxc/xg_private.c b/tools/libxc/xg_private.c --- a/tools/libxc/xg_private.c +++ b/tools/libxc/xg_private.c @@ -10,6 +10,22 @@ #include <strings.h> #include "xg_private.h" + +int lock_pages(void *addr, size_t len) +{ + int e = 0; +#ifndef __sun__ + e = mlock(addr, len); +#endif + return (e); +} + +void unlock_pages(void *addr, size_t len) +{ +#ifndef __sun__ + safe_munlock(addr, len); +#endif +} char *xc_read_image(const char *filename, unsigned long *size) { diff --git a/tools/misc/xenperf.c b/tools/misc/xenperf.c --- a/tools/misc/xenperf.c +++ b/tools/misc/xenperf.c @@ -17,6 +17,22 @@ #include <sys/mman.h> #include <errno.h> #include <string.h> + +int lock_pages(void *addr, size_t len) +{ + int e = 0; +#ifndef __sun__ + e = mlock(addr, len); +#endif + return (e); +} + +void unlock_pages(void *addr, size_t len) +{ +#ifndef __sun__ + munlock(addr, len); +#endif 
+} int main(int argc, char *argv[]) { @@ -87,11 +103,11 @@ int main(int argc, char *argv[]) pcv = malloc(sizeof(*pcv) * num_val); if ( pcd == NULL - || mlock(pcd, sizeof(*pcd) * num_desc) != 0 + || lock_pages(pcd, sizeof(*pcd) * num_desc) != 0 || pcv == NULL - || mlock(pcd, sizeof(*pcv) * num_val) != 0) + || lock_pages(pcd, sizeof(*pcv) * num_val) != 0) { - fprintf(stderr, "Could not alloc or mlock buffers: %d (%s)\n", + fprintf(stderr, "Could not alloc or lock buffers: %d (%s)\n", errno, strerror(errno)); exit(-1); } @@ -104,8 +120,8 @@ int main(int argc, char *argv[]) return 1; } - munlock(pcd, sizeof(*pcd) * num_desc); - munlock(pcv, sizeof(*pcv) * num_val); + unlock_pages(pcd, sizeof(*pcd) * num_desc); + unlock_pages(pcv, sizeof(*pcv) * num_val); val = pcv; for ( i = 0; i < num_desc; i++ ) _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Keir Fraser
2006-Oct-22 18:16 UTC
Re: [Xen-devel] [PATCH] don't use mlock() with Solaris tools
On 22/10/06 7:02 pm, "John Levon" <levon@movementarian.org> wrote:

>> # HG changeset patch
>> # User john.levon@sun.com
>> # Date 1161315500 25200
>> # Node ID ad404ee927926b8cc4a6498b180e78f0939eb2eb
>> # Parent 3685871c6f7930c7d35baea7cee4f51d00415796
>> on solaris: mlock requires a page aligned address and mlock doesn't ensure
>> the pages won't minor page fault; so don't use it on solaris.
>
> This isn't in unstable yet; I don't know if it was missed or if there's
> some better way we could do this?
>
> thanks,
> john

Don't worry, it's in my to-apply list. I don't do FIFO. :-)

 -- Keir
Jimi Xenidis
2006-Oct-22 21:09 UTC
Re: [Xen-devel] [PATCH] don't use mlock() with Solaris tools
On Oct 22, 2006, at 2:16 PM, Keir Fraser wrote:

> On 22/10/06 7:02 pm, "John Levon" <levon@movementarian.org> wrote:
>
>>> # HG changeset patch
>>> # User john.levon@sun.com
>>> # Date 1161315500 25200
>>> # Node ID ad404ee927926b8cc4a6498b180e78f0939eb2eb
>>> # Parent 3685871c6f7930c7d35baea7cee4f51d00415796
>>> on solaris: mlock requires a page aligned address and mlock
>>> doesn't ensure
>>> the pages won't minor page fault; so don't use it on solaris.
>>
>> This isn't in unstable yet; I don't know if it was missed or if
>> there's
>> some better way we could do this?
>>
>> thanks,
>> john
>
> Don't worry, it's in my to-apply list. I don't do FIFO. :-)

Hey, hang on a minute.
If you are gonna add this, then can we abstract this to the arches, with something better than an ifdef? The locking makes no sense on PPC (even on Linux) either.

Hollis and I touched on this, and it has been the bane of our existence since we were required to have Xen interpret user-level pointers; the thread started here:
  http://lists.xensource.com/archives/html/xen-devel/2005-08/msg00719.html

I cannot speak for Hollis (I think he may actually disagree with me), but I see this as an opportunity to design something better, or at least to have the debate (again).

What might be a better alternative is to actually have an allocate call rather than an mlock call, where the arches and OSes could do what is best for them.
So what is done on x86 could be:
  do { x = alloca(len); mlock (x, len); } while (0)
but Solaris and other arches could do something more.

BTW: John, how do you solve the minor faults?!

-JX
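A minimal sketch of what such an allocate-style call could look like on x86/Linux follows; the function names are hypothetical and are not an existing libxc interface, and a Solaris or PPC port would supply a different body behind the same interface:

/* Hypothetical allocate/free pair for hypercall buffers; not part of
 * libxc.  Each OS/arch port would provide its own implementation. */
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

void *xc_hcall_buf_alloc(size_t len)
{
    void *buf;

    /* Plain x86/Linux version: page-aligned allocation, pinned with
     * mlock() so the hypervisor can safely access it. */
    if ( posix_memalign(&buf, sysconf(_SC_PAGESIZE), len) != 0 )
        return NULL;
    if ( mlock(buf, len) != 0 )
    {
        free(buf);
        return NULL;
    }
    return buf;
}

void xc_hcall_buf_free(void *buf, size_t len)
{
    munlock(buf, len);
    free(buf);
}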
John Levon
2006-Oct-23 08:38 UTC
[XenPPC] Re: [Xen-devel] [PATCH] don't use mlock() with Solaris tools
On Sun, Oct 22, 2006 at 05:09:35PM -0400, Jimi Xenidis wrote:

> Hollis and I touched on this, and it has been the bane of our existence
> since we were required to have Xen interpret user-level pointers;
> the thread started here:
> http://lists.xensource.com/archives/html/xen-devel/2005-08/msg00719.html
>
> I cannot speak for Hollis (I think he may actually disagree with me)
> but see this as an opportunity to design something better, or at
> least have the debate (again).

Yes, we should have something better. At the moment I'm trying to clear out our patch queue (we only have a few hacks left really now...). The "special pages" idea of yours sounds workable if the xc_get_pfn_list() thing can be fixed.

> What might be a better alternative is to actually have an allocate
> call rather than an mlock call, where the arches and OSes could do
> what is best for them.
> So what is done on x86 could be:
> do { x = alloca(len); mlock (x, len); } while (0)

This would be a little uncomfortable, if only because it's an extra system call every time. Also, letting userspace pin translations in memory would be troublesome.

> BTW: John, how do you solve the minor faults?!

We currently interpret the ioctl in the privcmd driver and softlock the mappings in, which isn't very nice for all the reasons you point out.

regards
john
Keir Fraser
2006-Oct-23 08:46 UTC
[XenPPC] Re: [Xen-devel] [PATCH] don't use mlock() with Solaris tools
On 22/10/06 22:09, "Jimi Xenidis" <jimix@watson.ibm.com> wrote:

> I cannot speak for Hollis (I think he may actually disagree with me)
> but see this as an opportunity to design something better, or at
> least have the debate (again).
> What might be a better alternative is to actually have an allocate
> call rather than an mlock call, where the arches and OSes could do
> what is best for them.
> So what is done on x86 could be:
> do { x = alloca(len); mlock (x, len); } while (0)
>
> but Solaris and other arches could do something more.

It would change the API too, since memory buffers passed in by callers to libxc would also need to be allocated in a special way. Unless you would be prepared to perform the hypercall on a shadow buffer and then copy to the caller buffer, which I suppose would be a simpler API.

John's patch is fine for now.

 -- Keir
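A rough sketch of the shadow-buffer alternative Keir mentions is below; the helper name and signature are hypothetical, not the actual libxc API:

/* Hypothetical helper, not part of libxc: run a hypercall against a
 * pinned shadow copy of the caller's buffer, then copy the results
 * back.  The caller's own buffer is never mlock()ed, so the library
 * API would not need to care how each OS pins memory. */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int xc_hcall_with_bounce(int xc_handle, void *caller_buf, size_t len,
                         int (*do_call)(int xc_handle, void *buf))
{
    void *shadow;
    int ret;

    if ( posix_memalign(&shadow, sysconf(_SC_PAGESIZE), len) != 0 )
        return -1;
    if ( mlock(shadow, len) != 0 )      /* or an OS-specific pinning call */
    {
        free(shadow);
        return -1;
    }

    memcpy(shadow, caller_buf, len);    /* copy arguments in */
    ret = do_call(xc_handle, shadow);   /* hypercall sees pinned memory */
    memcpy(caller_buf, shadow, len);    /* copy results back out */

    munlock(shadow, len);
    free(shadow);
    return ret;
}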