Daniel De Graaf
2012-Dec-12 20:39 UTC
[PATCH v7 1/2] xen: unify domain locking in domctl code
These two patches were originally part of the XSM series that I have posted, and remain prerequisites for that series. However, they are independent of the XSM changes and are a useful simplification regardless of the use of XSM. The Acked-bys on these patches were provided before rebasing them over the copyback changes in 26268:1b72138bddda, which had minor conflicts that I resolved. [PATCH 1/2] xen: lock target domain in do_domctl common code [PATCH 2/2] xen/arch/*: add struct domain parameter to
Daniel De Graaf
2012-Dec-12 20:39 UTC
[PATCH 1/2] xen: lock target domain in do_domctl common code
Because almost all domctls need to lock the target domain, do this by default instead of repeating it in each domctl. This is not currently extended to the arch-specific domctls, but RCU locks are safe to take recursively so this only causes duplicate but correct locking. Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> Acked-by: Jan Beulich <jbeulich@suse.com> Cc: Keir Fraser <keir@xen.org> --- xen/common/domctl.c | 268 ++++++++++++---------------------------------------- 1 file changed, 59 insertions(+), 209 deletions(-) diff --git a/xen/common/domctl.c b/xen/common/domctl.c index 99eea48..a491159 100644 --- a/xen/common/domctl.c +++ b/xen/common/domctl.c @@ -244,6 +244,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) long ret = 0; bool_t copyback = 0; struct xen_domctl curop, *op = &curop; + struct domain *d; if ( copy_from_guest(op, u_domctl, 1) ) return -EFAULT; @@ -253,19 +254,29 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) switch ( op->cmd ) { + case XEN_DOMCTL_createdomain: + case XEN_DOMCTL_getdomaininfo: + case XEN_DOMCTL_test_assign_device: + d = NULL; + break; + default: + d = rcu_lock_domain_by_id(op->domain); + if ( d == NULL ) + return -ESRCH; + } + + switch ( op->cmd ) + { case XEN_DOMCTL_ioport_mapping: case XEN_DOMCTL_memory_mapping: case XEN_DOMCTL_bind_pt_irq: case XEN_DOMCTL_unbind_pt_irq: { - struct domain *d; - bool_t is_priv = IS_PRIV(current->domain); - if ( !is_priv && ((d = rcu_lock_domain_by_id(op->domain)) != NULL) ) + bool_t is_priv = IS_PRIV_FOR(current->domain, d); + if ( !is_priv ) { - is_priv = IS_PRIV_FOR(current->domain, d); - rcu_unlock_domain(d); + ret = -EPERM; + goto domctl_out_unlock_domonly; } - if ( !is_priv ) - return -EPERM; break; } #ifdef XSM_ENABLE @@ -279,15 +290,18 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) } if ( !domctl_lock_acquire() ) + { + if ( d ) + rcu_unlock_domain(d); return hypercall_create_continuation( __HYPERVISOR_domctl, "h", u_domctl); 
+ } switch ( op->cmd ) { case XEN_DOMCTL_setvcpucontext: { - struct domain *d = rcu_lock_domain_by_id(op->domain); vcpu_guest_context_u c = { .nat = NULL }; unsigned int vcpu = op->u.vcpucontext.vcpu; struct vcpu *v; @@ -341,77 +355,48 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) svc_out: free_vcpu_guest_context(c.nat); - rcu_unlock_domain(d); } break; case XEN_DOMCTL_pausedomain: { - struct domain *d = rcu_lock_domain_by_id(op->domain); - ret = -ESRCH; - if ( d != NULL ) - { - ret = xsm_pausedomain(d); - if ( ret ) - goto pausedomain_out; + ret = xsm_pausedomain(d); + if ( ret ) + break; - ret = -EINVAL; - if ( d != current->domain ) - { - domain_pause_by_systemcontroller(d); - ret = 0; - } - pausedomain_out: - rcu_unlock_domain(d); + ret = -EINVAL; + if ( d != current->domain ) + { + domain_pause_by_systemcontroller(d); + ret = 0; } } break; case XEN_DOMCTL_unpausedomain: { - struct domain *d = rcu_lock_domain_by_id(op->domain); - - ret = -ESRCH; - if ( d == NULL ) - break; - ret = xsm_unpausedomain(d); if ( ret ) - { - rcu_unlock_domain(d); break; - } domain_unpause_by_systemcontroller(d); - rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_resumedomain: { - struct domain *d = rcu_lock_domain_by_id(op->domain); - - ret = -ESRCH; - if ( d == NULL ) - break; - ret = xsm_resumedomain(d); if ( ret ) - { - rcu_unlock_domain(d); break; - } domain_resume(d); - rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_createdomain: { - struct domain *d; domid_t dom; static domid_t rover = 0; unsigned int domcr_flags; @@ -461,6 +446,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) if ( IS_ERR(d) ) { ret = PTR_ERR(d); + d = NULL; break; } @@ -471,39 +457,28 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) op->domain = d->domain_id; copyback = 1; + d = NULL; } break; case XEN_DOMCTL_max_vcpus: { - struct domain *d; unsigned int i, max = op->u.max_vcpus.max, cpu; cpumask_t *online; - ret = -ESRCH; - if ( (d = 
rcu_lock_domain_by_id(op->domain)) == NULL ) - break; - ret = -EINVAL; if ( (d == current->domain) || /* no domain_pause() */ (max > MAX_VIRT_CPUS) || (is_hvm_domain(d) && (max > MAX_HVM_VCPUS)) ) - { - rcu_unlock_domain(d); break; - } ret = xsm_max_vcpus(d); if ( ret ) - { - rcu_unlock_domain(d); break; - } /* Until Xenoprof can dynamically grow its vcpu-s array... */ if ( d->xenoprof ) { - rcu_unlock_domain(d); ret = -EAGAIN; break; } @@ -578,44 +553,31 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) maxvcpu_out_novcpulock: domain_unpause(d); - rcu_unlock_domain(d); } break; case XEN_DOMCTL_destroydomain: { - struct domain *d = rcu_lock_domain_by_id(op->domain); - ret = -ESRCH; - if ( d != NULL ) - { - ret = xsm_destroydomain(d) ? : domain_kill(d); - rcu_unlock_domain(d); - } + ret = xsm_destroydomain(d) ? : domain_kill(d); } break; case XEN_DOMCTL_setvcpuaffinity: case XEN_DOMCTL_getvcpuaffinity: { - domid_t dom = op->domain; - struct domain *d = rcu_lock_domain_by_id(dom); struct vcpu *v; - ret = -ESRCH; - if ( d == NULL ) - break; - ret = xsm_vcpuaffinity(op->cmd, d); if ( ret ) - goto vcpuaffinity_out; + break; ret = -EINVAL; if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus ) - goto vcpuaffinity_out; + break; ret = -ESRCH; if ( (v = d->vcpu[op->u.vcpuaffinity.vcpu]) == NULL ) - goto vcpuaffinity_out; + break; if ( op->cmd == XEN_DOMCTL_setvcpuaffinity ) { @@ -634,35 +596,22 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) ret = cpumask_to_xenctl_cpumap( &op->u.vcpuaffinity.cpumap, v->cpu_affinity); } - - vcpuaffinity_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_scheduler_op: { - struct domain *d; - - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) - break; - ret = xsm_scheduler(d); if ( ret ) - goto scheduler_op_out; + break; ret = sched_adjust(d, &op->u.scheduler_op); copyback = 1; - - scheduler_op_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_getdomaininfo: { - struct domain *d; domid_t 
dom = op->domain; rcu_read_lock(&domlist_read_lock); @@ -689,19 +638,15 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) getdomaininfo_out: rcu_read_unlock(&domlist_read_lock); + d = NULL; } break; case XEN_DOMCTL_getvcpucontext: { vcpu_guest_context_u c = { .nat = NULL }; - struct domain *d; struct vcpu *v; - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) - break; - ret = xsm_getvcpucontext(d); if ( ret ) goto getvcpucontext_out; @@ -751,31 +696,25 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) getvcpucontext_out: xfree(c.nat); - rcu_unlock_domain(d); } break; case XEN_DOMCTL_getvcpuinfo: { - struct domain *d; struct vcpu *v; struct vcpu_runstate_info runstate; - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(op->domain)) == NULL ) - break; - ret = xsm_getvcpuinfo(d); if ( ret ) - goto getvcpuinfo_out; + break; ret = -EINVAL; if ( op->u.getvcpuinfo.vcpu >= d->max_vcpus ) - goto getvcpuinfo_out; + break; ret = -ESRCH; if ( (v = d->vcpu[op->u.getvcpuinfo.vcpu]) == NULL ) - goto getvcpuinfo_out; + break; vcpu_runstate_get(v, &runstate); @@ -786,25 +725,16 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) op->u.getvcpuinfo.cpu = v->processor; ret = 0; copyback = 1; - - getvcpuinfo_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_max_mem: { - struct domain *d; unsigned long new_max; - ret = -ESRCH; - d = rcu_lock_domain_by_id(op->domain); - if ( d == NULL ) - break; - ret = xsm_setdomainmaxmem(d); if ( ret ) - goto max_mem_out; + break; ret = -EINVAL; new_max = op->u.max_mem.max_memkb >> (PAGE_SHIFT-10); @@ -818,77 +748,43 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) d->max_pages = new_max; ret = 0; spin_unlock(&d->page_alloc_lock); - - max_mem_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_setdomainhandle: { - struct domain *d; - - ret = -ESRCH; - d = rcu_lock_domain_by_id(op->domain); - if ( d == NULL ) - break; - ret = xsm_setdomainhandle(d); if ( ret ) - { - 
rcu_unlock_domain(d); break; - } memcpy(d->handle, op->u.setdomainhandle.handle, sizeof(xen_domain_handle_t)); - rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_setdebugging: { - struct domain *d; - - ret = -ESRCH; - d = rcu_lock_domain_by_id(op->domain); - if ( d == NULL ) - break; - ret = -EINVAL; if ( d == current->domain ) /* no domain_pause() */ - { - rcu_unlock_domain(d); break; - } ret = xsm_setdebugging(d); if ( ret ) - { - rcu_unlock_domain(d); break; - } domain_pause(d); d->debugger_attached = !!op->u.setdebugging.enable; domain_unpause(d); /* causes guest to latch new status */ - rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_irq_permission: { - struct domain *d; unsigned int pirq = op->u.irq_permission.pirq; int allow = op->u.irq_permission.allow_access; - ret = -ESRCH; - d = rcu_lock_domain_by_id(op->domain); - if ( d == NULL ) - break; - if ( pirq >= d->nr_pirqs ) ret = -EINVAL; else if ( xsm_irq_permission(d, pirq, allow) ) @@ -897,14 +793,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) ret = irq_permit_access(d, pirq); else ret = irq_deny_access(d, pirq); - - rcu_unlock_domain(d); } break; case XEN_DOMCTL_iomem_permission: { - struct domain *d; unsigned long mfn = op->u.iomem_permission.first_mfn; unsigned long nr_mfns = op->u.iomem_permission.nr_mfns; int allow = op->u.iomem_permission.allow_access; @@ -913,125 +806,78 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) if ( (mfn + nr_mfns - 1) < mfn ) /* wrap? 
*/ break; - ret = -ESRCH; - d = rcu_lock_domain_by_id(op->domain); - if ( d == NULL ) - break; - if ( xsm_iomem_permission(d, mfn, mfn + nr_mfns - 1, allow) ) ret = -EPERM; else if ( allow ) ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1); else ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1); - - rcu_unlock_domain(d); } break; case XEN_DOMCTL_settimeoffset: { - struct domain *d; - - ret = -ESRCH; - d = rcu_lock_domain_by_id(op->domain); - if ( d == NULL ) - break; - ret = xsm_domain_settime(d); if ( ret ) - { - rcu_unlock_domain(d); break; - } domain_set_time_offset(d, op->u.settimeoffset.time_offset_seconds); - rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_set_target: { - struct domain *d, *e; - - ret = -ESRCH; - d = rcu_lock_domain_by_id(op->domain); - if ( d == NULL ) - break; + struct domain *e; ret = -ESRCH; e = get_domain_by_id(op->u.set_target.target); if ( e == NULL ) - goto set_target_out; + break; ret = -EINVAL; if ( (d == e) || (d->target != NULL) ) { put_domain(e); - goto set_target_out; + break; } ret = xsm_set_target(d, e); if ( ret ) { put_domain(e); - goto set_target_out; + break; } /* Hold reference on @e until we destroy @d. 
*/ d->target = e; ret = 0; - - set_target_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_subscribe: { - struct domain *d; - - ret = -ESRCH; - d = rcu_lock_domain_by_id(op->domain); - if ( d != NULL ) - { - ret = xsm_domctl(d, op->cmd); - if ( !ret ) - d->suspend_evtchn = op->u.subscribe.port; - rcu_unlock_domain(d); - } + ret = xsm_domctl(d, op->cmd); + if ( !ret ) + d->suspend_evtchn = op->u.subscribe.port; } break; case XEN_DOMCTL_disable_migrate: { - struct domain *d; - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(op->domain)) != NULL ) - { - ret = xsm_domctl(d, op->cmd); - if ( !ret ) - d->disable_migrate = op->u.disable_migrate.disable; - rcu_unlock_domain(d); - } + ret = xsm_domctl(d, op->cmd); + if ( !ret ) + d->disable_migrate = op->u.disable_migrate.disable; } break; case XEN_DOMCTL_set_virq_handler: { - struct domain *d; uint32_t virq = op->u.set_virq_handler.virq; - ret = -ESRCH; - d = rcu_lock_domain_by_id(op->domain); - if ( d != NULL ) - { - ret = xsm_set_virq_handler(d, virq); - if ( !ret ) - ret = set_global_virq_handler(d, virq); - rcu_unlock_domain(d); - } + ret = xsm_set_virq_handler(d, virq); + if ( !ret ) + ret = set_global_virq_handler(d, virq); } break; @@ -1042,6 +888,10 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) domctl_lock_release(); + domctl_out_unlock_domonly: + if ( d ) + rcu_unlock_domain(d); + if ( copyback && __copy_to_guest(u_domctl, op, 1) ) ret = -EFAULT; -- 1.7.11.7
Daniel De Graaf
2012-Dec-12 20:39 UTC
[PATCH 2/2] xen/arch/*: add struct domain parameter to arch_do_domctl
Since the arch-independent do_domctl function now RCU locks the domain specified by op->domain, pass the struct domain to the arch-specific domctl function and remove the duplicate per-subfunction locking. This also removes two get_domain/put_domain call pairs (in XEN_DOMCTL_assign_device and XEN_DOMCTL_deassign_device), replacing them with RCU locking. Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> Acked-by: Tim Deegan <tim@xen.org> Acked-by: Jan Beulich <jbeulich@suse.com> --- xen/arch/arm/domctl.c | 2 +- xen/arch/x86/domctl.c | 455 +++++++--------------------------------- xen/common/domctl.c | 2 +- xen/drivers/passthrough/iommu.c | 31 +-- xen/include/xen/hypercall.h | 2 +- xen/include/xen/iommu.h | 3 +- 6 files changed, 84 insertions(+), 411 deletions(-) diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c index cf16791..d54a387 100644 --- a/xen/arch/arm/domctl.c +++ b/xen/arch/arm/domctl.c @@ -10,7 +10,7 @@ #include <xen/errno.h> #include <public/domctl.h> -long arch_do_domctl(struct xen_domctl *domctl, +long arch_do_domctl(struct xen_domctl *domctl, struct domain *d, XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { return -ENOSYS; diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c index 239e411..e89a20a 100644 --- a/xen/arch/x86/domctl.c +++ b/xen/arch/x86/domctl.c @@ -47,7 +47,7 @@ static int gdbsx_guest_mem_io( } long arch_do_domctl( - struct xen_domctl *domctl, + struct xen_domctl *domctl, struct domain *d, XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { long ret = 0; @@ -58,23 +58,15 @@ long arch_do_domctl( case XEN_DOMCTL_shadow_op: { - struct domain *d; - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d != NULL ) - { - ret = paging_domctl(d, - &domctl->u.shadow_op, - guest_handle_cast(u_domctl, void)); - rcu_unlock_domain(d); - copyback = 1; - } + ret = paging_domctl(d, + &domctl->u.shadow_op, + guest_handle_cast(u_domctl, void)); + copyback = 1; } break; case XEN_DOMCTL_ioport_permission: { - struct domain *d; 
unsigned int fp = domctl->u.ioport_permission.first_port; unsigned int np = domctl->u.ioport_permission.nr_ports; int allow = domctl->u.ioport_permission.allow_access; @@ -83,10 +75,6 @@ long arch_do_domctl( if ( (fp + np) > 65536 ) break; - ret = -ESRCH; - if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) ) - break; - if ( np == 0 ) ret = 0; else if ( xsm_ioport_permission(d, fp, fp + np - 1, allow) ) @@ -95,8 +83,6 @@ long arch_do_domctl( ret = ioports_permit_access(d, fp, fp + np - 1); else ret = ioports_deny_access(d, fp, fp + np - 1); - - rcu_unlock_domain(d); } break; @@ -104,23 +90,16 @@ long arch_do_domctl( { struct page_info *page; unsigned long mfn = domctl->u.getpageframeinfo.gmfn; - domid_t dom = domctl->domain; - struct domain *d; ret = -EINVAL; - - if ( unlikely(!mfn_valid(mfn)) || - unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) ) + if ( unlikely(!mfn_valid(mfn)) ) break; page = mfn_to_page(mfn); ret = xsm_getpageframeinfo(d); if ( ret ) - { - rcu_unlock_domain(d); break; - } if ( likely(get_page(page, d)) ) { @@ -150,7 +129,6 @@ long arch_do_domctl( put_page(page); } - rcu_unlock_domain(d); copyback = 1; } break; @@ -160,27 +138,17 @@ long arch_do_domctl( { unsigned int n, j; unsigned int num = domctl->u.getpageframeinfo3.num; - domid_t dom = domctl->domain; - struct domain *d; struct page_info *page; xen_pfn_t *arr; - ret = -ESRCH; - if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) ) - break; - ret = xsm_getpageframeinfo(d); if ( ret ) - { - rcu_unlock_domain(d); break; - } if ( unlikely(num > 1024) || unlikely(num != domctl->u.getpageframeinfo3.num) ) { ret = -E2BIG; - rcu_unlock_domain(d); break; } @@ -188,7 +156,6 @@ long arch_do_domctl( if ( !page ) { ret = -ENOMEM; - rcu_unlock_domain(d); break; } arr = page_to_virt(page); @@ -263,7 +230,6 @@ long arch_do_domctl( free_domheap_page(virt_to_page(arr)); - rcu_unlock_domain(d); break; } /* fall thru */ @@ -271,25 +237,15 @@ long arch_do_domctl( { int n,j; int num = 
domctl->u.getpageframeinfo2.num; - domid_t dom = domctl->domain; - struct domain *d; uint32_t *arr32; - ret = -ESRCH; - - if ( unlikely((d = rcu_lock_domain_by_id(dom)) == NULL) ) - break; ret = xsm_getpageframeinfo(d); if ( ret ) - { - rcu_unlock_domain(d); break; - } if ( unlikely(num > 1024) ) { ret = -E2BIG; - rcu_unlock_domain(d); break; } @@ -297,7 +253,6 @@ long arch_do_domctl( if ( !arr32 ) { ret = -ENOMEM; - rcu_unlock_domain(d); break; } @@ -369,78 +324,58 @@ long arch_do_domctl( } free_xenheap_page(arr32); - - rcu_unlock_domain(d); } break; case XEN_DOMCTL_getmemlist: { int i; - struct domain *d = rcu_lock_domain_by_id(domctl->domain); unsigned long max_pfns = domctl->u.getmemlist.max_pfns; uint64_t mfn; struct page_info *page; - ret = -EINVAL; - if ( d != NULL ) - { - ret = xsm_getmemlist(d); - if ( ret ) - { - rcu_unlock_domain(d); - break; - } + ret = xsm_getmemlist(d); + if ( ret ) + break; - spin_lock(&d->page_alloc_lock); + if ( unlikely(d->is_dying) ) { + ret = -EINVAL; + break; + } - if ( unlikely(d->is_dying) ) { - spin_unlock(&d->page_alloc_lock); - goto getmemlist_out; - } + spin_lock(&d->page_alloc_lock); - ret = i = 0; - page_list_for_each(page, &d->page_list) + ret = i = 0; + page_list_for_each(page, &d->page_list) + { + if ( i >= max_pfns ) + break; + mfn = page_to_mfn(page); + if ( copy_to_guest_offset(domctl->u.getmemlist.buffer, + i, &mfn, 1) ) { - if ( i >= max_pfns ) - break; - mfn = page_to_mfn(page); - if ( copy_to_guest_offset(domctl->u.getmemlist.buffer, - i, &mfn, 1) ) - { - ret = -EFAULT; - break; - } - ++i; + ret = -EFAULT; + break; } - - spin_unlock(&d->page_alloc_lock); + ++i; + } - domctl->u.getmemlist.num_pfns = i; - copyback = 1; - getmemlist_out: - rcu_unlock_domain(d); - } + spin_unlock(&d->page_alloc_lock); + + domctl->u.getmemlist.num_pfns = i; + copyback = 1; } break; case XEN_DOMCTL_hypercall_init: { - struct domain *d = rcu_lock_domain_by_id(domctl->domain); unsigned long gmfn = domctl->u.hypercall_init.gmfn; struct 
page_info *page; void *hypercall_page; - ret = -ESRCH; - if ( unlikely(d == NULL) ) - break; - ret = xsm_hypercall_init(d); if ( ret ) - { - rcu_unlock_domain(d); break; - } page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC); @@ -449,7 +384,6 @@ long arch_do_domctl( { if ( page ) put_page(page); - rcu_unlock_domain(d); break; } @@ -460,19 +394,12 @@ long arch_do_domctl( unmap_domain_page(hypercall_page); put_page_and_type(page); - - rcu_unlock_domain(d); } break; case XEN_DOMCTL_sethvmcontext: { struct hvm_domain_context c = { .size = domctl->u.hvmcontext.size }; - struct domain *d; - - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; ret = xsm_hvmcontext(d, domctl->cmd); if ( ret ) @@ -497,19 +424,12 @@ long arch_do_domctl( sethvmcontext_out: if ( c.data != NULL ) xfree(c.data); - - rcu_unlock_domain(d); } break; case XEN_DOMCTL_gethvmcontext: { struct hvm_domain_context c = { 0 }; - struct domain *d; - - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; ret = xsm_hvmcontext(d, domctl->cmd); if ( ret ) @@ -548,7 +468,6 @@ long arch_do_domctl( ret = -EFAULT; gethvmcontext_out: - rcu_unlock_domain(d); copyback = 1; if ( c.data != NULL ) @@ -558,46 +477,28 @@ long arch_do_domctl( case XEN_DOMCTL_gethvmcontext_partial: { - struct domain *d; - - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - ret = xsm_hvmcontext(d, domctl->cmd); if ( ret ) - goto gethvmcontext_partial_out; + break; ret = -EINVAL; if ( !is_hvm_domain(d) ) - goto gethvmcontext_partial_out; + break; domain_pause(d); ret = hvm_save_one(d, domctl->u.hvmcontext_partial.type, domctl->u.hvmcontext_partial.instance, domctl->u.hvmcontext_partial.buffer); domain_unpause(d); - - gethvmcontext_partial_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_set_address_size: { - struct domain *d; - - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - ret = xsm_address_size(d, 
domctl->cmd); if ( ret ) - { - rcu_unlock_domain(d); break; - } switch ( domctl->u.address_size.size ) { @@ -611,30 +512,18 @@ long arch_do_domctl( ret = (domctl->u.address_size.size == BITS_PER_LONG) ? 0 : -EINVAL; break; } - - rcu_unlock_domain(d); } break; case XEN_DOMCTL_get_address_size: { - struct domain *d; - - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - ret = xsm_address_size(d, domctl->cmd); if ( ret ) - { - rcu_unlock_domain(d); break; - } domctl->u.address_size.size = is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG; - rcu_unlock_domain(d); ret = 0; copyback = 1; } @@ -642,46 +531,28 @@ long arch_do_domctl( case XEN_DOMCTL_set_machine_address_size: { - struct domain *d; - - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - ret = xsm_machine_address_size(d, domctl->cmd); if ( ret ) - goto set_machine_address_size_out; + break; ret = -EBUSY; if ( d->tot_pages > 0 ) - goto set_machine_address_size_out; + break; d->arch.physaddr_bitsize = domctl->u.address_size.size; ret = 0; - set_machine_address_size_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_get_machine_address_size: { - struct domain *d; - - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - ret = xsm_machine_address_size(d, domctl->cmd); if ( ret ) - { - rcu_unlock_domain(d); break; - } domctl->u.address_size.size = d->arch.physaddr_bitsize; - rcu_unlock_domain(d); ret = 0; copyback = 1; } @@ -689,25 +560,20 @@ long arch_do_domctl( case XEN_DOMCTL_sendtrigger: { - struct domain *d; struct vcpu *v; - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - ret = xsm_sendtrigger(d); if ( ret ) - goto sendtrigger_out; + break; ret = -EINVAL; if ( domctl->u.sendtrigger.vcpu >= MAX_VIRT_CPUS ) - goto sendtrigger_out; + break; ret = -ESRCH; if ( domctl->u.sendtrigger.vcpu >= d->max_vcpus || (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL ) - goto sendtrigger_out; 
+ break; switch ( domctl->u.sendtrigger.trigger ) { @@ -744,34 +610,27 @@ long arch_do_domctl( default: ret = -ENOSYS; } - - sendtrigger_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_bind_pt_irq: { - struct domain * d; xen_domctl_bind_pt_irq_t * bind; - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; bind = &(domctl->u.bind_pt_irq); ret = -EINVAL; if ( !is_hvm_domain(d) ) - goto bind_out; + break; ret = xsm_bind_pt_irq(d, bind); if ( ret ) - goto bind_out; + break; ret = -EPERM; if ( !IS_PRIV(current->domain) && !irq_access_permitted(current->domain, bind->machine_irq) ) - goto bind_out; + break; ret = -ESRCH; if ( iommu_enabled ) @@ -783,26 +642,19 @@ long arch_do_domctl( if ( ret < 0 ) printk(XENLOG_G_ERR "pt_irq_create_bind failed (%ld) for dom%d\n", ret, d->domain_id); - - bind_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_unbind_pt_irq: { - struct domain * d; xen_domctl_bind_pt_irq_t * bind; - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; bind = &(domctl->u.bind_pt_irq); ret = -EPERM; if ( !IS_PRIV(current->domain) && !irq_access_permitted(current->domain, bind->machine_irq) ) - goto unbind_out; + break; if ( iommu_enabled ) { @@ -813,15 +665,11 @@ long arch_do_domctl( if ( ret < 0 ) printk(XENLOG_G_ERR "pt_irq_destroy_bind failed (%ld) for dom%d\n", ret, d->domain_id); - - unbind_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_memory_mapping: { - struct domain *d; unsigned long gfn = domctl->u.memory_mapping.first_gfn; unsigned long mfn = domctl->u.memory_mapping.first_mfn; unsigned long nr_mfns = domctl->u.memory_mapping.nr_mfns; @@ -839,15 +687,9 @@ long arch_do_domctl( !iomem_access_permitted(current->domain, mfn, mfn + nr_mfns - 1) ) break; - ret = -ESRCH; - if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) ) - break; - ret = xsm_iomem_permission(d, mfn, mfn + nr_mfns - 1, add); - if ( ret ) { - rcu_unlock_domain(d); + if ( ret ) break; - } if 
( add ) { @@ -894,15 +736,12 @@ long arch_do_domctl( ret, add ? "removing" : "denying", d->domain_id, mfn, mfn + nr_mfns - 1); } - - rcu_unlock_domain(d); } break; case XEN_DOMCTL_ioport_mapping: { #define MAX_IOPORTS 0x10000 - struct domain *d; struct hvm_iommu *hd; unsigned int fgp = domctl->u.ioport_mapping.first_gport; unsigned int fmp = domctl->u.ioport_mapping.first_mport; @@ -926,15 +765,9 @@ long arch_do_domctl( !ioports_access_permitted(current->domain, fmp, fmp + np - 1) ) break; - ret = -ESRCH; - if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) ) - break; - ret = xsm_ioport_permission(d, fmp, fmp + np - 1, add); - if ( ret ) { - rcu_unlock_domain(d); + if ( ret ) break; - } hd = domain_hvm_iommu(d); if ( add ) @@ -990,30 +823,19 @@ long arch_do_domctl( "ioport_map: error %ld denying dom%d access to [%x,%x]\n", ret, d->domain_id, fmp, fmp + np - 1); } - rcu_unlock_domain(d); } break; case XEN_DOMCTL_pin_mem_cacheattr: { - struct domain *d; - - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d == NULL ) - break; - ret = xsm_pin_mem_cacheattr(d); if ( ret ) - goto pin_out; + break; ret = hvm_set_mem_pinned_cacheattr( d, domctl->u.pin_mem_cacheattr.start, domctl->u.pin_mem_cacheattr.end, domctl->u.pin_mem_cacheattr.type); - - pin_out: - rcu_unlock_domain(d); } break; @@ -1021,19 +843,13 @@ long arch_do_domctl( case XEN_DOMCTL_get_ext_vcpucontext: { struct xen_domctl_ext_vcpucontext *evc; - struct domain *d; struct vcpu *v; evc = &domctl->u.ext_vcpucontext; - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d == NULL ) - break; - ret = xsm_ext_vcpucontext(d, domctl->cmd); if ( ret ) - goto ext_vcpucontext_out; + break; ret = -ESRCH; if ( (evc->vcpu >= d->max_vcpus) || @@ -1124,7 +940,6 @@ long arch_do_domctl( ret = 0; ext_vcpucontext_out: - rcu_unlock_domain(d); if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext ) copyback = 1; } @@ -1132,16 +947,10 @@ long arch_do_domctl( case XEN_DOMCTL_set_cpuid: { - 
struct domain *d; xen_domctl_cpuid_t *ctl = &domctl->u.cpuid; cpuid_input_t *cpuid = NULL; int i; - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d == NULL ) - break; - for ( i = 0; i < MAX_CPUID_INPUT; i++ ) { cpuid = &d->arch.cpuids[i]; @@ -1164,21 +973,13 @@ long arch_do_domctl( memcpy(cpuid, ctl, sizeof(cpuid_input_t)); ret = 0; } - - rcu_unlock_domain(d); } break; case XEN_DOMCTL_gettscinfo: { - struct domain *d; xen_guest_tsc_info_t info; - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d == NULL ) - break; - domain_pause(d); tsc_get_info(d, &info.tsc_mode, &info.elapsed_nsec, @@ -1189,20 +990,11 @@ long arch_do_domctl( else ret = 0; domain_unpause(d); - - rcu_unlock_domain(d); } break; case XEN_DOMCTL_settscinfo: { - struct domain *d; - - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d == NULL ) - break; - domain_pause(d); tsc_set_info(d, domctl->u.tsc_info.info.tsc_mode, domctl->u.tsc_info.info.elapsed_nsec, @@ -1210,66 +1002,40 @@ long arch_do_domctl( domctl->u.tsc_info.info.incarnation); domain_unpause(d); - rcu_unlock_domain(d); ret = 0; } break; case XEN_DOMCTL_suppress_spurious_page_faults: { - struct domain *d; - - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d != NULL ) - { - d->arch.suppress_spurious_page_faults = 1; - rcu_unlock_domain(d); - ret = 0; - } + d->arch.suppress_spurious_page_faults = 1; + ret = 0; } break; case XEN_DOMCTL_debug_op: { - struct domain *d; struct vcpu *v; - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d == NULL ) - break; - ret = -EINVAL; if ( (domctl->u.debug_op.vcpu >= d->max_vcpus) || ((v = d->vcpu[domctl->u.debug_op.vcpu]) == NULL) ) - goto debug_op_out; + break; ret = -EINVAL; if ( !is_hvm_domain(d)) - goto debug_op_out; + break; ret = hvm_debug_op(v, domctl->u.debug_op.op); - - debug_op_out: - rcu_unlock_domain(d); } break; case XEN_DOMCTL_gdbsx_guestmemio: { - struct domain *d; - - ret = -ESRCH; - if ( (d = 
rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - domctl->u.gdbsx_guest_memio.remain = domctl->u.gdbsx_guest_memio.len; ret = gdbsx_guest_mem_io(domctl->domain, &domctl->u.gdbsx_guest_memio); - - rcu_unlock_domain(d); if ( !ret ) copyback = 1; } @@ -1277,71 +1043,42 @@ long arch_do_domctl( case XEN_DOMCTL_gdbsx_pausevcpu: { - struct domain *d; struct vcpu *v; - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - ret = -EBUSY; if ( !d->is_paused_by_controller ) - { - rcu_unlock_domain(d); break; - } ret = -EINVAL; if ( domctl->u.gdbsx_pauseunp_vcpu.vcpu >= MAX_VIRT_CPUS || (v = d->vcpu[domctl->u.gdbsx_pauseunp_vcpu.vcpu]) == NULL ) - { - rcu_unlock_domain(d); break; - } vcpu_pause(v); ret = 0; - rcu_unlock_domain(d); } break; case XEN_DOMCTL_gdbsx_unpausevcpu: { - struct domain *d; struct vcpu *v; - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - ret = -EBUSY; if ( !d->is_paused_by_controller ) - { - rcu_unlock_domain(d); break; - } ret = -EINVAL; if ( domctl->u.gdbsx_pauseunp_vcpu.vcpu >= MAX_VIRT_CPUS || (v = d->vcpu[domctl->u.gdbsx_pauseunp_vcpu.vcpu]) == NULL ) - { - rcu_unlock_domain(d); break; - } if ( !atomic_read(&v->pause_count) ) printk("WARN: Unpausing vcpu:%d which is not paused\n", v->vcpu_id); vcpu_unpause(v); ret = 0; - rcu_unlock_domain(d); } break; case XEN_DOMCTL_gdbsx_domstatus: { - struct domain *d; struct vcpu *v; - ret = -ESRCH; - if ( (d = rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - domctl->u.gdbsx_domstatus.vcpu_id = -1; domctl->u.gdbsx_domstatus.paused = d->is_paused_by_controller; if ( domctl->u.gdbsx_domstatus.paused ) @@ -1358,7 +1095,6 @@ long arch_do_domctl( } } } - rcu_unlock_domain(d); ret = 0; copyback = 1; } @@ -1368,7 +1104,6 @@ long arch_do_domctl( case XEN_DOMCTL_getvcpuextstate: { struct xen_domctl_vcpuextstate *evc; - struct domain *d; struct vcpu *v; uint32_t offset = 0; uint64_t _xfeature_mask = 0; @@ -1379,12 +1114,6 @@ long
arch_do_domctl( evc = &domctl->u.vcpuextstate; - ret = -ESRCH; - - d = rcu_lock_domain_by_id(domctl->domain); - if ( d == NULL ) - break; - ret = xsm_vcpuextstate(d, domctl->cmd); if ( ret ) goto vcpuextstate_out; @@ -1483,7 +1212,6 @@ long arch_do_domctl( ret = 0; vcpuextstate_out: - rcu_unlock_domain(d); if ( domctl->cmd == XEN_DOMCTL_getvcpuextstate ) copyback = 1; } @@ -1491,52 +1219,35 @@ long arch_do_domctl( case XEN_DOMCTL_mem_event_op: { - struct domain *d; - - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d != NULL ) - { - ret = xsm_mem_event(d); - if ( !ret ) - ret = mem_event_domctl(d, &domctl->u.mem_event_op, - guest_handle_cast(u_domctl, void)); - rcu_unlock_domain(d); - copyback = 1; - } + ret = xsm_mem_event(d); + if ( !ret ) + ret = mem_event_domctl(d, &domctl->u.mem_event_op, + guest_handle_cast(u_domctl, void)); + copyback = 1; } break; case XEN_DOMCTL_mem_sharing_op: { - struct domain *d; - - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d != NULL ) - { - ret = xsm_mem_sharing(d); - if ( !ret ) - ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op); - rcu_unlock_domain(d); - } + ret = xsm_mem_sharing(d); + if ( !ret ) + ret = mem_sharing_domctl(d, &domctl->u.mem_sharing_op); } break; #if P2M_AUDIT case XEN_DOMCTL_audit_p2m: { - struct domain *d; - - ret = rcu_lock_remote_target_domain_by_id(domctl->domain, &d); - if ( ret != 0 ) + if ( d == current->domain ) + { + ret = -EPERM; break; + } audit_p2m(d, &domctl->u.audit_p2m.orphans, &domctl->u.audit_p2m.m2p_bad, &domctl->u.audit_p2m.p2m_bad); - rcu_unlock_domain(d); copyback = 1; } break; @@ -1544,52 +1255,36 @@ long arch_do_domctl( case XEN_DOMCTL_set_access_required: { - struct domain *d; struct p2m_domain* p2m; ret = -EPERM; - if ( current->domain->domain_id == domctl->domain ) + if ( current->domain == d ) break; - ret = -ESRCH; - d = rcu_lock_domain_by_id(domctl->domain); - if ( d != NULL ) - { - ret = xsm_mem_event(d); - if ( !ret ) { - p2m = 
p2m_get_hostp2m(d); - p2m->access_required = domctl->u.access_required.access_required; - } - rcu_unlock_domain(d); - } + ret = xsm_mem_event(d); + if ( !ret ) { + p2m = p2m_get_hostp2m(d); + p2m->access_required = domctl->u.access_required.access_required; + } } break; case XEN_DOMCTL_set_broken_page_p2m: { - struct domain *d; + p2m_type_t pt; + unsigned long pfn = domctl->u.set_broken_page_p2m.pfn; + mfn_t mfn = get_gfn_query(d, pfn, &pt); - d = rcu_lock_domain_by_id(domctl->domain); - if ( d != NULL ) - { - p2m_type_t pt; - unsigned long pfn = domctl->u.set_broken_page_p2m.pfn; - mfn_t mfn = get_gfn_query(d, pfn, &pt); - - if ( unlikely(!mfn_valid(mfn_x(mfn)) || !p2m_is_ram(pt) || - (p2m_change_type(d, pfn, pt, p2m_ram_broken) != pt)) ) - ret = -EINVAL; + if ( unlikely(!mfn_valid(mfn_x(mfn)) || !p2m_is_ram(pt) || + (p2m_change_type(d, pfn, pt, p2m_ram_broken) != pt)) ) + ret = -EINVAL; - put_gfn(d, pfn); - rcu_unlock_domain(d); - } - else - ret = -ESRCH; + put_gfn(d, pfn); } break; default: - ret = iommu_do_domctl(domctl, u_domctl); + ret = iommu_do_domctl(domctl, d, u_domctl); break; } diff --git a/xen/common/domctl.c b/xen/common/domctl.c index a491159..ca789bb 100644 --- a/xen/common/domctl.c +++ b/xen/common/domctl.c @@ -882,7 +882,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) break; default: - ret = arch_do_domctl(op, u_domctl); + ret = arch_do_domctl(op, d, u_domctl); break; } diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c index fb6b5db..1cd0007 100644 --- a/xen/drivers/passthrough/iommu.c +++ b/xen/drivers/passthrough/iommu.c @@ -542,10 +542,9 @@ void iommu_crash_shutdown(void) } int iommu_do_domctl( - struct xen_domctl *domctl, + struct xen_domctl *domctl, struct domain *d, XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) { - struct domain *d; u16 seg; u8 bus, devfn; int ret = 0; @@ -564,10 +563,6 @@ int iommu_do_domctl( if ( ret ) break; - ret = -EINVAL; - if ( (d = 
rcu_lock_domain_by_id(domctl->domain)) == NULL ) - break; - seg = domctl->u.get_device_group.machine_sbdf >> 16; bus = (domctl->u.get_device_group.machine_sbdf >> 8) & 0xff; devfn = domctl->u.get_device_group.machine_sbdf & 0xff; @@ -588,7 +583,6 @@ int iommu_do_domctl( } if ( __copy_field_to_guest(u_domctl, domctl, u.get_device_group) ) ret = -EFAULT; - rcu_unlock_domain(d); } break; @@ -611,20 +605,15 @@ int iommu_do_domctl( break; case XEN_DOMCTL_assign_device: - if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) || - unlikely(d->is_dying) ) + if ( unlikely(d->is_dying) ) { - printk(XENLOG_G_ERR - "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n"); ret = -EINVAL; - if ( d ) - goto assign_device_out; break; } ret = xsm_assign_device(d, domctl->u.assign_device.machine_sbdf); if ( ret ) - goto assign_device_out; + break; seg = domctl->u.get_device_group.machine_sbdf >> 16; bus = (domctl->u.assign_device.machine_sbdf >> 8) & 0xff; @@ -638,22 +627,12 @@ int iommu_do_domctl( seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), d->domain_id, ret); - assign_device_out: - put_domain(d); break; case XEN_DOMCTL_deassign_device: - if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) ) - { - printk(XENLOG_G_ERR - "XEN_DOMCTL_deassign_device: get_domain_by_id() failed\n"); - ret = -EINVAL; - break; - } - ret = xsm_deassign_device(d, domctl->u.assign_device.machine_sbdf); if ( ret ) - goto deassign_device_out; + break; seg = domctl->u.get_device_group.machine_sbdf >> 16; bus = (domctl->u.assign_device.machine_sbdf >> 8) & 0xff; @@ -668,8 +647,6 @@ int iommu_do_domctl( seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), d->domain_id, ret); - deassign_device_out: - put_domain(d); break; default: diff --git a/xen/include/xen/hypercall.h b/xen/include/xen/hypercall.h index e315523..7c3d719 100644 --- a/xen/include/xen/hypercall.h +++ b/xen/include/xen/hypercall.h @@ -37,7 +37,7 @@ do_domctl( extern long arch_do_domctl( - struct xen_domctl *domctl, + struct xen_domctl 
*domctl, struct domain *d, XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl); extern long diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h index 7626216..d477137 100644 --- a/xen/include/xen/iommu.h +++ b/xen/include/xen/iommu.h @@ -132,7 +132,8 @@ void iommu_crash_shutdown(void); void iommu_set_dom0_mapping(struct domain *d); void iommu_share_p2m_table(struct domain *d); -int iommu_do_domctl(struct xen_domctl *, XEN_GUEST_HANDLE_PARAM(xen_domctl_t)); +int iommu_do_domctl(struct xen_domctl *, struct domain *d, + XEN_GUEST_HANDLE_PARAM(xen_domctl_t)); void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count); void iommu_iotlb_flush_all(struct domain *d); -- 1.7.11.7
Apparently Analogous Threads
- when timer go back in dom0 save and restore or migrate, PV domain hung
- [PATCH v2 0/5] ARM hypercall ABI: 64 bit ready
- [PATCH v3 0/6] ARM hypercall ABI: 64 bit ready
- [PATCH] Enable PCI passthrough with stub domain.
- Re: [XenARM] XEN tools for ARM with Virtualization Extensions