- use the variants not validating the VA range when writing back
structures/fields to the same space that they were previously read
from
- when only a single field of a structure actually changed, copy back
just that field where possible
- consolidate copying back results in a few places
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
If really necessary, this patch could of course be split up at almost
arbitrary boundaries.
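
For illustration only (not part of the patch): a minimal sketch of the two
copy-back idioms used below, in the shape of a hypercall handler. The handler
name do_foo_op, the xen_foo_op structure, and its fields are made up for this
example; copy_from_guest(), __copy_to_guest(), and __copy_field_to_guest() are
the accessors the patch switches to. The point is that copy_from_guest()
already validated the guest VA range, so writing results back to the same
space can use the non-validating __ variants, and where only a single field
changed, __copy_field_to_guest() writes back just that field.

    /* Illustrative sketch only -- the "foo" names are hypothetical. */
    #include <xen/lib.h>
    #include <xen/guest_access.h>

    long do_foo_op(XEN_GUEST_HANDLE_PARAM(xen_foo_op_t) arg)
    {
        struct xen_foo_op op;
        long ret = 0;
        bool_t copyback = 0;

        /* Validating copy: checks the guest VA range on the way in. */
        if ( copy_from_guest(&op, arg, 1) )
            return -EFAULT;

        switch ( op.cmd )
        {
        case FOO_query:
            op.u.query.value = 42;
            copyback = 1;        /* whole struct written back below */
            break;

        case FOO_count:
            op.u.count = 7;
            /* Only one field changed: write back just that field, using
             * the non-validating variant since 'arg' was already checked
             * by copy_from_guest() above. */
            if ( __copy_field_to_guest(arg, &op, u.count) )
                ret = -EFAULT;
            break;

        default:
            ret = -ENOSYS;
            break;
        }

        /* Consolidated copy-back to the same, already validated, space. */
        if ( copyback && __copy_to_guest(arg, &op, 1) )
            ret = -EFAULT;

        return ret;
    }

In common/sysctl.c the consolidated flag is an int with three states: -1 (the
default) copies back only when the handler succeeded, 0 suppresses the
copy-back entirely, and 1 forces it even for a non-zero return such as
-EAGAIN, matching the final
"if ( copyback && (!ret || copyback > 0) && __copy_to_guest(u_sysctl, op, 1) )".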
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -51,6 +51,7 @@ long arch_do_domctl(
XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
long ret = 0;
+ bool_t copyback = 0;
switch ( domctl->cmd )
{
@@ -66,7 +67,7 @@ long arch_do_domctl(
&domctl->u.shadow_op,
guest_handle_cast(u_domctl, void));
rcu_unlock_domain(d);
- copy_to_guest(u_domctl, domctl, 1);
+ copyback = 1;
}
}
break;
@@ -150,8 +151,7 @@ long arch_do_domctl(
}
rcu_unlock_domain(d);
-
- copy_to_guest(u_domctl, domctl, 1);
+ copyback = 1;
}
break;
@@ -408,7 +408,7 @@ long arch_do_domctl(
spin_unlock(&d->page_alloc_lock);
domctl->u.getmemlist.num_pfns = i;
- copy_to_guest(u_domctl, domctl, 1);
+ copyback = 1;
getmemlist_out:
rcu_unlock_domain(d);
}
@@ -539,13 +539,11 @@ long arch_do_domctl(
ret = -EFAULT;
gethvmcontext_out:
- if ( copy_to_guest(u_domctl, domctl, 1) )
- ret = -EFAULT;
+ rcu_unlock_domain(d);
+ copyback = 1;
if ( c.data != NULL )
xfree(c.data);
-
- rcu_unlock_domain(d);
}
break;
@@ -627,11 +625,9 @@ long arch_do_domctl(
domctl->u.address_size.size =
is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
- ret = 0;
rcu_unlock_domain(d);
-
- if ( copy_to_guest(u_domctl, domctl, 1) )
- ret = -EFAULT;
+ ret = 0;
+ copyback = 1;
}
break;
@@ -676,13 +672,9 @@ long arch_do_domctl(
domctl->u.address_size.size = d->arch.physaddr_bitsize;
- ret = 0;
rcu_unlock_domain(d);
-
- if ( copy_to_guest(u_domctl, domctl, 1) )
- ret = -EFAULT;
-
-
+ ret = 0;
+ copyback = 1;
}
break;
@@ -1124,9 +1116,8 @@ long arch_do_domctl(
ext_vcpucontext_out:
rcu_unlock_domain(d);
- if ( (domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext) &&
- copy_to_guest(u_domctl, domctl, 1) )
- ret = -EFAULT;
+ if ( domctl->cmd == XEN_DOMCTL_get_ext_vcpucontext )
+ copyback = 1;
}
break;
@@ -1268,10 +1259,10 @@ long arch_do_domctl(
domctl->u.gdbsx_guest_memio.len;
ret = gdbsx_guest_mem_io(domctl->domain,
&domctl->u.gdbsx_guest_memio);
- if ( !ret && copy_to_guest(u_domctl, domctl, 1) )
- ret = -EFAULT;
rcu_unlock_domain(d);
+ if ( !ret )
+ copyback = 1;
}
break;
@@ -1358,10 +1349,9 @@ long arch_do_domctl(
}
}
}
- ret = 0;
- if ( copy_to_guest(u_domctl, domctl, 1) )
- ret = -EFAULT;
rcu_unlock_domain(d);
+ ret = 0;
+ copyback = 1;
}
break;
@@ -1485,9 +1475,8 @@ long arch_do_domctl(
vcpuextstate_out:
rcu_unlock_domain(d);
- if ( (domctl->cmd == XEN_DOMCTL_getvcpuextstate) &&
- copy_to_guest(u_domctl, domctl, 1) )
- ret = -EFAULT;
+ if ( domctl->cmd == XEN_DOMCTL_getvcpuextstate )
+ copyback = 1;
}
break;
@@ -1504,7 +1493,7 @@ long arch_do_domctl(
ret = mem_event_domctl(d, &domctl->u.mem_event_op,
guest_handle_cast(u_domctl, void));
rcu_unlock_domain(d);
- copy_to_guest(u_domctl, domctl, 1);
+ copyback = 1;
}
}
break;
@@ -1539,8 +1528,7 @@ long arch_do_domctl(
&domctl->u.audit_p2m.m2p_bad,
&domctl->u.audit_p2m.p2m_bad);
rcu_unlock_domain(d);
- if ( copy_to_guest(u_domctl, domctl, 1) )
- ret = -EFAULT;
+ copyback = 1;
}
break;
#endif /* P2M_AUDIT */
@@ -1573,6 +1561,9 @@ long arch_do_domctl(
break;
}
+ if ( copyback && __copy_to_guest(u_domctl, domctl, 1) )
+ ret = -EFAULT;
+
return ret;
}
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4407,7 +4407,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
if ( xatp.space == XENMAPSPACE_gmfn_range )
{
- if ( rc && copy_to_guest(arg, &xatp, 1) )
+ if ( rc && __copy_to_guest(arg, &xatp, 1) )
rc = -EFAULT;
if ( rc == -EAGAIN )
@@ -4492,7 +4492,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
map.nr_entries = min(map.nr_entries, d->arch.pv_domain.nr_e820);
if ( copy_to_guest(map.buffer, d->arch.pv_domain.e820,
map.nr_entries) ||
- copy_to_guest(arg, &map, 1) )
+ __copy_to_guest(arg, &map, 1) )
{
spin_unlock(&d->arch.pv_domain.e820_lock);
return -EFAULT;
@@ -4559,7 +4559,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
ctxt.map.nr_entries = ctxt.n;
- if ( copy_to_guest(arg, &ctxt.map, 1) )
+ if ( __copy_to_guest(arg, &ctxt.map, 1) )
return -EFAULT;
return 0;
@@ -4630,7 +4630,7 @@ long arch_memory_op(int op, XEN_GUEST_HA
target.pod_cache_pages = p2m->pod.count;
target.pod_entries = p2m->pod.entry_count;
- if ( copy_to_guest(arg, &target, 1) )
+ if ( __copy_to_guest(arg, &target, 1) )
{
rc= -EFAULT;
goto pod_target_out_unlock;
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -384,7 +384,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
irq_status_query.flags |= XENIRQSTAT_needs_eoi;
if ( pirq_shared(v->domain, irq) )
irq_status_query.flags |= XENIRQSTAT_shared;
- ret = copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
+ ret = __copy_to_guest(arg, &irq_status_query, 1) ? -EFAULT : 0;
break;
}
@@ -412,7 +412,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
ret = physdev_map_pirq(map.domid, map.type, &map.index, &map.pirq,
&msi);
- if ( copy_to_guest(arg, &map, 1) != 0 )
+ if ( __copy_to_guest(arg, &map, 1) )
ret = -EFAULT;
break;
}
@@ -440,7 +440,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
if ( ret )
break;
ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
- if ( copy_to_guest(arg, &apic, 1) != 0 )
+ if ( __copy_to_guest(arg, &apic, 1) )
ret = -EFAULT;
break;
}
@@ -478,7 +478,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
irq_op.vector = irq_op.irq;
ret = 0;
- if ( copy_to_guest(arg, &irq_op, 1) != 0 )
+ if ( __copy_to_guest(arg, &irq_op, 1) )
ret = -EFAULT;
break;
}
@@ -714,7 +714,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
if ( ret >= 0 )
{
out.pirq = ret;
- ret = copy_to_guest(arg, &out, 1) ? -EFAULT : 0;
+ ret = __copy_to_guest(arg, &out, 1) ? -EFAULT : 0;
}
break;
--- a/xen/arch/x86/platform_hypercall.c
+++ b/xen/arch/x86/platform_hypercall.c
@@ -115,7 +115,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
{
op->u.add_memtype.handle = 0;
op->u.add_memtype.reg = ret;
- ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
+ ret = __copy_field_to_guest(u_xenpf_op, op, u.add_memtype) ?
+ -EFAULT : 0;
if ( ret != 0 )
mtrr_del_page(ret, 0, 0);
}
@@ -157,7 +158,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
op->u.read_memtype.mfn = mfn;
op->u.read_memtype.nr_mfns = nr_mfns;
op->u.read_memtype.type = type;
- ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
+ ret = __copy_field_to_guest(u_xenpf_op, op, u.read_memtype)
+ ? -EFAULT : 0;
}
}
break;
@@ -263,8 +265,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
C(legacy_sectors_per_track);
#undef C
- ret = (copy_field_to_guest(u_xenpf_op, op,
- u.firmware_info.u.disk_info)
+ ret = (__copy_field_to_guest(u_xenpf_op, op,
+ u.firmware_info.u.disk_info)
? -EFAULT : 0);
break;
}
@@ -281,8 +283,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
op->u.firmware_info.u.disk_mbr_signature.mbr_signature =
sig->signature;
- ret = (copy_field_to_guest(u_xenpf_op, op,
- u.firmware_info.u.disk_mbr_signature)
+ ret = (__copy_field_to_guest(u_xenpf_op, op,
+ u.firmware_info.u.disk_mbr_signature)
? -EFAULT : 0);
break;
}
@@ -299,10 +301,10 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
bootsym(boot_edid_caps) >> 8;
ret = 0;
- if ( copy_field_to_guest(u_xenpf_op, op, u.firmware_info.
- u.vbeddc_info.capabilities) ||
- copy_field_to_guest(u_xenpf_op, op, u.firmware_info.
- u.vbeddc_info.edid_transfer_time) ||
+ if ( __copy_field_to_guest(u_xenpf_op, op, u.firmware_info.
+ u.vbeddc_info.capabilities) ||
+ __copy_field_to_guest(u_xenpf_op, op, u.firmware_info.
+ u.vbeddc_info.edid_transfer_time) ||
copy_to_compat(op->u.firmware_info.u.vbeddc_info.edid,
bootsym(boot_edid_info), 128) )
ret = -EFAULT;
@@ -311,8 +313,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
ret = efi_get_info(op->u.firmware_info.index,
&op->u.firmware_info.u.efi_info);
if ( ret == 0 &&
- copy_field_to_guest(u_xenpf_op, op,
- u.firmware_info.u.efi_info) )
+ __copy_field_to_guest(u_xenpf_op, op,
+ u.firmware_info.u.efi_info) )
ret = -EFAULT;
break;
case XEN_FW_KBD_SHIFT_FLAGS:
@@ -323,8 +325,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
op->u.firmware_info.u.kbd_shift_flags =
bootsym(kbd_shift_flags);
ret = 0;
- if ( copy_field_to_guest(u_xenpf_op, op,
- u.firmware_info.u.kbd_shift_flags) )
+ if ( __copy_field_to_guest(u_xenpf_op, op,
+ u.firmware_info.u.kbd_shift_flags) )
ret = -EFAULT;
break;
default:
@@ -340,7 +342,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
ret = efi_runtime_call(&op->u.efi_runtime_call);
if ( ret == 0 &&
- copy_field_to_guest(u_xenpf_op, op, u.efi_runtime_call) )
+ __copy_field_to_guest(u_xenpf_op, op, u.efi_runtime_call) )
ret = -EFAULT;
break;
@@ -412,7 +414,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
ret = cpumask_to_xenctl_cpumap(&ctlmap, cpumap);
free_cpumask_var(cpumap);
- if ( ret == 0 && copy_to_guest(u_xenpf_op, op, 1) )
+ if ( ret == 0 && __copy_field_to_guest(u_xenpf_op, op, u.getidletime) )
ret = -EFAULT;
}
break;
@@ -503,7 +505,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
put_cpu_maps();
- ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
+ ret = __copy_field_to_guest(u_xenpf_op, op, u.pcpu_info) ? -EFAULT : 0;
}
break;
@@ -538,7 +540,7 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
put_cpu_maps();
- if ( copy_field_to_guest(u_xenpf_op, op, u.pcpu_version) )
+ if ( __copy_field_to_guest(u_xenpf_op, op, u.pcpu_version) )
ret = -EFAULT;
}
break;
@@ -639,7 +641,8 @@ ret_t do_platform_op(XEN_GUEST_HANDLE_PA
case XEN_CORE_PARKING_GET:
op->u.core_parking.idle_nums = get_cur_idle_nums();
- ret = copy_to_guest(u_xenpf_op, op, 1) ? -EFAULT : 0;
+ ret = __copy_field_to_guest(u_xenpf_op, op, u.core_parking) ?
+ -EFAULT : 0;
break;
default:
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -93,7 +93,7 @@ long arch_do_sysctl(
if ( iommu_enabled )
pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
- if ( copy_to_guest(u_sysctl, sysctl, 1) )
+ if ( __copy_field_to_guest(u_sysctl, sysctl, u.physinfo) )
ret = -EFAULT;
}
break;
@@ -133,7 +133,8 @@ long arch_do_sysctl(
}
}
- ret = ((i <= max_cpu_index) || copy_to_guest(u_sysctl, sysctl, 1))
+ ret = ((i <= max_cpu_index) ||
+ __copy_field_to_guest(u_sysctl, sysctl, u.topologyinfo))
? -EFAULT : 0;
}
break;
@@ -185,7 +186,8 @@ long arch_do_sysctl(
}
}
- ret = ((i <= max_node_index) || copy_to_guest(u_sysctl, sysctl, 1))
+ ret = ((i <= max_node_index) ||
+ __copy_field_to_guest(u_sysctl, sysctl, u.numainfo))
? -EFAULT : 0;
}
break;
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -122,7 +122,7 @@ int compat_arch_memory_op(int op, XEN_GU
#define XLAT_memory_map_HNDL_buffer(_d_, _s_) ((void)0)
XLAT_memory_map(&cmp, nat);
#undef XLAT_memory_map_HNDL_buffer
- if ( copy_to_guest(arg, &cmp, 1) )
+ if ( __copy_to_guest(arg, &cmp, 1) )
rc = -EFAULT;
break;
@@ -148,7 +148,7 @@ int compat_arch_memory_op(int op, XEN_GU
XLAT_pod_target(&cmp, nat);
- if ( copy_to_guest(arg, &cmp, 1) )
+ if ( __copy_to_guest(arg, &cmp, 1) )
{
if ( rc == __HYPERVISOR_memory_op )
hypercall_cancel_continuation();
@@ -200,7 +200,7 @@ int compat_arch_memory_op(int op, XEN_GU
}
xmml.nr_extents = i;
- if ( copy_to_guest(arg, &xmml, 1) )
+ if ( __copy_to_guest(arg, &xmml, 1) )
rc = -EFAULT;
break;
@@ -219,7 +219,7 @@ int compat_arch_memory_op(int op, XEN_GU
if ( copy_from_guest(&meo, arg, 1) )
return -EFAULT;
rc = do_mem_event_op(op, meo.domain, (void *) &meo);
- if ( !rc && copy_to_guest(arg, &meo, 1) )
+ if ( !rc && __copy_to_guest(arg, &meo, 1) )
return -EFAULT;
break;
}
@@ -231,7 +231,7 @@ int compat_arch_memory_op(int op, XEN_GU
if ( mso.op == XENMEM_sharing_op_audit )
return mem_sharing_audit();
rc = do_mem_event_op(op, mso.domain, (void *) &mso);
- if ( !rc && copy_to_guest(arg, &mso, 1) )
+ if ( !rc && __copy_to_guest(arg, &mso, 1) )
return -EFAULT;
break;
}
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1074,7 +1074,7 @@ long subarch_memory_op(int op, XEN_GUEST
}
xmml.nr_extents = i;
- if ( copy_to_guest(arg, &xmml, 1) )
+ if ( __copy_to_guest(arg, &xmml, 1) )
return -EFAULT;
break;
@@ -1092,7 +1092,7 @@ long subarch_memory_op(int op, XEN_GUEST
if ( copy_from_guest(&meo, arg, 1) )
return -EFAULT;
rc = do_mem_event_op(op, meo.domain, (void *) &meo);
- if ( !rc && copy_to_guest(arg, &meo, 1) )
+ if ( !rc && __copy_to_guest(arg, &meo, 1) )
return -EFAULT;
break;
}
@@ -1104,7 +1104,7 @@ long subarch_memory_op(int op, XEN_GUEST
if ( mso.op == XENMEM_sharing_op_audit )
return mem_sharing_audit();
rc = do_mem_event_op(op, mso.domain, (void *) &mso);
- if ( !rc && copy_to_guest(arg, &mso, 1) )
+ if ( !rc && __copy_to_guest(arg, &mso, 1) )
return -EFAULT;
break;
}
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -292,8 +292,9 @@ int compat_memory_op(unsigned int cmd, X
}
cmp.xchg.nr_exchanged = nat.xchg->nr_exchanged;
- if ( copy_field_to_guest(guest_handle_cast(compat, compat_memory_exchange_t),
- &cmp.xchg, nr_exchanged) )
+ if ( __copy_field_to_guest(guest_handle_cast(compat,
+ compat_memory_exchange_t),
+ &cmp.xchg, nr_exchanged) )
rc = -EFAULT;
if ( rc < 0 )
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -242,6 +242,7 @@ void domctl_lock_release(void)
long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
long ret = 0;
+ bool_t copyback = 0;
struct xen_domctl curop, *op = &curop;
if ( copy_from_guest(op, u_domctl, 1) )
@@ -469,8 +470,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
sizeof(xen_domain_handle_t));
op->domain = d->domain_id;
- if ( copy_to_guest(u_domctl, op, 1) )
- ret = -EFAULT;
+ copyback = 1;
}
break;
@@ -653,8 +653,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
goto scheduler_op_out;
ret = sched_adjust(d, &op->u.scheduler_op);
- if ( copy_to_guest(u_domctl, op, 1) )
- ret = -EFAULT;
+ copyback = 1;
scheduler_op_out:
rcu_unlock_domain(d);
@@ -686,8 +685,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
getdomaininfo(d, &op->u.getdomaininfo);
op->domain = op->u.getdomaininfo.domain;
- if ( copy_to_guest(u_domctl, op, 1) )
- ret = -EFAULT;
+ copyback = 1;
getdomaininfo_out:
rcu_read_unlock(&domlist_read_lock);
@@ -747,8 +745,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
ret = copy_to_guest(op->u.vcpucontext.ctxt, c.nat, 1);
#endif
- if ( copy_to_guest(u_domctl, op, 1) || ret )
+ if ( ret )
ret = -EFAULT;
+ copyback = 1;
getvcpucontext_out:
xfree(c.nat);
@@ -786,9 +785,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
op->u.getvcpuinfo.cpu = v->processor;
ret = 0;
-
- if ( copy_to_guest(u_domctl, op, 1) )
- ret = -EFAULT;
+ copyback = 1;
getvcpuinfo_out:
rcu_unlock_domain(d);
@@ -1045,6 +1042,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
domctl_lock_release();
+ if ( copyback && __copy_to_guest(u_domctl, op, 1) )
+ ret = -EFAULT;
+
return ret;
}
--- a/xen/common/event_channel.c
+++ b/xen/common/event_channel.c
@@ -981,7 +981,7 @@ long do_event_channel_op(int cmd, XEN_GU
if ( copy_from_guest(&alloc_unbound, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_alloc_unbound(&alloc_unbound);
- if ( (rc == 0) && (copy_to_guest(arg, &alloc_unbound, 1) != 0) )
+ if ( !rc && __copy_to_guest(arg, &alloc_unbound, 1) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
}
@@ -991,7 +991,7 @@ long do_event_channel_op(int cmd, XEN_GU
if ( copy_from_guest(&bind_interdomain, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_bind_interdomain(&bind_interdomain);
- if ( (rc == 0) && (copy_to_guest(arg, &bind_interdomain, 1) != 0) )
+ if ( !rc && __copy_to_guest(arg, &bind_interdomain, 1) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
}
@@ -1001,7 +1001,7 @@ long do_event_channel_op(int cmd, XEN_GU
if ( copy_from_guest(&bind_virq, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_bind_virq(&bind_virq);
- if ( (rc == 0) && (copy_to_guest(arg, &bind_virq, 1) != 0) )
+ if ( !rc && __copy_to_guest(arg, &bind_virq, 1) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
}
@@ -1011,7 +1011,7 @@ long do_event_channel_op(int cmd, XEN_GU
if ( copy_from_guest(&bind_ipi, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_bind_ipi(&bind_ipi);
- if ( (rc == 0) && (copy_to_guest(arg, &bind_ipi, 1) != 0) )
+ if ( !rc && __copy_to_guest(arg, &bind_ipi, 1) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
}
@@ -1021,7 +1021,7 @@ long do_event_channel_op(int cmd, XEN_GU
if ( copy_from_guest(&bind_pirq, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_bind_pirq(&bind_pirq);
- if ( (rc == 0) && (copy_to_guest(arg, &bind_pirq, 1) != 0) )
+ if ( !rc && __copy_to_guest(arg, &bind_pirq, 1) )
rc = -EFAULT; /* Cleaning up here would be a mess! */
break;
}
@@ -1047,7 +1047,7 @@ long do_event_channel_op(int cmd, XEN_GU
if ( copy_from_guest(&status, arg, 1) != 0 )
return -EFAULT;
rc = evtchn_status(&status);
- if ( (rc == 0) && (copy_to_guest(arg, &status, 1) != 0) )
+ if ( !rc && __copy_to_guest(arg, &status, 1) )
rc = -EFAULT;
break;
}
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1115,12 +1115,13 @@ gnttab_unmap_grant_ref(
for ( i = 0; i < c; i++ )
{
- if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
+ if ( unlikely(__copy_from_guest(&op, uop, 1)) )
goto fault;
__gnttab_unmap_grant_ref(&op, &(common[i]));
++partial_done;
- if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
+ if ( unlikely(__copy_field_to_guest(uop, &op, status)) )
goto fault;
+ guest_handle_add_offset(uop, 1);
}
flush_tlb_mask(current->domain->domain_dirty_cpumask);
@@ -1177,12 +1178,13 @@ gnttab_unmap_and_replace(
for ( i = 0; i < c; i++ )
{
- if ( unlikely(__copy_from_guest_offset(&op, uop, done+i, 1)) )
+ if ( unlikely(__copy_from_guest(&op, uop, 1)) )
goto fault;
__gnttab_unmap_and_replace(&op, &(common[i]));
++partial_done;
- if ( unlikely(__copy_to_guest_offset(uop, done+i, &op, 1)) )
+ if ( unlikely(__copy_field_to_guest(uop, &op, status)) )
goto fault;
+ guest_handle_add_offset(uop, 1);
}
flush_tlb_mask(current->domain->domain_dirty_cpumask);
@@ -1396,7 +1398,7 @@ gnttab_setup_table(
out2:
rcu_unlock_domain(d);
out1:
- if ( unlikely(copy_to_guest(uop, &op, 1)) )
+ if ( unlikely(__copy_field_to_guest(uop, &op, status)) )
return -EFAULT;
return 0;
@@ -1446,7 +1448,7 @@ gnttab_query_size(
rcu_unlock_domain(d);
query_out:
- if ( unlikely(copy_to_guest(uop, &op, 1)) )
+ if ( unlikely(__copy_to_guest(uop, &op, 1)) )
return -EFAULT;
return 0;
@@ -1542,7 +1544,7 @@ gnttab_transfer(
return i;
/* Read from caller address space. */
- if ( unlikely(__copy_from_guest_offset(&gop, uop, i, 1)) )
+ if ( unlikely(__copy_from_guest(&gop, uop, 1)) )
{
gdprintk(XENLOG_INFO, "gnttab_transfer: error reading req
%d/%d\n",
i, count);
@@ -1701,12 +1703,13 @@ gnttab_transfer(
gop.status = GNTST_okay;
copyback:
- if ( unlikely(__copy_to_guest_offset(uop, i, &gop, 1)) )
+ if ( unlikely(__copy_field_to_guest(uop, &gop, status)) )
{
gdprintk(XENLOG_INFO, "gnttab_transfer: error writing resp
"
"%d/%d\n", i, count);
return -EFAULT;
}
+ guest_handle_add_offset(uop, 1);
}
return 0;
@@ -2143,17 +2146,18 @@ gnttab_copy(
{
if (i && hypercall_preempt_check())
return i;
- if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
+ if ( unlikely(__copy_from_guest(&op, uop, 1)) )
return -EFAULT;
__gnttab_copy(&op);
- if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
+ if ( unlikely(__copy_field_to_guest(uop, &op, status)) )
return -EFAULT;
+ guest_handle_add_offset(uop, 1);
}
return 0;
}
static long
-gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t uop))
+gnttab_set_version(XEN_GUEST_HANDLE_PARAM(gnttab_set_version_t) uop)
{
gnttab_set_version_t op;
struct domain *d = current->domain;
@@ -2265,7 +2269,7 @@ out_unlock:
out:
op.version = gt->gt_version;
- if (copy_to_guest(uop, &op, 1))
+ if (__copy_to_guest(uop, &op, 1))
res = -EFAULT;
return res;
@@ -2329,14 +2333,14 @@ gnttab_get_status_frames(XEN_GUEST_HANDL
out2:
rcu_unlock_domain(d);
out1:
- if ( unlikely(copy_to_guest(uop, &op, 1)) )
+ if ( unlikely(__copy_field_to_guest(uop, &op, status)) )
return -EFAULT;
return 0;
}
static long
-gnttab_get_version(XEN_GUEST_HANDLE_PARAM(gnttab_get_version_t uop))
+gnttab_get_version(XEN_GUEST_HANDLE_PARAM(gnttab_get_version_t) uop)
{
gnttab_get_version_t op;
struct domain *d;
@@ -2359,7 +2363,7 @@ gnttab_get_version(XEN_GUEST_HANDLE_PARA
rcu_unlock_domain(d);
- if ( copy_to_guest(uop, &op, 1) )
+ if ( __copy_field_to_guest(uop, &op, version) )
return -EFAULT;
return 0;
@@ -2421,7 +2425,7 @@ out:
}
static long
-gnttab_swap_grant_ref(XEN_GUEST_HANDLE_PARAM(gnttab_swap_grant_ref_t uop),
+gnttab_swap_grant_ref(XEN_GUEST_HANDLE_PARAM(gnttab_swap_grant_ref_t) uop,
unsigned int count)
{
int i;
@@ -2431,11 +2435,12 @@ gnttab_swap_grant_ref(XEN_GUEST_HANDLE_P
{
if ( i && hypercall_preempt_check() )
return i;
- if ( unlikely(__copy_from_guest_offset(&op, uop, i, 1)) )
+ if ( unlikely(__copy_from_guest(&op, uop, 1)) )
return -EFAULT;
op.status = __gnttab_swap_grant_ref(op.ref_a, op.ref_b);
- if ( unlikely(__copy_to_guest_offset(uop, i, &op, 1)) )
+ if ( unlikely(__copy_field_to_guest(uop, &op, status)) )
return -EFAULT;
+ guest_handle_add_offset(uop, 1);
}
return 0;
}
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -359,7 +359,7 @@ static long memory_exchange(XEN_GUEST_HA
{
exch.nr_exchanged = i << in_chunk_order;
rcu_unlock_domain(d);
- if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
+ if ( __copy_field_to_guest(arg, &exch, nr_exchanged) )
return -EFAULT;
return hypercall_create_continuation(
__HYPERVISOR_memory_op, "lh", XENMEM_exchange, arg);
@@ -500,7 +500,7 @@ static long memory_exchange(XEN_GUEST_HA
}
exch.nr_exchanged = exch.in.nr_extents;
- if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
+ if ( __copy_field_to_guest(arg, &exch, nr_exchanged) )
rc = -EFAULT;
rcu_unlock_domain(d);
return rc;
@@ -527,7 +527,7 @@ static long memory_exchange(XEN_GUEST_HA
exch.nr_exchanged = i << in_chunk_order;
fail_early:
- if ( copy_field_to_guest(arg, &exch, nr_exchanged) )
+ if ( __copy_field_to_guest(arg, &exch, nr_exchanged) )
rc = -EFAULT;
return rc;
}
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -30,6 +30,7 @@
long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
{
long ret = 0;
+ int copyback = -1;
struct xen_sysctl curop, *op = &curop;
static DEFINE_SPINLOCK(sysctl_lock);
@@ -55,42 +56,28 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
switch ( op->cmd )
{
case XEN_SYSCTL_readconsole:
- {
ret = xsm_readconsole(op->u.readconsole.clear);
if ( ret )
break;
ret = read_console_ring(&op->u.readconsole);
- if ( copy_to_guest(u_sysctl, op, 1) )
- ret = -EFAULT;
- }
- break;
+ break;
case XEN_SYSCTL_tbuf_op:
- {
ret = xsm_tbufcontrol();
if ( ret )
break;
ret = tb_control(&op->u.tbuf_op);
- if ( copy_to_guest(u_sysctl, op, 1) )
- ret = -EFAULT;
- }
- break;
+ break;
case XEN_SYSCTL_sched_id:
- {
ret = xsm_sched_id();
if ( ret )
break;
op->u.sched_id.sched_id = sched_id();
- if ( copy_to_guest(u_sysctl, op, 1) )
- ret = -EFAULT;
- else
- ret = 0;
- }
- break;
+ break;
case XEN_SYSCTL_getdomaininfolist:
{
@@ -129,38 +116,27 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
break;
op->u.getdomaininfolist.num_domains = num_domains;
-
- if ( copy_to_guest(u_sysctl, op, 1) )
- ret = -EFAULT;
}
break;
#ifdef PERF_COUNTERS
case XEN_SYSCTL_perfc_op:
- {
ret = xsm_perfcontrol();
if ( ret )
break;
ret = perfc_control(&op->u.perfc_op);
- if ( copy_to_guest(u_sysctl, op, 1) )
- ret = -EFAULT;
- }
- break;
+ break;
#endif
#ifdef LOCK_PROFILE
case XEN_SYSCTL_lockprof_op:
- {
ret = xsm_lockprof();
if ( ret )
break;
ret = spinlock_profile_control(&op->u.lockprof_op);
- if ( copy_to_guest(u_sysctl, op, 1) )
- ret = -EFAULT;
- }
- break;
+ break;
#endif
case XEN_SYSCTL_debug_keys:
{
@@ -179,6 +155,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
handle_keypress(c, guest_cpu_user_regs());
}
ret = 0;
+ copyback = 0;
}
break;
@@ -193,22 +170,21 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
if ( ret )
break;
+ ret = -EFAULT;
for ( i = 0; i < nr_cpus; i++ )
{
cpuinfo.idletime = get_cpu_idle_time(i);
- ret = -EFAULT;
if ( copy_to_guest_offset(op->u.getcpuinfo.info, i,
&cpuinfo, 1) )
goto out;
}
op->u.getcpuinfo.nr_cpus = i;
- ret = copy_to_guest(u_sysctl, op, 1) ? -EFAULT : 0;
+ ret = 0;
}
break;
case XEN_SYSCTL_availheap:
- {
ret = xsm_availheap();
if ( ret )
break;
@@ -218,47 +194,26 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
op->u.availheap.min_bitwidth,
op->u.availheap.max_bitwidth);
op->u.availheap.avail_bytes <<= PAGE_SHIFT;
-
- ret = copy_to_guest(u_sysctl, op, 1) ? -EFAULT : 0;
- }
- break;
+ break;
#ifdef HAS_ACPI
case XEN_SYSCTL_get_pmstat:
- {
ret = xsm_get_pmstat();
if ( ret )
break;
ret = do_get_pm_info(&op->u.get_pmstat);
- if ( ret )
- break;
-
- if ( copy_to_guest(u_sysctl, op, 1) )
- {
- ret = -EFAULT;
- break;
- }
- }
- break;
+ break;
case XEN_SYSCTL_pm_op:
- {
ret = xsm_pm_op();
if ( ret )
break;
ret = do_pm_op(&op->u.pm_op);
- if ( ret && (ret != -EAGAIN) )
- break;
-
- if ( copy_to_guest(u_sysctl, op, 1) )
- {
- ret = -EFAULT;
- break;
- }
- }
- break;
+ if ( ret == -EAGAIN )
+ copyback = 1;
+ break;
#endif
case XEN_SYSCTL_page_offline_op:
@@ -317,41 +272,39 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
}
xfree(status);
+ copyback = 0;
}
break;
case XEN_SYSCTL_cpupool_op:
- {
ret = xsm_cpupool_op();
if ( ret )
break;
ret = cpupool_do_sysctl(&op->u.cpupool_op);
- if ( (ret == 0) && copy_to_guest(u_sysctl, op, 1) )
- ret = -EFAULT;
- }
- break;
+ break;
case XEN_SYSCTL_scheduler_op:
- {
ret = xsm_sched_op();
if ( ret )
break;
ret = sched_adjust_global(&op->u.scheduler_op);
- if ( (ret == 0) && copy_to_guest(u_sysctl, op, 1) )
- ret = -EFAULT;
- }
- break;
+ break;
default:
ret = arch_do_sysctl(op, u_sysctl);
+ copyback = 0;
break;
}
out:
spin_unlock(&sysctl_lock);
+ if ( copyback && (!ret || copyback > 0) &&
+ __copy_to_guest(u_sysctl, op, 1) )
+ ret = -EFAULT;
+
return ret;
}
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -449,7 +449,7 @@ static int add_passive_list(XEN_GUEST_HA
current->domain, __pa(d->xenoprof->rawbuf),
passive.buf_gmaddr, d->xenoprof->npages);
- if ( copy_to_guest(arg, &passive, 1) )
+ if ( __copy_to_guest(arg, &passive, 1) )
{
put_domain(d);
return -EFAULT;
@@ -604,7 +604,7 @@ static int xenoprof_op_init(XEN_GUEST_HA
if ( xenoprof_init.is_primary )
xenoprof_primary_profiler = current->domain;
- return (copy_to_guest(arg, &xenoprof_init, 1) ? -EFAULT : 0);
+ return __copy_to_guest(arg, &xenoprof_init, 1) ? -EFAULT : 0;
}
#define ret_t long
@@ -651,10 +651,7 @@ static int xenoprof_op_get_buffer(XEN_GU
d, __pa(d->xenoprof->rawbuf), xenoprof_get_buffer.buf_gmaddr,
d->xenoprof->npages);
- if ( copy_to_guest(arg, &xenoprof_get_buffer, 1) )
- return -EFAULT;
-
- return 0;
+ return __copy_to_guest(arg, &xenoprof_get_buffer, 1) ? -EFAULT : 0;
}
#define NONPRIV_OP(op) ( (op == XENOPROF_init) \
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -586,7 +586,7 @@ int iommu_do_domctl(
domctl->u.get_device_group.num_sdevs = ret;
ret = 0;
}
- if ( copy_to_guest(u_domctl, domctl, 1) )
+ if ( __copy_field_to_guest(u_domctl, domctl, u.get_device_group) )
ret = -EFAULT;
rcu_unlock_domain(d);
}
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
On 07/12/2012 13:06, "Jan Beulich" <JBeulich@suse.com> wrote:

> - use the variants not validating the VA range when writing back
>   structures/fields to the same space that they were previously read
>   from
> - when only a single field of a structure actually changed, copy back
>   just that field where possible
> - consolidate copying back results in a few places
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Acked-by: Keir Fraser <keir@xen.org>

> ---
> If really necessary, this patch could of course be split up at almost
> arbitrary boundaries.

I wouldn't bother.