Displaying 4 results from an estimated 4 matches for "traverse_pag".
2010 Oct 28 · 1 · [PATCH] xen: xenfs: privcmd: check put_user() return code
...ser++);
-
- return 0;
+ return put_user(*mfnp, st->user++);
}
static struct vm_operations_struct privcmd_vm_ops;
@@ -323,10 +321,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
up_write(&mm->mmap_sem);
if (state.err > 0) {
- ret = 0;
-
state.user = m.arr;
- traverse_pages(m.num, sizeof(xen_pfn_t),
+ ret = traverse_pages(m.num, sizeof(xen_pfn_t),
&pagelist,
mmap_return_errors, &state);
}
--
1.7.0.4
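For context, the fix works because traverse_pages() stops at the first callback that returns nonzero and hands that value back to its caller, so returning put_user()'s result lets an -EFAULT from a bad user pointer propagate out of the ioctl instead of being silently dropped. A minimal userspace sketch of that pattern, with hypothetical stand-ins (fake_put_user, traverse) rather than the real kernel helpers:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for put_user(): 0 on success, -EFAULT on a
 * bad destination pointer, mirroring the kernel helper's contract. */
static int fake_put_user(unsigned long val, unsigned long *dst)
{
	if (!dst)
		return -EFAULT;
	*dst = val;
	return 0;
}

/* Simplified traversal: invoke fn() per element and stop at the first
 * nonzero return, propagating it to the caller -- the behaviour the
 * patch relies on once the callback returns put_user()'s result. */
static int traverse(unsigned long *src, unsigned long *dst, unsigned n,
		    int (*fn)(unsigned long, unsigned long *))
{
	int ret = 0;
	for (unsigned i = 0; i < n && ret == 0; i++)
		ret = fn(src[i], dst ? &dst[i] : NULL);
	return ret;
}

int main(void)
{
	unsigned long src[2] = { 1, 2 }, dst[2];

	/* Good destination: the walk completes and returns 0. */
	printf("%d\n", traverse(src, dst, 2, fake_put_user));
	/* Bad destination: the first -EFAULT aborts the walk and is
	 * returned to the caller instead of being swallowed. */
	printf("%d\n", traverse(src, NULL, 2, fake_put_user));
	return 0;
}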
2012 Sep 08 · 3 · [patch 3/3] xen/privcmd: remove const modifier from declaration
...d.c
@@ -389,7 +389,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
if (state.global_error && (version == 1)) {
/* Write back errors in second pass. */
- state.user_mfn = (xen_pfn_t *)m.arr;
+ state.user_mfn = m.arr;
state.err = err_array;
ret = traverse_pages(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_return_errors_v1, &state);
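The point of the change is that the V1 error pass writes values back through user_mfn, so a pointer-to-const declaration never matched the actual use; the old cast merely discarded the qualifier. A hedged miniature of the before/after (struct and field names here are illustrative, not the kernel's):

typedef unsigned long xen_pfn_t;

/* Hypothetical miniature, not the kernel's structs.  Before the patch
 * the array field was declared pointer-to-const, so binding it to the
 * writable user_mfn pointer needed a qualifier-discarding cast:
 *
 *     struct batch { const xen_pfn_t *arr; };
 *     state.user_mfn = (xen_pfn_t *)m.arr;
 *
 * After dropping const, the declaration matches the actual use and
 * the cast disappears: */
struct batch {
	xen_pfn_t *arr;		/* written back to during the V1 error pass */
};

struct state {
	xen_pfn_t *user_mfn;
};

static void bind_errors(struct state *st, struct batch *m)
{
	st->user_mfn = m->arr;	/* plain assignment, no cast needed */
}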
2012 Sep 06 · 0 · [PATCH] Fix mmap batch ioctl error status copy back.
...rtion(+), 1 deletion(-)
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 5386f20..e4dfa3b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -389,7 +389,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
state.err = err_array;
ret = traverse_pages(m.num, sizeof(xen_pfn_t),
&pagelist, mmap_return_errors_v1, &state);
- } else
+ } else if (version == 2)
ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
/* If we have not had any EFAULT-like global errors then set the global
--
1.7.9.5
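The bug here is that the else branch ran for both ABI versions, but only V2 supplies a separate m.err array to copy back; V1 reports errors by rewriting the mfn array in the traverse_pages() pass above. A hedged userspace sketch of the gated write-back (fake_copy_to_user and copy_back_errors are stand-ins, not the kernel API):

#include <string.h>
#include <errno.h>

/* Stand-in for __copy_to_user(): returns the number of bytes NOT
 * copied, i.e. 0 on success, mirroring the kernel helper. */
static unsigned long fake_copy_to_user(void *dst, const void *src,
				       unsigned long n)
{
	if (!dst)
		return n;
	memcpy(dst, src, n);
	return 0;
}

/* V1 reports per-frame errors through the mfn array itself, so the
 * separate error array only exists for the V2 ABI; copying it
 * unconditionally (the pre-patch "else") would touch a user pointer
 * the V1 caller never supplied. */
static int copy_back_errors(int version, int *user_err,
			    const int *err_array, unsigned num)
{
	if (version == 2 &&
	    fake_copy_to_user(user_err, err_array, num * sizeof(int)))
		return -EFAULT;
	return 0;
}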
2011 Nov 08 · 48 · Need help with fixing the Xen waitqueue feature
The patch "mem_event: use wait queue when ring is full" I just sent out
makes use of the waitqueue feature. There are two issues I see with the
change applied:
I think I got the logic right, and in my testing vcpu->pause_count drops
to zero in p2m_mem_paging_resume(). But for some reason the vcpu does
not make progress after the first wakeup. In my debugging there is one