Displaying 20 results from an estimated 39 matches for "this_cpu_xchg".
2017 Dec 23
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
...R_RT_MARKER, newly created slots
have the IDR_FREE tag set. We only clear the IDR_FREE tag once the
bitmap is full. So if we try to find a free slot and the tag is clear,
we know the bitmap is full.
> > + bitmap = rcu_dereference_raw(*slot);
> > + if (!bitmap) {
> > + bitmap = this_cpu_xchg(ida_bitmap, NULL);
> > + if (!bitmap)
> > + return -ENOMEM;
>
> I can't understand this. I can understand if it were
>
> BUG_ON(!bitmap);
>
> because you called xb_preload().
>
> But
>
> /*
> * Regular test 2
> * set bit 2000, 2001,...
2017 Nov 03
1
[PATCH v17 1/6] lib/xbitmap: Introduce xbitmap
...= rcu_dereference_raw(*slot);
> + if (radix_tree_exception(bitmap)) {
> + unsigned long tmp = (unsigned long)bitmap;
> +
> + if (ebit < BITS_PER_LONG) {
> + tmp |= 1UL << ebit;
> + rcu_assign_pointer(*slot, (void *)tmp);
> + return 0;
> + }
> + bitmap = this_cpu_xchg(ida_bitmap, NULL);
> + if (!bitmap)
Please write locking rules, in order to explain how memory
allocated by __radix_tree_create() will not leak.
> + return -EAGAIN;
> + memset(bitmap, 0, sizeof(*bitmap));
> + bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
>...
2017 Nov 03
1
[PATCH v17 1/6] lib/xbitmap: Introduce xbitmap
...= rcu_dereference_raw(*slot);
> + if (radix_tree_exception(bitmap)) {
> + unsigned long tmp = (unsigned long)bitmap;
> +
> + if (ebit < BITS_PER_LONG) {
> + tmp |= 1UL << ebit;
> + rcu_assign_pointer(*slot, (void *)tmp);
> + return 0;
> + }
> + bitmap = this_cpu_xchg(ida_bitmap, NULL);
> + if (!bitmap)
Please write locking rules, in order to explain how memory
allocated by __radix_tree_create() will not leak.
> + return -EAGAIN;
> + memset(bitmap, 0, sizeof(*bitmap));
> + bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
>...
2017 Dec 24
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
On 12/23/2017 10:33 PM, Tetsuo Handa wrote:
>>>> + bitmap = rcu_dereference_raw(*slot);
>>>> + if (!bitmap) {
>>>> + bitmap = this_cpu_xchg(ida_bitmap, NULL);
>>>> + if (!bitmap)
>>>> + return -ENOMEM;
>>> I can't understand this. I can understand if it were
>>>
>>> BUG_ON(!bitmap);
>>>
>>> because you called xb_preload().
>>>
>>> But
>&...
2017 Dec 15
2
[PATCH v19 1/7] xbitmap: Introduce xbitmap
...eference_raw(*slot);
33 if (radix_tree_exception(bitmap)) {
34 unsigned long tmp = (unsigned long)bitmap;
35
36 if (ebit < BITS_PER_LONG) {
37 tmp |= 1UL << ebit;
38 rcu_assign_pointer(*slot, (void *)tmp);
39 return 0;
40 }
41 bitmap = this_cpu_xchg(ida_bitmap, NULL);
42 if (!bitmap)
43 return -EAGAIN;
44 memset(bitmap, 0, sizeof(*bitmap));
45 bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
46 rcu_assign_pointer(*slot, bitmap);
47 }
48
49 if (!bitmap) {
50 if (ebit < BITS...
2017 Dec 15
2
[PATCH v19 1/7] xbitmap: Introduce xbitmap
...eference_raw(*slot);
33 if (radix_tree_exception(bitmap)) {
34 unsigned long tmp = (unsigned long)bitmap;
35
36 if (ebit < BITS_PER_LONG) {
37 tmp |= 1UL << ebit;
38 rcu_assign_pointer(*slot, (void *)tmp);
39 return 0;
40 }
41 bitmap = this_cpu_xchg(ida_bitmap, NULL);
42 if (!bitmap)
43 return -EAGAIN;
44 memset(bitmap, 0, sizeof(*bitmap));
45 bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
46 rcu_assign_pointer(*slot, bitmap);
47 }
48
49 if (!bitmap) {
50 if (ebit < BITS...
2017 Dec 12
0
[PATCH v19 2/7] xbitmap: potential improvement
...ELOAD_SIZE) < 0)
+ return false;
+
+ return true;
}
EXPORT_SYMBOL(xb_preload);
diff --git a/lib/xbitmap.c b/lib/xbitmap.c
index 2b547a73..182aa29 100644
--- a/lib/xbitmap.c
+++ b/lib/xbitmap.c
@@ -39,8 +39,10 @@ int xb_set_bit(struct xb *xb, unsigned long bit)
return 0;
}
bitmap = this_cpu_xchg(ida_bitmap, NULL);
- if (!bitmap)
+ if (!bitmap) {
+ __radix_tree_delete(root, node, slot);
return -EAGAIN;
+ }
memset(bitmap, 0, sizeof(*bitmap));
bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
rcu_assign_pointer(*slot, bitmap);
@@ -54,8 +56,10 @@ int xb_set_bi...
2017 Aug 03
0
[PATCH v13 1/5] Introduce xbitmap
...mp;slot);
+ if (err)
+ return err;
+ bitmap = rcu_dereference_raw(*slot);
+ if (radix_tree_exception(bitmap)) {
+ unsigned long tmp = (unsigned long)bitmap;
+
+ if (ebit < BITS_PER_LONG) {
+ tmp |= 1UL << ebit;
+ rcu_assign_pointer(*slot, (void *)tmp);
+ return 0;
+ }
+ bitmap = this_cpu_xchg(ida_bitmap, NULL);
+ if (!bitmap)
+ return -EAGAIN;
+ memset(bitmap, 0, sizeof(*bitmap));
+ bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+ rcu_assign_pointer(*slot, bitmap);
+ }
+
+ if (!bitmap) {
+ if (ebit < BITS_PER_LONG) {
+ bitmap = (void *)((1UL << ebit)...
2017 Sep 11
1
[PATCH v15 1/5] lib/xbitmap: Introduce xbitmap
...t; + bitmap = rcu_dereference_raw(*slot);
> + if (radix_tree_exception(bitmap)) {
> + tmp = (unsigned long)bitmap;
> + if (ebit < BITS_PER_LONG) {
> + tmp |= 1UL << ebit;
> + rcu_assign_pointer(*slot, (void *)tmp);
> + return 0;
> + }
> + bitmap = this_cpu_xchg(ida_bitmap, NULL);
> + if (!bitmap)
> + return -EAGAIN;
> + memset(bitmap, 0, sizeof(*bitmap));
> + bitmap->bitmap[0] =
> + tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
> + rcu_assign_pointer(*slot, bitmap);
> + }
> + if (!bitmap) {
> + if (ebit < B...
2017 Sep 11
1
[PATCH v15 1/5] lib/xbitmap: Introduce xbitmap
...t; + bitmap = rcu_dereference_raw(*slot);
> + if (radix_tree_exception(bitmap)) {
> + tmp = (unsigned long)bitmap;
> + if (ebit < BITS_PER_LONG) {
> + tmp |= 1UL << ebit;
> + rcu_assign_pointer(*slot, (void *)tmp);
> + return 0;
> + }
> + bitmap = this_cpu_xchg(ida_bitmap, NULL);
> + if (!bitmap)
> + return -EAGAIN;
> + memset(bitmap, 0, sizeof(*bitmap));
> + bitmap->bitmap[0] =
> + tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
> + rcu_assign_pointer(*slot, bitmap);
> + }
> + if (!bitmap) {
> + if (ebit < B...
2017 Dec 21
7
[PATCH v20 3/7 RESEND] xbitmap: add more operations
This patch adds support to find the next 1 or 0 bit in an xbitmap range and
clear a range of bits.
More possible optimizations to add in the future:
1) xb_set_bit_range: set a range of bits.
2) when searching a bit, if the bit is not found in the slot, move on to
the next slot directly.
3) add tags to help searching.
Signed-off-by: Wei Wang <wei.w.wang at intel.com>
Cc: Matthew Wilcox
2017 Dec 21
7
[PATCH v20 3/7 RESEND] xbitmap: add more operations
This patch adds support to find the next 1 or 0 bit in an xbitmap range and
clear a range of bits.
More possible optimizations to add in the future:
1) xb_set_bit_range: set a range of bits.
2) when searching a bit, if the bit is not found in the slot, move on to
the next slot directly.
3) add tags to help searching.
Signed-off-by: Wei Wang <wei.w.wang at intel.com>
Cc: Matthew Wilcox
2017 Nov 03
0
[PATCH v17 2/6] radix tree test suite: add tests for xbitmap
...mp;slot);
+ if (err)
+ return err;
+ bitmap = rcu_dereference_raw(*slot);
+ if (radix_tree_exception(bitmap)) {
+ unsigned long tmp = (unsigned long)bitmap;
+
+ if (ebit < BITS_PER_LONG) {
+ tmp |= 1UL << ebit;
+ rcu_assign_pointer(*slot, (void *)tmp);
+ return 0;
+ }
+ bitmap = this_cpu_xchg(ida_bitmap, NULL);
+ if (!bitmap)
+ return -EAGAIN;
+ memset(bitmap, 0, sizeof(*bitmap));
+ bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+ rcu_assign_pointer(*slot, bitmap);
+ }
+
+ if (!bitmap) {
+ if (ebit < BITS_PER_LONG) {
+ bitmap = (void *)((1UL << ebit)...
2018 Mar 14
0
[PATCH v2 06/27] x86/entry/64: Adapt assembly for PIE support
...x /* RIP */
> > > + xchgq %rax, (%rsp) /* Restore RAX, put 1f */
> > > iretq /* continues at repeat_nmi below */
> > > UNWIND_HINT_IRET_REGS
> > > 1:
> >
> > Urgh, xchg with a memop has an implicit LOCK prefix.
> this_cpu_xchg uses no lock cmpxchg as a replacement to reduce latency.
Great, I will update my implementation.
Thanks Peter and Christoph.
> From linux/arch/x86/include/asm/percpu.h
> /*
> * xchg is implemented using cmpxchg without a lock prefix. xchg is
> * expensive due to the implied lo...
2018 Mar 15
0
[PATCH v2 06/27] x86/entry/64: Adapt assembly for PIE support
...endent Code */
>>> + leaq 1f(%rip), %rax /* RIP */
>>> + xchgq %rax, (%rsp) /* Restore RAX, put 1f */
>>> iretq /* continues at repeat_nmi below */
>>> UNWIND_HINT_IRET_REGS
>>> 1:
>> Urgh, xchg with a memop has an implicit LOCK prefix.
> this_cpu_xchg uses no lock cmpxchg as a replacement to reduce latency.
That requires using a second register, since %rax is used as the
comparison source. At this point it's easier to just push %rax twice:
pushq %rax
pushq %rax
leaq 1f(%rip), %rax
movq %rax, 8(%rsp)
popq %rax
iretq
Paolo
2017 Dec 12
0
[PATCH v19 1/7] xbitmap: Introduce xbitmap
...mp;slot);
+ if (err)
+ return err;
+ bitmap = rcu_dereference_raw(*slot);
+ if (radix_tree_exception(bitmap)) {
+ unsigned long tmp = (unsigned long)bitmap;
+
+ if (ebit < BITS_PER_LONG) {
+ tmp |= 1UL << ebit;
+ rcu_assign_pointer(*slot, (void *)tmp);
+ return 0;
+ }
+ bitmap = this_cpu_xchg(ida_bitmap, NULL);
+ if (!bitmap)
+ return -EAGAIN;
+ memset(bitmap, 0, sizeof(*bitmap));
+ bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+ rcu_assign_pointer(*slot, bitmap);
+ }
+
+ if (!bitmap) {
+ if (ebit < BITS_PER_LONG) {
+ bitmap = (void *)((1UL << ebit)...
2017 Dec 23
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
...4-2047
> > will all land in the same bitmap, so there's no need to preload for each
> > of them.
>
> Testcases also serves as how to use that API.
> Assuming such thing leads to incorrect usage.
Sure. Would you like to submit a patch?
> > > If bitmap == NULL at this_cpu_xchg(ida_bitmap, NULL) is allowed,
> > > you can use kzalloc(sizeof(*bitmap), GFP_NOWAIT | __GFP_NOWARN)
> > > and get rid of xb_preload()/xb_preload_end().
> >
> > No, we can't. GFP_NOWAIT | __GFP_NOWARN won't try very hard to allocate
> > memory. There...
2017 Dec 24
0
[PATCH v20 4/7] virtio-balloon: VIRTIO_BALLOON_F_SG
...can skip the preload; it has no value for you.
> Yes, that's why I suggest directly using kzalloc() at xb_set_bit().
It has some possibilities to remove that preload if we also do the
bitmap allocation in the xb_set_bit():
bitmap = rcu_dereference_raw(*slot);
if (!bitmap) {
bitmap = this_cpu_xchg(ida_bitmap, NULL);
if (!bitmap) {
bitmap = kmalloc(sizeof(*bitmap), gfp);
if (!bitmap)
return -ENOMEM;
}
}
But why not just follow the radix tree implementation style that puts
the allocation in preload, which would be invoked with a more relaxed
gfp in o...
2017 Nov 03
0
[PATCH v17 1/6] lib/xbitmap: Introduce xbitmap
...mp;slot);
+ if (err)
+ return err;
+ bitmap = rcu_dereference_raw(*slot);
+ if (radix_tree_exception(bitmap)) {
+ unsigned long tmp = (unsigned long)bitmap;
+
+ if (ebit < BITS_PER_LONG) {
+ tmp |= 1UL << ebit;
+ rcu_assign_pointer(*slot, (void *)tmp);
+ return 0;
+ }
+ bitmap = this_cpu_xchg(ida_bitmap, NULL);
+ if (!bitmap)
+ return -EAGAIN;
+ memset(bitmap, 0, sizeof(*bitmap));
+ bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
+ rcu_assign_pointer(*slot, bitmap);
+ }
+
+ if (!bitmap) {
+ if (ebit < BITS_PER_LONG) {
+ bitmap = (void *)((1UL << ebit)...
2017 Nov 03
12
[PATCH v17 0/6] Virtio-balloon Enhancement
This patch series enhances the existing virtio-balloon with the following
new features:
1) fast ballooning: transfer ballooned pages between the guest and host in
chunks using sgs, instead of one array each time; and
2) free page block reporting: a new virtqueue to report guest free pages
to the host.
The second feature can be used to accelerate live migration of VMs. Here
are some details:
Live