search for: rcu_dereference_raw

Displaying 20 results from an estimated 35 matches for "rcu_dereference_raw".

2017 Nov 03
1
[PATCH v17 1/6] lib/xbitmap: Introduce xbitmap
...uct radix_tree_node *node; > + void **slot; > + struct ida_bitmap *bitmap; > + unsigned long ebit; > + > + bit %= IDA_BITMAP_BITS; > + ebit = bit + 2; > + > + err = __radix_tree_create(root, index, 0, &node, &slot); > + if (err) > + return err; > + bitmap = rcu_dereference_raw(*slot); > + if (radix_tree_exception(bitmap)) { > + unsigned long tmp = (unsigned long)bitmap; > + > + if (ebit < BITS_PER_LONG) { > + tmp |= 1UL << ebit; > + rcu_assign_pointer(*slot, (void *)tmp); > + return 0; > + } > + bitmap = this_cpu_xchg(ida_bit...
2017 Aug 03
0
[PATCH v13 1/5] Introduce xbitmap
..., delete_node(root, node, update_node, private); } -static bool __radix_tree_delete(struct radix_tree_root *root, - struct radix_tree_node *node, void __rcu **slot) +bool __radix_tree_delete(struct radix_tree_root *root, + struct radix_tree_node *node, void __rcu **slot) { void *old = rcu_dereference_raw(*slot); int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0; @@ -2137,6 +2148,130 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) } EXPORT_SYMBOL(ida_pre_get); +void xb_preload(gfp_t gfp) +{ + __radix_tree_preload(gfp, XB_PRELOAD_SIZE); + if (!this_cpu_read(ida_bitmap)) { + struct id...
2017 Dec 24
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
On 12/23/2017 10:33 PM, Tetsuo Handa wrote: >>>> + bitmap = rcu_dereference_raw(*slot); >>>> + if (!bitmap) { >>>> + bitmap = this_cpu_xchg(ida_bitmap, NULL); >>>> + if (!bitmap) >>>> + return -ENOMEM; >>> I can't understand this. I can understand if it were >>> >>> BUG_ON(!bitmap); >>&...
2017 Dec 23
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
...a bitmap with the "bit" set? Yes. For radix trees tagged with IDR_RT_MARKER, newly created slots have the IDR_FREE tag set. We only clear the IDR_FREE tag once the bitmap is full. So if we try to find a free slot and the tag is clear, we know the bitmap is full. > > + bitmap = rcu_dereference_raw(*slot); > > + if (!bitmap) { > > + bitmap = this_cpu_xchg(ida_bitmap, NULL); > > + if (!bitmap) > > + return -ENOMEM; > > I can't understand this. I can understand if it were > > BUG_ON(!bitmap); > > because you called xb_preload(). > >...
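The allocation pattern being questioned here can be sketched as follows. This is a minimal kernel-style C illustration, not the patch itself: it assumes the per-cpu ida_bitmap variable and the struct ida_bitmap type from the quoted patches, and the helper name xb_fill_slot() is hypothetical.

	static int xb_fill_slot(void __rcu **slot)
	{
		struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);

		if (!bitmap) {
			/* Take the bitmap that xb_preload() stashed in the per-cpu variable. */
			bitmap = this_cpu_xchg(ida_bitmap, NULL);
			/*
			 * Not a BUG_ON(): the caller may have skipped xb_preload(), or the
			 * preload allocation may have failed, so fail softly with -ENOMEM.
			 */
			if (!bitmap)
				return -ENOMEM;
			rcu_assign_pointer(*slot, bitmap);
		}
		return 0;
	}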
2017 Dec 19
0
[PATCH v20 1/7] xbitmap: Introduce xbitmap
...7 @@ void __radix_tree_delete_node(struct radix_tree_root *root, delete_node(root, node, update_node); } -static bool __radix_tree_delete(struct radix_tree_root *root, +bool __radix_tree_delete(struct radix_tree_root *root, struct radix_tree_node *node, void __rcu **slot) { void *old = rcu_dereference_raw(*slot); @@ -2135,6 +2142,20 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) } EXPORT_SYMBOL(ida_pre_get); +void xb_preload(gfp_t gfp) +{ + __radix_tree_preload(gfp, XB_PRELOAD_SIZE); + if (!this_cpu_read(ida_bitmap)) { + struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); + + if (!bitma...
2017 Dec 15
2
[PATCH v19 1/7] xbitmap: Introduce xbitmap
..._node *node; 22 void **slot; 23 struct ida_bitmap *bitmap; 24 unsigned long ebit; 25 26 bit %= IDA_BITMAP_BITS; 27 ebit = bit + 2; 28 > 29 err = __radix_tree_create(root, index, 0, &node, &slot); 30 if (err) 31 return err; 32 bitmap = rcu_dereference_raw(*slot); 33 if (radix_tree_exception(bitmap)) { 34 unsigned long tmp = (unsigned long)bitmap; 35 36 if (ebit < BITS_PER_LONG) { 37 tmp |= 1UL << ebit; 38 rcu_assign_pointer(*slot, (void *)tmp); 39 return 0; 40 } 41 bitmap = this_cpu_xch...
2017 Dec 12
0
[PATCH v19 1/7] xbitmap: Introduce xbitmap
...7 @@ void __radix_tree_delete_node(struct radix_tree_root *root, delete_node(root, node, update_node); } -static bool __radix_tree_delete(struct radix_tree_root *root, +bool __radix_tree_delete(struct radix_tree_root *root, struct radix_tree_node *node, void __rcu **slot) { void *old = rcu_dereference_raw(*slot); @@ -2135,6 +2145,20 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) } EXPORT_SYMBOL(ida_pre_get); +void xb_preload(gfp_t gfp) +{ + __radix_tree_preload(gfp, XB_PRELOAD_SIZE); + if (!this_cpu_read(ida_bitmap)) { + struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp); + + if (!bitma...
2017 Dec 24
0
[PATCH v20 4/7] virtio-balloon: VIRTIO_BALLOON_F_SG
...ion with GFP_NOWAIT | __GFP_NOWARN, >> and then you can skip the preload; it has no value for you. > Yes, that's why I suggest directly using kzalloc() at xb_set_bit(). It has some possibilities to remove that preload if we also do the bitmap allocation in the xb_set_bit(): bitmap = rcu_dereference_raw(*slot); if (!bitmap) { bitmap = this_cpu_xchg(ida_bitmap, NULL); if (!bitmap) { bitmap = kmalloc(sizeof(*bitmap), gfp); if (!bitmap) return -ENOMEM; } } But why not just follow the radix tree implementation style that puts the allocation in preload, w...
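For comparison, the radix-tree-style preload convention referred to above looks roughly like this. A sketch only: the void xb_preload(gfp_t) prototype and int-returning xb_set_bit() are taken from the quoted patches, xb_preload_end() is assumed to end the preload section, and record_pfn() is a hypothetical caller.

	static int record_pfn(struct xb *xb, unsigned long pfn)
	{
		int err;

		xb_preload(GFP_KERNEL);		/* preallocate the per-cpu ida_bitmap up front */
		err = xb_set_bit(xb, pfn);	/* consumes the preloaded bitmap if the slot is empty */
		xb_preload_end();		/* leave the preload section */
		return err;
	}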
2017 Nov 03
0
[PATCH v17 1/6] lib/xbitmap: Introduce xbitmap
..., delete_node(root, node, update_node, private); } -static bool __radix_tree_delete(struct radix_tree_root *root, - struct radix_tree_node *node, void __rcu **slot) +bool __radix_tree_delete(struct radix_tree_root *root, + struct radix_tree_node *node, void __rcu **slot) { void *old = rcu_dereference_raw(*slot); int exceptional = radix_tree_exceptional_entry(old) ? -1 : 0; @@ -2005,6 +2020,38 @@ static bool __radix_tree_delete(struct radix_tree_root *root, } /** + * xb_preload - preload for xb_set_bit() + * @gfp_mask: allocation mask to use for preloading + * + * Preallocate memory to use f...
2017 Dec 21
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
...DA_BITMAP_BITS)) +#define XB_MAX_PATH (DIV_ROUND_UP(XB_INDEX_BITS, \ + RADIX_TREE_MAP_SHIFT)) +#define XB_PRELOAD_SIZE (XB_MAX_PATH * 2 - 1) + /* * Per-cpu pool of preloaded nodes */ @@ -1781,7 +1786,7 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, child = rcu_dereference_raw(node->slots[offset]); } - if (!child) + if (!child && !is_idr(root)) goto restart; if (child == RADIX_TREE_RETRY) break; @@ -2135,6 +2140,35 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) } EXPORT_SYMBOL(ida_pre_get); +/** + * xb_preload - preload for xb_set_bit()...
2017 Dec 21
7
[PATCH v20 3/7 RESEND] xbitmap: add more operations
This patch adds support to find the next 1 or 0 bit in an xbitmap range and clear a range of bits. More possible optimizations to add in the future: 1) xb_set_bit_range: set a range of bits. 2) when searching a bit, if the bit is not found in the slot, move on to the next slot directly. 3) add tags to help searching. Signed-off-by: Wei Wang <wei.w.wang at intel.com> Cc: Matthew Wilcox
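A rough idea of how the new operations combine. The excerpt does not show the real prototypes, so the signatures of xb_find_set() and xb_clear_bit_range() below are assumptions for illustration only, and flush_used_range() is a hypothetical caller.

	/*
	 * Assumed for this sketch:
	 *   unsigned long xb_find_set(struct xb *xb, unsigned long end, unsigned long start);
	 *     - returns the first set bit in [start, end], or a value > end if none is set.
	 *   void xb_clear_bit_range(struct xb *xb, unsigned long start, unsigned long nbits);
	 */
	static void flush_used_range(struct xb *xb, unsigned long start, unsigned long end)
	{
		unsigned long bit = xb_find_set(xb, end, start);

		if (bit > end)
			return;					/* nothing set in the range */
		xb_clear_bit_range(xb, bit, end - bit + 1);	/* clear from the first set bit onwards */
	}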
2018 Jan 09
0
[PATCH v21 1/5] xbitmap: Introduce xbitmap
...DA_BITMAP_BITS)) +#define XB_MAX_PATH (DIV_ROUND_UP(XB_INDEX_BITS, \ + RADIX_TREE_MAP_SHIFT)) +#define XB_PRELOAD_SIZE (XB_MAX_PATH * 2 - 1) + /* * Per-cpu pool of preloaded nodes */ @@ -1781,7 +1786,7 @@ void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root, child = rcu_dereference_raw(node->slots[offset]); } - if (!child) + if (!child && !is_idr(root)) goto restart; if (child == RADIX_TREE_RETRY) break; @@ -2135,6 +2140,35 @@ int ida_pre_get(struct ida *ida, gfp_t gfp) } EXPORT_SYMBOL(ida_pre_get); +/** + * xb_preload - preload for xb_set_bit()...
2017 Sep 11
1
[PATCH v15 1/5] lib/xbitmap: Introduce xbitmap
...p; > + unsigned long ebit, tmp; > + > + bit %= IDA_BITMAP_BITS; > + ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT; > + > + switch (ops) { > + case XB_SET: > + ret = __radix_tree_create(root, index, 0, &node, &slot); > + if (ret) > + return ret; > + bitmap = rcu_dereference_raw(*slot); > + if (radix_tree_exception(bitmap)) { > + tmp = (unsigned long)bitmap; > + if (ebit < BITS_PER_LONG) { > + tmp |= 1UL << ebit; > + rcu_assign_pointer(*slot, (void *)tmp); > + return 0; > + } > + bitmap = this_cpu_xchg(ida_bitmap, NULL); &...
2017 Dec 19
15
[PATCH v20 0/7] Virtio-balloon Enhancement
This patch series enhances the existing virtio-balloon with the following new features: 1) fast ballooning: transfer ballooned pages between the guest and host in chunks using sgs, instead of one array each time; and 2) free page block reporting: a new virtqueue to report guest free pages to the host. The second feature can be used to accelerate live migration of VMs. Here are some details: Live
2017 Nov 03
0
[PATCH v17 2/6] radix tree test suite: add tests for xbitmap
...struct radix_tree_root *root = &xb->xbrt; + struct radix_tree_node *node; + void **slot; + struct ida_bitmap *bitmap; + unsigned long ebit; + + bit %= IDA_BITMAP_BITS; + ebit = bit + 2; + + err = __radix_tree_create(root, index, 0, &node, &slot); + if (err) + return err; + bitmap = rcu_dereference_raw(*slot); + if (radix_tree_exception(bitmap)) { + unsigned long tmp = (unsigned long)bitmap; + + if (ebit < BITS_PER_LONG) { + tmp |= 1UL << ebit; + rcu_assign_pointer(*slot, (void *)tmp); + return 0; + } + bitmap = this_cpu_xchg(ida_bitmap, NULL); + if (!bitmap) + return -EAGAI...