Displaying 20 results from an estimated 28 matches for "xb_index_bits".
2017 Sep 11
1
[PATCH v15 1/5] lib/xbitmap: Introduce xbitmap
...itmap.c
> @@ -0,0 +1,176 @@
> +#include <linux/slab.h>
> +#include <linux/xbitmap.h>
> +
> +/*
> + * The xbitmap implementation supports up to ULONG_MAX bits, and it is
> + * implemented based on ida bitmaps. So, given an unsigned long index,
> + * the high order XB_INDEX_BITS bits of the index are used to find the
> + * corresponding item (i.e. ida bitmap) from the radix tree, and the low
> + * order (i.e. ilog2(IDA_BITMAP_BITS)) bits of the index are used to index
> + * into the ida bitmap to find the bit.
> + */
> +#define XB_INDEX_BITS (BITS_PER_LONG - ilog2...
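For concreteness, a minimal userspace sketch (not part of the patch) of the index split the comment above describes, assuming a 64-bit build where IDA_BITMAP_BITS is 1024 (the 128-byte ida bitmaps in lib/idr.c), so ilog2(IDA_BITMAP_BITS) is 10 and XB_INDEX_BITS works out to 54:

#include <stdio.h>

/* Assumed 64-bit values: the low 10 bits select a bit inside an ida
 * bitmap, the high 54 bits select the radix tree slot holding it. */
#define IDA_BITMAP_BITS  1024UL
#define IDA_BITMAP_SHIFT 10   /* ilog2(IDA_BITMAP_BITS) */

int main(void)
{
	unsigned long index = 123456UL;
	unsigned long slot = index >> IDA_BITMAP_SHIFT;     /* radix tree index */
	unsigned long bit  = index & (IDA_BITMAP_BITS - 1); /* bit in ida bitmap */

	printf("index %lu -> slot %lu, bit %lu\n", index, slot, bit);
	return 0;
}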
2017 Aug 09
1
[PATCH v13 1/5] Introduce xbitmap
...d tree of given height */
> @@ -78,6 +79,14 @@ static struct kmem_cache *radix_tree_node_cachep;
> #define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
>
> /*
> + * The XB can go up to unsigned long, but also uses a bitmap.
This comment is hard to understand.
> + */
> +#define XB_INDEX_BITS (BITS_PER_LONG - ilog2(IDA_BITMAP_BITS))
> +#define XB_MAX_PATH (DIV_ROUND_UP(XB_INDEX_BITS, \
> + RADIX_TREE_MAP_SHIFT))
> +#define XB_PRELOAD_SIZE (XB_MAX_PATH * 2 - 1)
> +
>
> ...
>
> +void xb_preload(gfp_t gfp)
> +{
> + __radix_tree_preload(gfp, XB_P...
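To make the preload sizing above concrete, a standalone sketch with values assumed for a typical 64-bit build (RADIX_TREE_MAP_SHIFT of 6, i.e. 64 slots per node, and XB_INDEX_BITS of 54):

#include <stdio.h>

/* Assumed 64-bit values: XB_INDEX_BITS = 64 - ilog2(1024) = 54 and
 * RADIX_TREE_MAP_SHIFT = 6. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define RADIX_TREE_MAP_SHIFT	6
#define XB_INDEX_BITS		54
#define XB_MAX_PATH		DIV_ROUND_UP(XB_INDEX_BITS, RADIX_TREE_MAP_SHIFT)
#define XB_PRELOAD_SIZE		(XB_MAX_PATH * 2 - 1)

int main(void)
{
	/* Deepest possible path is 9 nodes; 2 * 9 - 1 = 17 nodes are
	 * preloaded to cover a worst-case insertion. */
	printf("XB_MAX_PATH=%d XB_PRELOAD_SIZE=%d\n",
	       XB_MAX_PATH, XB_PRELOAD_SIZE);
	return 0;
}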
2017 Dec 12
0
[PATCH v19 2/7] xbitmap: potential improvement
...adix_tree_node when failing to
get the per-cpu ida bitmap; this avoids leaking an unused radix tree
node left in the tree.
- xb_clear_bit: change it to a void function, since the original
implementation returns nothing but 0.
- remove the comment above "#define XB_INDEX_BITS", because it caused
confusion based on the feedback from the previous discussion;
- xb_preload: with the original implementation, the CPU that successfully
does __radix_tree_preload() may sleep in kmalloc(), which risks
getting the caller of xb_preload() scheduled to ano...
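For context, the preload/use pattern this changelog refers to mirrors the radix tree API; a hypothetical caller-side sketch (the lock, xb_preload_end(), and error handling here are assumptions, not quoted from any patch version):

/*
 * Hypothetical sketch: like radix_tree_preload(), xb_preload() is meant
 * to be paired with a matching end call so the preloaded per-cpu nodes
 * stay on the CPU that allocated them.
 */
static int example_set_bit(struct xb *xb, unsigned long bit,
			   spinlock_t *lock)
{
	int err;

	xb_preload(GFP_KERNEL);	/* may sleep while allocating */
	spin_lock(lock);	/* no sleeping past this point */
	err = xb_set_bit(xb, bit);
	spin_unlock(lock);
	xb_preload_end();	/* assumed name; re-enables preemption */
	return err;
}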
2017 Aug 03
0
[PATCH v13 1/5] Introduce xbitmap
...include <linux/xbitmap.h>
/* Number of nodes in fully populated tree of given height */
@@ -78,6 +79,14 @@ static struct kmem_cache *radix_tree_node_cachep;
#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
/*
+ * The XB can go up to unsigned long, but also uses a bitmap.
+ */
+#define XB_INDEX_BITS (BITS_PER_LONG - ilog2(IDA_BITMAP_BITS))
+#define XB_MAX_PATH (DIV_ROUND_UP(XB_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+#define XB_PRELOAD_SIZE (XB_MAX_PATH * 2 - 1)
+
+/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
@@ -840,6 +849,8 @@ int __radix_tree_create(s...
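For reference, the per-cpu pool named in the excerpt is, roughly, the following structure in lib/radix-tree.c of that era; treat this as a sketch rather than an exact quote:

/* Per-cpu pool of preloaded nodes: preallocated radix tree nodes are
 * chained through node->parent until a caller consumes them. */
struct radix_tree_preload {
	unsigned nr;			/* number of cached nodes */
	struct radix_tree_node *nodes;	/* head of the node chain */
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);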
2017 Nov 03
0
[PATCH v17 1/6] lib/xbitmap: Introduce xbitmap
...radix-tree.c
@@ -78,6 +78,19 @@ static struct kmem_cache *radix_tree_node_cachep;
#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
/*
+ * The xbitmap implementation supports up to ULONG_MAX bits, and it is
+ * implemented based on ida bitmaps. So, given an unsigned long index,
+ * the high order XB_INDEX_BITS bits of the index are used to find the
+ * corresponding item (i.e. ida bitmap) from the radix tree, and the low
+ * order (i.e. ilog2(IDA_BITMAP_BITS)) bits of the index are used to index
+ * into the ida bitmap to find the bit.
+ */
+#define XB_INDEX_BITS (BITS_PER_LONG - ilog2(IDA_BITMAP_BITS))
+#defi...
2017 Dec 19
0
[PATCH v20 1/7] xbitmap: Introduce xbitmap
...Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
@@ -77,6 +77,11 @@ static struct kmem_cache *radix_tree_node_cachep;
RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
+#define XB_INDEX_BITS (BITS_PER_LONG - ilog2(IDA_BITMAP_BITS))
+#define XB_MAX_PATH (DIV_ROUND_UP(XB_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+#define XB_PRELOAD_SIZE (XB_MAX_PATH * 2 - 1)
+
/*
* Per-cpu pool of preloaded nodes
*/
@@ -839,6 +844,8 @@ int __radix_tree_create(struct radix_tree_root *root, unsig...
2017 Dec 12
0
[PATCH v19 1/7] xbitmap: Introduce xbitmap
...e.c b/lib/radix-tree.c
index c8d5556..7000ad6 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -78,6 +78,14 @@ static struct kmem_cache *radix_tree_node_cachep;
#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
/*
+ * The XB can go up to unsigned long, but also uses a bitmap.
+ */
+#define XB_INDEX_BITS (BITS_PER_LONG - ilog2(IDA_BITMAP_BITS))
+#define XB_MAX_PATH (DIV_ROUND_UP(XB_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+#define XB_PRELOAD_SIZE (XB_MAX_PATH * 2 - 1)
+
+/*
* Per-cpu pool of preloaded nodes
*/
struct radix_tree_preload {
@@ -839,6 +847,8 @@ int __radix_tree_create(struct...
2017 Dec 21
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
...Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
@@ -77,6 +77,11 @@ static struct kmem_cache *radix_tree_node_cachep;
RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
+#define XB_INDEX_BITS (BITS_PER_LONG - ilog2(IDA_BITMAP_BITS))
+#define XB_MAX_PATH (DIV_ROUND_UP(XB_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+#define XB_PRELOAD_SIZE (XB_MAX_PATH * 2 - 1)
+
/*
* Per-cpu pool of preloaded nodes
*/
@@ -1781,7 +1786,7 @@ void __rcu **radix_tree_next_chunk(const struct radix_tre...
2018 Jan 09
0
[PATCH v21 1/5] xbitmap: Introduce xbitmap
...Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
@@ -77,6 +77,11 @@ static struct kmem_cache *radix_tree_node_cachep;
RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
+#define XB_INDEX_BITS (BITS_PER_LONG - ilog2(IDA_BITMAP_BITS))
+#define XB_MAX_PATH (DIV_ROUND_UP(XB_INDEX_BITS, \
+ RADIX_TREE_MAP_SHIFT))
+#define XB_PRELOAD_SIZE (XB_MAX_PATH * 2 - 1)
+
/*
* Per-cpu pool of preloaded nodes
*/
@@ -1781,7 +1786,7 @@ void __rcu **radix_tree_next_chunk(const struct radix_tre...
2017 Aug 03
12
[PATCH v13 0/5] Virtio-balloon Enhancement
This patch series enhances the existing virtio-balloon with the following
new features:
1) fast ballooning: transfer ballooned pages between the guest and host in
chunks using sgs, instead of one by one; and
2) free_page_vq: a new virtqueue to report guest free pages to the host.
The second feature can be used to accelerate live migration of VMs. Here
are some details:
Live migration needs to
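A rough sketch of the "chunks using sgs" idea from the cover letter: one scatterlist entry describes a whole run of ballooned pages instead of one array slot per page. The virtqueue and scatterlist calls are the standard kernel APIs; vq, vb, first_page, and nr_pages are made-up names for illustration:

/* Hypothetical sketch: queue a contiguous chunk of pages as one sg. */
struct scatterlist sg;

sg_init_table(&sg, 1);
sg_set_page(&sg, first_page, nr_pages << PAGE_SHIFT, 0);
/* hand the chunk to the host and notify it */
virtqueue_add_inbuf(vq, &sg, 1, vb, GFP_KERNEL);
virtqueue_kick(vq);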
2017 Dec 12
21
[PATCH v19 0/7] Virtio-balloon Enhancement
This patch series enhances the existing virtio-balloon with the following
new features:
1) fast ballooning: transfer ballooned pages between the guest and host in
chunks using sgs, instead of one array each time; and
2) free page block reporting: a new virtqueue to report guest free pages
to the host.
The second feature can be used to accelerate live migration of VMs. Here
are some details:
Live
2017 Nov 29
22
[PATCH v18 00/10] Virtio-balloon Enhancement
This patch series enhances the existing virtio-balloon with the following
new features:
1) fast ballooning: transfer ballooned pages between the guest and host in
chunks using sgs, instead of one array each time; and
2) free page block reporting: a new virtqueue to report guest free pages
to the host.
The second feature can be used to accelerate live migration of VMs. Here
are some details:
Live
2017 Dec 21
7
[PATCH v20 3/7 RESEND] xbitmap: add more operations
This patch adds support to find the next 1 or 0 bit in an xbitmap range and
to clear a range of bits.
More possible optimizations to add in the future:
1) xb_set_bit_range: set a range of bits.
2) when searching for a bit, if the bit is not found in the slot, move on to
the next slot directly.
3) add tags to help searching.
Signed-off-by: Wei Wang <wei.w.wang at intel.com>
Cc: Matthew Wilcox
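A hypothetical caller-side sketch of the operations this patch describes; the function names and signatures here are illustrative, not quoted from the patch:

/* Illustrative only: visit every set bit in [start, end], then clear
 * the range. Assumes the find helper returns a value past 'end' when
 * no set bit remains, as bitmap find_next_bit() does. */
unsigned long bit;

for (bit = xb_find_next_set_bit(&xb, start, end);
     bit <= end;
     bit = xb_find_next_set_bit(&xb, bit + 1, end)) {
	/* ... process the page this bit represents ... */
}
xb_clear_bit_range(&xb, start, end);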
2018 Jan 09
6
[PATCH v21 0/5] Virtio-balloon Enhancement
This patch series enhances the existing virtio-balloon with the following
new features:
1) fast ballooning: transfer ballooned pages between the guest and host in
chunks using sgs, instead of one array each time; and
2) free page block reporting: a new virtqueue to report guest free pages
to the host.
The second feature can be used to accelerate live migration of VMs. Here
are some details:
Live