Mailing-list archive search results matching "xb_preload_end".
2017 Dec 20
2
[PATCH v20 0/7] Virtio-balloon Enhancement
...arate patches with corresponding test cases in the future.
>
> You can't remove the !node path. You'll see !node when the highest set bit
> is less than 1024. So do something like this:
>
> unsigned long bit;
> xb_preload(GFP_KERNEL);
> xb_set_bit(xb, 700);
> xb_preload_end();
> bit = xb_find_set(xb, ULONG_MAX, 0);
> assert(bit == 700);
The test above results in "!node with bitmap != NULL", so it goes down the regular "if (bitmap)" path, which finds 700.
A better test would be
...
xb_set_bit(xb, 700);
assert(xb_find_set(xb, ULONG_MAX,...
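A fuller version of that test (a sketch only, combining the calls quoted above with the asserts that appear in the "[PATCH v20 3/7 RESEND]" result below; it assumes xb_find_set() returns ULONG_MAX when no set bit exists at or after the given offset):

	/* Set one bit, then search for it from below and from above. */
	xb_preload(GFP_KERNEL);
	xb_set_bit(&xb1, 700);
	xb_preload_end();
	assert(xb_find_set(&xb1, ULONG_MAX, 0) == 700);		/* found when searching from 0 */
	assert(xb_find_set(&xb1, ULONG_MAX, 800) == ULONG_MAX);	/* nothing set at or after 800 */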
2017 Dec 22
2
[PATCH v20 3/7 RESEND] xbitmap: add more operations
On 12/22/2017 05:03 AM, Matthew Wilcox wrote:
> OK, here's a rewrite of xbitmap.
>
> Compared to the version you sent:
> - xb_find_set() is the rewrite I sent out yesterday.
> - xb_find_clear() is a new implementation. I use the IDR_FREE tag to find
> clear bits. This led to me finding a bug in radix_tree_for_each_tagged().
> - xb_zero() is also a new
2017 Dec 21
7
[PATCH v20 3/7 RESEND] xbitmap: add more operations
...nbits)
+ return ret + index * IDA_BITMAP_BITS;
+ } else {
+ return bit + index * IDA_BITMAP_BITS;
+ }
+ bit = 0;
+ index++;
+ }
+
+ return size;
+}
+EXPORT_SYMBOL(xb_find_zero);
+
#ifndef __KERNEL__
static DEFINE_XB(xb1);
@@ -111,6 +256,64 @@ void xbitmap_check_bit(unsigned long bit)
xb_preload_end();
}
+static void xbitmap_check_bit_range(void)
+{
+ /* Regular test1: node = NULL */
+ xb_preload(GFP_KERNEL);
+ xb_set_bit(&xb1, 700);
+ xb_preload_end();
+ assert(xb_find_set(&xb1, ULONG_MAX, 0) == 700);
+ assert(xb_find_set(&xb1, ULONG_MAX, 800) == ULONG_MAX);
+ xb_clear_bit_rang...
2017 Dec 20
0
[PATCH v20 0/7] Virtio-balloon Enhancement
On Wed, Dec 20, 2017 at 04:13:16PM +0000, Wang, Wei W wrote:
> On Wednesday, December 20, 2017 8:26 PM, Matthew Wilcox wrote:
> > unsigned long bit;
> > xb_preload(GFP_KERNEL);
> > xb_set_bit(xb, 700);
> > xb_preload_end();
> > bit = xb_find_set(xb, ULONG_MAX, 0);
> > assert(bit == 700);
>
> The test above results in "!node with bitmap != NULL", so it goes down the regular "if (bitmap)" path, which finds 700.
>
> A better test would be
> ...
> xb_set_bit(xb,...
2017 Dec 23
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
...1 in [2002, 2040) --> none
> * Next 0 in [2000, 2048) --> 2002
> * Next 0 in [2048, 2060) --> 2048
> */
> xb_preload(GFP_KERNEL);
> assert(!xb_set_bit(&xb1, 2000));
> assert(!xb_set_bit(&xb1, 2001));
> assert(!xb_set_bit(&xb1, 2040));
[...]
> xb_preload_end();
>
> you are not calling xb_preload() prior to each xb_set_bit() call.
> This means that, if each xb_set_bit() is not surrounded with
> xb_preload()/xb_preload_end(), there is a possibility of hitting
> this_cpu_xchg(ida_bitmap, NULL) == NULL.
This is just a lazy test. We "kn...
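The pattern being asked for looks roughly like this (a sketch, assuming the xb_* API from these patches; xb_preload()'s return value is ignored here for brevity, and its return type differs between the patch versions quoted in these results):

	unsigned long bits[] = { 2000, 2001, 2040 };
	int i;

	for (i = 0; i < ARRAY_SIZE(bits); i++) {
		/*
		 * Preload before every xb_set_bit() so the per-CPU ida_bitmap
		 * consumed inside xb_set_bit() is guaranteed to be populated.
		 */
		xb_preload(GFP_KERNEL);
		assert(!xb_set_bit(&xb1, bits[i]));
		xb_preload_end();
	}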
2018 Jan 09
0
[PATCH v21 1/5] xbitmap: Introduce xbitmap
...struct xb *xb, unsigned long max, unsigned long *bit);
+bool xb_find_zero(const struct xb *xb, unsigned long max, unsigned long *bit);
+
+static inline bool xb_empty(const struct xb *xb)
+{
+ return radix_tree_empty(&xb->xbrt);
+}
+
+int __must_check xb_preload(gfp_t);
+
+static inline void xb_preload_end(void)
+{
+ preempt_enable();
+}
diff --git a/lib/Makefile b/lib/Makefile
index d11c48e..08a8183 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o timerqueue.o\...
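A minimal sketch of a caller of the v21 lookup interface declared above (hypothetical; the prototype shows xb_find_set() returning bool and reporting the position through *bit, but the truncated snippet does not say whether *bit is also the starting offset):

	unsigned long bit = 0;	/* assumed to be the starting position as well */

	if (xb_find_set(&xb1, ULONG_MAX, &bit))
		pr_info("first set bit: %lu\n", bit);
	else
		pr_info("no set bit found\n");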
2017 Dec 20
2
[PATCH v20 0/7] Virtio-balloon Enhancement
...g exceptional path made this patch easier to read.
> But what I meant is
>
> Can you eliminate the exceptional path and fold all the xbitmap patches into one, and
> post only one xbitmap patch, without the virtio-balloon changes?
>
> .
>
> I still think we don't need xb_preload()/xb_preload_end().
Why would you think preload is not needed?
The bitmap is allocated via preload ("bitmap =
this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);"), and this allocated bitmap
is then used by xb_set_bit().
> I think xb_find_set() has a bug in the !node path.
I think we can probably remove the "...
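As a rough illustration of the hand-off described above (a conceptual sketch in the style of the kernel's ida_pre_get(), not the code from these patches): xb_preload() stashes a pre-allocated bitmap in the per-CPU slot, and xb_set_bit() later consumes it via this_cpu_xchg(ida_bitmap, NULL), as quoted in the Dec 23 result.

	/* Preload side: make sure the per-CPU slot holds a bitmap. */
	if (!this_cpu_read(ida_bitmap)) {
		struct ida_bitmap *bitmap = kzalloc(sizeof(*bitmap), gfp);

		if (!bitmap)
			return false;
		/* Another context may have filled the slot meanwhile. */
		if (this_cpu_cmpxchg(ida_bitmap, NULL, bitmap))
			kfree(bitmap);
	}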
2017 Dec 21
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
...struct xb *xb, unsigned long max, unsigned long *bit);
+bool xb_find_zero(const struct xb *xb, unsigned long max, unsigned long *bit);
+
+static inline bool xb_empty(const struct xb *xb)
+{
+ return radix_tree_empty(&xb->xbrt);
+}
+
+int __must_check xb_preload(gfp_t);
+
+static inline void xb_preload_end(void)
+{
+ preempt_enable();
+}
diff --git a/lib/Makefile b/lib/Makefile
index d11c48ec8ffd..08a8183c390b 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -19,7 +19,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o tim...
2017 Dec 24
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
...> 2002
>>> * Next 0 in [2048, 2060) --> 2048
>>> */
>>> xb_preload(GFP_KERNEL);
>>> assert(!xb_set_bit(&xb1, 2000));
>>> assert(!xb_set_bit(&xb1, 2001));
>>> assert(!xb_set_bit(&xb1, 2040));
>> [...]
>>> xb_preload_end();
>>>
>>> you are not calling xb_preload() prior to each xb_set_bit() call.
>>> This means that, if each xb_set_bit() is not surrounded with
>>> xb_preload()/xb_preload_end(), there is possibility of hitting
>>> this_cpu_xchg(ida_bitmap, NULL) == NULL....
2017 Dec 23
0
[PATCH v20 3/7 RESEND] xbitmap: add more operations
...PI.
> Assuming such a thing leads to incorrect usage.
Sure. Would you like to submit a patch?
> > > If bitmap == NULL at this_cpu_xchg(ida_bitmap, NULL) is allowed,
> > > you can use kzalloc(sizeof(*bitmap), GFP_NOWAIT | __GFP_NOWARN)
> > > and get rid of xb_preload()/xb_preload_end().
> >
> > No, we can't. GFP_NOWAIT | __GFP_NOWARN won't try very hard to allocate
> > memory. There's no reason to fail the call if the user is in a context
> > where they can try harder to free memory.
>
> But there is no reason to use GFP_NOWAIT at i...
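For comparison, the alternative being argued about (a sketch of the suggestion in the quoted mail, not code from the patches): allocate the bitmap on demand inside the set path instead of via preload.

	/* Hypothetical on-demand allocation replacing the preload step. */
	struct ida_bitmap *bitmap;

	bitmap = this_cpu_xchg(ida_bitmap, NULL);
	if (!bitmap)
		bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT | __GFP_NOWARN);
	if (!bitmap)
		return -ENOMEM;	/* the objection: NOWAIT will not try hard, even when the caller could reclaim */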
2017 Oct 09
4
[PATCH v16 3/5] virtio-balloon: VIRTIO_BALLOON_F_SG
...> + unsigned long *pfn_min,
> + unsigned long *pfn_max)
> +{
> + unsigned long pfn = page_to_pfn(page);
> +
> + *pfn_min = min(pfn, *pfn_min);
> + *pfn_max = max(pfn, *pfn_max);
> + xb_preload(GFP_KERNEL);
> + xb_set_bit(&vb->page_xb, pfn);
> + xb_preload_end();
> +}
> +
So, this will allocate memory
...
> @@ -198,9 +327,12 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
> struct page *page;
> struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
> LIST_HEAD(pages);
> + bool use_sg = virtio_...
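Later revisions in this thread make xb_preload() report failure; a sketch of what the quoted helper could then look like (hypothetical name and error handling, based on the bool-returning xb_preload() shown in the v19 result further down):

	static int set_page_xb_sketch(struct virtio_balloon *vb, struct page *page,
				      unsigned long *pfn_min, unsigned long *pfn_max)
	{
		unsigned long pfn = page_to_pfn(page);
		int err;

		*pfn_min = min(pfn, *pfn_min);
		*pfn_max = max(pfn, *pfn_max);

		if (!xb_preload(GFP_KERNEL))	/* may sleep and allocate, as noted above */
			return -ENOMEM;
		err = xb_set_bit(&vb->page_xb, pfn);
		xb_preload_end();
		return err;
	}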
2017 Dec 12
0
[PATCH v19 2/7] xbitmap: potential improvement
...xb *xb, unsigned long start, unsigned long nbits);
-int xb_fill(struct xb *xb, unsigned long start, unsigned long nbits);
-
static inline bool xb_empty(const struct xb *xb)
{
return radix_tree_empty(&xb->xbrt);
}
-void xb_preload(gfp_t);
+bool xb_preload(gfp_t);
static inline void xb_preload_end(void)
{
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7000ad6..a039588 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -77,9 +77,6 @@ static struct kmem_cache *radix_tree_node_cachep;
RADIX_TREE_MAP_SHIFT))
#define IDA_PRELOAD_SIZE (IDA_MAX_PATH * 2 - 1)
-/*
- * The XB c...
2018 Jan 09
6
[PATCH v21 0/5] Virtio-balloon Enhancement
This patch series enhances the existing virtio-balloon with the following
new features:
1) fast ballooning: transfer ballooned pages between the guest and host in
chunks using sgs, instead of one array each time; and
2) free page block reporting: a new virtqueue to report guest free pages
to the host.
The second feature can be used to accelerate live migration of VMs. Here
are some details:
Live
2017 Nov 03
0
[PATCH v17 1/6] lib/xbitmap: Introduce xbitmap
...art,
+ unsigned long end);
+void xb_clear_bit_range(struct xb *xb, unsigned long start, unsigned long end);
+
+/* Check if the xb tree is empty */
+static inline bool xb_is_empty(const struct xb *xb)
+{
+ return radix_tree_empty(&xb->xbrt);
+}
+
+bool xb_preload(gfp_t gfp);
+
+/**
+ * xb_preload_end - end preload section started with xb_preload()
+ *
+ * Each xb_preload() should be matched with an invocation of this
+ * function. See xb_preload() for details.
+ */
+static inline void xb_preload_end(void)
+{
+ preempt_enable();
+}
+
+#endif
diff --git a/lib/Makefile b/lib/Makefile
index dafa796...
2017 Dec 19
15
[PATCH v20 0/7] Virtio-balloon Enhancement
This patch series enhances the existing virtio-balloon with the following
new features:
1) fast ballooning: transfer ballooned pages between the guest and host in
chunks using sgs, instead of one array each time; and
2) free page block reporting: a new virtqueue to report guest free pages
to the host.
The second feature can be used to accelerate live migration of VMs. Here
are some details:
Live