Displaying 20 results from an estimated 35 matches for "n_pages".
Did you mean: nr_pages
2019 Apr 24
1
[PATCH v3 1/4] mm/balloon_compaction: list interfaces
...emoving it from the guest system.
> + *
> + * Return: number of pages that were enqueued.
> + */
> +size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
> + struct list_head *pages)
> +{
> + struct page *page, *tmp;
> + unsigned long flags;
> + size_t n_pages = 0;
> +
> + spin_lock_irqsave(&b_dev_info->pages_lock, flags);
> + list_for_each_entry_safe(page, tmp, pages, lru) {
> + balloon_page_enqueue_one(b_dev_info, page);
> + n_pages++;
> + }
> + spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
> + return n...
2019 Apr 23
0
[PATCH v3 1/4] mm/balloon_compaction: list interfaces
...eue a balloon pages before definitively
+ * removing it from the guest system.
+ *
+ * Return: number of pages that were enqueued.
+ */
+size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
+ struct list_head *pages)
+{
+ struct page *page, *tmp;
+ unsigned long flags;
+ size_t n_pages = 0;
+
+ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ list_for_each_entry_safe(page, tmp, pages, lru) {
+ balloon_page_enqueue_one(b_dev_info, page);
+ n_pages++;
+ }
+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ return n_pages;
+}
+EXPORT_SYMBOL_GPL(balloon_pa...
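As a point of reference, a driver-side inflate path built on this list interface could look roughly like the sketch below. It is only a sketch: my_inflate() and its batching policy are illustrative assumptions; balloon_page_alloc(), balloon_page_list_enqueue() and the single-lock batching are the parts taken from the series itself.

    /*
     * Hypothetical sketch, not part of the series: batch freshly allocated
     * pages on a private list, then enqueue them all in one call.
     */
    #include <linux/balloon_compaction.h>
    #include <linux/list.h>

    static size_t my_inflate(struct balloon_dev_info *b_dev_info, size_t nr)
    {
        LIST_HEAD(pages);       /* private list for this inflate cycle */
        size_t i;

        for (i = 0; i < nr; i++) {
            struct page *page = balloon_page_alloc();

            if (!page)
                break;
            /* collect the pages off-list before enqueueing */
            list_add(&page->lru, &pages);
        }

        /* hand the whole batch over under one pages_lock acquisition */
        return balloon_page_list_enqueue(b_dev_info, &pages);
    }

Note that the later "avoid duplicate page removal" fix further down in this listing makes balloon_page_list_enqueue() do the list_del() itself, so a caller like this does not unlink the pages from its private list.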
2019 Apr 23
5
[PATCH v3 0/4] vmw_balloon: compaction and shrinker support
VMware balloon enhancements: adding support for memory compaction,
memory shrinker (to prevent OOM) and splitting of refused pages to
prevent recurring inflations.
Patches 1-2: Support for compaction
Patch 3: Support for memory shrinker - disabled by default
Patch 4: Split refused pages to improve performance
v2->v3:
* Fixing wrong argument type (int->size_t) [Michael]
* Fixing a comment
2019 Apr 25
0
[PATCH v4 1/4] mm/balloon_compaction: List interfaces
...eue a balloon pages before definitively
+ * removing it from the guest system.
+ *
+ * Return: number of pages that were enqueued.
+ */
+size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
+ struct list_head *pages)
+{
+ struct page *page, *tmp;
+ unsigned long flags;
+ size_t n_pages = 0;
+
+ spin_lock_irqsave(&b_dev_info->pages_lock, flags);
+ list_for_each_entry_safe(page, tmp, pages, lru) {
+ balloon_page_enqueue_one(b_dev_info, page);
+ n_pages++;
+ }
+ spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+ return n_pages;
+}
+EXPORT_SYMBOL_GPL(balloon_pa...
2019 Apr 19
0
[PATCH v2 1/4] mm/balloon_compaction: list interfaces
...ally adding to the balloon?
> + *
> + * Return: number of pages that were enqueued.
> + */
> +size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
> + struct list_head *pages)
> +{
> + struct page *page, *tmp;
> + unsigned long flags;
> + size_t n_pages = 0;
> +
> + spin_lock_irqsave(&b_dev_info->pages_lock, flags);
> + list_for_each_entry_safe(page, tmp, pages, lru) {
> + balloon_page_enqueue_one(b_dev_info, page);
Do we want to do something about an error here?
> + n_pages++;
> + }
> + spin_unlock_irqrestore(&...
2019 Apr 25
6
[PATCH v4 0/4] vmw_balloon: Compaction and shrinker support
VMware balloon enhancements: adding support for memory compaction,
memory shrinker (to prevent OOM) and splitting of refused pages to
prevent recurring inflations.
Patches 1-2: Support for compaction
Patch 3: Support for memory shrinker - disabled by default
Patch 4: Split refused pages to improve performance
v3->v4:
* "get around to" comment [Michael]
* Put list_add under page lock
2019 Feb 07
0
[PATCH 3/6] mm/balloon_compaction: list interfaces
...*/
> +int balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
> + struct list_head *pages, int n_req_pages)
Are we sure this int never overflows? Why not just use u64
or size_t straight away?
> +{
> + struct page *page, *tmp;
> + unsigned long flags;
> + int n_pages = 0;
> +
> + spin_lock_irqsave(&b_dev_info->pages_lock, flags);
> + list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
> + /*
> + * Block others from accessing the 'page' while we get around
> + * establishing additional references and prepar...
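For orientation, the deflate-side counterpart of the list interface reviewed here might be used roughly as in the sketch below (using the size_t type the review converged on). my_deflate() and its freeing policy are illustrative assumptions; only balloon_page_list_dequeue() itself comes from the patch.

    /* Hypothetical sketch, not from the patch: drain up to nr pages via the list API. */
    #include <linux/balloon_compaction.h>
    #include <linux/gfp.h>
    #include <linux/list.h>

    static size_t my_deflate(struct balloon_dev_info *b_dev_info, size_t nr)
    {
        LIST_HEAD(pages);
        struct page *page, *tmp;
        size_t n_pages;

        /* move up to nr balloon pages onto our private list */
        n_pages = balloon_page_list_dequeue(b_dev_info, &pages, nr);

        list_for_each_entry_safe(page, tmp, &pages, lru) {
            /* unlink before handing the page back to the system */
            list_del(&page->lru);
            /* a real driver would notify the hypervisor before freeing */
            __free_page(page);
        }
        return n_pages;
    }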
2007 Nov 10
1
[PATCH] virtio_pci updates
...queues so we can dispatch IRQs */
spinlock_t lock;
struct list_head virtqueues;
};
struct virtio_pci_vq_info
{
+ /* the actual virtqueue */
+ struct virtqueue *vq;
+
/* the number of entries in the queue */
int num;
- /* the number of pages the device needs for the ring queue */
- int n_pages;
+
/* the index of the queue */
int queue_index;
- /* the struct page of the ring queue */
- struct page *pages;
+
/* the virtual address of the ring queue */
void *queue;
- /* a pointer to the virtqueue */
- struct virtqueue *vq;
- /* the node pointer */
+
+ /* the list node for the virtqu...
2019 Apr 19
0
[PATCH v2 1/4] mm/balloon_compaction: list interfaces
...f pages that were enqueued.
> >> + */
> >> +size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
> >> + struct list_head *pages)
> >> +{
> >> + struct page *page, *tmp;
> >> + unsigned long flags;
> >> + size_t n_pages = 0;
> >> +
> >> + spin_lock_irqsave(&b_dev_info->pages_lock, flags);
> >> + list_for_each_entry_safe(page, tmp, pages, lru) {
> >> + balloon_page_enqueue_one(b_dev_info, page);
> >
> > Do we want to do something about an error here?
>
>...
2019 Feb 07
0
[PATCH 3/6] mm/balloon_compaction: list interfaces
...ad *pages, int n_req_pages)
> >
> > Are we sure this int never overflows? Why not just use u64
> > or size_t straight away?
>
> size_t it is.
>
> >
> >> +{
> >> + struct page *page, *tmp;
> >> + unsigned long flags;
> >> + int n_pages = 0;
> >> +
> >> + spin_lock_irqsave(&b_dev_info->pages_lock, flags);
> >> + list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
> >> + /*
> >> + * Block others from accessing the 'page' while we get around
> >>...
2019 Jul 18
1
[PATCH v3 2/2] balloon: fix up comments
...es.
Returns: pointer to the page struct of the dequeued page, or NULL if no page gets dequeued.
> */
> struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
> {
> @@ -177,9 +186,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
> if (n_pages != 1) {
> /*
> * If we are unable to dequeue a balloon page because the page
> - * list is empty and there is no isolated pages, then something
> + * list is empty and there are no isolated pages, then something
> * went out of track and some balloon pag...
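A minimal caller-side sketch of the NULL-return convention being documented here; the my_drain() wrapper is a hypothetical example, only balloon_page_dequeue() is from the code under discussion.

    /* Hypothetical sketch: dequeue one page at a time until NULL. */
    #include <linux/balloon_compaction.h>
    #include <linux/gfp.h>

    static size_t my_drain(struct balloon_dev_info *b_dev_info, size_t nr)
    {
        size_t n_freed;

        for (n_freed = 0; n_freed < nr; n_freed++) {
            struct page *page = balloon_page_dequeue(b_dev_info);

            /* NULL: page list empty (or only isolated pages remain) */
            if (!page)
                break;
            __free_page(page);
        }
        return n_freed;
    }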
2019 Jul 18
2
[PATCH v3 1/2] mm/balloon_compaction: avoid duplicate page removal
...LLOON_INFLATE);
@@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
list_for_each_entry_safe(page, tmp, pages, lru) {
+ list_del(&page->lru);
balloon_page_enqueue_one(b_dev_info, page);
n_pages++;
}
@@ -128,13 +128,19 @@ struct page *balloon_page_alloc(void)
EXPORT_SYMBOL_GPL(balloon_page_alloc);
/*
- * balloon_page_enqueue - allocates a new page and inserts it into the balloon
- * page list.
+ * balloon_page_enqueue - inserts a new page into the balloon page list.
+ *
* @b_de...
2019 Jul 18
2
[PATCH v4 1/2] mm/balloon_compaction: avoid duplicate page removal
...LLOON_INFLATE);
@@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
list_for_each_entry_safe(page, tmp, pages, lru) {
+ list_del(&page->lru);
balloon_page_enqueue_one(b_dev_info, page);
n_pages++;
}
@@ -128,13 +128,19 @@ struct page *balloon_page_alloc(void)
EXPORT_SYMBOL_GPL(balloon_page_alloc);
/*
- * balloon_page_enqueue - allocates a new page and inserts it into the balloon
- * page list.
+ * balloon_page_enqueue - inserts a new page into the balloon page list.
+ *
* @b_de...
2019 Jul 18
0
[PATCH v3 2/2] balloon: fix up comments
...be dequeued.
+ *
+ * Returns: struct page address for the dequeued page, or NULL if it fails to
+ * dequeue any pages.
*/
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
@@ -177,9 +186,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
if (n_pages != 1) {
/*
* If we are unable to dequeue a balloon page because the page
- * list is empty and there is no isolated pages, then something
+ * list is empty and there are no isolated pages, then something
* went out of track and some balloon pages are lost.
- * BUG() here, otherwise...
2019 Jul 18
0
[PATCH v4 2/2] balloon: fix up comments
...+ * until all pages can be dequeued.
+ *
+ * Returns: struct page for the dequeued page, or NULL if no page was dequeued.
*/
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
@@ -177,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
if (n_pages != 1) {
/*
* If we are unable to dequeue a balloon page because the page
- * list is empty and there is no isolated pages, then something
+ * list is empty and there are no isolated pages, then something
* went out of track and some balloon pages are lost.
- * BUG() here, otherwise...
2019 Jul 16
1
[PATCH v3 2/3] drm: plumb attaching dev thru to prime_pin/unpin
...struct radeon_bo *bo = gem_to_radeon_bo(obj);
int ret = 0;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 76d95b5e289c..e7d12e93b1f0 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -307,7 +307,7 @@ static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
mutex_unlock(&bo->pages_lock);
}
-static int vgem_prime_pin(struct drm_gem_object *obj)
+static int vgem_prime_pin(struct drm_gem_object *obj, struct device *dev)
{
struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
long n_pages = obj->size >>...