Displaying 20 results from an estimated 42 matches for "zone_is_empty".
2016 Nov 30
2
[PATCH kernel v5 5/5] virtio-balloon: tell host vm's unused page info
...> +static int mark_unused_pages(struct zone *zone,
> + unsigned long *unused_pages, unsigned long size,
> + int order, unsigned long *pos)
> +{
> + unsigned long pfn, flags;
> + unsigned int t;
> + struct list_head *curr;
> + struct page_info_item *info;
> +
> + if (zone_is_empty(zone))
> + return 0;
> +
> + spin_lock_irqsave(&zone->lock, flags);
> +
> + if (*pos + zone->free_area[order].nr_free > size)
> + return -ENOSPC;
Urg, so this won't partially fill? So, what the nr_free pages limit
where we no longer fit in the kmalloc()'d...
2016 Nov 30
2
[PATCH kernel v5 5/5] virtio-balloon: tell host vm's unused page info
...> +static int mark_unused_pages(struct zone *zone,
> + unsigned long *unused_pages, unsigned long size,
> + int order, unsigned long *pos)
> +{
> + unsigned long pfn, flags;
> + unsigned int t;
> + struct list_head *curr;
> + struct page_info_item *info;
> +
> + if (zone_is_empty(zone))
> + return 0;
> +
> + spin_lock_irqsave(&zone->lock, flags);
> +
> + if (*pos + zone->free_area[order].nr_free > size)
> + return -ENOSPC;
Urg, so this won't partially fill? So, what the nr_free pages limit
where we no longer fit in the kmalloc()'d...
2016 Jul 27
1
[PATCH v2 repost 6/7] mm: add the related functions to get free page info
...pfn);
>
> +static void mark_free_pages_bitmap(struct zone *zone, unsigned long start_pfn,
> + unsigned long end_pfn, unsigned long *bitmap, unsigned long len)
> +{
> + unsigned long pfn, flags, page_num;
> + unsigned int order, t;
> + struct list_head *curr;
> +
> + if (zone_is_empty(zone))
> + return;
> + end_pfn = min(start_pfn + len, end_pfn);
> + spin_lock_irqsave(&zone->lock, flags);
> +
> + for_each_migratetype_order(order, t) {
Why not do each order separately? This way you can
use a single bit to pass a huge page to host.
Not a requirement but h...
2016 Jul 27
1
[PATCH v2 repost 6/7] mm: add the related functions to get free page info
...pfn);
>
> +static void mark_free_pages_bitmap(struct zone *zone, unsigned long start_pfn,
> + unsigned long end_pfn, unsigned long *bitmap, unsigned long len)
> +{
> + unsigned long pfn, flags, page_num;
> + unsigned int order, t;
> + struct list_head *curr;
> +
> + if (zone_is_empty(zone))
> + return;
> + end_pfn = min(start_pfn + len, end_pfn);
> + spin_lock_irqsave(&zone->lock, flags);
> +
> + for_each_migratetype_order(order, t) {
Why not do each order separately? This way you can
use a single bit to pass a huge page to host.
Not a requirement but h...
2016 Mar 04
2
[Qemu-devel] [RFC qemu 0/4] A PV solution for live migration optimization
...irs, so it's faster.
Some code snippet:
----------------------------------------------
+static void mark_free_pages_bitmap(struct zone *zone,
+ unsigned long *free_page_bitmap, unsigned long pfn_gap) {
+ unsigned long pfn, flags, i;
+ unsigned int order, t;
+ struct list_head *curr;
+
+ if (zone_is_empty(zone))
+ return;
+
+ spin_lock_irqsave(&zone->lock, flags);
+
+ for_each_migratetype_order(order, t) {
+ list_for_each(curr, &zone->free_area[order].free_list[t]) {
+
+ pfn = page_to_pfn(list_entry(curr, struct page, lru));
+ for (i = 0; i < (1UL << order); i++) {
+...
2016 Mar 04
2
[Qemu-devel] [RFC qemu 0/4] A PV solution for live migration optimization
...irs, so it's faster.
Some code snippet:
----------------------------------------------
+static void mark_free_pages_bitmap(struct zone *zone,
+ unsigned long *free_page_bitmap, unsigned long pfn_gap) {
+ unsigned long pfn, flags, i;
+ unsigned int order, t;
+ struct list_head *curr;
+
+ if (zone_is_empty(zone))
+ return;
+
+ spin_lock_irqsave(&zone->lock, flags);
+
+ for_each_migratetype_order(order, t) {
+ list_for_each(curr, &zone->free_area[order].free_list[t]) {
+
+ pfn = page_to_pfn(list_entry(curr, struct page, lru));
+ for (i = 0; i < (1UL << order); i++) {
+...
2017 Mar 16
4
[PATCH kernel v8 3/4] mm: add interface to offer info about unused pages
...;
> +static int __record_unused_pages(struct zone *zone, int order,
> + __le64 *buf, unsigned int size,
> + unsigned int *offset, bool part_fill)
> +{
> + unsigned long pfn, flags;
> + int t, ret = 0;
> + struct list_head *curr;
> + __le64 *chunk;
> +
> + if (zone_is_empty(zone))
> + return 0;
> +
> + spin_lock_irqsave(&zone->lock, flags);
> +
> + if (*offset + zone->free_area[order].nr_free > size && !part_fill) {
> + ret = -ENOSPC;
> + goto out;
> + }
> + for (t = 0; t < MIGRATE_TYPES; t++) {
> + list_for_ea...
2017 Mar 16
4
[PATCH kernel v8 3/4] mm: add interface to offer info about unused pages
...;
> +static int __record_unused_pages(struct zone *zone, int order,
> + __le64 *buf, unsigned int size,
> + unsigned int *offset, bool part_fill)
> +{
> + unsigned long pfn, flags;
> + int t, ret = 0;
> + struct list_head *curr;
> + __le64 *chunk;
> +
> + if (zone_is_empty(zone))
> + return 0;
> +
> + spin_lock_irqsave(&zone->lock, flags);
> +
> + if (*offset + zone->free_area[order].nr_free > size && !part_fill) {
> + ret = -ENOSPC;
> + goto out;
> + }
> + for (t = 0; t < MIGRATE_TYPES; t++) {
> + list_for_ea...
2017 Mar 29
2
[PATCH kernel v8 3/4] mm: add interface to offer info about unused pages
...__le64 *buf, unsigned int size,
> > > + unsigned int *offset, bool part_fill)
> > > +{
> > > + unsigned long pfn, flags;
> > > + int t, ret = 0;
> > > + struct list_head *curr;
> > > + __le64 *chunk;
> > > +
> > > + if (zone_is_empty(zone))
> > > + return 0;
> > > +
> > > + spin_lock_irqsave(&zone->lock, flags);
> > > +
> > > + if (*offset + zone->free_area[order].nr_free > size && !part_fill) {
> > > + ret = -ENOSPC;
> > > + goto out;
> &...
2017 Mar 29
2
[PATCH kernel v8 3/4] mm: add interface to offer info about unused pages
...__le64 *buf, unsigned int size,
> > > + unsigned int *offset, bool part_fill)
> > > +{
> > > + unsigned long pfn, flags;
> > > + int t, ret = 0;
> > > + struct list_head *curr;
> > > + __le64 *chunk;
> > > +
> > > + if (zone_is_empty(zone))
> > > + return 0;
> > > +
> > > + spin_lock_irqsave(&zone->lock, flags);
> > > +
> > > + if (*offset + zone->free_area[order].nr_free > size && !part_fill) {
> > > + ret = -ENOSPC;
> > > + goto out;
> &...
2016 Dec 05
1
[PATCH kernel v5 5/5] virtio-balloon: tell host vm's unused page info
...nsigned long *unused_pages, unsigned long size,
>>> + int order, unsigned long *pos)
>>> +{
>>> + unsigned long pfn, flags;
>>> + unsigned int t;
>>> + struct list_head *curr;
>>> + struct page_info_item *info;
>>> +
>>> + if (zone_is_empty(zone))
>>> + return 0;
>>> +
>>> + spin_lock_irqsave(&zone->lock, flags);
>>> +
>>> + if (*pos + zone->free_area[order].nr_free > size)
>>> + return -ENOSPC;
>>
>> Urg, so this won't partially fill? So, what the n...
2016 Dec 05
1
[PATCH kernel v5 5/5] virtio-balloon: tell host vm's unused page info
...nsigned long *unused_pages, unsigned long size,
>>> + int order, unsigned long *pos)
>>> +{
>>> + unsigned long pfn, flags;
>>> + unsigned int t;
>>> + struct list_head *curr;
>>> + struct page_info_item *info;
>>> +
>>> + if (zone_is_empty(zone))
>>> + return 0;
>>> +
>>> + spin_lock_irqsave(&zone->lock, flags);
>>> +
>>> + if (*pos + zone->free_area[order].nr_free > size)
>>> + return -ENOSPC;
>>
>> Urg, so this won't partially fill? So, what the n...
2016 Mar 08
0
[Qemu-devel] [RFC qemu 0/4] A PV solution for live migration optimization
...ppet:
> ----------------------------------------------
> +static void mark_free_pages_bitmap(struct zone *zone,
> + unsigned long *free_page_bitmap, unsigned long pfn_gap) {
> + unsigned long pfn, flags, i;
> + unsigned int order, t;
> + struct list_head *curr;
> +
> + if (zone_is_empty(zone))
> + return;
> +
> + spin_lock_irqsave(&zone->lock, flags);
> +
> + for_each_migratetype_order(order, t) {
> + list_for_each(curr, &zone->free_area[order].free_list[t]) {
> +
> + pfn = page_to_pfn(list_entry(curr, struct page, lru));
> + for (i =...
2016 Jul 27
0
[PATCH v2 repost 6/7] mm: add the related functions to get free page info
...get_max_pfn(void)
}
EXPORT_SYMBOL(get_max_pfn);
+static void mark_free_pages_bitmap(struct zone *zone, unsigned long start_pfn,
+ unsigned long end_pfn, unsigned long *bitmap, unsigned long len)
+{
+ unsigned long pfn, flags, page_num;
+ unsigned int order, t;
+ struct list_head *curr;
+
+ if (zone_is_empty(zone))
+ return;
+ end_pfn = min(start_pfn + len, end_pfn);
+ spin_lock_irqsave(&zone->lock, flags);
+
+ for_each_migratetype_order(order, t) {
+ list_for_each(curr, &zone->free_area[order].free_list[t]) {
+ pfn = page_to_pfn(list_entry(curr, struct page, lru));
+ if (pfn >=...
2016 Dec 04
0
[PATCH kernel v5 5/5] virtio-balloon: tell host vm's unused page info
...ne *zone,
> > + unsigned long *unused_pages, unsigned long size,
> > + int order, unsigned long *pos)
> > +{
> > + unsigned long pfn, flags;
> > + unsigned int t;
> > + struct list_head *curr;
> > + struct page_info_item *info;
> > +
> > + if (zone_is_empty(zone))
> > + return 0;
> > +
> > + spin_lock_irqsave(&zone->lock, flags);
> > +
> > + if (*pos + zone->free_area[order].nr_free > size)
> > + return -ENOSPC;
>
> Urg, so this won't partially fill? So, what the nr_free pages limit where...
2017 Mar 31
0
[PATCH kernel v8 3/4] mm: add interface to offer info about unused pages
...unsigned int size,
>>>> + unsigned int *offset, bool part_fill)
>>>> +{
>>>> + unsigned long pfn, flags;
>>>> + int t, ret = 0;
>>>> + struct list_head *curr;
>>>> + __le64 *chunk;
>>>> +
>>>> + if (zone_is_empty(zone))
>>>> + return 0;
>>>> +
>>>> + spin_lock_irqsave(&zone->lock, flags);
>>>> +
>>>> + if (*offset + zone->free_area[order].nr_free > size && !part_fill) {
>>>> + ret = -ENOSPC;
>>>> + go...
2017 Mar 16
0
[PATCH kernel v8 3/4] mm: add interface to offer info about unused pages
...s(unsigned int filter)
show_swap_cache_info();
}
+static int __record_unused_pages(struct zone *zone, int order,
+ __le64 *buf, unsigned int size,
+ unsigned int *offset, bool part_fill)
+{
+ unsigned long pfn, flags;
+ int t, ret = 0;
+ struct list_head *curr;
+ __le64 *chunk;
+
+ if (zone_is_empty(zone))
+ return 0;
+
+ spin_lock_irqsave(&zone->lock, flags);
+
+ if (*offset + zone->free_area[order].nr_free > size && !part_fill) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ for (t = 0; t < MIGRATE_TYPES; t++) {
+ list_for_each(curr, &zone->free_area[order].free_list[...
2017 Mar 17
0
[PATCH kernel v8 3/4] mm: add interface to offer info about unused pages
..._pages(struct zone *zone, int order,
>> + __le64 *buf, unsigned int size,
>> + unsigned int *offset, bool part_fill)
>> +{
>> + unsigned long pfn, flags;
>> + int t, ret = 0;
>> + struct list_head *curr;
>> + __le64 *chunk;
>> +
>> + if (zone_is_empty(zone))
>> + return 0;
>> +
>> + spin_lock_irqsave(&zone->lock, flags);
>> +
>> + if (*offset + zone->free_area[order].nr_free > size && !part_fill) {
>> + ret = -ENOSPC;
>> + goto out;
>> + }
>> + for (t = 0; t < MIGRA...
2016 Mar 04
2
[Qemu-devel] [RFC qemu 0/4] A PV solution for live migration optimization
> On Fri, Mar 04, 2016 at 09:12:12AM +0000, Li, Liang Z wrote:
> > > Although I wonder which is cheaper; that would be fairly expensive
> > > for the guest wouldn't it? And you'd somehow have to kick the guest
> > > before migration to do the ballooning - and how long would you wait for
> it to finish?
> >
> > About 5 seconds for an 8G guest,
2016 Mar 04
2
[Qemu-devel] [RFC qemu 0/4] A PV solution for live migration optimization
> On Fri, Mar 04, 2016 at 09:12:12AM +0000, Li, Liang Z wrote:
> > > Although I wonder which is cheaper; that would be fairly expensive
> > > for the guest wouldn't it? And you'd somehow have to kick the guest
> > > before migration to do the ballooning - and how long would you wait for
> it to finish?
> >
> > About 5 seconds for an 8G guest,