Displaying 20 results from an estimated 97 matches for "zoneref_set_zon".
Did you mean:
zoneref_set_zone
2017 Jul 14
4
[PATCH v12 6/8] mm: support reporting free page blocks
...he free list. Offer the next one.
> + */
> + *page = list_next_entry((*page), lru);
> + ret = 0;
> +out:
> + spin_unlock_irqrestore(&this_zone->lock, flags);
> + return ret;
> +}
> +EXPORT_SYMBOL(report_unused_page_block);
> +
> +#endif
> +
> static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
> {
> zoneref->zone = zone;
> --
> 2.7.4
>
> --
> To unsubscribe, send a message with 'unsubscribe linux-mm' in
> the body to majordomo at kvack.org. For more info on Linux MM,
> see: http://www.linux-mm.org/ ....
2017 Jul 14
4
[PATCH v12 6/8] mm: support reporting free page blocks
...he free list. Offer the next one.
> + */
> + *page = list_next_entry((*page), lru);
> + ret = 0;
> +out:
> + spin_unlock_irqrestore(&this_zone->lock, flags);
> + return ret;
> +}
> +EXPORT_SYMBOL(report_unused_page_block);
> +
> +#endif
> +
> static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
> {
> zoneref->zone = zone;
> --
> 2.7.4
>
> --
> To unsubscribe, send a message with 'unsubscribe linux-mm' in
> the body to majordomo at kvack.org. For more info on Linux MM,
> see: http://www.linux-mm.org/ ....
2016 Jul 27
0
[PATCH v2 repost 3/7] mm: add a function to get the max pfn
...ge_alloc.c b/mm/page_alloc.c
index 8b3e134..7da61ad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4517,6 +4517,12 @@ void show_free_areas(unsigned int filter)
show_swap_cache_info();
}
+unsigned long get_max_pfn(void)
+{
+ return max_pfn;
+}
+EXPORT_SYMBOL(get_max_pfn);
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
1.9.1
2016 Jul 27
0
[PATCH v2 repost 6/7] mm: add the related functions to get free page info
...> end_pfn || start_pfn >= max_pfn)
+ return 0;
+ if (end_pfn < max_pfn)
+ ret = 1;
+ if (end_pfn >= max_pfn)
+ ret = 0;
+
+ for_each_populated_zone(zone)
+ mark_free_pages_bitmap(zone, start_pfn, end_pfn, bitmap, len);
+ return ret;
+}
+EXPORT_SYMBOL(get_free_pages);
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
1.9.1
2016 Jul 27
1
[PATCH v2 repost 3/7] mm: add a function to get the max pfn
...show_swap_cache_info();
> }
>
> +unsigned long get_max_pfn(void)
> +{
> + return max_pfn;
> +}
> +EXPORT_SYMBOL(get_max_pfn);
> +
This needs a comment that this can change at any time.
So it's only good as a hint e.g. for sizing data structures.
> static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
> {
> zoneref->zone = zone;
> --
> 1.9.1
2016 Oct 21
0
[RESEND PATCH v3 kernel 3/7] mm: add a function to get the max pfn
...w_free_areas(unsigned int filter)
show_swap_cache_info();
}
+/*
+ * The max_pfn can change because of memory hot plug, so it's only good
+ * as a hint. e.g. for sizing data structures.
+ */
+unsigned long get_max_pfn(void)
+{
+ return max_pfn;
+}
+EXPORT_SYMBOL(get_max_pfn);
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
1.8.3.1
2016 Jul 27
1
[PATCH v2 repost 3/7] mm: add a function to get the max pfn
...show_swap_cache_info();
> }
>
> +unsigned long get_max_pfn(void)
> +{
> + return max_pfn;
> +}
> +EXPORT_SYMBOL(get_max_pfn);
> +
This needs a comment that this can change at any time.
So it's only good as a hint e.g. for sizing data structures.
> static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
> {
> zoneref->zone = zone;
> --
> 1.9.1
2018 Jan 25
3
[PATCH v25 1/2 RESEND] mm: support reporting free page blocks
...1; order >= min_order; order--) {
+ for (mt = 0; mt < MIGRATE_TYPES; mt++) {
+ ret = walk_free_page_list(opaque, zone,
+ order, mt,
+ report_pfn_range);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(walk_free_mem_block);
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
2.7.4
2018 Jan 25
3
[PATCH v25 1/2 RESEND] mm: support reporting free page blocks
...1; order >= min_order; order--) {
+ for (mt = 0; mt < MIGRATE_TYPES; mt++) {
+ ret = walk_free_page_list(opaque, zone,
+ order, mt,
+ report_pfn_range);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(walk_free_mem_block);
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
2.7.4
2017 Apr 13
0
[PATCH v9 3/5] mm: function to offer a page block on the free list
...egular case: the page block passed from the
+ * caller is still on the free list. Offer the next one.
+ */
+ *page = list_next_entry((*page), lru);
+ ret = 0;
+out:
+ spin_unlock_irqrestore(&this_zone->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(inquire_unused_page_block);
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
2.7.4
2017 Jun 09
0
[PATCH v11 4/6] mm: function to offer a page block on the free list
...se: the page block passed from the
+ * caller is still on the free list. Offer the next one.
+ */
+ *page = list_next_entry((*page), lru);
+ ret = 0;
+out:
+ spin_unlock_irqrestore(&this_zone->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(report_unused_page_block);
+
+#endif
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
2.7.4
2017 May 04
0
[PATCH v10 4/6] mm: function to offer a page block on the free list
...se: the page block passed from the
+ * caller is still on the free list. Offer the next one.
+ */
+ *page = list_next_entry((*page), lru);
+ ret = 0;
+out:
+ spin_unlock_irqrestore(&this_zone->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(report_unused_page_block);
+
+#endif
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
2.7.4
2018 Jun 15
0
[PATCH v33 1/4] mm: add a function to get free page blocks
...buf[index++] = cpu_to_le64(addr);
+ } else {
+ spin_unlock_irqrestore(&zone->lock,
+ flags);
+ return index;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+
+ return index;
+}
+EXPORT_SYMBOL_GPL(get_from_free_page_list);
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
2.7.4
2018 Apr 10
0
[PATCH v29 1/4] mm: support reporting free page blocks
...> > +}
> > +EXPORT_SYMBOL_GPL(walk_free_mem_block);
>
> This looks like it could take a long time. Will we end up needing to
> add cond_resched() in there somewhere?
Andrew, were your questions answered? If yes could I bother you for an ack on this?
> > static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
> > {
> > zoneref->zone = zone;
> > --
> > 2.7.4
2016 Jul 27
1
[PATCH v2 repost 6/7] mm: add the related functions to get free page info
...+ if (end_pfn < max_pfn)
> + ret = 1;
> + if (end_pfn >= max_pfn)
> + ret = 0;
> +
> + for_each_populated_zone(zone)
> + mark_free_pages_bitmap(zone, start_pfn, end_pfn, bitmap, len);
> + return ret;
> +}
> +EXPORT_SYMBOL(get_free_pages);
> +
> static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
> {
> zoneref->zone = zone;
> --
> 1.9.1
2016 Jul 27
1
[PATCH v2 repost 6/7] mm: add the related functions to get free page info
...+ if (end_pfn < max_pfn)
> + ret = 1;
> + if (end_pfn >= max_pfn)
> + ret = 0;
> +
> + for_each_populated_zone(zone)
> + mark_free_pages_bitmap(zone, start_pfn, end_pfn, bitmap, len);
> + return ret;
> +}
> +EXPORT_SYMBOL(get_free_pages);
> +
> static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
> {
> zoneref->zone = zone;
> --
> 1.9.1
2017 Mar 16
0
[PATCH kernel v8 3/4] mm: add inerface to offer info about unused pages
...ntinue;
+ else
+ skip_check = true;
+ }
+ ret = __record_unused_pages(zone, order, buf, size,
+ offset, part_fill);
+ if (ret < 0) {
+ /* record the failed zone */
+ *start_zone = zone;
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(record_unused_pages);
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
2.7.4
2017 Jul 12
0
[PATCH v12 6/8] mm: support reporting free page blocks
...se: the page block passed from the
+ * caller is still on the free list. Offer the next one.
+ */
+ *page = list_next_entry((*page), lru);
+ ret = 0;
+out:
+ spin_unlock_irqrestore(&this_zone->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(report_unused_page_block);
+
+#endif
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
2.7.4
2017 Sep 30
0
[PATCH v16 4/5] mm: support reporting free page blocks
...er = MAX_ORDER - 1; order >= min_order; order--) {
+ for (mt = 0; mt < MIGRATE_TYPES; mt++) {
+ ret = walk_free_page_list(opaque, zone,
+ order, mt,
+ report_pfn_range);
+ if (!ret)
+ return;
+ }
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(walk_free_mem_block);
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
2.7.4
2018 Jan 24
0
[PATCH v24 1/2] mm: support reporting free page blocks
...er = MAX_ORDER - 1; order >= min_order; order--) {
+ for (mt = 0; mt < MIGRATE_TYPES; mt++) {
+ ret = walk_free_page_list(opaque, zone,
+ order, mt,
+ report_pfn_range);
+ if (!ret)
+ return;
+ }
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(walk_free_mem_block);
+
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
zoneref->zone = zone;
--
2.7.4