UPSTREAM: mm: page_alloc: change move_freepages() to __move_freepages_block()

The function is now supposed to be called only on a single pageblock and
checks start_pfn and end_pfn accordingly.  Rename it to make this more
obvious, and drop the end_pfn parameter, which can be determined trivially
and which none of the callers use for anything else.
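As a rough illustration of why the parameter is redundant, here is a
standalone userspace sketch (not the kernel code: BLOCK_NR_PFNS, prep_block()
and move_block() are made-up stand-ins for pageblock_nr_pages,
prep_move_freepages_block() and __move_freepages_block()).  The end of an
aligned pageblock follows trivially from its start, so only start_pfn needs
to escape the prep helper:

#include <stdbool.h>
#include <stdio.h>

#define BLOCK_NR_PFNS 512UL	/* assumed pageblock size, for the sketch only */

/* Stand-in for prep_move_freepages_block(): only the aligned start escapes. */
static bool prep_block(unsigned long pfn, unsigned long *start_pfn)
{
	*start_pfn = pfn & ~(BLOCK_NR_PFNS - 1);
	return true;
}

/* Stand-in for __move_freepages_block(): derives its own exclusive end. */
static int move_block(unsigned long start_pfn)
{
	unsigned long pfn, end_pfn = start_pfn + BLOCK_NR_PFNS;
	int pages_moved = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		pages_moved++;	/* the real code moves buddy pages here */

	return pages_moved;
}

int main(void)
{
	unsigned long start_pfn;

	if (prep_block(0x12345, &start_pfn))
		printf("block [%#lx, %#lx): moved %d pfns\n", start_pfn,
		       start_pfn + BLOCK_NR_PFNS, move_block(start_pfn));
	return 0;
}

Recomputing the end inside the mover keeps the caller interface down to a
single aligned pfn, which is the point of the cleanup.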

Also make the (now internal) end_pfn exclusive, which is more common.
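For the exclusive-end convention itself, a minimal sketch (again with an
assumed block size; none of this is kernel code) showing that a half-open
[start, end) range pairs with a '<' loop test, while the old inclusive end
needed the '- 1' and a '<=':

#include <stdio.h>

#define BLOCK_NR_PFNS 512UL	/* assumed pageblock size, for the sketch only */

int main(void)
{
	unsigned long start = 0x12200;			/* block-aligned pfn */
	unsigned long end_excl = start + BLOCK_NR_PFNS;	/* one past the last pfn */
	unsigned long end_incl = end_excl - 1;		/* the old, inclusive form */
	unsigned long pfn, n_excl = 0, n_incl = 0;

	for (pfn = start; pfn < end_excl; pfn++)	/* new convention */
		n_excl++;
	for (pfn = start; pfn <= end_incl; pfn++)	/* old convention */
		n_incl++;

	printf("exclusive walk: %lu pfns, inclusive walk: %lu pfns\n",
	       n_excl, n_incl);
	return 0;
}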

Link: https://lkml.kernel.org/r/81b1d642-2ec0-49f5-89fc-19a3828419ff@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Bug: 420836317
(cherry picked from commit e1f42a577f63647dadf1abe4583053c03d6be045)
Change-Id: I1e9ecd1670fda3edafff834849fbac2705a36324
Signed-off-by: yipeng xiang <yipengxiang@honor.corp-partner.google.com>
Author:       Vlastimil Babka
Date:         2024-04-25 20:56:04 -07:00
Committed by: Isaac Manjarres
Parent:       e0a00524db
Commit:       f45ef0a06f

mm/page_alloc.c

@@ -1766,18 +1766,18 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  * Change the type of a block and move all its free pages to that
  * type's freelist.
  */
-static int move_freepages(struct zone *zone, unsigned long start_pfn,
-			  unsigned long end_pfn, int old_mt, int new_mt)
+static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
+				  int old_mt, int new_mt)
 {
 	struct page *page;
-	unsigned long pfn;
+	unsigned long pfn, end_pfn;
 	unsigned int order;
 	int pages_moved = 0;
 
 	VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
-	VM_WARN_ON(start_pfn + pageblock_nr_pages - 1 != end_pfn);
+	end_pfn = pageblock_end_pfn(start_pfn);
 
-	for (pfn = start_pfn; pfn <= end_pfn;) {
+	for (pfn = start_pfn; pfn < end_pfn;) {
 		page = pfn_to_page(pfn);
 		if (!PageBuddy(page)) {
 			pfn++;
@@ -1803,14 +1803,13 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
 
 static bool prep_move_freepages_block(struct zone *zone, struct page *page,
 				      unsigned long *start_pfn,
-				      unsigned long *end_pfn,
 				      int *num_free, int *num_movable)
 {
 	unsigned long pfn, start, end;
 
 	pfn = page_to_pfn(page);
 	start = pageblock_start_pfn(pfn);
-	end = pageblock_end_pfn(pfn) - 1;
+	end = pageblock_end_pfn(pfn);
 
 	/*
 	 * The caller only has the lock for @zone, don't touch ranges
@@ -1821,16 +1820,15 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
 	 */
 	if (!zone_spans_pfn(zone, start))
 		return false;
-	if (!zone_spans_pfn(zone, end))
+	if (!zone_spans_pfn(zone, end - 1))
 		return false;
 
 	*start_pfn = start;
-	*end_pfn = end;
 
 	if (num_free) {
 		*num_free = 0;
 		*num_movable = 0;
-		for (pfn = start; pfn <= end;) {
+		for (pfn = start; pfn < end;) {
 			page = pfn_to_page(pfn);
 			if (PageBuddy(page)) {
 				int nr = 1 << buddy_order(page);
@@ -1856,13 +1854,12 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
 static int move_freepages_block(struct zone *zone, struct page *page,
 				int old_mt, int new_mt)
 {
-	unsigned long start_pfn, end_pfn;
+	unsigned long start_pfn;
 
-	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
-				       NULL, NULL))
+	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
 		return -1;
 
-	return move_freepages(zone, start_pfn, end_pfn, old_mt, new_mt);
+	return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
 }
 
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -1933,10 +1930,9 @@ static void split_large_buddy(struct zone *zone, struct page *page,
 bool move_freepages_block_isolate(struct zone *zone, struct page *page,
 				  int migratetype)
 {
-	unsigned long start_pfn, end_pfn, pfn;
+	unsigned long start_pfn, pfn;
 
-	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
-				       NULL, NULL))
+	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
 		return false;
 
 	/* No splits needed if buddies can't span multiple blocks */
@@ -1967,8 +1963,9 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
 		return true;
 	}
 move:
-	move_freepages(zone, start_pfn, end_pfn,
-		       get_pfnblock_migratetype(page, start_pfn), migratetype);
+	__move_freepages_block(zone, start_pfn,
+			       get_pfnblock_migratetype(page, start_pfn),
+			       migratetype);
 	return true;
 }
 #endif /* CONFIG_MEMORY_ISOLATION */
@@ -2068,7 +2065,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 		unsigned int alloc_flags, bool whole_block)
 {
 	int free_pages, movable_pages, alike_pages;
-	unsigned long start_pfn, end_pfn;
+	unsigned long start_pfn;
 	int block_type;
 
 	block_type = get_pageblock_migratetype(page);
@@ -2101,8 +2098,8 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 		goto single_page;
 
 	/* moving whole block can fail due to zone boundary conditions */
-	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
-				       &free_pages, &movable_pages))
+	if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
+				       &movable_pages))
 		goto single_page;
 
 	/*
@@ -2132,7 +2129,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 	 */
 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
 			page_group_by_mobility_disabled) {
-		move_freepages(zone, start_pfn, end_pfn, block_type, start_type);
+		__move_freepages_block(zone, start_pfn, block_type, start_type);
 		return __rmqueue_smallest(zone, order, start_type);
 	}
 