Revert "UPSTREAM: mm: page_alloc: fix freelist movement during block conversion"

This reverts commit 4e814d99e0.

Change-Id: Ie2a654bcd14c184e12c90757fecdf2231bdbe068
Bug: 420771453
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Kalesh Singh <kaleshsingh@google.com> committed 2025-05-27 21:12:32 -07:00
parent 135ab7374e  commit d47518de38
3 changed files with 78 additions and 121 deletions

include/linux/page-isolation.h

@@ -34,7 +34,8 @@ static inline bool is_migrate_isolate(int migratetype)
 #define REPORT_FAILURE	0x2
 
 void set_pageblock_migratetype(struct page *page, int migratetype);
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype);
+int move_freepages_block(struct zone *zone, struct page *page,
+			int migratetype, int *num_movable);
 
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			 int migratetype, int flags, gfp_t gfp_flags);
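
For context, the calling convention this revert restores (visible in the declaration above and in the mm/page_alloc.c hunks below): the upstream helper being reverted returns the number of pages moved, or -1 when the pageblock straddles a zone boundary, while the restored helper returns 0 in that case and reports the count of allocated-but-movable pages through an optional num_movable out-parameter. A minimal userspace sketch of the two conventions, with stand-in types and illustrative values rather than kernel code:

	#include <stdio.h>

	/* Stand-in for a scanned pageblock; names and values are illustrative. */
	struct block {
		int spans_zone;   /* straddles a zone boundary */
		int free_pages;   /* free pages found in the block */
		int movable;      /* allocated but movable pages */
	};

	/* Upstream convention being reverted: -1 signals a straddling block. */
	static int move_block_upstream(const struct block *b)
	{
		if (b->spans_zone)
			return -1;
		return b->free_pages;
	}

	/* Restored convention: 0 on a straddling block, optional movable count. */
	static int move_block_restored(const struct block *b, int *num_movable)
	{
		if (num_movable)
			*num_movable = 0;
		if (b->spans_zone)
			return 0;
		if (num_movable)
			*num_movable = b->movable;
		return b->free_pages;
	}

	int main(void)
	{
		struct block b = { .spans_zone = 0, .free_pages = 12, .movable = 3 };
		int movable;

		printf("upstream: %d\n", move_block_upstream(&b));
		printf("restored: %d (movable %d)\n",
		       move_block_restored(&b, &movable), movable);
		return 0;
	}

The distinction matters to the callers changed below: the reverted code in reserve_highatomic_pageblock() and set_migratetype_isolate() tests for -1, while the restored code consumes the raw count for freepage accounting.
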

mm/page_alloc.c

@@ -1796,8 +1796,9 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-static int move_freepages(struct zone *zone, unsigned long start_pfn,
-			  unsigned long end_pfn, int migratetype)
+static int move_freepages(struct zone *zone,
+			  unsigned long start_pfn, unsigned long end_pfn,
+			  int migratetype, int *num_movable)
 {
 	struct page *page;
 	unsigned long pfn;
@@ -1807,6 +1808,14 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
 	for (pfn = start_pfn; pfn <= end_pfn;) {
 		page = pfn_to_page(pfn);
 		if (!PageBuddy(page)) {
+			/*
+			 * We assume that pages that could be isolated for
+			 * migration are movable. But we don't actually try
+			 * isolating, as that would be expensive.
+			 */
+			if (num_movable &&
+					(PageLRU(page) || __PageMovable(page)))
+				(*num_movable)++;
 			pfn++;
 			continue;
 		}
@@ -1824,16 +1833,17 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
 	return pages_moved;
 }
 
-static bool prep_move_freepages_block(struct zone *zone, struct page *page,
-				      unsigned long *start_pfn,
-				      unsigned long *end_pfn,
-				      int *num_free, int *num_movable)
+int move_freepages_block(struct zone *zone, struct page *page,
+				int migratetype, int *num_movable)
 {
-	unsigned long pfn, start, end;
+	unsigned long start_pfn, end_pfn, pfn;
+
+	if (num_movable)
+		*num_movable = 0;
 
 	pfn = page_to_pfn(page);
-	start = pageblock_start_pfn(pfn);
-	end = pageblock_end_pfn(pfn) - 1;
+	start_pfn = pageblock_start_pfn(pfn);
+	end_pfn = pageblock_end_pfn(pfn) - 1;
 
 	/*
 	 * The caller only has the lock for @zone, don't touch ranges
@@ -1842,50 +1852,13 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
 	 * accompanied by other operations such as migratetype updates
 	 * which also should be locked.
 	 */
-	if (!zone_spans_pfn(zone, start))
-		return false;
-	if (!zone_spans_pfn(zone, end))
-		return false;
-
-	*start_pfn = start;
-	*end_pfn = end;
-
-	if (num_free) {
-		*num_free = 0;
-		*num_movable = 0;
-		for (pfn = start; pfn <= end;) {
-			page = pfn_to_page(pfn);
-			if (PageBuddy(page)) {
-				int nr = 1 << buddy_order(page);
-
-				*num_free += nr;
-				pfn += nr;
-				continue;
-			}
-
-			/*
-			 * We assume that pages that could be isolated for
-			 * migration are movable. But we don't actually try
-			 * isolating, as that would be expensive.
-			 */
-			if (PageLRU(page) || __PageMovable(page))
-				(*num_movable)++;
-			pfn++;
-		}
-	}
-
-	return true;
-}
-
-int move_freepages_block(struct zone *zone, struct page *page,
-			 int migratetype)
-{
-	unsigned long start_pfn, end_pfn;
-
-	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
-				       NULL, NULL))
-		return -1;
-	return move_freepages(zone, start_pfn, end_pfn, migratetype);
+	if (!zone_spans_pfn(zone, start_pfn))
+		return 0;
+	if (!zone_spans_pfn(zone, end_pfn))
+		return 0;
+
+	return move_freepages(zone, start_pfn, end_pfn, migratetype,
+								num_movable);
 }
 
 static void change_pageblock_range(struct page *pageblock_page,
@@ -1970,37 +1943,33 @@ static inline bool boost_watermark(struct zone *zone)
 }
 
 /*
- * This function implements actual steal behaviour. If order is large enough, we
- * can claim the whole pageblock for the requested migratetype. If not, we check
- * the pageblock for constituent pages; if at least half of the pages are free
- * or compatible, we can still claim the whole block, so pages freed in the
- * future will be put on the correct free list. Otherwise, we isolate exactly
- * the order we need from the fallback block and leave its migratetype alone.
+ * This function implements actual steal behaviour. If order is large enough,
+ * we can steal whole pageblock. If not, we first move freepages in this
+ * pageblock to our migratetype and determine how many already-allocated pages
+ * are there in the pageblock with a compatible migratetype. If at least half
+ * of pages are free or compatible, we can change migratetype of the pageblock
+ * itself, so pages freed in the future will be put on the correct free list.
  */
-static struct page *
-steal_suitable_fallback(struct zone *zone, struct page *page,
-			int current_order, int order, int start_type,
-			unsigned int alloc_flags, bool whole_block)
+static void steal_suitable_fallback(struct zone *zone, struct page *page,
+		unsigned int alloc_flags, int start_type, bool whole_block)
 {
+	unsigned int current_order = buddy_order(page);
 	int free_pages, movable_pages, alike_pages;
-	unsigned long start_pfn, end_pfn;
-	int block_type;
+	int old_block_type;
 
-	block_type = get_pageblock_migratetype(page);
+	old_block_type = get_pageblock_migratetype(page);
 
 	/*
 	 * This can happen due to races and we want to prevent broken
 	 * highatomic accounting.
 	 */
-	if (is_migrate_highatomic(block_type))
+	if (is_migrate_highatomic(old_block_type))
 		goto single_page;
 
 	/* Take ownership for orders >= pageblock_order */
 	if (current_order >= pageblock_order) {
-		del_page_from_free_list(page, zone, current_order);
 		change_pageblock_range(page, current_order, start_type);
-		expand(zone, page, order, current_order, start_type);
-		return page;
+		goto single_page;
 	}
 
 	/*
@@ -2015,9 +1984,10 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 	if (!whole_block)
 		goto single_page;
 
+	free_pages = move_freepages_block(zone, page, start_type,
+						&movable_pages);
 	/* moving whole block can fail due to zone boundary conditions */
-	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
-					&free_pages, &movable_pages))
+	if (!free_pages)
 		goto single_page;
 
 	/*
@@ -2035,7 +2005,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 	 * vice versa, be conservative since we can't distinguish the
 	 * exact migratetype of non-movable pages.
 	 */
-	if (block_type == MIGRATE_MOVABLE)
+	if (old_block_type == MIGRATE_MOVABLE)
 		alike_pages = pageblock_nr_pages
 					- (free_pages + movable_pages);
 	else
@@ -2046,16 +2016,13 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 	 * compatible migratability as our allocation, claim the whole block.
 	 */
 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
-			page_group_by_mobility_disabled) {
-		move_freepages(zone, start_pfn, end_pfn, start_type);
+			page_group_by_mobility_disabled)
 		set_pageblock_migratetype(page, start_type);
-		return __rmqueue_smallest(zone, order, start_type);
-	}
+
+	return;
 
 single_page:
-	del_page_from_free_list(page, zone, current_order);
-	expand(zone, page, order, current_order, block_type);
-	return page;
+	move_to_free_list(page, zone, current_order, start_type);
 }
 
 /*
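
As a worked example of the claim heuristic both versions share (the free_pages + alike_pages >= (1 << (pageblock_order - 1)) test above): assuming pageblock_order is 9, which is common but configuration-dependent, a pageblock holds 512 pages and the whole block is claimed once at least 256 of them are free or of a compatible type. A small standalone sketch of that arithmetic, not kernel code:

	#include <stdio.h>

	/* Assumed for illustration; the real value is configuration-dependent. */
	#define PAGEBLOCK_ORDER 9

	/* Nonzero if the half-block heuristic would claim the whole pageblock. */
	static int should_claim_block(int free_pages, int alike_pages)
	{
		return free_pages + alike_pages >= (1 << (PAGEBLOCK_ORDER - 1));
	}

	int main(void)
	{
		printf("%d\n", should_claim_block(200, 80));  /* 280 >= 256 -> 1 */
		printf("%d\n", should_claim_block(100, 50));  /* 150 <  256 -> 0 */
		return 0;
	}

The other visible difference in this hunk is the single_page path: the reverted code takes the page off the free list and expands it for the caller, while the restored code only moves it to the start_type free list and leaves allocation to a later __rmqueue_smallest() pass.
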
@@ -2127,10 +2094,9 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
 	mt = get_pageblock_migratetype(page);
 	/* Only reserve normal pageblocks (i.e., they can merge with others) */
 	if (migratetype_is_mergeable(mt)) {
-		if (move_freepages_block(zone, page, MIGRATE_HIGHATOMIC) != -1) {
-			set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
-			zone->nr_reserved_highatomic += pageblock_nr_pages;
-		}
+		zone->nr_reserved_highatomic += pageblock_nr_pages;
+		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
+		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
 	}
 
 out_unlock:
@@ -2155,7 +2121,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 	struct zone *zone;
 	struct page *page;
 	int order;
-	int ret;
+	bool ret;
 	bool skip_unreserve_highatomic = false;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
@@ -2210,14 +2176,10 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * of pageblocks that cannot be completely freed
 			 * may increase.
 			 */
-			ret = move_freepages_block(zone, page, ac->migratetype);
-			/*
-			 * Reserving this block already succeeded, so this should
-			 * not fail on zone boundaries.
-			 */
-			WARN_ON_ONCE(ret == -1);
 			set_pageblock_migratetype(page, ac->migratetype);
-			if (ret > 0) {
+			ret = move_freepages_block(zone, page, ac->migratetype,
+									NULL);
+			if (ret) {
 				spin_unlock_irqrestore(&zone->lock, flags);
 				return ret;
 			}
@@ -2238,7 +2200,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
  * deviation from the rest of this file, to make the for loop
  * condition simpler.
  */
-static __always_inline struct page *
+static __always_inline bool
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 						unsigned int alloc_flags)
 {
@@ -2285,7 +2247,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 		goto do_steal;
 	}
 
-	return NULL;
+	return false;
 
 find_smallest:
 	for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
@@ -2305,14 +2267,14 @@ find_smallest:
 do_steal:
 	page = get_page_from_free_area(area, fallback_mt);
 
-	/* take off list, maybe claim block, expand remainder */
-	page = steal_suitable_fallback(zone, page, current_order, order,
-				       start_migratetype, alloc_flags, can_steal);
+	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
+								can_steal);
 
 	trace_mm_page_alloc_extfrag(page, order, current_order,
 		start_migratetype, fallback_mt);
 
-	return page;
+	return true;
 }
 
 /*
@@ -2343,15 +2305,15 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 			return page;
 		}
 	}
 
+retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
 		if (!cma_redirect_restricted() && alloc_flags & ALLOC_CMA)
 			page = __rmqueue_cma_fallback(zone, order);
 
-		if (!page)
-			page = __rmqueue_fallback(zone, order, migratetype,
-								alloc_flags);
+		if (!page && __rmqueue_fallback(zone, order, migratetype,
+								alloc_flags))
+			goto retry;
 	}
 
 	return page;
 }
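
The hunk above restores the older control flow: __rmqueue_fallback() only reports whether it managed to steal into the requested free list, and the caller jumps back to __rmqueue_smallest() rather than receiving a page directly. A toy model of the restored retry loop, with made-up free-list counters instead of kernel structures:

	#include <stdio.h>

	static int native_pages;          /* pages on the requested free list */
	static int fallback_pages = 1;    /* pages stealable from another type */

	/* Stand-in for __rmqueue_smallest(): take from the requested list. */
	static int rmqueue_smallest(void)
	{
		if (native_pages > 0) {
			native_pages--;
			return 1;
		}
		return 0;
	}

	/* Stand-in for the restored __rmqueue_fallback(): steal pages onto the
	 * requested list and report success, instead of returning a page. */
	static int rmqueue_fallback(void)
	{
		if (fallback_pages > 0) {
			native_pages += fallback_pages;
			fallback_pages = 0;
			return 1;
		}
		return 0;
	}

	/* Shape of the restored __rmqueue(): retry after a successful steal. */
	static int rmqueue(void)
	{
	retry:
		if (rmqueue_smallest())
			return 1;
		if (rmqueue_fallback())
			goto retry;
		return 0;
	}

	int main(void)
	{
		printf("allocated: %d\n", rmqueue());
		return 0;
	}

The upstream variant being reverted avoided the retry by having __rmqueue_fallback() hand back the stolen page itself; the revert trades that back for the simpler bool-returning interface.
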
@@ -2837,10 +2799,12 @@ int __isolate_free_page(struct page *page, unsigned int order)
 		 * Only change normal pageblocks (i.e., they can merge
 		 * with others)
 		 */
-		if (migratetype_is_mergeable(mt) &&
-		    move_freepages_block(zone, page,
-					 MIGRATE_MOVABLE) != -1)
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		if (migratetype_is_mergeable(mt)) {
+			set_pageblock_migratetype(page,
+						  MIGRATE_MOVABLE);
+			move_freepages_block(zone, page,
+						MIGRATE_MOVABLE, NULL);
+		}
 	}
 }

mm/page_isolation.c

@@ -179,18 +179,15 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 	unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
 			migratetype, isol_flags);
 	if (!unmovable) {
-		int nr_pages;
+		unsigned long nr_pages;
 		int mt = get_pageblock_migratetype(page);
 
-		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
-		/* Block spans zone boundaries? */
-		if (nr_pages == -1) {
-			spin_unlock_irqrestore(&zone->lock, flags);
-			return -EBUSY;
-		}
-		__mod_zone_freepage_state(zone, -nr_pages, mt);
 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 		zone->nr_isolate_pageblock++;
+		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
+							NULL);
+		__mod_zone_freepage_state(zone, -nr_pages, mt);
 		spin_unlock_irqrestore(&zone->lock, flags);
 		return 0;
 	}
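
One visible consequence in the hunk above: the upstream code being reverted moved the free pages first and returned -EBUSY when move_freepages_block() reported -1 for a block straddling a zone boundary, while the restored code marks the block MIGRATE_ISOLATE first and simply feeds whatever count comes back into the freepage accounting. A small sketch of the restored accounting path, using illustrative numbers and the restored 0-on-straddle convention:

	#include <stdio.h>

	/* Restored convention modelled here: 0 pages moved when the block
	 * straddles the zone boundary, otherwise the free pages in the block. */
	static int move_block_restored(int straddles, int free_pages)
	{
		return straddles ? 0 : free_pages;
	}

	int main(void)
	{
		long freepage_state = 512;   /* illustrative starting counter */

		/* Normal block: the counter drops by the pages moved to isolate. */
		freepage_state -= move_block_restored(0, 8);

		/* Straddling block: nothing moved, the counter is untouched
		 * (the reverted upstream code bailed out with -EBUSY instead). */
		freepage_state -= move_block_restored(1, 8);

		printf("freepage_state = %ld\n", freepage_state);   /* 504 */
		return 0;
	}
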
@@ -210,7 +207,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 static void unset_migratetype_isolate(struct page *page, int migratetype)
 {
 	struct zone *zone;
-	unsigned long flags;
+	unsigned long flags, nr_pages;
 	bool isolated_page = false;
 	unsigned int order;
 	struct page *buddy;
@@ -256,12 +253,7 @@ static void unset_migratetype_isolate(struct page *page, int migratetype)
 	 * allocation.
 	 */
 	if (!isolated_page) {
-		int nr_pages = move_freepages_block(zone, page, migratetype);
-		/*
-		 * Isolating this block already succeeded, so this
-		 * should not fail on zone boundaries.
-		 */
-		WARN_ON_ONCE(nr_pages == -1);
+		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
 		__mod_zone_freepage_state(zone, nr_pages, migratetype);
 	}
 	set_pageblock_migratetype(page, migratetype);