Revert "UPSTREAM: mm: page_alloc: fix freelist movement during block conversion"

This reverts commit 4e814d99e0.

Change-Id: Ie2a654bcd14c184e12c90757fecdf2231bdbe068
Bug: 420771453
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
commit d47518de38 (parent 135ab7374e)
Author: Kalesh Singh <kaleshsingh@google.com>
Date:   2025-05-27 21:12:32 -07:00

3 changed files with 78 additions and 121 deletions
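
The gist of the API being restored below: the reverted upstream patch made move_freepages_block() return -1 when a pageblock straddles a zone boundary (so callers could back out), and dropped its num_movable out-parameter. The revert goes back to returning 0 on a boundary miss and reporting allocated-but-movable pages through the pointer. A minimal userspace sketch of the restored contract, with an invented page-state enum standing in for the kernel's struct page checks (illustrative only, not kernel code):

#include <stdio.h>

/* Invented stand-in; the kernel inspects struct page flags instead. */
enum page_state { PAGE_FREE, PAGE_MOVABLE_INUSE, PAGE_PINNED };

/*
 * Model of the restored move_freepages_block() contract: walk one
 * pageblock, count the free pages that would move to the target
 * freelist, and optionally report allocated-but-movable pages via the
 * out-parameter. Returns 0 if the block falls outside the zone (the
 * reverted upstream version returned -1 here instead).
 */
static int move_freepages_block_model(const enum page_state *block, int nr,
				      int zone_start, int zone_end,
				      int block_start, int *num_movable)
{
	int moved = 0;

	if (num_movable)
		*num_movable = 0;

	if (block_start < zone_start || block_start + nr > zone_end)
		return 0;	/* block spans the zone boundary */

	for (int i = 0; i < nr; i++) {
		if (block[i] == PAGE_FREE)
			moved++;	/* would move to the new freelist */
		else if (num_movable && block[i] == PAGE_MOVABLE_INUSE)
			(*num_movable)++;
	}
	return moved;
}

int main(void)
{
	enum page_state block[8] = { PAGE_FREE, PAGE_FREE, PAGE_MOVABLE_INUSE,
				     PAGE_PINNED, PAGE_FREE, PAGE_MOVABLE_INUSE,
				     PAGE_FREE, PAGE_FREE };
	int movable;
	int moved = move_freepages_block_model(block, 8, 0, 64, 0, &movable);

	printf("moved %d free pages, saw %d movable in-use pages\n",
	       moved, movable);
	return 0;
}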

diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h

@@ -34,7 +34,8 @@ static inline bool is_migrate_isolate(int migratetype)
 #define REPORT_FAILURE	0x2
 
 void set_pageblock_migratetype(struct page *page, int migratetype);
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype);
+int move_freepages_block(struct zone *zone, struct page *page,
+				int migratetype, int *num_movable);
 
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			     int migratetype, int flags, gfp_t gfp_flags);

diff --git a/mm/page_alloc.c b/mm/page_alloc.c

@@ -1796,8 +1796,9 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-static int move_freepages(struct zone *zone, unsigned long start_pfn,
-			  unsigned long end_pfn, int migratetype)
+static int move_freepages(struct zone *zone,
+			  unsigned long start_pfn, unsigned long end_pfn,
+			  int migratetype, int *num_movable)
 {
 	struct page *page;
 	unsigned long pfn;
@@ -1807,6 +1808,14 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
 	for (pfn = start_pfn; pfn <= end_pfn;) {
 		page = pfn_to_page(pfn);
 		if (!PageBuddy(page)) {
+			/*
+			 * We assume that pages that could be isolated for
+			 * migration are movable. But we don't actually try
+			 * isolating, as that would be expensive.
+			 */
+			if (num_movable &&
+					(PageLRU(page) || __PageMovable(page)))
+				(*num_movable)++;
 			pfn++;
 			continue;
 		}
@@ -1824,16 +1833,17 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
 	return pages_moved;
 }
 
-static bool prep_move_freepages_block(struct zone *zone, struct page *page,
-				      unsigned long *start_pfn,
-				      unsigned long *end_pfn,
-				      int *num_free, int *num_movable)
+int move_freepages_block(struct zone *zone, struct page *page,
+				int migratetype, int *num_movable)
 {
-	unsigned long pfn, start, end;
+	unsigned long start_pfn, end_pfn, pfn;
+
+	if (num_movable)
+		*num_movable = 0;
 
 	pfn = page_to_pfn(page);
-	start = pageblock_start_pfn(pfn);
-	end = pageblock_end_pfn(pfn) - 1;
+	start_pfn = pageblock_start_pfn(pfn);
+	end_pfn = pageblock_end_pfn(pfn) - 1;
 
 	/*
 	 * The caller only has the lock for @zone, don't touch ranges
@@ -1842,50 +1852,13 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
 	 * accompanied by other operations such as migratetype updates
	 * which also should be locked.
 	 */
-	if (!zone_spans_pfn(zone, start))
-		return false;
-	if (!zone_spans_pfn(zone, end))
-		return false;
-
-	*start_pfn = start;
-	*end_pfn = end;
-
-	if (num_free) {
-		*num_free = 0;
-		*num_movable = 0;
-		for (pfn = start; pfn <= end;) {
-			page = pfn_to_page(pfn);
-			if (PageBuddy(page)) {
-				int nr = 1 << buddy_order(page);
-
-				*num_free += nr;
-				pfn += nr;
-				continue;
-			}
-			/*
-			 * We assume that pages that could be isolated for
-			 * migration are movable. But we don't actually try
-			 * isolating, as that would be expensive.
-			 */
-			if (PageLRU(page) || __PageMovable(page))
-				(*num_movable)++;
-			pfn++;
-		}
-	}
-
-	return true;
-}
-
-int move_freepages_block(struct zone *zone, struct page *page,
-			 int migratetype)
-{
-	unsigned long start_pfn, end_pfn;
-
-	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
-				       NULL, NULL))
-		return -1;
+	if (!zone_spans_pfn(zone, start_pfn))
+		return 0;
+	if (!zone_spans_pfn(zone, end_pfn))
+		return 0;
 
-	return move_freepages(zone, start_pfn, end_pfn, migratetype);
+	return move_freepages(zone, start_pfn, end_pfn, migratetype,
+								num_movable);
 }
 
 static void change_pageblock_range(struct page *pageblock_page,
@@ -1970,37 +1943,33 @@ static inline bool boost_watermark(struct zone *zone)
 }
 
 /*
- * This function implements actual steal behaviour. If order is large enough, we
- * can claim the whole pageblock for the requested migratetype. If not, we check
- * the pageblock for constituent pages; if at least half of the pages are free
- * or compatible, we can still claim the whole block, so pages freed in the
- * future will be put on the correct free list. Otherwise, we isolate exactly
- * the order we need from the fallback block and leave its migratetype alone.
+ * This function implements actual steal behaviour. If order is large enough,
+ * we can steal whole pageblock. If not, we first move freepages in this
+ * pageblock to our migratetype and determine how many already-allocated pages
+ * are there in the pageblock with a compatible migratetype. If at least half
+ * of pages are free or compatible, we can change migratetype of the pageblock
+ * itself, so pages freed in the future will be put on the correct free list.
 */
-static struct page *
-steal_suitable_fallback(struct zone *zone, struct page *page,
-			int current_order, int order, int start_type,
-			unsigned int alloc_flags, bool whole_block)
+static void steal_suitable_fallback(struct zone *zone, struct page *page,
+		unsigned int alloc_flags, int start_type, bool whole_block)
 {
+	unsigned int current_order = buddy_order(page);
 	int free_pages, movable_pages, alike_pages;
-	unsigned long start_pfn, end_pfn;
-	int block_type;
+	int old_block_type;
 
-	block_type = get_pageblock_migratetype(page);
+	old_block_type = get_pageblock_migratetype(page);
 
 	/*
 	 * This can happen due to races and we want to prevent broken
 	 * highatomic accounting.
 	 */
-	if (is_migrate_highatomic(block_type))
+	if (is_migrate_highatomic(old_block_type))
 		goto single_page;
 
 	/* Take ownership for orders >= pageblock_order */
 	if (current_order >= pageblock_order) {
-		del_page_from_free_list(page, zone, current_order);
 		change_pageblock_range(page, current_order, start_type);
-		expand(zone, page, order, current_order, start_type);
-		return page;
+		goto single_page;
 	}
 
 	/*
@@ -2015,9 +1984,10 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 	if (!whole_block)
 		goto single_page;
 
+	free_pages = move_freepages_block(zone, page, start_type,
+						&movable_pages);
 	/* moving whole block can fail due to zone boundary conditions */
-	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
-				       &free_pages, &movable_pages))
+	if (!free_pages)
 		goto single_page;
 
 	/*
@@ -2035,7 +2005,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 	 * vice versa, be conservative since we can't distinguish the
 	 * exact migratetype of non-movable pages.
 	 */
-	if (block_type == MIGRATE_MOVABLE)
+	if (old_block_type == MIGRATE_MOVABLE)
 		alike_pages = pageblock_nr_pages
 					- (free_pages + movable_pages);
 	else
@@ -2046,16 +2016,13 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 	 * compatible migratability as our allocation, claim the whole block.
 	 */
 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
-			page_group_by_mobility_disabled) {
-		move_freepages(zone, start_pfn, end_pfn, start_type);
+			page_group_by_mobility_disabled)
 		set_pageblock_migratetype(page, start_type);
-		return __rmqueue_smallest(zone, order, start_type);
-	}
+
+	return;
 
 single_page:
-	del_page_from_free_list(page, zone, current_order);
-	expand(zone, page, order, current_order, block_type);
-	return page;
+	move_to_free_list(page, zone, current_order, start_type);
 }
 
 /*
@@ -2127,10 +2094,9 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
 	mt = get_pageblock_migratetype(page);
 	/* Only reserve normal pageblocks (i.e., they can merge with others) */
 	if (migratetype_is_mergeable(mt)) {
-		if (move_freepages_block(zone, page, MIGRATE_HIGHATOMIC) != -1) {
-			set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
 			zone->nr_reserved_highatomic += pageblock_nr_pages;
-		}
+		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
+		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
 	}
 
 out_unlock:
@@ -2155,7 +2121,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 	struct zone *zone;
 	struct page *page;
 	int order;
-	int ret;
+	bool ret;
 	bool skip_unreserve_highatomic = false;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
@@ -2210,14 +2176,10 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * of pageblocks that cannot be completely freed
 			 * may increase.
 			 */
-			ret = move_freepages_block(zone, page, ac->migratetype);
-			/*
-			 * Reserving this block already succeeded, so this should
-			 * not fail on zone boundaries.
-			 */
-			WARN_ON_ONCE(ret == -1);
 			set_pageblock_migratetype(page, ac->migratetype);
-			if (ret > 0) {
+			ret = move_freepages_block(zone, page, ac->migratetype,
+						   NULL);
+			if (ret) {
 				spin_unlock_irqrestore(&zone->lock, flags);
 				return ret;
 			}
@@ -2238,7 +2200,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
  * deviation from the rest of this file, to make the for loop
  * condition simpler.
  */
-static __always_inline struct page *
+static __always_inline bool
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 						unsigned int alloc_flags)
 {
@@ -2285,7 +2247,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 		goto do_steal;
 	}
 
-	return NULL;
+	return false;
 
 find_smallest:
 	for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
@@ -2305,14 +2267,14 @@ find_smallest:
 do_steal:
 	page = get_page_from_free_area(area, fallback_mt);
 
-	/* take off list, maybe claim block, expand remainder */
-	page = steal_suitable_fallback(zone, page, current_order, order,
-				       start_migratetype, alloc_flags, can_steal);
+	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
+								can_steal);
 
 	trace_mm_page_alloc_extfrag(page, order, current_order,
 		start_migratetype, fallback_mt);
 
-	return page;
+	return true;
 }
 
 /*
@@ -2343,15 +2305,15 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 			return page;
 		}
 	}
+retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
 	if (unlikely(!page)) {
 		if (!cma_redirect_restricted() && alloc_flags & ALLOC_CMA)
 			page = __rmqueue_cma_fallback(zone, order);
 
-		if (!page)
-			page = __rmqueue_fallback(zone, order, migratetype,
-						  alloc_flags);
+		if (!page && __rmqueue_fallback(zone, order, migratetype,
+						alloc_flags))
+			goto retry;
 	}
 	return page;
 }
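
The restored __rmqueue() above no longer receives a page from __rmqueue_fallback(); on a successful steal it loops back and retries its own freelist. A userspace sketch of just that control flow, with try_exact() and steal_from_fallback() as invented stubs for __rmqueue_smallest() and __rmqueue_fallback():

#include <stdbool.h>
#include <stdio.h>

static int freelist;			/* pages on our migratetype's list */

static bool try_exact(void)		/* __rmqueue_smallest() stand-in */
{
	if (freelist > 0) {
		freelist--;		/* take one page */
		return true;
	}
	return false;
}

static bool steal_from_fallback(void)	/* __rmqueue_fallback() stand-in */
{
	freelist += 4;			/* a block moved onto our list */
	return true;			/* success is all it reports now */
}

int main(void)
{
retry:
	if (!try_exact()) {
		if (steal_from_fallback())
			goto retry;	/* restored behaviour: go around again */
		puts("allocation failed");
		return 1;
	}
	puts("allocated from own freelist");
	return 0;
}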
@@ -2837,10 +2799,12 @@ int __isolate_free_page(struct page *page, unsigned int order)
 			/*
 			 * Only change normal pageblocks (i.e., they can merge
 			 * with others)
 			 */
-			if (migratetype_is_mergeable(mt) &&
+			if (migratetype_is_mergeable(mt)) {
+				set_pageblock_migratetype(page,
+							  MIGRATE_MOVABLE);
 				move_freepages_block(zone, page,
-						MIGRATE_MOVABLE) != -1)
-				set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+						MIGRATE_MOVABLE, NULL);
+			}
 		}
 	}
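
Also restored above is steal_suitable_fallback()'s claiming rule: convert the whole pageblock only when free plus compatible pages cover at least half of it. A standalone check of that threshold, assuming the common pageblock_order of 9 (512 pages, so the cutoff is 256):

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER	9	/* typical value; an assumption here */

/* Mirror of the restored test in steal_suitable_fallback(). */
static bool should_claim_block(int free_pages, int alike_pages)
{
	return free_pages + alike_pages >= (1 << (PAGEBLOCK_ORDER - 1));
}

int main(void)
{
	/* Hypothetical counts: 200 free + 80 compatible = 280 >= 256. */
	printf("claim whole block: %s\n",
	       should_claim_block(200, 80) ? "yes" : "no");
	return 0;
}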

diff --git a/mm/page_isolation.c b/mm/page_isolation.c

@@ -179,18 +179,15 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 	unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end,
 			migratetype, isol_flags);
 	if (!unmovable) {
-		int nr_pages;
+		unsigned long nr_pages;
 		int mt = get_pageblock_migratetype(page);
 
-		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
-		/* Block spans zone boundaries? */
-		if (nr_pages == -1) {
-			spin_unlock_irqrestore(&zone->lock, flags);
-			return -EBUSY;
-		}
-		__mod_zone_freepage_state(zone, -nr_pages, mt);
 		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 		zone->nr_isolate_pageblock++;
+		nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE,
+							NULL);
+		__mod_zone_freepage_state(zone, -nr_pages, mt);
 
 		spin_unlock_irqrestore(&zone->lock, flags);
 		return 0;
 	}
@@ -210,7 +207,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 static void unset_migratetype_isolate(struct page *page, int migratetype)
 {
 	struct zone *zone;
-	unsigned long flags;
+	unsigned long flags, nr_pages;
 	bool isolated_page = false;
 	unsigned int order;
 	struct page *buddy;
@@ -256,12 +253,7 @@ static void unset_migratetype_isolate(struct page *page, int migratetype)
 	 * allocation.
 	 */
 	if (!isolated_page) {
-		int nr_pages = move_freepages_block(zone, page, migratetype);
-		/*
-		 * Isolating this block already succeeded, so this
-		 * should not fail on zone boundaries.
-		 */
-		WARN_ON_ONCE(nr_pages == -1);
+		nr_pages = move_freepages_block(zone, page, migratetype, NULL);
 		__mod_zone_freepage_state(zone, nr_pages, migratetype);
 	}
 	set_pageblock_migratetype(page, migratetype);
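
One bookkeeping detail worth calling out in the isolation hunks above: each restored move_freepages_block() call is paired with __mod_zone_freepage_state(), subtracting the moved pages from the zone's free counter on isolation and adding them back on unisolation. A toy model of that pairing (struct zone_model and its counter are invented for illustration):

#include <stdio.h>

struct zone_model {
	long nr_free_pages;	/* stand-in for the zone vmstat counter */
};

/* Isolated pages stop counting as allocatable free pages. */
static void isolate_block(struct zone_model *zone, long nr_moved)
{
	zone->nr_free_pages -= nr_moved;	/* ~ __mod_zone_freepage_state(zone, -nr_pages, mt) */
}

/* Unisolation makes the same pages allocatable again. */
static void unisolate_block(struct zone_model *zone, long nr_moved)
{
	zone->nr_free_pages += nr_moved;	/* ~ __mod_zone_freepage_state(zone, nr_pages, mt) */
}

int main(void)
{
	struct zone_model zone = { .nr_free_pages = 1024 };

	isolate_block(&zone, 300);
	printf("after isolate:   %ld free\n", zone.nr_free_pages);
	unisolate_block(&zone, 300);
	printf("after unisolate: %ld free\n", zone.nr_free_pages);
	return 0;
}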