UPSTREAM: mm: page_alloc: remove remnants of unlocked migratetype updates

The freelist hygiene patches made migratetype accesses fully protected
under the zone->lock.  Remove the remnants of the pre-hygiene race
handling from the MIGRATE_HIGHATOMIC code, which can no longer trigger.
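
For illustration only, here is a minimal userspace C sketch of the
invariant this relies on (toy_zone, steal_if_movable and the constants
are hypothetical names used only for this sketch; this is not the
kernel code): once every read and write of the block type is serialized
by a single lock, a holder of that lock can trust the value it just
read, so defensive re-checks of the kind removed below are dead code.

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Toy stand-ins for a zone and its pageblock type; not the kernel's types. */
  enum block_type { BT_MOVABLE, BT_HIGHATOMIC };

  struct toy_zone {
      pthread_mutex_t lock;       /* models zone->lock */
      enum block_type block_type; /* only read and written under lock */
  };

  /*
   * Because every access is serialized by the lock, the value read inside
   * the critical section cannot change before it is acted on; no re-check
   * ("did it become HIGHATOMIC behind our back?") is needed after lookup.
   */
  static bool steal_if_movable(struct toy_zone *z)
  {
      bool stolen = false;

      pthread_mutex_lock(&z->lock);
      if (z->block_type == BT_MOVABLE) {
          z->block_type = BT_HIGHATOMIC;
          stolen = true;
      }
      pthread_mutex_unlock(&z->lock);
      return stolen;
  }

  int main(void)
  {
      struct toy_zone z = {
          .lock = PTHREAD_MUTEX_INITIALIZER,
          .block_type = BT_MOVABLE,
      };

      /* The second call finds the block already converted and backs off. */
      int first = steal_if_movable(&z);
      int second = steal_if_movable(&z);

      printf("first steal: %d, second steal: %d\n", first, second);
      return 0;
  }

The hunks below remove exactly such re-checks: try_to_steal_block() now
takes the block type from its caller instead of re-deriving it, and
unreserve_highatomic_pageblock() drops the re-read of the pageblock
migratetype and the conditional highatomic accounting around it.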

Link: https://lkml.kernel.org/r/20250225001023.1494422-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Brendan Jackman <jackmanb@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Bug: 420836317
(cherry picked from commit 020396a581dc69be2d30939fabde6c029d847034)
Change-Id: Ia1266c34f09db1c404df7f37c1a9ff06d61c0cce
Signed-off-by: yipeng xiang <yipengxiang@honor.corp-partner.google.com>
Author:     Johannes Weiner
AuthorDate: 2025-02-24 19:08:25 -05:00
Committer:  Isaac Manjarres
Commit:     707dfe67d6 (parent 65b7c505d9)

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2082,20 +2082,10 @@ static inline bool boost_watermark(struct zone *zone)
 static struct page *
 try_to_steal_block(struct zone *zone, struct page *page,
 		   int current_order, int order, int start_type,
-		   unsigned int alloc_flags)
+		   int block_type, unsigned int alloc_flags)
 {
 	int free_pages, movable_pages, alike_pages;
 	unsigned long start_pfn;
-	int block_type;
-
-	block_type = get_pageblock_migratetype(page);
-
-	/*
-	 * This can happen due to races and we want to prevent broken
-	 * highatomic accounting.
-	 */
-	if (is_migrate_highatomic(block_type))
-		return NULL;
 
 	/* Take ownership for orders >= pageblock_order */
 	if (current_order >= pageblock_order) {
@@ -2280,22 +2270,12 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < NR_PAGE_ORDERS; order++) {
 			struct free_area *area = &(zone->free_area[order]);
-			int mt;
+			unsigned long size;
 
 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
 			if (!page)
 				continue;
 
-			mt = get_pageblock_migratetype(page);
-			/*
-			 * In page freeing path, migratetype change is racy so
-			 * we can counter several free pages in a pageblock
-			 * in this loop although we changed the pageblock type
-			 * from highatomic to ac->migratetype. So we should
-			 * adjust the count once.
-			 */
-			if (is_migrate_highatomic(mt)) {
-				unsigned long size;
 			/*
 			 * It should never happen but changes to
 			 * locking could inadvertently allow a per-cpu
@@ -2306,7 +2286,6 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			size = max(pageblock_nr_pages, 1UL << order);
 			size = min(size, zone->nr_reserved_highatomic);
 			zone->nr_reserved_highatomic -= size;
-			}
 
 			/*
 			 * Convert to ac->migratetype and avoid the normal
@@ -2318,10 +2297,12 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * may increase.
 			 */
 			if (order < pageblock_order)
-				ret = move_freepages_block(zone, page, mt,
+				ret = move_freepages_block(zone, page,
+							   MIGRATE_HIGHATOMIC,
 							   ac->migratetype);
 			else {
-				move_to_free_list(page, zone, order, mt,
+				move_to_free_list(page, zone, order,
+						  MIGRATE_HIGHATOMIC,
 						  ac->migratetype);
 				change_pageblock_range(page, order,
 						       ac->migratetype);
@@ -2395,7 +2376,8 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 
 		page = get_page_from_free_area(area, fallback_mt);
 		page = try_to_steal_block(zone, page, current_order, order,
-					  start_migratetype, alloc_flags);
+					  start_migratetype, fallback_mt,
+					  alloc_flags);
 		if (page)
 			goto got_one;
 	}
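
In the __rmqueue_fallback() hunk, the caller already knows the block's
migratetype (fallback_mt, the result of the fallback search) and now
passes it down rather than letting try_to_steal_block() re-read it from
the pageblock bitmap. A minimal userspace C sketch of that pattern,
with hypothetical names that are not the kernel interfaces:

  #include <assert.h>

  /* Hypothetical migratetype constants and block for this sketch only. */
  enum { MT_MOVABLE, MT_UNMOVABLE, MT_HIGHATOMIC };

  struct toy_block {
      int type;   /* stable while the caller holds the list lock */
  };

  /* Old shape: re-read the type and defend against a racy change. */
  static int steal_old(struct toy_block *b, int start_type)
  {
      if (b->type == MT_HIGHATOMIC)   /* guard that can no longer trip */
          return -1;
      b->type = start_type;
      return 0;
  }

  /* New shape: the caller hands over the type it already looked up. */
  static int steal_new(struct toy_block *b, int start_type, int block_type)
  {
      (void)block_type;   /* the real function feeds it to its stealing heuristics */
      b->type = start_type;
      return 0;
  }

  int main(void)
  {
      struct toy_block b = { .type = MT_MOVABLE };

      assert(steal_old(&b, MT_UNMOVABLE) == 0);
      b.type = MT_MOVABLE;
      assert(steal_new(&b, MT_UNMOVABLE, MT_MOVABLE) == 0);
      return 0;
  }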