Revert "UPSTREAM: mm: page_alloc: fix highatomic typing in multi-block buddies"

This reverts commit b9e2be445a.

Change-Id: Iffcd9077371ecf2705acd742a2b86d25efa94dd7
Bug: 420771453
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Author: Kalesh Singh <kaleshsingh@google.com>
Date:   2025-05-27 21:08:21 -07:00
Parent: bbc65a78d2
Commit: be6d3cc085

@@ -2174,12 +2174,10 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 }
 
 /*
- * Reserve the pageblock(s) surrounding an allocation request for
- * exclusive use of high-order atomic allocations if there are no
- * empty page blocks that contain a page with a suitable order
+ * Reserve a pageblock for exclusive use of high-order atomic allocations if
+ * there are no empty page blocks that contain a page with a suitable order
  */
-static void reserve_highatomic_pageblock(struct page *page, int order,
-					 struct zone *zone)
+static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
 {
 	int mt;
 	unsigned long max_managed, flags;
@@ -2209,17 +2207,10 @@ static void reserve_highatomic_pageblock(struct page *page, int order,
 	/* Yoink! */
 	mt = get_pageblock_migratetype(page);
 	/* Only reserve normal pageblocks (i.e., they can merge with others) */
-	if (!migratetype_is_mergeable(mt))
-		goto out_unlock;
-
-	if (order < pageblock_order) {
-		if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
-			goto out_unlock;
-		zone->nr_reserved_highatomic += pageblock_nr_pages;
-	} else {
-		change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
-		zone->nr_reserved_highatomic += 1 << order;
-	}
+	if (migratetype_is_mergeable(mt))
+		if (move_freepages_block(zone, page, mt,
+					 MIGRATE_HIGHATOMIC) != -1)
+			zone->nr_reserved_highatomic += pageblock_nr_pages;
 
 out_unlock:
 	spin_unlock_irqrestore(&zone->lock, flags);
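
The hunk above drops the order-aware reservation added by the reverted patch: a buddy smaller than a pageblock retyped its whole block via move_freepages_block(), while a buddy spanning pageblocks retyped the range with change_pageblock_range() and accounted exactly 1 << order pages. As a rough standalone illustration of the two accounting paths (not kernel code; PAGEBLOCK_ORDER, the counter, and model_reserve() are illustrative stand-ins):

#include <stdio.h>

#define PAGEBLOCK_ORDER    9   /* illustrative: 2MB pageblocks of 4KB pages */
#define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)

static unsigned long nr_reserved_highatomic;  /* models zone->nr_reserved_highatomic */

static void model_reserve(int order)
{
	if (order < PAGEBLOCK_ORDER) {
		/* Buddy fits inside one pageblock: retype and account
		 * the whole block. */
		nr_reserved_highatomic += PAGEBLOCK_NR_PAGES;
	} else {
		/* Multi-block buddy: retype every block it spans and
		 * account exactly the pages it covers. */
		nr_reserved_highatomic += 1UL << order;
	}
	printf("order %2d -> reserve now %lu pages\n", order, nr_reserved_highatomic);
}

int main(void)
{
	model_reserve(3);   /* sub-block buddy: still accounts 512 pages */
	model_reserve(10);  /* buddy spanning two blocks: accounts 1024 pages */
	return 0;
}

After the revert, only the single-block path remains, so every reservation accounts pageblock_nr_pages regardless of the allocation's order.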
@@ -2231,7 +2222,7 @@ out_unlock:
  * intense memory pressure but failed atomic allocations should be easier
  * to recover from than an OOM.
  *
- * If @force is true, try to unreserve pageblocks even though highatomic
+ * If @force is true, try to unreserve a pageblock even though highatomic
  * pageblock is exhausted.
  */
 static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
@@ -2279,7 +2270,6 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * adjust the count once.
 			 */
 			if (is_migrate_highatomic(mt)) {
-				unsigned long size;
 				/*
 				 * It should never happen but changes to
 				 * locking could inadvertently allow a per-cpu
@@ -2287,9 +2277,9 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 				 * while unreserving so be safe and watch for
 				 * underflows.
 				 */
-				size = max(pageblock_nr_pages, 1UL << order);
-				size = min(size, zone->nr_reserved_highatomic);
-				zone->nr_reserved_highatomic -= size;
+				zone->nr_reserved_highatomic -= min(
+						pageblock_nr_pages,
+						zone->nr_reserved_highatomic);
 			}
 
 			/*
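
This hunk narrows the underflow guard back to a single pageblock: the reverted patch first widened the subtrahend to max(pageblock_nr_pages, 1UL << order), then clamped it to the current reserve before subtracting. A minimal standalone sketch of that clamped subtraction (clamped_sub() and the sample values are hypothetical, shown only to demonstrate the guard):

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL   /* illustrative value */

static unsigned long clamped_sub(unsigned long reserved, unsigned long order)
{
	unsigned long size = 1UL << order;

	if (size < PAGEBLOCK_NR_PAGES)  /* max(pageblock_nr_pages, 1UL << order) */
		size = PAGEBLOCK_NR_PAGES;
	if (size > reserved)            /* min(size, nr_reserved_highatomic) */
		size = reserved;
	return reserved - size;
}

int main(void)
{
	/* A stale per-cpu free can leave the reserve smaller than one
	 * block; the clamp yields 0 instead of a wrapped huge value. */
	printf("%lu\n", clamped_sub(300, 3));    /* -> 0 */
	printf("%lu\n", clamped_sub(1024, 10));  /* -> 0: whole 1024-page buddy */
	printf("%lu\n", clamped_sub(1024, 3));   /* -> 512: one pageblock's worth */
	return 0;
}

The clamp to the current reserve is what keeps that stale per-cpu free from wrapping the counter below zero.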
@@ -2301,19 +2291,11 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * of pageblocks that cannot be completely freed
 			 * may increase.
 			 */
-			if (order < pageblock_order)
-				ret = move_freepages_block(zone, page, mt,
-							   ac->migratetype);
-			else {
-				move_to_free_list(page, zone, order, mt,
-						  ac->migratetype);
-				change_pageblock_range(page, order,
-						       ac->migratetype);
-				ret = 1;
-			}
+			ret = move_freepages_block(zone, page, mt,
+						   ac->migratetype);
 			/*
-			 * Reserving the block(s) already succeeded,
-			 * so this should not fail on zone boundaries.
+			 * Reserving this block already succeeded, so this should
+			 * not fail on zone boundaries.
 			 */
 			WARN_ON_ONCE(ret == -1);
 			if (ret > 0) {
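
In the unreserve path above, the reverted code distinguished how pages return to ac->migratetype: a sub-pageblock buddy went through move_freepages_block(), which can return -1 for a block that straddles a zone boundary, while a multi-block buddy was relinked once with move_to_free_list(), its blocks retyped with change_pageblock_range(), and success reported unconditionally. A control-flow sketch with stub helpers (model_unreserve() and move_freepages_block_stub() are hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER 9   /* illustrative value */

/* Hypothetical stand-in: the kernel helper reports -1 when the block
 * cannot be moved, e.g. because it straddles a zone boundary. */
static int move_freepages_block_stub(bool crosses_zone_boundary)
{
	return crosses_zone_boundary ? -1 : 1;
}

static int model_unreserve(int order, bool crosses_zone_boundary)
{
	if (order < PAGEBLOCK_ORDER)
		/* Scan the pageblock and move each free page; may fail. */
		return move_freepages_block_stub(crosses_zone_boundary);

	/* The buddy covers whole pageblock(s): relink it once, retype
	 * the range, and report success unconditionally (ret = 1). */
	return 1;
}

int main(void)
{
	printf("order 3  -> ret %d\n", model_unreserve(3, false));
	printf("order 10 -> ret %d\n", model_unreserve(10, true));  /* never -1 */
	return 0;
}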
@@ -3605,7 +3587,7 @@ try_this_zone:
 			 * if the pageblock should be reserved for the future
 			 */
 			if (unlikely(alloc_flags & ALLOC_HIGHATOMIC))
-				reserve_highatomic_pageblock(page, order, zone);
+				reserve_highatomic_pageblock(page, zone);
 
 			return page;
 		} else {