Revert "BACKPORT: mm: page_alloc: consolidate free page accounting"

This reverts commit 3dc7946030.

Change-Id: I2dce18da522526ea7b48c446dcfaf1f86d853552
Bug: 420771453
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Author: Kalesh Singh <kaleshsingh@google.com>
Date:   2025-05-27 21:09:52 -07:00
parent 8a91cd1d26
commit 7d424e0f80

5 changed files with 119 additions and 121 deletions
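The substance of the revert is an accounting-helper swap. The consolidated account_freepages() (removed from mm/page_alloc.c below) folds the NR_FREE_PAGES/NR_FREE_CMA_PAGES updates and the MIGRATE_ISOLATE check into one helper on the freelist add/move/delete paths; the restored scheme re-adds __mod_zone_freepage_state() to include/linux/vmstat.h and leaves the isolate check to each call site. A condensed sketch of the two schemes, with struct zone and the vmstat counters reduced to plain globals (stand-ins for illustration only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum migratetype { MIGRATE_MOVABLE, MIGRATE_CMA, MIGRATE_ISOLATE };

static long nr_free_pages;	/* stand-in for the zone's NR_FREE_PAGES */
static long nr_free_cma_pages;	/* stand-in for NR_FREE_CMA_PAGES */

static bool is_migrate_isolate(int mt) { return mt == MIGRATE_ISOLATE; }
static bool is_migrate_cma(int mt) { return mt == MIGRATE_CMA; }

/* Consolidated helper (removed by this revert): the isolate check lives
 * inside, so every freelist add/move/delete makes one unconditional call. */
static void account_freepages(long nr_pages, int migratetype)
{
	if (is_migrate_isolate(migratetype))
		return;
	nr_free_pages += nr_pages;
	if (is_migrate_cma(migratetype))
		nr_free_cma_pages += nr_pages;
}

/* Restored helper (re-added to vmstat.h): no isolate check, so each
 * caller must guard with !is_migrate_isolate() itself. */
static void __mod_zone_freepage_state(long nr_pages, int migratetype)
{
	nr_free_pages += nr_pages;
	if (is_migrate_cma(migratetype))
		nr_free_cma_pages += nr_pages;
}

int main(void)
{
	int order = 3, mt = MIGRATE_CMA;

	account_freepages(1L << order, mt);		/* consolidated style */

	if (!is_migrate_isolate(mt))			/* restored style */
		__mod_zone_freepage_state(-(1L << order), mt);

	printf("free=%ld cma=%ld\n", nr_free_pages, nr_free_cma_pages);
	return 0;
}

The hunks below restore those per-call-site updates in rmqueue_buddy(), __isolate_free_page(), take_page_off_buddy() and the guard-page code, and rmqueue_bulk() again adjusts NR_FREE_PAGES and NR_FREE_CMA_PAGES itself.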

--- a/include/linux/mm.h
+++ b/include/linux/mm.h

@@ -3757,22 +3757,24 @@ static inline bool page_is_guard(struct page *page)
 	return PageGuard(page);
 }
 
-bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
+bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
+		      int migratetype);
 static inline bool set_page_guard(struct zone *zone, struct page *page,
-				  unsigned int order)
+				  unsigned int order, int migratetype)
 {
 	if (!debug_guardpage_enabled())
 		return false;
-	return __set_page_guard(zone, page, order);
+	return __set_page_guard(zone, page, order, migratetype);
 }
 
-void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
+void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
+			int migratetype);
 static inline void clear_page_guard(struct zone *zone, struct page *page,
-				    unsigned int order)
+				    unsigned int order, int migratetype)
 {
 	if (!debug_guardpage_enabled())
 		return;
-	__clear_page_guard(zone, page, order);
+	__clear_page_guard(zone, page, order, migratetype);
 }
 
 #else /* CONFIG_DEBUG_PAGEALLOC */
@@ -3782,9 +3784,9 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
 static inline bool debug_guardpage_enabled(void) { return false; }
 static inline bool page_is_guard(struct page *page) { return false; }
 static inline bool set_page_guard(struct zone *zone, struct page *page,
-				  unsigned int order) { return false; }
+				  unsigned int order, int migratetype) { return false; }
 static inline void clear_page_guard(struct zone *zone, struct page *page,
-				    unsigned int order) {}
+				    unsigned int order, int migratetype) {}
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 #ifdef __HAVE_ARCH_GATE_AREA

--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h

@@ -487,6 +487,14 @@ static inline void node_stat_sub_folio(struct folio *folio,
 	mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio));
 }
 
+static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
+					     int migratetype)
+{
+	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
+	if (is_migrate_cma(migratetype))
+		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
+}
+
 extern const char * const vmstat_text[];
 
 static inline const char *zone_stat_name(enum zone_stat_item item)

--- a/mm/debug_page_alloc.c
+++ b/mm/debug_page_alloc.c

@@ -32,7 +32,8 @@ static int __init debug_guardpage_minorder_setup(char *buf)
 }
 early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
 
-bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order)
+bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
+		      int migratetype)
 {
 	if (order >= debug_guardpage_minorder())
 		return false;
@@ -40,12 +41,19 @@ bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order)
 	__SetPageGuard(page);
 	INIT_LIST_HEAD(&page->buddy_list);
 	set_page_private(page, order);
+	/* Guard pages are not available for any usage */
+	if (!is_migrate_isolate(migratetype))
+		__mod_zone_freepage_state(zone, -(1 << order), migratetype);
 
 	return true;
 }
 
-void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order)
+void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order,
+			int migratetype)
 {
 	__ClearPageGuard(page);
 	set_page_private(page, 0);
+	if (!is_migrate_isolate(migratetype))
+		__mod_zone_freepage_state(zone, (1 << order), migratetype);
 }

--- a/mm/internal.h
+++ b/mm/internal.h

@@ -1213,6 +1213,11 @@ static inline bool is_migrate_highatomic(enum migratetype migratetype)
 	return migratetype == MIGRATE_HIGHATOMIC;
 }
 
+static inline bool is_migrate_highatomic_page(struct page *page)
+{
+	return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC;
+}
+
 void setup_zone_pageset(struct zone *zone);
 
 struct migration_target_control {

--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c

@@ -756,33 +756,23 @@ compaction_capture(struct capture_control *capc, struct page *page,
 }
 #endif /* CONFIG_COMPACTION */
 
-static inline void account_freepages(struct zone *zone, int nr_pages,
-				     int migratetype)
-{
-	if (is_migrate_isolate(migratetype))
-		return;
-
-	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
-
-	if (is_migrate_cma(migratetype))
-		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
-}
-
 /* Used for pages not on another list */
-static inline void __add_to_free_list(struct page *page, struct zone *zone,
-				      unsigned int order, int migratetype,
-				      bool tail)
+static inline void add_to_free_list(struct page *page, struct zone *zone,
+				    unsigned int order, int migratetype)
 {
 	struct free_area *area = &zone->free_area[order];
 
-	VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
-		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
-		     get_pageblock_migratetype(page), migratetype, 1 << order);
-
-	if (tail)
-		list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
-	else
-		list_add(&page->buddy_list, &area->free_list[migratetype]);
+	list_add(&page->buddy_list, &area->free_list[migratetype]);
+	area->nr_free++;
+}
+
+/* Used for pages not on another list */
+static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
+					 unsigned int order, int migratetype)
+{
+	struct free_area *area = &zone->free_area[order];
+
+	list_add_tail(&page->buddy_list, &area->free_list[migratetype]);
 	area->nr_free++;
 }
@@ -792,28 +782,16 @@ static inline void __add_to_free_list(struct page *page, struct zone *zone,
  * allocation again (e.g., optimization for memory onlining).
  */
 static inline void move_to_free_list(struct page *page, struct zone *zone,
-				     unsigned int order, int old_mt, int new_mt)
+				     unsigned int order, int migratetype)
 {
 	struct free_area *area = &zone->free_area[order];
 
-	/* Free page moving can fail, so it happens before the type update */
-	VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt,
-		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
-		     get_pageblock_migratetype(page), old_mt, 1 << order);
-
-	list_move_tail(&page->buddy_list, &area->free_list[new_mt]);
-
-	account_freepages(zone, -(1 << order), old_mt);
-	account_freepages(zone, 1 << order, new_mt);
+	list_move_tail(&page->buddy_list, &area->free_list[migratetype]);
 }
 
-static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
-					     unsigned int order, int migratetype)
+static inline void del_page_from_free_list(struct page *page, struct zone *zone,
+					   unsigned int order)
 {
-	VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype,
-		     "page type is %lu, passed migratetype is %d (nr=%d)\n",
-		     get_pageblock_migratetype(page), migratetype, 1 << order);
-
 	/* clear reported state and update reported page count */
 	if (page_reported(page))
 		__ClearPageReported(page);
@@ -824,13 +802,6 @@ static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
 	zone->free_area[order].nr_free--;
 }
 
-static inline void del_page_from_free_list(struct page *page, struct zone *zone,
-					   unsigned int order, int migratetype)
-{
-	__del_page_from_free_list(page, zone, order, migratetype);
-	account_freepages(zone, -(1 << order), migratetype);
-}
-
 static inline struct page *get_page_from_free_area(struct free_area *area,
 						   int migratetype)
 {
@@ -915,17 +886,16 @@ static inline void __free_one_page(struct page *page,
 	VM_BUG_ON_PAGE(page->flags & check_flags, page);
 
 	VM_BUG_ON(migratetype == -1);
+	if (likely(!is_migrate_isolate(migratetype)))
+		__mod_zone_freepage_state(zone, 1 << order, migratetype);
+
 	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
-	account_freepages(zone, 1 << order, migratetype);
-
 	while (order < max_order) {
-		int buddy_mt = migratetype;
-
 		if (compaction_capture(capc, page, order, migratetype)) {
-			account_freepages(zone, -(1 << order), migratetype);
+			__mod_zone_freepage_state(zone, -(1 << order),
+						  migratetype);
 			return;
 		}
@@ -940,12 +910,19 @@
 			 * pageblock isolation could cause incorrect freepage or CMA
 			 * accounting or HIGHATOMIC accounting.
 			 */
-			buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
+			int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn);
 
-			if (migratetype != buddy_mt &&
-			    (!migratetype_is_mergeable(migratetype) ||
-			     !migratetype_is_mergeable(buddy_mt)))
-				goto done_merging;
+			if (migratetype != buddy_mt) {
+				if (!migratetype_is_mergeable(migratetype) ||
+				    !migratetype_is_mergeable(buddy_mt))
+					goto done_merging;
+				/*
+				 * Match buddy type. This ensures that
+				 * an expand() down the line puts the
+				 * sub-blocks on the right freelists.
+				 */
+				set_pageblock_migratetype(buddy, migratetype);
+			}
 		}
 
 		/*
@@ -953,19 +930,9 @@
 		 * merge with it and move up one order.
 		 */
 		if (page_is_guard(buddy))
-			clear_page_guard(zone, buddy, order);
+			clear_page_guard(zone, buddy, order, migratetype);
 		else
-			__del_page_from_free_list(buddy, zone, order, buddy_mt);
-
-		if (unlikely(buddy_mt != migratetype)) {
-			/*
-			 * Match buddy type. This ensures that an
-			 * expand() down the line puts the sub-blocks
-			 * on the right freelists.
-			 */
-			set_pageblock_migratetype(buddy, migratetype);
-		}
-
+			del_page_from_free_list(buddy, zone, order);
 		combined_pfn = buddy_pfn & pfn;
 		page = page + (combined_pfn - pfn);
 		pfn = combined_pfn;
@@ -984,7 +951,10 @@ done_merging:
 	else
 		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);
 
-	__add_to_free_list(page, zone, order, migratetype, to_tail);
+	if (to_tail)
+		add_to_free_list_tail(page, zone, order, migratetype);
+	else
+		add_to_free_list(page, zone, order, migratetype);
 
 	/* Notify page reporting subsystem of freed page */
 	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
@@ -1531,10 +1501,10 @@ static inline void expand(struct zone *zone, struct page *page,
 		 * Corresponding page table entries will not be touched,
 		 * pages will stay not present in virtual address space
 		 */
-		if (set_page_guard(zone, &page[size], high))
+		if (set_page_guard(zone, &page[size], high, migratetype))
 			continue;
 
-		__add_to_free_list(&page[size], zone, high, migratetype, false);
+		add_to_free_list(&page[size], zone, high, migratetype);
 		set_buddy_order(&page[size], high);
 	}
 }
@@ -1721,7 +1691,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 		page = get_page_from_free_area(area, migratetype);
 		if (!page)
 			continue;
-		del_page_from_free_list(page, zone, current_order, migratetype);
+		del_page_from_free_list(page, zone, current_order);
 		expand(zone, page, order, current_order, migratetype);
 		trace_mm_page_alloc_zone_locked(page, order, migratetype,
 				pcp_allowed_order(order) &&
@@ -1761,7 +1731,7 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  * type's freelist.
  */
 static int move_freepages(struct zone *zone, unsigned long start_pfn,
-			  unsigned long end_pfn, int old_mt, int new_mt)
+			  unsigned long end_pfn, int migratetype)
 {
 	struct page *page;
 	unsigned long pfn;
@@ -1783,14 +1753,12 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
 
 		order = buddy_order(page);
-
-		move_to_free_list(page, zone, order, old_mt, new_mt);
-
+		move_to_free_list(page, zone, order, migratetype);
 		pfn += 1 << order;
 		pages_moved += 1 << order;
 	}
 
-	set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt);
+	set_pageblock_migratetype(pfn_to_page(start_pfn), migratetype);
 
 	return pages_moved;
 }
@@ -1848,7 +1816,7 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
 }
 
 static int move_freepages_block(struct zone *zone, struct page *page,
-				int old_mt, int new_mt)
+				int migratetype)
 {
 	unsigned long start_pfn, end_pfn;
 
@@ -1856,7 +1824,7 @@ static int move_freepages_block(struct zone *zone, struct page *page,
 				       NULL, NULL))
 		return -1;
 
-	return move_freepages(zone, start_pfn, end_pfn, old_mt, new_mt);
+	return move_freepages(zone, start_pfn, end_pfn, migratetype);
 }
 
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -1928,6 +1896,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
 				  int migratetype)
 {
 	unsigned long start_pfn, end_pfn, pfn;
+	int nr_moved, mt;
 
 	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
 				       NULL, NULL))
@@ -1942,9 +1911,11 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
 	if (pfn != start_pfn) {
 		struct page *buddy = pfn_to_page(pfn);
 		int order = buddy_order(buddy);
+		int mt = get_pfnblock_migratetype(buddy, pfn);
 
-		del_page_from_free_list(buddy, zone, order,
-					get_pfnblock_migratetype(buddy, pfn));
+		if (!is_migrate_isolate(mt))
+			__mod_zone_freepage_state(zone, -(1UL << order), mt);
+		del_page_from_free_list(buddy, zone, order);
 		set_pageblock_migratetype(page, migratetype);
 		split_large_buddy(zone, buddy, pfn, order);
 		return true;
@@ -1952,17 +1923,23 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
 	/* We're the starting block of a larger buddy */
 	if (PageBuddy(page) && buddy_order(page) > pageblock_order) {
+		int mt = get_pfnblock_migratetype(page, pfn);
 		int order = buddy_order(page);
 
-		del_page_from_free_list(page, zone, order,
-					get_pfnblock_migratetype(page, pfn));
+		if (!is_migrate_isolate(mt))
+			__mod_zone_freepage_state(zone, -(1UL << order), mt);
+		del_page_from_free_list(page, zone, order);
 		set_pageblock_migratetype(page, migratetype);
 		split_large_buddy(zone, page, pfn, order);
 		return true;
 	}
 
move:
-	move_freepages(zone, start_pfn, end_pfn,
-		       get_pfnblock_migratetype(page, start_pfn), migratetype);
+	mt = get_pfnblock_migratetype(page, start_pfn);
+	nr_moved = move_freepages(zone, start_pfn, end_pfn, migratetype);
+	if (!is_migrate_isolate(mt))
+		__mod_zone_freepage_state(zone, -nr_moved, mt);
+	else if (!is_migrate_isolate(migratetype))
+		__mod_zone_freepage_state(zone, nr_moved, migratetype);
 	return true;
 }
 #endif /* CONFIG_MEMORY_ISOLATION */
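The subtlest restored piece is the accounting in move_freepages_block_isolate() above: NR_FREE_PAGES tracks only non-isolated free pages, and this function exists to move a whole pageblock into or out of MIGRATE_ISOLATE, so at most one side of the move needs a counter adjustment. A stand-in sketch of that rule (plain counter instead of zone vmstat; hypothetical names; assumes, as the restored code does, that the old type is read before move_freepages() retags the block):

/* Stand-in: tracked_free counts only non-isolated free pages.
 * In practice one of old_mt/new_mt is the isolate type. */
enum { SK_MOVABLE, SK_ISOLATE };
static long tracked_free;

static void account_block_isolate_move(long nr_moved, int old_mt, int new_mt)
{
	if (old_mt != SK_ISOLATE)
		tracked_free -= nr_moved;	/* block entered isolation */
	else if (new_mt != SK_ISOLATE)
		tracked_free += nr_moved;	/* block left isolation */
}

This mirrors the restored mt/nr_moved logic at the end of the hunk: subtract when pages become isolated, add when they come back, and do nothing for an isolate-to-isolate move.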
@@ -2076,7 +2053,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 
 	/* Take ownership for orders >= pageblock_order */
 	if (current_order >= pageblock_order) {
-		del_page_from_free_list(page, zone, current_order, block_type);
+		del_page_from_free_list(page, zone, current_order);
 		change_pageblock_range(page, current_order, start_type);
 		expand(zone, page, order, current_order, start_type);
 		return page;
@@ -2126,12 +2103,12 @@
 	 */
 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
 			page_group_by_mobility_disabled) {
-		move_freepages(zone, start_pfn, end_pfn, block_type, start_type);
+		move_freepages(zone, start_pfn, end_pfn, start_type);
 		return __rmqueue_smallest(zone, order, start_type);
 	}
 
single_page:
-	del_page_from_free_list(page, zone, current_order, block_type);
+	del_page_from_free_list(page, zone, current_order);
 	expand(zone, page, order, current_order, block_type);
 	return page;
 }
@@ -2205,7 +2182,7 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone)
 	mt = get_pageblock_migratetype(page);
 	/* Only reserve normal pageblocks (i.e., they can merge with others) */
 	if (migratetype_is_mergeable(mt))
-		if (move_freepages_block(zone, page, mt,
+		if (move_freepages_block(zone, page,
 					 MIGRATE_HIGHATOMIC) != -1)
 			zone->nr_reserved_highatomic += pageblock_nr_pages;
 
@@ -2252,13 +2229,11 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < NR_PAGE_ORDERS; order++) {
 			struct free_area *area = &(zone->free_area[order]);
-			int mt;
 
 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
 			if (!page)
 				continue;
 
-			mt = get_pageblock_migratetype(page);
 			/*
 			 * In page freeing path, migratetype change is racy so
 			 * we can counter several free pages in a pageblock
@@ -2266,7 +2241,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * from highatomic to ac->migratetype. So we should
 			 * adjust the count once.
 			 */
-			if (is_migrate_highatomic(mt)) {
+			if (is_migrate_highatomic_page(page)) {
 				/*
 				 * It should never happen but changes to
 				 * locking could inadvertently allow a per-cpu
@@ -2288,8 +2263,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * of pageblocks that cannot be completely freed
 			 * may increase.
 			 */
-			ret = move_freepages_block(zone, page, mt,
-						   ac->migratetype);
+			ret = move_freepages_block(zone, page, ac->migratetype);
 			/*
 			 * Reserving this block already succeeded, so this should
 			 * not fail on zone boundaries.
@@ -2474,7 +2448,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		 * pages are ordered properly.
 		 */
 		list_add_tail(&page->pcp_list, list);
+		if (is_migrate_cma(get_pageblock_migratetype(page)))
+			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
+					      -(1 << order));
 	}
+
+	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
 	spin_unlock_irqrestore(&zone->lock, flags);
 
 	return i;
@@ -2902,9 +2881,11 @@ int __isolate_free_page(struct page *page, unsigned int order)
 		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
 		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
 			return 0;
+
+		__mod_zone_freepage_state(zone, -(1UL << order), mt);
 	}
 
-	del_page_from_free_list(page, zone, order, mt);
+	del_page_from_free_list(page, zone, order);
 
 	/*
 	 * Set the pageblock if the isolated page is at least half of a
@@ -2919,7 +2900,7 @@ int __isolate_free_page(struct page *page, unsigned int order)
 			 * with others)
 			 */
 			if (migratetype_is_mergeable(mt))
-				move_freepages_block(zone, page, mt,
+				move_freepages_block(zone, page,
 						     MIGRATE_MOVABLE);
 		}
 	}
@@ -3009,6 +2990,8 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 				return NULL;
 			}
 		}
+		__mod_zone_freepage_state(zone, -(1 << order),
+					  get_pageblock_migratetype(page));
 		spin_unlock_irqrestore(&zone->lock, flags);
 	} while (check_new_pages(page, order));
@@ -6953,9 +6936,8 @@ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 		BUG_ON(page_count(page));
 		BUG_ON(!PageBuddy(page));
-		VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE);
 		order = buddy_order(page);
-		del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE);
+		del_page_from_free_list(page, zone, order);
 		pfn += (1 << order);
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -6983,14 +6965,6 @@ bool is_free_buddy_page(struct page *page)
 EXPORT_SYMBOL(is_free_buddy_page);
 
 #ifdef CONFIG_MEMORY_FAILURE
-static inline void add_to_free_list(struct page *page, struct zone *zone,
-				    unsigned int order, int migratetype,
-				    bool tail)
-{
-	__add_to_free_list(page, zone, order, migratetype, tail);
-	account_freepages(zone, 1 << order, migratetype);
-}
-
 /*
  * Break down a higher-order page in sub-pages, and keep our target out of
  * buddy allocator.
@@ -7015,11 +6989,11 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
 		}
 		page = next_page;
 
-		if (set_page_guard(zone, current_buddy, high))
+		if (set_page_guard(zone, current_buddy, high, migratetype))
 			continue;
 
 		if (current_buddy != target) {
-			add_to_free_list(current_buddy, zone, high, migratetype, false);
+			add_to_free_list(current_buddy, zone, high, migratetype);
 			set_buddy_order(current_buddy, high);
 		}
 	}
@@ -7046,11 +7020,12 @@ bool take_page_off_buddy(struct page *page)
 			int migratetype = get_pfnblock_migratetype(page_head,
 								   pfn_head);
 
-			del_page_from_free_list(page_head, zone, page_order,
-						migratetype);
+			del_page_from_free_list(page_head, zone, page_order);
 			break_down_buddy_pages(zone, page_head, page, 0,
 					       page_order, migratetype);
 			SetPageHWPoisonTakenOff(page);
+			if (!is_migrate_isolate(migratetype))
+				__mod_zone_freepage_state(zone, -1, migratetype);
 			ret = true;
 			break;
 		}
@@ -7155,7 +7130,7 @@ static bool try_to_accept_memory_one(struct zone *zone)
 	list_del(&page->lru);
 	last = list_empty(&zone->unaccepted_pages);
 
-	account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+	__mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
 	__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
 	spin_unlock_irqrestore(&zone->lock, flags);
 
@@ -7223,7 +7198,7 @@ static bool __free_unaccepted(struct page *page)
 	spin_lock_irqsave(&zone->lock, flags);
 	first = list_empty(&zone->unaccepted_pages);
 	list_add_tail(&page->lru, &zone->unaccepted_pages);
-	account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
+	__mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
 	__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
 	spin_unlock_irqrestore(&zone->lock, flags);