From a7a0d95bca7a2161cdde8bb4fb85ca8fb0248360 Mon Sep 17 00:00:00 2001
From: Kalesh Singh
Date: Tue, 27 May 2025 21:07:03 -0700
Subject: [PATCH] Revert "BACKPORT: mm: page_alloc: optimize free_unref_folios()"

This reverts commit ab0ad8d1980c618ff6b69c175efcdb69539aac65.

Change-Id: I39d5a5398c6844abdd670e1d27c50e92c4235df9
Bug: 420771453
Signed-off-by: Kalesh Singh
---
 mm/page_alloc.c | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 19adf2d41e79..039961345aa0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2802,10 +2802,22 @@ void free_unref_page_list(struct list_head *list)
 
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
+		unsigned long pfn = page_to_pfn(page);
 		if (!free_pages_prepare(page, 0, FPI_NONE)) {
 			list_del(&page->lru);
 			continue;
 		}
+
+		/*
+		 * Free isolated pages directly to the allocator, see
+		 * comment in free_unref_page.
+		 */
+		migratetype = get_pfnblock_migratetype(page, pfn);
+		if (unlikely(is_migrate_isolate(migratetype))) {
+			list_del(&page->lru);
+			free_one_page(page_zone(page), page, pfn, 0, FPI_NONE);
+			continue;
+		}
 	}
 
 	trace_android_vh_free_unref_page_list_bypass(list, &skip_free);
@@ -2824,13 +2836,10 @@ void free_unref_page_list(struct list_head *list)
 		 * excessive lock hold times when freeing a large list of
 		 * pages.
 		 */
-		if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX ||
-		    is_migrate_isolate(migratetype)) {
+		if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
			if (pcp) {
 				pcp_spin_unlock(pcp);
 				pcp_trylock_finish(UP_flags);
-				locked_zone = NULL;
-				pcp = NULL;
 			}
 
 			/*
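
For context, the sketch below is a simplified, userspace-only illustration of the control flow this revert restores in free_unref_page_list(): isolated pages are diverted straight to the zone allocator during the preparation pass, and the batching pass only drops its lock when the zone changes or a batch limit is reached. All names here (struct page, struct zone, BATCH_MAX, free_to_buddy, free_to_pcp, page_list_free) are stand-ins invented for illustration, not the real mm/ APIs, and the sketch omits real locking, migratetypes beyond a boolean flag, and everything else the kernel actually does.

/*
 * Hedged sketch, not kernel code: two-pass freeing where isolated pages
 * never reach the per-CPU path, so the batching loop only breaks its
 * lock on a zone change or when the batch fills up.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define BATCH_MAX 2		/* small stand-in for SWAP_CLUSTER_MAX so the demo exercises the batch break */
#define NPAGES    8

struct zone { const char *name; };

struct page {
	struct zone *zone;
	bool isolated;		/* stand-in for is_migrate_isolate() */
	bool skip;		/* marks pages already freed in pass 1 */
};

static void free_to_buddy(struct page *p)
{
	printf("buddy  free: zone=%s (isolated)\n", p->zone->name);
}

static void free_to_pcp(struct page *p)
{
	printf("pcp    free: zone=%s\n", p->zone->name);
}

static void page_list_free(struct page *pages, size_t n)
{
	struct zone *locked_zone = NULL;
	unsigned int batch = 0;
	size_t i;

	/* Pass 1: prepare; divert isolated pages directly to the allocator. */
	for (i = 0; i < n; i++) {
		if (pages[i].isolated) {
			free_to_buddy(&pages[i]);
			pages[i].skip = true;
		}
	}

	/* Pass 2: batch the rest onto per-CPU lists under one "lock". */
	for (i = 0; i < n; i++) {
		struct page *p = &pages[i];

		if (p->skip)
			continue;

		/* Re-take the lock on zone change or when the batch is full. */
		if (p->zone != locked_zone || batch == BATCH_MAX) {
			if (locked_zone)
				printf("unlock zone=%s\n", locked_zone->name);
			printf("lock   zone=%s\n", p->zone->name);
			locked_zone = p->zone;
			batch = 0;
		}

		free_to_pcp(p);
		batch++;
	}

	if (locked_zone)
		printf("unlock zone=%s\n", locked_zone->name);
}

int main(void)
{
	struct zone normal = { "Normal" }, movable = { "Movable" };
	struct page pages[NPAGES] = {
		{ &normal, false }, { &normal, false }, { &normal, false },
		{ &normal, true  }, { &movable, false }, { &movable, false },
		{ &normal, false }, { &movable, false },
	};

	page_list_free(pages, NPAGES);
	return 0;
}

The point of the restored shape, as far as this sketch goes, is that isolated pages never enter the per-CPU path at all, so the batching loop does not need to re-check the migratetype when deciding whether to drop the lock.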