UPSTREAM: mm: page_alloc: optimize free_unref_folios()

Move direct freeing of isolated pages to the lock-breaking block in the
second loop.  This saves an unnecessary migratetype reassessment.

Minor comment and local variable scoping cleanups.

Link: https://lkml.kernel.org/r/20240320180429.678181-3-hannes@cmpxchg.org
Change-Id: I480456d59c272cfc8470ac5d0644cc45bf684954
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit 9cbe97bad5cd75b5b493734bd2695febb8e95281)
Bug: 420771453
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Author:    Johannes Weiner <hannes@cmpxchg.org>
Date:      2024-03-20 14:02:07 -04:00
Committer: Kalesh Singh <kaleshsingh@google.com>
Parent:    606130dacb
Commit:    cb610236ed

@@ -2644,7 +2644,7 @@ void free_unref_folios(struct folio_batch *folios)
 	unsigned long __maybe_unused UP_flags;
 	struct per_cpu_pages *pcp = NULL;
 	struct zone *locked_zone = NULL;
-	int i, j, migratetype;
+	int i, j;
 
 	/* Prepare folios for freeing */
 	for (i = 0, j = 0; i < folios->nr; i++) {
@@ -2656,14 +2656,15 @@ void free_unref_folios(struct folio_batch *folios)
 		folio_unqueue_deferred_split(folio);
 		if (!free_pages_prepare(&folio->page, order, FPI_NONE))
 			continue;
 
 		/*
-		 * Free isolated folios and orders not handled on the PCP
-		 * directly to the allocator, see comment in free_unref_page.
+		 * Free orders not handled on the PCP directly to the
+		 * allocator.
 		 */
-		migratetype = get_pfnblock_migratetype(&folio->page, pfn);
-		if (!pcp_allowed_order(order) ||
-		    is_migrate_isolate(migratetype)) {
+		if (!pcp_allowed_order(order)) {
+			int migratetype;
+			migratetype = get_pfnblock_migratetype(&folio->page,
+							       pfn);
 			free_one_page(folio_zone(folio), &folio->page, pfn,
 				      order, migratetype, FPI_NONE);
 			continue;
@@ -2680,15 +2681,29 @@ void free_unref_folios(struct folio_batch *folios)
 		struct zone *zone = folio_zone(folio);
 		unsigned long pfn = folio_pfn(folio);
 		unsigned int order = (unsigned long)folio->private;
+		int migratetype;
 
 		folio->private = NULL;
 		migratetype = get_pfnblock_migratetype(&folio->page, pfn);
 
 		/* Different zone requires a different pcp lock */
-		if (zone != locked_zone) {
+		if (zone != locked_zone ||
+		    is_migrate_isolate(migratetype)) {
 			if (pcp) {
 				pcp_spin_unlock(pcp);
 				pcp_trylock_finish(UP_flags);
+				locked_zone = NULL;
+				pcp = NULL;
 			}
 
+			/*
+			 * Free isolated pages directly to the
+			 * allocator, see comment in free_unref_page.
+			 */
+			if (is_migrate_isolate(migratetype)) {
+				free_one_page(zone, &folio->page, pfn,
+					      order, migratetype, FPI_NONE);
+				continue;
+			}
+
 			/*
@@ -2701,7 +2716,6 @@ void free_unref_folios(struct folio_batch *folios)
 			pcp_trylock_finish(UP_flags);
 			free_one_page(zone, &folio->page, pfn,
 				      order, migratetype, FPI_NONE);
-			locked_zone = NULL;
 			continue;
 		}
 		locked_zone = zone;