BACKPORT: mm: convert free_unref_page_list() to use folios
Most of its callees are not yet ready to accept a folio, but we know all
of the pages passed in are actually folios because they're linked
through ->lru.

Link: https://lkml.kernel.org/r/20240227174254.710559-3-willy@infradead.org
Change-Id: I32ee6ea69cd1bd3f446659474c9e9df580e2243b
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit 7c76d92253dbb7c53ba03a4cd6639113cd1f7d3a)
[ Fix trivial conflict due to vendor hook - Kalesh Singh ]
Bug: 420771453
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
@@ -2679,7 +2679,7 @@ void free_unref_page(struct page *page, unsigned int order)
 void free_unref_page_list(struct list_head *list)
 {
 	unsigned long __maybe_unused UP_flags;
-	struct page *page, *next;
+	struct folio *folio, *next;
 	struct per_cpu_pages *pcp = NULL;
 	struct zone *locked_zone = NULL;
 	int batch_count = 0;
@@ -2687,10 +2687,10 @@ void free_unref_page_list(struct list_head *list)
 	bool skip_free = false;
 
 	/* Prepare pages for freeing */
-	list_for_each_entry_safe(page, next, list, lru) {
-		unsigned long pfn = page_to_pfn(page);
-		if (!free_unref_page_prepare(page, pfn, 0)) {
-			list_del(&page->lru);
+	list_for_each_entry_safe(folio, next, list, lru) {
+		unsigned long pfn = folio_pfn(folio);
+		if (!free_unref_page_prepare(&folio->page, pfn, 0)) {
+			list_del(&folio->lru);
 			continue;
 		}
 
@@ -2698,10 +2698,11 @@ void free_unref_page_list(struct list_head *list)
 		 * Free isolated pages directly to the allocator, see
 		 * comment in free_unref_page.
 		 */
-		migratetype = get_pcppage_migratetype(page);
+		migratetype = get_pcppage_migratetype(&folio->page);
 		if (unlikely(is_migrate_isolate(migratetype))) {
-			list_del(&page->lru);
-			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
+			list_del(&folio->lru);
+			free_one_page(folio_zone(folio), &folio->page, pfn,
+					0, migratetype, FPI_NONE);
 			continue;
 		}
 	}
@@ -2710,16 +2711,16 @@ void free_unref_page_list(struct list_head *list)
 	if (skip_free)
 		return;
 
-	list_for_each_entry_safe(page, next, list, lru) {
-		struct zone *zone = page_zone(page);
+	list_for_each_entry_safe(folio, next, list, lru) {
+		struct zone *zone = folio_zone(folio);
 
-		list_del(&page->lru);
-		migratetype = get_pcppage_migratetype(page);
+		list_del(&folio->lru);
+		migratetype = get_pcppage_migratetype(&folio->page);
 
 		/*
 		 * Either different zone requiring a different pcp lock or
 		 * excessive lock hold times when freeing a large list of
-		 * pages.
+		 * folios.
 		 */
 		if (zone != locked_zone || batch_count == SWAP_CLUSTER_MAX) {
 			if (pcp) {
@@ -2730,15 +2731,16 @@ void free_unref_page_list(struct list_head *list)
 			batch_count = 0;
 
 			/*
-			 * trylock is necessary as pages may be getting freed
+			 * trylock is necessary as folios may be getting freed
 			 * from IRQ or SoftIRQ context after an IO completion.
 			 */
 			pcp_trylock_prepare(UP_flags);
 			pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 			if (unlikely(!pcp)) {
 				pcp_trylock_finish(UP_flags);
-				free_one_page(zone, page, page_to_pfn(page),
-					      0, migratetype, FPI_NONE);
+				free_one_page(zone, &folio->page,
+					      folio_pfn(folio), 0,
+					      migratetype, FPI_NONE);
 				locked_zone = NULL;
 				continue;
 			}
@@ -2756,8 +2758,8 @@ void free_unref_page_list(struct list_head *list)
 			migratetype = MIGRATE_MOVABLE;
 		}
 
-		trace_mm_page_free_batched(page);
-		free_unref_page_commit(zone, pcp, page, migratetype, 0);
+		trace_mm_page_free_batched(&folio->page);
+		free_unref_page_commit(zone, pcp, &folio->page, migratetype, 0);
 		batch_count++;
 	}
 
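
Note on the idiom used above: the conversion works because struct folio embeds a
struct page as its first member, so a list walk over folios via ->lru can still
hand &folio->page to callees that have not been converted yet (such as
free_unref_page_prepare() at the time of this patch). A minimal userspace sketch
of that layout guarantee, using simplified stand-in types rather than the real
kernel definitions:

/* Simplified stand-ins for the kernel types, for illustration only. */
#include <assert.h>
#include <stdio.h>

struct page {
	unsigned long flags;
};

struct folio {
	struct page page;	/* first member, as in the kernel definition */
};

/* A "legacy" helper that still takes a struct page. */
static void legacy_helper(struct page *page)
{
	printf("operating on page at %p\n", (void *)page);
}

int main(void)
{
	struct folio folio = { { 0 } };

	/* &folio and &folio.page alias the same object, so handing
	 * &folio->page to an unconverted callee is safe. */
	assert((void *)&folio == (void *)&folio.page);
	legacy_helper(&folio.page);
	return 0;
}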
|