BACKPORT: mm: use __page_cache_release() in folios_put()

Pass a pointer to the lruvec so we can take advantage of the
folio_lruvec_relock_irqsave().  Adjust the calling convention of
folio_lruvec_relock_irqsave() to suit and add a page_cache_release()
wrapper.

Link: https://lkml.kernel.org/r/20240227174254.710559-9-willy@infradead.org
Change-Id: Ifd67e108d7659ef983e7c4704bb0c83a3aaf88e2
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit f1ee018baee9f4e724e08859c2559323be768be3)
[ Fix trivial conflict in mm/swap.c: __page_cache_release() and
  folios_put_refs() - Kalesh Singh ]
Bug: 419599659
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
Author:    Matthew Wilcox (Oracle)
Date:      2024-02-27 17:42:42 +00:00
Committer: Kalesh Singh
Parent:    4d61851d14
Commit:    a8553b4e2a

2 changed files with 27 additions and 25 deletions
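
For orientation, the description above changes folio_lruvec_relock_irqsave() to take a struct lruvec ** and return void, so a caller can carry one locked lruvec across a batch of folios. Below is a minimal, hypothetical caller sketch written against that new convention; example_release_batch() is illustrative only (it is not part of this patch) and simply mirrors the mm/swap.c hunks further down:

static void example_release_batch(struct folio_batch *folios)
{
	struct lruvec *lruvec = NULL;	/* must start out NULL under the new convention */
	unsigned long flags;
	unsigned int i;

	for (i = 0; i < folio_batch_count(folios); i++) {
		struct folio *folio = folios->folios[i];

		if (!folio_test_lru(folio))
			continue;

		/* Drops and re-takes the lock only when this folio's lruvec differs. */
		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
		lruvec_del_folio(lruvec, folio);
		__folio_clear_lru_flags(folio);
	}

	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
}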

--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1696,18 +1696,18 @@ static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
 	return folio_lruvec_lock_irq(folio);
 }
 
-/* Don't lock again iff page's lruvec locked */
-static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
-		struct lruvec *locked_lruvec, unsigned long *flags)
+/* Don't lock again iff folio's lruvec locked */
+static inline void folio_lruvec_relock_irqsave(struct folio *folio,
+		struct lruvec **lruvecp, unsigned long *flags)
 {
-	if (locked_lruvec) {
-		if (folio_matches_lruvec(folio, locked_lruvec))
-			return locked_lruvec;
+	if (*lruvecp) {
+		if (folio_matches_lruvec(folio, *lruvecp))
+			return;
 
-		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
+		unlock_page_lruvec_irqrestore(*lruvecp, *flags);
 	}
 
-	return folio_lruvec_lock_irqsave(folio, flags);
+	*lruvecp = folio_lruvec_lock_irqsave(folio, flags);
 }
 
 #ifdef CONFIG_CGROUP_WRITEBACK

--- a/mm/swap.c
+++ b/mm/swap.c
@@ -77,26 +77,33 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
 	.lock = INIT_LOCAL_LOCK(lock),
 };
 
+static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
+		unsigned long *flagsp)
+{
+	if (folio_test_lru(folio)) {
+		folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
+		lruvec_del_folio(*lruvecp, folio);
+		__folio_clear_lru_flags(folio);
+	}
+}
+
 /*
  * This path almost never happens for VM activity - pages are normally freed
  * in batches.  But it gets used by networking - and for compound pages.
  */
-static void __page_cache_release(struct folio *folio)
+static void page_cache_release(struct folio *folio)
 {
-	if (folio_test_lru(folio)) {
-		struct lruvec *lruvec;
-		unsigned long flags;
+	struct lruvec *lruvec = NULL;
+	unsigned long flags;
 
-		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
-		lruvec_del_folio(lruvec, folio);
-		__folio_clear_lru_flags(folio);
+	__page_cache_release(folio, &lruvec, &flags);
+	if (lruvec)
 		unlock_page_lruvec_irqrestore(lruvec, flags);
-	}
 }
 
 static void __folio_put_small(struct folio *folio)
 {
-	__page_cache_release(folio);
+	page_cache_release(folio);
 	mem_cgroup_uncharge(folio);
 	free_unref_page(&folio->page, 0);
 }
@@ -110,7 +117,7 @@ static void __folio_put_large(struct folio *folio)
 	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
 	 */
 	if (!folio_test_hugetlb(folio))
-		__page_cache_release(folio);
+		page_cache_release(folio);
 	destroy_large_folio(folio);
 }
 
@@ -211,7 +218,7 @@ static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
 		if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
 			continue;
 
-		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
+		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
 		move_fn(lruvec, folio);
 
 		folio_set_lru(folio);
@@ -1012,12 +1019,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
 			continue;
 		}
 
-		if (folio_test_lru(folio)) {
-			lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
-									&flags);
-			lruvec_del_folio(lruvec, folio);
-			__folio_clear_lru_flags(folio);
-		}
+		__page_cache_release(folio, &lruvec, &flags);
 
 		if (j != i)
 			folios->folios[j] = folio;