BACKPORT: mm: count the number of anonymous THPs per size
Patch series "mm: count the number of anonymous THPs per size", v4. Knowing the number of transparent anon THPs in the system is crucial for performance analysis. It helps in understanding the ratio and distribution of THPs versus small folios throughout the system. Additionally, partial unmapping by userspace can lead to significant waste of THPs over time and increase memory reclamation pressure. We need this information for comprehensive system tuning. This patch (of 2): Let's track for each anonymous THP size, how many of them are currently allocated. We'll track the complete lifespan of an anon THP, starting when it becomes an anon THP ("large anon folio") (->mapping gets set), until it gets freed (->mapping gets cleared). Introduce a new "nr_anon" counter per THP size and adjust the corresponding counter in the following cases: * We allocate a new THP and call folio_add_new_anon_rmap() to map it the first time and turn it into an anon THP. * We split an anon THP into multiple smaller ones. * We migrate an anon THP, when we prepare the destination. * We free an anon THP back to the buddy. Note that AnonPages in /proc/meminfo currently tracks the total number of *mapped* anonymous *pages*, and therefore has slightly different semantics. In the future, we might also want to track "nr_anon_mapped" for each THP size, which might be helpful when comparing it to the number of allocated anon THPs (long-term pinning, stuck in swapcache, memory leaks, ...). Further note that for now, we only track anon THPs after they got their ->mapping set, for example via folio_add_new_anon_rmap(). If we would allocate some in the swapcache, they will only show up in the statistics for now after they have been mapped to user space the first time, where we call folio_add_new_anon_rmap(). [akpm@linux-foundation.org: documentation fixups, per David] Link: https://lkml.kernel.org/r/3e8add35-e26b-443b-8a04-1078f4bc78f6@redhat.com Link: https://lkml.kernel.org/r/20240824010441.21308-1-21cnbao@gmail.com Link: https://lkml.kernel.org/r/20240824010441.21308-2-21cnbao@gmail.com Change-Id: I80ad283f1434e8baf70bb629c14e73a019878abf Signed-off-by: Barry Song <v-songbaohua@oppo.com> Acked-by: David Hildenbrand <david@redhat.com> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Chris Li <chrisl@kernel.org> Cc: Chuanhua Han <hanchuanhua@oppo.com> Cc: Kairui Song <kasong@tencent.com> Cc: Kalesh Singh <kaleshsingh@google.com> Cc: Lance Yang <ioworker0@gmail.com> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Shuai Yuan <yuanshuai@oppo.com> Cc: Usama Arif <usamaarif642@gmail.com> Cc: Zi Yan <ziy@nvidia.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> (cherry picked from commit 5d65c8d758f2596c008009e39bb2614deed2c730) [ Fix trivial conflict in split_huge_page_to_list() - Kalesh Singh ] Bug: 419599659 Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -510,6 +510,11 @@ split_deferred
        it would free up some memory. Pages on split queue are going to
        be split under memory pressure, if splitting is possible.
 
+nr_anon
+       the number of anonymous THP we have in the whole system. These THPs
+       might be currently entirely mapped or have partially unmapped/unused
+       subpages.
+
 As the system ages, allocating huge pages may be expensive as the
 system uses memory compaction to copy data around memory to free a
 huge page for use. There are some counters in ``/proc/vmstat`` to help
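The "partially unmapped" wording above is worth making concrete: munmap()ing part of a THP does not lower nr_anon right away; the folio only leaves the counter once it is actually split (for example from the deferred split queue under memory pressure) or freed. A small illustration, as a sketch (assumes a THP-capable kernel with 2 MiB PMD-sized THPs; whether the mapping actually gets a THP depends on alignment and THP settings):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define THP_SIZE (2UL << 20)	/* assumed PMD-sized THP */

int main(void)
{
	char *buf = mmap(NULL, THP_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	/* Ask for THPs and populate; if the range happens to be PMD-aligned,
	 * the fault path may allocate one 2 MiB anon THP and bump nr_anon. */
	madvise(buf, THP_SIZE, MADV_HUGEPAGE);
	memset(buf, 1, THP_SIZE);

	/* Unmap half: the THP is now partially unmapped but still allocated,
	 * so it keeps counting in nr_anon until it is split or freed. */
	munmap(buf + THP_SIZE / 2, THP_SIZE / 2);

	getchar();	/* pause: inspect .../stats/nr_anon from another shell */
	return 0;
}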
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -267,6 +267,7 @@ enum mthp_stat_item {
 	MTHP_STAT_SPLIT,
 	MTHP_STAT_SPLIT_FAILED,
 	MTHP_STAT_SPLIT_DEFERRED,
+	MTHP_STAT_NR_ANON,
 	__MTHP_STAT_COUNT
 };
 
@@ -277,15 +278,25 @@ struct mthp_stat {
 #ifdef CONFIG_SYSFS
 DECLARE_PER_CPU(struct mthp_stat, mthp_stats);
 
-static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
 {
 	if (order <= 0 || order > PMD_ORDER)
 		return;
 
-	this_cpu_inc(mthp_stats.stats[order][item]);
+	this_cpu_add(mthp_stats.stats[order][item], delta);
+}
+
+static inline void count_mthp_stat(int order, enum mthp_stat_item item)
+{
+	mod_mthp_stat(order, item, 1);
 }
 
 unsigned long sum_mthp_stat(int order, enum mthp_stat_item item);
 #else
+static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
+{
+}
+
 static inline void count_mthp_stat(int order, enum mthp_stat_item item)
 {
 }
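For context (this function is not part of the diff): the read side that sysfs consumers hit sums these per-CPU buckets. Upstream implements sum_mthp_stat() in mm/huge_memory.c roughly as follows:

unsigned long sum_mthp_stat(int order, enum mthp_stat_item item)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mthp_stat *this = &per_cpu(mthp_stats, cpu);

		/* Unsynchronized read: mTHP stats are best-effort counters. */
		sum += this->stats[order][item];
	}

	return sum;
}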
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -558,6 +558,7 @@ DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
 DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
 DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
+DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
 
 static struct attribute *stats_attrs[] = {
 	&anon_fault_alloc_attr.attr,
@@ -568,6 +569,7 @@ static struct attribute *stats_attrs[] = {
 	&split_attr.attr,
 	&split_failed_attr.attr,
 	&split_deferred_attr.attr,
+	&nr_anon_attr.attr,
 	NULL,
 };
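The &nr_anon_attr entry added to stats_attrs above is generated by the DEFINE_MTHP_STAT_ATTR() line. For context (not part of this diff), that macro, already present in mm/huge_memory.c, expands to roughly the following, wiring each stat to a read-only sysfs file backed by sum_mthp_stat():

#define DEFINE_MTHP_STAT_ATTR(_name, _index)				\
static ssize_t _name##_show(struct kobject *kobj,			\
			struct kobj_attribute *attr, char *buf)		\
{									\
	/* Each hugepages-<size>kB kobject knows its folio order. */	\
	int order = to_thpsize(kobj)->order;				\
									\
	return sysfs_emit(buf, "%lu\n", sum_mthp_stat(order, _index));	\
}									\
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)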
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3376,8 +3378,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	struct folio *folio = page_folio(page);
 	struct deferred_split *ds_queue = get_deferred_split_queue(folio);
 	XA_STATE(xas, &folio->mapping->i_pages, folio->index);
-	struct anon_vma *anon_vma = NULL;
+	bool is_anon = folio_test_anon(folio);
 	struct address_space *mapping = NULL;
+	struct anon_vma *anon_vma = NULL;
+	int order = folio_order(folio);
 	int extra_pins, ret;
 	pgoff_t end;
 	bool is_hzp;
@@ -3394,7 +3398,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	if (folio_test_writeback(folio))
 		return -EBUSY;
 
-	if (folio_test_anon(folio)) {
+	if (is_anon) {
 		/*
 		 * The caller does not necessarily hold an mmap_lock that would
 		 * prevent the anon_vma disappearing so we first we take a
@@ -3512,6 +3516,10 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		}
 	}
 
+	if (is_anon) {
+		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
+		mod_mthp_stat(0, MTHP_STAT_NR_ANON, 1);
+	}
 	__split_huge_page(page, list, end);
 	if (ret) {
 fail:
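One subtlety in the split accounting above: mod_mthp_stat() ignores order <= 0 (see the include/linux/huge_mm.h hunk), so for this backport, which always splits to base pages, only the decrement takes effect. Annotated for clarity (a sketch of the same two lines, not an additional change):

	if (is_anon) {
		/* The order-N anon THP ceases to exist as a THP... */
		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
		/*
		 * ...and the resulting order-0 folios are not THPs, so this
		 * call is filtered out by the "order <= 0" check. It mirrors
		 * the upstream commit, which passes new_order here and does
		 * transfer the count when splitting to a smaller mTHP order.
		 */
		mod_mthp_stat(0, MTHP_STAT_NR_ANON, 1);
	}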
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -499,6 +499,8 @@ int folio_migrate_mapping(struct address_space *mapping,
 	/* No turning back from here */
 	newfolio->index = folio->index;
 	newfolio->mapping = folio->mapping;
+	if (folio_test_anon(folio) && folio_test_large(folio))
+		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
 	if (folio_test_swapbacked(folio))
 		__folio_set_swapbacked(newfolio);
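Note what this hunk deliberately does not do: the source folio's counter is untouched here. A sketch of the net accounting for migrating one order-N anon THP (assuming the source folio is released through free_pages_prepare(), as in the mm/page_alloc.c hunk below):

/* 1. folio_migrate_mapping(): destination folio gains ->mapping. */
mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);  /* +1 at order N */

/* 2. Source folio is freed later; free_pages_prepare() clears ->mapping. */
mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);              /* -1 at order N */

/* Net: nr_anon at order N is unchanged by a successful migration. */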
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1240,8 +1240,11 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 		}
 	}
-	if (PageMappingFlags(page))
+	if (PageMappingFlags(page)) {
+		if (PageAnon(page))
+			mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
 		page->mapping = NULL;
+	}
 	if (memcg_kmem_online() && PageMemcgKmem(page))
 		__memcg_kmem_uncharge_page(page, order);
 	if (is_check_pages_enabled()) {
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1465,6 +1465,9 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
 
+	mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
+
 	trace_android_vh_page_add_new_anon_rmap(&folio->page, vma, address);
 }