From dd8fcb53983ced78cff12b5cd37d3a9c06ea0161 Mon Sep 17 00:00:00 2001
From: Kalesh Singh
Date: Wed, 28 May 2025 23:23:05 -0700
Subject: [PATCH] ANDROID: GKI: deferred split queue corruption - ABI fixup

Drop the MTHP_STAT_NR_ANON, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, and
THP_UNDERUSED_SPLIT_PAGE counters to preserve the frozen ABI.

Change-Id: Iee642a0bef61d3f299e63e00ead344159c2f0113
Bug: 419599659
Signed-off-by: Kalesh Singh
---
 include/linux/huge_mm.h       |  2 --
 include/linux/vm_event_item.h |  1 -
 mm/huge_memory.c              | 27 +++------------------------
 mm/migrate.c                  |  2 --
 mm/page_alloc.c               |  5 +----
 mm/rmap.c                     |  3 ---
 6 files changed, 4 insertions(+), 36 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a9bcef250958..f58c44dfc06b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -267,8 +267,6 @@ enum mthp_stat_item {
 	MTHP_STAT_SPLIT,
 	MTHP_STAT_SPLIT_FAILED,
 	MTHP_STAT_SPLIT_DEFERRED,
-	MTHP_STAT_NR_ANON,
-	MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
 	__MTHP_STAT_COUNT
 };
 
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index f28d65f3f7b4..faa993839c22 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -107,7 +107,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		THP_SPLIT_PAGE,
 		THP_SPLIT_PAGE_FAILED,
 		THP_DEFERRED_SPLIT_PAGE,
-		THP_UNDERUSED_SPLIT_PAGE,
 		THP_SPLIT_PMD,
 		THP_SHATTER_PAGE,
 		THP_SHATTER_PAGE_FAILED,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e8d20aa04547..6ac6febf6fad 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -581,8 +581,6 @@ DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
 DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
 DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
-DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
-DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
 
 static struct attribute *stats_attrs[] = {
 	&anon_fault_alloc_attr.attr,
@@ -593,8 +591,6 @@ static struct attribute *stats_attrs[] = {
 	&split_attr.attr,
 	&split_failed_attr.attr,
 	&split_deferred_attr.attr,
-	&nr_anon_attr.attr,
-	&nr_anon_partially_mapped_attr.attr,
 	NULL,
 };
 
@@ -3407,7 +3403,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	bool is_anon = folio_test_anon(folio);
 	struct address_space *mapping = NULL;
 	struct anon_vma *anon_vma = NULL;
-	int order = folio_order(folio);
 	int extra_pins, ret;
 	pgoff_t end;
 	bool is_hzp;
@@ -3525,11 +3520,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		if (folio_order(folio) > 1 &&
 		    !list_empty(&folio->_deferred_list)) {
 			ds_queue->split_queue_len--;
-			if (folio_test_partially_mapped(folio)) {
+			if (folio_test_partially_mapped(folio))
 				__folio_clear_partially_mapped(folio);
-				mod_mthp_stat(folio_order(folio),
-					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
-			}
 			/*
 			 * Reinitialize page_deferred_list after removing the
 			 * page from the split_queue, otherwise a subsequent
@@ -3553,10 +3545,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		}
 	}
 
-	if (is_anon) {
-		mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
-		mod_mthp_stat(0, MTHP_STAT_NR_ANON, 1);
-	}
 	__split_huge_page(page, list, end);
 	if (ret) {
 fail:
@@ -3617,11 +3605,8 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (!list_empty(&folio->_deferred_list)) {
 		ds_queue->split_queue_len--;
-		if (folio_test_partially_mapped(folio)) {
+		if (folio_test_partially_mapped(folio))
 			__folio_clear_partially_mapped(folio);
-			mod_mthp_stat(folio_order(folio),
-				      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
-		}
 		list_del_init(&folio->_deferred_list);
 		unqueued = true;
 	}
@@ -3666,7 +3651,6 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 			if (folio_test_pmd_mappable(folio))
 				count_vm_event(THP_DEFERRED_SPLIT_PAGE);
 			count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
-			mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
 
 		}
 	} else {
@@ -3758,11 +3742,8 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 			list_move(&folio->_deferred_list, &list);
 		} else {
 			/* We lost race with folio_put() */
-			if (folio_test_partially_mapped(folio)) {
+			if (folio_test_partially_mapped(folio))
 				__folio_clear_partially_mapped(folio);
-				mod_mthp_stat(folio_order(folio),
-					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
-			}
 			list_del_init(&folio->_deferred_list);
 			ds_queue->split_queue_len--;
 		}
@@ -3784,8 +3765,6 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 			goto next;
 		if (!split_folio(folio)) {
 			did_split = true;
-			if (underused)
-				count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
 			split++;
 		}
 		folio_unlock(folio);
diff --git a/mm/migrate.c b/mm/migrate.c
index c18c4ea71de8..b7cd1f7e6bc2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -499,8 +499,6 @@ int folio_migrate_mapping(struct address_space *mapping,
 	/* No turning back from here */
 	newfolio->index = folio->index;
 	newfolio->mapping = folio->mapping;
-	if (folio_test_anon(folio) && folio_test_large(folio))
-		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
 	if (folio_test_swapbacked(folio))
 		__folio_set_swapbacked(newfolio);
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 41371db24afa..419e9bbd2c0c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1240,11 +1240,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 		}
 	}
-	if (PageMappingFlags(page)) {
-		if (PageAnon(page))
-			mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
+	if (PageMappingFlags(page))
 		page->mapping = NULL;
-	}
 	if (memcg_kmem_online() && PageMemcgKmem(page))
 		__memcg_kmem_uncharge_page(page, order);
 	if (is_check_pages_enabled()) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 1fd7dae3a255..12a6f9d9af85 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1465,9 +1465,6 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
-
-	mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
-
 	trace_android_vh_page_add_new_anon_rmap(&folio->page, vma, address);
 }
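
Note (illustrative sketch only, not part of the patch; all names below are
hypothetical): the counters are dropped rather than kept because
enum mthp_stat_item and enum vm_event_item are part of the frozen GKI
interface. Inserting new enumerators shifts the value of every later entry,
including the trailing sentinel (__MTHP_STAT_COUNT, NR_VM_EVENT_ITEMS) that
dimensions the per-CPU statistics arrays, so vendor modules compiled against
the frozen layout would index the wrong slots. A minimal sketch of the
effect:

/* Frozen layout that out-of-tree modules were compiled against. */
enum stat_item_frozen {
	STAT_SPLIT_DEFERRED_FROZEN,	/* = 0 */
	__STAT_COUNT_FROZEN		/* = 1; dimensions the stats array */
};

/* Inserting an entry before the sentinel shifts the sentinel's value. */
enum stat_item_grown {
	STAT_SPLIT_DEFERRED_GROWN,	/* = 0 */
	STAT_NR_ANON_GROWN,		/* = 1, the newly added counter */
	__STAT_COUNT_GROWN		/* = 2; the array silently grows */
};

struct stats_frozen { unsigned long stats[__STAT_COUNT_FROZEN]; };
struct stats_grown  { unsigned long stats[__STAT_COUNT_GROWN]; };

/* The structure layout changed, which is exactly the ABI break. */
_Static_assert(sizeof(struct stats_frozen) != sizeof(struct stats_grown),
	       "adding enum entries changed the frozen layout");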