ANDROID: GKI: deferred split queue corruption - ABI fixup
Drop the MTHP_STAT_NR_ANON, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED and
THP_UNDERUSED_SPLIT_PAGE counters to preserve the frozen ABI.

Change-Id: Iee642a0bef61d3f299e63e00ead344159c2f0113
Bug: 419599659
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
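For context: the dropped entries sit immediately in front of the __MTHP_STAT_COUNT terminator in enum mthp_stat_item, and THP_UNDERUSED_SPLIT_PAGE sits in front of later enum vm_event_item entries, so keeping them would grow every array dimensioned by the terminator and shift the value of every enumerator that follows it, which is what breaks a frozen GKI ABI. Below is a minimal standalone sketch of that effect; the DEMO_* names are hypothetical stand-ins, not code from this patch.

#include <stdio.h>

/* Hypothetical stand-in for enum mthp_stat_item; DEMO_STAT_COUNT plays the
 * role of __MTHP_STAT_COUNT. */
enum demo_stat_item {
	DEMO_STAT_SPLIT,
	DEMO_STAT_SPLIT_FAILED,
	DEMO_STAT_SPLIT_DEFERRED,
	/* DEMO_STAT_NR_ANON,	<- inserting entries here bumps DEMO_STAT_COUNT
	 *			   and shifts any enumerator placed below it */
	DEMO_STAT_COUNT
};

/* Any stats block dimensioned by the terminator grows with it, changing
 * sizes and offsets that a frozen ABI representation has already captured. */
struct demo_stats {
	unsigned long stats[DEMO_STAT_COUNT];
};

int main(void)
{
	printf("DEMO_STAT_COUNT = %d, sizeof(struct demo_stats) = %zu bytes\n",
	       DEMO_STAT_COUNT, sizeof(struct demo_stats));
	return 0;
}

Dropping the three entries puts __MTHP_STAT_COUNT and the vm_event_item values back at the positions the frozen ABI was generated against, which is all the hunks below do.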
@@ -267,8 +267,6 @@ enum mthp_stat_item {
 	MTHP_STAT_SPLIT,
 	MTHP_STAT_SPLIT_FAILED,
 	MTHP_STAT_SPLIT_DEFERRED,
-	MTHP_STAT_NR_ANON,
-	MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
 	__MTHP_STAT_COUNT
 };
@@ -107,7 +107,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		THP_SPLIT_PAGE,
 		THP_SPLIT_PAGE_FAILED,
 		THP_DEFERRED_SPLIT_PAGE,
-		THP_UNDERUSED_SPLIT_PAGE,
 		THP_SPLIT_PMD,
 		THP_SHATTER_PAGE,
 		THP_SHATTER_PAGE_FAILED,
@@ -581,8 +581,6 @@ DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK);
 DEFINE_MTHP_STAT_ATTR(split, MTHP_STAT_SPLIT);
 DEFINE_MTHP_STAT_ATTR(split_failed, MTHP_STAT_SPLIT_FAILED);
 DEFINE_MTHP_STAT_ATTR(split_deferred, MTHP_STAT_SPLIT_DEFERRED);
-DEFINE_MTHP_STAT_ATTR(nr_anon, MTHP_STAT_NR_ANON);
-DEFINE_MTHP_STAT_ATTR(nr_anon_partially_mapped, MTHP_STAT_NR_ANON_PARTIALLY_MAPPED);
 
 static struct attribute *stats_attrs[] = {
 	&anon_fault_alloc_attr.attr,
@@ -593,8 +591,6 @@ static struct attribute *stats_attrs[] = {
 	&split_attr.attr,
 	&split_failed_attr.attr,
 	&split_deferred_attr.attr,
-	&nr_anon_attr.attr,
-	&nr_anon_partially_mapped_attr.attr,
 	NULL,
 };
@@ -3407,7 +3403,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 	bool is_anon = folio_test_anon(folio);
 	struct address_space *mapping = NULL;
 	struct anon_vma *anon_vma = NULL;
-	int order = folio_order(folio);
 	int extra_pins, ret;
 	pgoff_t end;
 	bool is_hzp;
@@ -3525,11 +3520,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		if (folio_order(folio) > 1 &&
 		    !list_empty(&folio->_deferred_list)) {
 			ds_queue->split_queue_len--;
-			if (folio_test_partially_mapped(folio)) {
+			if (folio_test_partially_mapped(folio))
 				__folio_clear_partially_mapped(folio);
-				mod_mthp_stat(folio_order(folio),
-					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
-			}
 			/*
 			 * Reinitialize page_deferred_list after removing the
 			 * page from the split_queue, otherwise a subsequent
@@ -3553,10 +3545,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 			}
 		}
 
-		if (is_anon) {
-			mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
-			mod_mthp_stat(0, MTHP_STAT_NR_ANON, 1);
-		}
 		__split_huge_page(page, list, end);
 	if (ret) {
 fail:
@@ -3617,11 +3605,8 @@ bool __folio_unqueue_deferred_split(struct folio *folio)
 	spin_lock_irqsave(&ds_queue->split_queue_lock, flags);
 	if (!list_empty(&folio->_deferred_list)) {
 		ds_queue->split_queue_len--;
-		if (folio_test_partially_mapped(folio)) {
+		if (folio_test_partially_mapped(folio))
 			__folio_clear_partially_mapped(folio);
-			mod_mthp_stat(folio_order(folio),
-				      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
-		}
 		list_del_init(&folio->_deferred_list);
 		unqueued = true;
 	}
@@ -3666,7 +3651,6 @@ void deferred_split_folio(struct folio *folio, bool partially_mapped)
 			if (folio_test_pmd_mappable(folio))
 				count_vm_event(THP_DEFERRED_SPLIT_PAGE);
 			count_mthp_stat(folio_order(folio), MTHP_STAT_SPLIT_DEFERRED);
-			mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, 1);
 
 		}
 	} else {
@@ -3758,11 +3742,8 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 			list_move(&folio->_deferred_list, &list);
 		} else {
 			/* We lost race with folio_put() */
-			if (folio_test_partially_mapped(folio)) {
+			if (folio_test_partially_mapped(folio))
 				__folio_clear_partially_mapped(folio);
-				mod_mthp_stat(folio_order(folio),
-					      MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, -1);
-			}
 			list_del_init(&folio->_deferred_list);
 			ds_queue->split_queue_len--;
 		}
@@ -3784,8 +3765,6 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 			goto next;
 		if (!split_folio(folio)) {
 			did_split = true;
-			if (underused)
-				count_vm_event(THP_UNDERUSED_SPLIT_PAGE);
 			split++;
 		}
 		folio_unlock(folio);
@@ -499,8 +499,6 @@ int folio_migrate_mapping(struct address_space *mapping,
 	/* No turning back from here */
 	newfolio->index = folio->index;
 	newfolio->mapping = folio->mapping;
-	if (folio_test_anon(folio) && folio_test_large(folio))
-		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
 	if (folio_test_swapbacked(folio))
 		__folio_set_swapbacked(newfolio);
@@ -1240,11 +1240,8 @@ static __always_inline bool free_pages_prepare(struct page *page,
 			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 		}
 	}
-	if (PageMappingFlags(page)) {
-		if (PageAnon(page))
-			mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1);
+	if (PageMappingFlags(page))
 		page->mapping = NULL;
-	}
 	if (memcg_kmem_online() && PageMemcgKmem(page))
 		__memcg_kmem_uncharge_page(page, order);
 	if (is_check_pages_enabled()) {
@@ -1465,9 +1465,6 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
-
-	mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
-
 	trace_android_vh_page_add_new_anon_rmap(&folio->page, vma, address);
 }