UPSTREAM: mm: page_alloc: remove remnants of unlocked migratetype updates
The freelist hygiene patches made migratetype accesses fully protected
under the zone->lock. Remove remnants of handling the race conditions
that existed before from the MIGRATE_HIGHATOMIC code.

Link: https://lkml.kernel.org/r/20250225001023.1494422-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Brendan Jackman <jackmanb@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Bug: 420836317
(cherry picked from commit 020396a581dc69be2d30939fabde6c029d847034)
Change-Id: Ia1266c34f09db1c404df7f37c1a9ff06d61c0cce
Signed-off-by: yipeng xiang <yipengxiang@honor.corp-partner.google.com>
Committed by: Isaac Manjarres
Parent: 65b7c505d9
Commit: 707dfe67d6
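For context, the pattern being deleted is easiest to see in miniature. The
following stand-alone C sketch (illustrative names only — toy_zone,
toy_steal_block, TOY_* — not kernel code) models the invariant the patch
relies on: once every migratetype update is serialized by zone->lock, the
caller reads the pageblock type once under the lock and passes it down, so
the callee no longer has to re-read it and bail out on a racy
MIGRATE_HIGHATOMIC sighting. This is exactly the shape of the
__rmqueue_fallback() change below, where fallback_mt, already known under
zone->lock, becomes the new block_type argument of try_to_steal_block().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum { TOY_MOVABLE, TOY_HIGHATOMIC };

struct toy_zone {
	pthread_mutex_t lock;	/* stands in for zone->lock */
	int block_type;		/* stands in for the pageblock migratetype */
};

/* The callee trusts the type its caller read under the lock. */
static bool toy_steal_block(struct toy_zone *zone, int block_type)
{
	(void)zone;
	/* No re-read and no highatomic bail-out needed: block_type
	 * cannot have changed while our caller holds zone->lock. */
	return block_type != TOY_HIGHATOMIC;
}

int main(void)
{
	struct toy_zone zone = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.block_type = TOY_MOVABLE,
	};
	int fallback_mt;

	pthread_mutex_lock(&zone.lock);
	fallback_mt = zone.block_type;	/* read once, under the lock */
	printf("steal: %s\n",
	       toy_steal_block(&zone, fallback_mt) ? "ok" : "skipped");
	pthread_mutex_unlock(&zone.lock);
	return 0;
}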
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2082,20 +2082,10 @@ static inline bool boost_watermark(struct zone *zone)
 static struct page *
 try_to_steal_block(struct zone *zone, struct page *page,
 		   int current_order, int order, int start_type,
-		   unsigned int alloc_flags)
+		   int block_type, unsigned int alloc_flags)
 {
 	int free_pages, movable_pages, alike_pages;
 	unsigned long start_pfn;
-	int block_type;
-
-	block_type = get_pageblock_migratetype(page);
-
-	/*
-	 * This can happen due to races and we want to prevent broken
-	 * highatomic accounting.
-	 */
-	if (is_migrate_highatomic(block_type))
-		return NULL;
 
 	/* Take ownership for orders >= pageblock_order */
 	if (current_order >= pageblock_order) {
@@ -2280,33 +2270,22 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 		spin_lock_irqsave(&zone->lock, flags);
 		for (order = 0; order < NR_PAGE_ORDERS; order++) {
 			struct free_area *area = &(zone->free_area[order]);
-			int mt;
+			unsigned long size;
 
 			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
 			if (!page)
 				continue;
 
-			mt = get_pageblock_migratetype(page);
 			/*
-			 * In page freeing path, migratetype change is racy so
-			 * we can counter several free pages in a pageblock
-			 * in this loop although we changed the pageblock type
-			 * from highatomic to ac->migratetype. So we should
-			 * adjust the count once.
+			 * It should never happen but changes to
+			 * locking could inadvertently allow a per-cpu
+			 * drain to add pages to MIGRATE_HIGHATOMIC
+			 * while unreserving so be safe and watch for
+			 * underflows.
 			 */
-			if (is_migrate_highatomic(mt)) {
-				unsigned long size;
-				/*
-				 * It should never happen but changes to
-				 * locking could inadvertently allow a per-cpu
-				 * drain to add pages to MIGRATE_HIGHATOMIC
-				 * while unreserving so be safe and watch for
-				 * underflows.
-				 */
-				size = max(pageblock_nr_pages, 1UL << order);
-				size = min(size, zone->nr_reserved_highatomic);
-				zone->nr_reserved_highatomic -= size;
-			}
+			size = max(pageblock_nr_pages, 1UL << order);
+			size = min(size, zone->nr_reserved_highatomic);
+			zone->nr_reserved_highatomic -= size;
 
 			/*
 			 * Convert to ac->migratetype and avoid the normal
@@ -2318,10 +2297,12 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
 			 * may increase.
 			 */
 			if (order < pageblock_order)
-				ret = move_freepages_block(zone, page, mt,
+				ret = move_freepages_block(zone, page,
+							   MIGRATE_HIGHATOMIC,
 							   ac->migratetype);
 			else {
-				move_to_free_list(page, zone, order, mt,
+				move_to_free_list(page, zone, order,
+						  MIGRATE_HIGHATOMIC,
 						  ac->migratetype);
 				change_pageblock_range(page, order,
 						       ac->migratetype);
@@ -2395,7 +2376,8 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 
 		page = get_page_from_free_area(area, fallback_mt);
 		page = try_to_steal_block(zone, page, current_order, order,
-					  start_migratetype, alloc_flags);
+					  start_migratetype, fallback_mt,
+					  alloc_flags);
 		if (page)
 			goto got_one;
 	}
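A note on the accounting the second hunk keeps: the amount subtracted from
zone->nr_reserved_highatomic is first rounded up to at least a full
pageblock (max(pageblock_nr_pages, 1UL << order)) and then capped at the
current reserve (min(size, zone->nr_reserved_highatomic)), so the counter
cannot underflow even if a per-cpu drain sneaks pages into
MIGRATE_HIGHATOMIC while unreserving. A toy arithmetic sketch of that
clamp (illustrative constant and helper names, not the kernel's):

#include <stdio.h>

/* Illustrative value; the real pageblock size is architecture-dependent. */
#define TOY_PAGEBLOCK_NR_PAGES 512UL

/* Models: size = max(pageblock_nr_pages, 1UL << order);
 *         size = min(size, zone->nr_reserved_highatomic);
 *         zone->nr_reserved_highatomic -= size;                      */
static unsigned long toy_unreserve(unsigned long reserved, unsigned int order)
{
	unsigned long size = 1UL << order;

	if (size < TOY_PAGEBLOCK_NR_PAGES)
		size = TOY_PAGEBLOCK_NR_PAGES;	/* at least one pageblock */
	if (size > reserved)
		size = reserved;		/* clamp: no underflow */
	return reserved - size;
}

int main(void)
{
	/* order-10 block (1024 pages) vs. only 300 reserved: clamps to 0 */
	printf("%lu\n", toy_unreserve(300, 10));
	/* order-3 block in a 2048-page reserve: one pageblock (512) released */
	printf("%lu\n", toy_unreserve(2048, 3));
	return 0;
}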