BACKPORT: mm: page_alloc: tighten up find_suitable_fallback()
find_suitable_fallback() is not as efficient as it could be, and somewhat
difficult to follow.

1. should_try_claim_block() is a loop invariant. There is no point in
   checking fallback areas if the caller is interested in claimable
   blocks but the order and the migratetype don't allow for that.

2. __rmqueue_steal() doesn't care about claimability, so it shouldn't
   have to run those tests.

Different callers want different things from this helper:

1. __compact_finished() scans orders up until it finds a claimable block
2. __rmqueue_claim() scans orders down as long as blocks are claimable
3. __rmqueue_steal() doesn't care about claimability at all

Move should_try_claim_block() out of the loop. Only test it for the two
callers who care in the first place. Distinguish "no blocks" from "order
+ mt are not claimable" in the return value; __rmqueue_claim() can stop
once order becomes unclaimable, __compact_finished() can keep advancing
until order becomes claimable.

Before:

 Performance counter stats for './run case-lru-file-mmap-read' (5 runs):

          85,294.85 msec task-clock          #   5.644 CPUs utilized      ( +- 0.32% )
             15,968      context-switches    # 187.209 /sec               ( +- 3.81% )
                153      cpu-migrations      #   1.794 /sec               ( +- 3.29% )
            801,808      page-faults         #   9.400 K/sec              ( +- 0.10% )
    733,358,331,786      instructions        #   1.87  insn per cycle     ( +- 0.20% )  (64.94%)
    392,622,904,199      cycles              #   4.603 GHz                ( +- 0.31% )  (64.84%)
    148,563,488,531      branches            #   1.742 G/sec              ( +- 0.18% )  (63.86%)
        152,143,228      branch-misses       #   0.10% of all branches    ( +- 1.19% )  (62.82%)

            15.1128 +- 0.0637 seconds time elapsed  ( +- 0.42% )

After:

 Performance counter stats for './run case-lru-file-mmap-read' (5 runs):

          84,380.21 msec task-clock          #   5.664 CPUs utilized      ( +- 0.21% )
             16,656      context-switches    # 197.392 /sec               ( +- 3.27% )
                151      cpu-migrations      #   1.790 /sec               ( +- 3.28% )
            801,703      page-faults         #   9.501 K/sec              ( +- 0.09% )
    731,914,183,060      instructions        #   1.88  insn per cycle     ( +- 0.38% )  (64.90%)
    388,673,535,116      cycles              #   4.606 GHz                ( +- 0.24% )  (65.06%)
    148,251,482,143      branches            #   1.757 G/sec              ( +- 0.37% )  (63.92%)
        149,766,550      branch-misses       #   0.10% of all branches    ( +- 1.22% )  (62.88%)

            14.8968 +- 0.0486 seconds time elapsed  ( +- 0.33% )

Link: https://lkml.kernel.org/r/20250407180154.63348-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Brendan Jackman <jackmanb@google.com>
Tested-by: Shivank Garg <shivankg@amd.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Carlos Song <carlos.song@nxp.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Bug: 420836317
Change-Id: I2886de9da0fd99047cf5c675cd2ae7c386267770
(cherry picked from commit ee414bd97b3fa0a4f74e40004e3b4191326bd46c)
[In the original patch, find_suitable_fallback() uses MIGRATE_PCPTYPES as
 the loop bound; in Linux 6.6 this is MIGRATE_FALLBACKS, which caused the
 patch to fail to apply directly.]
Signed-off-by: yipeng xiang <yipengxiang@honor.corp-partner.google.com>
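For reference, below is a sketch of how the helper reads after this backport,
assembled from the hunks that follow. It is not part of the patch itself: the
trailing "return -1" is the pre-existing fall-through that the hunk context
does not show, and MIGRATE_FALLBACKS is the 6.6 loop bound mentioned in the
backport note above (upstream uses MIGRATE_PCPTYPES). The callers then pass
claimable=true (__compact_finished() treats >= 0 as success, __rmqueue_claim()
breaks on -2 and skips -1) or claimable=false (__rmqueue_steal() only skips -1).

    int find_suitable_fallback(struct free_area *area, unsigned int order,
                               int migratetype, bool claimable)
    {
            int i;

            /* Loop invariant: only the claiming callers pay for this test */
            if (claimable && !should_try_claim_block(order, migratetype))
                    return -2;      /* order/migratetype are never claimable */

            if (area->nr_free == 0)
                    return -1;      /* no free blocks at this order */

            for (i = 0; i < MIGRATE_FALLBACKS - 1; i++) {
                    int fallback_mt = fallbacks[migratetype][i];

                    if (!free_area_empty(area, fallback_mt))
                            return fallback_mt;
            }

            return -1;              /* no suitable fallback migratetype */
    }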
diff --git a/mm/compaction.c b/mm/compaction.c
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2279,7 +2279,6 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 	ret = COMPACT_NO_SUITABLE_PAGE;
 	for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
 		struct free_area *area = &cc->zone->free_area[order];
-		bool claim_block;
 
 		/* Job done if page is free of the right migratetype */
 		if (!free_area_empty(area, migratetype))
@@ -2295,8 +2294,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 		 * Job done if allocation would steal freepages from
 		 * other migratetype buddy lists.
 		 */
-		if (find_suitable_fallback(area, order, migratetype,
-						true, &claim_block) != -1)
+		if (find_suitable_fallback(area, order, migratetype, true) >= 0)
 			/*
 			 * Movable pages are OK in any pageblock. If we are
 			 * stealing for a non-movable allocation, make sure
diff --git a/mm/internal.h b/mm/internal.h
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -815,7 +815,7 @@ void init_cma_reserved_pageblock(struct page *page);
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 
 int find_suitable_fallback(struct free_area *area, unsigned int order,
-			int migratetype, bool claim_only, bool *claim_block);
+			int migratetype, bool claimable);
 
 static inline bool free_area_empty(struct free_area *area, int migratetype)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2080,31 +2080,25 @@ static bool should_try_claim_block(unsigned int order, int start_mt)
 
 /*
  * Check whether there is a suitable fallback freepage with requested order.
- * Sets *claim_block to instruct the caller whether it should convert a whole
- * pageblock to the returned migratetype.
- * If only_claim is true, this function returns fallback_mt only if
+ * If claimable is true, this function returns fallback_mt only if
  * we would do this whole-block claiming. This would help to reduce
  * fragmentation due to mixed migratetype pages in one pageblock.
  */
 int find_suitable_fallback(struct free_area *area, unsigned int order,
-			int migratetype, bool only_claim, bool *claim_block)
+			int migratetype, bool claimable)
 {
 	int i;
-	int fallback_mt;
+
+	if (claimable && !should_try_claim_block(order, migratetype))
+		return -2;
 
 	if (area->nr_free == 0)
 		return -1;
 
-	*claim_block = false;
 	for (i = 0; i < MIGRATE_FALLBACKS - 1 ; i++) {
-		fallback_mt = fallbacks[migratetype][i];
-		if (free_area_empty(area, fallback_mt))
-			continue;
+		int fallback_mt = fallbacks[migratetype][i];
 
-		if (should_try_claim_block(order, migratetype))
-			*claim_block = true;
-
-		if (*claim_block || !only_claim)
+		if (!free_area_empty(area, fallback_mt))
 			return fallback_mt;
 	}
 
@@ -2201,7 +2195,6 @@ __rmqueue_claim(struct zone *zone, int order, int start_migratetype,
 	int min_order = order;
 	struct page *page;
 	int fallback_mt;
-	bool claim_block;
 
 	/*
 	 * Do not steal pages from freelists belonging to other pageblocks
@@ -2220,11 +2213,14 @@ __rmqueue_claim(struct zone *zone, int order, int start_migratetype,
 			--current_order) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
-				start_migratetype, false, &claim_block);
+						     start_migratetype, true);
+
+		/* No block in that order */
 		if (fallback_mt == -1)
 			continue;
 
-		if (!claim_block)
+		/* Advanced into orders too low to claim, abort */
+		if (fallback_mt == -2)
 			break;
 
 		page = get_page_from_free_area(area, fallback_mt);
@@ -2252,12 +2248,11 @@ __rmqueue_steal(struct zone *zone, int order, int start_migratetype)
 	int current_order;
 	struct page *page;
 	int fallback_mt;
-	bool claim_block;
 
 	for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
-				start_migratetype, false, &claim_block);
+						     start_migratetype, false);
 		if (fallback_mt == -1)
 			continue;
 