diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0f23045788df..cc74733d420b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -259,6 +259,24 @@ EXPORT_SYMBOL(node_states);
 
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
+/*
+ * A cached value of the page's pageblock's migratetype, used when the page is
+ * put on a pcplist. Used to avoid the pageblock migratetype lookup when
+ * freeing from pcplists in most cases, at the cost of possibly becoming stale.
+ * Also the migratetype set in the page does not necessarily match the pcplist
+ * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
+ * other index - this ensures that it will be put on the correct CMA freelist.
+ */
+static inline int get_pcppage_migratetype(struct page *page)
+{
+	return page->index;
+}
+
+static inline void set_pcppage_migratetype(struct page *page, int migratetype)
+{
+	page->index = migratetype;
+}
+
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 unsigned int pageblock_order __read_mostly;
 #endif
@@ -1326,6 +1344,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	unsigned long flags;
 	unsigned int order;
+	bool isolated_pageblocks;
 	struct page *page;
 
 	/*
@@ -1338,6 +1357,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 		pindex = pindex - 1;
 
 	spin_lock_irqsave(&zone->lock, flags);
+	isolated_pageblocks = has_isolate_pageblock(zone);
 
 	while (count > 0) {
 		struct list_head *list;
@@ -1353,19 +1373,23 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 		order = pindex_to_order(pindex);
 		nr_pages = 1 << order;
 		do {
-			unsigned long pfn;
 			int mt;
 
 			page = list_last_entry(list, struct page, pcp_list);
-			pfn = page_to_pfn(page);
-			mt = get_pfnblock_migratetype(page, pfn);
+			mt = get_pcppage_migratetype(page);
 
 			/* must delete to avoid corrupting pcp list */
 			list_del(&page->pcp_list);
			count -= nr_pages;
 			pcp->count -= nr_pages;
 
-			__free_one_page(page, pfn, zone, order, mt, FPI_NONE);
+			/* MIGRATE_ISOLATE page should not go to pcplists */
+			VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
+			/* Pageblock could have been isolated meanwhile */
+			if (unlikely(isolated_pageblocks))
+				mt = get_pageblock_migratetype(page);
+
+			__free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE);
 			trace_mm_page_pcpu_drain(page, order, mt);
 		} while (count > 0 && !list_empty(list));
 	}
@@ -1751,6 +1775,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 			continue;
 		del_page_from_free_list(page, zone, current_order);
 		expand(zone, page, order, current_order, migratetype);
+		set_pcppage_migratetype(page, migratetype);
 		trace_mm_page_alloc_zone_locked(page, order, migratetype,
 				pcp_allowed_order(order) &&
 				migratetype < MIGRATE_PCPTYPES);
@@ -2345,7 +2370,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		 * pages are ordered properly.
 		 */
 		list_add_tail(&page->pcp_list, list);
-		if (is_migrate_cma(get_pageblock_migratetype(page)))
+		if (is_migrate_cma(get_pcppage_migratetype(page)))
 			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
 					      -(1 << order));
 	}
@@ -2511,6 +2536,19 @@ void drain_all_pages(struct zone *zone)
 	__drain_all_pages(zone, false);
 }
 
+static bool free_unref_page_prepare(struct page *page, unsigned long pfn,
+				    unsigned int order)
+{
+	int migratetype;
+
+	if (!free_pages_prepare(page, order, FPI_NONE))
+		return false;
+
+	migratetype = get_pfnblock_migratetype(page, pfn);
+	set_pcppage_migratetype(page, migratetype);
+	return true;
+}
+
 static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
 {
 	int min_nr_free, max_nr_free;
@@ -2597,7 +2635,7 @@ void free_unref_page(struct page *page, unsigned int order)
 	int migratetype, pcpmigratetype;
 	bool skip_free_unref_page = false;
 
-	if (!free_pages_prepare(page, order, FPI_NONE))
+	if (!free_unref_page_prepare(page, pfn, order))
 		return;
 
 	/*
@@ -2608,7 +2646,7 @@ void free_unref_page(struct page *page, unsigned int order)
 	 * get those areas back if necessary. Otherwise, we may have to free
 	 * excessively into the page allocator
 	 */
-	migratetype = pcpmigratetype = get_pfnblock_migratetype(page, pfn);
+	migratetype = pcpmigratetype = get_pcppage_migratetype(page);
 	trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
 	if (skip_free_unref_page)
 		return;
@@ -2651,7 +2689,7 @@ void free_unref_page_list(struct list_head *list)
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
 		unsigned long pfn = page_to_pfn(page);
-		if (!free_pages_prepare(page, 0, FPI_NONE)) {
+		if (!free_unref_page_prepare(page, pfn, 0)) {
 			list_del(&page->lru);
 			continue;
 		}
@@ -2660,7 +2698,7 @@ void free_unref_page_list(struct list_head *list)
 		 * Free isolated pages directly to the allocator, see
 		 * comment in free_unref_page.
 		 */
-		migratetype = get_pfnblock_migratetype(page, pfn);
+		migratetype = get_pcppage_migratetype(page);
 		if (unlikely(is_migrate_isolate(migratetype))) {
 			list_del(&page->lru);
 			free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE);
@@ -2674,10 +2712,9 @@ void free_unref_page_list(struct list_head *list)
 	list_for_each_entry_safe(page, next, list, lru) {
 		struct zone *zone = page_zone(page);
-		unsigned long pfn = page_to_pfn(page);
 
 		list_del(&page->lru);
-		migratetype = get_pfnblock_migratetype(page, pfn);
+		migratetype = get_pcppage_migratetype(page);
 
 		/*
 		 * Either different zone requiring a different pcp lock or
@@ -2700,7 +2737,7 @@ void free_unref_page_list(struct list_head *list)
 		pcp = pcp_spin_trylock(zone->per_cpu_pageset);
 		if (unlikely(!pcp)) {
 			pcp_trylock_finish(UP_flags);
-			free_one_page(zone, page, pfn,
+			free_one_page(zone, page, page_to_pfn(page),
 				      0, migratetype, FPI_NONE);
 			locked_zone = NULL;
 			continue;
@@ -2878,7 +2915,7 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 		}
 	}
 	__mod_zone_freepage_state(zone, -(1 << order),
-				  get_pageblock_migratetype(page));
+				  get_pcppage_migratetype(page));
 	spin_unlock_irqrestore(&zone->lock, flags);
 } while (check_new_pages(page, order));
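
Note: the caching scheme the hunks above reintroduce can be summarized in a
small standalone C sketch. The fake_page type and helper names below are
hypothetical simplifications, not the kernel's structures: the migratetype is
looked up once when a page enters a pcplist, stashed in the otherwise-unused
index slot, and trusted at drain time unless the zone may contain isolated
pageblocks, in which case the authoritative lookup is redone.

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	enum migratetype { MT_UNMOVABLE, MT_MOVABLE, MT_CMA, MT_ISOLATE };

	struct fake_page {
		int index;			/* reused to cache the migratetype */
		enum migratetype block_mt;	/* stand-in for the pageblock bitmap */
	};

	static int get_cached_mt(struct fake_page *p)
	{
		return p->index;
	}

	static void set_cached_mt(struct fake_page *p, int mt)
	{
		p->index = mt;
	}

	/* The expensive, authoritative lookup (pageblock bitmap in the kernel). */
	static int get_block_mt(struct fake_page *p)
	{
		return p->block_mt;
	}

	/* Free side: classify once and cache before enqueueing on the pcplist. */
	static void pcp_free(struct fake_page *p)
	{
		set_cached_mt(p, get_block_mt(p));
		/* ...enqueue p on the per-CPU list here... */
	}

	/* Drain side: trust the cache unless the zone has isolated pageblocks. */
	static void pcp_drain_one(struct fake_page *p, bool zone_has_isolated)
	{
		int mt = get_cached_mt(p);

		assert(mt != MT_ISOLATE);	/* isolated pages never hit pcplists */
		if (zone_has_isolated)		/* cache may be stale; redo the lookup */
			mt = get_block_mt(p);
		printf("freeing to freelist %d\n", mt);
	}

	int main(void)
	{
		struct fake_page p = { .index = 0, .block_mt = MT_CMA };

		pcp_free(&p);			/* caches MT_CMA */
		p.block_mt = MT_ISOLATE;	/* block isolated while on the pcplist */
		pcp_drain_one(&p, true);	/* stale cache detected, frees as MT_ISOLATE */
		return 0;
	}

This mirrors the patch's own division of labor: free_unref_page_list() diverts
MIGRATE_ISOLATE pages before they ever reach a pcplist (so the VM_BUG_ON on the
cached value holds), and only pageblocks isolated after a page was queued force
the get_pageblock_migratetype() re-lookup in free_pcppages_bulk().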