ANDROID: GKI: page_alloc ABI fixup
Move nr_free_highatomic out of struct zone to avoid ABI breakage.

Change-Id: I7f2e4ffb4c1e335d638fc76ddb864a67461da131
Bug: 420771453
Signed-off-by: Kalesh Singh <kaleshsingh@google.com>
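For context, GKI freezes the kernel module interface (KMI), so adding a new member to struct zone would change the struct's size and field offsets that prebuilt modules were built against. Keeping the counter in a standalone array indexed by zone_idx() leaves the struct layout untouched while preserving the accounting. Below is a minimal, self-contained sketch of that pattern in plain userspace C, not the kernel code itself; the helper name account_highatomic_free() and the MAX_NR_ZONES value are illustrative only.

/* Sketch of the ABI-preservation pattern: new per-zone state lives in a
 * parallel array indexed by the zone index instead of growing the frozen
 * struct. */
#include <stdio.h>

#define MAX_NR_ZONES 4	/* illustrative; the real value depends on kernel config */

struct zone {		/* frozen layout: adding a member here would change the KMI */
	unsigned long watermark_boost;
	unsigned long nr_reserved_highatomic;
};

/* new counter kept outside the struct, one slot per zone index */
static unsigned long nr_free_highatomic[MAX_NR_ZONES];

/* hypothetical helper mirroring what account_freepages() does in the patch */
static void account_highatomic_free(int zone_idx, long nr_pages)
{
	nr_free_highatomic[zone_idx] += nr_pages;
}

int main(void)
{
	account_highatomic_free(1, 32);		/* 32 highatomic pages freed in zone 1 */
	account_highatomic_free(1, -8);		/* 8 of them allocated again */
	printf("zone 1 free highatomic pages: %lu\n", nr_free_highatomic[1]);
	return 0;
}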
@@ -839,7 +839,6 @@ struct zone {
 	unsigned long watermark_boost;
 
 	unsigned long nr_reserved_highatomic;
-	unsigned long nr_free_highatomic;
 
 	/*
 	 * We don't know if the memory that we're going to allocate will be
@@ -470,7 +470,7 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
 #define K(x) ((x) << (PAGE_SHIFT-10))
 
 extern char * const zone_names[MAX_NR_ZONES];
-extern unsigned long free_highatomics[MAX_NR_ZONES];
+extern unsigned long nr_free_highatomic[MAX_NR_ZONES];
 
 /* perform sanity checks on struct pages being allocated or freed */
 DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
@@ -324,6 +324,8 @@ const char * const migratetype_names[MIGRATE_TYPES] = {
 #endif
 };
 
+unsigned long nr_free_highatomic[MAX_NR_ZONES] = {0};
+
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
 static int watermark_boost_factor __read_mostly = 15000;
@@ -770,8 +772,8 @@ static inline void account_freepages(struct zone *zone, int nr_pages,
 	if (is_migrate_cma(migratetype))
 		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
 	else if (is_migrate_highatomic(migratetype))
-		WRITE_ONCE(zone->nr_free_highatomic,
-			   zone->nr_free_highatomic + nr_pages);
+		WRITE_ONCE(nr_free_highatomic[zone_idx(zone)],
+			   nr_free_highatomic[zone_idx(zone)] + nr_pages);
 }
 
 /* Used for pages not on another list */
@@ -3232,7 +3234,7 @@ static inline long __zone_watermark_unusable_free(struct zone *z,
 	 * watermark then subtract the free pages reserved for highatomic.
 	 */
 	if (likely(!(alloc_flags & ALLOC_RESERVES)))
-		unusable_free += READ_ONCE(z->nr_free_highatomic);
+		unusable_free += READ_ONCE(nr_free_highatomic[zone_idx(z)]);
 
 #ifdef CONFIG_CMA
 	/* If allocation can't use CMA areas don't use free CMA pages */
@@ -342,7 +342,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
 			K(low_wmark_pages(zone)),
 			K(high_wmark_pages(zone)),
 			K(zone->nr_reserved_highatomic),
-			K(free_highatomics[zone_idx(zone)]),
+			K(nr_free_highatomic[zone_idx(zone)]),
 			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
 			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
 			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),