Add samsung specific changes
This commit is contained in:
9
mm/sec_mm/Kbuild
Normal file
9
mm/sec_mm/Kbuild
Normal file
@@ -0,0 +1,9 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
#

# sec_mm.o: Samsung MM debug/tuning module (vendor hooks, low-file detector,
# panic-time dump helpers).
obj-$(CONFIG_SEC_MM) := sec_mm.o
sec_mm-objs := sec_mm_init.o sec_mm_debug.o sec_mm_tune.o \
	       sec_mm_sysfs.o sec_mm_trace.o lowfile_detect.o \
	       panic_handler.o dump_tasks.o

# hpp.o: 2MB hugepage pool manager (separate module).
obj-$(CONFIG_HUGEPAGE_POOL) += hpp.o
|
41
mm/sec_mm/Kconfig
Normal file
41
mm/sec_mm/Kconfig
Normal file
@@ -0,0 +1,41 @@
|
||||
# Samsung vendor MM options: debug dumps, dmabuf accounting, readaround
# clamping and the 2MB hugepage pool.
menu "SEC Memory Management Options"

config SEC_MM
	tristate "Enable SEC MM"
	depends on ANDROID_VENDOR_HOOKS
	default m
	help
	  SEC memory management module.
	  With this feature enabled, system memory status and process lists
	  are printed in kernel log when the device goes low on file cache or
	  kernel panic events.
	  This also supports tuning mm params via vendor hook.

config SEC_MM_DUMP_DMABUF_TASKS
	bool "Support for printing dmabuf usage information"
	depends on SEC_MM
	depends on !DMABUF_HEAPS_SAMSUNG_SYSTEM
	default y
	help
	  Print dmabuf usage information.

config MMAP_READAROUND_LIMIT
	int "Limit mmap readaround upperbound"
	default 16
	help
	  Inappropriate mmap readaround size can hurt device performance
	  during the sluggish situation. Add the hard upper-limit for
	  mmap readaround.

config HUGEPAGE_POOL
	tristate "hugepage pool management"
	default m
	depends on !HIGHMEM
	depends on !NEED_MULTIPLE_NODES
	depends on TRANSPARENT_HUGEPAGE
	depends on SEC_MM
	help
	  Enable this to provide 2MB hugepage to THP anon, ION, and GPU memory
	  allocation requests efficiently.

endmenu
|
187
mm/sec_mm/dump_tasks.c
Normal file
187
mm/sec_mm/dump_tasks.c
Normal file
@@ -0,0 +1,187 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* sec_mm/
|
||||
*
|
||||
* Copyright (C) 2020 Samsung Electronics
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/oom.h>
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/fdtable.h>
|
||||
#include <linux/sec_mm.h>
|
||||
|
||||
/* return true if the task is not adequate as candidate victim task. */
|
||||
static bool oom_unkillable_task(struct task_struct *p)
|
||||
{
|
||||
if (is_global_init(p))
|
||||
return true;
|
||||
if (p->flags & PF_KTHREAD)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* The process p may have detached its own ->mm while exiting or through
|
||||
* use_mm(), but one or more of its subthreads may still have a valid
|
||||
* pointer. Return p, or any of its subthreads with a valid ->mm, with
|
||||
* task_lock() held.
|
||||
*/
|
||||
static struct task_struct *mm_debug_find_lock_task_mm(struct task_struct *p)
{
	struct task_struct *t;

	rcu_read_lock();

	for_each_thread(p, t) {
		/*
		 * Trylock ->alloc_lock (the lock task_lock() takes) so a
		 * contended thread is skipped instead of blocking this
		 * debug path.
		 */
		if (!spin_trylock(&t->alloc_lock))
			continue;
		if (likely(t->mm))
			goto found;	/* returns with task_lock(t) held */
		task_unlock(t);
	}
	t = NULL;
found:
	rcu_read_unlock();

	return t;
}
|
||||
|
||||
#ifdef CONFIG_SEC_MM_DUMP_DMABUF_TASKS
|
||||
struct dmabuf_ifd_data {
|
||||
int cnt;
|
||||
size_t size;
|
||||
};
|
||||
|
||||
static int dmabuf_iterate_fd(const void *t, struct file *file, unsigned fd)
|
||||
{
|
||||
struct dmabuf_ifd_data *dmabuf_usage = (struct dmabuf_ifd_data*)t;
|
||||
struct dma_buf *dmabuf;
|
||||
|
||||
if (!is_dma_buf_file(file))
|
||||
return 0;
|
||||
|
||||
dmabuf = file->private_data;
|
||||
|
||||
dmabuf_usage->cnt++;
|
||||
dmabuf_usage->size += dmabuf->size >> 10;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void mm_debug_dump_dma_buf_tasks(void)
|
||||
{
|
||||
struct task_struct *p;
|
||||
struct task_struct *task;
|
||||
long heaviest_dma_buf = 0;
|
||||
char heaviest_comm[TASK_COMM_LEN];
|
||||
pid_t heaviest_pid;
|
||||
|
||||
pr_info("mm_debug dma_buf tasks\n");
|
||||
pr_info("[ pid ] uid tgid dmabufcnt dmabufsz(kb) name\n");
|
||||
rcu_read_lock();
|
||||
for_each_process(p) {
|
||||
struct dmabuf_ifd_data dmabuf_usage;
|
||||
struct files_struct *files = p->files;
|
||||
bool skipped = false;
|
||||
|
||||
if (oom_unkillable_task(p))
|
||||
continue;
|
||||
|
||||
task = mm_debug_find_lock_task_mm(p);
|
||||
if (!task) {
|
||||
/*
|
||||
* This is a kthread or all of p's threads have already
|
||||
* detached their mm's. There's no need to report
|
||||
* them; they can't be oom killed anyway.
|
||||
*/
|
||||
continue;
|
||||
}
|
||||
|
||||
dmabuf_usage.cnt = 0;
|
||||
dmabuf_usage.size = 0;
|
||||
|
||||
if (files && spin_is_locked(&files->file_lock))
|
||||
skipped = true;
|
||||
else
|
||||
iterate_fd(files, 0, dmabuf_iterate_fd, &dmabuf_usage);
|
||||
|
||||
if (!dmabuf_usage.size) {
|
||||
if (skipped)
|
||||
pr_info("[%7d] %5d %5d %9d %12zu %s (skipped size check)\n",
|
||||
task->pid,
|
||||
from_kuid(&init_user_ns, task_uid(task)),
|
||||
task->tgid, dmabuf_usage.cnt,
|
||||
dmabuf_usage.size, task->comm);
|
||||
task_unlock(task);
|
||||
continue;
|
||||
}
|
||||
|
||||
pr_info("[%7d] %5d %5d %9d %12zu %s\n",
|
||||
task->pid, from_kuid(&init_user_ns, task_uid(task)),
|
||||
task->tgid, dmabuf_usage.cnt, dmabuf_usage.size,
|
||||
task->comm);
|
||||
|
||||
if (dmabuf_usage.size > heaviest_dma_buf) {
|
||||
heaviest_dma_buf = dmabuf_usage.size;
|
||||
strncpy(heaviest_comm, task->comm, TASK_COMM_LEN);
|
||||
heaviest_pid = task->pid;
|
||||
}
|
||||
task_unlock(task);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
if (heaviest_dma_buf)
|
||||
pr_info("heaviest_task_dma_buf:%s(%d) size:%luKB\n",
|
||||
heaviest_comm, heaviest_pid, heaviest_dma_buf);
|
||||
}
|
||||
#endif
|
||||
|
||||
void mm_debug_dump_tasks(void)
|
||||
{
|
||||
struct task_struct *p;
|
||||
struct task_struct *task;
|
||||
unsigned long cur_rss_sum;
|
||||
unsigned long heaviest_rss_sum = 0;
|
||||
char heaviest_comm[TASK_COMM_LEN];
|
||||
pid_t heaviest_pid;
|
||||
|
||||
pr_info("mm_debug dump tasks\n");
|
||||
pr_info("[ pid ] uid tgid total_vm rss pgtables_bytes swapents oom_score_adj name\n");
|
||||
rcu_read_lock();
|
||||
for_each_process(p) {
|
||||
if (oom_unkillable_task(p))
|
||||
continue;
|
||||
|
||||
task = mm_debug_find_lock_task_mm(p);
|
||||
if (!task) {
|
||||
/*
|
||||
* This is a kthread or all of p's threads have already
|
||||
* detached their mm's. There's no need to report
|
||||
* them; they can't be oom killed anyway.
|
||||
*/
|
||||
continue;
|
||||
}
|
||||
|
||||
pr_info("[%7d] %5d %5d %8lu %8lu %8ld %8lu %5hd %s\n",
|
||||
task->pid, from_kuid(&init_user_ns, task_uid(task)),
|
||||
task->tgid, task->mm->total_vm, get_mm_rss(task->mm),
|
||||
mm_pgtables_bytes(task->mm),
|
||||
get_mm_counter(task->mm, MM_SWAPENTS),
|
||||
task->signal->oom_score_adj, task->comm);
|
||||
cur_rss_sum = get_mm_rss(task->mm) +
|
||||
get_mm_counter(task->mm, MM_SWAPENTS);
|
||||
if (cur_rss_sum > heaviest_rss_sum) {
|
||||
heaviest_rss_sum = cur_rss_sum;
|
||||
strscpy(heaviest_comm, task->comm, TASK_COMM_LEN);
|
||||
heaviest_pid = task->pid;
|
||||
}
|
||||
task_unlock(task);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
if (heaviest_rss_sum)
|
||||
pr_info("heaviest_task_rss:%s(%d) size:%luKB, totalram_pages:%luKB\n",
|
||||
heaviest_comm, heaviest_pid, K(heaviest_rss_sum),
|
||||
K(totalram_pages()));
|
||||
}
|
739
mm/sec_mm/hpp.c
Normal file
739
mm/sec_mm/hpp.c
Normal file
@@ -0,0 +1,739 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* linux/mm/hpp.c
|
||||
*
|
||||
* Copyright (C) 2019 Samsung Electronics
|
||||
*
|
||||
*/
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/vmstat.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/compaction.h>
|
||||
#include <linux/sec_mm.h>
|
||||
#include <uapi/linux/sched/types.h>
|
||||
#include <trace/hooks/mm.h>
|
||||
#include <trace/hooks/dmabuf.h>
|
||||
|
||||
#define HUGEPAGE_ORDER HPAGE_PMD_ORDER
|
||||
#define HPP_FPI_MAGIC ((__force int __bitwise)BIT(31))
|
||||
|
||||
struct task_struct *khppd_task;
|
||||
|
||||
enum hpp_state_enum {
|
||||
HPP_OFF,
|
||||
HPP_ON,
|
||||
HPP_ACTIVATED,
|
||||
HPP_STATE_MAX
|
||||
};
|
||||
|
||||
static unsigned int hpp_state;
|
||||
static bool hpp_debug;
|
||||
static bool app_launch;
|
||||
static int khppd_wakeup = 1;
|
||||
static unsigned long last_wakeup_stamp;
|
||||
|
||||
DECLARE_WAIT_QUEUE_HEAD(khppd_wait);
|
||||
static struct list_head hugepage_list[MAX_NR_ZONES];
|
||||
static struct list_head hugepage_nonzero_list[MAX_NR_ZONES];
|
||||
int nr_hugepages_quota[MAX_NR_ZONES];
|
||||
int nr_hugepages_limit[MAX_NR_ZONES];
|
||||
int nr_hugepages_to_fill[MAX_NR_ZONES];
|
||||
int nr_hugepages[MAX_NR_ZONES];
|
||||
int nr_hugepages_nonzero[MAX_NR_ZONES];
|
||||
int nr_hugepages_tried[MAX_NR_ZONES];
|
||||
int nr_hugepages_alloced[MAX_NR_ZONES];
|
||||
int nr_hugepages_fill_tried[MAX_NR_ZONES];
|
||||
int nr_hugepages_fill_done[MAX_NR_ZONES];
|
||||
static spinlock_t hugepage_list_lock[MAX_NR_ZONES];
|
||||
static spinlock_t hugepage_nonzero_list_lock[MAX_NR_ZONES];
|
||||
|
||||
/* free pool if available memory is below this value */
|
||||
static unsigned long hugepage_avail_low[MAX_NR_ZONES];
|
||||
/* fill pool if available memory is above this value */
|
||||
static unsigned long hugepage_avail_high[MAX_NR_ZONES];
|
||||
|
||||
static unsigned long get_zone_nr_hugepages(int zidx)
|
||||
{
|
||||
return nr_hugepages[zidx] + nr_hugepages_nonzero[zidx];
|
||||
}
|
||||
|
||||
static unsigned long total_hugepage_pool_pages(void)
|
||||
{
|
||||
unsigned long total_nr_hugepages = 0;
|
||||
int zidx;
|
||||
|
||||
if (hpp_state == HPP_OFF)
|
||||
return 0;
|
||||
|
||||
for (zidx = MAX_NR_ZONES - 1; zidx >= 0; zidx--)
|
||||
total_nr_hugepages += get_zone_nr_hugepages(zidx);
|
||||
|
||||
return total_nr_hugepages << HUGEPAGE_ORDER;
|
||||
}
|
||||
|
||||
/*
 * Rough "available memory" estimate for one zone: free pages plus file
 * cache (inactive + active).  Reads vmstat counters without any lock.
 */
static inline unsigned long zone_available_simple(int zidx)
{
	struct pglist_data *pgdat = &contig_page_data;
	struct zone *zone = &pgdat->node_zones[zidx];

	return zone_page_state(zone, NR_FREE_PAGES) +
	       zone_page_state(zone, NR_ZONE_INACTIVE_FILE) +
	       zone_page_state(zone, NR_ZONE_ACTIVE_FILE);
}
|
||||
|
||||
/*
 * Adjust nr_hugepages_limit[] for every zone based on currently available
 * memory: when a zone's availability drops below hugepage_avail_low the
 * limit is shrunk by the deficit (floored at zero), otherwise it is
 * restored to the quota.  Rate-limited to run at most once per 100ms;
 * more frequent calls return without touching the limits.
 * (Note: this function returns nothing — an earlier comment claiming a
 * -1 return was stale.)
 */
static void hugepage_calculate_limits_under_zone(void)
{
	int zidx, prev_limit;
	bool print_debug_log = false;
	/* calculate only after 100ms passed */
	static DEFINE_RATELIMIT_STATE(rs, HZ/10, 1);
	static DEFINE_RATELIMIT_STATE(log_rs, HZ, 1);

	ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);
	if (!__ratelimit(&rs))
		return;

	if (unlikely(hpp_debug) && __ratelimit(&log_rs)) {
		print_debug_log = true;
		pr_err("%s: zidx curavail d_avail curpool curlimit newlimit\n", __func__);
	}

	for (zidx = 0; zidx < MAX_NR_ZONES; zidx++) {
		long avail_pages = zone_available_simple(zidx);
		long delta_avail = 0;
		long current_pool_pages = get_zone_nr_hugepages(zidx) << HUGEPAGE_ORDER;

		prev_limit = nr_hugepages_limit[zidx];
		if (avail_pages < hugepage_avail_low[zidx]) {
			/* shrink limit by the availability deficit, floored at 0 */
			delta_avail = hugepage_avail_low[zidx] - avail_pages;
			if (current_pool_pages - delta_avail < 0)
				delta_avail = current_pool_pages;
			nr_hugepages_limit[zidx] = (current_pool_pages - delta_avail) >> HUGEPAGE_ORDER;
		} else {
			nr_hugepages_limit[zidx] = nr_hugepages_quota[zidx];
		}

		if (print_debug_log)
			pr_err("%s: %4d %8ld %8ld %8ld %8d %8d\n",
				__func__, zidx, avail_pages, delta_avail,
				current_pool_pages, prev_limit, nr_hugepages_limit[zidx]);
	}
}
|
||||
|
||||
/*
 * Wake khppd if zone @zidx looks like it needs refilling.  Suppressed
 * during app launch / high mem-boost, within 10s of the last wakeup, and
 * while a wakeup is already pending.
 */
static inline void __try_to_wake_up_khppd(enum zone_type zidx)
{
	bool do_wakeup = false;

	if (app_launch || mem_boost_mode_high())
		return;

	/* debounce: khppd already ran less than 10 seconds ago */
	if (time_is_after_jiffies(last_wakeup_stamp + 10 * HZ))
		return;

	if (khppd_wakeup)
		return;

	if (nr_hugepages_limit[zidx]) {
		/* pool under half its limit, or pages still waiting to be zeroed */
		if (nr_hugepages[zidx] * 2 < nr_hugepages_limit[zidx] ||
		    nr_hugepages_nonzero[zidx])
			do_wakeup = true;
	} else if (zone_available_simple(zidx) > hugepage_avail_high[zidx]) {
		do_wakeup = true;
	}

	if (do_wakeup) {
		khppd_wakeup = 1;
		if (unlikely(hpp_debug))
			pr_info("khppd: woken up\n");
		wake_up(&khppd_wait);
	}
}
|
||||
|
||||
static void try_to_wake_up_khppd(void)
|
||||
{
|
||||
int zidx;
|
||||
|
||||
for (zidx = MAX_NR_ZONES - 1; zidx >= 0; zidx--)
|
||||
__try_to_wake_up_khppd(zidx);
|
||||
}
|
||||
|
||||
/*
 * GFP flags that steer an allocation into zone @zidx, with reclaim flags
 * masked off so pool refills never enter direct reclaim.
 */
static inline gfp_t get_gfp(enum zone_type zidx)
{
	gfp_t ret;

	if (zidx == ZONE_MOVABLE)
		ret = __GFP_MOVABLE | __GFP_HIGHMEM;
#ifdef CONFIG_ZONE_DMA
	else if (zidx == ZONE_DMA)
		ret = __GFP_DMA;
#elif defined(CONFIG_ZONE_DMA32)
	else if (zidx == ZONE_DMA32)
		ret = __GFP_DMA32;
#endif
	else
		ret = 0;	/* ZONE_NORMAL and friends: no zone modifier */
	return ret & ~__GFP_RECLAIM;
}
|
||||
|
||||
/*
 * Divert a hugepage-order page being freed into the not-yet-zeroed pool.
 * Returns true when the page was taken; false when the pool is off or the
 * zone's quota is already met (caller then frees it normally).
 */
bool insert_hugepage_pool(struct page *page)
{
	enum zone_type zidx = page_zonenum(page);

	if (hpp_state == HPP_OFF)
		return false;

	if (get_zone_nr_hugepages(zidx) >= nr_hugepages_quota[zidx])
		return false;

	/*
	 * note that, at this point, the page is in the free page state except
	 * it is not in buddy. need prep_new_hpage before going to hugepage list.
	 */
	spin_lock(&hugepage_nonzero_list_lock[zidx]);
	list_add(&page->lru, &hugepage_nonzero_list[zidx]);
	nr_hugepages_nonzero[zidx]++;
	spin_unlock(&hugepage_nonzero_list_lock[zidx]);

	return true;
}
|
||||
|
||||
/*
 * Drain the zone's nonzero list: zero each page and promote it to the
 * ready list (or release it to buddy if the zone is over quota).  The
 * nonzero-list lock is dropped around the per-page work so concurrent
 * producers/consumers are not blocked for the whole pass.
 */
static void zeroing_nonzero_list(enum zone_type zidx)
{
	if (!nr_hugepages_nonzero[zidx])
		return;

	spin_lock(&hugepage_nonzero_list_lock[zidx]);
	while (!list_empty(&hugepage_nonzero_list[zidx])) {
		struct page *page = list_first_entry(&hugepage_nonzero_list[zidx],
						     struct page, lru);
		list_del(&page->lru);
		nr_hugepages_nonzero[zidx]--;
		spin_unlock(&hugepage_nonzero_list_lock[zidx]);

		if (nr_hugepages[zidx] < nr_hugepages_quota[zidx]) {
			/* zero the page, then move it to the ready list */
			prep_new_hpage(page, __GFP_ZERO, 0);
			spin_lock(&hugepage_list_lock[zidx]);
			list_add(&page->lru, &hugepage_list[zidx]);
			nr_hugepages[zidx]++;
			spin_unlock(&hugepage_list_lock[zidx]);
		} else {
			/* over quota: return the page to the buddy allocator */
			free_hpage(page, HPP_FPI_MAGIC);
		}
		spin_lock(&hugepage_nonzero_list_lock[zidx]);
	}
	spin_unlock(&hugepage_nonzero_list_lock[zidx]);
}
|
||||
|
||||
/*
 * Compact memory (at most once per hour) before refilling the pool so the
 * high-order allocations that follow are more likely to succeed.
 * Must only be called from the khppd kthread context: it temporarily
 * promotes the current thread from SCHED_IDLE to SCHED_NORMAL for the
 * compaction run and demotes it again afterwards.
 */
static void prepare_hugepage_alloc(void)
{
#ifdef CONFIG_COMPACTION
	struct sched_param param_normal = { .sched_priority = 0 };
	struct sched_param param_idle = { .sched_priority = 0 };
	static DEFINE_RATELIMIT_STATE(rs, 60 * 60 * HZ, 1);
	static int compact_count;

	if (!__ratelimit(&rs))
		return;

	/* "&param_*" below repairs HTML-mangled "¶m_*" in the original */
	if (!sched_setscheduler(current, SCHED_NORMAL, &param_normal)) {
		pr_info("khppd: compact start\n");
		compact_node_async(0);
		pr_info("khppd: compact end (%d done)\n", ++compact_count);
		if (sched_setscheduler(current, SCHED_IDLE, &param_idle))
			pr_err("khppd: fail to set sched_idle\n");
	}
#endif
}
|
||||
|
||||
/*
 * Compute nr_hugepages_to_fill[] per zone: how many hugepages may be
 * allocated into the pool using only the surplus above
 * hugepage_avail_high, capped so the pool never exceeds its quota.
 */
static void calculate_nr_hugepages_to_fill(void)
{
	int zidx;

	if (unlikely(hpp_debug))
		pr_err("%s: zidx curavail d_avail curpool tofill\n", __func__);

	for (zidx = 0; zidx < MAX_NR_ZONES; zidx++) {
		long avail_pages = zone_available_simple(zidx);
		long delta_avail = 0;
		long current_pool_pages = get_zone_nr_hugepages(zidx) << HUGEPAGE_ORDER;
		long quota_pages = ((long)nr_hugepages_quota[zidx]) << HUGEPAGE_ORDER;

		if (avail_pages > hugepage_avail_high[zidx]) {
			/* only the surplus above avail_high may be consumed */
			delta_avail = avail_pages - hugepage_avail_high[zidx];
			if (current_pool_pages + delta_avail > quota_pages)
				delta_avail = quota_pages - current_pool_pages;
			nr_hugepages_to_fill[zidx] = delta_avail >> HUGEPAGE_ORDER;
		} else {
			nr_hugepages_to_fill[zidx] = 0;
		}

		if (unlikely(hpp_debug))
			pr_err("%s: %4d %8ld %8ld %8ld %8d\n",
				__func__, zidx, avail_pages, delta_avail,
				current_pool_pages, nr_hugepages_to_fill[zidx]);
	}
}
|
||||
|
||||
/*
 * Allocate zeroed hugepages from buddy into the zone's ready list, up to
 * nr_hugepages_to_fill[zidx].  Stops early on allocation failure, or when
 * buddy falls back to a lower zone — both signal further attempts would
 * likely fail too.
 */
static void fill_hugepage_pool(enum zone_type zidx)
{
	struct page *page;
	int trial = nr_hugepages_to_fill[zidx];

	prepare_hugepage_alloc();

	nr_hugepages_fill_tried[zidx] += trial;
	while (trial--) {
		if (nr_hugepages[zidx] >= nr_hugepages_quota[zidx])
			break;

		page = alloc_pages(get_gfp(zidx) | __GFP_ZERO |
				   __GFP_NOWARN, HUGEPAGE_ORDER);

		/* if alloc fails, future requests may fail also. stop here. */
		if (!page)
			break;

		if (page_zonenum(page) != zidx) {
			/* Note that we should use __free_pages to call to free_pages_prepare */
			__free_pages(page, HUGEPAGE_ORDER);

			/*
			 * if page is from the lower zone, future requests may
			 * also get the lower zone pages. stop here.
			 */
			break;
		}
		nr_hugepages_fill_done[zidx]++;
		spin_lock(&hugepage_list_lock[zidx]);
		list_add(&page->lru, &hugepage_list[zidx]);
		nr_hugepages[zidx]++;
		spin_unlock(&hugepage_list_lock[zidx]);
	}
}
|
||||
|
||||
/*
 * Hand out one zeroed hugepage from the pool for an allocation that may
 * use zones up to @highest_zoneidx.  First pass scans the ready (zeroed)
 * lists top-down; second pass falls back to the nonzero lists, zeroing a
 * page on demand.  All list locks are trylocked so this never blocks;
 * returns NULL when the pool is inactive, empty, or contended.  khppd
 * itself is excluded to avoid feeding the pool from the pool.
 */
static struct page *alloc_zeroed_hugepage(gfp_t gfp,
					  enum zone_type highest_zoneidx)
{
	struct page *page = NULL;
	int zidx;

	if (hpp_state != HPP_ACTIVATED)
		return NULL;
	if (current == khppd_task)
		return NULL;

	nr_hugepages_tried[highest_zoneidx]++;
	for (zidx = highest_zoneidx; zidx >= 0; zidx--) {
		__try_to_wake_up_khppd(zidx);
		if (!nr_hugepages[zidx])
			continue;
		if (unlikely(!spin_trylock(&hugepage_list_lock[zidx])))
			continue;	/* contended: try the next zone */

		if (!list_empty(&hugepage_list[zidx])) {
			page = list_first_entry(&hugepage_list[zidx],
						struct page, lru);
			list_del(&page->lru);
			nr_hugepages[zidx]--;
		}
		spin_unlock(&hugepage_list_lock[zidx]);

		if (page)
			goto got_page;
	}

	/* fallback: take a not-yet-zeroed page and zero it here */
	for (zidx = highest_zoneidx; zidx >= 0; zidx--) {
		if (!nr_hugepages_nonzero[zidx])
			continue;
		if (unlikely(!spin_trylock(&hugepage_nonzero_list_lock[zidx])))
			continue;

		if (!list_empty(&hugepage_nonzero_list[zidx])) {
			page = list_first_entry(&hugepage_nonzero_list[zidx],
						struct page, lru);
			list_del(&page->lru);
			nr_hugepages_nonzero[zidx]--;
		}
		spin_unlock(&hugepage_nonzero_list_lock[zidx]);

		if (page) {
			prep_new_hpage(page, __GFP_ZERO, 0);
			goto got_page;
		}
	}
	return NULL;

got_page:
	nr_hugepages_alloced[zidx]++;
	if (gfp & __GFP_COMP)
		prep_compound_page(page, HUGEPAGE_ORDER);
	return page;
}
|
||||
|
||||
/*
 * Pool maintenance thread: on each wakeup, zero pending pages and refill
 * every zone's pool.  Bails out of the per-zone loop while an app launch
 * or high mem-boost is in progress to stay out of the way.
 */
static int khppd(void *p)
{
	while (!kthread_should_stop()) {
		int zidx;

		wait_event_freezable(khppd_wait, khppd_wakeup ||
				     kthread_should_stop());

		khppd_wakeup = 0;
		last_wakeup_stamp = jiffies;

		calculate_nr_hugepages_to_fill();
		for (zidx = 0; zidx < MAX_NR_ZONES; zidx++) {
			if (app_launch || mem_boost_mode_high())
				break;

			zeroing_nonzero_list(zidx);
			fill_hugepage_pool(zidx);
		}
	}
	return 0;
}
|
||||
|
||||
static bool is_hugepage_avail_low_ok(void)
|
||||
{
|
||||
long total_avail_pages = 0;
|
||||
long total_avail_low_pages = 0;
|
||||
int zidx;
|
||||
|
||||
for (zidx = 0; zidx < MAX_NR_ZONES; zidx++) {
|
||||
total_avail_pages += zone_available_simple(zidx);
|
||||
total_avail_low_pages += hugepage_avail_low[zidx];
|
||||
}
|
||||
return total_avail_pages >= total_avail_low_pages;
|
||||
}
|
||||
|
||||
/*
 * Shrinker count callback: number of base pages the pool holds above its
 * (freshly recalculated) limits.  Only reacts to kswapd, and only while
 * system availability is below avail_low.
 */
static unsigned long hugepage_pool_count(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	long count, total_count = 0;
	int zidx;
	static DEFINE_RATELIMIT_STATE(log_rs, HZ, 1);

	if (!current_is_kswapd())
		return 0;

	if (is_hugepage_avail_low_ok())
		return 0;	/* no memory pressure: nothing to shrink */

	hugepage_calculate_limits_under_zone();
	for (zidx = MAX_NR_ZONES - 1; zidx >= 0; zidx--) {
		count = get_zone_nr_hugepages(zidx) - nr_hugepages_limit[zidx];
		if (count > 0)
			total_count += (count << HUGEPAGE_ORDER);
	}

	if (unlikely(hpp_debug) && __ratelimit(&log_rs))
		pr_err("%s returned %ld\n", __func__, total_count);

	return total_count;
}
|
||||
|
||||
/*
 * Shrinker scan callback: release pooled hugepages back to buddy until
 * each zone is within its limit, preferring not-yet-zeroed pages first
 * (they are cheaper to give up).  Returns base pages freed, or
 * SHRINK_STOP when nothing was freed or the caller is not kswapd.
 */
static unsigned long hugepage_pool_scan(struct shrinker *shrink,
					struct shrink_control *sc)
{
	struct page *page;
	unsigned long total_freed = 0;
	int zidx, nr_to_scan, nr_freed;
	bool print_debug_log = false;
	static DEFINE_RATELIMIT_STATE(log_rs, HZ, 1);

	if (!current_is_kswapd())
		return SHRINK_STOP;

	if (unlikely(hpp_debug) && __ratelimit(&log_rs)) {
		print_debug_log = true;
		pr_err("%s was requested %lu\n", __func__, sc->nr_to_scan);
	}

	hugepage_calculate_limits_under_zone();
	for (zidx = 0; zidx < MAX_NR_ZONES; zidx++) {
		nr_to_scan = get_zone_nr_hugepages(zidx) - nr_hugepages_limit[zidx];
		if (nr_to_scan <= 0)
			continue;
		nr_freed = 0;
		/* drop unzeroed pages first */
		spin_lock(&hugepage_nonzero_list_lock[zidx]);
		while (!list_empty(&hugepage_nonzero_list[zidx]) &&
		       nr_freed < nr_to_scan) {
			page = list_first_entry(&hugepage_nonzero_list[zidx],
						struct page, lru);
			list_del(&page->lru);
			free_hpage(page, HPP_FPI_MAGIC);
			nr_hugepages_nonzero[zidx]--;
			nr_freed++;
		}
		spin_unlock(&hugepage_nonzero_list_lock[zidx]);

		/* then zeroed (ready) pages if still over the limit */
		spin_lock(&hugepage_list_lock[zidx]);
		while (!list_empty(&hugepage_list[zidx]) &&
		       nr_freed < nr_to_scan) {
			page = list_first_entry(&hugepage_list[zidx],
						struct page, lru);
			list_del(&page->lru);
			free_hpage(page, HPP_FPI_MAGIC);
			nr_hugepages[zidx]--;
			nr_freed++;
		}
		spin_unlock(&hugepage_list_lock[zidx]);
		total_freed += nr_freed;
	}

	if (print_debug_log)
		pr_err("%s freed %lu hugepages(%luK)\n",
			__func__, total_freed, K(total_freed << HUGEPAGE_ORDER));

	return total_freed ? total_freed << HUGEPAGE_ORDER : SHRINK_STOP;
}
|
||||
|
||||
static struct shrinker hugepage_pool_shrinker_info = {
|
||||
.scan_objects = hugepage_pool_scan,
|
||||
.count_objects = hugepage_pool_count,
|
||||
.seeks = DEFAULT_SEEKS,
|
||||
};
|
||||
|
||||
module_param_array(nr_hugepages, int, NULL, 0444);
|
||||
module_param_array(nr_hugepages_nonzero, int, NULL, 0444);
|
||||
module_param_array(nr_hugepages_alloced, int, NULL, 0444);
|
||||
module_param_array(nr_hugepages_tried, int, NULL, 0444);
|
||||
module_param_array(nr_hugepages_fill_tried, int, NULL, 0444);
|
||||
module_param_array(nr_hugepages_fill_done, int, NULL, 0444);
|
||||
module_param_array(nr_hugepages_quota, int, NULL, 0644);
|
||||
module_param_array(nr_hugepages_limit, int, NULL, 0444);
|
||||
|
||||
module_param_array(hugepage_avail_low, ulong, NULL, 0644);
|
||||
module_param_array(hugepage_avail_high, ulong, NULL, 0644);
|
||||
|
||||
static int khppd_app_launch_notifier(struct notifier_block *nb,
|
||||
unsigned long action, void *data)
|
||||
{
|
||||
bool prev_launch;
|
||||
|
||||
if (hpp_state == HPP_OFF)
|
||||
return 0;
|
||||
|
||||
prev_launch = app_launch;
|
||||
app_launch = action ? true : false;
|
||||
|
||||
if (prev_launch && !app_launch)
|
||||
try_to_wake_up_khppd();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct notifier_block khppd_app_launch_nb = {
|
||||
.notifier_call = khppd_app_launch_notifier,
|
||||
};
|
||||
|
||||
/* meminfo_proc_show hook: add a "HugepagePool" line (KB) to /proc/meminfo. */
static void hpp_meminfo(void *data, struct seq_file *m)
{
	show_val_meminfo(m, "HugepagePool", K(total_hugepage_pool_pages()));
}
|
||||
|
||||
/* show_mem hook: report the pool size (KB) in show_mem() dumps. */
static void hpp_show_mem(void *data, unsigned int filter, nodemask_t *nodemask)
{
	pr_info("%s: %lu kB\n", "HugepagePool", K(total_hugepage_pool_pages()));
}
|
||||
|
||||
/* si_meminfo hook: pages parked in the pool still count as free memory. */
static void hpp_meminfo_adjust(void *data, unsigned long *totalram, unsigned long *freeram)
{
	*freeram += total_hugepage_pool_pages();
}
|
||||
|
||||
/*
 * try_alloc_pages hook: serve hugepage-order requests from the pool.
 * Skipped in atomic context since pool paths may take sleeping-unsafe
 * shortcuts (trylocks only, but stay conservative).
 */
static void hpp_try_alloc_pages_gfp(void *data, struct page **page,
		unsigned int order, gfp_t gfp, enum zone_type highest_zoneidx)
{
	if (order == HUGEPAGE_ORDER && !in_atomic())
		*page = alloc_zeroed_hugepage(gfp, highest_zoneidx);
}
|
||||
|
||||
/*
 * free_pages_prepare hook, acting only on frees issued by this module
 * (tagged with HPP_FPI_MAGIC): drop the page's reference if it still has
 * one, otherwise skip the normal free preparation entirely.
 * NOTE(review): the exact refcount contract with free_hpage() is not
 * visible here — confirm against the vendor-hook caller.
 */
static void hpp_free_pages_prepare_bypass(void *data, struct page *page,
		unsigned int order, int __bitwise fpi_flags, bool *bypass)
{
	if (fpi_flags != HPP_FPI_MAGIC)
		return;
	if (page_ref_count(page))
		put_page_testzero(page);
	else
		*bypass = true;
}
|
||||
|
||||
/*
 * free_pages_ok hook: capture hugepage-order frees into the pool instead
 * of returning them to buddy.  Frees tagged by the pool itself
 * (HPP_FPI_MAGIC) and CMA/isolated pages are left alone.
 */
static void hpp_free_pages_ok_bypass(void *data, struct page *page,
		unsigned int order, int __bitwise fpi_flags, bool *bypass)
{
	if (fpi_flags == HPP_FPI_MAGIC)
		return;
	if (is_migrate_cma_or_isolate_page(page))
		return;
	if (order == HUGEPAGE_ORDER && insert_hugepage_pool(page))
		*bypass = true;
}
|
||||
|
||||
/*
 * dmabuf page-pool free hook: on devices with more than 6GB RAM, send
 * hugepage-order pages straight back to buddy (where the
 * free_pages_ok hook can recapture them) instead of letting the dmabuf
 * pool cache them.  The RAM-size check is done once and cached.
 */
static void hpp_dmabuf_page_pool_free_bypass(void *data,
		struct page *page, bool *bypass)
{
	static bool did_check;
	static bool is_huge_dram;

	if (unlikely(!did_check)) {
		is_huge_dram = totalram_pages() > GB_TO_PAGES(6);
		did_check = true;
	}
	if (is_huge_dram && compound_order(page) == HUGEPAGE_ORDER) {
		__free_pages(page, HUGEPAGE_ORDER);
		*bypass = true;
	}
}
|
||||
|
||||
/* split_large_folio hook: keep folios whole while availability is healthy. */
static void hpp_split_large_folio_bypass(void *data, bool *bypass)
{
	bool avail_ok = is_hugepage_avail_low_ok();

	*bypass = avail_ok;
}
|
||||
|
||||
/*
 * Size the pool from total RAM (>10GB: 1GB quota; >6GB: 1GB quota with a
 * lower avail watermark; smaller devices: pool disabled, -EINVAL) and
 * distribute quota/watermarks across zones proportionally to each zone's
 * managed pages.  Also initializes the per-zone lists and locks.
 */
static int __init init_hugepage_pool(void)
{
	struct pglist_data *pgdat = &contig_page_data;
	unsigned long managed_pages;
	long hugepage_quota, avail_low, avail_high;
	/* u32 copy for do_div(); assumes total RAM fits in 32 bits of pages */
	uint32_t totalram_pages_uint = totalram_pages();
	u64 num_pages;
	int zidx;

	if (totalram_pages_uint > GB_TO_PAGES(10)) {
		hugepage_quota = GB_TO_PAGES(1);
		avail_low = MB_TO_PAGES(2560);
	} else if (totalram_pages_uint > GB_TO_PAGES(6)) {
		hugepage_quota = GB_TO_PAGES(1);
		avail_low = MB_TO_PAGES(1100);
	} else {
		return -EINVAL;	/* too little RAM: leave the pool disabled */
	}
	avail_high = avail_low + (avail_low >> 2);	/* high = low * 1.25 */

	for (zidx = 0; zidx < MAX_NR_ZONES; zidx++) {
		managed_pages = zone_managed_pages(&pgdat->node_zones[zidx]);
		/*
		 * calculate without zone lock as we assume managed_pages of
		 * zones do not change at runtime
		 */
		num_pages = (u64)hugepage_quota * managed_pages;
		do_div(num_pages, totalram_pages_uint);
		nr_hugepages_quota[zidx] = (num_pages >> HUGEPAGE_ORDER);
		nr_hugepages_limit[zidx] = nr_hugepages_quota[zidx];

		hugepage_avail_low[zidx] = (u64)avail_low * managed_pages;
		do_div(hugepage_avail_low[zidx], totalram_pages_uint);

		hugepage_avail_high[zidx] = (u64)avail_high * managed_pages;
		do_div(hugepage_avail_high[zidx], totalram_pages_uint);

		spin_lock_init(&hugepage_list_lock[zidx]);
		spin_lock_init(&hugepage_nonzero_list_lock[zidx]);
		INIT_LIST_HEAD(&hugepage_list[zidx]);
		INIT_LIST_HEAD(&hugepage_nonzero_list[zidx]);
	}
	return 0;
}
|
||||
|
||||
/*
 * Module init: size the per-zone pools, start the khppd refill thread,
 * and hook the shrinker plus the Android vendor hooks.  On failure the
 * module still loads (returns 0) but stays inactive (hpp_state HPP_OFF).
 */
static int __init hpp_init(void)
{
	struct sched_param param = { .sched_priority = 0 };
	int ret;

	if (init_hugepage_pool())
		goto skip_all;

	khppd_task = kthread_run(khppd, NULL, "khppd");
	if (IS_ERR(khppd_task)) {
		pr_err("Failed to start khppd\n");
		khppd_task = NULL;
		goto skip_all;
	}
	try_to_wake_up_khppd();
	/* "&param" repairs HTML-mangled "¶m" in the original */
	sched_setscheduler(khppd_task, SCHED_IDLE, &param);

	atomic_notifier_chain_register(&am_app_launch_notifier,
				       &khppd_app_launch_nb);
	ret = register_shrinker(&hugepage_pool_shrinker_info, "hugepage_pool");
	if (ret) {
		/* unwind: the notifier above was already registered */
		atomic_notifier_chain_unregister(&am_app_launch_notifier,
						 &khppd_app_launch_nb);
		kthread_stop(khppd_task);
		/* clear so hpp_exit() does not stop the thread twice */
		khppd_task = NULL;
		goto skip_all;
	}
	register_trace_android_vh_meminfo_proc_show(hpp_meminfo, NULL);
	register_trace_android_vh_show_mem(hpp_show_mem, NULL);
	register_trace_android_vh_si_meminfo_adjust(hpp_meminfo_adjust, NULL);
	register_trace_android_vh_free_pages_prepare_bypass(
			hpp_free_pages_prepare_bypass, NULL);
	register_trace_android_vh_free_pages_ok_bypass(
			hpp_free_pages_ok_bypass, NULL);
	register_trace_android_vh_dmabuf_page_pool_free_bypass(
			hpp_dmabuf_page_pool_free_bypass, NULL);
	register_trace_android_vh_split_large_folio_bypass(
			hpp_split_large_folio_bypass, NULL);
	register_trace_android_rvh_try_alloc_pages_gfp(
			hpp_try_alloc_pages_gfp, NULL);

	hpp_state = HPP_ACTIVATED;
skip_all:
	return 0;
}
|
||||
|
||||
/*
 * Module exit: stop khppd (if it was started), then undo the notifier,
 * shrinker and vendor-hook registrations done by hpp_init().
 */
static void __exit hpp_exit(void)
{
	if (!IS_ERR_OR_NULL(khppd_task))
		kthread_stop(khppd_task);
	atomic_notifier_chain_unregister(&am_app_launch_notifier,
					 &khppd_app_launch_nb);
	unregister_shrinker(&hugepage_pool_shrinker_info);
	unregister_trace_android_vh_meminfo_proc_show(hpp_meminfo, NULL);
	unregister_trace_android_vh_show_mem(hpp_show_mem, NULL);
	unregister_trace_android_vh_si_meminfo_adjust(hpp_meminfo_adjust, NULL);
	unregister_trace_android_vh_free_pages_prepare_bypass(
			hpp_free_pages_prepare_bypass, NULL);
	unregister_trace_android_vh_free_pages_ok_bypass(
			hpp_free_pages_ok_bypass, NULL);
	unregister_trace_android_vh_dmabuf_page_pool_free_bypass(
			hpp_dmabuf_page_pool_free_bypass, NULL);
	unregister_trace_android_vh_split_large_folio_bypass(
			hpp_split_large_folio_bypass, NULL);
}
|
||||
|
||||
/* module param setter for "debug": plain boolean parse. */
static int hpp_debug_param_set(const char *val, const struct kernel_param *kp)
{
	int err = param_set_bool(val, kp);

	return err;
}
|
||||
|
||||
static const struct kernel_param_ops hpp_debug_param_ops = {
|
||||
.set = hpp_debug_param_set,
|
||||
.get = param_get_bool,
|
||||
};
|
||||
module_param_cb(debug, &hpp_debug_param_ops, &hpp_debug, 0644);
|
||||
|
||||
static int hpp_state_param_set(const char *val, const struct kernel_param *kp)
|
||||
{
|
||||
return param_set_uint_minmax(val, kp, 0, 2);
|
||||
}
|
||||
|
||||
static const struct kernel_param_ops hpp_state_param_ops = {
|
||||
.set = hpp_state_param_set,
|
||||
.get = param_get_uint,
|
||||
};
|
||||
module_param_cb(state, &hpp_state_param_ops, &hpp_state, 0644);
|
||||
module_init(hpp_init)
|
||||
module_exit(hpp_exit);
|
||||
MODULE_LICENSE("GPL");
|
53
mm/sec_mm/lowfile_detect.c
Normal file
53
mm/sec_mm/lowfile_detect.c
Normal file
@@ -0,0 +1,53 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* sec_mm/
|
||||
*
|
||||
* Copyright (C) 2020 Samsung Electronics
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/sec_mm.h>
|
||||
|
||||
#define MIN_FILE_SIZE_HIGH 300
|
||||
#define MIN_FILE_SIZE_LOW 200
|
||||
#define MIN_FILE_SIZE_THR_GB 3
|
||||
|
||||
static unsigned long min_file;
|
||||
|
||||
static unsigned long lowfile_count(struct shrinker *s,
|
||||
struct shrink_control *sc)
|
||||
{
|
||||
unsigned long inactive_file, active_file, file;
|
||||
static DEFINE_RATELIMIT_STATE(mm_debug_rs, 10 * HZ, 1);
|
||||
|
||||
inactive_file = global_node_page_state(NR_INACTIVE_FILE);
|
||||
active_file = global_node_page_state(NR_ACTIVE_FILE);
|
||||
file = inactive_file + active_file;
|
||||
if (file < min_file && __ratelimit(&mm_debug_rs)) {
|
||||
pr_info("low file detected : %lukB < %luKB\n", K(file),
|
||||
K(min_file));
|
||||
show_mem();
|
||||
}
|
||||
return 0; /* return 0 not to call to scan_objects */
|
||||
}
|
||||
|
||||
static struct shrinker mm_debug_shrinker = {
|
||||
.count_objects = lowfile_count,
|
||||
};
|
||||
|
||||
void init_lowfile_detect(void)
|
||||
{
|
||||
if (totalram_pages() > GB_TO_PAGES(MIN_FILE_SIZE_THR_GB))
|
||||
min_file = MB_TO_PAGES(MIN_FILE_SIZE_HIGH);
|
||||
else
|
||||
min_file = MB_TO_PAGES(MIN_FILE_SIZE_LOW);
|
||||
|
||||
register_shrinker(&mm_debug_shrinker, "mm_debug_shrinker");
|
||||
}
|
||||
|
||||
void exit_lowfile_detect(void)
|
||||
{
|
||||
unregister_shrinker(&mm_debug_shrinker);
|
||||
}
|
38
mm/sec_mm/panic_handler.c
Normal file
38
mm/sec_mm/panic_handler.c
Normal file
@@ -0,0 +1,38 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* sec_mm/
|
||||
*
|
||||
* Copyright (C) 2020 Samsung Electronics
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/panic_notifier.h>
|
||||
#include <linux/sec_mm.h>
|
||||
|
||||
static int sec_mm_panic_handler(struct notifier_block *nb,
|
||||
unsigned long action, void *str_buf)
|
||||
{
|
||||
WRITE_ONCE(dump_tasks_rs.interval, 0);
|
||||
#ifdef CONFIG_SEC_MM_DUMP_DMABUF_TASKS
|
||||
WRITE_ONCE(dma_buf_tasks_rs.interval, 0);
|
||||
#endif
|
||||
show_mem();
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static struct notifier_block panic_block = {
|
||||
.notifier_call = sec_mm_panic_handler,
|
||||
.priority = 1 /* prior to priority 0 */
|
||||
};
|
||||
|
||||
void init_panic_handler(void)
|
||||
{
|
||||
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
|
||||
}
|
||||
|
||||
void exit_panic_handler(void)
|
||||
{
|
||||
atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
|
||||
}
|
249
mm/sec_mm/sec_mm_debug.c
Normal file
249
mm/sec_mm/sec_mm_debug.c
Normal file
@@ -0,0 +1,249 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* sec_mm/
|
||||
*
|
||||
* Copyright (C) 2024 Samsung Electronics
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/dma-heap.h>
|
||||
#include <linux/psi.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/ratelimit.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/sec_mm.h>
|
||||
#include <linux/sched/cputime.h>
|
||||
#include <trace/events/mmap.h>
|
||||
#include <trace/events/samsung.h>
|
||||
#include <trace/hooks/dmabuf.h>
|
||||
#include <trace/hooks/mm.h>
|
||||
#include <trace/hooks/psi.h>
|
||||
|
||||
/* Shared ratelimits for the task dumps issued from sec_mm_show_mem(). */
DEFINE_RATELIMIT_STATE(dump_tasks_rs, 5 * HZ, 1);
#ifdef CONFIG_SEC_MM_DUMP_DMABUF_TASKS
DEFINE_RATELIMIT_STATE(dma_buf_tasks_rs, 10 * HZ, 1);
#endif

/*
 * Log a failed PFN-range isolation inside alloc_contig_range().
 * Fixed: 'end' was declared plain 'unsigned' and printed with %x while
 * 'start' was 'unsigned long'/%lx; the vendor-hook prototype passes both
 * bounds as unsigned long, so the narrow type truncated the upper PFN.
 */
static void sec_mm_alloc_contig_range_not_isolated(void *data,
		unsigned long start, unsigned long end)
{
	pr_info_ratelimited("alloc_contig_range: [%lx, %lx) PFNs busy\n",
			start, end);
}
|
||||
|
||||
static void sec_mm_alloc_pages_slowpath_start(void *data, u64 *stime)
|
||||
{
|
||||
u64 utime;
|
||||
|
||||
task_cputime(current, &utime, stime);
|
||||
}
|
||||
|
||||
static void sec_mm_alloc_pages_slowpath_end(void *data, gfp_t *gfp,
|
||||
unsigned int order, unsigned long jiffies_start, u64 start,
|
||||
unsigned long did_some_progress, unsigned long pages_reclaimed,
|
||||
int retry)
|
||||
{
|
||||
u64 utime, cputime, end;
|
||||
|
||||
task_cputime(current, &utime, &end);
|
||||
cputime = (end - start) / NSEC_PER_MSEC;
|
||||
if (cputime < 256)
|
||||
return;
|
||||
|
||||
pr_info("alloc stall: timeJS(ms):%u|%llu rec:%lu|%lu ret:%d o:%d gfp:%#x(%pGg) AaiFai:%lukB|%lukB|%lukB|%lukB\n",
|
||||
jiffies_to_msecs(jiffies - jiffies_start), cputime,
|
||||
did_some_progress, pages_reclaimed, retry, order, *gfp, gfp,
|
||||
K(global_node_page_state(NR_ACTIVE_ANON)),
|
||||
K(global_node_page_state(NR_INACTIVE_ANON)),
|
||||
K(global_node_page_state(NR_ACTIVE_FILE)),
|
||||
K(global_node_page_state(NR_INACTIVE_FILE)));
|
||||
}
|
||||
|
||||
static void sec_mm_cma_debug_show_areas(void *data, bool *show)
|
||||
{
|
||||
*show = true;
|
||||
}
|
||||
|
||||
static void sec_mm_dma_heap_buffer_alloc_start(void *data,
|
||||
const char *name, size_t len, u32 fd_flags, u64 heap_flags)
|
||||
{
|
||||
tracing_mark_begin("%s(%s, %zu, 0x%x, 0x%llx)", "dma-buf_alloc",
|
||||
name, len, fd_flags, heap_flags);
|
||||
}
|
||||
|
||||
static void sec_mm_dma_heap_buffer_alloc_end(void *data,
|
||||
const char *name, size_t len)
|
||||
{
|
||||
tracing_mark_end();
|
||||
}
|
||||
|
||||
static void sec_mm_tracing_mark_begin(struct file *file, pgoff_t pgoff,
|
||||
unsigned int size, bool sync)
|
||||
{
|
||||
char buf[TRACING_MARK_BUF_SIZE], *path;
|
||||
|
||||
if (!trace_tracing_mark_write_enabled())
|
||||
return;
|
||||
|
||||
path = file_path(file, buf, TRACING_MARK_BUF_SIZE);
|
||||
if (IS_ERR(path)) {
|
||||
sprintf(buf, "file_path failed(%ld)", PTR_ERR(path));
|
||||
path = buf;
|
||||
}
|
||||
tracing_mark_begin("%d , %s , %lu , %d", sync, path, pgoff, size);
|
||||
}
|
||||
|
||||
static void sec_mm_filemap_fault_start(void *data,
|
||||
struct file *file, pgoff_t pgoff)
|
||||
{
|
||||
sec_mm_tracing_mark_begin(file, pgoff, 1, true);
|
||||
}
|
||||
|
||||
static void sec_mm_filemap_fault_end(void *data,
|
||||
struct file *file, pgoff_t pgoff)
|
||||
{
|
||||
tracing_mark_end();
|
||||
}
|
||||
|
||||
static void sec_mm_page_cache_readahead_start(void *data,
|
||||
struct file *file, pgoff_t pgoff, unsigned int size, bool sync)
|
||||
{
|
||||
sec_mm_tracing_mark_begin(file, pgoff, size, sync);
|
||||
}
|
||||
|
||||
static void sec_mm_page_cache_readahead_end(void *data,
|
||||
struct file *file, pgoff_t pgoff)
|
||||
{
|
||||
tracing_mark_end();
|
||||
}
|
||||
|
||||
static void sec_mm_show_mem(void *data, unsigned int filter, nodemask_t *nodes)
|
||||
{
|
||||
long dma_heap_pool_size_kb = dma_heap_try_get_pool_size_kb();
|
||||
|
||||
pr_info("%s: %lu kB\n", "VmallocUsed", K(vmalloc_nr_pages()));
|
||||
if (dma_heap_pool_size_kb >= 0)
|
||||
pr_info("%s: %ld kB\n", "DmaHeapPool", dma_heap_pool_size_kb);
|
||||
|
||||
if (in_interrupt())
|
||||
return;
|
||||
|
||||
if (__ratelimit(&dump_tasks_rs))
|
||||
mm_debug_dump_tasks();
|
||||
#ifdef CONFIG_SEC_MM_DUMP_DMABUF_TASKS
|
||||
if (__ratelimit(&dma_buf_tasks_rs))
|
||||
mm_debug_dump_dma_buf_tasks();
|
||||
#endif
|
||||
}
|
||||
|
||||
static void sec_mm_meminfo(void *data, struct seq_file *m)
|
||||
{
|
||||
long dma_heap_pool_size_kb = dma_heap_try_get_pool_size_kb();
|
||||
|
||||
if (dma_heap_pool_size_kb >= 0)
|
||||
show_val_meminfo(m, "DmaHeapPool", dma_heap_pool_size_kb);
|
||||
}
|
||||
|
||||
#define WINDOW_MIN_NS 1000000000 /* 1s */
|
||||
#define THRESHOLD_MIN_NS 100000000 /* 100ms */
|
||||
|
||||
static void sec_mm_psi_monitor(void *data,
|
||||
struct psi_trigger *t, u64 now, u64 growth)
|
||||
{
|
||||
if (t->win.size >= WINDOW_MIN_NS && t->threshold >= THRESHOLD_MIN_NS)
|
||||
printk_deferred("psi: %s %llu %llu %d %llu %llu\n",
|
||||
"update_triggers", now, t->last_event_time,
|
||||
t->state, t->threshold, growth);
|
||||
}
|
||||
|
||||
static void sec_mm_warn_alloc_show_mem_bypass(void *data, bool *bypass)
|
||||
{
|
||||
static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
|
||||
|
||||
if (!__ratelimit(&show_mem_rs))
|
||||
*bypass = true;
|
||||
}
|
||||
|
||||
static void sec_mm_warn_alloc_tune_ratelimit(void *data,
|
||||
struct ratelimit_state *rs)
|
||||
{
|
||||
rs->interval = 5*HZ;
|
||||
rs->burst = 2;
|
||||
}
|
||||
|
||||
static void sec_mm_vm_unmapped_area(void *data,
|
||||
unsigned long addr, struct vm_unmapped_area_info *info)
|
||||
{
|
||||
if (!IS_ERR_VALUE(addr))
|
||||
return;
|
||||
|
||||
pr_warn_ratelimited("%s err:%ld total_vm:0x%lx flags:0x%lx len:0x%lx low:0x%lx high:0x%lx mask:0x%lx offset:0x%lx\n",
|
||||
__func__, addr, current->mm->total_vm, info->flags,
|
||||
info->length, info->low_limit, info->high_limit,
|
||||
info->align_mask, info->align_offset);
|
||||
}
|
||||
|
||||
void init_sec_mm_debug(void)
|
||||
{
|
||||
register_trace_android_vh_alloc_contig_range_not_isolated(
|
||||
sec_mm_alloc_contig_range_not_isolated, NULL);
|
||||
register_trace_android_vh_alloc_pages_slowpath_start(
|
||||
sec_mm_alloc_pages_slowpath_start, NULL);
|
||||
register_trace_android_vh_alloc_pages_slowpath_end(
|
||||
sec_mm_alloc_pages_slowpath_end, NULL);
|
||||
register_trace_android_vh_cma_debug_show_areas(
|
||||
sec_mm_cma_debug_show_areas, NULL);
|
||||
register_trace_android_vh_dma_heap_buffer_alloc_start(
|
||||
sec_mm_dma_heap_buffer_alloc_start, NULL);
|
||||
register_trace_android_vh_dma_heap_buffer_alloc_end(
|
||||
sec_mm_dma_heap_buffer_alloc_end, NULL);
|
||||
register_trace_android_vh_filemap_fault_start(
|
||||
sec_mm_filemap_fault_start, NULL);
|
||||
register_trace_android_vh_filemap_fault_end(
|
||||
sec_mm_filemap_fault_end, NULL);
|
||||
register_trace_android_vh_page_cache_readahead_start(
|
||||
sec_mm_page_cache_readahead_start, NULL);
|
||||
register_trace_android_vh_page_cache_readahead_end(
|
||||
sec_mm_page_cache_readahead_end, NULL);
|
||||
register_trace_android_vh_meminfo_proc_show(sec_mm_meminfo, NULL);
|
||||
register_trace_android_vh_psi_update_triggers(sec_mm_psi_monitor, NULL);
|
||||
register_trace_android_vh_warn_alloc_show_mem_bypass(
|
||||
sec_mm_warn_alloc_show_mem_bypass, NULL);
|
||||
register_trace_android_vh_warn_alloc_tune_ratelimit(
|
||||
sec_mm_warn_alloc_tune_ratelimit, NULL);
|
||||
register_trace_prio_android_vh_show_mem(sec_mm_show_mem, NULL, 0);
|
||||
register_trace_vm_unmapped_area(sec_mm_vm_unmapped_area, NULL);
|
||||
}
|
||||
|
||||
void exit_sec_mm_debug(void)
|
||||
{
|
||||
unregister_trace_android_vh_alloc_contig_range_not_isolated(
|
||||
sec_mm_alloc_contig_range_not_isolated, NULL);
|
||||
unregister_trace_android_vh_alloc_pages_slowpath_start(
|
||||
sec_mm_alloc_pages_slowpath_start, NULL);
|
||||
unregister_trace_android_vh_alloc_pages_slowpath_end(
|
||||
sec_mm_alloc_pages_slowpath_end, NULL);
|
||||
unregister_trace_android_vh_cma_debug_show_areas(
|
||||
sec_mm_cma_debug_show_areas, NULL);
|
||||
unregister_trace_android_vh_dma_heap_buffer_alloc_start(
|
||||
sec_mm_dma_heap_buffer_alloc_start, NULL);
|
||||
unregister_trace_android_vh_dma_heap_buffer_alloc_end(
|
||||
sec_mm_dma_heap_buffer_alloc_end, NULL);
|
||||
unregister_trace_android_vh_filemap_fault_start(
|
||||
sec_mm_filemap_fault_start, NULL);
|
||||
unregister_trace_android_vh_filemap_fault_end(
|
||||
sec_mm_filemap_fault_end, NULL);
|
||||
unregister_trace_android_vh_page_cache_readahead_start(
|
||||
sec_mm_page_cache_readahead_start, NULL);
|
||||
unregister_trace_android_vh_page_cache_readahead_end(
|
||||
sec_mm_page_cache_readahead_end, NULL);
|
||||
unregister_trace_android_vh_show_mem(sec_mm_show_mem, NULL);
|
||||
unregister_trace_android_vh_meminfo_proc_show(sec_mm_meminfo, NULL);
|
||||
unregister_trace_android_vh_psi_update_triggers(
|
||||
sec_mm_psi_monitor, NULL);
|
||||
unregister_trace_android_vh_warn_alloc_show_mem_bypass(
|
||||
sec_mm_warn_alloc_show_mem_bypass, NULL);
|
||||
unregister_trace_android_vh_warn_alloc_tune_ratelimit(
|
||||
sec_mm_warn_alloc_tune_ratelimit, NULL);
|
||||
unregister_trace_vm_unmapped_area(sec_mm_vm_unmapped_area, NULL);
|
||||
}
|
35
mm/sec_mm/sec_mm_init.c
Normal file
35
mm/sec_mm/sec_mm_init.c
Normal file
@@ -0,0 +1,35 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* sec_mm/
|
||||
*
|
||||
* Copyright (C) 2020 Samsung Electronics
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/sec_mm.h>
|
||||
|
||||
static int __init sec_mm_init(void)
|
||||
{
|
||||
init_lowfile_detect();
|
||||
init_panic_handler();
|
||||
init_sec_mm_debug();
|
||||
init_sec_mm_tune();
|
||||
init_sec_mm_sysfs();
|
||||
|
||||
pr_info("sec_mm init was done\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit sec_mm_exit(void)
|
||||
{
|
||||
exit_lowfile_detect();
|
||||
exit_panic_handler();
|
||||
exit_sec_mm_debug();
|
||||
exit_sec_mm_tune();
|
||||
exit_sec_mm_sysfs();
|
||||
}
|
||||
module_init(sec_mm_init);
|
||||
module_exit(sec_mm_exit);
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_IMPORT_NS(DMA_BUF);
|
134
mm/sec_mm/sec_mm_sysfs.c
Normal file
134
mm/sec_mm/sec_mm_sysfs.c
Normal file
@@ -0,0 +1,134 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* sec_mm/
|
||||
*
|
||||
* Copyright (C) 2024 Samsung Electronics
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/sec_mm.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/vmstat.h>
|
||||
|
||||
ATOMIC_NOTIFIER_HEAD(am_app_launch_notifier);
|
||||
EXPORT_SYMBOL_GPL(am_app_launch_notifier);
|
||||
|
||||
bool am_app_launch;
|
||||
EXPORT_SYMBOL_GPL(am_app_launch);
|
||||
|
||||
#ifdef CONFIG_SYSFS
#define MEM_BOOST_MAX_TIME (5 * HZ) /* 5 sec */

/* mem_boost throttles only kswapd's behavior */
enum mem_boost {
	NO_BOOST,
	BOOST_MID = 1,
	BOOST_HIGH = 2,
	BOOST_KILL = 3,
};

static int mem_boost_mode = NO_BOOST;
static unsigned long last_mode_change;

/*
 * True while a boost of at least BOOST_HIGH is in effect.  A boost
 * silently expires MEM_BOOST_MAX_TIME after it was last stored.
 */
bool mem_boost_mode_high(void)
{
	if (time_after(jiffies, last_mode_change + MEM_BOOST_MAX_TIME))
		mem_boost_mode = NO_BOOST;
	return mem_boost_mode >= BOOST_HIGH;
}
EXPORT_SYMBOL_GPL(mem_boost_mode_high);
|
||||
|
||||
static ssize_t mem_boost_mode_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
if (time_after(jiffies, last_mode_change + MEM_BOOST_MAX_TIME))
|
||||
mem_boost_mode = NO_BOOST;
|
||||
return sprintf(buf, "%d\n", mem_boost_mode);
|
||||
}
|
||||
|
||||
static ssize_t mem_boost_mode_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, const char *buf, size_t count)
|
||||
{
|
||||
int mode, err;
|
||||
|
||||
err = kstrtoint(buf, 10, &mode);
|
||||
if (err || mode > BOOST_KILL || mode < NO_BOOST)
|
||||
return -EINVAL;
|
||||
mem_boost_mode = mode;
|
||||
last_mode_change = jiffies;
|
||||
#ifdef CONFIG_RBIN
|
||||
if (mem_boost_mode >= BOOST_HIGH)
|
||||
wake_dmabuf_rbin_heap_prereclaim();
|
||||
#endif
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static struct kobj_attribute mem_boost_mode_attr = __ATTR_RW(mem_boost_mode);
|
||||
|
||||
static ssize_t am_app_launch_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%d\n", am_app_launch ? 1 : 0);
|
||||
}
|
||||
|
||||
static ssize_t am_app_launch_store(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, const char *buf, size_t count)
|
||||
{
|
||||
int mode, err;
|
||||
bool am_app_launch_new;
|
||||
|
||||
err = kstrtoint(buf, 10, &mode);
|
||||
if (err || (mode != 0 && mode != 1))
|
||||
return -EINVAL;
|
||||
|
||||
am_app_launch_new = mode ? true : false;
|
||||
if (am_app_launch != am_app_launch_new)
|
||||
atomic_notifier_call_chain(&am_app_launch_notifier, mode, NULL);
|
||||
am_app_launch = am_app_launch_new;
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static struct kobj_attribute am_app_launch_attr = __ATTR_RW(am_app_launch);
|
||||
|
||||
static ssize_t mmap_readaround_limit_show(struct kobject *kobj,
|
||||
struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "%u\n", mmap_readaround_limit);
|
||||
}
|
||||
|
||||
static struct kobj_attribute mmap_readaround_limit_attr = __ATTR_RO(mmap_readaround_limit);
|
||||
|
||||
static struct attribute *sec_mm_attrs[] = {
|
||||
&mem_boost_mode_attr.attr,
|
||||
&am_app_launch_attr.attr,
|
||||
&mmap_readaround_limit_attr.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct attribute_group sec_mm_attr_group = {
|
||||
.attrs = sec_mm_attrs,
|
||||
.name = "sec_mm",
|
||||
};
|
||||
|
||||
void init_sec_mm_sysfs(void)
|
||||
{
|
||||
if (sysfs_create_group(kernel_kobj, &sec_mm_attr_group))
|
||||
pr_err("sec_mm_sysfs: failed to create\n");
|
||||
}
|
||||
|
||||
void exit_sec_mm_sysfs(void)
|
||||
{
|
||||
}
|
||||
#else
|
||||
void init_sec_mm_sysfs(void)
|
||||
{
|
||||
}
|
||||
|
||||
void exit_sec_mm_sysfs(void);
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
5
mm/sec_mm/sec_mm_trace.c
Normal file
5
mm/sec_mm/sec_mm_trace.c
Normal file
@@ -0,0 +1,5 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/samsung.h>
|
||||
|
174
mm/sec_mm/sec_mm_tune.c
Normal file
174
mm/sec_mm/sec_mm_tune.c
Normal file
@@ -0,0 +1,174 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* sec_mm/
|
||||
*
|
||||
* Copyright (C) 2024 Samsung Electronics
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/swap.h>
|
||||
#include <linux/sec_mm.h>
|
||||
#include <linux/jiffies.h>
|
||||
#include <trace/hooks/mm.h>
|
||||
#include <trace/hooks/vmscan.h>
|
||||
|
||||
static void sec_mm_cma_alloc_set_max_retries(void *data, int *max_retries)
|
||||
{
|
||||
*max_retries = 10;
|
||||
}
|
||||
|
||||
static void sec_mm_drain_all_pages_bypass(void *data, gfp_t gfp_mask,
|
||||
unsigned int order, unsigned long alloc_flags, int migratetype,
|
||||
unsigned long did_some_progress, bool *bypass)
|
||||
{
|
||||
*bypass = mem_boost_mode_high();
|
||||
}
|
||||
|
||||
static void sec_mm_rebalance_anon_lru_bypass(void *data, bool *bypass)
|
||||
{
|
||||
*bypass = mem_boost_mode_high();
|
||||
}
|
||||
|
||||
static void sec_mm_shrink_slab_bypass(void *data, gfp_t gfp_mask,
|
||||
int nid, struct mem_cgroup *memcg, int priority, bool *bypass)
|
||||
{
|
||||
/*
|
||||
* Allow shrink_slab only for kswapd, d.reclaim with priority == 0 and
|
||||
* drop caches.
|
||||
*/
|
||||
if (!current_is_kswapd() && priority > 0)
|
||||
*bypass = true;
|
||||
}
|
||||
|
||||
static void sec_mm_suitable_migration_target_bypass(void *data,
|
||||
struct page *page, bool *bypass)
|
||||
{
|
||||
if (is_migrate_cma_or_isolate_page(page))
|
||||
*bypass = true;
|
||||
}
|
||||
|
||||
#if CONFIG_MMAP_READAROUND_LIMIT == 0
|
||||
unsigned int mmap_readaround_limit = VM_READAHEAD_PAGES;
|
||||
#else
|
||||
unsigned int mmap_readaround_limit = CONFIG_MMAP_READAROUND_LIMIT;
|
||||
#endif
|
||||
|
||||
static void sec_mm_tune_mmap_readaround(void *data,
|
||||
unsigned int ra_pages, pgoff_t pgoff, pgoff_t *start,
|
||||
unsigned int *size, unsigned int *async_size)
|
||||
{
|
||||
unsigned int new_ra_pages = mmap_readaround_limit;
|
||||
|
||||
if (mem_boost_mode_high())
|
||||
new_ra_pages = min_t(unsigned int, new_ra_pages, 8);
|
||||
if (ra_pages <= new_ra_pages)
|
||||
return;
|
||||
*start = max_t(long, 0, pgoff - new_ra_pages / 2);
|
||||
*size = new_ra_pages;
|
||||
*async_size = new_ra_pages / 4;
|
||||
}
|
||||
|
||||
enum scan_balance {
|
||||
SCAN_EQUAL,
|
||||
SCAN_FRACT,
|
||||
SCAN_ANON,
|
||||
SCAN_FILE,
|
||||
};
|
||||
|
||||
static void sec_mm_tune_scan_control(void *data, bool *skip_swap)
|
||||
{
|
||||
*skip_swap = true;
|
||||
}
|
||||
|
||||
static unsigned long low_threshold;
|
||||
|
||||
static void sec_mm_tune_scan_type(void *data, enum scan_balance *scan_type)
|
||||
{
|
||||
if (*scan_type == SCAN_FRACT && current_is_kswapd() &&
|
||||
mem_boost_mode_high() && !file_is_tiny(low_threshold))
|
||||
*scan_type = SCAN_FILE;
|
||||
}
|
||||
|
||||
static void sec_mm_use_vm_swappiness(void *data, bool *use_vm_swappiness)
|
||||
{
|
||||
*use_vm_swappiness = true;
|
||||
}
|
||||
|
||||
static void sec_mm_set_balance_anon_file_reclaim(void *data,
|
||||
bool *balance_anon_file_reclaim)
|
||||
{
|
||||
*balance_anon_file_reclaim = true;
|
||||
}
|
||||
|
||||
#define ZS_SHRINKER_THRESHOLD 1024
|
||||
#define ZS_SHRINKER_INTERVAL (10 * HZ)
|
||||
|
||||
static unsigned long time_stamp;
|
||||
|
||||
static void sec_mm_zs_shrinker_adjust(void *data, unsigned long *pages_to_free)
|
||||
{
|
||||
if (*pages_to_free > ZS_SHRINKER_THRESHOLD)
|
||||
time_stamp = jiffies + ZS_SHRINKER_INTERVAL;
|
||||
else
|
||||
*pages_to_free = 0;
|
||||
}
|
||||
|
||||
static void sec_mm_zs_shrinker_bypass(void *data, bool *bypass)
|
||||
{
|
||||
if (!current_is_kswapd() || time_is_after_jiffies(time_stamp))
|
||||
*bypass = true;
|
||||
}
|
||||
|
||||
void init_sec_mm_tune(void)
|
||||
{
|
||||
low_threshold = get_low_threshold();
|
||||
|
||||
register_trace_android_vh_cma_alloc_set_max_retries(
|
||||
sec_mm_cma_alloc_set_max_retries, NULL);
|
||||
register_trace_android_vh_drain_all_pages_bypass(
|
||||
sec_mm_drain_all_pages_bypass, NULL);
|
||||
register_trace_android_vh_rebalance_anon_lru_bypass(
|
||||
sec_mm_rebalance_anon_lru_bypass, NULL);
|
||||
register_trace_android_vh_shrink_slab_bypass(
|
||||
sec_mm_shrink_slab_bypass, NULL);
|
||||
register_trace_android_vh_suitable_migration_target_bypass(
|
||||
sec_mm_suitable_migration_target_bypass, NULL);
|
||||
register_trace_android_vh_tune_mmap_readaround(
|
||||
sec_mm_tune_mmap_readaround, NULL);
|
||||
register_trace_android_vh_tune_scan_control(
|
||||
sec_mm_tune_scan_control, NULL);
|
||||
register_trace_android_vh_tune_scan_type(sec_mm_tune_scan_type, NULL);
|
||||
register_trace_android_vh_use_vm_swappiness(
|
||||
sec_mm_use_vm_swappiness, NULL);
|
||||
register_trace_android_vh_zs_shrinker_adjust(
|
||||
sec_mm_zs_shrinker_adjust, NULL);
|
||||
register_trace_android_vh_zs_shrinker_bypass(
|
||||
sec_mm_zs_shrinker_bypass, NULL);
|
||||
register_trace_android_rvh_set_balance_anon_file_reclaim(
|
||||
sec_mm_set_balance_anon_file_reclaim, NULL);
|
||||
}
|
||||
|
||||
void exit_sec_mm_tune(void)
|
||||
{
|
||||
unregister_trace_android_vh_cma_alloc_set_max_retries(
|
||||
sec_mm_cma_alloc_set_max_retries, NULL);
|
||||
unregister_trace_android_vh_drain_all_pages_bypass(
|
||||
sec_mm_drain_all_pages_bypass, NULL);
|
||||
unregister_trace_android_vh_rebalance_anon_lru_bypass(
|
||||
sec_mm_rebalance_anon_lru_bypass, NULL);
|
||||
unregister_trace_android_vh_shrink_slab_bypass(
|
||||
sec_mm_shrink_slab_bypass, NULL);
|
||||
unregister_trace_android_vh_suitable_migration_target_bypass(
|
||||
sec_mm_suitable_migration_target_bypass, NULL);
|
||||
unregister_trace_android_vh_tune_mmap_readaround(
|
||||
sec_mm_tune_mmap_readaround, NULL);
|
||||
unregister_trace_android_vh_tune_scan_control(
|
||||
sec_mm_tune_scan_control, NULL);
|
||||
unregister_trace_android_vh_tune_scan_type(sec_mm_tune_scan_type, NULL);
|
||||
unregister_trace_android_vh_use_vm_swappiness(
|
||||
sec_mm_use_vm_swappiness, NULL);
|
||||
unregister_trace_android_vh_zs_shrinker_adjust(
|
||||
sec_mm_zs_shrinker_adjust, NULL);
|
||||
unregister_trace_android_vh_zs_shrinker_bypass(
|
||||
sec_mm_zs_shrinker_bypass, NULL);
|
||||
}
|
Reference in New Issue
Block a user