Add Samsung-specific changes

2025-08-11 14:29:00 +02:00
parent c66122e619
commit 4d134a1294
2688 changed files with 1127995 additions and 11475 deletions

drivers/devfreq/Kconfig

@@ -73,6 +73,23 @@ config DEVFREQ_GOV_PASSIVE
through sysfs entries. The passive governor recommends that
devfreq device uses the OPP table to get the frequency/voltage.
config DEVFREQ_GOV_QCOM_ADRENO_TZ
tristate "Qualcomm Technologies, Inc. GPU frequency governor"
help
GPU frequency governor for the Adreno GPU. Sets the frequency
using an "on demand" algorithm in conjunction with other
components on Adreno platforms. This is not useful for non-Adreno
devices.
config DEVFREQ_GOV_QCOM_GPUBW_MON
tristate "Qualcomm Technologies, Inc. GPU bandwidth governor"
depends on DEVFREQ_GOV_QCOM_ADRENO_TZ
help
This governor works together with the Adreno GPU frequency
governor to select bus frequency votes using an "on demand"
algorithm. It is not useful for non-Adreno targets.
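# Both options are normally enabled together on Adreno targets, since the
# bandwidth governor depends on the frequency governor. A minimal,
# illustrative defconfig fragment (exact selection is platform dependent):
#
#   CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ=y
#   CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON=y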
comment "DEVFREQ Drivers"
config ARM_EXYNOS_BUS_DEVFREQ

drivers/devfreq/Makefile

@@ -6,6 +6,8 @@ obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
obj-$(CONFIG_DEVFREQ_GOV_PASSIVE) += governor_passive.o
obj-$(CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ) += governor_msm_adreno_tz.o
obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_gpubw_mon.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS_BUS_DEVFREQ) += exynos-bus.o

drivers/devfreq/governor_gpubw_mon.c (new file)

@@ -0,0 +1,346 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/devfreq.h>
#include <linux/slab.h>
#include <linux/soc/qcom/msm_adreno_devfreq.h>
#include "governor.h"
#define MIN_BUSY 1000
#define LONG_FLOOR 50000
#define HIST 5
#define TARGET 80
#define CAP 75
#define WAIT_THRESHOLD 10
/* The AB vote is in multiples of BW_STEP megabytes */
#define BW_STEP 50
static void _update_cutoff(struct devfreq_msm_adreno_tz_data *priv,
unsigned int norm_max)
{
int i;
priv->bus.max = norm_max;
for (i = 0; i < priv->bus.num; i++) {
priv->bus.up[i] = priv->bus.p_up[i] * norm_max / 100;
priv->bus.down[i] = priv->bus.p_down[i] * norm_max / 100;
}
}
static ssize_t cur_ab_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct devfreq *df = to_devfreq(dev);
struct msm_busmon_extended_profile *bus_profile = container_of(
(df->profile),
struct msm_busmon_extended_profile,
profile);
return scnprintf(buf, PAGE_SIZE, "%lu\n", bus_profile->ab_mbytes);
}
static ssize_t sampling_interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct devfreq *df = to_devfreq(dev);
struct msm_busmon_extended_profile *bus_profile = container_of(
(df->profile),
struct msm_busmon_extended_profile,
profile);
return scnprintf(buf, PAGE_SIZE, "%d\n", bus_profile->sampling_ms);
}
static ssize_t sampling_interval_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct devfreq *df = to_devfreq(dev);
struct msm_busmon_extended_profile *bus_profile = container_of(
(df->profile),
struct msm_busmon_extended_profile,
profile);
u32 value;
int ret;
ret = kstrtou32(buf, 0, &value);
if (ret)
return ret;
bus_profile->sampling_ms = value;
return count;
}
static DEVICE_ATTR_RW(sampling_interval);
static DEVICE_ATTR_RO(cur_ab);
static const struct device_attribute *gpubw_attr_list[] = {
&dev_attr_sampling_interval,
&dev_attr_cur_ab,
NULL
};
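/*
 * Derive a bus hint from the accumulated RAM/GPU time counters. The logic
 * proceeds in three stages: track a floating high watermark of RAM cycles
 * per unit time, escalate the vote when the GPU is stalled waiting on
 * memory at its minimum frequency, and otherwise compare the normalized
 * cycle count against the per-level up/down cut-offs.
 */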
static u32 generate_hint(struct devfreq_msm_adreno_tz_data *priv, int buslevel,
unsigned long freq, unsigned long minfreq)
{
int act_level;
int norm_max_cycles;
int norm_cycles;
int wait_active_percent;
int gpu_percent;
norm_max_cycles = (unsigned int)(priv->bus.ram_time) /
(unsigned int) priv->bus.total_time;
norm_cycles = (unsigned int)(priv->bus.ram_time + priv->bus.ram_wait) /
(unsigned int) priv->bus.total_time;
wait_active_percent = (100 * (unsigned int)priv->bus.ram_wait) /
(unsigned int) priv->bus.ram_time;
gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
(unsigned int) priv->bus.total_time;
/*
* If there's a new high watermark, update the cutoffs and send the
* FAST hint, provided that we are using a floating watermark.
* Otherwise check the current value against the current
* cutoffs.
*/
if (norm_max_cycles > priv->bus.max && priv->bus.floating) {
_update_cutoff(priv, norm_max_cycles);
return BUSMON_FLAG_FAST_HINT;
}
/* Increase BW vote to avoid starving GPU for BW if required */
if (priv->fast_bus_hint && minfreq == freq) {
if (wait_active_percent > 95)
return BUSMON_FLAG_SUPER_FAST_HINT;
if (wait_active_percent > 80)
return BUSMON_FLAG_FAST_HINT;
}
/* GPU votes for IB, not AB, so don't under-vote the system */
norm_cycles = (100 * norm_cycles) / TARGET;
act_level = max_t(int, buslevel, 0);
act_level = min_t(int, act_level, priv->bus.num - 1);
if ((norm_cycles > priv->bus.up[act_level] ||
wait_active_percent > WAIT_THRESHOLD) &&
gpu_percent > CAP)
return BUSMON_FLAG_FAST_HINT;
if (norm_cycles < priv->bus.down[act_level] && buslevel)
return BUSMON_FLAG_SLOW_HINT;
return 0;
}
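/*
 * devfreq get_target_freq callback: statistics are accumulated across
 * calls until at least sampling_ms worth of time has been collected,
 * and only then is a bus hint generated and the AB (average bandwidth)
 * vote recomputed, after which the accumulators are reset.
 */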
static int devfreq_gpubw_get_target(struct devfreq *df,
unsigned long *freq)
{
struct devfreq_msm_adreno_tz_data *priv = df->data;
struct msm_busmon_extended_profile *bus_profile = container_of(
(df->profile),
struct msm_busmon_extended_profile,
profile);
struct devfreq_dev_status *stats = &df->last_status;
struct xstats b = {0};
int result;
int norm_ab;
unsigned long ab_mbytes = 0;
/*
* Normalized AB should at max usage be the gpu_bimc frequency in MHz.
* Start with a reasonable value and let the system push it up to max.
*/
static int norm_ab_max = 300;
if (priv == NULL)
return 0;
stats->private_data = &b;
result = devfreq_update_stats(df);
/* Return if devfreq is not enabled */
if (result)
return result;
*freq = stats->current_frequency;
priv->bus.total_time += stats->total_time;
priv->bus.gpu_time += stats->busy_time;
priv->bus.ram_time += b.ram_time;
priv->bus.ram_wait += b.ram_wait;
if (priv->bus.total_time < bus_profile->sampling_ms)
return result;
bus_profile->flag = generate_hint(priv, b.buslevel, *freq,
b.gpu_minfreq);
/* Calculate the AB vote based on bus width if defined */
if (priv->bus.width) {
norm_ab = (unsigned int)priv->bus.ram_time /
(unsigned int) priv->bus.total_time;
/* Calculate AB in megabytes, rounded up to a multiple of BW_STEP */
ab_mbytes = (norm_ab * priv->bus.width * 1000000ULL) >> 20;
bus_profile->ab_mbytes = roundup(ab_mbytes, BW_STEP);
} else if (bus_profile->flag) {
/* Re-calculate the AB percentage for a new IB vote */
norm_ab = (unsigned int)priv->bus.ram_time /
(unsigned int) priv->bus.total_time;
if (norm_ab > norm_ab_max)
norm_ab_max = norm_ab;
bus_profile->percent_ab = (100 * norm_ab) / norm_ab_max;
}
priv->bus.total_time = 0;
priv->bus.gpu_time = 0;
priv->bus.ram_time = 0;
priv->bus.ram_wait = 0;
return result;
}
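/*
 * Governor start: adopt the private data handed in through the extended
 * profile and derive the per-level up/down cut-over percentages from the
 * IB vote table, where each level's threshold is its IB vote expressed
 * as a percentage of the maximum vote, offset by HIST for hysteresis.
 */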
static int gpubw_start(struct devfreq *devfreq)
{
struct devfreq_msm_adreno_tz_data *priv;
struct msm_busmon_extended_profile *bus_profile = container_of(
(devfreq->profile),
struct msm_busmon_extended_profile,
profile);
unsigned int t1, t2 = 2 * HIST;
int i, bus_size;
devfreq->data = bus_profile->private_data;
priv = devfreq->data;
bus_size = sizeof(u32) * priv->bus.num;
priv->bus.up = kzalloc(bus_size, GFP_KERNEL);
priv->bus.down = kzalloc(bus_size, GFP_KERNEL);
priv->bus.p_up = kzalloc(bus_size, GFP_KERNEL);
priv->bus.p_down = kzalloc(bus_size, GFP_KERNEL);
if (priv->bus.up == NULL || priv->bus.down == NULL ||
priv->bus.p_up == NULL || priv->bus.p_down == NULL)
return -ENOMEM;
/* Set up the cut-over percentages for the bus calculation. */
for (i = 0; i < priv->bus.num; i++) {
t1 = (u32)(100 * priv->bus.ib_kbps[i]) /
(u32)priv->bus.ib_kbps[priv->bus.num - 1];
priv->bus.p_up[i] = t1 - HIST;
priv->bus.p_down[i] = t2 - 2 * HIST;
t2 = t1;
}
/* Set the upper-most and lower-most bounds correctly. */
priv->bus.p_down[0] = 0;
for (i = 0; i < priv->bus.num; i++) {
if (priv->bus.p_down[i] < 2 * HIST)
priv->bus.p_down[i] = 2 * HIST;
}
if (priv->bus.num >= 1)
priv->bus.p_up[priv->bus.num - 1] = 100;
_update_cutoff(priv, priv->bus.max);
bus_profile->sampling_ms = LONG_FLOOR;
for (i = 0; gpubw_attr_list[i] != NULL; i++)
device_create_file(&devfreq->dev, gpubw_attr_list[i]);
return 0;
}
static int gpubw_stop(struct devfreq *devfreq)
{
struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
int i;
for (i = 0; gpubw_attr_list[i] != NULL; i++)
device_remove_file(&devfreq->dev, gpubw_attr_list[i]);
if (priv) {
kfree(priv->bus.up);
kfree(priv->bus.down);
kfree(priv->bus.p_up);
kfree(priv->bus.p_down);
}
devfreq->data = NULL;
return 0;
}
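/*
 * Only the kgsl bus-monitor device is expected to use this governor, so
 * any other parent device is rejected up front.
 */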
static int devfreq_gpubw_event_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
int result = 0;
unsigned long freq;
if (strcmp(dev_name(devfreq->dev.parent), "kgsl-busmon"))
return -EINVAL;
mutex_lock(&devfreq->lock);
freq = devfreq->previous_freq;
switch (event) {
case DEVFREQ_GOV_START:
result = gpubw_start(devfreq);
break;
case DEVFREQ_GOV_STOP:
result = gpubw_stop(devfreq);
break;
case DEVFREQ_GOV_RESUME:
/* TODO: re-evaluate the bus vote on resume */
/* result = update_devfreq(devfreq); */
break;
case DEVFREQ_GOV_SUSPEND:
{
struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
if (priv) {
priv->bus.total_time = 0;
priv->bus.gpu_time = 0;
priv->bus.ram_time = 0;
}
}
break;
default:
result = 0;
break;
}
mutex_unlock(&devfreq->lock);
return result;
}
static struct devfreq_governor devfreq_gpubw = {
.name = "gpubw_mon",
.get_target_freq = devfreq_gpubw_get_target,
.event_handler = devfreq_gpubw_event_handler,
.flags = DEVFREQ_GOV_FLAG_IMMUTABLE,
};
int devfreq_gpubw_init(void)
{
return devfreq_add_governor(&devfreq_gpubw);
}
subsys_initcall(devfreq_gpubw_init);
void devfreq_gpubw_exit(void)
{
int ret;
ret = devfreq_remove_governor(&devfreq_gpubw);
if (ret)
pr_err("%s: failed remove governor %d\n", __func__, ret);
}
module_exit(devfreq_gpubw_exit);
MODULE_DESCRIPTION("GPU bus bandwidth voting driver. Uses VBIF counters");
MODULE_LICENSE("GPL");

drivers/devfreq/governor_msm_adreno_tz.c (new file)

@@ -0,0 +1,586 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/cacheflush.h>
#include <linux/devfreq.h>
#include <linux/dma-mapping.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/math64.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/qtee_shmbridge.h>
#include <linux/soc/qcom/msm_adreno_devfreq.h>
#include "governor.h"
static DEFINE_SPINLOCK(tz_lock);
static DEFINE_SPINLOCK(sample_lock);
static DEFINE_SPINLOCK(suspend_lock);
/*
 * FLOOR is 5 msec to capture up to 3 redraws
 * per frame for 60fps content.
 */
#define FLOOR 5000
/*
* MIN_BUSY is 1 msec for the sample to be sent
*/
#define MIN_BUSY 1000
#define MAX_TZ_VERSION 0
/*
* CEILING is 50msec, larger than any standard
* frame length, but less than the idle timer.
*/
#define CEILING 50000
#define TZ_RESET_ID 0x3
#define TZ_UPDATE_ID 0x4
#define TZ_INIT_ID 0x6
#define TZ_RESET_ID_64 0x7
#define TZ_UPDATE_ID_64 0x8
#define TZ_INIT_ID_64 0x9
#define TZ_V2_UPDATE_ID_64 0xA
#define TZ_V2_INIT_ID_64 0xB
#define TZ_V2_INIT_CA_ID_64 0xC
#define TZ_V2_UPDATE_WITH_CA_ID_64 0xD
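/*
 * Legacy SMC command IDs for the DCVS algorithm running in TrustZone.
 * The actual calls go through the qcom_scm_dcvs_*() helpers, so the raw
 * IDs above are retained for reference only.
 */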
#define TAG "msm_adreno_tz: "
static u64 suspend_time;
static u64 suspend_start;
static unsigned long acc_total, acc_relative_busy;
/*
 * Returns the GPU suspend time in milliseconds.
 */
static u64 suspend_time_ms(void)
{
u64 suspend_sampling_time;
u64 time_diff = 0;
if (suspend_start == 0)
return 0;
suspend_sampling_time = (u64)ktime_to_ms(ktime_get());
time_diff = suspend_sampling_time - suspend_start;
/* Update the suspend_start sample again */
suspend_start = suspend_sampling_time;
return time_diff;
}
static ssize_t gpu_load_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned long sysfs_busy_perc = 0;
/*
* Average out the samples taken since last read
* This will keep the average value in sync with
* the client sampling duration.
*/
spin_lock(&sample_lock);
if (acc_total)
sysfs_busy_perc = (acc_relative_busy * 100) / acc_total;
/* Reset the parameters */
acc_total = 0;
acc_relative_busy = 0;
spin_unlock(&sample_lock);
return scnprintf(buf, PAGE_SIZE, "%lu\n", sysfs_busy_perc);
}
/*
* Returns the time in ms for which gpu was in suspend state
* since last time the entry is read.
*/
static ssize_t suspend_time_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u64 time_diff = 0;
spin_lock(&suspend_lock);
time_diff = suspend_time_ms();
/*
 * Also add the accumulated suspend time: the GPU may have entered and
 * left suspend between reads, and the total suspend time since the
 * last read should be reported.
 */
time_diff += suspend_time;
suspend_time = 0;
spin_unlock(&suspend_lock);
return scnprintf(buf, PAGE_SIZE, "%llu\n", time_diff);
}
static ssize_t mod_percent_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
unsigned int val;
struct devfreq *devfreq = to_devfreq(dev);
struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
ret = kstrtou32(buf, 0, &val);
if (ret)
return ret;
priv->mod_percent = clamp_t(u32, val, 10, 1000);
return count;
}
static ssize_t mod_percent_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct devfreq *devfreq = to_devfreq(dev);
struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
return scnprintf(buf, PAGE_SIZE, "%u\n", priv->mod_percent);
}
static DEVICE_ATTR_RO(gpu_load);
static DEVICE_ATTR_RO(suspend_time);
static DEVICE_ATTR_RW(mod_percent);
static const struct device_attribute *adreno_tz_attr_list[] = {
&dev_attr_gpu_load,
&dev_attr_suspend_time,
&dev_attr_mod_percent,
NULL
};
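/*
 * Fold one devfreq sample into the gpu_load accumulators. Busy time is
 * scaled by current_frequency relative to freq_table[0], which on these
 * targets is assumed to hold the highest operating frequency, so the
 * reported load is normalized to the maximum frequency.
 */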
static void compute_work_load(struct devfreq_dev_status *stats,
struct devfreq_msm_adreno_tz_data *priv,
struct devfreq *devfreq)
{
u64 busy;
spin_lock(&sample_lock);
/*
 * Keep accumulating stats until the client reads them; the sysfs read
 * averages all samples collected so far and resets the counters.
 */
acc_total += stats->total_time;
busy = (u64)stats->busy_time * stats->current_frequency;
do_div(busy, devfreq->profile->freq_table[0]);
acc_relative_busy += busy;
spin_unlock(&sample_lock);
}
/* Trap into the TrustZone, and call funcs there. */
static int __secure_tz_reset_entry2(unsigned int *scm_data, u32 size_scm_data,
bool is_64)
{
int ret;
/* sync memory before sending the commands to tz */
__iowmb();
if (!is_64) {
spin_lock(&tz_lock);
ret = qcom_scm_io_reset();
spin_unlock(&tz_lock);
} else {
ret = qcom_scm_dcvs_reset();
}
return ret;
}
static int __secure_tz_update_entry3(int level, s64 total_time, s64 busy_time,
int context_count, struct devfreq_msm_adreno_tz_data *priv)
{
int ret;
/* sync memory before sending the commands to tz */
__iowmb();
if (!priv->is_64) {
spin_lock(&tz_lock);
ret = qcom_scm_dcvs_update(level, total_time, busy_time);
spin_unlock(&tz_lock);
} else if (!priv->ctxt_aware_enable) {
ret = qcom_scm_dcvs_update_v2(level, total_time, busy_time);
} else {
ret = qcom_scm_dcvs_update_ca_v2(level, total_time, busy_time,
context_count);
}
return ret;
}
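/*
 * Pass the context-aware DCVS tunables to TrustZone through shared
 * memory: a shmbridge allocation when the bridge is available, otherwise
 * a plain kernel buffer whose physical address is handed to the SCM call.
 */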
static int tz_init_ca(struct device *dev,
struct devfreq_msm_adreno_tz_data *priv)
{
unsigned int tz_ca_data[2];
phys_addr_t paddr;
u8 *tz_buf;
int ret;
struct qtee_shm shm;
/* Set data for TZ */
tz_ca_data[0] = priv->bin.ctxt_aware_target_pwrlevel;
tz_ca_data[1] = priv->bin.ctxt_aware_busy_penalty;
if (!qtee_shmbridge_is_enabled()) {
tz_buf = kzalloc(PAGE_ALIGN(sizeof(tz_ca_data)), GFP_KERNEL);
if (!tz_buf)
return -ENOMEM;
paddr = virt_to_phys(tz_buf);
} else {
ret = qtee_shmbridge_allocate_shm(
PAGE_ALIGN(sizeof(tz_ca_data)), &shm);
if (ret)
return -ENOMEM;
tz_buf = shm.vaddr;
paddr = shm.paddr;
}
memcpy(tz_buf, tz_ca_data, sizeof(tz_ca_data));
/* Ensure memcpy completes execution */
mb();
dma_sync_single_for_device(dev, paddr,
PAGE_ALIGN(sizeof(tz_ca_data)), DMA_BIDIRECTIONAL);
ret = qcom_scm_dcvs_init_ca_v2(paddr, sizeof(tz_ca_data));
if (!qtee_shmbridge_is_enabled())
kfree_sensitive(tz_buf);
else
qtee_shmbridge_free_shm(&shm);
return ret;
}
static int tz_init(struct device *dev, struct devfreq_msm_adreno_tz_data *priv,
unsigned int *tz_pwrlevels, u32 size_pwrlevels,
unsigned int *version, u32 size_version)
{
int ret;
phys_addr_t paddr;
if (qcom_scm_dcvs_core_available()) {
u8 *tz_buf;
struct qtee_shm shm;
if (!qtee_shmbridge_is_enabled()) {
tz_buf = kzalloc(PAGE_ALIGN(size_pwrlevels),
GFP_KERNEL);
if (!tz_buf)
return -ENOMEM;
paddr = virt_to_phys(tz_buf);
} else {
ret = qtee_shmbridge_allocate_shm(
PAGE_ALIGN(size_pwrlevels), &shm);
if (ret)
return -ENOMEM;
tz_buf = shm.vaddr;
paddr = shm.paddr;
}
memcpy(tz_buf, tz_pwrlevels, size_pwrlevels);
/* Ensure memcpy completes execution */
mb();
dma_sync_single_for_device(dev, paddr,
PAGE_ALIGN(size_pwrlevels), DMA_BIDIRECTIONAL);
ret = qcom_scm_dcvs_init_v2(paddr, size_pwrlevels, version);
if (!ret)
priv->is_64 = true;
if (!qtee_shmbridge_is_enabled())
kfree_sensitive(tz_buf);
else
qtee_shmbridge_free_shm(&shm);
} else
ret = -EINVAL;
/* Initialize context aware feature, if enabled. */
if (!ret && priv->ctxt_aware_enable) {
if (priv->is_64 && qcom_scm_dcvs_ca_available()) {
ret = tz_init_ca(dev, priv);
/*
* If context aware feature initialization fails,
* just print an error message and return
* success as normal DCVS will still work.
*/
if (ret) {
pr_err(TAG "tz: context aware DCVS init failed\n");
priv->ctxt_aware_enable = false;
return 0;
}
} else {
pr_warn(TAG "tz: context aware DCVS not supported\n");
priv->ctxt_aware_enable = false;
}
}
return ret;
}
static inline int devfreq_get_freq_level(struct devfreq *devfreq,
unsigned long freq)
{
int lev;
for (lev = 0; lev < devfreq->profile->max_state; lev++)
if (freq == devfreq->profile->freq_table[lev])
return lev;
return -EINVAL;
}
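/*
 * devfreq get_target_freq callback: samples are accumulated until at
 * least FLOOR usecs have passed with MIN_BUSY usecs of busy time. The
 * decision itself comes back from TrustZone as a signed level delta,
 * except for long busy bursts, which jump straight to level 0 (the
 * highest frequency).
 */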
static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq)
{
int result = 0;
struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
struct devfreq_dev_status *stats = &devfreq->last_status;
int val, level = 0;
int context_count = 0;
u64 busy_time;
if (!priv)
return 0;
/* keeps stats.private_data == NULL */
result = devfreq_update_stats(devfreq);
if (result) {
pr_err(TAG "get_status failed %d\n", result);
return result;
}
*freq = stats->current_frequency;
priv->bin.total_time += stats->total_time;
/* Update gpu busy time as per mod_percent */
busy_time = stats->busy_time * priv->mod_percent;
do_div(busy_time, 100);
/* busy_time should not go over total_time */
stats->busy_time = min_t(u64, busy_time, stats->total_time);
priv->bin.busy_time += stats->busy_time;
if (stats->private_data)
context_count = *((int *)stats->private_data);
/* Update the GPU load statistics */
compute_work_load(stats, priv, devfreq);
/*
* Do not waste CPU cycles running this algorithm if
* the GPU just started, or if less than FLOOR time
* has passed since the last run or the gpu hasn't been
* busier than MIN_BUSY.
*/
if ((stats->total_time == 0) ||
(priv->bin.total_time < FLOOR) ||
(unsigned int) priv->bin.busy_time < MIN_BUSY) {
return 0;
}
level = devfreq_get_freq_level(devfreq, stats->current_frequency);
if (level < 0) {
pr_err(TAG "bad freq %ld\n", stats->current_frequency);
return level;
}
/*
* If there is an extended block of busy processing,
* increase frequency. Otherwise run the normal algorithm.
*/
if (!priv->disable_busy_time_burst &&
priv->bin.busy_time > CEILING) {
val = -1 * level;
} else {
val = __secure_tz_update_entry3(level, priv->bin.total_time,
priv->bin.busy_time, context_count, priv);
}
priv->bin.total_time = 0;
priv->bin.busy_time = 0;
/*
* If the decision is to move to a different level, make sure the GPU
* frequency changes.
*/
if (val) {
level += val;
level = max(level, 0);
level = min_t(int, level, devfreq->profile->max_state - 1);
}
*freq = devfreq->profile->freq_table[level];
return 0;
}
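/*
 * Shared init path: tz_pwrlevels[0] carries the number of power levels
 * and the remaining slots carry the frequency table, matching the layout
 * the TrustZone init call consumes.
 */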
static int __tz_init(struct devfreq *devfreq)
{
struct devfreq_msm_adreno_tz_data *priv;
unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
int i, out, ret;
unsigned int version;
struct msm_adreno_extended_profile *gpu_profile = container_of(
(devfreq->profile),
struct msm_adreno_extended_profile,
profile);
/*
 * Assuming only one instance of the Adreno device is connected to this
 * governor, the pointer to the governor private data can safely be
 * restored from the container of the device profile.
 */
devfreq->data = gpu_profile->private_data;
priv = devfreq->data;
out = 1;
if (devfreq->profile->max_state < ARRAY_SIZE(tz_pwrlevels)) {
for (i = 0; i < devfreq->profile->max_state; i++)
tz_pwrlevels[out++] = devfreq->profile->freq_table[i];
tz_pwrlevels[0] = i;
} else {
pr_err(TAG "tz_pwrlevels[] is too short\n");
return -EINVAL;
}
ret = tz_init(&devfreq->dev, priv, tz_pwrlevels, sizeof(tz_pwrlevels),
&version, sizeof(version));
if (ret != 0 || version > MAX_TZ_VERSION) {
pr_err(TAG "tz_init failed\n");
/* Don't report success when only the version check failed */
return ret ? ret : -EINVAL;
}
return 0;
}
static int tz_start(struct devfreq *devfreq)
{
int i, ret;
ret = __tz_init(devfreq);
if (ret)
return ret;
for (i = 0; adreno_tz_attr_list[i] != NULL; i++)
device_create_file(&devfreq->dev, adreno_tz_attr_list[i]);
return 0;
}
static int tz_stop(struct devfreq *devfreq)
{
int i;
for (i = 0; adreno_tz_attr_list[i] != NULL; i++)
device_remove_file(&devfreq->dev, adreno_tz_attr_list[i]);
/* leaving the governor and cleaning the pointer to private data */
devfreq->data = NULL;
return 0;
}
static int tz_suspend(struct devfreq *devfreq)
{
struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
unsigned int scm_data[2] = {0, 0};
if (!priv)
return 0;
__secure_tz_reset_entry2(scm_data, sizeof(scm_data), priv->is_64);
priv->bin.total_time = 0;
priv->bin.busy_time = 0;
return 0;
}
static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data)
{
int result;
struct device_node *node = devfreq->dev.parent->of_node;
if (!of_device_is_compatible(node, "qcom,kgsl-3d0"))
return -EINVAL;
switch (event) {
case DEVFREQ_GOV_START:
result = tz_start(devfreq);
break;
case DEVFREQ_GOV_STOP:
spin_lock(&suspend_lock);
suspend_start = 0;
spin_unlock(&suspend_lock);
result = tz_stop(devfreq);
break;
case DEVFREQ_GOV_SUSPEND:
result = tz_suspend(devfreq);
if (!result) {
spin_lock(&suspend_lock);
/* Collect the start sample for suspend time */
suspend_start = (u64)ktime_to_ms(ktime_get());
spin_unlock(&suspend_lock);
}
break;
case DEVFREQ_GOV_RESUME:
spin_lock(&suspend_lock);
suspend_time += suspend_time_ms();
/* Reset the suspend_start when gpu resumes */
suspend_start = 0;
spin_unlock(&suspend_lock);
fallthrough;
case DEVFREQ_GOV_UPDATE_INTERVAL:
/* This governor doesn't use polling */
fallthrough;
default:
result = 0;
break;
}
return result;
}
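/*
 * The governor is marked immutable so that userspace cannot switch a
 * device away from it through the sysfs governor attribute.
 */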
static struct devfreq_governor msm_adreno_tz = {
.name = "msm-adreno-tz",
.get_target_freq = tz_get_target_freq,
.event_handler = tz_handler,
.flags = DEVFREQ_GOV_FLAG_IMMUTABLE,
};
int msm_adreno_tz_reinit(struct devfreq *devfreq)
{
return __tz_init(devfreq);
}
EXPORT_SYMBOL_GPL(msm_adreno_tz_reinit);
int msm_adreno_tz_init(void)
{
return devfreq_add_governor(&msm_adreno_tz);
}
subsys_initcall(msm_adreno_tz_init);
void msm_adreno_tz_exit(void)
{
int ret = devfreq_remove_governor(&msm_adreno_tz);
if (ret)
pr_err(TAG "failed to remove governor %d\n", ret);
}
module_exit(msm_adreno_tz_exit);
MODULE_LICENSE("GPL");