Add Samsung-specific changes

This commit is contained in:
2025-08-11 14:29:00 +02:00
parent c66122e619
commit 4d134a1294
2688 changed files with 1127995 additions and 11475 deletions

drivers/samsung/Kconfig (new file, +4 lines)

@@ -0,0 +1,4 @@
source "drivers/samsung/bsp/Kconfig"
source "drivers/samsung/debug/Kconfig"
source "drivers/samsung/power/Kconfig"
source "drivers/samsung/ipc/Kconfig"

drivers/samsung/Makefile (new file, +4 lines)

@@ -0,0 +1,4 @@
obj-y += bsp/
obj-y += debug/
obj-y += ipc/
obj-y += power/

View File

@@ -0,0 +1,28 @@
menu "Samsung Factory Feature"
config SEC_FACTORY
bool "when it comes to sec factory mode"
default n
select SEC_PANIC_PCIE_ERR
help
Enable support for the Samsung (SEC) factory mode.
endmenu
config SEC_FACTORY_INTERPOSER
bool "Samsung Factory interposer binary"
default n
help
Samsung Factory interposer binary
config SAMSUNG_PRODUCT_SHIP
bool "set up for product shipping"
default n
source "drivers/samsung/bsp/class/Kconfig"
source "drivers/samsung/bsp/param/Kconfig"
source "drivers/samsung/bsp/key_notifier/Kconfig"
source "drivers/samsung/bsp/reloc_gpio/Kconfig"
source "drivers/samsung/bsp/argos/Kconfig"
source "drivers/samsung/bsp/of_kunit/Kconfig"
source "drivers/samsung/bsp/blk_helper/Kconfig"
source "drivers/samsung/bsp/qcom/Kconfig"

View File

@@ -0,0 +1,9 @@
obj-$(CONFIG_SEC_CLASS) += class/
obj-$(CONFIG_SEC_KEY_NOTIFIER) += key_notifier/
obj-$(CONFIG_SEC_PARAM) += param/
obj-$(CONFIG_SEC_RELOC_GPIO) += reloc_gpio/
obj-$(CONFIG_ARGOS) += argos/
obj-$(CONFIG_SEC_OF_KUNIT) += of_kunit/
obj-$(CONFIG_SEC_BLK_HELPER) += blk_helper/
obj-y += qcom/

View File

@@ -0,0 +1,23 @@
config ARGOS
tristate "Throughput monitoring Feature"
help
This option enables monitoring of the data throughput and takes several
actions to enhance performance, such as adjusting the CPU frequency and
moving tasks and IRQs to the appropriate CPUs.
config ARGOS_THROUGHPUT
default y if ARGOS=m
default n if ARGOS=y
bool "argos Throughput device"
help
This option make /dev/network_throughput in sec_argos module
for substitiution of pm qos /dev/network_throughput
which is deprecated in kernel 5.4.
config ARGOS_VOTING_DDR
default n
depends on ARGOS
bool "argos voting ddr clock"
help
This option enable ddr voting part in sec_argos module.
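
Client drivers consume this feature through the per-device notifier chains exported by sec_argos.c further down in this commit. The sketch below shows how a network driver might subscribe to throughput updates; it is illustrative only, and "WIFI" is a hypothetical net_boost,label that must match an entry in the platform's "samsung,argos" device-tree node.

#include <linux/module.h>
#include <linux/notifier.h>

/* Prototypes normally come from a Samsung header that is not shown here. */
extern int sec_argos_register_notifier(struct notifier_block *n, char *label);
extern int sec_argos_unregister_notifier(struct notifier_block *n, char *label);

/* Called through blocking_notifier_call_chain() in argos_pm_qos_notify();
 * 'speed' is the currently reported throughput in Mbps. */
static int wifi_tput_notify(struct notifier_block *nb,
                            unsigned long speed, void *data)
{
    pr_info("wifi throughput changed: %lu Mbps\n", speed);
    /* e.g. resize RX aggregation buffers here based on 'speed' */
    return NOTIFY_OK;
}

static struct notifier_block wifi_tput_nb = {
    .notifier_call = wifi_tput_notify,
};

static int __init wifi_tput_client_init(void)
{
    return sec_argos_register_notifier(&wifi_tput_nb, "WIFI");
}

static void __exit wifi_tput_client_exit(void)
{
    sec_argos_unregister_notifier(&wifi_tput_nb, "WIFI");
}

module_init(wifi_tput_client_init);
module_exit(wifi_tput_client_exit);
MODULE_LICENSE("GPL");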

View File

@@ -0,0 +1 @@
obj-$(CONFIG_ARGOS) += sec_argos.o

View File

@@ -0,0 +1,937 @@
/*
* argos.c
*
* Copyright (c) 2012-2023 Samsung Electronics Co., Ltd
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pm_qos.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/gfp.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#ifdef CONFIG_ARGOS_THROUGHPUT
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#endif
#if defined(CONFIG_ARCH_LAHAINA) || defined(CONFIG_ARGOS_VOTING_DDR)
#define ARGOS_VOTING_DDR_CLK
#include <linux/interconnect.h>
#endif
#if IS_ENABLED(CONFIG_CPU_FREQ_LIMIT)
#include <linux/cpufreq.h>
#include <linux/cpufreq_limit.h>
#ifndef CONFIG_CPU_FREQ_LIMIT_USERSPACE
#define DVFS_ARGOS_ID CFLM_ARGOS
int set_freq_limit(unsigned long id, unsigned int freq);
#endif
#else
#define DVFS_ARGOS_ID 0
int set_freq_limit(unsigned long id, unsigned int freq)
{
pr_err("%s is not yet implemented\n", __func__);
return 0;
}
#endif
#define ARGOS_NAME "argos"
#define TYPE_SHIFT 4
#define TYPE_MASK_BIT ((1 << TYPE_SHIFT) - 1)
#define LOCK_RELEASE 0
#define FREQ_UNLOCK -1
#define SKIP_FREQ_UPDATE 0
#define FREQ_UPDATE 1
#define CPU_UNLOCK_FREQ -1
#ifdef ARGOS_VOTING_DDR_CLK
#define DDR_UNLOCK_FREQ 0
#endif
//Refer to "include/dt-bindings/interconnect/qcom,lahaina.h"
#define MASTER_APPSS_PROC 2
#define SLAVE_EBI1 512
#define BUS_W 4 /* SM8350 DDR Voting('w' for DDR is 4) */
#define MHZ_TO_KBPS(mhz, w) ((uint64_t)mhz * 1000 * w)
static DEFINE_SPINLOCK(argos_irq_lock);
static DEFINE_SPINLOCK(argos_task_lock);
static DEFINE_SPINLOCK(argos_boost_list_lock);
enum {
THRESHOLD,
BIG_MIN_FREQ,
BIG_MAX_FREQ,
LITTLE_MIN_FREQ,
LITTLE_MAX_FREQ,
DDR_FREQ,
RESERVED,
TASK_AFFINITY_EN,
IRQ_AFFINITY_EN,
SCHED_BOOST_EN,
ITEM_MAX,
};
enum {
BOOST_CPU,
#ifdef ARGOS_VOTING_DDR_CLK
BOOST_DDR,
#endif
BOOST_MAX
};
struct boost_table {
unsigned int items[ITEM_MAX];
};
struct argos_task_affinity {
struct task_struct *p;
struct cpumask *affinity_cpu_mask;
struct cpumask *default_cpu_mask;
struct list_head entry;
};
struct argos_irq_affinity {
unsigned int irq;
struct cpumask *affinity_cpu_mask;
struct cpumask *default_cpu_mask;
struct list_head entry;
};
struct argos {
const char *desc;
struct platform_device *pdev;
struct boost_table *tables;
int ntables;
int prev_level;
struct list_head task_affinity_list;
bool task_hotplug_disable;
struct list_head irq_affinity_list;
bool irq_hotplug_disable;
bool hmpboost_enable;
bool argos_block;
struct blocking_notifier_head argos_notifier;
/* protect prev_level, qos, task/irq_hotplug_disable, hmpboost_enable */
struct mutex level_mutex;
};
#ifdef CONFIG_ARGOS_THROUGHPUT
#define TPUT_MAX 16
char argos_throughput[TPUT_MAX];
#endif
struct argos_platform_data {
struct argos *devices;
struct device *dev;
int ndevice;
#ifndef CONFIG_ARGOS_THROUGHPUT
struct notifier_block pm_qos_nfb;
#endif
int *boost_list[BOOST_MAX];
int boost_max[BOOST_MAX];
};
static struct argos_platform_data *argos_pdata;
static int boost_unlock_freq[BOOST_MAX] = {
CPU_UNLOCK_FREQ
#ifdef ARGOS_VOTING_DDR_CLK
, DDR_UNLOCK_FREQ
#endif
};
#ifdef ARGOS_VOTING_DDR_CLK
struct icc_path *path_argos_bw;
int argos_icc_register = 0;
#endif
static int argos_find_index(const char *label)
{
int i;
int dev_num = -1;
if (!argos_pdata) {
pr_err("argos not initialized\n");
return -1;
}
for (i = 0; i < argos_pdata->ndevice; i++)
if (strcmp(argos_pdata->devices[i].desc, label) == 0)
dev_num = i;
return dev_num;
}
int sec_argos_register_notifier(struct notifier_block *n, char *label)
{
struct blocking_notifier_head *cnotifier;
int dev_num;
dev_num = argos_find_index(label);
if (dev_num < 0) {
pr_err("No match found for label: %d", dev_num);
return -ENODEV;
}
cnotifier = &argos_pdata->devices[dev_num].argos_notifier;
if (!cnotifier) {
pr_err("argos notifier not found(dev_num:%d)\n", dev_num);
return -ENXIO;
}
pr_info("%pf(dev_num:%d)\n", n->notifier_call, dev_num);
return blocking_notifier_chain_register(cnotifier, n);
}
EXPORT_SYMBOL_GPL(sec_argos_register_notifier);
int sec_argos_unregister_notifier(struct notifier_block *n, char *label)
{
struct blocking_notifier_head *cnotifier;
int dev_num;
dev_num = argos_find_index(label);
if (dev_num < 0) {
pr_err("No match found for label: %d", dev_num);
return -ENODEV;
}
cnotifier = &argos_pdata->devices[dev_num].argos_notifier;
if (!cnotifier) {
pr_err("argos notifier not found(dev_num:%d)\n", dev_num);
return -ENXIO;
}
pr_info("%pf(dev_num:%d)\n", n->notifier_call, dev_num);
return blocking_notifier_chain_unregister(cnotifier, n);
}
EXPORT_SYMBOL_GPL(sec_argos_unregister_notifier);
static int argos_task_affinity_setup(struct task_struct *p, int dev_num,
struct cpumask *affinity_cpu_mask,
struct cpumask *default_cpu_mask)
{
struct argos_task_affinity *this;
struct list_head *head;
if (!argos_pdata) {
pr_err("argos not initialized\n");
return -ENXIO;
}
if (dev_num < 0 || dev_num >= argos_pdata->ndevice) {
pr_err("dev_num:%d should be dev_num:0 ~ %d in boundary\n",
dev_num, argos_pdata->ndevice - 1);
return -EINVAL;
}
head = &argos_pdata->devices[dev_num].task_affinity_list;
this = kzalloc(sizeof(*this), GFP_ATOMIC);
if (!this)
return -ENOMEM;
this->p = p;
this->affinity_cpu_mask = affinity_cpu_mask;
this->default_cpu_mask = default_cpu_mask;
spin_lock(&argos_task_lock);
list_add(&this->entry, head);
spin_unlock(&argos_task_lock);
return 0;
}
int argos_task_affinity_setup_label(struct task_struct *p, const char *label,
struct cpumask *affinity_cpu_mask,
struct cpumask *default_cpu_mask)
{
int dev_num;
dev_num = argos_find_index(label);
return argos_task_affinity_setup(p, dev_num, affinity_cpu_mask,
default_cpu_mask);
}
static int argos_irq_affinity_setup(unsigned int irq, int dev_num,
struct cpumask *affinity_cpu_mask,
struct cpumask *default_cpu_mask)
{
struct argos_irq_affinity *this;
struct list_head *head;
if (!argos_pdata) {
pr_err("argos not initialized\n");
return -ENXIO;
}
if (dev_num < 0 || dev_num >= argos_pdata->ndevice) {
pr_err("dev_num:%d should be dev_num:0 ~ %d in boundary\n",
dev_num, argos_pdata->ndevice - 1);
return -EINVAL;
}
head = &argos_pdata->devices[dev_num].irq_affinity_list;
this = kzalloc(sizeof(*this), GFP_ATOMIC);
if (!this)
return -ENOMEM;
this->irq = irq;
this->affinity_cpu_mask = affinity_cpu_mask;
this->default_cpu_mask = default_cpu_mask;
spin_lock(&argos_irq_lock);
list_add(&this->entry, head);
spin_unlock(&argos_irq_lock);
return 0;
}
int argos_irq_affinity_setup_label(unsigned int irq, const char *label,
struct cpumask *affinity_cpu_mask,
struct cpumask *default_cpu_mask)
{
int dev_num;
dev_num = argos_find_index(label);
return argos_irq_affinity_setup(irq, dev_num, affinity_cpu_mask,
default_cpu_mask);
}
int argos_task_affinity_apply(int dev_num, bool enable)
{
struct argos_task_affinity *this;
struct list_head *head;
int result = 0;
struct cpumask *mask;
bool *hotplug_disable;
head = &argos_pdata->devices[dev_num].task_affinity_list;
hotplug_disable = &argos_pdata->devices[dev_num].task_hotplug_disable;
if (list_empty(head)) {
pr_debug("task_affinity_list is empty\n");
return result;
}
list_for_each_entry(this, head, entry) {
if (enable) {
if (!*hotplug_disable)
*hotplug_disable = true;
mask = this->affinity_cpu_mask;
} else {
if (*hotplug_disable)
*hotplug_disable = false;
mask = this->default_cpu_mask;
}
result = set_cpus_allowed_ptr(this->p, mask);
pr_info("%s affinity %s to cpu_mask:0x%X\n",
this->p->comm,
(enable ? "enable" : "disable"),
(int)*mask->bits);
}
return result;
}
int argos_irq_affinity_apply(int dev_num, bool enable)
{
struct argos_irq_affinity *this;
struct list_head *head;
int result = 0;
struct cpumask *mask;
bool *hotplug_disable;
head = &argos_pdata->devices[dev_num].irq_affinity_list;
hotplug_disable = &argos_pdata->devices[dev_num].irq_hotplug_disable;
if (list_empty(head)) {
pr_debug("irq_affinity_list is empty\n");
return result;
}
list_for_each_entry(this, head, entry) {
if (enable) {
if (!*hotplug_disable)
*hotplug_disable = true;
mask = this->affinity_cpu_mask;
} else {
if (*hotplug_disable)
*hotplug_disable = false;
mask = this->default_cpu_mask;
}
#ifndef CONFIG_ARGOS_THROUGHPUT
result = irq_set_affinity(this->irq, mask);
#endif
pr_info("irq%d affinity %s to cpu_mask:0x%X\n",
this->irq, (enable ? "enable" : "disable"),
(int)*mask->bits);
}
return result;
}
int argos_hmpboost_apply(int dev_num, bool enable)
{
bool *hmpboost_enable;
hmpboost_enable = &argos_pdata->devices[dev_num].hmpboost_enable;
if (enable) {
/* disable -> enable */
if (!*hmpboost_enable) {
*hmpboost_enable = true;
pr_info("hmp boost enable [%d]\n", dev_num);
}
} else {
/* enable -> disable */
if (*hmpboost_enable) {
*hmpboost_enable = false;
pr_info("hmp boost disable [%d]\n", dev_num);
}
}
return 0;
}
static int find_max(int boost_type, int *freq, int ndevice)
{
int i, max = boost_unlock_freq[boost_type];
for (i = 0; i < ndevice; i++) {
if (freq[i] > max)
max = freq[i];
}
return max;
}
static int check_update_freq(int boost_type, int dev_type, int target)
{
int ret = SKIP_FREQ_UPDATE, new_max, prev;
spin_lock(&argos_boost_list_lock);
prev = argos_pdata->boost_list[boost_type][dev_type];
argos_pdata->boost_list[boost_type][dev_type] = target;
new_max = find_max(boost_type, argos_pdata->boost_list[boost_type],
argos_pdata->ndevice);
spin_unlock(&argos_boost_list_lock);
if (new_max > argos_pdata->boost_max[boost_type] ||
(prev == argos_pdata->boost_max[boost_type] &&
new_max != argos_pdata->boost_max[boost_type])) {
argos_pdata->boost_max[boost_type] = new_max;
ret = FREQ_UPDATE;
}
return ret;
}
static void argos_freq_lock(int type, int level)
{
int target_freq, need_update;
struct boost_table *t = NULL;
if (level != FREQ_UNLOCK) {
t = &argos_pdata->devices[type].tables[level];
target_freq = max(t->items[BIG_MIN_FREQ],
t->items[LITTLE_MIN_FREQ]);
} else {
target_freq = boost_unlock_freq[BOOST_CPU];
}
need_update = check_update_freq(BOOST_CPU, type, target_freq);
if (need_update != SKIP_FREQ_UPDATE) {
pr_info("update cpu freq %d\n", argos_pdata->boost_max[BOOST_CPU]);
set_freq_limit(DVFS_ARGOS_ID, argos_pdata->boost_max[BOOST_CPU]);
}
#ifdef ARGOS_VOTING_DDR_CLK
if (level != FREQ_UNLOCK)
target_freq = t->items[DDR_FREQ];
else
target_freq = boost_unlock_freq[BOOST_DDR];
need_update = check_update_freq(BOOST_DDR, type, target_freq);
if (need_update != SKIP_FREQ_UPDATE) {
pr_info("update ddr freq %d\n", argos_pdata->boost_max[BOOST_DDR]);
icc_set_bw(path_argos_bw, 0, MHZ_TO_KBPS(argos_pdata->boost_max[BOOST_DDR], BUS_W));
}
#endif
}
void argos_block_enable(char *req_name, bool set)
{
int dev_num;
struct argos *cnode;
dev_num = argos_find_index(req_name);
if (dev_num < 0) {
pr_err("No match found for label: %s", req_name);
return;
}
cnode = &argos_pdata->devices[dev_num];
if (set) {
cnode->argos_block = true;
mutex_lock(&cnode->level_mutex);
argos_freq_lock(dev_num, FREQ_UNLOCK);
argos_task_affinity_apply(dev_num, 0);
argos_irq_affinity_apply(dev_num, 0);
argos_hmpboost_apply(dev_num, 0);
cnode->prev_level = -1;
mutex_unlock(&cnode->level_mutex);
} else {
cnode->argos_block = false;
}
pr_info("req_name:%s block:%d\n",
req_name, cnode->argos_block);
}
#ifndef CONFIG_ARGOS_THROUGHPUT
static int argos_cpuidle_reboot_notifier(struct notifier_block *this,
unsigned long event, void *_cmd)
{
switch (event) {
case SYS_POWER_OFF:
case SYS_RESTART:
pr_info("called\n");
pm_qos_remove_notifier(PM_QOS_NETWORK_THROUGHPUT,
&argos_pdata->pm_qos_nfb);
break;
}
return NOTIFY_OK;
}
static struct notifier_block argos_cpuidle_reboot_nb = {
.notifier_call = argos_cpuidle_reboot_notifier,
};
#endif
#ifdef ARGOS_VOTING_DDR_CLK
static void get_icc_path(void)
{
struct device *dev = argos_pdata->dev;
int bus_ret = 0;
path_argos_bw = icc_get(dev, MASTER_APPSS_PROC, SLAVE_EBI1);
if (IS_ERR(path_argos_bw)) {
bus_ret = PTR_ERR(path_argos_bw);
dev_err(dev, "Failed to get path_argos_bw. ret=%d\n", bus_ret);
if (bus_ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get icc path. ret=%d\n", bus_ret);
} else {
dev_info(dev, "Success to get path_argos_bw.\n");
argos_icc_register = 1;
}
}
#endif
#if defined (CONFIG_ARGOS_THROUGHPUT)
static int argos_pm_qos_notify(unsigned long speedtype)
#else
static int argos_pm_qos_notify(struct notifier_block *nfb,
unsigned long speedtype, void *arg)
#endif
{
int type, level, prev_level;
unsigned long speed;
bool argos_blocked;
struct argos *cnode;
type = (speedtype & TYPE_MASK_BIT) - 1;
if (type < 0 || type > argos_pdata->ndevice) {
pr_err("There is no type for devices type[%d], ndevice[%d]\n",
type, argos_pdata->ndevice);
return NOTIFY_BAD;
}
#ifdef ARGOS_VOTING_DDR_CLK
if (argos_icc_register == 0)
get_icc_path();
#endif
speed = speedtype >> TYPE_SHIFT;
cnode = &argos_pdata->devices[type];
prev_level = cnode->prev_level;
argos_blocked = cnode->argos_block;
if (cnode->tables[0].items[THRESHOLD] == 0) {
pr_debug("skip not used name:%s, speed:%ldMbps\n",\
cnode->desc, speed);
goto out;
}
/* Find proper level */
for (level = 0; level < cnode->ntables; level++) {
struct boost_table *t = &cnode->tables[level];
if (speed < t->items[THRESHOLD]) {
break;
} else if (argos_pdata->devices[type].ntables == level) {
level++;
break;
}
}
/* decrease 1 level to match proper table */
level--;
if (!argos_blocked) {
if (level != prev_level) {
if (mutex_trylock(&cnode->level_mutex) == 0) {
/*
* If the mutex is already locked, it means this argos
* is being blocked or is handling another change.
* We don't need to wait.
*/
pr_warn("skip name:%s, speed:%ldMbps, prev level:%d, request level:%d\n",
cnode->desc, speed, prev_level, level);
goto out;
}
pr_info("name:%s, speed:%ldMbps, prev level:%d, request level:%d\n",
cnode->desc, speed, prev_level, level);
if (level == FREQ_UNLOCK) {
if (cnode->argos_notifier.head) {
pr_debug("Call argos notifier(%s lev:%d)\n",
cnode->desc, level);
blocking_notifier_call_chain(&cnode->argos_notifier,
speed, NULL);
}
argos_freq_lock(type, FREQ_UNLOCK);
argos_task_affinity_apply(type, 0);
argos_irq_affinity_apply(type, 0);
argos_hmpboost_apply(type, 0);
} else {
unsigned int enable_flag;
argos_freq_lock(type, level);
/* FIXME should control affinity and hmp boost */
enable_flag = argos_pdata->devices[type].tables[level].items[TASK_AFFINITY_EN];
argos_task_affinity_apply(type, enable_flag);
enable_flag = argos_pdata->devices[type].tables[level].items[IRQ_AFFINITY_EN];
argos_irq_affinity_apply(type, enable_flag);
enable_flag =
argos_pdata->devices[type].tables[level].items[SCHED_BOOST_EN];
argos_hmpboost_apply(type, enable_flag);
if (cnode->argos_notifier.head) {
pr_debug("Call argos notifier(%s lev:%d)\n",
cnode->desc, level);
blocking_notifier_call_chain(&cnode->argos_notifier,
speed, NULL);
}
}
cnode->prev_level = level;
mutex_unlock(&cnode->level_mutex);
} else {
pr_debug("same level (%d) is requested", level);
}
}
out:
return NOTIFY_OK;
}
#ifdef CONFIG_OF
static int argos_parse_dt(struct device *dev)
{
struct argos_platform_data *pdata = dev->platform_data;
struct argos *cnode;
struct device_node *np, *cnp;
int device_count = 0, num_level = 0;
int retval = 0, i, j;
np = dev->of_node;
pdata->ndevice = of_get_child_count(np);
if (!pdata->ndevice)
return -ENODEV;
pdata->devices = devm_kzalloc(dev, sizeof(struct argos) * pdata->ndevice, GFP_KERNEL);
if (!pdata->devices)
return -ENOMEM;
for_each_child_of_node(np, cnp) {
cnode = &pdata->devices[device_count];
cnode->desc = of_get_property(cnp, "net_boost,label", NULL);
if (of_property_read_u32(cnp, "net_boost,table_size", &num_level)) {
dev_err(dev, "Failed to get table size: node not exist\n");
retval = -EINVAL;
goto err_out;
}
cnode->ntables = num_level;
/* Allocation for freq and time table */
if (!cnode->tables) {
cnode->tables = devm_kzalloc(dev,
sizeof(struct boost_table) * cnode->ntables, GFP_KERNEL);
if (!cnode->tables) {
retval = -ENOMEM;
goto err_out;
}
}
/* Get and add frequency and time table */
for (i = 0; i < num_level; i++) {
for (j = 0; j < ITEM_MAX; j++) {
retval = of_property_read_u32_index(cnp, "net_boost,table",
i * ITEM_MAX + j, &cnode->tables[i].items[j]);
if (retval) {
dev_err(dev, "Failed to get property\n");
retval = -EINVAL;
goto err_out;
}
}
}
INIT_LIST_HEAD(&cnode->task_affinity_list);
INIT_LIST_HEAD(&cnode->irq_affinity_list);
cnode->task_hotplug_disable = false;
cnode->irq_hotplug_disable = false;
cnode->hmpboost_enable = false;
cnode->argos_block = false;
cnode->prev_level = -1;
mutex_init(&cnode->level_mutex);
BLOCKING_INIT_NOTIFIER_HEAD(&cnode->argos_notifier);
device_count++;
}
return 0;
err_out:
return retval;
}
#endif
static int argos_probe(struct platform_device *pdev)
{
int i, j, ret = 0;
struct argos_platform_data *pdata;
pr_info("Start probe\n");
if (pdev->dev.of_node) {
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct argos_platform_data),
GFP_KERNEL);
if (!pdata) {
dev_err(&pdev->dev, "Failed to allocate platform data\n");
return -ENOMEM;
}
pdev->dev.platform_data = pdata;
ret = argos_parse_dt(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Failed to parse dt data\n");
return ret;
}
pr_info("parse dt done\n");
for(i = 0; i < BOOST_MAX; i++){
pdata->boost_list[i] = devm_kzalloc(&pdev->dev, sizeof(int) * pdata->ndevice, GFP_KERNEL);
if (!pdata->boost_list[i]) {
dev_err(&pdev->dev, "Failed to allocate boosting frequency list\n");
return -ENOMEM;
}
for (j = 0; j < pdata->ndevice; j++){
pdata->boost_list[i][j] = boost_unlock_freq[i];
}
pdata->boost_max[i] = boost_unlock_freq[i];
}
} else {
pdata = pdev->dev.platform_data;
}
if (!pdata) {
dev_err(&pdev->dev, "There are no platform data\n");
return -EINVAL;
}
if (!pdata->ndevice || !pdata->devices) {
dev_err(&pdev->dev, "There are no devices\n");
return -EINVAL;
}
#ifndef CONFIG_ARGOS_THROUGHPUT
pdata->pm_qos_nfb.notifier_call = argos_pm_qos_notify;
pm_qos_add_notifier(PM_QOS_NETWORK_THROUGHPUT, &pdata->pm_qos_nfb);
register_reboot_notifier(&argos_cpuidle_reboot_nb);
#endif
argos_pdata = pdata;
argos_pdata->dev = &pdev->dev;
platform_set_drvdata(pdev, pdata);
return 0;
}
static int argos_remove(struct platform_device *pdev)
{
struct argos_platform_data *pdata = platform_get_drvdata(pdev);
if (!pdata || !argos_pdata)
return 0;
#ifndef CONFIG_ARGOS_THROUGHPUT
pm_qos_remove_notifier(PM_QOS_NETWORK_THROUGHPUT, &pdata->pm_qos_nfb);
unregister_reboot_notifier(&argos_cpuidle_reboot_nb);
#endif
#ifdef ARGOS_VOTING_DDR_CLK
if (argos_icc_register == 1)
icc_put(path_argos_bw);
#endif
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id argos_dt_ids[] = {
{ .compatible = "samsung,argos"},
{ }
};
#endif
static struct platform_driver argos_driver = {
.driver = {
.name = ARGOS_NAME,
.owner = THIS_MODULE,
#ifdef CONFIG_OF
.of_match_table = of_match_ptr(argos_dt_ids),
#endif
},
.probe = argos_probe,
.remove = argos_remove
};
#ifdef CONFIG_ARGOS_THROUGHPUT
static ssize_t argos_tput_read(struct file *filep, char __user *buf,
size_t count, loff_t *ppos)
{
return simple_read_from_buffer(buf, count, ppos, argos_throughput, TPUT_MAX);
}
static ssize_t argos_tput_write(struct file *filep, const char __user *buf,
size_t count, loff_t *ppos)
{
int ret;
unsigned long val;
size_t len = min_t(size_t, count, TPUT_MAX - 1);
ret = copy_from_user(argos_throughput, buf, len);
if (ret) {
pr_err("failed to copy argos throughput value\n");
return -EFAULT;
}
argos_throughput[len] = '\0';
ret = kstrtoul(argos_throughput, 16, &val);
if (ret < 0) {
pr_err("failed to convert throughput to an unsigned long\n");
return -EINVAL;
}
argos_pm_qos_notify(val);
return count;
}
static const struct file_operations argos_tput_fops = {
.owner = THIS_MODULE,
.open = NULL,
.read = argos_tput_read,
.write = argos_tput_write,
.llseek = NULL,
};
static struct miscdevice argos_tput_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
.name = "network_throughput",
.fops = &argos_tput_fops,
};
#endif
static int __init argos_init(void)
{
int ret;
#ifdef CONFIG_ARGOS_THROUGHPUT
ret = misc_register(&argos_tput_miscdev);
if (ret) {
pr_err("Failed to register miscdevice\n");
return ret;
}
#endif
ret = platform_driver_register(&argos_driver);
if (ret) {
pr_err("Failed to register platform driver\n");
#ifdef CONFIG_ARGOS_THROUGHPUT
misc_deregister(&argos_tput_miscdev);
#endif
}
return ret;
}
static void __exit argos_exit(void)
{
platform_driver_unregister(&argos_driver);
#ifdef CONFIG_ARGOS_THROUGHPUT
misc_deregister(&argos_tput_miscdev);
#endif
}
late_initcall(argos_init);
module_exit(argos_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("SAMSUNG Electronics");
MODULE_DESCRIPTION("ARGOS DEVICE");

View File

@@ -0,0 +1,21 @@
config SEC_BLK_HELPER
tristate "Samsung sec block-dev helper facilities"
help
This driver is a collection of helper functions to read/write block devices.
config SEC_BLK_HELPER_TEST_FOR_ON_DEVICE
tristate "KUnit test for SEC_BLK_HELPER_test"
depends on KUNIT
depends on SEC_BLK_HELPER
help
TODO: Describe this config fully.
To run this test driver on a device, set this config to 'm' so the
test driver is built as a module.
config SEC_BLK_HELPER_TEST_FOR_ONLY_UML
tristate "KUnit test for SEC_BLK_HELPER_test"
depends on KUNIT
depends on UML
depends on SEC_BLK_HELPER
help
TODO: Describe this config fully.
For UML-only testing it is recommended to set this config to 'y'.

View File

@@ -0,0 +1,3 @@
obj-$(CONFIG_SEC_BLK_HELPER) += sec_blk_helper.o
GCOV_PROFILE_sec_blk_helper.o := $(CONFIG_KUNIT)

View File

@@ -0,0 +1,248 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2024 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/xarray.h>
#include <linux/samsung/sec_kunit.h>
static inline const struct class *__blk_class(void)
{
struct gendisk *gendisk;
const struct class *blk_cls;
gendisk = blk_alloc_disk(NUMA_NO_NODE);
if (!gendisk) {
pr_err("blk_alloc_disk failed\n");
return ERR_PTR(-ENOMEM);
}
blk_cls = disk_to_dev(gendisk)->class;
put_disk(gendisk);
return blk_cls;
}
__ss_static const struct class *blk_class(void)
{
static const struct class *blk_cls;
if (IS_ERR_OR_NULL(blk_cls))
blk_cls = __blk_class();
return blk_cls;
}
/* NOTE: these functions are inlined from 'block/genhd.c'. */
static dev_t __part_devt(struct gendisk *disk, u8 partno)
{
struct block_device *part;
dev_t devt = 0;
rcu_read_lock();
part = xa_load(&disk->part_tbl, partno);
if (part)
devt = part->bd_dev;
rcu_read_unlock();
return devt;
}
/* NOTE: these functions are inlined from 'block/early-lookup.c'. */
struct uuidcmp {
const char *uuid;
int len;
};
static int match_dev_by_uuid(struct device *dev, const void *data)
{
struct block_device *bdev = dev_to_bdev(dev);
const struct uuidcmp *cmp = data;
if (!bdev->bd_meta_info ||
strncasecmp(cmp->uuid, bdev->bd_meta_info->uuid, cmp->len))
return 0;
return 1;
}
static int devt_from_partuuid(const char *uuid_str, dev_t *devt)
{
struct uuidcmp cmp;
struct device *dev = NULL;
int offset = 0;
char *slash;
#if IS_BUILTIN(CONFIG_SEC_BLK_HELPER)
const struct class *blk_cls = &block_class;
#else
const struct class *blk_cls = blk_class();
#endif
cmp.uuid = uuid_str;
slash = strchr(uuid_str, '/');
/* Check for optional partition number offset attributes. */
if (slash) {
char c = 0;
/* Explicitly fail on poor PARTUUID syntax. */
if (sscanf(slash + 1, "PARTNROFF=%d%c", &offset, &c) != 1)
goto out_invalid;
cmp.len = slash - uuid_str;
} else {
cmp.len = strlen(uuid_str);
}
if (!cmp.len)
goto out_invalid;
dev = class_find_device(blk_cls, NULL, &cmp, &match_dev_by_uuid);
if (!dev)
return -ENODEV;
if (offset) {
/*
* Attempt to find the requested partition by adding an offset
* to the partition number found by UUID.
*/
*devt = __part_devt(dev_to_disk(dev),
dev_to_bdev(dev)->bd_partno + offset);
} else {
*devt = dev->devt;
}
put_device(dev);
return 0;
out_invalid:
pr_err("VFS: PARTUUID= is invalid.\n"
"Expected PARTUUID=<valid-uuid-id>[/PARTNROFF=%%d]\n");
return -EINVAL;
}
int sec_devt_from_partuuid(const char *uuid_str, dev_t *devt)
{
if (strncmp(uuid_str, "PARTUUID=", 9) == 0)
return devt_from_partuuid(uuid_str + 9, devt);
pr_warn("only PARTUUID= format is allowed.\n");
return -EINVAL;
}
EXPORT_SYMBOL_GPL(sec_devt_from_partuuid);
/* NOTE: see fs/pstore/blk.c of linux-5.10.y */
ssize_t sec_blk_read(struct block_device *bdev,
void *buf, size_t bytes, loff_t pos)
{
struct file file;
struct kiocb kiocb;
struct iov_iter iter;
struct kvec iov = {.iov_base = buf, .iov_len = bytes};
memset(&file, 0, sizeof(struct file));
file.f_mapping = bdev->bd_inode->i_mapping;
file.f_flags = O_DSYNC | __O_SYNC | O_NOATIME;
file.f_inode = bdev->bd_inode;
file_ra_state_init(&file.f_ra, file.f_mapping);
init_sync_kiocb(&kiocb, &file);
kiocb.ki_pos = pos;
iov_iter_kvec(&iter, READ, &iov, 1, bytes);
return generic_file_read_iter(&kiocb, &iter);
}
EXPORT_SYMBOL_GPL(sec_blk_read);
/* NOTE: this is a copy of 'blkdev_fsync' of 'block/fops.c' */
static int __blkdev_fsync(struct file *filp, loff_t start, loff_t end,
int datasync)
{
struct block_device *bdev = I_BDEV(filp->f_mapping->host);
int error;
error = file_write_and_wait_range(filp, start, end);
if (error)
return error;
/*
* There is no need to serialise calls to blkdev_issue_flush with
* i_mutex and doing so causes performance issues with concurrent
* O_SYNC writers to a block device.
*/
error = blkdev_issue_flush(bdev);
if (error == -EOPNOTSUPP)
error = 0;
return error;
}
/* NOTE: see fs/pstore/blk.c of linux-5.10.y */
ssize_t sec_blk_write(struct block_device *bdev,
const void *buf, size_t bytes, loff_t pos)
{
struct iov_iter iter;
struct kiocb kiocb;
struct file file;
ssize_t ret;
struct kvec iov = {.iov_base = (void *)buf, .iov_len = bytes};
/* Console/Ftrace backend may handle buffer until flush dirty zones */
if (in_interrupt() || irqs_disabled())
return -EBUSY;
memset(&file, 0, sizeof(struct file));
file.private_data = bdev;
file.f_mapping = bdev->bd_inode->i_mapping;
file.f_flags = O_DSYNC | __O_SYNC | O_NOATIME;
file.f_inode = bdev->bd_inode;
file.f_iocb_flags = iocb_flags(&file);
init_sync_kiocb(&kiocb, &file);
kiocb.ki_pos = pos;
iov_iter_kvec(&iter, WRITE, &iov, 1, bytes);
inode_lock(bdev->bd_inode);
ret = generic_write_checks(&kiocb, &iter);
if (ret > 0)
ret = generic_perform_write(&kiocb, &iter);
inode_unlock(bdev->bd_inode);
if (likely(ret > 0)) {
const struct file_operations f_op = {
.fsync = __blkdev_fsync,
};
file.f_op = &f_op;
kiocb.ki_pos += ret;
ret = generic_write_sync(&kiocb, ret);
}
sync_blockdev(bdev);
return ret;
}
EXPORT_SYMBOL_GPL(sec_blk_write);
static int __init sec_blk_helper_init(void)
{
const struct class *blk_cls;
if (IS_BUILTIN(CONFIG_SEC_BLK_HELPER))
return 0;
blk_cls = blk_class();
if (IS_ERR_OR_NULL(blk_cls))
return -EBUSY;
return 0;
}
module_init(sec_blk_helper_init);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Samsung, Block-Device Helper Driver");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,9 @@
config SEC_CLASS
tristate "Samsung sec class/sysfs Feature"
select DRV_SAMSUNG
help
Creates the Samsung 'sec' sysfs class directory.
# legacy compatibility
config DRV_SAMSUNG
bool

View File

@@ -0,0 +1 @@
obj-$(CONFIG_SEC_CLASS) += sec_class.o

View File

@@ -0,0 +1,80 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2014-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
/* CAUTION: do not declare sec_class as an external symbol. */
static struct class *sec_class;
static atomic_t sec_dev;
static int sec_class_match_device_by_name(struct device *dev, const void *data)
{
const char *name = data;
return sysfs_streq(name, dev_name(dev));
}
struct device *sec_dev_get_by_name(const char *name)
{
return class_find_device(sec_class, NULL, name,
sec_class_match_device_by_name);
}
EXPORT_SYMBOL_GPL(sec_dev_get_by_name);
struct device *sec_device_create(void *drvdata, const char *fmt)
{
struct device *dev;
if (unlikely(!sec_class)) {
pr_err("Not yet created class(sec)!\n");
BUG();
}
if (IS_ERR(sec_class)) {
pr_err("Failed to create class(sec) %ld\n", PTR_ERR(sec_class));
BUG();
}
dev = device_create(sec_class, NULL, atomic_inc_return(&sec_dev),
drvdata, "%s", fmt);
if (IS_ERR(dev))
pr_err("Failed to create device %s %ld\n", fmt, PTR_ERR(dev));
else
pr_debug("%s : %d\n", fmt, dev->devt);
return dev;
}
EXPORT_SYMBOL_GPL(sec_device_create);
void sec_device_destroy(dev_t devt)
{
if (unlikely(!devt)) {
pr_err("Not allowed to destroy dev\n");
} else {
pr_info("%d\n", devt);
device_destroy(sec_class, devt);
}
}
EXPORT_SYMBOL_GPL(sec_device_destroy);
static int __init sec_class_create(void)
{
sec_class = class_create("sec");
if (IS_ERR(sec_class)) {
pr_err("Failed to create class(sec) %ld\n", PTR_ERR(sec_class));
return PTR_ERR(sec_class);
}
return 0;
}
core_initcall(sec_class_create);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("sec-class");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,4 @@
config SEC_KEY_NOTIFIER
tristate "SEC Atomic keyboard notifierr"
help
TODO: help is not ready.

View File

@@ -0,0 +1 @@
obj-$(CONFIG_SEC_KEY_NOTIFIER) += sec_key_notifier.o

View File

@@ -0,0 +1,187 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2016-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/module.h>
#include <linux/input.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/samsung/bsp/sec_key_notifier.h>
static DEFINE_SPINLOCK(sec_kn_event_lock);
static RAW_NOTIFIER_HEAD(sec_kn_notifier_list);
static int sec_kn_acceptable_event[KEY_MAX] __read_mostly;
static inline void update_acceptable_event(unsigned int event_code, bool is_add)
{
if (unlikely(event_code >= KEY_MAX)) {
pr_warn("event_code (%u) must be less than KEY_MAX!\n",
event_code);
pr_warn("Caller is %pS\n", __builtin_return_address(0));
return;
}
if (is_add)
sec_kn_acceptable_event[event_code]++;
else
sec_kn_acceptable_event[event_code]--;
BUG_ON(sec_kn_acceptable_event[event_code] < 0);
}
static inline void increase_num_of_acceptable_event(unsigned int event_code)
{
update_acceptable_event(event_code, true);
}
int sec_kn_register_notifier(struct notifier_block *nb,
const unsigned int *events, const size_t nr_events)
{
unsigned long flags;
size_t i;
int err;
spin_lock_irqsave(&sec_kn_event_lock, flags);
for (i = 0; i < nr_events; i++)
increase_num_of_acceptable_event(events[i]);
err = raw_notifier_chain_register(&sec_kn_notifier_list, nb);
spin_unlock_irqrestore(&sec_kn_event_lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(sec_kn_register_notifier);
static inline void decrease_num_of_acceptable_event(unsigned int event_code)
{
update_acceptable_event(event_code, false);
}
int sec_kn_unregister_notifier(struct notifier_block *nb,
const unsigned int *events, const size_t nr_events)
{
unsigned long flags;
size_t i;
int err;
spin_lock_irqsave(&sec_kn_event_lock, flags);
for (i = 0; i < nr_events; i++)
decrease_num_of_acceptable_event(events[i]);
err = raw_notifier_chain_unregister(&sec_kn_notifier_list, nb);
spin_unlock_irqrestore(&sec_kn_event_lock, flags);
return err;
}
EXPORT_SYMBOL_GPL(sec_kn_unregister_notifier);
static inline bool is_event_supported_locked(unsigned int event_type,
unsigned int event_code)
{
if (event_type != EV_KEY || event_code >= KEY_MAX)
return false;
return !!sec_kn_acceptable_event[event_code];
}
static void sec_kn_event(struct input_handle *handle, unsigned int event_type,
unsigned int event_code, int value)
{
struct sec_key_notifier_param param = {
.keycode = event_code,
.down = value,
};
spin_lock(&sec_kn_event_lock);
if (!is_event_supported_locked(event_type, event_code)) {
spin_unlock(&sec_kn_event_lock);
return;
}
raw_notifier_call_chain(&sec_kn_notifier_list, 0, &param);
spin_unlock(&sec_kn_event_lock);
}
static int sec_kn_connect(struct input_handler *handler, struct input_dev *dev,
const struct input_device_id *id)
{
struct input_handle *handle;
int error;
handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
if (!handle)
return -ENOMEM;
handle->dev = dev;
handle->handler = handler;
handle->name = "sec_key_notifier";
error = input_register_handle(handle);
if (error)
goto err_free_handle;
error = input_open_device(handle);
if (error)
goto err_unregister_handle;
return 0;
err_unregister_handle:
input_unregister_handle(handle);
err_free_handle:
kfree(handle);
return error;
}
static void sec_kn_disconnect(struct input_handle *handle)
{
input_close_device(handle);
input_unregister_handle(handle);
kfree(handle);
}
static const struct input_device_id sec_kn_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
.evbit = { BIT_MASK(EV_KEY) },
},
{},
};
static struct input_handler sec_kn_handler = {
.event = sec_kn_event,
.connect = sec_kn_connect,
.disconnect = sec_kn_disconnect,
.name = "sec_key_notifier",
.id_table = sec_kn_ids,
};
static int __init sec_kn_init(void)
{
return input_register_handler(&sec_kn_handler);
}
#if IS_BUILTIN(CONFIG_SEC_KEY_NOTIFIER)
pure_initcall(sec_kn_init);
#else
module_init(sec_kn_init);
#endif
static void __exit sec_kn_exit(void)
{
input_unregister_handler(&sec_kn_handler);
}
module_exit(sec_kn_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Atomic keyboard notifier");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,6 @@
config SEC_OF_KUNIT
tristate "Samsung additional facilites for of / kunit testing"
depends on OF && KUNIT
default n
help
TODO: help is not ready.

View File

@@ -0,0 +1,3 @@
obj-$(CONFIG_SEC_OF_KUNIT) += sec_of_kunit.o
CFLAGS_sec_of_kunit.o = -I$(srctree)/drivers/of

View File

@@ -0,0 +1,112 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/samsung/sec_kunit.h>
#include <linux/samsung/sec_of_kunit.h>
#include "of_private.h"
static int __of_kunit_prepare_device_tree(struct sec_of_kunit_data *testdata,
const char *compatible, struct sec_of_dtb_info *info)
{
if (!compatible || !info)
return 0;
testdata->root = sec_of_kunit_dtb_to_fdt(info);
if (!testdata->root)
return -ENODEV;
testdata->of_node = of_find_compatible_node(testdata->root, NULL,
compatible);
if (IS_ERR_OR_NULL(testdata->of_node))
return -ENOENT;
return 0;
}
int sec_of_kunit_data_init(struct sec_of_kunit_data *testdata,
const char *name, struct builder *bd,
const char *compatible, struct sec_of_dtb_info *info)
{
struct miscdevice *misc = &testdata->misc;
int err;
err = sec_kunit_init_miscdevice(misc, name);
if (err)
return err;
err = __of_kunit_prepare_device_tree(testdata, compatible, info);
if (err) {
sec_kunit_exit_miscdevice(misc);
return err;
}
bd->dev = misc->this_device;
testdata->bd = bd;
return 0;
}
EXPORT_SYMBOL_GPL(sec_of_kunit_data_init);
void sec_of_kunit_data_exit(struct sec_of_kunit_data *testdata)
{
sec_kunit_exit_miscdevice(&testdata->misc);
kfree_sensitive(testdata->root);
}
EXPORT_SYMBOL_GPL(sec_of_kunit_data_exit);
static void *__dt_alloc(u64 __size, u64 align)
{
u64 size = ALIGN(__size, align);
void *ptr = kmalloc(size, GFP_KERNEL);
if (!ptr)
panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
__func__, size, align);
return ptr;
}
/* NOTE: Inspired from 'drivers/of/unittest.c'. */
struct device_node *sec_of_kunit_dtb_to_fdt(struct sec_of_dtb_info *info)
{
struct device_node *root;
u32 data_size;
u32 size;
data_size = info->dtb_end - info->dtb_begin;
if (!data_size) {
pr_err("No dtb 'overlay_base' to attach\n");
return ERR_PTR(-ENOENT);
}
size = fdt_totalsize(info->dtb_begin);
if (size != data_size) {
pr_err("dtb 'overlay_base' header totalsize != actual size");
return ERR_PTR(-EINVAL);
}
__unflatten_device_tree((void *)info->dtb_begin, NULL, &root,
__dt_alloc, true);
return root;
}
EXPORT_SYMBOL_GPL(sec_of_kunit_dtb_to_fdt);
int __init sec_of_kunit_init(void)
{
return 0;
}
module_init(sec_of_kunit_init);
void __exit sec_of_kunit_exit(void)
{
}
module_exit(sec_of_kunit_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Samsung additional facilites for of / kunit testing");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,4 @@
config SEC_PARAM
tristate "Samsung PARAM driver"
help
TODO: help is not ready.

View File

@@ -0,0 +1 @@
obj-$(CONFIG_SEC_PARAM) += sec_param.o

View File

@@ -0,0 +1,185 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2011-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/samsung/builder_pattern.h>
#include <linux/samsung/bsp/sec_param.h>
struct sec_param_drvdata {
struct builder bd;
struct sec_param_operations *ops;
};
static struct sec_param_drvdata *sec_param;
static __always_inline bool __param_is_probed(void)
{
return !!sec_param;
}
static bool __param_get(size_t index, void *value)
{
struct sec_param_operations *ops = sec_param->ops;
if (!ops || !ops->read)
return false;
return ops->read(index, value);
}
bool sec_param_get(size_t index, void *value)
{
if (!__param_is_probed())
return false;
return __param_get(index, value);
}
EXPORT_SYMBOL_GPL(sec_param_get);
static bool __param_set(size_t index, const void *value)
{
struct sec_param_operations *ops = sec_param->ops;
if (!ops || !ops->write)
return false;
return ops->write(index, value);
}
bool sec_param_set(size_t index, const void *value)
{
if (!__param_is_probed())
return false;
return __param_set(index, value);
}
EXPORT_SYMBOL_GPL(sec_param_set);
int sec_param_register_operations(struct sec_param_operations *ops)
{
if (!__param_is_probed())
return -EBUSY;
if (sec_param->ops) {
dev_warn(sec_param->bd.dev, "ops is already set (%p)\n",
sec_param->ops);
return -EPERM;
}
sec_param->ops = ops;
return 0;
}
EXPORT_SYMBOL_GPL(sec_param_register_operations);
void sec_param_unregister_operations(struct sec_param_operations *ops)
{
if (!__param_is_probed())
return;
if (ops != sec_param->ops) {
dev_warn(sec_param->bd.dev,
"%p is not a registered ops.\n", ops);
return;
}
sec_param->ops = NULL;
}
EXPORT_SYMBOL_GPL(sec_param_unregister_operations);
static noinline int __param_probe_epilog(struct builder *bd)
{
struct sec_param_drvdata *drvdata =
container_of(bd, struct sec_param_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
sec_param = drvdata;
return 0;
}
static noinline void __param_remove_prolog(struct builder *bd)
{
/* FIXME: This is not a graceful exit. */
sec_param = NULL;
}
static const struct dev_builder __param_dev_builder[] = {
DEVICE_BUILDER(__param_probe_epilog, __param_remove_prolog),
};
static int __param_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct sec_param_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __param_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct sec_param_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static int sec_param_probe(struct platform_device *pdev)
{
return __param_probe(pdev, __param_dev_builder,
ARRAY_SIZE(__param_dev_builder));
}
static int sec_param_remove(struct platform_device *pdev)
{
return __param_remove(pdev, __param_dev_builder,
ARRAY_SIZE(__param_dev_builder));
}
static const struct of_device_id sec_param_match_table[] = {
{ .compatible = "samsung,param" },
{},
};
MODULE_DEVICE_TABLE(of, sec_param_match_table);
static struct platform_driver sec_param_driver = {
.driver = {
.name = "sec,param",
.of_match_table = of_match_ptr(sec_param_match_table),
},
.probe = sec_param_probe,
.remove = sec_param_remove,
};
static int __init sec_param_init(void)
{
return platform_driver_register(&sec_param_driver);
}
module_init(sec_param_init);
static void __exit sec_param_exit(void)
{
platform_driver_unregister(&sec_param_driver);
}
module_exit(sec_param_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("SEC PARAM driver");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1 @@
source "drivers/samsung/bsp/qcom/param/Kconfig"

View File

@@ -0,0 +1 @@
obj-$(CONFIG_SEC_QC_PARAM) += param/

View File

@@ -0,0 +1,22 @@
config SEC_QC_PARAM
tristate "Samsung PARAM driver for RAW Partion & Qualcomm based devices"
depends on SEC_PARAM
help
TODO: help is not ready.
config SEC_QC_PARAM_TEST_FOR_ON_DEVICE
tristate "KUnit test for sec_qc_param_test"
depends on KUNIT
depends on SEC_QC_PARAM
help
TODO: Describe this config fully.
To run this test driver on a device, set this config to 'm' so the
test driver is built as a module.
config SEC_QC_PARAM_TEST_FOR_ONLY_UML
tristate "KUnit test for sec_qc_param_test"
depends on KUNIT
depends on UML
depends on SEC_QC_PARAM
help
TODO: Describe this config fully.
For UML-only testing it is recommended to set this config to 'y'.

View File

@@ -0,0 +1,4 @@
obj-$(CONFIG_SEC_QC_PARAM) += sec_qc_param.o
CFLAGS_sec_qc_param.o = -I$(srctree)
GCOV_PROFILE_sec_qc_param.o := $(CONFIG_KUNIT)

View File

@@ -0,0 +1,756 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2011-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/uio.h>
#include <linux/samsung/sec_kunit.h>
#include <linux/samsung/bsp/sec_blk_helper.h>
#include <linux/samsung/bsp/sec_class.h>
#include <linux/samsung/debug/sec_debug.h>
#include <block/blk.h>
#include "sec_qc_param.h"
static struct qc_param_drvdata *qc_param;
static __always_inline bool __qc_param_is_probed(void)
{
return !!qc_param;
}
__ss_static bool __qc_param_verify_debuglevel(const struct qc_param_info *info,
const void *value)
{
const unsigned int debuglevel = *(const unsigned int *)value;
bool ret;
switch (debuglevel) {
case SEC_DEBUG_LEVEL_LOW:
case SEC_DEBUG_LEVEL_MID:
case SEC_DEBUG_LEVEL_HIGH:
ret = true;
break;
default:
ret = false;
break;
}
return ret;
}
__ss_static bool __qc_param_verify_sapa(const struct qc_param_info *info,
const void *value)
{
const unsigned int sapa = *(const unsigned int *)value;
if (sapa == SAPA_KPARAM_MAGIC || !sapa)
return true;
return false;
}
__ss_static bool __qc_param_verify_afc_disable(const struct qc_param_info *info,
const void *value)
{
const char mode = *(const char *)value;
if (mode == '0' || mode == '1')
return true;
return false;
}
__ss_static bool __qc_param_verify_pd_disable(const struct qc_param_info *info,
const void *value)
{
const char mode = *(const char *)value;
if (mode == '0' || mode == '1')
return true;
return false;
}
__ss_static bool __qc_param_verify_cp_reserved_mem(const struct qc_param_info *info,
const void *value)
{
const unsigned int cp_reserved_mem = *(const unsigned int *)value;
bool ret;
switch (cp_reserved_mem) {
case CP_MEM_RESERVE_OFF:
case CP_MEM_RESERVE_ON_1:
case CP_MEM_RESERVE_ON_2:
ret = true;
break;
default:
ret = false;
break;
}
return ret;
}
__ss_static bool __qc_param_verify_FMM_lock(const struct qc_param_info *info,
const void *value)
{
const unsigned int fmm_lock_magic = *(const unsigned int *)value;
if (fmm_lock_magic == FMMLOCK_MAGIC_NUM || !fmm_lock_magic)
return true;
return false;
}
__ss_static bool __qc_param_verify_fiemap_update(const struct qc_param_info *info,
const void *value)
{
const unsigned int edtbo_fiemap_magic = *(const unsigned int *)value;
if (edtbo_fiemap_magic == EDTBO_FIEMAP_MAGIC || !edtbo_fiemap_magic)
return true;
return false;
}
static const struct qc_param_info qc_param_info[] = {
QC_PARAM_INFO(param_index_debuglevel, debuglevel, __qc_param_verify_debuglevel),
QC_PARAM_INFO(param_index_uartsel, uartsel, NULL),
QC_PARAM_INFO(param_index_product_device, product_device, NULL),
QC_PARAM_INFO(param_rory_control, rory_control, NULL),
QC_PARAM_INFO(param_cp_debuglevel, cp_debuglevel, NULL),
QC_PARAM_INFO(param_index_sapa, sapa, __qc_param_verify_sapa),
QC_PARAM_INFO(param_index_normal_poweroff, normal_poweroff, NULL),
QC_PARAM_INFO(param_index_wireless_ic, wireless_ic, NULL),
QC_PARAM_INFO(param_index_wireless_charging_mode, wireless_charging_mode, NULL),
QC_PARAM_INFO(param_index_afc_disable, afc_disable, __qc_param_verify_afc_disable),
QC_PARAM_INFO(param_index_cp_reserved_mem, cp_reserved_mem, __qc_param_verify_cp_reserved_mem),
QC_PARAM_INFO(param_index_api_gpio_test, api_gpio_test, NULL),
QC_PARAM_INFO(param_index_api_gpio_test_result, api_gpio_test_result, NULL),
QC_PARAM_INFO(param_index_reboot_recovery_cause, reboot_recovery_cause, NULL),
QC_PARAM_INFO(param_index_user_partition_flashed, user_partition_flashed, NULL),
QC_PARAM_INFO(param_index_force_upload_flag, force_upload_flag, NULL),
// FIXME: QC_PARAM_INFO(param_index_cp_reserved_mem_backup, cp_reserved_mem_backup, NULL),
QC_PARAM_INFO(param_index_FMM_lock, FMM_lock, __qc_param_verify_FMM_lock),
QC_PARAM_INFO(param_index_dump_sink, dump_sink, NULL),
QC_PARAM_INFO(param_index_fiemap_update, fiemap_update, __qc_param_verify_fiemap_update),
QC_PARAM_INFO(param_index_fiemap_result, fiemap_result, NULL),
QC_PARAM_INFO(param_index_window_color, window_color, NULL),
QC_PARAM_INFO(param_index_VrrStatus, VrrStatus, NULL),
QC_PARAM_INFO(param_index_pd_hv_disable, pd_disable, __qc_param_verify_pd_disable),
QC_PARAM_INFO(param_vib_le_est, vib_le_est, NULL),
};
static inline bool __qc_param_is_param_data(loff_t pos)
{
if ((pos >= qc_param->offset) ||
(pos < qc_param->offset + sizeof(struct sec_qc_param_data)))
return true;
return false;
}
ssize_t sec_qc_param_read_raw(void *buf, size_t len, loff_t pos)
{
if (!__qc_param_is_probed())
return -EBUSY;
if (__qc_param_is_param_data(pos))
return -ENXIO;
return sec_blk_read(qc_param->bdev, buf, len, pos);
}
EXPORT_SYMBOL_GPL(sec_qc_param_read_raw);
__ss_static bool __qc_param_is_valid_index(size_t index)
{
size_t size;
if (index >= ARRAY_SIZE(qc_param_info))
return false;
size = qc_param_info[index].size;
if (!size)
return false;
return true;
}
static bool __qc_param_read(struct qc_param_drvdata *drvdata,
size_t index, void *value)
{
struct device *dev = drvdata->bd.dev;
const struct qc_param_info *info;
loff_t offset;
ssize_t read;
info = &qc_param_info[index];
offset = info->offset + drvdata->offset;
read = sec_blk_read(drvdata->bdev,
value, info->size, offset);
if (read < 0) {
dev_warn(dev, "read failed (idx:%zu, err:%zd)\n", index, read);
return false;
} else if (read != info->size) {
dev_warn(dev, "wrong size (idx:%zu)- requested(%zu) != read(%zd)\n",
index, info->size, read);
return false;
}
return true;
}
static bool sec_qc_param_read(size_t index, void *value)
{
if (!__qc_param_is_probed())
return false;
if (!__qc_param_is_valid_index(index)) {
dev_warn(qc_param->bd.dev, "invalid index (%zu)\n", index);
return false;
}
return __qc_param_read(qc_param, index, value);
}
ssize_t sec_qc_param_write_raw(const void *buf, size_t len, loff_t pos)
{
if (!__qc_param_is_probed())
return -EBUSY;
if (__qc_param_is_param_data(pos))
return -ENXIO;
return sec_blk_write(qc_param->bdev, buf, len, pos);
}
EXPORT_SYMBOL_GPL(sec_qc_param_write_raw);
static bool __qc_param_write(struct qc_param_drvdata *drvdata,
size_t index, const void *value)
{
struct device *dev = drvdata->bd.dev;
loff_t offset;
ssize_t written;
const struct qc_param_info *info;
info = &qc_param_info[index];
offset = info->offset + drvdata->offset;
if (info->verify_input && !info->verify_input(info, value)) {
dev_warn(dev, "wrong data %pS\n",
__builtin_return_address(0));
print_hex_dump_bytes(" + ", DUMP_PREFIX_OFFSET,
value, info->size);
return false;
}
written = sec_blk_write(drvdata->bdev,
value, info->size, offset);
if (written < 0) {
dev_warn(dev, "write failed (idx:%zu, err:%zd)\n",
index, written);
return false;
} else if (written != info->size) {
dev_warn(dev, "wrong size (idx:%zu) - requested(%zu) != written(%zd)\n",
index, info->size, written);
return false;
}
return true;
}
static bool sec_qc_param_write(size_t index, const void *value)
{
if (!__qc_param_is_probed())
return false;
if (!__qc_param_is_valid_index(index)) {
dev_warn(qc_param->bd.dev, "invalid index (%zu)\n", index);
return false;
}
return __qc_param_write(qc_param, index, value);
}
__ss_static noinline int __qc_param_parse_dt_bdev_path(struct builder *bd,
struct device_node *np)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
return of_property_read_string(np, "sec,bdev_path",
&drvdata->bdev_path);
}
__ss_static noinline int __qc_param_parse_dt_negative_offset(struct builder *bd,
struct device_node *np)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
u32 negative_offset;
int err;
err = of_property_read_u32(np, "sec,negative_offset", &negative_offset);
if (err)
return -EINVAL;
drvdata->negative_offset = (loff_t)negative_offset;
return 0;
}
static const struct dt_builder __qc_param_dt_builder[] = {
DT_BUILDER(__qc_param_parse_dt_bdev_path),
DT_BUILDER(__qc_param_parse_dt_negative_offset),
};
static noinline int __qc_param_parse_dt(struct builder *bd)
{
return sec_director_parse_dt(bd, __qc_param_dt_builder,
ARRAY_SIZE(__qc_param_dt_builder));
}
static int __qc_param_sec_class_create(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
struct device *param_dev;
param_dev = sec_device_create(NULL, "sec_param");
if (IS_ERR(param_dev))
return PTR_ERR(param_dev);
dev_set_drvdata(param_dev, drvdata);
drvdata->param_dev = param_dev;
return 0;
}
static void __qc_param_sec_class_remove(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
struct device *param_dev = drvdata->param_dev;
if (!param_dev)
return;
sec_device_destroy(param_dev->devt);
}
static noinline int __qc_param_init_blkdev(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
struct device *dev = bd->dev;
fmode_t mode = FMODE_READ | FMODE_WRITE;
struct block_device *bdev;
sector_t nr_sects;
int err;
bdev = blkdev_get_by_path(drvdata->bdev_path, mode, NULL, NULL);
if (IS_ERR(bdev)) {
dev_t devt;
if (sec_devt_from_partuuid(drvdata->bdev_path, &devt)) {
dev_warn(dev, "'sec_devt_from_partuuid' failed!\n");
err = -EPROBE_DEFER;
goto err_blkdev;
}
bdev = blkdev_get_by_dev(devt, mode, NULL, NULL);
if (IS_ERR(bdev)) {
dev_warn(dev, "'blkdev_get_by_dev' failed! (%ld)\n",
PTR_ERR(bdev));
err = -EPROBE_DEFER;
goto err_blkdev;
}
}
nr_sects = bdev_nr_sectors(bdev);
if (!nr_sects) {
dev_err(dev, "not enough space for %s\n",
drvdata->bdev_path);
blkdev_put(bdev, NULL);
return -ENOSPC;
}
drvdata->bdev = bdev;
drvdata->offset = (loff_t)(nr_sects << SECTOR_SHIFT)
- drvdata->negative_offset;
return 0;
err_blkdev:
dev_err(dev, "can't find a block device - %s\n",
drvdata->bdev_path);
return err;
}
static noinline void __qc_param_exit_blkdev(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
blkdev_put(drvdata->bdev, NULL);
}
static int __qc_param_register_operations(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
struct sec_param_operations *ops = &drvdata->ops;
int err;
ops->read = sec_qc_param_read;
ops->write = sec_qc_param_write;
err = sec_param_register_operations(ops);
if (err == -EBUSY)
return -EPROBE_DEFER;
return err;
}
static void __qc_param_unregister_operations(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
struct sec_param_operations *ops = &drvdata->ops;
sec_param_unregister_operations(ops);
}
static ssize_t __used __qc_param_show_simple_uint(struct device *sec_class_dev,
struct device_attribute *attr, char *buf,
size_t index)
{
struct qc_param_drvdata *drvdata = dev_get_drvdata(sec_class_dev);
unsigned int value;
if (!__qc_param_read(drvdata, index, &value))
return -EINVAL;
return scnprintf(buf, PAGE_SIZE, "%u", value);
}
static ssize_t __used __qc_param_show_simple_str(struct device *sec_class_dev,
struct device_attribute *attr, char *buf,
size_t index, size_t len)
{
struct qc_param_drvdata *drvdata = dev_get_drvdata(sec_class_dev);
char *context;
int ret;
/* NOTE: use a bounce buffer so that 'buf' is not corrupted
* when '__qc_param_read' fails.
*/
context = kmalloc(len, GFP_KERNEL);
if (!context) {
ret = -ENOMEM;
goto __err_nomem;
}
if (!__qc_param_read(drvdata, index, context)) {
ret = -EINVAL;
goto __err_read_fail;
}
ret = scnprintf(buf, PAGE_SIZE, "%s", context);
__err_read_fail:
kfree(context);
__err_nomem:
return ret;
}
static ssize_t api_gpio_test_show(struct device *sec_class_dev,
struct device_attribute *attr, char *buf)
{
return __qc_param_show_simple_uint(sec_class_dev, attr, buf,
param_index_api_gpio_test);
}
DEVICE_ATTR_RO(api_gpio_test);
static ssize_t api_gpio_test_result_show(struct device *sec_class_dev,
struct device_attribute *attr, char *buf)
{
return __qc_param_show_simple_str(sec_class_dev, attr, buf,
param_index_api_gpio_test_result,
QC_PARAM_SIZE(api_gpio_test_result));
}
DEVICE_ATTR_RO(api_gpio_test_result);
static inline void __api_gpio_test_clear(struct qc_param_drvdata *drvdata)
{
unsigned int zero = 0;
__qc_param_write(drvdata, param_index_api_gpio_test, &zero);
}
static inline void __api_gpio_test_result_clear(
struct qc_param_drvdata *drvdata)
{
char empty[QC_PARAM_SIZE(api_gpio_test_result)] = { '\0', };
__qc_param_write(drvdata, param_index_api_gpio_test_result, &empty);
}
static ssize_t api_gpio_test_clear_store(struct device *sec_class_dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qc_param_drvdata *drvdata = dev_get_drvdata(sec_class_dev);
struct device *dev = drvdata->bd.dev;
int written;
int err;
err = kstrtoint(buf, 10, &written);
if (err < 0) {
dev_warn(dev, "requested written code is malformed or wrong\n");
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
buf, count, 1);
return err;
}
if (written != 1)
return count;
__api_gpio_test_clear(drvdata);
__api_gpio_test_result_clear(drvdata);
return count;
}
DEVICE_ATTR_WO(api_gpio_test_clear);
static struct attribute *sec_qc_param_attrs[] = {
&dev_attr_api_gpio_test.attr,
&dev_attr_api_gpio_test_result.attr,
&dev_attr_api_gpio_test_clear.attr,
NULL,
};
static const struct attribute_group sec_qc_param_attr_group = {
.attrs = sec_qc_param_attrs,
};
static noinline int __qc_param_sysfs_create(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
struct device *dev = drvdata->param_dev;
int err;
err = sysfs_create_group(&dev->kobj, &sec_qc_param_attr_group);
if (err)
return err;
return 0;
}
static noinline void __qc_param_sysfs_remove(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
struct device *dev = drvdata->param_dev;
sysfs_remove_group(&dev->kobj, &sec_qc_param_attr_group);
}
static noinline int __qc_param_probe_epilog(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
qc_param = drvdata;
return 0;
}
static noinline void __qc_param_remove_prolog(struct builder *bd)
{
/* FIXME: This is not a graceful exit. */
qc_param = NULL;
}
static int __qc_param_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct qc_param_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __qc_param_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct qc_param_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
static void __qc_param_dbgfs_show_bdev(struct seq_file *m)
{
struct qc_param_drvdata *drvdata = m->private;
struct block_device *bdev = drvdata->bdev;
seq_puts(m, "* Block Device :\n");
seq_printf(m, " - bdevname : %pg\n", bdev);
seq_printf(m, " - uuid : %s\n", bdev->bd_meta_info->uuid);
seq_printf(m, " - volname : %s\n", bdev->bd_meta_info->volname);
seq_puts(m, "\n");
}
static void __qc_param_dbgfs_show_each(struct seq_file *m, size_t index)
{
const struct qc_param_info *info = &qc_param_info[index];
uint8_t *buf;
if (!info->size)
return;
seq_printf(m, "[%zu] = %s\n", index, info->name);
seq_printf(m, " - offset : %zu\n", (size_t)info->offset);
seq_printf(m, " - size : %zu\n", info->size);
buf = kmalloc(info->size, GFP_KERNEL);
if (!buf) {
seq_puts(m, " - failed to allocate a read buffer!\n");
goto warn_read_fail;
}
if (!sec_qc_param_read(index, buf)) {
seq_puts(m, " - failed to read param!\n");
goto warn_read_fail;
}
seq_hex_dump(m, " + ", DUMP_PREFIX_OFFSET, 16, 1,
buf, info->size, true);
warn_read_fail:
seq_puts(m, "\n");
kfree(buf);
}
static int sec_qc_param_dbgfs_show_all(struct seq_file *m, void *unused)
{
size_t i;
__qc_param_dbgfs_show_bdev(m);
for (i = 0; i < ARRAY_SIZE(qc_param_info); i++)
__qc_param_dbgfs_show_each(m, i);
return 0;
}
static int sec_qc_param_dbgfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sec_qc_param_dbgfs_show_all,
inode->i_private);
}
static const struct file_operations sec_qc_param_dgbfs_fops = {
.open = sec_qc_param_dbgfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static noinline int __qc_param_debugfs_create(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
drvdata->dbgfs = debugfs_create_file("sec_qc_param", 0440,
NULL, drvdata, &sec_qc_param_dgbfs_fops);
return 0;
}
static noinline void __qc_param_debugfs_remove(struct builder *bd)
{
struct qc_param_drvdata *drvdata =
container_of(bd, struct qc_param_drvdata, bd);
debugfs_remove(drvdata->dbgfs);
}
#else
static noinline int __qc_param_debugfs_create(struct builder *bd) { return 0; }
static noinline void __qc_param_debugfs_remove(struct builder *bd) {}
#endif
static const struct dev_builder __qc_param_dev_builder[] = {
DEVICE_BUILDER(__qc_param_parse_dt, NULL),
DEVICE_BUILDER(__qc_param_sec_class_create,
__qc_param_sec_class_remove),
DEVICE_BUILDER(__qc_param_init_blkdev, __qc_param_exit_blkdev),
DEVICE_BUILDER(__qc_param_register_operations,
__qc_param_unregister_operations),
DEVICE_BUILDER(__qc_param_sysfs_create, __qc_param_sysfs_remove),
DEVICE_BUILDER(__qc_param_debugfs_create, __qc_param_debugfs_remove),
DEVICE_BUILDER(__qc_param_probe_epilog, __qc_param_remove_prolog),
};
static int sec_qc_param_probe(struct platform_device *pdev)
{
return __qc_param_probe(pdev, __qc_param_dev_builder,
ARRAY_SIZE(__qc_param_dev_builder));
}
static int sec_qc_param_remove(struct platform_device *pdev)
{
return __qc_param_remove(pdev, __qc_param_dev_builder,
ARRAY_SIZE(__qc_param_dev_builder));
}
static const struct of_device_id sec_qc_param_match_table[] = {
{ .compatible = "samsung,qcom-param" },
{},
};
MODULE_DEVICE_TABLE(of, sec_qc_param_match_table);
static struct platform_driver sec_qc_param_driver = {
.driver = {
.name = "sec,qc-param",
.of_match_table = of_match_ptr(sec_qc_param_match_table),
},
.probe = sec_qc_param_probe,
.remove = sec_qc_param_remove,
};
static int __init sec_qc_param_init(void)
{
return platform_driver_register(&sec_qc_param_driver);
}
module_init(sec_qc_param_init);
static void __exit sec_qc_param_exit(void)
{
platform_driver_unregister(&sec_qc_param_driver);
}
module_exit(sec_qc_param_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("SEC PARAM driver for RAW Partion & Qualcomm based devices");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,91 @@
#ifndef __INTERNAL__SEC_QC_PARAM_H__
#define __INTERNAL__SEC_QC_PARAM_H__
#include <linux/debugfs.h>
#include <linux/samsung/builder_pattern.h>
#include <linux/samsung/bsp/sec_param.h>
#include <linux/samsung/bsp/sec_sysup.h> /* deprecated */
struct qc_param_drvdata {
struct builder bd;
struct device *param_dev;
struct sec_param_operations ops;
const char *bdev_path;
loff_t negative_offset;
struct block_device *bdev;
loff_t offset;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs;
#endif
};
#define QC_PARAM_OFFSET(__member) \
(offsetof(struct sec_qc_param_data, __member))
#define QC_PARAM_SIZE(__member) \
(sizeof(((struct sec_qc_param_data *)NULL)->__member))
#define __QC_PARAM_INFO(__member, __verify_input) { \
.name = #__member, \
.offset = QC_PARAM_OFFSET(__member), \
.size = QC_PARAM_SIZE(__member), \
.verify_input = __verify_input, \
}
#define QC_PARAM_INFO(__index, __member, __verify_input) \
[__index] = __QC_PARAM_INFO(__member, __verify_input)
struct qc_param_info;
typedef bool (*qc_param_verify_input_t)(const struct qc_param_info *info,
const void *value);
struct qc_param_info {
const char *name;
loff_t offset;
size_t size;
qc_param_verify_input_t verify_input;
};
struct sec_qc_param_data {
unsigned int debuglevel;
unsigned int uartsel;
unsigned int rory_control;
unsigned int product_device; /* product/dev device */
unsigned int reserved1;
unsigned int cp_debuglevel;
unsigned int reserved2;
unsigned int sapa[3];
unsigned int normal_poweroff;
unsigned int wireless_ic;
char used0[80];
char used1[80];
char used2[80];
char used3[80];
char used4[80];
unsigned int wireless_charging_mode;
unsigned int afc_disable;
unsigned int cp_reserved_mem;
char used5[4];
char used6[4];
char reserved8[8];
char used7[16];
unsigned int api_gpio_test;
char api_gpio_test_result[256];
char reboot_recovery_cause[256];
unsigned int user_partition_flashed;
unsigned int force_upload_flag;
unsigned int cp_reserved_mem_backup;
unsigned int FMM_lock;
unsigned int dump_sink;
unsigned int fiemap_update;
struct fiemap_p fiemap_result;
char used8[80];
char window_color[2];
char VrrStatus[16];
unsigned int pd_disable;
unsigned int vib_le_est;
};
#endif /* __INTERNAL__SEC_QC_PARAM_H__ */
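
The QC_PARAM_OFFSET/QC_PARAM_SIZE/QC_PARAM_INFO macros above derive each table entry's offset and size directly from a named member of struct sec_qc_param_data, so the table cannot drift from the layout. A hedged sketch of how one entry and an input-verification callback could be declared; the index name param_index_debuglevel and the accepted range are assumptions for illustration, not taken from the driver's real table:

/* Hypothetical table entry built with the macros above; the index name and
 * the accepted range are assumptions used only for illustration.
 */
static bool __qc_param_verify_debuglevel(const struct qc_param_info *info,
		const void *value)
{
	unsigned int level = *(const unsigned int *)value;

	return level <= 2;	/* e.g. LOW(0) / MID(1) / HIGH(2) */
}

static const struct qc_param_info example_qc_param_info[] = {
	QC_PARAM_INFO(param_index_debuglevel, debuglevel,
		      __qc_param_verify_debuglevel),
};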

View File

@@ -0,0 +1,32 @@
config SEC_RELOC_GPIO
tristate "Samsung Legacy-Style Relocated GPIO Interface"
default m if SEC_CLASS=m
default y if SEC_CLASS=y
help
TODO: help is not ready.
config SEC_RELOC_GPIO_EN
bool "Samsung Legacy-Style Relocated GPIO Interface for Factory Mode"
depends on SEC_RELOC_GPIO
default y if SEC_FACTORY
default n
help
TODO: help is not ready.
config SEC_RELOC_GPIO_TEST_FOR_ON_DEVICE
tristate "KUnit test for sec_reloc_gpio_test"
depends on KUNIT
depends on SEC_RELOC_GPIO
help
TODO: Describe config fully.
If you run this test driver on a device, this config SHOULD be set to 'm' to build the test driver modularly.
config SEC_RELOC_GPIO_TEST_FOR_ONLY_UML
tristate "KUnit test for sec_reloc_gpio_test"
depends on KUNIT
depends on UML
depends on SEC_RELOC_GPIO
help
TODO: Describe config fully.
This CONFIG is recommended to be set to 'y'.

View File

@@ -0,0 +1,3 @@
obj-$(CONFIG_SEC_RELOC_GPIO) += sec_reloc_gpio.o
GCOV_PROFILE_sec_reloc_gpio.o := $(CONFIG_KUNIT)

View File

@@ -0,0 +1,401 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2021-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/samsung/sec_kunit.h>
#include <linux/samsung/bsp/sec_class.h>
#include "sec_reloc_gpio.h"
__ss_static noinline int __reloc_gpio_parse_dt_reloc_base(struct builder *bd,
struct device_node *np)
{
struct reloc_gpio_drvdata *drvdata =
container_of(bd, struct reloc_gpio_drvdata, bd);
struct device *dev = bd->dev;
int nr_chip;
struct reloc_gpio_chip *chip;
u32 base;
int i;
nr_chip = of_property_count_elems_of_size(np, "sec,reloc-base",
sizeof(u32));
if (nr_chip < 0) {
/* assume '0' if -EINVAL or -ENODATA is returned */
drvdata->nr_chip = 0;
return 0;
}
chip = devm_kmalloc_array(dev, nr_chip, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
for (i = 0; i < nr_chip; i++) {
int err = of_property_read_u32_index(np, "sec,reloc-base",
i, &base);
if (err) {
dev_err(dev, "can't read sec,reloc-base [%d]\n", i);
return err;
}
chip[i].base = (int)base;
}
drvdata->chip = chip;
drvdata->nr_chip = nr_chip;
return 0;
}
__ss_static noinline int __reloc_gpio_parse_dt_gpio_label(struct builder *bd,
struct device_node *np)
{
struct reloc_gpio_drvdata *drvdata =
container_of(bd, struct reloc_gpio_drvdata, bd);
struct device *dev = bd->dev;
const char *label;
struct reloc_gpio_chip *chip;
int nr_chip;
int i;
chip = drvdata->chip;
nr_chip = drvdata->nr_chip;
for (i = 0; i < nr_chip; i++) {
int err = of_property_read_string_helper(np,
"sec,gpio-label", &label, 1, i);
if (err < 0) {
dev_err(dev, "can't read sec,gpio-label [%d]\n", i);
return err;
}
chip[i].label = label;
chip[i].label_len = strlen(label);
}
return 0;
}
static const struct dt_builder __reloc_gpio_dt_builder[] = {
DT_BUILDER(__reloc_gpio_parse_dt_reloc_base),
DT_BUILDER(__reloc_gpio_parse_dt_gpio_label),
};
static noinline int __reloc_gpio_probe_prolog(struct builder *bd)
{
struct reloc_gpio_drvdata *drvdata =
container_of(bd, struct reloc_gpio_drvdata, bd);
drvdata->gpio_num = -EINVAL;
return 0;
}
static noinline int __reloc_gpio_parse_dt(struct builder *bd)
{
return sec_director_parse_dt(bd, __reloc_gpio_dt_builder,
ARRAY_SIZE(__reloc_gpio_dt_builder));
}
static int __reloc_gpio_sec_class_create(struct builder *bd)
{
struct reloc_gpio_drvdata *drvdata =
container_of(bd, struct reloc_gpio_drvdata, bd);
struct device *reloc_gpio_dev;
reloc_gpio_dev = sec_device_create(NULL, "gpio");
if (IS_ERR(reloc_gpio_dev))
return PTR_ERR(reloc_gpio_dev);
dev_set_drvdata(reloc_gpio_dev, drvdata);
drvdata->reloc_gpio_dev = reloc_gpio_dev;
return 0;
}
static void __reloc_gpio_sec_class_remove(struct builder *bd)
{
struct reloc_gpio_drvdata *drvdata =
container_of(bd, struct reloc_gpio_drvdata, bd);
struct device *reloc_gpio_dev = drvdata->reloc_gpio_dev;
if (!reloc_gpio_dev)
return;
sec_device_destroy(reloc_gpio_dev->devt);
}
__ss_static bool __reloc_gpio_is_valid_gpio_num(struct reloc_gpio_chip *chip,
int nr_gpio, int gpio_num)
{
int min = chip->base;
int max = chip->base + nr_gpio - 1;
if ((gpio_num >= min) && (gpio_num <= max))
return true;
return false;
}
__ss_static bool __reloc_gpio_is_matched(struct gpio_chip *gc,
struct reloc_gpio_chip *chip, int gpio_num)
{
size_t len = strnlen(gc->label, chip->label_len + 1);
if (len != chip->label_len)
return false;
if (strncmp(gc->label, chip->label, chip->label_len))
return false;
return __reloc_gpio_is_valid_gpio_num(chip, gc->ngpio, gpio_num);
}
__ss_static int sec_reloc_gpio_is_matched_gpio_chip(struct gpio_chip *gc,
void *__drvdata)
{
struct reloc_gpio_drvdata *drvdata = __drvdata;
struct reloc_gpio_chip *chip = drvdata->chip;
size_t nr_chip = drvdata->nr_chip;
int gpio_num = drvdata->gpio_num;
size_t i;
for (i = 0; i < nr_chip; i++) {
struct reloc_gpio_chip *this_chip = &chip[i];
if (__reloc_gpio_is_matched(gc, this_chip, gpio_num)) {
drvdata->chip_idx_found = i;
return 1;
}
}
return 0;
}
static inline bool __reloc_gpio_test_range(struct reloc_gpio_drvdata *drvdata,
struct reloc_gpio_chip *chip, struct gpio_chip *gc)
{
if (drvdata->gpio_num < chip->base)
return false;
if (drvdata->gpio_num >= (chip->base + gc->ngpio))
return false;
return true;
}
__ss_static int __reloc_gpio_from_legacy_number(
struct reloc_gpio_drvdata *drvdata, struct gpio_chip *gc)
{
struct reloc_gpio_chip *chip;
if (drvdata->nr_chip < drvdata->chip_idx_found)
return -EINVAL;
/* drvdata->chip_idx_found is determined when 'gpiochip_find' is run. */
chip = &drvdata->chip[drvdata->chip_idx_found];
if (!__reloc_gpio_test_range(drvdata, chip, gc))
return -ERANGE;
return (drvdata->gpio_num - chip->base) + gc->base;
}
static int __reloc_gpio_relocated_to_actual(struct reloc_gpio_drvdata *drvdata)
{
struct gpio_chip *gc;
if (!drvdata->nr_chip)
return drvdata->gpio_num;
gc = gpiochip_find(drvdata, sec_reloc_gpio_is_matched_gpio_chip);
if (IS_ERR_OR_NULL(gc))
return -ENOENT;
return __reloc_gpio_from_legacy_number(drvdata, gc);
}
static ssize_t check_requested_gpio_show(struct device *sec_class_dev,
struct device_attribute *attr, char *buf)
{
struct reloc_gpio_drvdata *drvdata = dev_get_drvdata(sec_class_dev);
int gpio_actual = -EINVAL;
int val;
if (drvdata->gpio_num < 0) {
val = -ENODEV;
goto __finally;
}
gpio_actual = __reloc_gpio_relocated_to_actual(drvdata);
if (gpio_actual < 0) {
val = -ENOENT;
goto __finally;
}
val = gpio_get_value(gpio_actual);
__finally:
drvdata->gpio_num = -EINVAL;
return scnprintf(buf, PAGE_SIZE, "GPIO[%d] : [%d]", gpio_actual, val);
}
static ssize_t check_requested_gpio_store(struct device *sec_class_dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct reloc_gpio_drvdata *drvdata = dev_get_drvdata(sec_class_dev);
struct device *dev = drvdata->bd.dev;
int gpio_num;
int err;
err = kstrtoint(buf, 10, &gpio_num);
if (err < 0) {
dev_warn(dev, "requested gpio number is malformed or wrong\n");
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1,
buf, count, 1);
return err;
}
drvdata->gpio_num = gpio_num;
return count;
}
static DEVICE_ATTR_RW(check_requested_gpio);
static struct attribute *sec_reloc_gpio_attrs[] = {
&dev_attr_check_requested_gpio.attr,
NULL,
};
static const struct attribute_group sec_reloc_gpio_attr_group = {
.attrs = sec_reloc_gpio_attrs,
};
static int __reloc_gpio_sysfs_create(struct builder *bd)
{
struct reloc_gpio_drvdata *drvdata =
container_of(bd, struct reloc_gpio_drvdata, bd);
struct device *dev = drvdata->reloc_gpio_dev;
int err;
err = sysfs_create_group(&dev->kobj, &sec_reloc_gpio_attr_group);
if (err)
return err;
return 0;
}
static void __reloc_gpio_sysfs_remove(struct builder *bd)
{
struct reloc_gpio_drvdata *drvdata =
container_of(bd, struct reloc_gpio_drvdata, bd);
struct device *dev = drvdata->reloc_gpio_dev;
sysfs_remove_group(&dev->kobj, &sec_reloc_gpio_attr_group);
}
static noinline int __reloc_gpio_epilog(struct builder *bd)
{
struct reloc_gpio_drvdata *drvdata =
container_of(bd, struct reloc_gpio_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
return 0;
}
static int __reloc_gpio_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct reloc_gpio_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __reloc_gpio_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct reloc_gpio_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static const struct dev_builder __reloc_gpio_dev_builder[] = {
DEVICE_BUILDER(__reloc_gpio_parse_dt, NULL),
DEVICE_BUILDER(__reloc_gpio_probe_prolog, NULL),
DEVICE_BUILDER(__reloc_gpio_sec_class_create,
__reloc_gpio_sec_class_remove),
DEVICE_BUILDER(__reloc_gpio_sysfs_create,
__reloc_gpio_sysfs_remove),
DEVICE_BUILDER(__reloc_gpio_epilog, NULL),
};
static int sec_reloc_gpio_probe(struct platform_device *pdev)
{
return __reloc_gpio_probe(pdev, __reloc_gpio_dev_builder,
ARRAY_SIZE(__reloc_gpio_dev_builder));
}
static int sec_reloc_gpio_remove(struct platform_device *pdev)
{
return __reloc_gpio_remove(pdev, __reloc_gpio_dev_builder,
ARRAY_SIZE(__reloc_gpio_dev_builder));
}
static const struct of_device_id sec_reloc_gpio_match_table[] = {
{ .compatible = "samsung,reloc_gpio" },
{},
};
MODULE_DEVICE_TABLE(of, sec_reloc_gpio_match_table);
static struct platform_driver sec_reloc_gpio_driver = {
.driver = {
.name = "sec,reloc_gpio",
.of_match_table = of_match_ptr(sec_reloc_gpio_match_table),
},
.probe = sec_reloc_gpio_probe,
.remove = sec_reloc_gpio_remove,
};
static int __init sec_reloc_gpio_init(void)
{
if (!IS_ENABLED(CONFIG_SEC_RELOC_GPIO_EN))
return 0;
return platform_driver_register(&sec_reloc_gpio_driver);
}
module_init(sec_reloc_gpio_init);
static void __exit sec_reloc_gpio_exit(void)
{
if (!IS_ENABLED(CONFIG_SEC_RELOC_GPIO_EN))
return;
platform_driver_unregister(&sec_reloc_gpio_driver);
}
module_exit(sec_reloc_gpio_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Legacy-Style Relocated GPIO Interface for Factory Mode");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,24 @@
#ifndef __INTERNAL__SEC_RELOC_GPIO_H__
#define __INTERNAL__SEC_RELOC_GPIO_H__
#include <linux/device.h>
#include <linux/samsung/builder_pattern.h>
struct reloc_gpio_chip {
const char *label;
size_t label_len;
int base;
};
struct reloc_gpio_drvdata {
struct builder bd;
struct device *reloc_gpio_dev;
struct reloc_gpio_chip *chip;
size_t nr_chip;
/* relocated gpio number which is request from sysfs interface */
int gpio_num;
/* found index after calling gpiochip_find */
int chip_idx_found;
};
#endif /* __INTERNAL__SEC_RELOC_GPIO_H__ */

View File

@@ -0,0 +1,17 @@
source "drivers/samsung/debug/common/Kconfig"
source "drivers/samsung/debug/boot_stat/Kconfig"
source "drivers/samsung/debug/log_buf/Kconfig"
source "drivers/samsung/debug/pmsg/Kconfig"
source "drivers/samsung/debug/reboot_cmd/Kconfig"
source "drivers/samsung/debug/upload_cause/Kconfig"
source "drivers/samsung/debug/crashkey/Kconfig"
source "drivers/samsung/debug/crashkey_long/Kconfig"
source "drivers/samsung/debug/debug_region/Kconfig"
source "drivers/samsung/debug/rdx_bootdev/Kconfig"
# TODO: architecture specific drivers at here
source "drivers/samsung/debug/arm64/Kconfig"
source "drivers/samsung/debug/riscv64/Kconfig"
# TODO: soc specific drivers at here
source "drivers/samsung/debug/qcom/Kconfig"

View File

@@ -0,0 +1,15 @@
obj-$(CONFIG_SEC_DEBUG) += common/
obj-$(CONFIG_SEC_BOOT_STAT) += boot_stat/
obj-$(CONFIG_SEC_LOG_BUF) += log_buf/
obj-$(CONFIG_SEC_PMSG) += pmsg/
obj-$(CONFIG_SEC_REBOOT_CMD) += reboot_cmd/
obj-$(CONFIG_SEC_UPLOAD_CAUSE) += upload_cause/
obj-$(CONFIG_SEC_CRASHKEY) += crashkey/
obj-$(CONFIG_SEC_CRASHKEY_LONG) += crashkey_long/
obj-$(CONFIG_SEC_DEBUG_REGION) += debug_region/
obj-$(CONFIG_SEC_RDX_BOOTDEV) += rdx_bootdev/
obj-$(CONFIG_ARM64) += arm64/
obj-$(CONFIG_RISCV) += riscv64/
obj-y += qcom/

View File

@@ -0,0 +1,3 @@
source "drivers/samsung/debug/arm64/ap_context/Kconfig"
source "drivers/samsung/debug/arm64/fsimd_debug/Kconfig"
source "drivers/samsung/debug/arm64/debug/Kconfig"

View File

@@ -0,0 +1,3 @@
obj-$(CONFIG_SEC_ARM64_AP_CONTEXT) += ap_context/
obj-$(CONFIG_SEC_ARM64_FSIMD_DEBUG) += fsimd_debug/
obj-$(CONFIG_SEC_ARM64_DEBUG) += debug/

View File

@@ -0,0 +1,5 @@
config SEC_ARM64_AP_CONTEXT
tristate "SEC AP CORE/MMU context snaphot"
depends on SEC_DEBUG_REGION && ARM64 && ANDROID_VENDOR_HOOKS
help
TODO: help is not ready.

View File

@@ -0,0 +1 @@
obj-$(CONFIG_SEC_ARM64_AP_CONTEXT) += sec_arm64_ap_context.o

View File

@@ -0,0 +1,523 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2020-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <trace/hooks/debug.h>
#include <linux/samsung/builder_pattern.h>
#include <linux/samsung/debug/sec_arm64_ap_context.h>
#include <linux/samsung/debug/sec_debug_region.h>
#define __ap_context_read_special_reg(x) ({ \
uint64_t val; \
asm volatile ("mrs %0, " # x : "=r"(val)); \
val; \
})
struct ap_context_drvdata {
struct builder bd;
const char *name;
uint32_t unique_id;
struct sec_dbg_region_client *client;
struct sec_arm64_ap_context *ctx;
struct notifier_block nb_die;
struct notifier_block nb_panic;
};
enum {
TYPE_VH_IPI_STOP = 0,
/* */
TYPE_VH_MAX,
TYPE_VH_UNKNOWN = -EINVAL,
};
static void __always_inline __ap_context_save_core_regs_from_pt_regs(
struct sec_arm64_ap_context *ctx, struct pt_regs *regs)
{
memcpy_toio(&ctx->core_regs, regs, sizeof(struct pt_regs));
}
/* FIXME: temporary workaround to prevent linking errors */
void __naked __ap_context_save_core_regs_on_current(struct pt_regs *regs)
{
asm volatile (
"stp x1, x2, [sp, #-0x20]! \n\t"
"stp x3, x4, [sp, #0x10] \n\t"
/* x0 ~ x28 */
"stp x0, x1, [x0] \n\t"
"stp x2, x3, [x0, #0x10] \n\t"
"stp x4, x5, [x0, #0x20] \n\t"
"stp x6, x7, [x0, #0x30] \n\t"
"stp x8, x9, [x0, #0x40] \n\t"
"stp x10, x11, [x0, #0x50] \n\t"
"stp x12, x13, [x0, #0x60] \n\t"
"stp x14, x15, [x0, #0x70] \n\t"
"stp x16, x17, [x0, #0x80] \n\t"
"stp x18, x19, [x0, #0x90] \n\t"
"stp x20, x21, [x0, #0xA0] \n\t"
"stp x22, x23, [x0, #0xB0] \n\t"
"stp x24, x25, [x0, #0xC0] \n\t"
"stp x26, x27, [x0, #0xD0] \n\t"
"str x28, [x0, #0xE0] \n\t"
/* pstate */
"mrs x1, nzcv \n\t"
"bic x1, x1, #0xFFFFFFFF0FFFFFFF \n\t"
"mrs x2, daif \n\t"
"bic x2, x2, #0xFFFFFFFFFFFFFC3F \n\t"
"orr x1, x1, x2 \n\t"
"mrs x3, currentel \n\t"
"bic x3, x3, #0xFFFFFFFFFFFFFFF3 \n\t"
"orr x1, x1, x3 \n\t"
"mrs x4, spsel \n\t"
"bic x4, x4, #0xFFFFFFFFFFFFFFFE \n\t"
"orr x1, x1, x4 \n\t"
"str x1, [x0, #0x108] \n\t"
"ldp x3, x4, [sp, #0x10] \n\t"
"ldp x1, x2, [sp], #0x20 \n\t"
"ret \n\t"
);
}
static void __always_inline __ap_context_save_core_extra_regs(
struct sec_arm64_ap_context *ctx)
{
uint64_t pstate, which_el;
uint64_t *regs = &ctx->core_extra_regs[0];
pstate = __ap_context_read_special_reg(CurrentEl);
which_el = pstate & PSR_MODE_MASK;
regs[IDX_CORE_EXTRA_SP_EL0] = __ap_context_read_special_reg(sp_el0);
if (which_el >= PSR_MODE_EL2t) {
regs[IDX_CORE_EXTRA_SP_EL1] =
__ap_context_read_special_reg(sp_el1);
regs[IDX_CORE_EXTRA_ELR_EL1] =
__ap_context_read_special_reg(elr_el1);
regs[IDX_CORE_EXTRA_SPSR_EL1] =
__ap_context_read_special_reg(spsr_el1);
regs[IDX_CORE_EXTRA_SP_EL2] =
__ap_context_read_special_reg(sp_el2);
regs[IDX_CORE_EXTRA_ELR_EL2] =
__ap_context_read_special_reg(elr_el2);
regs[IDX_CORE_EXTRA_SPSR_EL2] =
__ap_context_read_special_reg(spsr_el2);
}
}
static void __always_inline __ap_context_save_mmu_regs(
struct sec_arm64_ap_context *ctx)
{
uint64_t *mmu = &ctx->mmu_regs[0];
mmu[IDX_MMU_TTBR0_EL1] = __ap_context_read_special_reg(TTBR0_EL1);
mmu[IDX_MMU_TTBR1_EL1] = __ap_context_read_special_reg(TTBR1_EL1);
mmu[IDX_MMU_TCR_EL1] = __ap_context_read_special_reg(TCR_EL1);
mmu[IDX_MMU_MAIR_EL1] = __ap_context_read_special_reg(MAIR_EL1);
mmu[IDX_MMU_AMAIR_EL1] = __ap_context_read_special_reg(AMAIR_EL1);
}
static ssize_t __ap_context_unique_id_to_type(uint32_t unique_id)
{
ssize_t type;
switch (unique_id) {
case SEC_ARM64_VH_IPI_STOP_MAGIC:
type = TYPE_VH_IPI_STOP;
break;
default:
type = TYPE_VH_UNKNOWN;
break;
}
return type;
}
static noinline int __ap_context_parse_dt_name(struct builder *bd,
struct device_node *np)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
return of_property_read_string(np, "sec,name", &drvdata->name);
}
static noinline int __ap_context_parse_dt_unique_id(struct builder *bd,
struct device_node *np)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
u32 unique_id;
int err;
err = of_property_read_u32(np, "sec,unique_id", &unique_id);
if (err)
return -EINVAL;
drvdata->unique_id = (uint32_t)unique_id;
return 0;
}
static const struct dt_builder __ap_context_dt_builder[] = {
DT_BUILDER(__ap_context_parse_dt_name),
DT_BUILDER(__ap_context_parse_dt_unique_id),
};
static noinline int __ap_context_parse_dt(struct builder *bd)
{
return sec_director_parse_dt(bd, __ap_context_dt_builder,
ARRAY_SIZE(__ap_context_dt_builder));
}
static noinline int __ap_context_alloc_client(struct builder *bd)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
size_t size = sizeof(struct sec_arm64_ap_context) * num_possible_cpus();
struct sec_dbg_region_client *client;
ssize_t type;
type = __ap_context_unique_id_to_type(drvdata->unique_id);
if (type >= TYPE_VH_MAX || type == TYPE_VH_UNKNOWN)
return -ERANGE;
client = sec_dbg_region_alloc(drvdata->unique_id, size);
if (PTR_ERR(client) == -EBUSY)
return -EPROBE_DEFER;
else if (IS_ERR_OR_NULL(client))
return -ENOMEM;
client->name = drvdata->name;
drvdata->client = client;
drvdata->ctx = (struct sec_arm64_ap_context *)client->virt;
return 0;
}
static noinline void __ap_context_free_client(struct builder *bd)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
ssize_t type;
type = __ap_context_unique_id_to_type(drvdata->unique_id);
sec_dbg_region_free(drvdata->client);
}
static void __trace_android_vh_ipi_stop(void *__ctx, struct pt_regs *regs)
{
struct sec_arm64_ap_context *ctx_arr = __ctx;
int cpu = smp_processor_id();
struct sec_arm64_ap_context *ctx = &ctx_arr[cpu];
if (ctx->used)
return;
__ap_context_save_core_regs_from_pt_regs(ctx, regs);
__ap_context_save_core_extra_regs(ctx);
__ap_context_save_mmu_regs(ctx);
ctx->used = true;
pr_emerg("context saved (CPU:%d)\n", cpu);
}
static noinline int __ap_context_register_vh(struct builder *bd)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
ssize_t type;
int err;
type = __ap_context_unique_id_to_type(drvdata->unique_id);
if (type >= TYPE_VH_MAX || type == TYPE_VH_UNKNOWN)
return -ERANGE;
switch (type) {
case TYPE_VH_IPI_STOP:
err = register_trace_android_vh_ipi_stop(
__trace_android_vh_ipi_stop, drvdata->ctx);
break;
default:
err = -EINVAL;
}
return err;
}
static noinline void __ap_context_unregister_vh(struct builder *bd)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
struct device *dev = bd->dev;
ssize_t type;
type = __ap_context_unique_id_to_type(drvdata->unique_id);
if (type >= TYPE_VH_MAX || type == TYPE_VH_UNKNOWN) {
dev_warn(dev, "invalid type number - %zd\n", type);
return;
}
switch (type) {
case TYPE_VH_IPI_STOP:
unregister_trace_android_vh_ipi_stop(
__trace_android_vh_ipi_stop, NULL);
break;
default:
dev_warn(dev, "%zd is not a valid vendor hook\n", type);
}
}
static __always_inline void __ap_context_hack_core_regs_for_panic(
struct pt_regs *regs)
{
/* FIXME: stack is corrupted by other callees of 'panic'. */
regs->sp = (uintptr_t)__builtin_frame_address(3);
regs->regs[29] = (uintptr_t)__builtin_frame_address(3);
regs->regs[30] = (uintptr_t)__builtin_return_address(2) - AARCH64_INSN_SIZE;
regs->pc = (uintptr_t)__builtin_return_address(2) - AARCH64_INSN_SIZE;
}
static int __used __sec_arm64_ap_context_on_panic(struct pt_regs *regs)
{
/* NOTE: x0 MUST BE SAVED before this function is called.
* see, 'sec_arm64_ap_context_on_panic'.
*/
struct notifier_block *this = (void *)regs->regs[0];
struct ap_context_drvdata *drvdata =
container_of(this, struct ap_context_drvdata, nb_panic);
struct sec_arm64_ap_context *__ctx = drvdata->ctx;
struct sec_arm64_ap_context *ctx;
int cpu;
if (!__ctx)
return NOTIFY_DONE;
cpu = smp_processor_id();
ctx = &__ctx[cpu];
if (ctx->used)
return NOTIFY_DONE;
__ap_context_hack_core_regs_for_panic(regs);
__ap_context_save_core_regs_from_pt_regs(ctx, regs);
__ap_context_save_core_extra_regs(ctx);
__ap_context_save_mmu_regs(ctx);
ctx->used = true;
pr_emerg("context saved (CPU:%d)\n", cpu);
return NOTIFY_OK;
}
static int __naked sec_arm64_ap_context_on_panic(struct notifier_block *nb,
unsigned long l, void *d)
{
asm volatile (
"stp x0, x30, [sp, #-0x10]! \n\t"
/* 'sp' indicates 'struct pt_regs' */
"sub sp, sp, %0 \n\t"
"mov x0, sp \n\t"
"bl __ap_context_save_core_regs_on_current \n\t"
/* save 'x0' on 'struct pt_regs' before calling
* '__sec_arm64_ap_context_on_panic'
*/
"ldr x0, [sp, %0] \n\t"
"str x0, [sp] \n\t"
/* concrete notifier */
"mov x0, sp \n\t"
"bl __sec_arm64_ap_context_on_panic \n\t"
"add sp, sp, %0 \n\t"
"ldp x1, x30, [sp], #0x10 \n\t"
"ret \n\t"
:
: "i"(sizeof(struct pt_regs))
:
);
}
static int __ap_context_register_panic_notifier(struct builder *bd)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_panic;
nb->notifier_call = sec_arm64_ap_context_on_panic;
nb->priority = 0x7FFFFFFF;
return atomic_notifier_chain_register(&panic_notifier_list, nb);
}
static void __ap_context_unregister_panic_notifier(struct builder *bd)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_panic;
atomic_notifier_chain_unregister(&panic_notifier_list, nb);
}
static int sec_arm64_ap_context_on_die(struct notifier_block *this,
unsigned long l, void *data)
{
struct ap_context_drvdata *drvdata =
container_of(this, struct ap_context_drvdata, nb_die);
struct die_args *args = data;
struct pt_regs *regs = args->regs;
struct sec_arm64_ap_context *__ctx = drvdata->ctx;
struct sec_arm64_ap_context *ctx;
int cpu;
if (!__ctx)
return NOTIFY_DONE;
cpu = smp_processor_id();
ctx = &__ctx[cpu];
if (ctx->used)
return NOTIFY_DONE;
__ap_context_save_core_regs_from_pt_regs(ctx, regs);
__ap_context_save_core_extra_regs(ctx);
__ap_context_save_mmu_regs(ctx);
ctx->used = true;
pr_emerg("context saved (CPU:%d)\n", cpu);
return NOTIFY_OK;
}
static int __ap_context_register_die_notifier(struct builder *bd)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_die;
nb->notifier_call = sec_arm64_ap_context_on_die;
return register_die_notifier(nb);
}
static void __ap_context_unregister_die_notifier(struct builder *bd)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_die;
unregister_die_notifier(nb);
}
static noinline int __ap_context_probe_epilog(struct builder *bd)
{
struct ap_context_drvdata *drvdata =
container_of(bd, struct ap_context_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
return 0;
}
static int __ap_context_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct ap_context_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __ap_context_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct ap_context_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static const struct dev_builder __ap_context_dev_builder[] = {
DEVICE_BUILDER(__ap_context_parse_dt, NULL),
DEVICE_BUILDER(__ap_context_alloc_client, __ap_context_free_client),
DEVICE_BUILDER(__ap_context_register_vh, __ap_context_unregister_vh),
DEVICE_BUILDER(__ap_context_register_panic_notifier,
__ap_context_unregister_panic_notifier),
DEVICE_BUILDER(__ap_context_register_die_notifier,
__ap_context_unregister_die_notifier),
DEVICE_BUILDER(__ap_context_probe_epilog, NULL),
};
static int sec_ap_context_probe(struct platform_device *pdev)
{
return __ap_context_probe(pdev, __ap_context_dev_builder,
ARRAY_SIZE(__ap_context_dev_builder));
}
static int sec_ap_context_remove(struct platform_device *pdev)
{
return __ap_context_remove(pdev, __ap_context_dev_builder,
ARRAY_SIZE(__ap_context_dev_builder));
}
static const struct of_device_id sec_ap_context_match_table[] = {
{ .compatible = "samsung,arm64-ap_context" },
{ .compatible = "samsung,ap_context" }, /* TODO: should be removed in future */
{},
};
MODULE_DEVICE_TABLE(of, sec_ap_context_match_table);
static struct platform_driver sec_ap_context_driver = {
.driver = {
.name = "sec,arm64-ap_context",
.of_match_table = of_match_ptr(sec_ap_context_match_table),
},
.probe = sec_ap_context_probe,
.remove = sec_ap_context_remove,
};
static int __init sec_ap_context_init(void)
{
return platform_driver_register(&sec_ap_context_driver);
}
arch_initcall(sec_ap_context_init);
static void __exit sec_ap_context_exit(void)
{
platform_driver_unregister(&sec_ap_context_driver);
}
module_exit(sec_ap_context_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("AP CORE/MMU context snaphot (ARM64)");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,5 @@
config SEC_ARM64_DEBUG
tristate "SEC Common debugging feature for ARM64 based devices"
depends on ARM64
help
TODO: help is not ready.

View File

@@ -0,0 +1,5 @@
obj-$(CONFIG_SEC_ARM64_DEBUG) += sec_arm64_debug.o
sec_arm64_debug-objs := sec_arm64_debug_main.o \
sec_arm64_force_err.o
CFLAGS_REMOVE_sec_arm64_force_err.o += -mgeneral-regs-only

View File

@@ -0,0 +1,17 @@
#ifndef __INTERNAL__SEC_ARM64_DEBUG_H__
#define __INTERNAL__SEC_ARM64_DEBUG_H__
#include <linux/notifier.h>
#include <linux/samsung/builder_pattern.h>
struct arm64_debug_drvdata {
struct builder bd;
};
/* sec_arm64_force_err.c */
extern int sec_fsimd_debug_init_random_pi_work(struct builder *bd);
extern int sec_arm64_force_err_init(struct builder *bd);
extern void sec_arm64_force_err_exit(struct builder *bd);
#endif /* __INTERNAL__SEC_ARM64_DEBUG_H__ */

View File

@@ -0,0 +1,86 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2022-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include "sec_arm64_debug.h"
static const struct dev_builder __arm64_debug_dev_builder[] = {
DEVICE_BUILDER(sec_fsimd_debug_init_random_pi_work, NULL),
DEVICE_BUILDER(sec_arm64_force_err_init, sec_arm64_force_err_exit),
};
static int __arm64_debug_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct arm64_debug_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __arm64_debug_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct arm64_debug_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static int sec_arm64_debug_probe(struct platform_device *pdev)
{
return __arm64_debug_probe(pdev, __arm64_debug_dev_builder,
ARRAY_SIZE(__arm64_debug_dev_builder));
}
static int sec_arm64_debug_remove(struct platform_device *pdev)
{
return __arm64_debug_remove(pdev, __arm64_debug_dev_builder,
ARRAY_SIZE(__arm64_debug_dev_builder));
}
static const struct of_device_id sec_arm64_debug_match_table[] = {
{ .compatible = "samsung,arm64-debug" },
{},
};
MODULE_DEVICE_TABLE(of, sec_arm64_debug_match_table);
static struct platform_driver sec_arm64_debug_driver = {
.driver = {
.name = "sec,arm64-debug",
.of_match_table = of_match_ptr(sec_arm64_debug_match_table),
},
.probe = sec_arm64_debug_probe,
.remove = sec_arm64_debug_remove,
};
static int __init sec_arm64_debug_init(void)
{
return platform_driver_register(&sec_arm64_debug_driver);
}
module_init(sec_arm64_debug_init);
static void __exit sec_arm64_debug_exit(void)
{
platform_driver_unregister(&sec_arm64_debug_driver);
}
module_exit(sec_arm64_debug_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Common debugging feature for ARM64 based devices");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,185 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2020-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/samsung/debug/sec_force_err.h>
#include "sec_arm64_debug.h"
static void __arm64_simulate_undef(struct force_err_handle *h)
{
asm volatile(".word 0xDEADBEEF");
}
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
static noinline unsigned long __fsimd_random_pi(unsigned long seed)
{
unsigned long pi_int;
pi_int = (unsigned long)(M_PI * seed);
return pi_int;
}
static void sec_arm64_fsimd_random_pi(struct work_struct *work)
{
unsigned long pi_int, seed;
size_t i;
for (i = 0; i < 80; i++) {
seed = get_random_long() % 100UL;
pi_int = __fsimd_random_pi(seed);
pr_info("int(M_PI * %lu) = %lu\n",
seed, pi_int);
msleep(20);
}
}
static struct work_struct random_pi_work[10];
int sec_fsimd_debug_init_random_pi_work(struct builder *bd)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(random_pi_work); i++)
INIT_WORK(&random_pi_work[i], sec_arm64_fsimd_random_pi);
return 0;
}
static void __arm64_simulate_fsimd_error(struct force_err_handle *h)
{
size_t i;
pr_emerg("Simulating fsimd error in kernel space\n");
for (i = 0; i < ARRAY_SIZE(random_pi_work); i++)
queue_work(system_long_wq, &random_pi_work[i]);
ssleep(1);
/* if we reach here, simulation failed */
pr_emerg("Simulation of fsimd error failed\n");
}
static void __naked __arm64_simulate_pabort(struct force_err_handle *h)
{
asm volatile ("mov x0, %0 \n\t"
"blr x0\n\t"
"ret \n\t"
:: "r" (PAGE_OFFSET - 0x8));
}
static void __naked __arm64_simulate_unaligned_pc(struct force_err_handle *h)
{
asm volatile ("mov x1, %0 \n\t"
"mov x0, x30 \n\t"
"add x0, x0, 0x1 \n\t"
"orr x0, x0, x1 \n\n"
"blr x0\n\t"
"ret \n\t"
:: "r" PAGE_OFFSET);
}
static void __naked __arm64_simulate_bti(struct force_err_handle *h)
{
asm volatile ("adr x0, . \n\t"
"add x0, x0, 0x10 \n\t"
"br x0 \n\t"
"eor x0, x0, x0 \n\t"
"eor x0, x0, x0 \n\t"
"eor x0, x0, x0 \n\t"
"eor x0, x0, x0 \n\t"
"ret \n\t");
}
static struct force_err_handle __arm64_force_err_default[] = {
FORCE_ERR_HANDLE("undef", "Generating a undefined instruction exception!",
__arm64_simulate_undef),
FORCE_ERR_HANDLE("fsimd_err", "Generating an fsimd error!",
__arm64_simulate_fsimd_error),
FORCE_ERR_HANDLE("pabort", "Generating a data abort exception!",
__arm64_simulate_pabort),
FORCE_ERR_HANDLE("unaligned_pc", "Generating an unaligned pc exception!",
__arm64_simulate_unaligned_pc),
FORCE_ERR_HANDLE("bti", "Generating an bti exception!",
__arm64_simulate_bti),
};
static ssize_t __arm64_force_err_add_handlers(ssize_t begin)
{
struct force_err_handle *h;
int err = 0;
ssize_t n = ARRAY_SIZE(__arm64_force_err_default);
ssize_t i;
for (i = begin; i < n; i++) {
h = &__arm64_force_err_default[i];
INIT_HLIST_NODE(&h->node);
err = sec_force_err_add_custom_handle(h);
if (err) {
pr_err("failed to add a handler - [%zu] %ps (%d)\n",
i, h->func, err);
return -i;
}
}
return n;
}
static void __arm64_force_err_del_handlers(ssize_t last_failed)
{
struct force_err_handle *h;
int err = 0;
ssize_t n = ARRAY_SIZE(__arm64_force_err_default);
ssize_t i;
/* NOTE: force error is not enabled for end-users. */
BUG_ON((last_failed < 0) || (last_failed > n));
for (i = last_failed - 1; i >= 0; i--) {
h = &__arm64_force_err_default[i];
err = sec_force_err_del_custom_handle(h);
if (err)
pr_warn("failed to delete a handler - [%zu] %ps (%d)\n",
i, h->func, err);
}
}
int sec_arm64_force_err_init(struct builder *bd)
{
ssize_t last_failed;
last_failed = __arm64_force_err_add_handlers(0);
if (last_failed <= 0) {
dev_warn(bd->dev, "force err is disabled. ignored.\n");
goto err_add_handlers;
}
return 0;
err_add_handlers:
__arm64_force_err_del_handlers(-last_failed);
return 0;
}
void sec_arm64_force_err_exit(struct builder *bd)
{
__arm64_force_err_del_handlers(ARRAY_SIZE(__arm64_force_err_default));
}

View File

@@ -0,0 +1,5 @@
config SEC_ARM64_FSIMD_DEBUG
tristate "SEC Detecting undesired NEON usage in kernel"
depends on ARM64 && ANDROID_VENDOR_HOOKS
help
TODO: help is not ready.

View File

@@ -0,0 +1 @@
obj-$(CONFIG_SEC_ARM64_FSIMD_DEBUG) += sec_arm64_fsimd_debug.o

View File

@@ -0,0 +1,210 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2019-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <asm/fpsimd.h>
#include <trace/hooks/fpsimd.h>
#include <linux/samsung/builder_pattern.h>
#include <linux/samsung/debug/sec_debug.h>
#include <linux/samsung/sec_of.h>
struct fsimd_debug_drvdata {
struct builder bd;
};
static noinline int __fsimd_debug_parse_dt_check_debug_level(struct builder *bd,
struct device_node *np)
{
struct device *dev = bd->dev;
unsigned int sec_dbg_level = sec_debug_level();
int err;
err = sec_of_test_debug_level(np, "sec,debug_level", sec_dbg_level);
if (err == -ENOENT) {
dev_warn(dev, "%s will be enabled all sec debug levels!\n",
dev_name(dev));
return 0;
} else if (err < 0)
return -ENODEV;
return 0;
}
static const struct dt_builder __fsimd_debug_dt_builder[] = {
DT_BUILDER(__fsimd_debug_parse_dt_check_debug_level),
};
static noinline int __fsimd_debug_parse_dt(struct builder *bd)
{
return sec_director_parse_dt(bd, __fsimd_debug_dt_builder,
ARRAY_SIZE(__fsimd_debug_dt_builder));
}
#if IS_BUILTIN(CONFIG_SEC_ARM64_FSIMD_DEBUG)
static __always_inline void __fpsimd_save_state(struct user_fpsimd_state *state)
{
fpsimd_save_state(state);
}
#else
/* NOTE: copied from arch/arm64/kernel/entry-fpsimd.S */
static void __naked __fpsimd_save_state(struct user_fpsimd_state *state)
{
asm volatile (
"stp q0, q1, [x0] \n\t"
"stp q2, q3, [x0, #32] \n\t"
"stp q4, q5, [x0, #64] \n\t"
"stp q6, q7, [x0, #96] \n\t"
"stp q8, q9, [x0, #128] \n\t"
"stp q10, q11, [x0, #160] \n\t"
"stp q12, q13, [x0, #192] \n\t"
"stp q14, q15, [x0, #224] \n\t"
"stp q16, q17, [x0, #256] \n\t"
"stp q18, q19, [x0, #288] \n\t"
"stp q20, q21, [x0, #320] \n\t"
"stp q22, q23, [x0, #352] \n\t"
"stp q24, q25, [x0, #384] \n\t"
"stp q26, q27, [x0, #416] \n\t"
"stp q28, q29, [x0, #448] \n\t"
"stp q30, q31, [x0, #480]! \n\t"
"mrs x8, fpsr \n\t"
"str w8, [x0, #32] \n\t"
"mrs x8, fpcr \n\t"
"str w8, [x0, #36] \n\t"
"ret \n\t"
);
}
#endif
static void __trace_android_vh_is_fpsimd_save(void *unused,
struct task_struct *prev, struct task_struct *next)
{
struct user_fpsimd_state current_st;
struct user_fpsimd_state *saved_st = &next->thread.uw.fpsimd_state;
size_t i;
if (test_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE))
return;
__fpsimd_save_state(&current_st);
for (i = 0; i < ARRAY_SIZE(current_st.vregs); i++) {
/* NOTE: this driver will not be probed when the debug level is low. */
BUG_ON(current_st.vregs[i] != saved_st->vregs[i]);
}
/* NOTE: this driver will not be probed when the debug level is low. */
BUG_ON((current_st.fpsr != saved_st->fpsr) ||
(current_st.fpcr != saved_st->fpcr));
}
static int __fsimd_debug_install_vendor_hook(struct builder *bd)
{
return register_trace_android_vh_is_fpsimd_save(
__trace_android_vh_is_fpsimd_save,
NULL);
}
static void __fsimd_debug_uninstall_vendor_hook(struct builder *bd)
{
unregister_trace_android_vh_is_fpsimd_save(
__trace_android_vh_is_fpsimd_save,
NULL);
}
static noinline int __fsimd_debug_probe_epilog(struct builder *bd)
{
struct fsimd_debug_drvdata *drvdata =
container_of(bd, struct fsimd_debug_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
return 0;
}
static const struct dev_builder __fsimd_debug_dev_builder[] = {
DEVICE_BUILDER(__fsimd_debug_parse_dt, NULL),
DEVICE_BUILDER(__fsimd_debug_install_vendor_hook,
__fsimd_debug_uninstall_vendor_hook),
DEVICE_BUILDER(__fsimd_debug_probe_epilog, NULL),
};
static int __fsimd_debug_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct fsimd_debug_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __fsimd_debug_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct fsimd_debug_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static int sec_fsimd_debug_probe(struct platform_device *pdev)
{
return __fsimd_debug_probe(pdev, __fsimd_debug_dev_builder,
ARRAY_SIZE(__fsimd_debug_dev_builder));
}
static int sec_fsimd_debug_remove(struct platform_device *pdev)
{
return __fsimd_debug_remove(pdev, __fsimd_debug_dev_builder,
ARRAY_SIZE(__fsimd_debug_dev_builder));
}
static const struct of_device_id sec_fsimd_debug_match_table[] = {
{ .compatible = "samsung,arm64-fsimd_debug" },
{ .compatible = "samsung,fsimd_debug" }, /* TODP: should be removed in future */
{},
};
MODULE_DEVICE_TABLE(of, sec_fsimd_debug_match_table);
static struct platform_driver sec_fsimd_debug_driver = {
.driver = {
.name = "sec,arm64-fsimd_debug",
.of_match_table = of_match_ptr(sec_fsimd_debug_match_table),
},
.probe = sec_fsimd_debug_probe,
.remove = sec_fsimd_debug_remove,
};
static int __init sec_fsimd_debug_init(void)
{
return platform_driver_register(&sec_fsimd_debug_driver);
}
module_init(sec_fsimd_debug_init);
static void __exit sec_fsimd_debug_exit(void)
{
return platform_driver_unregister(&sec_fsimd_debug_driver);
}
module_exit(sec_fsimd_debug_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Detecting fsimd register corruption");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,21 @@
config SEC_BOOT_STAT
tristate "SEC Boot-stat driver"
help
TODO: help is not ready.
config SEC_BOOT_STAT_TEST_FOR_ON_DEVICE
tristate "KUnit test for sec_boot_stat_test"
depends on KUNIT
depends on SEC_BOOT_STAT
help
TODO: Describe config fully.
If you run this test driver on a device, this config SHOULD be set to 'm' to build the test driver modularly.
config SEC_BOOT_STAT_TEST_FOR_ONLY_UML
tristate "KUnit test for sec_boot_stat_test"
depends on KUNIT
depends on SEC_BOOT_STAT
depends on UML
help
TODO: Describe config fully.
This CONFIG is recommended to be set to 'y'.

View File

@@ -0,0 +1,6 @@
obj-$(CONFIG_SEC_BOOT_STAT) += sec_boot_stat.o
sec_boot_stat-objs := sec_boot_stat_main.o \
sec_boot_stat_proc.o \
sec_enh_boot_time_proc.o
GCOV_PROFILE_sec_boot_stat.o := $(CONFIG_KUNIT)

View File

@@ -0,0 +1,56 @@
#ifndef __INTERNAL__SEC_BOOT_STAT_H__
#define __INTERNAL__SEC_BOOT_STAT_H__
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/samsung/builder_pattern.h>
#include <linux/samsung/debug/sec_boot_stat.h>
#include "sec_boot_stat_proc.h"
#include "sec_enh_boot_time_proc.h"
struct boot_stat_drvdata {
struct builder bd;
struct device *bsp_dev;
struct mutex soc_ops_lock;
struct sec_boot_stat_soc_operations *soc_ops;
struct boot_stat_proc boot_stat;
struct enh_boot_time_proc enh_boot_time;
};
static __always_inline struct device *__boot_stat_proc_to_dev(
struct boot_stat_proc *boot_stat)
{
struct boot_stat_drvdata *drvdata = container_of(boot_stat,
struct boot_stat_drvdata, boot_stat);
return drvdata->bd.dev;
}
static __always_inline struct device *__enh_boot_time_proc_to_dev(
struct enh_boot_time_proc *enh_boot_time)
{
struct boot_stat_drvdata *drvdata = container_of(enh_boot_time,
struct boot_stat_drvdata, enh_boot_time);
return drvdata->bd.dev;
}
/* sec_boot_stata_main.c */
extern unsigned long long sec_boot_stat_ktime_to_time(unsigned long long ktime);
extern void sec_boot_stat_bootloader_stat(struct seq_file *m);
/* sec_boot_stat_proc.c */
extern void sec_boot_stat_add_boot_event(struct boot_stat_drvdata *drvdata, const char *log);
extern int sec_boot_stat_proc_init(struct builder *bd);
extern void sec_boot_stat_proc_exit(struct builder *bd);
/* sec_enh_boot_time_proc.c */
extern void sec_enh_boot_time_add_boot_event(struct boot_stat_drvdata *drvdata, const char *log);
extern int sec_enh_boot_time_init(struct builder *bd);
extern void sec_enh_boot_time_exit(struct builder *bd);
#endif /* __INTERNAL__SEC_BOOT_STAT_H__ */

View File

@@ -0,0 +1,322 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2014-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/samsung/bsp/sec_class.h>
#include <linux/samsung/sec_kunit.h>
#include "sec_boot_stat.h"
static struct boot_stat_drvdata *sec_boot_stat;
static __always_inline bool __boot_stat_is_probed(void)
{
return !!sec_boot_stat;
}
__ss_static __ss_always_inline bool __is_for_enh_boot_time(const char *log)
{
const struct {
char pmsg_mark[2]; /* !@ */
uint64_t boot_prefix;
char colon;
} __packed *log_head = (const void *)log;
const union {
uint64_t raw;
char text[8];
} boot_prefix = {
.text = { 'B', 'o', 'o', 't', '_', 'E', 'B', 'S', },
};
if (log_head->boot_prefix == boot_prefix.raw)
return true;
return false;
}
static void __boot_stat_add(struct boot_stat_drvdata *drvdata, const char *log)
{
if (__is_for_enh_boot_time(log))
sec_enh_boot_time_add_boot_event(drvdata, log);
else
sec_boot_stat_add_boot_event(drvdata, log);
}
void sec_boot_stat_add(const char *log)
{
if (!__boot_stat_is_probed())
return;
__boot_stat_add(sec_boot_stat, log);
}
EXPORT_SYMBOL_GPL(sec_boot_stat_add);
int sec_boot_stat_register_soc_ops(struct sec_boot_stat_soc_operations *soc_ops)
{
int ret = 0;
if (!__boot_stat_is_probed())
return -EBUSY;
mutex_lock(&sec_boot_stat->soc_ops_lock);
if (sec_boot_stat->soc_ops) {
pr_warn("soc specific operations already registered\n");
ret = -ENOENT;
goto __already_registered;
}
sec_boot_stat->soc_ops = soc_ops;
__already_registered:
mutex_unlock(&sec_boot_stat->soc_ops_lock);
return ret;
}
EXPORT_SYMBOL_GPL(sec_boot_stat_register_soc_ops);
int sec_boot_stat_unregister_soc_ops(struct sec_boot_stat_soc_operations *soc_ops)
{
int ret = 0;
if (!__boot_stat_is_probed())
return -EBUSY;
mutex_lock(&sec_boot_stat->soc_ops_lock);
if (sec_boot_stat->soc_ops != soc_ops) {
pr_warn("already unregistered or wrong soc specific operation\n");
ret = -EINVAL;
goto __invalid_soc_ops;
}
sec_boot_stat->soc_ops = NULL;
__invalid_soc_ops:
mutex_unlock(&sec_boot_stat->soc_ops_lock);
return ret;
}
EXPORT_SYMBOL_GPL(sec_boot_stat_unregister_soc_ops);
unsigned long long sec_boot_stat_ktime_to_time(unsigned long long ktime)
{
struct sec_boot_stat_soc_operations *soc_ops;
unsigned long long time;
mutex_lock(&sec_boot_stat->soc_ops_lock);
soc_ops = sec_boot_stat->soc_ops;
if (!soc_ops || !soc_ops->ktime_to_time) {
time = ktime;
goto __without_adjust;
}
time = soc_ops->ktime_to_time(ktime);
__without_adjust:
mutex_unlock(&sec_boot_stat->soc_ops_lock);
return time;
}
void sec_boot_stat_bootloader_stat(struct seq_file *m)
{
struct sec_boot_stat_soc_operations *soc_ops;
mutex_lock(&sec_boot_stat->soc_ops_lock);
soc_ops = sec_boot_stat->soc_ops;
if (!soc_ops || !soc_ops->show_on_enh_boot_stat) {
pr_warn("Wrong soc show_on_enh_boot_stat operation\n");
goto __without_adjust;
}
soc_ops->show_on_enh_boot_stat(m);
__without_adjust:
mutex_unlock(&sec_boot_stat->soc_ops_lock);
}
static noinline int __boot_stat_probe_prolog(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
mutex_init(&drvdata->soc_ops_lock);
return 0;
}
static noinline void __boot_stat_remove_epilog(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
mutex_destroy(&drvdata->soc_ops_lock);
}
static int __boot_stat_sec_class_create(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
struct device *bsp_dev;
bsp_dev = sec_device_create(NULL, "bsp");
if (IS_ERR(bsp_dev))
return PTR_ERR(bsp_dev);
dev_set_drvdata(bsp_dev, drvdata);
drvdata->bsp_dev = bsp_dev;
return 0;
}
static void __boot_stat_sec_class_remove(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
struct device *bsp_dev = drvdata->bsp_dev;
if (!bsp_dev)
return;
sec_device_destroy(bsp_dev->devt);
}
static ssize_t boot_stat_store(struct device *sec_class_dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct boot_stat_drvdata *drvdata = dev_get_drvdata(sec_class_dev);
__boot_stat_add(drvdata, buf);
return count;
}
DEVICE_ATTR_WO(boot_stat);
static struct attribute *sec_bsp_attrs[] = {
&dev_attr_boot_stat.attr,
NULL,
};
static const struct attribute_group sec_bsp_attr_group = {
.attrs = sec_bsp_attrs,
};
static noinline int __boot_stat_sysfs_create(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
struct device *dev = drvdata->bsp_dev;
int err;
err = sysfs_create_group(&dev->kobj, &sec_bsp_attr_group);
if (err)
return err;
return 0;
}
static noinline void __boot_stat_sysfs_remove(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
struct device *dev = drvdata->bsp_dev;
sysfs_remove_group(&dev->kobj, &sec_bsp_attr_group);
}
static noinline int __boot_stat_probe_epilog(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
return 0;
}
static int __boot_stat_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct boot_stat_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
sec_boot_stat = drvdata;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __boot_stat_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct boot_stat_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static const struct dev_builder __boot_stat_dev_builder[] = {
DEVICE_BUILDER(__boot_stat_probe_prolog, __boot_stat_remove_epilog),
DEVICE_BUILDER(__boot_stat_sec_class_create,
__boot_stat_sec_class_remove),
DEVICE_BUILDER(sec_boot_stat_proc_init, sec_boot_stat_proc_exit),
DEVICE_BUILDER(sec_enh_boot_time_init, sec_enh_boot_time_exit),
DEVICE_BUILDER(__boot_stat_sysfs_create, __boot_stat_sysfs_remove),
DEVICE_BUILDER(__boot_stat_probe_epilog, NULL),
};
static int sec_boot_stat_probe(struct platform_device *pdev)
{
return __boot_stat_probe(pdev, __boot_stat_dev_builder,
ARRAY_SIZE(__boot_stat_dev_builder));
}
static int sec_boot_stat_remove(struct platform_device *pdev)
{
return __boot_stat_remove(pdev, __boot_stat_dev_builder,
ARRAY_SIZE(__boot_stat_dev_builder));
}
static const struct of_device_id sec_boot_stat_match_table[] = {
{ .compatible = "samsung,boot_stat" },
{},
};
MODULE_DEVICE_TABLE(of, sec_boot_stat_match_table);
static struct platform_driver sec_boot_stat_driver = {
.driver = {
.name = "sec,boot_stat",
.of_match_table = of_match_ptr(sec_boot_stat_match_table),
},
.probe = sec_boot_stat_probe,
.remove = sec_boot_stat_remove,
};
static int __init sec_boot_stat_init(void)
{
return platform_driver_register(&sec_boot_stat_driver);
}
module_init(sec_boot_stat_init);
static void __exit sec_boot_stat_exit(void)
{
platform_driver_unregister(&sec_boot_stat_driver);
}
module_exit(sec_boot_stat_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Boot-stat driver");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,455 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2014-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/stringhash.h>
#include <linux/samsung/sec_kunit.h>
#include "sec_boot_stat.h"
static const char *h_line = "-----------------------------------------------------------------------------------";
#define BOOT_PREFIX(__idx, __head) \
[__idx] = { \
.head = __head, \
.head_len = sizeof(__head) - 1, \
}
static const struct boot_prefix boot_prefixes[] = {
BOOT_PREFIX(EVT_PLATFORM, "!@Boot: "),
BOOT_PREFIX(EVT_RIL, "!@Boot_SVC : "),
BOOT_PREFIX(EVT_DEBUG, "!@Boot_DEBUG: "),
BOOT_PREFIX(EVT_SYSTEMSERVER, "!@Boot_SystemServer: "),
BOOT_PREFIX(EVT_INVALID, ""),
};
enum {
SYSTEM_START_INIT_PROCESS,
PLATFORM_START_PRELOAD,
PLATFORM_END_PRELOAD,
PLATFORM_START_INIT_AND_LOOP,
PLATFORM_START_PACKAGEMANAGERSERVICE,
PLATFORM_END_PACKAGEMANAGERSERVICE,
PLATFORM_START_NETWORK,
PLATFORM_END_NETWORK,
PLATFORM_END_INIT_AND_LOOP,
PLATFORM_PERFORMENABLESCREEN,
PLATFORM_ENABLE_SCREEN,
PLATFORM_BOOT_COMPLETE,
PLATFORM_FINISH_USER_UNLOCKED_COMPLETED,
PLATFORM_SET_ICON_VISIBILITY,
PLATFORM_LAUNCHER_ONCREATE,
PLATFORM_LAUNCHER_ONRESUME,
PLATFORM_LAUNCHER_LOADERTASK_RUN,
PLATFORM_LAUNCHER_FINISHFIRSTBIND,
PLATFORM_VOICE_SVC,
PLATFORM_DATA_SVC,
PLATFORM_PHONEAPP_ONCREATE,
RIL_UNSOL_RIL_CONNECTED,
RIL_SETRADIOPOWER_ON,
RIL_SETUICCSUBSCRIPTION,
RIL_SIM_RECORDSLOADED,
RIL_RUIM_RECORDSLOADED,
RIL_SETUPDATA_RECORDSLOADED,
RIL_SETUPDATACALL,
RIL_RESPONSE_SETUPDATACALL,
RIL_DATA_CONNECTION_ATTACHED,
RIL_DCT_IMSI_READY,
RIL_COMPLETE_CONNECTION,
RIL_CS_REG,
RIL_GPRS_ATTACH,
FACTORY_BOOT_COMPLETE,
NUM_BOOT_EVENTS,
};
#define BOOT_EVENT(__idx, __prefix_idx, __message) \
[__idx] = { \
.prefix_idx = __prefix_idx, \
.message = __message, \
.message_len = sizeof(__message) - 1, \
}
__ss_static struct boot_event boot_events[] = {
BOOT_EVENT(SYSTEM_START_INIT_PROCESS, EVT_PLATFORM, "start init process"),
BOOT_EVENT(PLATFORM_START_PRELOAD, EVT_PLATFORM, "Begin of preload()"),
BOOT_EVENT(PLATFORM_END_PRELOAD, EVT_PLATFORM, "End of preload()"),
BOOT_EVENT(PLATFORM_START_INIT_AND_LOOP, EVT_PLATFORM, "Entered the Android system server!"),
BOOT_EVENT(PLATFORM_START_PACKAGEMANAGERSERVICE, EVT_PLATFORM, "Start PackageManagerService"),
BOOT_EVENT(PLATFORM_END_PACKAGEMANAGERSERVICE, EVT_PLATFORM, "End PackageManagerService"),
BOOT_EVENT(PLATFORM_START_NETWORK, EVT_DEBUG, "start networkManagement"),
BOOT_EVENT(PLATFORM_END_NETWORK, EVT_DEBUG, "end networkManagement"),
BOOT_EVENT(PLATFORM_END_INIT_AND_LOOP, EVT_PLATFORM, "Loop forever"),
BOOT_EVENT(PLATFORM_PERFORMENABLESCREEN, EVT_PLATFORM, "performEnableScreen"),
BOOT_EVENT(PLATFORM_ENABLE_SCREEN, EVT_PLATFORM, "Enabling Screen!"),
BOOT_EVENT(PLATFORM_BOOT_COMPLETE, EVT_PLATFORM, "bootcomplete"),
BOOT_EVENT(PLATFORM_FINISH_USER_UNLOCKED_COMPLETED, EVT_DEBUG, "finishUserUnlockedCompleted"),
BOOT_EVENT(PLATFORM_SET_ICON_VISIBILITY, EVT_PLATFORM, "setIconVisibility: ims_volte: [SHOW]"),
BOOT_EVENT(PLATFORM_LAUNCHER_ONCREATE, EVT_DEBUG, "Launcher.onCreate()"),
BOOT_EVENT(PLATFORM_LAUNCHER_ONRESUME, EVT_DEBUG, "Launcher.onResume()"),
BOOT_EVENT(PLATFORM_LAUNCHER_LOADERTASK_RUN, EVT_DEBUG, "Launcher.LoaderTask.run() start"),
BOOT_EVENT(PLATFORM_LAUNCHER_FINISHFIRSTBIND, EVT_DEBUG, "Launcher - FinishFirstBind"),
BOOT_EVENT(PLATFORM_VOICE_SVC, EVT_PLATFORM, "Voice SVC is acquired"),
BOOT_EVENT(PLATFORM_DATA_SVC, EVT_PLATFORM, "Data SVC is acquired"),
BOOT_EVENT(PLATFORM_PHONEAPP_ONCREATE, EVT_RIL, "PhoneApp OnCrate"),
BOOT_EVENT(RIL_UNSOL_RIL_CONNECTED, EVT_RIL, "RIL_UNSOL_RIL_CONNECTED"),
BOOT_EVENT(RIL_SETRADIOPOWER_ON, EVT_RIL, "setRadioPower on"),
BOOT_EVENT(RIL_SETUICCSUBSCRIPTION, EVT_RIL, "setUiccSubscription"),
BOOT_EVENT(RIL_SIM_RECORDSLOADED, EVT_RIL, "SIM onAllRecordsLoaded"),
BOOT_EVENT(RIL_RUIM_RECORDSLOADED, EVT_RIL, "RUIM onAllRecordsLoaded"),
BOOT_EVENT(RIL_SETUPDATA_RECORDSLOADED, EVT_RIL, "SetupDataRecordsLoaded"),
BOOT_EVENT(RIL_SETUPDATACALL, EVT_RIL, "setupDataCall"),
BOOT_EVENT(RIL_RESPONSE_SETUPDATACALL, EVT_RIL, "Response setupDataCall"),
BOOT_EVENT(RIL_DATA_CONNECTION_ATTACHED, EVT_RIL, "onDataConnectionAttached"),
BOOT_EVENT(RIL_DCT_IMSI_READY, EVT_RIL, "IMSI Ready"),
BOOT_EVENT(RIL_COMPLETE_CONNECTION, EVT_RIL, "completeConnection"),
BOOT_EVENT(RIL_CS_REG, EVT_RIL, "CS Registered"),
BOOT_EVENT(RIL_GPRS_ATTACH, EVT_RIL, "GPRS Attached"),
BOOT_EVENT(FACTORY_BOOT_COMPLETE, EVT_PLATFORM, "Factory Process [Boot Completed]"),
};
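/*
 * Fast check for the "!@Boot" marker: the first 8 bytes of the log are read
 * as a 64-bit value and the upper two bytes are masked off, so only the
 * leading six characters ("!@Boot") are compared. This avoids a strncmp()
 * on every log line, but it assumes the buffer is at least 8 bytes long and,
 * as noted below, only works on little-endian systems.
 */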
__ss_static __ss_always_inline bool __boot_stat_is_boot_event(const char *log)
{
const union {
uint64_t raw;
char text[8];
} boot_prefix = {
.text = { '!', '@', 'B', 'o', 'o', 't', '\0', '\0' },
};
/* NOTE: this is only valid on little-endian systems */
const uint64_t boot_prefix_mask = 0x0000FFFFFFFFFFFF;
uint64_t log_prefix;
log_prefix = (*(uint64_t *)log) & boot_prefix_mask;
if (log_prefix == boot_prefix.raw)
return true;
return false;
}
__ss_static __ss_always_inline ssize_t __boot_stat_get_message_offset_from_plog(
const char *log, size_t *offset)
{
ssize_t i;
for (i = 0; i < NUM_OF_BOOT_PREFIX; i++) {
const struct boot_prefix *prefix = &boot_prefixes[i];
if (unlikely(!strncmp(log, prefix->head, prefix->head_len))) {
*offset = prefix->head_len;
return i;
}
}
return -EINVAL;
}
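/*
 * Boot events are looked up by the full_name_hash() of their message in a
 * small hash table (BOOT_STAT_HASH_BITS-bit); the message length is compared
 * first so that most non-matching entries are skipped without a strncmp().
 */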
static __always_inline struct boot_event *__boot_stat_find_event_locked(
struct boot_stat_proc *boot_stat,
const char *message)
{
struct boot_event *h;
size_t msg_len = strlen(message);
u32 key = full_name_hash(NULL, message, (unsigned int)msg_len);
hash_for_each_possible(boot_stat->event_htbl, h, hlist, key) {
if (h->message_len != msg_len)
continue;
if (!strncmp(h->message, message, msg_len))
return h;
}
return ERR_PTR(-ENOENT);
}
__ss_static __ss_always_inline void __boot_stat_record_boot_event_locked(
struct boot_stat_proc *boot_stat, const char *message)
{
struct boot_event *event =
__boot_stat_find_event_locked(boot_stat, message);
if (IS_ERR_OR_NULL(event))
return;
if (event->ktime)
return;
event->ktime = local_clock();
list_add_tail(&event->list, &boot_stat->boot_event_head);
boot_stat->nr_event++;
}
#define MAX_LENGTH_OF_SYSTEMSERVER_LOG 90
struct systemserver_init_time_entry {
struct list_head list;
char buf[MAX_LENGTH_OF_SYSTEMSERVER_LOG];
};
static __always_inline void __boot_stat_record_systemserver_init_time_locked(
struct boot_stat_proc *boot_stat, const char *message)
{
struct systemserver_init_time_entry *entry;
struct device *dev = __boot_stat_proc_to_dev(boot_stat);
if (likely(boot_stat->is_completed))
return;
entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
if (unlikely(!entry))
return;
strlcpy(entry->buf, message, sizeof(entry->buf));
list_add(&entry->list, &boot_stat->systemserver_init_time_head);
}
static __always_inline void __boot_stat_add_boot_event_locked(
struct boot_stat_proc *boot_stat,
const char *log)
{
ssize_t prefix_idx;
size_t offset;
const char *message;
prefix_idx = __boot_stat_get_message_offset_from_plog(log, &offset);
message = &log[offset];
switch (prefix_idx) {
case EVT_PLATFORM:
if (unlikely(!boot_stat->is_completed &&
!strcmp(message, "bootcomplete"))) {
boot_stat->ktime_completed = local_clock();
boot_stat->is_completed = true;
}
__boot_stat_record_boot_event_locked(boot_stat, message);
break;
case EVT_RIL:
case EVT_DEBUG:
__boot_stat_record_boot_event_locked(boot_stat, message);
break;
case EVT_SYSTEMSERVER:
__boot_stat_record_systemserver_init_time_locked(boot_stat,
message);
break;
default:
return;
}
}
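/*
 * Public entry point: callers pass the raw "!@..." log line. The cheap
 * "!@Boot" prefix check runs before the mutex is taken, and each known event
 * is timestamped only once, on its first occurrence, since recorded events
 * keep a non-zero ktime.
 */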
void sec_boot_stat_add_boot_event(struct boot_stat_drvdata *drvdata,
const char *log)
{
struct boot_stat_proc *boot_stat;
if (!__boot_stat_is_boot_event(log))
return;
boot_stat = &drvdata->boot_stat;
mutex_lock(&boot_stat->lock);
__boot_stat_add_boot_event_locked(boot_stat, log);
mutex_unlock(&boot_stat->lock);
}
static unsigned long long __boot_stat_show_boot_event_each_locked(
struct seq_file *m,
struct boot_stat_proc *boot_stat,
struct boot_event *event, unsigned long long prev_ktime)
{
char *log;
unsigned long long msec;
unsigned long long delta;
unsigned long long time;
log = kasprintf(GFP_KERNEL, "%s%s",
boot_prefixes[event->prefix_idx].head, event->message);
msec = event->ktime;
do_div(msec, 1000000ULL);
delta = event->ktime - prev_ktime;
do_div(delta, 1000000ULL);
time = sec_boot_stat_ktime_to_time(event->ktime);
do_div(time, 1000000ULL);
seq_printf(m, "%-46s%11llu%13llu%13llu\n", log, time, msec, delta);
kfree(log);
return event->ktime;
}
static void __boot_stat_show_soc(struct seq_file *m,
struct boot_stat_proc *boot_stat)
{
struct boot_stat_drvdata *drvdata = container_of(boot_stat,
struct boot_stat_drvdata, boot_stat);
struct sec_boot_stat_soc_operations *soc_ops;
mutex_lock(&drvdata->soc_ops_lock);
soc_ops = drvdata->soc_ops;
if (!soc_ops || !soc_ops->show_on_boot_stat) {
mutex_unlock(&drvdata->soc_ops_lock);
return;
}
soc_ops->show_on_boot_stat(m);
mutex_unlock(&drvdata->soc_ops_lock);
}
static void __boot_stat_show_boot_event_head(struct seq_file *m,
struct boot_stat_proc *boot_stat)
{
seq_printf(m, "%-47s%11s%13s%13s\n", "Boot Events",
"time", "ktime", "delta");
seq_printf(m, "%s\n", h_line);
__boot_stat_show_soc(m, boot_stat);
}
static void __boot_stat_show_boot_event_locked(struct seq_file *m,
struct boot_stat_proc *boot_stat)
{
struct list_head *boot_event_head = &boot_stat->boot_event_head;
struct boot_event *event;
unsigned long long prev_ktime = 0ULL;
list_for_each_entry(event, boot_event_head, list)
prev_ktime = __boot_stat_show_boot_event_each_locked(
m, boot_stat, event, prev_ktime);
}
static void __boot_stat_show_systemserver_init_time_locked(struct seq_file *m,
struct boot_stat_proc *boot_stat)
{
struct list_head *systemserver_init_time_head =
&boot_stat->systemserver_init_time_head;
struct systemserver_init_time_entry *init_time;
seq_printf(m, "%s\n", h_line);
seq_puts(m, "SystemServer services that took long time\n\n");
list_for_each_entry(init_time, systemserver_init_time_head, list)
seq_printf(m, "%s\n", init_time->buf);
}
static int sec_boot_stat_proc_show(struct seq_file *m, void *v)
{
struct boot_stat_proc *boot_stat = m->private;
__boot_stat_show_boot_event_head(m, boot_stat);
mutex_lock(&boot_stat->lock);
__boot_stat_show_boot_event_locked(m, boot_stat);
__boot_stat_show_systemserver_init_time_locked(m, boot_stat);
mutex_unlock(&boot_stat->lock);
return 0;
}
static int sec_boot_stat_proc_open(struct inode *inode, struct file *file)
{
void *__boot_stat = pde_data(inode);
return single_open(file, sec_boot_stat_proc_show, __boot_stat);
}
static const struct proc_ops boot_stat_pops = {
.proc_open = sec_boot_stat_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
static int __boot_stat_procfs_init(struct device *dev,
struct boot_stat_proc *boot_stat)
{
struct proc_dir_entry *proc;
const char *node_name = "boot_stat";
proc = proc_create_data(node_name, 0444, NULL, &boot_stat_pops,
boot_stat);
if (!proc) {
dev_err(dev, "failed create procfs node (%s)\n",
node_name);
return -ENODEV;
}
boot_stat->proc = proc;
return 0;
}
static void __boot_stat_procfs_exit(struct device *dev,
struct boot_stat_proc *boot_stat)
{
proc_remove(boot_stat->proc);
}
__ss_static int __boot_stat_init_boot_events(struct boot_stat_proc *boot_stat)
{
size_t i;
hash_init(boot_stat->event_htbl);
for (i = 0; i < ARRAY_SIZE(boot_events); i++) {
struct boot_event *event = &boot_events[i];
u32 key = full_name_hash(NULL,
event->message, (unsigned int)strlen(event->message));
INIT_HLIST_NODE(&event->hlist);
hash_add(boot_stat->event_htbl, &event->hlist, key);
event->ktime = 0;
}
return 0;
}
int sec_boot_stat_proc_init(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
struct device *dev = bd->dev;
struct boot_stat_proc *boot_stat = &drvdata->boot_stat;
int err;
mutex_init(&boot_stat->lock);
boot_stat->total_event = ARRAY_SIZE(boot_events);
INIT_LIST_HEAD(&boot_stat->boot_event_head);
INIT_LIST_HEAD(&boot_stat->systemserver_init_time_head);
__boot_stat_init_boot_events(boot_stat);
if (IS_MODULE(CONFIG_SEC_BOOT_STAT))
sec_boot_stat_add_boot_event(drvdata,
"!@Boot: start init process");
err = __boot_stat_procfs_init(dev, boot_stat);
if (err)
return err;
return 0;
}
void sec_boot_stat_proc_exit(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
struct device *dev = bd->dev;
struct boot_stat_proc *boot_stat = &drvdata->boot_stat;
__boot_stat_procfs_exit(dev, boot_stat);
mutex_destroy(&boot_stat->lock);
}

View File

@@ -0,0 +1,41 @@
#ifndef __INTERNAL__SEC_BOOT_STAT_PROC_H__
#define __INTERNAL__SEC_BOOT_STAT_PROC_H__
#define BOOT_STAT_HASH_BITS 3
struct boot_stat_proc {
struct proc_dir_entry *proc;
struct mutex lock;
bool is_completed;
unsigned long long ktime_completed;
struct list_head boot_event_head;
size_t total_event;
size_t nr_event;
struct list_head systemserver_init_time_head;
DECLARE_HASHTABLE(event_htbl, BOOT_STAT_HASH_BITS);
};
enum {
EVT_PLATFORM = 0,
EVT_RIL,
EVT_DEBUG,
EVT_SYSTEMSERVER,
EVT_INVALID,
NUM_OF_BOOT_PREFIX = EVT_INVALID,
};
struct boot_prefix {
const char *head;
size_t head_len;
};
struct boot_event {
struct hlist_node hlist;
struct list_head list;
size_t prefix_idx;
const char *message;
size_t message_len;
unsigned long long ktime;
};
#endif /* __INTERNAL__SEC_BOOT_STAT_PROC_H__ */

View File

@@ -0,0 +1,303 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2014-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include "sec_boot_stat.h"
static const char *h_line = "-----------------------------------------------------------------------------------";
static __always_inline struct enh_boot_time_entry *__enh_boot_time_find_entry_locked(
struct enh_boot_time_proc *enh_boot_time,
const char *message)
{
struct enh_boot_time_entry *h;
size_t msg_len = strlen(message);
u32 key = full_name_hash(NULL, message, (unsigned int)msg_len);
hash_for_each_possible(enh_boot_time->boot_time_htbl, h, hlist, key) {
size_t len = strnlen(h->buf, msg_len + 1);
if (len != msg_len)
continue;
if (!strncmp(h->buf, message, msg_len))
return h;
}
return ERR_PTR(-ENOENT);
}
static __always_inline void __enh_boot_time_record_locked(
struct enh_boot_time_proc *enh_boot_time,
const char *message)
{
struct enh_boot_time_entry *entry;
struct device *dev = __enh_boot_time_proc_to_dev(enh_boot_time);
struct enh_boot_time_entry *entry_in_hash;
u32 key;
entry = devm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
if (unlikely(!entry))
return;
strlcpy(entry->buf, message, sizeof(entry->buf));
entry_in_hash = __enh_boot_time_find_entry_locked(enh_boot_time,
entry->buf);
if (!IS_ERR(entry_in_hash)) {
devm_kfree(dev, entry);
return;
}
entry->ktime = local_clock();
list_add(&entry->list, &enh_boot_time->boot_time_head);
INIT_HLIST_NODE(&entry->hlist);
key = full_name_hash(NULL, entry->buf, (unsigned int)strlen(entry->buf));
hash_add(enh_boot_time->boot_time_htbl, &entry->hlist, key);
}
#define DELAY_KTIME_EBS 30000000000 /* 30 sec */
#define MAX_EVENTS_EBS 150
/* NOTE: Be careful: two mutexes are used in this function. */
static __always_inline void __enh_boot_time_update_is_finished(
struct enh_boot_time_proc *enh_boot_time,
struct boot_stat_proc *boot_stat)
{
unsigned long long delay_ktime;
mutex_lock(&boot_stat->lock);
if (!boot_stat->is_completed) {
mutex_unlock(&boot_stat->lock);
return;
}
mutex_unlock(&boot_stat->lock);
mutex_lock(&enh_boot_time->lock);
if (enh_boot_time->is_finished) {
mutex_unlock(&enh_boot_time->lock);
return;
}
/* NOTE: after 'boot_stat->is_completed' is set, 'ktime_completed'
* is never changed again, so the lock is not needed at this point.
*/
delay_ktime = local_clock() - boot_stat->ktime_completed;
if (delay_ktime >= DELAY_KTIME_EBS)
enh_boot_time->is_finished = true;
mutex_unlock(&enh_boot_time->lock);
}
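/*
 * The packed log_head view exposes the character right after the "!@" marker
 * and the 8-byte "Boot_EBS" token: a ':' there means the standard
 * "!@Boot_EBS: " prefix, which is stripped before recording, while a '_'
 * indicates an extended prefix (e.g. "!@Boot_EBS_F: ...") and the whole line
 * is recorded as-is; anything else is ignored.
 */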
static __always_inline void __enh_boot_time_add_boot_event_locked(
struct enh_boot_time_proc *enh_boot_time, const char *log)
{
const struct {
char pmsg_mark[2]; /* !@ */
uint64_t boot_prefix;
char colon;
} __packed *log_head;
if (enh_boot_time->is_finished ||
enh_boot_time->nr_event >= MAX_EVENTS_EBS)
return;
log_head = (void *)log;
if (log_head->colon == ':') {
const size_t offset = sizeof("!@Boot_EBS: ") - 1;
__enh_boot_time_record_locked(enh_boot_time, &log[offset]);
} else if (log_head->colon == '_') {
__enh_boot_time_record_locked(enh_boot_time, log);
} else {
return;
}
enh_boot_time->nr_event++;
}
void sec_enh_boot_time_add_boot_event(
struct boot_stat_drvdata *drvdata, const char *log)
{
struct enh_boot_time_proc *enh_boot_time = &drvdata->enh_boot_time;
struct boot_stat_proc *boot_stat = &drvdata->boot_stat;
__enh_boot_time_update_is_finished(enh_boot_time, boot_stat);
mutex_lock(&enh_boot_time->lock);
__enh_boot_time_add_boot_event_locked(enh_boot_time, log);
mutex_unlock(&enh_boot_time->lock);
}
static unsigned long long __enh_boot_time_show_framework_each_locked(
struct seq_file *m, struct enh_boot_time_entry *entry,
unsigned long long prev_ktime)
{
unsigned long long msec;
unsigned long long delta;
unsigned long long curr_ktime = entry->ktime;
unsigned long long time;
msec = curr_ktime;
do_div(msec, 1000000ULL);
time = sec_boot_stat_ktime_to_time(curr_ktime);
do_div(time, 1000000ULL);
if (entry->buf[0] == '!') {
delta = curr_ktime - prev_ktime;
do_div(delta, 1000000ULL);
seq_printf(m, "%-90s%7llu%7llu%7llu\n", entry->buf,
time, msec, delta);
} else {
seq_printf(m, "%-90s%7llu%7llu\n", entry->buf,
time, msec);
curr_ktime = prev_ktime;
}
return curr_ktime;
}
static void __enh_boot_time_show_framework_locked(struct seq_file *m,
struct enh_boot_time_proc *enh_boot_time)
{
struct list_head *head = &enh_boot_time->boot_time_head;
struct enh_boot_time_entry *entry;
unsigned long long prev_ktime = 0;
seq_printf(m, "%-90s%7s%7s%7s\n", "Boot Events", "time", "ktime", "delta");
seq_printf(m, "%s\n", h_line);
seq_puts(m, "BOOTLOADER\n");
seq_printf(m, "%s\n", h_line);
sec_boot_stat_bootloader_stat(m);
seq_printf(m, "%s\n", h_line);
seq_puts(m, "FRAMEWORK\n");
seq_printf(m, "%s\n", h_line);
prev_ktime = 0;
list_for_each_entry_reverse (entry, head, list) {
prev_ktime = __enh_boot_time_show_framework_each_locked(m,
entry, prev_ktime);
}
}
static void __enh_boot_time_show_framework(struct seq_file *m,
struct enh_boot_time_proc *enh_boot_time)
{
mutex_lock(&enh_boot_time->lock);
__enh_boot_time_show_framework_locked(m, enh_boot_time);
mutex_unlock(&enh_boot_time->lock);
}
static void __enh_boot_time_show_soc(struct seq_file *m,
struct enh_boot_time_proc *enh_boot_time)
{
struct boot_stat_drvdata *drvdata = container_of(enh_boot_time,
struct boot_stat_drvdata, enh_boot_time);
struct sec_boot_stat_soc_operations *soc_ops;
mutex_lock(&drvdata->soc_ops_lock);
soc_ops = drvdata->soc_ops;
if (!soc_ops || !soc_ops->show_on_enh_boot_time) {
mutex_unlock(&drvdata->soc_ops_lock);
return;
}
soc_ops->show_on_enh_boot_time(m);
mutex_unlock(&drvdata->soc_ops_lock);
}
static int sec_enh_boot_time_proc_show(struct seq_file *m, void *v)
{
struct enh_boot_time_proc *enh_boot_time = m->private;
__enh_boot_time_show_soc(m, enh_boot_time);
__enh_boot_time_show_framework(m, enh_boot_time);
return 0;
}
static int sec_enh_boot_time_proc_open(struct inode *inode, struct file *file)
{
void *__enh_boot_time = pde_data(inode);
return single_open(file, sec_enh_boot_time_proc_show, __enh_boot_time);
}
static const struct proc_ops enh_boot_time_pops = {
.proc_open = sec_enh_boot_time_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
.proc_release = single_release,
};
static int __enh_boot_time_procfs_init(struct device *dev,
struct enh_boot_time_proc *enh_boot_time)
{
struct proc_dir_entry *proc;
const char *node_name = "enhanced_boot_stat";
proc = proc_create_data(node_name, 0444, NULL, &enh_boot_time_pops,
enh_boot_time);
if (!proc) {
dev_err(dev, "failed create procfs node (%s)\n",
node_name);
return -ENODEV;
}
enh_boot_time->proc = proc;
return 0;
}
static void __enh_boot_time_procfs_exit(struct device *dev,
struct enh_boot_time_proc *enh_boot_time)
{
proc_remove(enh_boot_time->proc);
}
int sec_enh_boot_time_init(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
struct device *dev = bd->dev;
struct enh_boot_time_proc *enh_boot_time = &drvdata->enh_boot_time;
int err;
mutex_init(&enh_boot_time->lock);
INIT_LIST_HEAD(&enh_boot_time->boot_time_head);
hash_init(enh_boot_time->boot_time_htbl);
if (IS_MODULE(CONFIG_SEC_BOOT_STAT))
sec_enh_boot_time_add_boot_event(drvdata,
"!@Boot_EBS_F: FirstStageMain Init");
err = __enh_boot_time_procfs_init(dev, enh_boot_time);
if (err)
return err;
return 0;
}
void sec_enh_boot_time_exit(struct builder *bd)
{
struct boot_stat_drvdata *drvdata =
container_of(bd, struct boot_stat_drvdata, bd);
struct device *dev = bd->dev;
struct enh_boot_time_proc *enh_boot_time = &drvdata->enh_boot_time;
__enh_boot_time_procfs_exit(dev, enh_boot_time);
mutex_destroy(&enh_boot_time->lock);
}

View File

@@ -0,0 +1,24 @@
#ifndef __INTERNAL__SEC_ENH_BOOT_TIME_PROC_H__
#define __INTERNAL__SEC_ENH_BOOT_TIME_PROC_H__
#define BOOT_TIME_HASH_BITS 3
struct enh_boot_time_proc {
struct proc_dir_entry *proc;
struct mutex lock;
bool is_finished;
size_t nr_event;
struct list_head boot_time_head;
DECLARE_HASHTABLE(boot_time_htbl, BOOT_TIME_HASH_BITS);
};
#define MAX_LENGTH_OF_ENH_BOOT_TIME_LOG 90
struct enh_boot_time_entry {
struct list_head list;
struct hlist_node hlist;
char buf[MAX_LENGTH_OF_ENH_BOOT_TIME_LOG];
unsigned long long ktime;
};
#endif /* __INTERNAL__SEC_ENH_BOOT_TIME_PROC_H__ */

View File

@@ -0,0 +1,11 @@
menuconfig SEC_DEBUG
tristate "SEC TN Debugging Features"
help
TODO: help is not ready.
config SEC_FORCE_ERR
bool "SEC Generating force errors"
default y if SEC_FACTORY
depends on SEC_DEBUG
help
TODO: help is not ready.

View File

@@ -0,0 +1,9 @@
obj-$(CONFIG_SEC_DEBUG) += sec_debug.o
sec_debug-objs := sec_debug_main.o \
sec_ap_serial.o \
sec_user_fault.o \
sec_debug_node.o \
sec_debug_show_stat.o \
sec_panic_with_reason.o
sec_debug-$(CONFIG_SEC_FORCE_ERR) += sec_force_err.o

View File

@@ -0,0 +1,120 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2019-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/samsung/builder_pattern.h>
#include "sec_debug.h"
static unsigned long long ap_serial __ro_after_init;
module_param_named(ap_serial, ap_serial, ullong, 0440);
static ssize_t SVC_AP_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%016llX\n", ap_serial);
}
static DEVICE_ATTR_RO(SVC_AP);
static struct attribute *SVC_AP_attrs[] = {
&dev_attr_SVC_AP.attr,
NULL,
};
static struct attribute_group SVC_AP_group = {
.attrs = SVC_AP_attrs,
};
static const struct attribute_group *SVC_AP_groups[] = {
&SVC_AP_group,
NULL,
};
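/*
 * Creates /sys/devices/svc/AP exposing the SVC_AP attribute (the AP serial
 * passed in via the 'ap_serial' module parameter). If the "svc" kobject
 * already exists (presumably created earlier by another Samsung driver), it
 * is looked up through sysfs_get_dirent() and reused instead of failing.
 */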
static int __ap_serial_svc_ap_init(struct device *dev,
struct svc_ap_node *svc_ap)
{
struct kernfs_node *kn;
struct kset *dev_ks;
struct kobject *svc_kobj;
struct device *ap_dev;
int err;
dev_ks = dev->kobj.kset;
svc_kobj = kobject_create_and_add("svc", &dev_ks->kobj);
if (IS_ERR_OR_NULL(svc_kobj)) {
kn = sysfs_get_dirent(dev_ks->kobj.sd, "svc");
if (!kn) {
dev_err(dev, "failed to create sys/devices/svc\n");
return -ENODEV;
}
svc_kobj = (struct kobject *)kn->priv;
}
ap_dev = devm_kzalloc(dev, sizeof(struct device), GFP_KERNEL);
if (!ap_dev) {
err = -ENOMEM;
goto err_alloc_ap_dev;
}
err = dev_set_name(ap_dev, "AP");
if (err < 0) {
err = -ENOENT;
goto err_set_name_ap_dev;
}
ap_dev->kobj.parent = svc_kobj;
ap_dev->groups = SVC_AP_groups;
err = device_register(ap_dev);
if (err < 0) {
err = -EINVAL;
goto err_register_ap_dev;
}
svc_ap->ap_dev = ap_dev;
svc_ap->svc_kobj = svc_kobj;
return 0;
err_register_ap_dev:
err_set_name_ap_dev:
err_alloc_ap_dev:
kobject_put(svc_kobj);
return err;
}
static void __ap_serial_svc_ap_exit(struct device *dev,
struct svc_ap_node *svc_ap)
{
device_unregister(svc_ap->ap_dev);
kobject_put(svc_ap->svc_kobj);
}
int sec_ap_serial_sysfs_init(struct builder *bd)
{
struct sec_debug_drvdata *drvdata = container_of(bd,
struct sec_debug_drvdata, bd);
struct device *dev = bd->dev;
return __ap_serial_svc_ap_init(dev, &drvdata->svc_ap);
}
void sec_ap_serial_sysfs_exit(struct builder *bd)
{
struct sec_debug_drvdata *drvdata = container_of(bd,
struct sec_debug_drvdata, bd);
struct device *dev = bd->dev;
__ap_serial_svc_ap_exit(dev, &drvdata->svc_ap);
}

View File

@@ -0,0 +1,72 @@
#ifndef __INTERNAL__SEC_DEBUG_H__
#define __INTERNAL__SEC_DEBUG_H__
#include <linux/debugfs.h>
#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/samsung/builder_pattern.h>
struct svc_ap_node {
struct kobject *svc_kobj;
struct device *ap_dev;
};
#define FORCE_ERR_HASH_BITS 3
struct force_err {
struct mutex lock;
DECLARE_HASHTABLE(htbl, FORCE_ERR_HASH_BITS);
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs;
#endif
};
struct sec_debug_drvdata {
struct builder bd;
struct svc_ap_node svc_ap;
bool in_panic;
struct notifier_block nb_panic;
struct notifier_block nb_restart;
#if IS_ENABLED(CONFIG_SEC_FORCE_ERR)
struct force_err force_err;
#endif
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs_panic;
#endif
};
extern struct sec_debug_drvdata *sec_debug;
static __always_inline bool __debug_is_probed(void)
{
return !!sec_debug;
}
/* sec_user_fault.c */
extern int sec_user_fault_init(struct builder *bd);
extern void sec_user_fault_exit(struct builder *bd);
/* sec_ap_serial.c */
extern int sec_ap_serial_sysfs_init(struct builder *bd);
extern void sec_ap_serial_sysfs_exit(struct builder *bd);
/* sec_force_err.c */
extern int sec_force_err_probe_prolog(struct builder *bd);
extern void sec_force_err_remove_epilog(struct builder *bd);
extern int sec_force_err_build_htbl(struct builder *bd);
extern int sec_force_err_debugfs_create(struct builder *bd);
extern void sec_force_err_debugfs_remove(struct builder *bd);
/* sec_debug_show_stat.c */
extern void sec_debug_show_stat(const char *msg);
/* sec_debug_node.c */
extern int sec_debug_node_init_dump_sink(struct builder *bd);
/* sec_panic_with_reason.c */
extern int sec_panic_with_reason_init(struct builder *bd);
extern void sec_panic_with_reason_exit(struct builder *bd);
#endif /* __INTERNAL__SEC_DEBUG_H__ */

View File

@@ -0,0 +1,287 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2017-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/panic_notifier.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/samsung/of_early_populate.h>
#include <linux/samsung/debug/sec_debug.h>
#include "sec_debug.h"
struct sec_debug_drvdata *sec_debug;
static unsigned int sec_dbg_level __ro_after_init;
module_param_named(debug_level, sec_dbg_level, uint, 0440);
static unsigned int sec_dbg_force_upload __ro_after_init;
module_param_named(force_upload, sec_dbg_force_upload, uint, 0440);
static unsigned int enable __read_mostly = 1;
module_param_named(enable, enable, uint, 0644);
unsigned int sec_debug_level(void)
{
return sec_dbg_level;
}
EXPORT_SYMBOL_GPL(sec_debug_level);
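/*
 * Policy: at the LOW debug level (and at MID on factory builds) debugging is
 * reported as enabled only when the 'force_upload' parameter is set;
 * otherwise the 'enable' module parameter decides.
 */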
bool sec_debug_is_enabled(void)
{
switch (sec_dbg_level) {
case SEC_DEBUG_LEVEL_LOW:
#if IS_ENABLED(CONFIG_SEC_FACTORY)
case SEC_DEBUG_LEVEL_MID:
#endif
return !!sec_dbg_force_upload;
}
return !!enable;
}
EXPORT_SYMBOL_GPL(sec_debug_is_enabled);
static noinline int __debug_parse_dt_panic_notifier_priority(struct builder *bd,
struct device_node *np)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_panic;
s32 priority;
int err;
err = of_property_read_s32(np, "sec,panic_notifier-priority",
&priority);
if (err)
return -EINVAL;
nb->priority = (int)priority;
return 0;
}
static noinline int __debug_parse_dt_restart_handler_priority(struct builder *bd,
struct device_node *np)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_restart;
s32 priority;
int err;
err = of_property_read_s32(np, "sec,restart_handler-priority",
&priority);
if (err)
return -EINVAL;
nb->priority = (int)priority;
return 0;
}
static const struct dt_builder __debug_dt_builder[] = {
DT_BUILDER(__debug_parse_dt_panic_notifier_priority),
DT_BUILDER(__debug_parse_dt_restart_handler_priority),
};
static noinline int __debug_parse_dt(struct builder *bd)
{
return sec_director_parse_dt(bd, __debug_dt_builder,
ARRAY_SIZE(__debug_dt_builder));
}
static int sec_debug_panic_notifer_handler(struct notifier_block *nb,
unsigned long l, void *msg)
{
struct sec_debug_drvdata *drvdata =
container_of(nb, struct sec_debug_drvdata, nb_panic);
drvdata->in_panic = true;
sec_debug_show_stat((const char *)msg);
return NOTIFY_OK;
}
static int __debug_register_panic_notifier(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_panic;
drvdata->in_panic = false;
nb->notifier_call = sec_debug_panic_notifer_handler;
return atomic_notifier_chain_register(&panic_notifier_list, nb);
}
static void __debug_unregister_panic_notifier(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_panic;
atomic_notifier_chain_unregister(&panic_notifier_list, nb);
}
static int sec_debug_restart_handler(struct notifier_block *nb,
unsigned long l, void *msg)
{
struct sec_debug_drvdata *drvdata =
container_of(nb, struct sec_debug_drvdata, nb_restart);
if (!drvdata->in_panic)
return NOTIFY_OK;
dev_err(drvdata->bd.dev, "hang on machine_restart\n");
while (true) ;
return NOTIFY_OK;
}
static int __debug_register_restart_handler(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_restart;
nb->notifier_call = sec_debug_restart_handler;
return register_restart_handler(nb);
}
static void __debug_unregister_restart_handler(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_restart;
unregister_restart_handler(nb);
}
static noinline int __debug_probe_epilog(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
sec_debug = drvdata;
return 0;
}
static noinline void __debug_remove_epilog(struct builder *bd)
{
/* FIXME: This is not a graceful exit. */
sec_debug = NULL;
}
static int __debug_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct sec_debug_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __debug_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct sec_debug_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static const struct dev_builder __debug_dev_builder[] = {
DEVICE_BUILDER(__debug_parse_dt, NULL),
DEVICE_BUILDER(sec_user_fault_init, sec_user_fault_exit),
DEVICE_BUILDER(sec_ap_serial_sysfs_init, sec_ap_serial_sysfs_exit),
DEVICE_BUILDER(sec_debug_node_init_dump_sink, NULL),
DEVICE_BUILDER(__debug_register_panic_notifier,
__debug_unregister_panic_notifier),
DEVICE_BUILDER(__debug_register_restart_handler,
__debug_unregister_restart_handler),
DEVICE_BUILDER(sec_panic_with_reason_init, sec_panic_with_reason_exit),
#if IS_ENABLED(CONFIG_SEC_FORCE_ERR)
DEVICE_BUILDER(sec_force_err_probe_prolog, sec_force_err_remove_epilog),
DEVICE_BUILDER(sec_force_err_build_htbl, NULL),
DEVICE_BUILDER(sec_force_err_debugfs_create, sec_force_err_debugfs_remove),
#endif
DEVICE_BUILDER(__debug_probe_epilog, __debug_remove_epilog),
};
static int sec_debug_probe(struct platform_device *pdev)
{
return __debug_probe(pdev, __debug_dev_builder,
ARRAY_SIZE(__debug_dev_builder));
}
static int sec_debug_remove(struct platform_device *pdev)
{
return __debug_remove(pdev, __debug_dev_builder,
ARRAY_SIZE(__debug_dev_builder));
}
static const struct of_device_id sec_debug_match_table[] = {
{ .compatible = "samsung,sec_debug" },
{},
};
MODULE_DEVICE_TABLE(of, sec_debug_match_table);
static struct platform_driver sec_debug_driver = {
.driver = {
.name = "sec,debug",
.of_match_table = of_match_ptr(sec_debug_match_table),
},
.probe = sec_debug_probe,
.remove = sec_debug_remove,
};
static int __init sec_debug_init(void)
{
int err;
err = platform_driver_register(&sec_debug_driver);
if (err)
return err;
err = __of_platform_early_populate_init(sec_debug_match_table);
if (err)
return err;
return 0;
}
core_initcall(sec_debug_init);
static void __exit sec_debug_exit(void)
{
platform_driver_unregister(&sec_debug_driver);
}
module_exit(sec_debug_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("TN Debugging Feature");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,231 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2019-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/samsung/builder_pattern.h>
#include <linux/samsung/debug/sec_debug.h>
#include "sec_debug.h"
static long *g_allocated_phys_mem;
static long *g_allocated_virt_mem;
static int sec_alloc_virtual_mem(const char *val, const struct kernel_param *kp)
{
long *mem;
char *str = (char *) val;
size_t size = (size_t)memparse(str, &str);
if (size) {
mem = vmalloc(size);
if (mem) {
pr_info("Allocated virtual memory of size: 0x%zx bytes\n",
size);
*mem = (long)g_allocated_virt_mem;
g_allocated_virt_mem = mem;
return 0;
}
pr_err("Failed to allocate virtual memory of size: 0x%zx bytes\n",
size);
return -ENOMEM;
}
pr_info("Invalid size: %s bytes\n", val);
return -EAGAIN;
}
module_param_call(alloc_virtual_mem, &sec_alloc_virtual_mem, NULL, NULL, 0644);
static int sec_free_virtual_mem(const char *val, const struct kernel_param *kp)
{
long *mem;
char *str = (char *) val;
size_t free_count = (size_t)memparse(str, &str);
if (!free_count) {
if (strncmp(val, "all", 4)) {
free_count = 10;
} else {
pr_err("Invalid free count: %s\n", val);
return -EAGAIN;
}
}
if (free_count > 10)
free_count = 10;
if (!g_allocated_virt_mem) {
pr_err("No virtual memory chunk to free.\n");
return 0;
}
while (g_allocated_virt_mem && free_count--) {
mem = (long *) *g_allocated_virt_mem;
vfree(g_allocated_virt_mem);
g_allocated_virt_mem = mem;
}
pr_info("Freed previously allocated virtual memory chunks.\n");
if (g_allocated_virt_mem)
pr_info("Still, some virtual memory chunks are not freed. Try again.\n");
return 0;
}
module_param_call(free_virtual_mem, &sec_free_virtual_mem, NULL, NULL, 0644);
static int sec_alloc_physical_mem(const char *val,
const struct kernel_param *kp)
{
long *mem;
char *str = (char *) val;
size_t size = (size_t)memparse(str, &str);
if (size) {
mem = kmalloc(size, GFP_KERNEL);
if (mem) {
pr_info("Allocated physical memory of size: 0x%zx bytes\n",
size);
*mem = (long) g_allocated_phys_mem;
g_allocated_phys_mem = mem;
return 0;
}
pr_err("Failed to allocate physical memory of size: 0x%zx bytes\n",
size);
return -ENOMEM;
}
pr_info("Invalid size: %s bytes\n", val);
return -EAGAIN;
}
module_param_call(alloc_physical_mem, &sec_alloc_physical_mem,
NULL, NULL, 0644);
static int sec_free_physical_mem(const char *val, const struct kernel_param *kp)
{
long *mem;
char *str = (char *) val;
size_t free_count = (size_t)memparse(str, &str);
if (!free_count) {
if (strncmp(val, "all", 4)) {
free_count = 10;
} else {
pr_info("Invalid free count: %s\n", val);
return -EAGAIN;
}
}
if (free_count > 10)
free_count = 10;
if (!g_allocated_phys_mem) {
pr_info("No physical memory chunk to free.\n");
return 0;
}
while (g_allocated_phys_mem && free_count--) {
mem = (long *) *g_allocated_phys_mem;
kfree(g_allocated_phys_mem);
g_allocated_phys_mem = mem;
}
pr_info("Freed previously allocated physical memory chunks.\n");
if (g_allocated_phys_mem)
pr_info("Still, some physical memory chunks are not freed. Try again.\n");
return 0;
}
module_param_call(free_physical_mem, &sec_free_physical_mem, NULL, NULL, 0644);
#if IS_BUILTIN(CONFIG_SEC_DEBUG)
static int dbg_set_cpu_affinity(const char *val, const struct kernel_param *kp)
{
char *endptr;
pid_t pid;
int cpu;
struct cpumask mask;
long ret;
pid = (pid_t)memparse(val, &endptr);
if (*endptr != '@') {
pr_info("invalid input strin: %s\n", val);
return -EINVAL;
}
cpu = (int)memparse(++endptr, &endptr);
cpumask_clear(&mask);
cpumask_set_cpu(cpu, &mask);
pr_info("Setting %d cpu affinity to cpu%d\n", pid, cpu);
ret = sched_setaffinity(pid, &mask);
pr_info("sched_setaffinity returned %ld\n", ret);
return 0;
}
module_param_call(setcpuaff, &dbg_set_cpu_affinity, NULL, NULL, 0644);
#endif
/* FIXME: backward compatibility. This value is always 1 */
static unsigned int reboot_multicmd = 1;
module_param_named(reboot_multicmd, reboot_multicmd, uint, 0644);
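/*
 * The 'dump_sink' parameter is mirrored into a kernel-allocated buffer so
 * that sec_debug_get_dump_sink_phys() can hand out its physical address,
 * presumably for a consumer outside the kernel (e.g. firmware or a
 * bootloader) to read the chosen sink.
 */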
static unsigned int dump_sink;
static unsigned int *dump_sink_virt;
static int sec_debug_set_dump_sink(const char *val,
const struct kernel_param *kp)
{
int ret = param_set_uint(val, kp);
if (dump_sink_virt)
*dump_sink_virt = dump_sink;
return ret;
}
static int sec_debug_get_dump_sink(char *buffer, const struct kernel_param *kp)
{
return param_get_uint(buffer, kp);
}
phys_addr_t sec_debug_get_dump_sink_phys(void)
{
return virt_to_phys(dump_sink_virt);
}
EXPORT_SYMBOL_GPL(sec_debug_get_dump_sink_phys);
module_param_call(dump_sink, sec_debug_set_dump_sink, sec_debug_get_dump_sink,
&dump_sink, 0644);
int sec_debug_node_init_dump_sink(struct builder *bd)
{
struct device *dev = bd->dev;
dump_sink_virt = devm_kmalloc(dev, sizeof(*dump_sink_virt), GFP_KERNEL);
if (!dump_sink_virt)
return -ENOMEM;
*dump_sink_virt = dump_sink;
return 0;
}

View File

@@ -0,0 +1,200 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/kernel.h>
#include <linux/sched/debug.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
static bool __debug_is_platform_lockup_suspected(const char *msg)
{
static const char *expected[] = {
"Crash Key",
"User Crash Key",
"Long Key Press",
"Software Watchdog Timer expired",
};
size_t i;
for (i = 0; i < ARRAY_SIZE(expected); i++) {
if (!strncmp(msg, expected[i], strlen(expected[i])))
return true;
}
return false;
}
/* NOTE: see 'state_filter_match' function */
static bool __debug_is_task_uninterruptible(struct task_struct *p)
{
if (!(p->__state & TASK_UNINTERRUPTIBLE))
return false;
if (p->__state == TASK_IDLE)
return false;
return true;
}
/* NOTE: see 'show_state_filter' function */
static void ____debug_show_task_uninterruptible(void)
{
struct task_struct *g, *p;
for_each_process_thread(g, p) {
if (__debug_is_task_uninterruptible(p))
sched_show_task(p);
}
}
static void __debug_show_task_uninterruptible(void)
{
pr_info("\n");
pr_info(" ---------------------------------------------------------------------------------------\n");
if (IS_BUILTIN(CONFIG_SEC_DEBUG))
show_state_filter(TASK_UNINTERRUPTIBLE);
else
____debug_show_task_uninterruptible();
pr_info(" ---------------------------------------------------------------------------------------\n");
}
/* TODO: this is a modified version of 'show_stat' in 'fs/proc/stat.c'
* this function should be adapted for each kernel version
*/
#ifndef arch_irq_stat_cpu
#define arch_irq_stat_cpu(cpu) 0
#endif
#ifndef arch_irq_stat
#define arch_irq_stat() 0
#endif
static void __debug_show_cpu_stat(void)
{
int i, j;
u64 user, nice, system, idle, iowait, irq, softirq, steal;
u64 guest, guest_nice;
u64 sum = 0;
u64 sum_softirq = 0;
unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
struct timespec64 boottime;
user = nice = system = idle = iowait =
irq = softirq = steal = 0;
guest = guest_nice = 0;
getboottime64(&boottime);
for_each_possible_cpu(i) {
user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
idle += kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
iowait += kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT];
irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
sum += kstat_cpu_irqs_sum(i);
sum += arch_irq_stat_cpu(i);
for (j = 0; j < NR_SOFTIRQS; j++) {
unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
per_softirq_sums[j] += softirq_stat;
sum_softirq += softirq_stat;
}
}
sum += arch_irq_stat();
pr_info("\n");
pr_info(" ---------------------------------------------------------------------------------------\n");
pr_info(" %8s %8s %8s %8s %8s %8s %8s %8s %8s %8s\n",
"user", "nice", "system", "idle", "iowait", "irq",
"softirq", "steal", "guest", "guest_nice");
pr_info("cpu %8llu %8llu %8llu %8llu %8llu %8llu %8llu %8llu %8llu %8llu\n",
nsec_to_clock_t(user),
nsec_to_clock_t(nice),
nsec_to_clock_t(system),
nsec_to_clock_t(idle),
nsec_to_clock_t(iowait),
nsec_to_clock_t(irq),
nsec_to_clock_t(softirq),
nsec_to_clock_t(steal),
nsec_to_clock_t(guest),
nsec_to_clock_t(guest_nice));
for_each_online_cpu(i) {
/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
idle = kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
iowait = kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT];
irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
pr_info("cpu%-2d %8llu %8llu %8llu %8llu %8llu %8llu %8llu %8llu %8llu %8llu\n",
i,
nsec_to_clock_t(user),
nsec_to_clock_t(nice),
nsec_to_clock_t(system),
nsec_to_clock_t(idle),
nsec_to_clock_t(iowait),
nsec_to_clock_t(irq),
nsec_to_clock_t(softirq),
nsec_to_clock_t(steal),
nsec_to_clock_t(guest),
nsec_to_clock_t(guest_nice));
}
#if 0
#if IS_BUILTIN(CONFIG_SEC_DEBUG)
pr_info(" ---------------------------------------------------------------------------------------\n");
pr_info("intr %llu\n", (unsigned long long)sum);
/* sum again ? it could be updated? */
for_each_irq_nr(j)
if (kstat_irqs(j))
pr_info(" irq-%d : %u\n", j, kstat_irqs(j));
pr_info(" ---------------------------------------------------------------------------------------\n");
pr_info("\nctxt %llu\n"
"btime %llu\n"
"processes %lu\n"
"procs_running %lu\n"
"procs_blocked %lu\n",
nr_context_switches(),
(unsigned long long)boottime.tv_sec,
total_forks,
nr_running(),
nr_iowait());
pr_info(" ---------------------------------------------------------------------------------------\n");
pr_info("softirq %llu\n", (unsigned long long)sum_softirq);
for (i = 0; i < NR_SOFTIRQS; i++)
pr_info(" softirq-%d : %u\n", i, per_softirq_sums[i]);
#endif
#endif
pr_info("\n");
pr_info(" ---------------------------------------------------------------------------------------\n");
}
void sec_debug_show_stat(const char *msg)
{
if (!__debug_is_platform_lockup_suspected(msg))
return;
__debug_show_task_uninterruptible();
__debug_show_cpu_stat();
}

View File

@@ -0,0 +1,429 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2017-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/samsung/builder_pattern.h>
#include <linux/samsung/debug/sec_force_err.h>
#include "sec_debug.h"
static struct force_err_handle *__force_err_find_handle_locked(
struct force_err *force_err, const char *val)
{
struct force_err_handle *h;
size_t val_len = strlen(val);
u32 key = full_name_hash(NULL, val, (unsigned int)val_len);
hash_for_each_possible(force_err->htbl, h, node, key) {
size_t len = strnlen(h->val, val_len + 1);
if (len != val_len)
continue;
if (!strncmp(h->val, val, val_len))
return h;
}
return ERR_PTR(-ENOENT);
}
static inline int __force_err_add_custom_handle(struct force_err *force_err,
struct force_err_handle *h)
{
struct force_err_handle *h_old;
u32 key = full_name_hash(NULL, h->val, (unsigned int)strlen(h->val));
int ret = 0;
mutex_lock(&force_err->lock);
if (hash_empty(force_err->htbl)) {
ret = -EBUSY;
goto not_initialized;
}
if (hash_hashed(&h->node)) {
pr_warn("The node is aready added! (%s)\n", h->val);
goto already_added;
}
h_old = __force_err_find_handle_locked(force_err, h->val);
if (!IS_ERR(h_old)) {
pr_warn("A same handler for %s is regitered before. I'll be removed.\n",
h->val);
hash_del(&h_old->node);
}
hash_add(force_err->htbl, &h->node, key);
already_added:
not_initialized:
mutex_unlock(&force_err->lock);
return ret;
}
int sec_force_err_add_custom_handle(struct force_err_handle *h)
{
if (!__debug_is_probed())
return -EBUSY;
return __force_err_add_custom_handle(&sec_debug->force_err, h);
}
EXPORT_SYMBOL_GPL(sec_force_err_add_custom_handle);
static inline int __force_err_del_custom_handle(struct force_err *force_err,
struct force_err_handle *h)
{
int ret = 0;
mutex_lock(&force_err->lock);
if (hash_empty(force_err->htbl)) {
ret = -EBUSY;
goto not_initialized;
}
if (!hash_hashed(&h->node))
goto already_removed;
hash_del(&h->node);
already_removed:
not_initialized:
mutex_unlock(&force_err->lock);
return ret;
}
int sec_force_err_del_custom_handle(struct force_err_handle *h)
{
if (!__debug_is_probed())
return -EBUSY;
return __force_err_del_custom_handle(&sec_debug->force_err, h);
}
EXPORT_SYMBOL_GPL(sec_force_err_del_custom_handle);
/* timeout for dog bark/bite */
#define DELAY_TIME 20000
static void __simulate_apps_wdog_bark(struct force_err_handle *h)
{
unsigned long time_out_jiffies;
pr_emerg("Simulating apps watch dog bark\n");
local_irq_disable();
time_out_jiffies = jiffies + msecs_to_jiffies(DELAY_TIME);
while (time_is_after_jiffies(time_out_jiffies))
udelay(1);
local_irq_enable();
/* if we reach here, simulation failed */
pr_emerg("Simulation of apps watch dog bark failed\n");
}
static void __simulate_bug(struct force_err_handle *h)
{
/* NOTE: force error is not enabled for end-users. */
BUG();
}
static void __simulate_bug_on(struct force_err_handle *h)
{
/* NOTE: force error is not enabled for end-users. */
BUG_ON(true);
}
static void __simulate_panic(struct force_err_handle *h)
{
/* NOTE: force error is not enabled for end-users. */
panic("%s", __func__);
}
static void __simulate_apps_wdog_bite(struct force_err_handle *h)
{
unsigned long time_out_jiffies;
#if IS_ENABLED(CONFIG_HOTPLUG_CPU)
int cpu;
for_each_online_cpu(cpu) {
if (cpu == 0)
continue;
remove_cpu(cpu);
}
#endif
pr_emerg("Simulating apps watch dog bite\n");
local_irq_disable();
time_out_jiffies = jiffies + msecs_to_jiffies(DELAY_TIME);
while (time_is_after_jiffies(time_out_jiffies))
udelay(1);
local_irq_enable();
/* if we reach here, the simulation failed */
pr_emerg("Simulation of apps watch dog bite failed\n");
}
static void __simulate_bus_hang(struct force_err_handle *h)
{
void __iomem *p = NULL;
pr_emerg("Generating Bus Hang!\n");
p = ioremap_wt(0xFC4B8000, 32);
*(unsigned int *)p = *(unsigned int *)p;
mb(); /* memory barrier to generate bus hang */
pr_info("*p = %x\n", *(unsigned int *)p);
pr_emerg("Clk may be enabled.Try again if it reaches here!\n");
}
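/* never assigned in this file, so the store in __simulate_dabort() below
 * dereferences NULL and raises the intended data abort
 */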
unsigned long *__dabort_buf;
static void __simulate_dabort(struct force_err_handle *h)
{
*__dabort_buf = 0;
}
static void __simulate_pabort(struct force_err_handle *h)
{
((void (*)(void))NULL)();
}
static void __simulate_dblfree(struct force_err_handle *h)
{
unsigned int *ptr = kmalloc(sizeof(unsigned int), GFP_KERNEL);
kfree(ptr);
msleep(1000);
kfree(ptr);
}
static void __simulate_danglingref(struct force_err_handle *h)
{
unsigned int *ptr = kmalloc(sizeof(unsigned int), GFP_KERNEL);
kfree(ptr);
*ptr = 0x1234;
}
static void __simulate_lowmem(struct force_err_handle *h)
{
size_t i;
for (i = 0; kmalloc(128 * 1024, GFP_KERNEL); i++)
;
pr_emerg("Allocated %zu KB!\n", i * 128);
}
static void __simulate_memcorrupt(struct force_err_handle *h)
{
unsigned int *ptr = kmalloc(sizeof(unsigned int), GFP_KERNEL);
*ptr++ = 4;
*ptr = 2;
/* NOTE: force error is not enabled for end-users. */
panic("MEMORY CORRUPTION");
}
static struct force_err_handle __force_err_default[] = {
FORCE_ERR_HANDLE("appdogbark", "Generating an apps wdog bark!",
__simulate_apps_wdog_bark),
FORCE_ERR_HANDLE("appdogbite", "Generating an apps wdog bite!",
__simulate_apps_wdog_bite),
FORCE_ERR_HANDLE("dabort", "Generating a data abort exception!",
__simulate_dabort),
FORCE_ERR_HANDLE("pabort", "Generating a data abort exception!",
__simulate_pabort),
FORCE_ERR_HANDLE("bushang", "Generating a Bus Hang!",
__simulate_bus_hang),
FORCE_ERR_HANDLE("dblfree", NULL,
__simulate_dblfree),
FORCE_ERR_HANDLE("danglingref", NULL,
__simulate_danglingref),
FORCE_ERR_HANDLE("lowmem", "Allocating memory until failure!",
__simulate_lowmem),
FORCE_ERR_HANDLE("memcorrupt", NULL,
__simulate_memcorrupt),
FORCE_ERR_HANDLE("KP", "Generating a data abort exception!",
__simulate_dabort),
FORCE_ERR_HANDLE("DP", NULL,
__simulate_apps_wdog_bark),
FORCE_ERR_HANDLE("bug", "call BUG()",
__simulate_bug),
FORCE_ERR_HANDLE("bug_on", "call BUG_ON()",
__simulate_bug_on),
FORCE_ERR_HANDLE("panic", "call panic()",
__simulate_panic),
};
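/*
 * Looks up the handler registered for 'val' and invokes it while holding the
 * lock. Most of the default handlers deliberately crash or hang the system
 * and never return; returning from here means the simulation did not take
 * effect.
 */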
static long __force_error(struct force_err *force_err, const char *val)
{
struct force_err_handle *h;
long err = 0;
pr_emerg("!!!WARN forced error : %s\n", val);
mutex_lock(&force_err->lock);
h = __force_err_find_handle_locked(force_err, val);
if (IS_ERR(h)) {
pr_warn("%s is not supported!\n", val);
mutex_unlock(&force_err->lock);
return 0;
}
h->func(h);
pr_emerg("No such error defined for now!\n");
mutex_unlock(&force_err->lock);
return err;
}
static int force_error(const char *val, const struct kernel_param *kp)
{
char *__trimed_val, *trimed_val;
int err;
if (!__debug_is_probed())
return -EBUSY;
__trimed_val = kstrdup(val, GFP_KERNEL);
if (!__trimed_val) {
pr_err("Not enough memory!\n");
return 0;
}
trimed_val = strim(__trimed_val);
err = (int)__force_error(&sec_debug->force_err, trimed_val);
kfree(__trimed_val);
return err;
}
module_param_call(force_error, force_error, NULL, NULL, 0644);
int sec_force_err_probe_prolog(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct force_err *force_err = &drvdata->force_err;
mutex_init(&force_err->lock);
hash_init(force_err->htbl);
return 0;
}
void sec_force_err_remove_epilog(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct force_err *force_err = &drvdata->force_err;
mutex_destroy(&force_err->lock);
}
int sec_force_err_build_htbl(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct force_err *force_err = &drvdata->force_err;
struct force_err_handle *h;
u32 key;
size_t i;
for (i = 0; i < ARRAY_SIZE(__force_err_default); i++) {
h = &__force_err_default[i];
INIT_HLIST_NODE(&h->node);
key = full_name_hash(NULL, h->val, (unsigned int)strlen(h->val));
hash_add(force_err->htbl, &h->node, key);
}
return 0;
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
static void __force_err_dbgfs_show_each_locked(struct seq_file *m,
struct force_err_handle *h)
{
seq_printf(m, "[<%p>] %s\n", h, h->val);
seq_printf(m, " - msg : %s\n", h->msg);
seq_printf(m, " - func : [<%p>] %ps\n", h->func, h->func);
seq_puts(m, "\n");
}
static int sec_force_err_dbgfs_show_all(struct seq_file *m, void *unused)
{
struct force_err *force_err = m->private;
struct force_err_handle *h;
int bkt;
mutex_lock(&force_err->lock);
hash_for_each(force_err->htbl, bkt, h, node) {
__force_err_dbgfs_show_each_locked(m, h);
}
mutex_unlock(&force_err->lock);
return 0;
}
static int sec_force_err_dbgfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sec_force_err_dbgfs_show_all,
inode->i_private);
}
static const struct file_operations sec_force_err_dbgfs_fops = {
.open = sec_force_err_dbgfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
int sec_force_err_debugfs_create(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct force_err *force_err = &drvdata->force_err;
force_err->dbgfs = debugfs_create_file("sec_force_err", 0440,
NULL, force_err, &sec_force_err_dgbfs_fops);
return 0;
}
void sec_force_err_debugfs_remove(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
struct force_err *force_err = &drvdata->force_err;
debugfs_remove(force_err->dbgfs);
}
#else
int sec_force_err_debugfs_create(struct builder *bd) { return 0; }
void sec_force_err_debugfs_remove(struct builder *bd) {}
#endif /* IS_ENABLED(CONFIG_DEBUG_FS) */

View File

@@ -0,0 +1,82 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#include "sec_debug.h"
#define MAX_BUF_SIZE 64
static ssize_t panic_with_reason_trigger(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
ssize_t ret;
char panicstr[MAX_BUF_SIZE];
/* leave room for the trailing NUL written below */
if (count >= MAX_BUF_SIZE)
return -EINVAL;
/* copy data to kernel space from user space */
ret = simple_write_to_buffer(panicstr, sizeof(panicstr), ppos, buf, count);
if (ret < 0)
return ret;
panicstr[ret] = '\0';
panic("%s", panicstr);
return count;
}
static const struct file_operations panic_with_reason_fops = {
.write = panic_with_reason_trigger,
.open = simple_open,
.llseek = default_llseek,
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
static int __panic_with_reason_init(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
drvdata->dbgfs_panic = debugfs_create_file("panic_with_reason", 0222,
NULL, NULL, &panic_with_reason_fops);
return 0;
}
static void __panic_with_reason_exit(struct builder *bd)
{
struct sec_debug_drvdata *drvdata =
container_of(bd, struct sec_debug_drvdata, bd);
debugfs_remove(drvdata->dbgfs_panic);
}
#else
static int __panic_with_reason_init(struct builder *bd)
{
return 0;
}
static void __panic_with_reason_exit(struct builder *bd)
{
}
#endif
int sec_panic_with_reason_init(struct builder *bd)
{
if (!IS_ENABLED(CONFIG_SEC_FACTORY))
return 0;
return __panic_with_reason_init(bd);
}
void sec_panic_with_reason_exit(struct builder *bd)
{
if (!IS_ENABLED(CONFIG_SEC_FACTORY))
return;
__panic_with_reason_exit(bd);
}

View File

@@ -0,0 +1,65 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2019-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <linux/samsung/builder_pattern.h>
#include <linux/samsung/debug/sec_debug.h>
#include "sec_debug.h"
static unsigned int enable_user = 1;
module_param_named(enable_user, enable_user, uint, 0644);
static void sec_user_fault_dump(void)
{
if (sec_debug_is_enabled() && enable_user)
panic("User Fault");
}
static ssize_t sec_user_fault_write(struct file *file,
const char __user *buffer, size_t count, loff_t *offs)
{
char buf[100];
if (count > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, buffer, count))
return -EFAULT;
buf[count] = '\0';
if (!strncmp(buf, "dump_user_fault", strlen("dump_user_fault")))
sec_user_fault_dump();
return count;
}
static const struct proc_ops sec_user_fault_proc_fops = {
.proc_write = sec_user_fault_write,
};
static struct proc_dir_entry *proc_user_fault;
int sec_user_fault_init(struct builder *bd)
{
proc_user_fault = proc_create("user_fault", 0220, NULL,
&sec_user_fault_proc_fops);
if (!proc_user_fault)
return -ENOMEM;
return 0;
}
void sec_user_fault_exit(struct builder *bd)
{
proc_remove(proc_user_fault);
}

View File

@@ -0,0 +1,22 @@
config SEC_CRASHKEY
tristate "SEC Force key crash driver"
depends on SEC_KEY_NOTIFIER
help
TODO: help is not ready.
config SEC_CRASHKEY_TEST_FOR_ON_DEVICE
tristate "KUnit test for sec_crashkey_test"
depends on KUNIT
depends on SEC_CRASHKEY
help
TODO: Describe config fully.
If you run this test driver on a device, you SHOULD set this config to 'm' to build the test driver as a module.
config SEC_CRASHKEY_TEST_FOR_ONLY_UML
tristate "KUnit test for sec_crashkey_test"
depends on KUNIT
depends on UML
depends on SEC_CRASHKEY
help
TODO: Describe config fully.
It is recommended to set this config to 'y'.

View File

@@ -0,0 +1,3 @@
obj-$(CONFIG_SEC_CRASHKEY) += sec_crashkey.o
GCOV_PROFILE_sec_crashkey.o := $(CONFIG_KUNIT)

View File

@@ -0,0 +1,585 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2019-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/samsung/bsp/sec_key_notifier.h>
#include <linux/samsung/debug/sec_debug.h>
#include <linux/samsung/debug/sec_log_buf.h>
#include <linux/samsung/of_early_populate.h>
#include <linux/samsung/sec_kunit.h>
#include <linux/samsung/sec_of.h>
#include "sec_crashkey.h"
static DEFINE_MUTEX(crashkey_list_lock);
static LIST_HEAD(crashkey_dev_list);
__ss_static struct crashkey_drvdata *__crashkey_find_by_name_locked(
const char *name, struct list_head *head)
{
struct crashkey_drvdata *drvdata;
size_t name_len = strlen(name);
list_for_each_entry(drvdata, head, list) {
size_t len = strnlen(drvdata->name, name_len + 1);
if (len != name_len)
continue;
if (!strncmp(drvdata->name, name, name_len))
return drvdata;
}
return ERR_PTR(-ENOENT);
}
__ss_static int __crashkey_add_preparing_panic_locked(
struct crashkey_drvdata *drvdata, struct notifier_block *nb)
{
struct crashkey_notify *notify = &drvdata->notify;
return raw_notifier_chain_register(&notify->list, nb);
}
int sec_crashkey_add_preparing_panic(struct notifier_block *nb,
const char *name)
{
struct crashkey_drvdata *drvdata;
int err;
mutex_lock(&crashkey_list_lock);
drvdata = __crashkey_find_by_name_locked(name, &crashkey_dev_list);
if (IS_ERR(drvdata)) {
pr_warn("%s is not a valid drvdata device!\n", name);
err = -ENODEV;
goto err_invalid_name;
}
err = __crashkey_add_preparing_panic_locked(drvdata, nb);
if (err) {
struct device *dev = drvdata->bd.dev;
dev_warn(dev, "failed to add a notifier for %s (%d)!\n",
name, err);
dev_warn(dev, "Caller is %pS\n", __builtin_return_address(0));
}
err_invalid_name:
mutex_unlock(&crashkey_list_lock);
return err;
}
EXPORT_SYMBOL_GPL(sec_crashkey_add_preparing_panic);
__ss_static int __crashkey_del_preparing_panic_locked(
struct crashkey_drvdata *drvdata, struct notifier_block *nb)
{
struct crashkey_notify *notify = &drvdata->notify;
return raw_notifier_chain_unregister(&notify->list, nb);
}
int sec_crashkey_del_preparing_panic(struct notifier_block *nb,
const char *name)
{
struct crashkey_drvdata *drvdata;
int err;
mutex_lock(&crashkey_list_lock);
drvdata = __crashkey_find_by_name_locked(name, &crashkey_dev_list);
if (IS_ERR(drvdata)) {
pr_warn("%s is not a valid drvdata device!\n", name);
err = -ENODEV;
goto err_invalid_name;
}
err = __crashkey_del_preparing_panic_locked(drvdata, nb);
if (err) {
struct device *dev = drvdata->bd.dev;
dev_warn(dev, "failed to remove a notifier for %s!\n", name);
dev_warn(dev, "Caller is %pS\n", __builtin_return_address(0));
}
err_invalid_name:
mutex_unlock(&crashkey_list_lock);
return err;
}
EXPORT_SYMBOL_GPL(sec_crashkey_del_preparing_panic);
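/* Illustrative client usage (a sketch, not part of this driver): another
 * module can hook the moment the crash-key pattern is matched by registering
 * a notifier against a crashkey device by name. The device name
 * "sec_crash_key" and the callback below are assumptions made for this
 * example only.
 *
 *	static int my_prepare_panic(struct notifier_block *nb,
 *			unsigned long action, void *data)
 *	{
 *		pr_info("crash key matched - collect extra debug info here\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_prepare_panic,
 *	};
 *
 *	err = sec_crashkey_add_preparing_panic(&my_nb, "sec_crash_key");
 *	...
 *	sec_crashkey_del_preparing_panic(&my_nb, "sec_crash_key");
 */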
static __always_inline bool __crashkey_is_same_pattern(
struct crashkey_drvdata *drvdata,
const struct sec_key_notifier_param *param)
{
struct crashkey_kelog *keylog = &drvdata->keylog;
const struct sec_key_notifier_param *desired =
&keylog->desired[keylog->sequence];
if (param->keycode == desired->keycode && param->down == desired->down) {
keylog->sequence++;
return true;
}
return false;
}
static __always_inline void __crashkey_clear_received_state(
struct crashkey_drvdata *drvdata,
const struct sec_key_notifier_param *param)
{
struct crashkey_kelog *keylog = &drvdata->keylog;
struct crashkey_timer *timer = &drvdata->timer;
keylog->sequence = 0;
ratelimit_state_init(&timer->rs, timer->interval,
keylog->nr_pattern - 1);
/* NOTE: if the current pattern is the same as the 1st one of the desired
* pattern, advance 'keylog->sequence'.
*/
if (param && __crashkey_is_same_pattern(drvdata, param))
__ratelimit(&timer->rs);
}
static __always_inline void __crashkey_call_crashkey_notify(
struct crashkey_drvdata *drvdata)
{
struct crashkey_notify *notify = &drvdata->notify;
raw_notifier_call_chain(&notify->list, 0, NULL);
}
__ss_static int __crashkey_notifier_call(struct crashkey_drvdata *drvdata,
const struct sec_key_notifier_param *param)
{
struct crashkey_kelog *keylog = &drvdata->keylog;
struct crashkey_timer *timer = &drvdata->timer;
if (!__crashkey_is_same_pattern(drvdata, param))
goto clear_state;
if (!timer->interval || !__ratelimit(&timer->rs)) {
if (keylog->sequence == keylog->nr_pattern)
__crashkey_call_crashkey_notify(drvdata);
else if (timer->interval)
goto clear_state;
}
return NOTIFY_OK;
clear_state:
__crashkey_clear_received_state(drvdata, param);
return NOTIFY_OK;
}
static int sec_crashkey_notifier_call(struct notifier_block *this,
unsigned long type, void *data)
{
struct crashkey_drvdata *drvdata =
container_of(this, struct crashkey_drvdata, nb);
struct sec_key_notifier_param *param = data;
return __crashkey_notifier_call(drvdata, param);
}
__ss_static noinline int __crashkey_parse_dt_name(struct builder *bd,
struct device_node *np)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
return of_property_read_string(np, "sec,name", &drvdata->name);
}
__ss_static int __crashkey_test_dt_debug_level(struct crashkey_drvdata *drvdata,
struct device_node *np, unsigned int sec_dbg_level)
{
struct device *dev = drvdata->bd.dev;
int err;
err = sec_of_test_debug_level(np, "sec,debug_level", sec_dbg_level);
if (err == -ENOENT) {
dev_warn(dev, "this crashkey_dev (%s) will be enabled all sec debug levels!\n",
drvdata->name);
return 0;
} else if (err < 0)
return -ENODEV;
return 0;
}
static noinline int __crashkey_parse_dt_debug_level(struct builder *bd,
struct device_node *np)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
unsigned int sec_dbg_level = sec_debug_level();
return __crashkey_test_dt_debug_level(drvdata, np, sec_dbg_level);
}
__ss_static noinline int __crashkey_parse_dt_panic_msg(struct builder *bd,
struct device_node *np)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
struct crashkey_notify *notify = &drvdata->notify;
return of_property_read_string(np, "sec,panic_msg", &notify->panic_msg);
}
__ss_static noinline int __crashkey_parse_dt_interval(struct builder *bd,
struct device_node *np)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
struct crashkey_timer *timer = &drvdata->timer;
s32 interval;
int err;
err = of_property_read_s32(np, "sec,interval", &interval);
if (err)
return -EINVAL;
timer->interval = (int)interval * HZ;
return 0;
}
__ss_static noinline int __crashkey_parse_dt_desired_pattern(struct builder *bd,
struct device_node *np)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
struct device *dev = bd->dev;
struct crashkey_kelog *keylog = &drvdata->keylog;
struct sec_key_notifier_param *desired;
int nr_pattern;
u32 keycode, down;
int i;
nr_pattern = of_property_count_u32_elems(np, "sec,desired_pattern");
nr_pattern /= 2; /* <keycode, down> */
if (nr_pattern <= 0)
return -EINVAL;
desired = devm_kmalloc_array(dev,
nr_pattern, sizeof(*desired), GFP_KERNEL);
if (!desired)
return -ENOMEM;
keylog->nr_pattern = (size_t)nr_pattern;
keylog->desired = desired;
for (i = 0; i < nr_pattern; i++) {
of_property_read_u32_index(np, "sec,desired_pattern",
2 * i, &keycode);
of_property_read_u32_index(np, "sec,desired_pattern",
2 * i + 1, &down);
desired[i].keycode = keycode;
desired[i].down = down;
}
return 0;
}
static const struct dt_builder __crashkey_dt_builder[] = {
DT_BUILDER(__crashkey_parse_dt_name),
DT_BUILDER(__crashkey_parse_dt_debug_level),
DT_BUILDER(__crashkey_parse_dt_panic_msg),
DT_BUILDER(__crashkey_parse_dt_interval),
DT_BUILDER(__crashkey_parse_dt_desired_pattern),
};
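/* A device-tree node consumed by the parsers above might look like the
 * sketch below. The property names match the __crashkey_parse_dt_* helpers;
 * the node name, keycodes and values are illustrative only, and the optional
 * sec,debug_level property is omitted here.
 *
 *	sec_crash_key {
 *		compatible = "samsung,crashkey";
 *		sec,name = "sec_crash_key";
 *		sec,panic_msg = "Crash Key";
 *		sec,interval = <5>;	// seconds, converted to jiffies
 *		// <keycode down> pairs, expected in this order
 *		sec,desired_pattern = <116 1>, <114 1>, <114 0>, <114 1>;
 *	};
 */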
static noinline int __crashkey_parse_dt(struct builder *bd)
{
return sec_director_parse_dt(bd, __crashkey_dt_builder,
ARRAY_SIZE(__crashkey_dt_builder));
}
__ss_static noinline int __crashkey_probe_prolog(struct builder *bd)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
struct crashkey_notify *notify = &drvdata->notify;
struct crashkey_kelog *keylog = &drvdata->keylog;
struct crashkey_timer *timer = &drvdata->timer;
RAW_INIT_NOTIFIER_HEAD(&notify->list);
ratelimit_state_init(&timer->rs, timer->interval,
keylog->nr_pattern - 1);
return 0;
}
__ss_static void __crashkey_add_to_crashkey_dev_list_locked(
struct crashkey_drvdata *drvdata, struct list_head *head)
{
list_add(&drvdata->list, head);
}
static int __crashkey_add_to_crashkey_dev_list(struct builder *bd)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
mutex_lock(&crashkey_list_lock);
__crashkey_add_to_crashkey_dev_list_locked(drvdata, &crashkey_dev_list);
mutex_unlock(&crashkey_list_lock);
return 0;
}
__ss_static void __crashkey_del_from_crashkey_dev_list_locked(
struct crashkey_drvdata *drvdata)
{
list_del(&drvdata->list);
}
static void __crashkey_del_from_crashkey_dev_list(struct builder *bd)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
mutex_lock(&crashkey_list_lock);
__crashkey_del_from_crashkey_dev_list_locked(drvdata);
mutex_unlock(&crashkey_list_lock);
}
static int __crashkey_panic_on_matched(struct notifier_block *this,
unsigned long type, void *data)
{
struct crashkey_notify *notify =
container_of(this, struct crashkey_notify, panic);
/* NOTE: this is an intended operation to reset the device with errors. */
panic("%s", notify->panic_msg);
return NOTIFY_OK;
}
static int __crashkey_set_panic_on_matched(struct builder *bd)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
struct crashkey_notify *notify = &drvdata->notify;
int err;
/* NOTE: register a calling kernel panic in the end of notifier chain */
notify->panic.notifier_call = __crashkey_panic_on_matched;
notify->panic.priority = INT_MIN;
err = sec_crashkey_add_preparing_panic(&notify->panic, drvdata->name);
if (err)
return err;
return 0;
}
static void __crashkey_unset_panic_on_matched(struct builder *bd)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
struct crashkey_notify *notify = &drvdata->notify;
sec_crashkey_del_preparing_panic(&notify->panic, drvdata->name);
}
__ss_static noinline int __crashkey_init_used_key(struct builder *bd)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
struct device *dev = bd->dev;
struct crashkey_kelog *keylog = &drvdata->keylog;
const struct sec_key_notifier_param *desired = keylog->desired;
unsigned int *used_key;
size_t nr_used_key;
bool is_new;
size_t i, j;
used_key = devm_kmalloc_array(dev,
keylog->nr_pattern, sizeof(*used_key), GFP_KERNEL);
if (!used_key)
return -ENOMEM;
used_key[0] = desired[0].keycode;
nr_used_key = 1;
for (i = 1; i < keylog->nr_pattern; i++) {
for (j = 0, is_new = true; j < nr_used_key; j++) {
if (used_key[j] == desired[i].keycode)
is_new = false;
}
if (is_new)
used_key[nr_used_key++] = desired[i].keycode;
}
keylog->used_key = used_key;
keylog->nr_used_key = nr_used_key;
return 0;
}
static int __crashkey_install_keyboard_notifier(struct builder *bd)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
struct crashkey_kelog *keylog = &drvdata->keylog;
int err;
drvdata->nb.notifier_call = sec_crashkey_notifier_call;
err = sec_kn_register_notifier(&drvdata->nb,
keylog->used_key, keylog->nr_used_key);
return err;
}
static void __crashkey_uninstall_keyboard_notifier(struct builder *bd)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
struct crashkey_kelog *keylog = &drvdata->keylog;
sec_kn_unregister_notifier(&drvdata->nb,
keylog->used_key, keylog->nr_used_key);
}
static noinline int __crashkey_probe_epilog(struct builder *bd)
{
struct crashkey_drvdata *drvdata =
container_of(bd, struct crashkey_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
return 0;
}
static int __crashkey_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct crashkey_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __crashkey_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct crashkey_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static const struct dev_builder __crashkey_dev_builder[] = {
DEVICE_BUILDER(__crashkey_parse_dt, NULL),
DEVICE_BUILDER(__crashkey_probe_prolog, NULL),
DEVICE_BUILDER(__crashkey_add_to_crashkey_dev_list,
__crashkey_del_from_crashkey_dev_list),
DEVICE_BUILDER(__crashkey_set_panic_on_matched,
__crashkey_unset_panic_on_matched),
DEVICE_BUILDER(__crashkey_init_used_key, NULL),
DEVICE_BUILDER(__crashkey_install_keyboard_notifier,
__crashkey_uninstall_keyboard_notifier),
DEVICE_BUILDER(__crashkey_probe_epilog, NULL),
};
static int sec_crashkey_probe(struct platform_device *pdev)
{
return __crashkey_probe(pdev, __crashkey_dev_builder,
ARRAY_SIZE(__crashkey_dev_builder));
}
static int sec_crashkey_remove(struct platform_device *pdev)
{
return __crashkey_remove(pdev, __crashkey_dev_builder,
ARRAY_SIZE(__crashkey_dev_builder));
}
static const struct of_device_id sec_crashkey_match_table[] = {
{ .compatible = "samsung,crashkey" },
{},
};
MODULE_DEVICE_TABLE(of, sec_crashkey_match_table);
static struct platform_driver sec_crashkey_driver = {
.driver = {
.name = "sec,crashkey",
.of_match_table = of_match_ptr(sec_crashkey_match_table),
},
.probe = sec_crashkey_probe,
.remove = sec_crashkey_remove,
};
static int sec_crashkey_suspend(void)
{
struct crashkey_drvdata *drvdata;
list_for_each_entry(drvdata, &crashkey_dev_list, list) {
__crashkey_clear_received_state(drvdata, NULL);
}
return 0;
}
static struct syscore_ops sec_crashkey_syscore_ops = {
.suspend = sec_crashkey_suspend,
};
static int __init sec_crashkey_init(void)
{
int err;
err = platform_driver_register(&sec_crashkey_driver);
if (err)
return err;
err = __of_platform_early_populate_init(sec_crashkey_match_table);
if (err)
return err;
register_syscore_ops(&sec_crashkey_syscore_ops);
return 0;
}
core_initcall(sec_crashkey_init);
static void __exit sec_crashkey_exit(void)
{
unregister_syscore_ops(&sec_crashkey_syscore_ops);
platform_driver_unregister(&sec_crashkey_driver);
}
module_exit(sec_crashkey_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Force key crash driver");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,40 @@
#ifndef __INTERNAL__SEC_CRASHKEY_H__
#define __INTERNAL__SEC_CRASHKEY_H__
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/ratelimit.h>
#include <linux/samsung/bsp/sec_key_notifier.h>
#include <linux/samsung/builder_pattern.h>
struct crashkey_kelog {
const struct sec_key_notifier_param *desired;
size_t nr_pattern;
size_t sequence;
unsigned int *used_key;
size_t nr_used_key;
};
struct crashkey_timer {
struct ratelimit_state rs;
int interval;
};
struct crashkey_notify {
struct raw_notifier_head list;
struct notifier_block panic;
const char *panic_msg;
};
struct crashkey_drvdata {
struct builder bd;
struct list_head list;
struct notifier_block nb;
const char *name;
struct crashkey_kelog keylog;
struct crashkey_timer timer;
struct crashkey_notify notify;
};
#endif /* __INTERNAL__SEC_CRASHKEY_H__ */

View File

@@ -0,0 +1,22 @@
config SEC_CRASHKEY_LONG
tristate "SEC Long key reset driver"
depends on SEC_KEY_NOTIFIER
help
TODO: help is not ready.
config SEC_CRASHKEY_LONG_TEST_FOR_ON_DEVICE
tristate "KUnit test for sec_crashkey_long_test"
depends on KUNIT
depends on SEC_CRASHKEY_LONG
help
TODO: Describe config fully.
If you run this test driver on a device, you SHOULD set this config to 'm' to build the test driver as a module.
config SEC_CRASHKEY_LONG_TEST_FOR_ONLY_UML
tristate "KUnit test for sec_crashkey_long_test"
depends on KUNIT
depends on UML
depends on SEC_CRASHKEY_LONG
help
TODO: Describe config fully.
It is recommended to set this config to 'y'.

View File

@@ -0,0 +1,3 @@
obj-$(CONFIG_SEC_CRASHKEY_LONG) += sec_crashkey_long.o
GCOV_PROFILE_sec_crashkey_long.o := $(CONFIG_KUNIT)

View File

@@ -0,0 +1,566 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2019-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/slab.h>
#include <linux/samsung/of_early_populate.h>
#include <linux/samsung/debug/sec_debug.h>
#include <linux/samsung/debug/sec_crashkey_long.h>
#include <linux/samsung/sec_kunit.h>
#include "sec_crashkey_long.h"
static struct crashkey_long_drvdata *crashkey_long;
static __always_inline bool __crashkey_long_is_probed(void)
{
return !!crashkey_long;
}
__ss_static int __crashkey_long_add_preparing_panic(
struct crashkey_long_drvdata *drvdata, struct notifier_block *nb)
{
struct crashkey_long_notify *notify;
int err;
notify = &drvdata->notify;
err = raw_notifier_chain_register(&notify->list, nb);
if (err) {
struct device *dev = drvdata->bd.dev;
dev_warn(dev, "failed to add a notifier!\n");
dev_warn(dev, "Caller is %pS\n", __builtin_return_address(0));
}
return err;
}
int sec_crashkey_long_add_preparing_panic(struct notifier_block *nb)
{
if (!__crashkey_long_is_probed())
return -EBUSY;
return __crashkey_long_add_preparing_panic(crashkey_long, nb);
}
EXPORT_SYMBOL_GPL(sec_crashkey_long_add_preparing_panic);
__ss_static int __crashkey_long_del_preparing_panic(
struct crashkey_long_drvdata *drvdata, struct notifier_block *nb)
{
struct crashkey_long_notify *notify;
int err;
notify = &drvdata->notify;
err = raw_notifier_chain_unregister(&notify->list, nb);
if (err) {
struct device *dev = drvdata->bd.dev;
dev_warn(dev, "failed to remove a notifier for!\n");
dev_warn(dev, "Caller is %pS\n", __builtin_return_address(0));
}
return err;
}
int sec_crashkey_long_del_preparing_panic(struct notifier_block *nb)
{
if (!__crashkey_long_is_probed())
return -EPROBE_DEFER;
return __crashkey_long_del_preparing_panic(crashkey_long, nb);
}
EXPORT_SYMBOL_GPL(sec_crashkey_long_del_preparing_panic);
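/* Illustrative client usage (a sketch, not part of this driver): a module
 * can be told when the long-key combination is pressed, released, or held
 * past the expiry time. The callback name and its messages are assumptions
 * made for this example only.
 *
 *	static int my_longkey_cb(struct notifier_block *nb,
 *			unsigned long type, void *data)
 *	{
 *		switch (type) {
 *		case SEC_CRASHKEY_LONG_NOTIFY_TYPE_MATCHED:
 *			pr_info("long key pattern pressed\n");
 *			break;
 *		case SEC_CRASHKEY_LONG_NOTIFY_TYPE_UNMATCHED:
 *			pr_info("long key pattern released\n");
 *			break;
 *		default:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_longkey_nb = {
 *		.notifier_call = my_longkey_cb,
 *	};
 *
 *	err = sec_crashkey_long_add_preparing_panic(&my_longkey_nb);
 */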
static inline int __crashkey_long_connect_to_input_evnet(void)
{
struct crashkey_long_keylog *keylog;
unsigned long flags;
int ret = 0;
keylog = &crashkey_long->keylog;
spin_lock_irqsave(&crashkey_long->state_lock, flags);
if (crashkey_long->nb_connected)
goto already_connected;
ret = sec_kn_register_notifier(&crashkey_long->nb,
keylog->used_key, keylog->nr_used_key);
crashkey_long->nb_connected = true;
already_connected:
spin_unlock_irqrestore(&crashkey_long->state_lock, flags);
return ret;
}
int sec_crashkey_long_connect_to_input_evnet(void)
{
if (!__crashkey_long_is_probed())
return -EBUSY;
return __crashkey_long_connect_to_input_evnet();
}
EXPORT_SYMBOL_GPL(sec_crashkey_long_connect_to_input_evnet);
static inline int __crashkey_long_disconnect_from_input_event(void)
{
struct crashkey_long_keylog *keylog;
struct crashkey_long_notify *notify;
unsigned long flags;
int ret = 0;
keylog = &crashkey_long->keylog;
notify = &crashkey_long->notify;
spin_lock_irqsave(&crashkey_long->state_lock, flags);
if (!crashkey_long->nb_connected)
goto already_disconnected;
ret = sec_kn_unregister_notifier(&crashkey_long->nb,
keylog->used_key, keylog->nr_used_key);
crashkey_long->nb_connected = false;
bitmap_zero(keylog->bitmap_received, keylog->sz_bitmap);
already_disconnected:
spin_unlock_irqrestore(&crashkey_long->state_lock, flags);
del_timer(&notify->tl);
return ret;
}
int sec_crashkey_long_disconnect_from_input_event(void)
{
if (!__crashkey_long_is_probed())
return -EBUSY;
return __crashkey_long_disconnect_from_input_event();
}
EXPORT_SYMBOL_GPL(sec_crashkey_long_disconnect_from_input_event);
static void sec_crashkey_long_do_on_expired(struct timer_list *tl)
{
struct crashkey_long_notify *notify =
container_of(tl, struct crashkey_long_notify, tl);
raw_notifier_call_chain(&notify->list,
SEC_CRASHKEY_LONG_NOTIFY_TYPE_EXPIRED, NULL);
}
__ss_static bool __crashkey_long_is_mached_received_pattern(
struct crashkey_long_drvdata *drvdata)
{
struct crashkey_long_keylog *keylog = &drvdata->keylog;
size_t i;
for (i = 0; i < keylog->nr_used_key; i++) {
if (!test_bit(keylog->used_key[i], keylog->bitmap_received))
return false;
}
return true;
}
__ss_static void __crashkey_long_invoke_notifier_on_matched(
struct crashkey_long_notify *notify)
{
raw_notifier_call_chain(&notify->list,
SEC_CRASHKEY_LONG_NOTIFY_TYPE_MATCHED, NULL);
}
__ss_static void __crashkey_long_invoke_timer_on_matched(
struct crashkey_long_notify *notify)
{
unsigned long expires;
struct crashkey_long_drvdata *drvdata;
if (timer_pending(&notify->tl))
return;
expires = jiffies + msecs_to_jiffies(notify->expire_msec);
drvdata = container_of(notify, struct crashkey_long_drvdata, notify);
timer_setup(&notify->tl, sec_crashkey_long_do_on_expired, 0);
mod_timer(&notify->tl, expires);
dev_info(drvdata->bd.dev, "long key timer - start");
}
static void __crashkey_long_on_matched(struct crashkey_long_drvdata *drvdata)
{
struct crashkey_long_notify *notify = &drvdata->notify;
__crashkey_long_invoke_notifier_on_matched(notify);
if (likely(!sec_debug_is_enabled()))
return;
__crashkey_long_invoke_timer_on_matched(notify);
}
__ss_static void __crashkey_long_invoke_notifier_on_unmatched(
struct crashkey_long_notify *notify)
{
raw_notifier_call_chain(&notify->list,
SEC_CRASHKEY_LONG_NOTIFY_TYPE_UNMATCHED, NULL);
}
__ss_static void __crashkey_long_invoke_timer_on_unmatched(
struct crashkey_long_notify *notify)
{
if (!timer_pending(&notify->tl))
return;
del_timer(&notify->tl);
pr_info("long key timer - cancel");
}
static void __crashkey_long_on_unmatched(struct crashkey_long_drvdata *drvdata)
{
struct crashkey_long_notify *notify = &drvdata->notify;
__crashkey_long_invoke_notifier_on_unmatched(notify);
__crashkey_long_invoke_timer_on_unmatched(notify);
}
__ss_static void __crashkey_long_update_bitmap_received(
struct crashkey_long_drvdata *drvdata,
struct sec_key_notifier_param *param)
{
struct crashkey_long_keylog *keylog = &drvdata->keylog;
if (param->down)
set_bit(param->keycode, keylog->bitmap_received);
else
clear_bit(param->keycode, keylog->bitmap_received);
}
static int sec_crashkey_long_notifier_call(struct notifier_block *this,
unsigned long type, void *data)
{
struct crashkey_long_drvdata *drvdata =
container_of(this, struct crashkey_long_drvdata, nb);
struct sec_key_notifier_param *param = data;
unsigned long flags;
spin_lock_irqsave(&drvdata->state_lock, flags);
if (!drvdata->nb_connected) {
spin_unlock_irqrestore(&drvdata->state_lock, flags);
return NOTIFY_DONE;
}
__crashkey_long_update_bitmap_received(drvdata, param);
spin_unlock_irqrestore(&drvdata->state_lock, flags);
if (__crashkey_long_is_mached_received_pattern(drvdata))
__crashkey_long_on_matched(drvdata);
else
__crashkey_long_on_unmatched(drvdata);
return NOTIFY_OK;
}
__ss_static noinline int __crashkey_long_parse_dt_panic_msg(struct builder *bd,
struct device_node *np)
{
struct crashkey_long_drvdata *drvdata =
container_of(bd, struct crashkey_long_drvdata, bd);
struct crashkey_long_notify *notify = &drvdata->notify;
return of_property_read_string(np, "sec,panic_msg", &notify->panic_msg);
}
__ss_static noinline int __crashkey_long_parse_dt_expire_msec(struct builder *bd,
struct device_node *np)
{
struct crashkey_long_drvdata *drvdata =
container_of(bd, struct crashkey_long_drvdata, bd);
struct crashkey_long_notify *notify = &drvdata->notify;
u32 expire_msec;
int err;
err = of_property_read_u32(np, "sec,expire_msec", &expire_msec);
if (err)
return -EINVAL;
notify->expire_msec = (unsigned int)expire_msec;
return 0;
}
__ss_static noinline int __crashkey_long_parse_dt_used_key(struct builder *bd,
struct device_node *np)
{
struct crashkey_long_drvdata *drvdata =
container_of(bd, struct crashkey_long_drvdata, bd);
struct crashkey_long_keylog *keylog = &drvdata->keylog;
struct device *dev = bd->dev;
int nr_used_key;
unsigned int *used_key;
u32 event;
int i;
nr_used_key = of_property_count_u32_elems(np, "sec,used_key");
if (nr_used_key <= 0) {
dev_err(dev, "reset-key event list is not specified!\n");
return -ENODEV;
}
used_key = devm_kcalloc(dev, nr_used_key, sizeof(*used_key),
GFP_KERNEL);
if (!used_key)
return -ENOMEM;
for (i = 0; i < nr_used_key; i++) {
of_property_read_u32_index(np, "sec,used_key", i, &event);
used_key[i] = event;
}
keylog->used_key = used_key;
keylog->nr_used_key = (size_t)nr_used_key;
return 0;
}
static const struct dt_builder __crashkey_long_dt_builder[] = {
DT_BUILDER(__crashkey_long_parse_dt_panic_msg),
DT_BUILDER(__crashkey_long_parse_dt_expire_msec),
DT_BUILDER(__crashkey_long_parse_dt_used_key),
};
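/* A device-tree node consumed by the parsers above might look like the
 * sketch below. The property names match the __crashkey_long_parse_dt_*
 * helpers; the keycodes and the timeout are illustrative only.
 *
 *	sec_crashkey_long {
 *		compatible = "samsung,crashkey-long";
 *		sec,panic_msg = "Long Key Press";
 *		sec,expire_msec = <6000>;	// hold time in milliseconds
 *		sec,used_key = <116 114>;	// keycodes that must be held together
 *	};
 */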
static noinline int __crashkey_long_parse_dt(struct builder *bd)
{
return sec_director_parse_dt(bd, __crashkey_long_dt_builder,
ARRAY_SIZE(__crashkey_long_dt_builder));
}
__ss_static noinline int __crashkey_long_probe_prolog(struct builder *bd)
{
struct crashkey_long_drvdata *drvdata =
container_of(bd, struct crashkey_long_drvdata, bd);
struct crashkey_long_notify *notify = &drvdata->notify;
RAW_INIT_NOTIFIER_HEAD(&notify->list);
spin_lock_init(&drvdata->state_lock);
return 0;
}
__ss_static noinline int __crashkey_long_alloc_bitmap_received(struct builder *bd)
{
struct crashkey_long_drvdata *drvdata =
container_of(bd, struct crashkey_long_drvdata, bd);
struct crashkey_long_keylog *keylog = &drvdata->keylog;
unsigned long *bitmap_received;
bitmap_received = devm_bitmap_zalloc(bd->dev, KEY_MAX, GFP_KERNEL);
if (!bitmap_received)
return -ENOMEM;
keylog->bitmap_received = bitmap_received;
keylog->sz_bitmap = BITS_TO_LONGS(KEY_MAX) * sizeof(unsigned long);
return 0;
}
static int __crashkey_long_panic_on_expired(struct notifier_block *this,
unsigned long type, void *v)
{
struct crashkey_long_notify *notify =
container_of(this, struct crashkey_long_notify, panic);
if (type != SEC_CRASHKEY_LONG_NOTIFY_TYPE_EXPIRED)
return NOTIFY_DONE;
pr_err("*** Force trigger kernel panic before triggering hard reset ***\n");
/* NOTE: this is an intended operation to reset the device with errors. */
panic("%s", notify->panic_msg);
return NOTIFY_OK;
}
static int __crashkey_long_set_panic_on_expired(struct builder *bd)
{
struct crashkey_long_drvdata *drvdata =
container_of(bd, struct crashkey_long_drvdata, bd);
struct crashkey_long_notify *notify = &drvdata->notify;
int err;
/* NOTE: register a calling kernel panic in the end of notifier chain */
notify->panic.notifier_call = __crashkey_long_panic_on_expired;
notify->panic.priority = INT_MIN;
err = __crashkey_long_add_preparing_panic(drvdata, &notify->panic);
if (err)
return err;
return 0;
}
static void __crashkey_long_unset_panic_on_expired(struct builder *bd)
{
struct crashkey_long_drvdata *drvdata =
container_of(bd, struct crashkey_long_drvdata, bd);
struct crashkey_long_notify *notify = &drvdata->notify;
__crashkey_long_del_preparing_panic(drvdata, &notify->panic);
}
static int __crashkey_long_install_keyboard_notifier(struct builder *bd)
{
struct crashkey_long_drvdata *drvdata =
container_of(bd, struct crashkey_long_drvdata, bd);
struct crashkey_long_keylog *keylog = &drvdata->keylog;
int err;
drvdata->nb.notifier_call = sec_crashkey_long_notifier_call;
err = sec_kn_register_notifier(&drvdata->nb,
keylog->used_key, keylog->nr_used_key);
if (err)
return err;
drvdata->nb_connected = true;
return 0;
}
static void __crashkey_long_uninstall_keyboard_notifier(struct builder *bd)
{
struct crashkey_long_drvdata *drvdata =
container_of(bd, struct crashkey_long_drvdata, bd);
struct crashkey_long_keylog *keylog = &drvdata->keylog;
sec_kn_unregister_notifier(&drvdata->nb,
keylog->used_key, keylog->nr_used_key);
}
static int __crashkey_long_probe_epilog(struct builder *bd)
{
struct crashkey_long_drvdata *drvdata =
container_of(bd, struct crashkey_long_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
crashkey_long = drvdata; /* set a singleton */
return 0;
}
static void __crashkey_long_remove_prolog(struct builder *bd)
{
/* FIXME: This is not a graceful exit. */
crashkey_long = NULL;
}
static int __crashkey_long_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct crashkey_long_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __crashkey_long_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct crashkey_long_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static const struct dev_builder __crashkey_long_dev_builder[] = {
DEVICE_BUILDER(__crashkey_long_parse_dt, NULL),
DEVICE_BUILDER(__crashkey_long_probe_prolog, NULL),
DEVICE_BUILDER(__crashkey_long_alloc_bitmap_received, NULL),
DEVICE_BUILDER(__crashkey_long_set_panic_on_expired,
__crashkey_long_unset_panic_on_expired),
DEVICE_BUILDER(__crashkey_long_install_keyboard_notifier,
__crashkey_long_uninstall_keyboard_notifier),
DEVICE_BUILDER(__crashkey_long_probe_epilog,
__crashkey_long_remove_prolog),
};
static int sec_crashkey_long_probe(struct platform_device *pdev)
{
return __crashkey_long_probe(pdev, __crashkey_long_dev_builder,
ARRAY_SIZE(__crashkey_long_dev_builder));
}
static int sec_crashkey_long_remove(struct platform_device *pdev)
{
return __crashkey_long_remove(pdev, __crashkey_long_dev_builder,
ARRAY_SIZE(__crashkey_long_dev_builder));
}
static const struct of_device_id sec_crashkeylong_match_table[] = {
{ .compatible = "samsung,crashkey-long" },
{},
};
MODULE_DEVICE_TABLE(of, sec_crashkeylong_match_table);
static struct platform_driver sec_crashkey_long_driver = {
.driver = {
.name = "sec,crashkey-long",
.of_match_table = of_match_ptr(sec_crashkeylong_match_table),
},
.probe = sec_crashkey_long_probe,
.remove = sec_crashkey_long_remove,
};
static int __init sec_crashkey_long_init(void)
{
int err;
err = platform_driver_register(&sec_crashkey_long_driver);
if (err)
return err;
err = __of_platform_early_populate_init(sec_crashkeylong_match_table);
if (err)
return err;
return 0;
}
core_initcall(sec_crashkey_long_init);
static void __exit sec_crashkey_long_exit(void)
{
platform_driver_unregister(&sec_crashkey_long_driver);
}
module_exit(sec_crashkey_long_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Long key reset driver");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,34 @@
#ifndef __INTERNAL__SEC_CRASHKEY_LONG_H__
#define __INTERNAL__SEC_CRASHKEY_LONG_H__
#include <linux/notifier.h>
#include <linux/timer.h>
#include <linux/samsung/bsp/sec_key_notifier.h>
#include <linux/samsung/builder_pattern.h>
struct crashkey_long_keylog {
unsigned long *bitmap_received;
size_t sz_bitmap;
const unsigned int *used_key;
size_t nr_used_key;
};
struct crashkey_long_notify {
struct timer_list tl;
unsigned int expire_msec;
struct raw_notifier_head list;
struct notifier_block panic;
const char *panic_msg;
};
struct crashkey_long_drvdata {
struct builder bd;
spinlock_t state_lock; /* key_notifer and input events */
bool nb_connected;
struct notifier_block nb;
struct crashkey_long_keylog keylog;
struct crashkey_long_notify notify;
};
#endif /* __INTERNAL__SEC_CRASHKEY_LONG_H__ */

View File

@@ -0,0 +1,5 @@
config SEC_DEBUG_REGION
tristate "SEC Memory pool for debugging features"
select GENERIC_ALLOCATOR
help
TODO: help is not ready.

View File

@@ -0,0 +1,7 @@
obj-$(CONFIG_SEC_DEBUG_REGION) += sec_debug_region.o
sec_debug_region-objs := sec_debug_region_main.o \
sec_debug_region_pool.o \
sec_debug_region_gen_pool.o \
sec_debug_region_cma_pool.o \
sec_debug_region_slab_pool.o

View File

@@ -0,0 +1,62 @@
#ifndef __INTERNAL__SEC_DEBUG_REGION_H__
#define __INTERNAL__SEC_DEBUG_REGION_H__
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/samsung/builder_pattern.h>
struct dbg_region_drvdata;
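/* Back-end memory-pool operations. Each back-end (gen_pool, CMA, slab)
 * fills this table: probe()/remove() set up and tear down the pool, while
 * alloc() returns a kernel virtual address and reports the matching physical
 * address through its last argument; free() releases both.
 */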
struct dbg_region_pool {
int (*probe)(struct dbg_region_drvdata *);
void (*remove)(struct dbg_region_drvdata *);
void *(*alloc)(struct dbg_region_drvdata *, size_t, phys_addr_t *);
void (*free)(struct dbg_region_drvdata *, size_t, void *, phys_addr_t);
};
struct dbg_region_root {
struct list_head clients;
uint32_t magic;
phys_addr_t __root; /* physical address of myself */
} __packed __aligned(1);
enum {
RMEM_TYPE_NOMAP = 0,
RMEM_TYPE_MAPPED,
RMEM_TYPE_REUSABLE,
RMEM_TYPE_SLAB,
};
struct dbg_region_drvdata {
struct builder bd;
struct reserved_mem *rmem;
unsigned int rmem_type;
phys_addr_t phys;
size_t size;
struct mutex lock;
unsigned long virt;
const struct dbg_region_pool *pool;
void *private;
struct dbg_region_root *root;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs;
#endif
};
/* sec_debug_region_pool.c */
extern int __dbg_region_pool_init(struct builder *bd);
extern void __dbg_region_pool_exit(struct builder *bd);
/* sec_debug_region_gen_pool.c */
extern const struct dbg_region_pool *__dbg_region_gen_pool_creator(void);
/* sec_debug_region_cma_pool.c */
extern const struct dbg_region_pool *__dbg_region_cma_pool_creator(void);
/* sec_debug_region_slab_pool.c */
extern const struct dbg_region_pool *__dbg_region_slab_pool_creator(void);
#endif /* __INTERNAL__SEC_DEBUG_REGION_H__ */

View File

@@ -0,0 +1,67 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2022-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/of_reserved_mem.h>
#include "sec_debug_region.h"
static int dbg_region_cma_pool_probe(struct dbg_region_drvdata *drvdata)
{
struct device *dev = drvdata->bd.dev;
struct device_node *np = dev_of_node(dev);
int err;
err = of_reserved_mem_device_init_by_idx(dev, np, 0);
if (err) {
dev_err(dev, "failed to initialize reserved mem (%d)\n", err);
return err;
}
return 0;
}
static void dbg_region_cma_pool_remove(struct dbg_region_drvdata *drvdata)
{
}
static void *dbg_region_cma_pool_alloc(struct dbg_region_drvdata *drvdata,
size_t size, phys_addr_t *__phys)
{
struct device *dev = drvdata->bd.dev;
void *vaddr;
dma_addr_t phys;
vaddr = dma_alloc_wc(dev, PAGE_ALIGN(size), &phys, GFP_KERNEL);
if (!vaddr)
return ERR_PTR(-ENOMEM);
*__phys = (phys_addr_t)phys;
return vaddr;
}
static void dbg_region_cma_pool_free(struct dbg_region_drvdata *drvdata,
size_t size, void *vaddr, phys_addr_t phys)
{
struct device *dev = drvdata->bd.dev;
dma_free_wc(dev, PAGE_ALIGN(size), vaddr, (dma_addr_t)phys);
}
static const struct dbg_region_pool dbg_region_cma_pool = {
.probe = dbg_region_cma_pool_probe,
.remove = dbg_region_cma_pool_remove,
.alloc = dbg_region_cma_pool_alloc,
.free = dbg_region_cma_pool_free,
};
const struct dbg_region_pool *__dbg_region_cma_pool_creator(void)
{
return &dbg_region_cma_pool;
}

View File

@@ -0,0 +1,118 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2022-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include "sec_debug_region.h"
static int __dbg_region_prepare_pool(struct dbg_region_drvdata *drvdata)
{
struct device *dev = drvdata->bd.dev;
unsigned int rmem_type = drvdata->rmem_type;
void __iomem *virt;
if (rmem_type == RMEM_TYPE_NOMAP)
virt = devm_ioremap_wc(dev, drvdata->phys, drvdata->size);
else
virt = phys_to_virt(drvdata->phys);
if (!virt)
return -EFAULT;
drvdata->virt = (unsigned long)virt;
return 0;
}
static int __dbg_region_gen_pool_create(struct dbg_region_drvdata *drvdata)
{
struct device *dev = drvdata->bd.dev;
const int min_alloc_order = ilog2(cache_line_size());
struct gen_pool *pool;
int err;
pool = devm_gen_pool_create(dev, min_alloc_order, -1, "sec_dbg_region");
if (IS_ERR(pool)) {
err = PTR_ERR(pool);
goto err_gen_pool_create;
}
err = gen_pool_add_virt(pool, drvdata->virt, drvdata->phys,
drvdata->size, -1);
if (err) {
err = -ENOMEM;
goto err_gen_pool_add_virt;
}
drvdata->private = pool;
return 0;
err_gen_pool_add_virt:
gen_pool_destroy(pool);
err_gen_pool_create:
return err;
}
static int dbg_region_gen_pool_probe(struct dbg_region_drvdata *drvdata)
{
int err;
err = __dbg_region_prepare_pool(drvdata);
if (err)
return err;
err = __dbg_region_gen_pool_create(drvdata);
if (err)
return err;
return 0;
}
static void dbg_region_gen_pool_remove(struct dbg_region_drvdata *drvdata)
{
struct gen_pool *pool = drvdata->private;
gen_pool_destroy(pool);
}
static void *dbg_region_gen_pool_alloc(struct dbg_region_drvdata *drvdata,
size_t size, phys_addr_t *phys)
{
struct gen_pool *pool = drvdata->private;
unsigned long virt;
virt = gen_pool_alloc(pool, size);
if (!virt)
return ERR_PTR(-ENOMEM);
*phys = gen_pool_virt_to_phys(pool, virt);
return (void *)virt;
}
static void dbg_region_gen_pool_free(struct dbg_region_drvdata *drvdata,
size_t size, void *virt, phys_addr_t phys)
{
struct gen_pool *pool = drvdata->private;
gen_pool_free(pool, (unsigned long)virt, size);
}
static const struct dbg_region_pool dbg_region_gen_pool = {
.probe = dbg_region_gen_pool_probe,
.remove = dbg_region_gen_pool_remove,
.alloc = dbg_region_gen_pool_alloc,
.free = dbg_region_gen_pool_free,
};
const struct dbg_region_pool *__dbg_region_gen_pool_creator(void)
{
return &dbg_region_gen_pool;
}

View File

@@ -0,0 +1,556 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2017-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/samsung/sec_of.h>
#include <linux/samsung/debug/sec_debug.h>
#include <linux/samsung/debug/sec_debug_region.h>
#include "sec_debug_region.h"
static struct dbg_region_drvdata *dbg_region;
static __always_inline bool __dbg_region_is_probed(void)
{
return !!dbg_region;
}
static void *__dbg_region_alloc(struct dbg_region_drvdata *drvdata,
size_t size, phys_addr_t *phys)
{
const struct dbg_region_pool *pool = drvdata->pool;
return pool->alloc(drvdata, size, phys);
}
static void __dbg_region_free(struct dbg_region_drvdata *drvdata,
size_t size, void *virt, phys_addr_t phys)
{
const struct dbg_region_pool *pool = drvdata->pool;
return pool->free(drvdata, size, virt, phys);
}
static struct sec_dbg_region_client *__dbg_region_find_locked(
struct dbg_region_drvdata *drvdata, uint32_t unique_id)
{
struct sec_dbg_region_client *client;
list_for_each_entry(client, &drvdata->root->clients, list) {
if (client->unique_id == unique_id)
return client;
}
return ERR_PTR(-ENOENT);
}
static struct sec_dbg_region_client *__dbg_region_alloc_client_data(
struct dbg_region_drvdata *drvdata)
{
struct sec_dbg_region_client *client;
phys_addr_t __client;
client = __dbg_region_alloc(drvdata,
sizeof(struct sec_dbg_region_client), &__client);
if (IS_ERR_OR_NULL(client))
return ERR_PTR(-ENOMEM);
client->__client = __client;
return client;
}
static void __dbg_region_free_client_data(struct dbg_region_drvdata *drvdata,
struct sec_dbg_region_client *client)
{
__dbg_region_free(drvdata, sizeof(*client), client, client->__client);
}
static void *__dbg_region_alloc_client_memory(struct dbg_region_drvdata *drvdata,
size_t size, phys_addr_t *phys)
{
void *virt;
virt = __dbg_region_alloc(drvdata, size, phys);
if (IS_ERR_OR_NULL(virt))
return ERR_PTR(-ENOMEM);
memset_io(virt, 0x0, size);
return virt;
}
static void __dbg_region_free_client_memory(struct dbg_region_drvdata *drvdata,
struct sec_dbg_region_client *client)
{
__dbg_region_free(drvdata, client->size,
(void *)client->virt, client->phys);
}
static inline struct sec_dbg_region_client *__dbg_region_alloc_client(
struct dbg_region_drvdata *drvdata,
uint32_t unique_id, size_t size)
{
struct sec_dbg_region_client *client;
void *virt;
int err;
mutex_lock(&drvdata->lock);
client = __dbg_region_find_locked(drvdata, unique_id);
if (!IS_ERR(client)) {
err = -EINVAL;
goto err_duplicated_unique_id;
}
client = __dbg_region_alloc_client_data(drvdata);
if (IS_ERR_OR_NULL(client)) {
err = PTR_ERR(client);
goto err_client_data;
}
virt = __dbg_region_alloc_client_memory(drvdata, size, &client->phys);
if (IS_ERR_OR_NULL(virt)) {
err = -ENOMEM;
goto err_client_memory;
}
client->virt = (unsigned long)virt;
client->magic = SEC_DBG_REGION_CLIENT_MAGIC;
client->unique_id = unique_id;
client->size = size;
client->name = NULL; /* optional for each client */
list_add(&client->list, &drvdata->root->clients);
mutex_unlock(&drvdata->lock);
return client;
err_client_memory:
__dbg_region_free_client_data(drvdata, client);
err_client_data:
err_duplicated_unique_id:
mutex_unlock(&drvdata->lock);
return ERR_PTR(err);
}
struct sec_dbg_region_client *sec_dbg_region_alloc(uint32_t unique_id,
size_t size)
{
if (!__dbg_region_is_probed())
return ERR_PTR(-EBUSY);
return __dbg_region_alloc_client(dbg_region, unique_id, size);
}
EXPORT_SYMBOL_GPL(sec_dbg_region_alloc);
static inline int __dbg_region_free_client(struct dbg_region_drvdata *drvdata,
struct sec_dbg_region_client *client)
{
mutex_lock(&drvdata->lock);
if (client->magic != SEC_DBG_REGION_CLIENT_MAGIC) {
mutex_unlock(&drvdata->lock);
/* NOTE: should be detected during development */
BUG_ON(sec_debug_is_enabled());
} else {
list_del(&client->list);
client->magic = 0x0;
mutex_unlock(&drvdata->lock);
__dbg_region_free_client_memory(drvdata, client);
__dbg_region_free_client_data(drvdata, client);
}
return 0;
}
int sec_dbg_region_free(struct sec_dbg_region_client *client)
{
if (!__dbg_region_is_probed())
return -EBUSY;
return __dbg_region_free_client(dbg_region, client);
}
EXPORT_SYMBOL_GPL(sec_dbg_region_free);
static inline const struct sec_dbg_region_client *__dbg_region_find(
struct dbg_region_drvdata *drvdata, uint32_t unique_id)
{
const struct sec_dbg_region_client *client;
mutex_lock(&drvdata->lock);
client = __dbg_region_find_locked(drvdata, unique_id);
mutex_unlock(&drvdata->lock);
return client;
}
const struct sec_dbg_region_client *sec_dbg_region_find(uint32_t unique_id)
{
if (!__dbg_region_is_probed())
return ERR_PTR(-EBUSY);
return __dbg_region_find(dbg_region, unique_id);
}
EXPORT_SYMBOL_GPL(sec_dbg_region_find);
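/* Illustrative client usage (a sketch, not part of this driver): a client
 * reserves a chunk of the debug region under a unique 32-bit id and can look
 * it up again later. The id, size and name below are assumptions made for
 * this example only.
 *
 *	#define MY_DBG_REGION_ID	0x4D594944	// hypothetical tag
 *
 *	struct sec_dbg_region_client *client;
 *
 *	client = sec_dbg_region_alloc(MY_DBG_REGION_ID, SZ_4K);
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *
 *	client->name = "my_debug_blob";	// optional, shown in debugfs
 *	memcpy((void *)client->virt, data, len);	// len <= client->size
 *	...
 *	// elsewhere: sec_dbg_region_find(MY_DBG_REGION_ID);
 *	sec_dbg_region_free(client);
 */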
static noinline int __dbg_region_parse_dt_memory_region(struct builder *bd,
struct device_node *np)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
struct device *dev = bd->dev;
struct device_node *mem_np;
struct reserved_mem *rmem;
mem_np = of_parse_phandle(np, "memory-region", 0);
if (!mem_np)
return -EINVAL;
rmem = of_reserved_mem_lookup(mem_np);
if (!rmem) {
dev_warn(dev, "failed to get a reserved memory (%s)\n",
mem_np->name);
return -EFAULT;
}
drvdata->rmem = rmem;
return 0;
}
static bool __dbg_region_is_in_reserved_mem_bound(
const struct reserved_mem *rmem,
phys_addr_t base, phys_addr_t size)
{
phys_addr_t rmem_base = rmem->base;
phys_addr_t rmem_end = rmem_base + rmem->size - 1;
phys_addr_t end = base + size - 1;
if ((base >= rmem_base) && (end <= rmem_end))
return true;
return false;
}
static int __dbg_region_use_partial_reserved_mem(
struct dbg_region_drvdata *drvdata, struct device_node *np)
{
struct reserved_mem *rmem = drvdata->rmem;
phys_addr_t base;
phys_addr_t size;
int err;
err = sec_of_parse_reg_prop(np, &base, &size);
if (err)
return err;
if (!__dbg_region_is_in_reserved_mem_bound(rmem, base, size))
return -ERANGE;
drvdata->phys = base;
drvdata->size = size;
return 0;
}
static int __dbg_region_use_entire_reserved_mem(
struct dbg_region_drvdata *drvdata)
{
struct reserved_mem *rmem = drvdata->rmem;
drvdata->phys = rmem->base;
drvdata->size = rmem->size;
return 0;
}
static noinline int __dbg_region_parse_dt_partial_reserved_mem(struct builder *bd,
struct device_node *np)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
int err;
if (of_property_read_bool(np, "sec,use-partial_reserved_mem"))
err = __dbg_region_use_partial_reserved_mem(drvdata, np);
else
err = __dbg_region_use_entire_reserved_mem(drvdata);
if (err)
return -EFAULT;
return 0;
}
static noinline int __dbg_region_parse_dt_set_rmem_type(struct builder *bd,
struct device_node *np)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
struct device_node *mem_np;
mem_np = of_parse_phandle(np, "memory-region", 0);
if (!mem_np)
return -EINVAL;
if (of_property_read_bool(mem_np, "sec,type-slab"))
drvdata->rmem_type = RMEM_TYPE_SLAB;
else if (of_property_read_bool(mem_np, "no-map"))
drvdata->rmem_type = RMEM_TYPE_NOMAP;
else if (of_property_read_bool(mem_np, "reusable"))
drvdata->rmem_type = RMEM_TYPE_REUSABLE;
else
drvdata->rmem_type = RMEM_TYPE_MAPPED;
return 0;
}
static const struct dt_builder __dbg_region_dt_builder[] = {
DT_BUILDER(__dbg_region_parse_dt_memory_region),
DT_BUILDER(__dbg_region_parse_dt_partial_reserved_mem),
DT_BUILDER(__dbg_region_parse_dt_set_rmem_type),
};
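/* A device-tree layout consumed by the parsers above might look like the
 * sketch below (addresses and sizes are illustrative only; a "no-map"
 * reserved region selects RMEM_TYPE_NOMAP):
 *
 *	reserved-memory {
 *		sec_debug_rmem: sec_debug_region@b0000000 {
 *			reg = <0x0 0xb0000000 0x0 0x00200000>;
 *			no-map;
 *		};
 *	};
 *
 *	sec_debug_region {
 *		compatible = "samsung,debug_region";
 *		memory-region = <&sec_debug_rmem>;
 *	};
 */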
static noinline int __dbg_region_parse_dt(struct builder *bd)
{
return sec_director_parse_dt(bd, __dbg_region_dt_builder,
ARRAY_SIZE(__dbg_region_dt_builder));
}
static noinline int __dbg_region_probe_prolog(struct builder *bd)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
mutex_init(&drvdata->lock);
return 0;
}
static noinline void __dbg_region_remove_epilog(struct builder *bd)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
mutex_destroy(&drvdata->lock);
}
static noinline int __dbg_region_create_root(struct builder *bd)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
struct dbg_region_root *root;
phys_addr_t __root;
if (drvdata->rmem_type != RMEM_TYPE_SLAB) {
root = __dbg_region_alloc(drvdata,
sizeof(struct dbg_region_root), &__root);
} else {
root = phys_to_virt(drvdata->phys);
__root = drvdata->phys;
}
pr_debug("root = %px\n", root);
if (IS_ERR_OR_NULL(root))
return -ENOMEM;
root->__root = __root;
root->magic = SEC_DBG_REGION_ROOT_MAGIC;
INIT_LIST_HEAD(&root->clients);
drvdata->root = root;
return 0;
}
static noinline void __dbg_region_delete_root(struct builder *bd)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
struct dbg_region_root *root = drvdata->root;
if (drvdata->rmem_type != RMEM_TYPE_SLAB)
__dbg_region_free(drvdata, sizeof(struct dbg_region_root),
root, root->__root);
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
static int sec_dbg_region_dbgfs_show_all(struct seq_file *m, void *unsed)
{
struct dbg_region_drvdata *drvdata = m->private;
struct dbg_region_root *root = drvdata->root;
const struct sec_dbg_region_client *client;
size_t sz_used = sizeof(*root);
seq_printf(m, "%pa++%pa - %s\n",
&drvdata->phys, &drvdata->size,
dev_name(drvdata->bd.dev));
seq_puts(m, "\nclients:\n");
mutex_lock(&drvdata->lock);
list_for_each_entry(client, &root->clients, list) {
seq_printf(m, "%pa++%pa %7zu - %s (0x%08X)\n",
&client->phys, &client->size, client->size,
client->name, client->unique_id);
sz_used += sizeof(*client);
sz_used += client->size;
}
mutex_unlock(&drvdata->lock);
seq_puts(m, "\n");
seq_printf(m, " - Total : %zu\n", drvdata->size);
seq_printf(m, " - Used : %zu\n", sz_used);
seq_puts(m, "\n");
return 0;
}
static int sec_dbg_region_dbgfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sec_dbg_region_dbgfs_show_all,
inode->i_private);
}
static const struct file_operations sec_dbg_region_dgbfs_fops = {
.open = sec_dbg_region_dbgfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
static int __dbg_region_debugfs_create(struct builder *bd)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
drvdata->dbgfs = debugfs_create_file("sec_debug_region", 0440,
NULL, drvdata, &sec_dbg_region_dgbfs_fops);
return 0;
}
static void __dbg_region_debugfs_remove(struct builder *bd)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
debugfs_remove(drvdata->dbgfs);
}
#else
static int __dbg_region_debugfs_create(struct builder *bd) { return 0; }
static void __dbg_region_debugfs_remove(struct builder *bd) {}
#endif
static noinline int __dbg_region_probe_epilog(struct builder *bd)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
dbg_region = drvdata; /* set a singleton */
return 0;
}
static noinline void __dbg_region_remove_prolog(struct builder *bd)
{
/* FIXME: This is not a graceful exit. */
dbg_region = NULL;
}
static int __dbg_region_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct dbg_region_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __dbg_region_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct dbg_region_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static const struct dev_builder __dbg_region_dev_builder[] = {
DEVICE_BUILDER(__dbg_region_parse_dt, NULL),
DEVICE_BUILDER(__dbg_region_probe_prolog, __dbg_region_remove_epilog),
DEVICE_BUILDER(__dbg_region_pool_init, __dbg_region_pool_exit),
DEVICE_BUILDER(__dbg_region_create_root, __dbg_region_delete_root),
DEVICE_BUILDER(__dbg_region_debugfs_create,
__dbg_region_debugfs_remove),
DEVICE_BUILDER(__dbg_region_probe_epilog, __dbg_region_remove_prolog),
};
static int sec_dbg_region_probe(struct platform_device *pdev)
{
return __dbg_region_probe(pdev, __dbg_region_dev_builder,
ARRAY_SIZE(__dbg_region_dev_builder));
}
static int sec_dbg_region_remove(struct platform_device *pdev)
{
return __dbg_region_remove(pdev, __dbg_region_dev_builder,
ARRAY_SIZE(__dbg_region_dev_builder));
}
static const struct of_device_id sec_dbg_region_match_table[] = {
{ .compatible = "samsung,debug_region" },
{},
};
MODULE_DEVICE_TABLE(of, sec_dbg_region_match_table);
static struct platform_driver sec_dbg_region_driver = {
.driver = {
.name = "sec,debug_region",
.of_match_table = of_match_ptr(sec_dbg_region_match_table),
},
.probe = sec_dbg_region_probe,
.remove = sec_dbg_region_remove,
};
static __init int sec_dbg_region_init(void)
{
return platform_driver_register(&sec_dbg_region_driver);
}
core_initcall_sync(sec_dbg_region_init);
static __exit void sec_dbg_region_exit(void)
{
platform_driver_unregister(&sec_dbg_region_driver);
}
module_exit(sec_dbg_region_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Memory pool for debugging features");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,72 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2022-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include "sec_debug_region.h"
static bool __dbg_region_pool_sanity_check(const struct dbg_region_pool *pool)
{
return !(!pool->probe || !pool->remove || !pool->alloc || !pool->free);
}
static const struct dbg_region_pool *
__dbg_region_pool_creator(struct dbg_region_drvdata *drvdata)
{
struct device *dev = drvdata->bd.dev;
unsigned int rmem_type = drvdata->rmem_type;
const struct dbg_region_pool *pool;
switch (rmem_type) {
case RMEM_TYPE_NOMAP:
case RMEM_TYPE_MAPPED:
pool = __dbg_region_gen_pool_creator();
break;
case RMEM_TYPE_REUSABLE:
pool = __dbg_region_cma_pool_creator();
break;
case RMEM_TYPE_SLAB:
pool = __dbg_region_slab_pool_creator();
break;
default:
dev_warn(dev, "%u is not a supported or deprecated rmem-type\n",
rmem_type);
pool = ERR_PTR(-ENOENT);
}
if (IS_ERR(pool))
return pool;
if (!__dbg_region_pool_sanity_check(pool))
return ERR_PTR(-EINVAL);
return pool;
}
static void __dbg_region_pool_factory(struct dbg_region_drvdata *drvdata)
{
drvdata->pool = __dbg_region_pool_creator(drvdata);
}
int __dbg_region_pool_init(struct builder *bd)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
__dbg_region_pool_factory(drvdata);
if (IS_ERR_OR_NULL(drvdata->pool))
return -ENODEV;
return drvdata->pool->probe(drvdata);
}
void __dbg_region_pool_exit(struct builder *bd)
{
struct dbg_region_drvdata *drvdata =
container_of(bd, struct dbg_region_drvdata, bd);
drvdata->pool->remove(drvdata);
}

View File

@@ -0,0 +1,52 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2024 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/io.h>
#include <linux/slab.h>
#include "sec_debug_region.h"
static int dbg_region_slab_pool_probe(struct dbg_region_drvdata *drvdata)
{
return 0;
}
static void dbg_region_slab_pool_remove(struct dbg_region_drvdata *drvdata)
{
}
static void *dbg_region_slab_pool_alloc(struct dbg_region_drvdata *drvdata,
size_t size, phys_addr_t *__phys)
{
void *vaddr;
vaddr = kzalloc(size, GFP_KERNEL);
if (!vaddr)
return ERR_PTR(-ENOMEM);
*__phys = virt_to_phys(vaddr);
return vaddr;
}
static void dbg_region_slab_pool_free(struct dbg_region_drvdata *drvdata,
size_t size, void *vaddr, phys_addr_t phys)
{
kfree(vaddr);
}
static const struct dbg_region_pool dbg_region_slab_pool = {
.probe = dbg_region_slab_pool_probe,
.remove = dbg_region_slab_pool_remove,
.alloc = dbg_region_slab_pool_alloc,
.free = dbg_region_slab_pool_free,
};
const struct dbg_region_pool *__dbg_region_slab_pool_creator(void)
{
return &dbg_region_slab_pool;
}

View File

@@ -0,0 +1,25 @@
config SEC_LOG_BUF
tristate "SEC Kernel Log Buffer"
help
TODO: help is not ready.
config SEC_LOG_BUF_USING_TP_CONSOLE
bool "SEC Kernel Log Buffer - Trace Console Back-End"
depends on SEC_LOG_BUF
default n
help
TODO: help is not ready.
config SEC_LOG_BUF_USING_KPROBE
bool "SEC Kernel Log Buffer - Kprobe Back-End"
depends on SEC_LOG_BUF
default n
help
TODO: help is not ready.
config SEC_LOG_BUF_USING_VH_LOGBUF
bool "SEC Kernel Log Buffer - Android Vendor-Hook Back-End"
depends on SEC_LOG_BUF
default n
help
TODO: help is not ready.

View File

@@ -0,0 +1,17 @@
obj-$(CONFIG_SEC_LOG_BUF) += sec_log_buf.o
sec_log_buf-objs := sec_log_buf_main.o \
sec_log_buf_logger.o \
sec_log_buf_builtin.o \
sec_log_buf_console.o \
sec_log_buf_last_kmsg.o \
sec_log_buf_ap_klog.o
sec_log_buf-$(CONFIG_DEBUG_FS) += sec_log_buf_debugfs.o
sec_log_buf-$(CONFIG_SEC_LOG_BUF_USING_TP_CONSOLE) += sec_log_buf_tp_console.o
sec_log_buf-$(CONFIG_SEC_LOG_BUF_USING_KPROBE) += sec_log_buf_kprobe.o
sec_log_buf-$(CONFIG_SEC_LOG_BUF_USING_VH_LOGBUF) += sec_log_buf_vh_logbuf.o
ifeq ($(CONFIG_SEC_LOG_BUF_USING_VH_LOGBUF),y)
CFLAGS_sec_log_buf_vh_logbuf.o = -I$(srctree)/kernel/printk
endif

View File

@@ -0,0 +1,149 @@
#ifndef __INTERNAL__SEC_QC_LOG_BUF_H__
#define __INTERNAL__SEC_QC_LOG_BUF_H__
#include <linux/console.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kmsg_dump.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/proc_fs.h>
#include <linux/reboot.h>
#include <linux/samsung/builder_pattern.h>
#include <linux/samsung/debug/sec_log_buf.h>
struct last_kmsg_data {
char *buf;
size_t size;
bool use_compression;
const char *compressor;
struct mutex lock;
unsigned int ref_cnt;
struct crypto_comp *tfm;
char *buf_comp;
size_t size_comp;
struct proc_dir_entry *proc;
};
struct log_buf_drvdata;
struct log_buf_logger {
int (*probe)(struct log_buf_drvdata *);
void (*remove)(struct log_buf_drvdata *);
};
struct ap_klog_proc {
char *buf;
size_t size;
struct mutex lock;
unsigned int ref_cnt;
struct proc_dir_entry *proc;
struct proc_dir_entry *symlink;
};
struct log_buf_drvdata {
struct builder bd;
struct reserved_mem *rmem;
phys_addr_t paddr;
size_t size;
struct kmsg_dump_iter iter;
unsigned int strategy;
union {
struct console con;
struct kprobe probe;
};
const struct log_buf_logger *logger;
struct atomic_notifier_head sync_list;
struct last_kmsg_data last_kmsg;
struct ap_klog_proc ap_klog;
struct notifier_block nb_restart;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbgfs;
#endif
};
extern struct log_buf_drvdata *sec_log_buf;
static __always_inline bool __log_buf_is_probed(void)
{
return !!sec_log_buf;
}
static __always_inline size_t __log_buf_print_time(u64 ts, char *buf)
{
unsigned long rem_nsec = do_div(ts, 1000000000);
return sprintf(buf, "\n[%5lu.%06lu]",
(unsigned long)ts, rem_nsec / 1000);
}
static __always_inline size_t __log_buf_print_process(unsigned int cpu,
char *buf, size_t sz_buf)
{
return scnprintf(buf, sz_buf, " %c[%2u:%15s:%5u] ",
in_interrupt() ? 'I' : ' ',
cpu, current->comm, (unsigned int)current->pid);
}
/* sec_log_buf_main.c */
extern bool __log_buf_is_acceptable(const char *s, size_t count);
extern void __log_buf_write(const char *s, size_t count);
extern void __log_buf_store_from_kmsg_dumper(void);
extern const struct sec_log_buf_head *__log_buf_get_header(void);
extern ssize_t __log_buf_get_buf_size(void);
extern size_t __log_buf_copy_to_buffer(void *buf);
/* sec_log_buf_last_kmsg.c */
extern int __last_kmsg_alloc_buffer(struct builder *bd);
extern void __last_kmsg_free_buffer(struct builder *bd);
extern int __last_kmsg_pull_last_log(struct builder *bd);
extern int __last_kmsg_procfs_create(struct builder *bd);
extern void __last_kmsg_procfs_remove(struct builder *bd);
extern int __last_kmsg_init_compression(struct builder *bd);
extern void __last_kmsg_exit_compression(struct builder *bd);
/* sec_log_buf_debugfs.c */
#if IS_ENABLED(CONFIG_DEBUG_FS)
extern int __log_buf_debugfs_create(struct builder *bd);
extern void __log_buf_debugfs_remove(struct builder *bd);
#endif
/* sec_log_buf_logger.c */
extern int __log_buf_logger_init(struct builder *bd);
extern void __log_buf_logger_exit(struct builder *bd);
/* sec_log_buf_builtin.c */
extern const struct log_buf_logger *__log_buf_logger_builtin_creator(void);
/* sec_log_buf_tp_console.c */
#if IS_ENABLED(CONFIG_SEC_LOG_BUF_USING_TP_CONSOLE)
extern const struct log_buf_logger *__log_buf_logger_tp_console_creator(void);
#else
static inline const struct log_buf_logger *__log_buf_logger_tp_console_creator(void) { return ERR_PTR(-ENODEV); }
#endif
/* sec_log_buf_kprobe.c */
#if IS_ENABLED(CONFIG_SEC_LOG_BUF_USING_KPROBE)
extern const struct log_buf_logger *__log_buf_logger_kprobe_creator(void);
#else
static inline const struct log_buf_logger *__log_buf_logger_kprobe_creator(void) { return ERR_PTR(-ENODEV); }
#endif
/* sec_log_buf_console.c */
extern const struct log_buf_logger *__log_buf_logger_console_creator(void);
/* sec_log_buf_vh_log_buf.c */
#if IS_ENABLED(CONFIG_SEC_LOG_BUF_USING_VH_LOGBUF)
extern const struct log_buf_logger *__log_buf_logger_vh_logbuf_creator(void);
#else
static inline const struct log_buf_logger *__log_buf_logger_vh_logbuf_creator(void) { return ERR_PTR(-ENODEV); }
#endif
/* sec_log_buf_ap_klog.c */
extern int __ap_klog_proc_init(struct builder *bd);
extern void __ap_klog_proc_exit(struct builder *bd);
extern int __ap_klog_proc_create_symlink(struct builder *bd);
extern void __ap_klog_proc_remove_symlink(struct builder *bd);
#endif /* __INTERNAL__SEC_QC_LOG_BUF_H__ */

View File

@@ -0,0 +1,152 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2022-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#include "sec_log_buf.h"
static int sec_ap_klog_open(struct inode *inode, struct file *file)
{
struct ap_klog_proc *ap_klog = pde_data(inode);
struct log_buf_drvdata *drvdata = container_of(ap_klog,
struct log_buf_drvdata, ap_klog);
const size_t sz_buf = drvdata->size;
int err = 0;
mutex_lock(&ap_klog->lock);
if (ap_klog->ref_cnt) {
ap_klog->ref_cnt++;
goto already_cached;
}
ap_klog->buf = vmalloc(sz_buf);
if (!ap_klog->buf) {
err = -ENOMEM;
goto err_vmalloc;
}
ap_klog->size = __log_buf_copy_to_buffer(ap_klog->buf);
ap_klog->ref_cnt++;
mutex_unlock(&ap_klog->lock);
return 0;
err_vmalloc:
already_cached:
mutex_unlock(&ap_klog->lock);
return err;
}
static ssize_t sec_ap_klog_read(struct file *file, char __user * buf,
size_t len, loff_t * offset)
{
struct ap_klog_proc *ap_klog = pde_data(file_inode(file));
loff_t pos = *offset;
ssize_t count;
if (pos < 0 || pos > ap_klog->size)
return 0;
count = min(len, (size_t) (ap_klog->size - pos));
if (copy_to_user(buf, ap_klog->buf + pos, count))
return -EFAULT;
*offset += count;
return count;
}
static loff_t sec_ap_klog_lseek(struct file *file, loff_t off, int whence)
{
struct ap_klog_proc *ap_klog = pde_data(file_inode(file));
return fixed_size_llseek(file, off, whence, ap_klog->size);
}
static int sec_ap_klog_release(struct inode *inode, struct file *file)
{
struct ap_klog_proc *ap_klog = pde_data(inode);
mutex_lock(&ap_klog->lock);
ap_klog->ref_cnt--;
if (ap_klog->ref_cnt)
goto still_used;
vfree(ap_klog->buf);
ap_klog->buf = NULL;
ap_klog->size = 0;
still_used:
mutex_unlock(&ap_klog->lock);
return 0;
}
static const struct proc_ops ap_klog_pops = {
.proc_open = sec_ap_klog_open,
.proc_read = sec_ap_klog_read,
.proc_lseek = sec_ap_klog_lseek,
.proc_release = sec_ap_klog_release,
};
#define AP_KLOG_PROC_NODE "sec_log"
int __ap_klog_proc_init(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct device *dev = bd->dev;
struct ap_klog_proc *ap_klog = &drvdata->ap_klog;
ap_klog->proc = proc_create_data(AP_KLOG_PROC_NODE, 0444,
NULL, &ap_klog_pops, ap_klog);
if (!ap_klog->proc) {
dev_warn(dev, "failed to create proc entry\n");
return -ENODEV;
}
mutex_init(&ap_klog->lock);
return 0;
}
void __ap_klog_proc_exit(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct ap_klog_proc *ap_klog = &drvdata->ap_klog;
proc_remove(ap_klog->proc);
mutex_destroy(&ap_klog->lock);
}
#define AP_KLOG_PROC_SYMLINK "ap_klog"
int __ap_klog_proc_create_symlink(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct device *dev = bd->dev;
struct ap_klog_proc *ap_klog = &drvdata->ap_klog;
ap_klog->symlink = proc_symlink(AP_KLOG_PROC_SYMLINK, NULL,
AP_KLOG_PROC_NODE);
if (!ap_klog->symlink) {
dev_warn(dev, "failed to create proc entry\n");
return -ENODEV;
}
return 0;
}
void __ap_klog_proc_remove_symlink(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct ap_klog_proc *ap_klog = &drvdata->ap_klog;
proc_remove(ap_klog->symlink);
}

View File

@@ -0,0 +1,43 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2010-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include "sec_log_buf.h"
void sec_log_buf_store_on_vprintk_emit(void)
{
if (!__log_buf_is_probed())
return;
if (sec_log_buf->strategy != SEC_LOG_BUF_STRATEGY_BUILTIN)
return;
__log_buf_store_from_kmsg_dumper();
}
static int log_buf_logger_builtin_probe(struct log_buf_drvdata *drvdata)
{
return 0;
}
static void log_buf_logger_builtin_remove(struct log_buf_drvdata *drvdata)
{
}
static const struct log_buf_logger log_buf_logger_builtin = {
.probe = log_buf_logger_builtin_probe,
.remove = log_buf_logger_builtin_remove,
};
const struct log_buf_logger *__log_buf_logger_builtin_creator(void)
{
if (IS_BUILTIN(CONFIG_SEC_LOG_BUF))
return &log_buf_logger_builtin;
else
return ERR_PTR(-ENODEV);
}

View File

@@ -0,0 +1,53 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2010-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include "sec_log_buf.h"
static void sec_log_buf_write_console(struct console *console, const char *s,
unsigned int count)
{
if (!__log_buf_is_acceptable(s, count))
return;
__log_buf_write(s, count);
}
static int log_buf_logger_console_probe(struct log_buf_drvdata *drvdata)
{
struct console *con = &drvdata->con;
strlcpy(con->name, "sec_log_buf", sizeof(con->name));
con->write = sec_log_buf_write_console;
/* NOTE: CON_PRINTBUFFER is omitted.
* __log_buf_pull_early_buffer is used instead.
*/
con->flags = CON_ENABLED | CON_ANYTIME;
con->index = -1;
register_console(con);
return 0;
}
static void log_buf_logger_console_remove(struct log_buf_drvdata *drvdata)
{
struct console *con = &drvdata->con;
unregister_console(con);
}
static const struct log_buf_logger log_buf_logger_console = {
.probe = log_buf_logger_console_probe,
.remove = log_buf_logger_console_remove,
};
const struct log_buf_logger *__log_buf_logger_console_creator(void)
{
return &log_buf_logger_console;
}

View File

@@ -0,0 +1,93 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2022-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include "sec_log_buf.h"
static void __log_buf_dbgfs_show_basic(struct seq_file *m,
struct log_buf_drvdata *drvdata)
{
seq_puts(m, "- Basic Information:\n");
seq_printf(m, " + VA:0x%p (PA:%pa) / %zu bytes\n", __log_buf_get_header(),
&drvdata->paddr, drvdata->size);
seq_puts(m, "\n");
}
static void __log_buf_dbgfs_show_logger(struct seq_file *m,
struct log_buf_drvdata *drvdata)
{
seq_puts(m, "- Logger Information:\n");
seq_printf(m, " + logger : [%u] %ps\n", drvdata->strategy, drvdata->logger);
seq_puts(m, "\n");
}
static void __log_buf_dbgfs_show_last_kmsg(struct seq_file *m,
struct log_buf_drvdata *drvdata)
{
struct last_kmsg_data *last_kmsg = &drvdata->last_kmsg;
seq_puts(m, "- Last-KMSG Information:\n");
seq_printf(m, " + compressor : %s\n", last_kmsg->use_compression ?
last_kmsg->compressor : "none");
if (!last_kmsg->use_compression)
seq_printf(m, " + VA:0x%p / %zu bytes\n",
last_kmsg->buf, last_kmsg->size);
else {
size_t ratio = (last_kmsg->size_comp * 100000) / last_kmsg->size;
seq_printf(m, " + VA:0x%p / %zu (%zu) bytes (%zu.%03zu%%)\n",
last_kmsg->buf_comp, last_kmsg->size_comp, last_kmsg->size,
ratio / 1000, ratio % 1000);
}
seq_puts(m, "\n");
}
static int sec_log_buf_dbgfs_show_all(struct seq_file *m, void *unused)
{
struct log_buf_drvdata *drvdata = m->private;
__log_buf_dbgfs_show_basic(m, drvdata);
__log_buf_dbgfs_show_logger(m, drvdata);
__log_buf_dbgfs_show_last_kmsg(m, drvdata);
return 0;
}
static int sec_log_buf_dbgfs_open(struct inode *inode, struct file *file)
{
return single_open(file, sec_log_buf_dbgfs_show_all, inode->i_private);
}
static const struct file_operations sec_log_buf_dbgfs_fops = {
.open = sec_log_buf_dbgfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
int __log_buf_debugfs_create(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
drvdata->dbgfs = debugfs_create_file("sec_log_buf", 0440,
NULL, drvdata, &sec_log_buf_dgbfs_fops);
return 0;
}
void __log_buf_debugfs_remove(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
debugfs_remove(drvdata->dbgfs);
}

View File

@@ -0,0 +1,51 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2010-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include "sec_log_buf.h"
static void sec_log_buf_post_handle_on_vprintk_emit(struct kprobe *probe,
struct pt_regs *regs, unsigned long flags)
{
__log_buf_store_from_kmsg_dumper();
}
static int log_buf_logger_kprobe_probe(struct log_buf_drvdata *drvdata)
{
struct kprobe *kp = &drvdata->probe;
int err;
kp->symbol_name = "vprintk_emit";
kp->post_handler = sec_log_buf_post_handle_on_vprintk_emit;
err = register_kprobe(kp);
if (err)
goto err_failed_to_register;
return 0;
err_failed_to_register:
return err;
}
static void log_buf_logger_kprobe_remove(struct log_buf_drvdata *drvdata)
{
struct kprobe *kp = &drvdata->probe;
unregister_kprobe(kp);
}
static const struct log_buf_logger log_buf_logger_kprobe = {
.probe = log_buf_logger_kprobe_probe,
.remove = log_buf_logger_kprobe_remove,
};
const struct log_buf_logger *__log_buf_logger_kprobe_creator(void)
{
return &log_buf_logger_kprobe;
}

View File

@@ -0,0 +1,307 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2010-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include "sec_log_buf.h"
int __last_kmsg_alloc_buffer(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct last_kmsg_data *last_kmsg = &drvdata->last_kmsg;
const size_t log_buf_size = __log_buf_get_buf_size();
last_kmsg->buf = vmalloc(log_buf_size);
if (!last_kmsg->buf)
return -ENOMEM;
return 0;
}
void __last_kmsg_free_buffer(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct last_kmsg_data *last_kmsg = &drvdata->last_kmsg;
vfree(last_kmsg->buf);
}
int __last_kmsg_pull_last_log(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct last_kmsg_data *last_kmsg = &drvdata->last_kmsg;
char *buf = last_kmsg->buf;
last_kmsg->size = __log_buf_copy_to_buffer(buf);
return 0;
}
static int __last_kmsg_decompress_buf(struct last_kmsg_data *last_kmsg)
{
void *buf;
unsigned int size = last_kmsg->size;
unsigned int size_comp = last_kmsg->size_comp;
int err;
buf = vmalloc(size);
if (!buf) {
pr_warn("failed to alloc buf\n");
return -ENOMEM;
}
err = crypto_comp_decompress(last_kmsg->tfm,
last_kmsg->buf_comp, size_comp, buf, &size);
if (err) {
pr_warn("failed to decompress (%d)\n", err);
vfree(buf);
return err;
}
last_kmsg->buf = buf;
return 0;
}
static void __last_kmsg_release_buf(struct last_kmsg_data *last_kmsg)
{
vfree(last_kmsg->buf);
last_kmsg->buf = NULL;
}
static int sec_last_kmsg_buf_open(struct inode *inode, struct file *file)
{
struct last_kmsg_data *last_kmsg = pde_data(inode);
int err = 0;
if (!last_kmsg->use_compression || !last_kmsg->size)
return 0;
mutex_lock(&last_kmsg->lock);
if (last_kmsg->ref_cnt) {
last_kmsg->ref_cnt++;
goto already_decompressed;
}
err = __last_kmsg_decompress_buf(last_kmsg);
if (err) {
pr_warn("failed to decompress last_kmsg (%d)\n", err);
goto err_decompress;
}
last_kmsg->ref_cnt++;
mutex_unlock(&last_kmsg->lock);
return 0;
err_decompress:
already_decompressed:
mutex_unlock(&last_kmsg->lock);
return err;
}
static ssize_t sec_last_kmsg_buf_read(struct file *file, char __user *buf,
size_t len, loff_t *offset)
{
struct last_kmsg_data *last_kmsg = pde_data(file_inode(file));
loff_t pos = *offset;
ssize_t count;
if (pos >= last_kmsg->size || !last_kmsg->buf) {
pr_warn("pos %lld, size %zu\n", pos, last_kmsg->size);
return 0;
}
count = min(len, (size_t)(last_kmsg->size - pos));
if (copy_to_user(buf, last_kmsg->buf + pos, count))
return -EFAULT;
*offset += count;
return count;
}
static loff_t sec_last_kmsg_buf_lseek(struct file *file, loff_t off,
int whence)
{
struct last_kmsg_data *last_kmsg = pde_data(file_inode(file));
return fixed_size_llseek(file, off, whence, last_kmsg->size);
}
static int sec_last_kmsg_buf_release(struct inode *inode, struct file *file)
{
struct last_kmsg_data *last_kmsg = pde_data(inode);
if (!last_kmsg->use_compression)
return 0;
mutex_lock(&last_kmsg->lock);
last_kmsg->ref_cnt--;
if (last_kmsg->ref_cnt)
goto still_used;
__last_kmsg_release_buf(last_kmsg);
still_used:
mutex_unlock(&last_kmsg->lock);
return 0;
}
static const struct proc_ops last_kmsg_buf_pops = {
.proc_open = sec_last_kmsg_buf_open,
.proc_read = sec_last_kmsg_buf_read,
.proc_lseek = sec_last_kmsg_buf_lseek,
.proc_release = sec_last_kmsg_buf_release,
};
#define LAST_LOG_BUF_NODE "last_kmsg"
int __last_kmsg_procfs_create(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct device *dev = bd->dev;
struct last_kmsg_data *last_kmsg = &drvdata->last_kmsg;
last_kmsg->proc = proc_create_data(LAST_LOG_BUF_NODE, 0444,
NULL, &last_kmsg_buf_pops, last_kmsg);
if (!last_kmsg->proc) {
dev_warn(dev, "failed to create proc entry. ram console may be present\n");
return -ENODEV;
}
mutex_init(&last_kmsg->lock);
proc_set_size(last_kmsg->proc, last_kmsg->size);
return 0;
}
void __last_kmsg_procfs_remove(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct last_kmsg_data *last_kmsg = &drvdata->last_kmsg;
proc_remove(last_kmsg->proc);
mutex_destroy(&last_kmsg->lock);
}
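/* Compress the pulled last_kmsg snapshot into a tightly sized buffer.
 * The temporary buffer is allocated at twice the source size, and the
 * result is kept only when it is actually smaller than the original.
 */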
static void *__last_kmsg_vmalloc_compressed(struct last_kmsg_data *last_kmsg,
struct device *dev, struct crypto_comp *tfm, size_t *size_comp)
{
unsigned int size_decomp = last_kmsg->size;
unsigned int size = last_kmsg->size * 2;
void *buf_tmp;
void *buf_comp;
int err;
buf_tmp = vmalloc(size);
if (!buf_tmp)
return ERR_PTR(-ENOMEM);
err = crypto_comp_compress(tfm, last_kmsg->buf, size_decomp,
buf_tmp, &size);
if (err || size >= size_decomp) {
vfree(buf_tmp);
return ERR_PTR(-EINVAL);
}
buf_comp = vmalloc(size);
if (!buf_comp) {
vfree(buf_tmp);
return ERR_PTR(-ENOMEM);
}
memcpy(buf_comp, buf_tmp, size);
vfree(buf_tmp);
*size_comp = size;
return buf_comp;
}
static int ____last_kmsg_init_compression(struct last_kmsg_data *last_kmsg,
struct device *dev)
{
struct crypto_comp *tfm;
void *buf_comp;
size_t size_comp;
int err;
tfm = crypto_alloc_comp(last_kmsg->compressor, 0, 0);
if (IS_ERR(tfm)) {
err = PTR_ERR(tfm);
goto err_alloc_comp;
}
buf_comp = __last_kmsg_vmalloc_compressed(last_kmsg, dev, tfm, &size_comp);
if (IS_ERR_OR_NULL(buf_comp)) {
err = PTR_ERR(buf_comp);
goto err_alloc_buf_comp;
}
vfree(last_kmsg->buf);
last_kmsg->buf = NULL;
last_kmsg->buf_comp = buf_comp;
last_kmsg->size_comp = size_comp;
last_kmsg->tfm = tfm;
return 0;
err_alloc_buf_comp:
crypto_free_comp(tfm);
err_alloc_comp:
last_kmsg->use_compression = false;
return err;
}
int __last_kmsg_init_compression(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct device *dev = bd->dev;
struct last_kmsg_data *last_kmsg = &drvdata->last_kmsg;
if (!last_kmsg->use_compression || !last_kmsg->size)
return 0;
return ____last_kmsg_init_compression(last_kmsg, dev);
}
static void ____last_kmsg_exit_compression(struct last_kmsg_data *last_kmsg,
struct device *dev)
{
vfree(last_kmsg->buf_comp);
last_kmsg->buf_comp = NULL;
crypto_free_comp(last_kmsg->tfm);
}
void __last_kmsg_exit_compression(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct device *dev = bd->dev;
struct last_kmsg_data *last_kmsg = &drvdata->last_kmsg;
if (!last_kmsg->use_compression)
return;
____last_kmsg_exit_compression(last_kmsg, dev);
}

View File

@@ -0,0 +1,67 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2010-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include "sec_log_buf.h"
static const struct log_buf_logger *
__log_buf_logger_creator(struct log_buf_drvdata *drvdata)
{
struct device *dev = drvdata->bd.dev;
const struct log_buf_logger *logger = NULL;
unsigned int strategy = drvdata->strategy;
switch (strategy) {
case SEC_LOG_BUF_STRATEGY_BUILTIN:
logger = __log_buf_logger_builtin_creator();
break;
case SEC_LOG_BUF_STRATEGY_TP_CONSOLE:
logger = __log_buf_logger_tp_console_creator();
break;
case SEC_LOG_BUF_STRATEGY_KPROBE:
logger = __log_buf_logger_kprobe_creator();
break;
case SEC_LOG_BUF_STRATEGY_CONSOLE:
logger = __log_buf_logger_console_creator();
break;
case SEC_LOG_BUF_STRATEGY_VH_LOGBUF:
logger = __log_buf_logger_vh_logbuf_creator();
break;
}
if (IS_ERR_OR_NULL(logger)) {
dev_warn(dev, "%u is not supported or deprecated. use default\n",
strategy);
logger = __log_buf_logger_console_creator();
}
return logger;
}
static void __log_buf_logger_factory(struct log_buf_drvdata *drvdata)
{
drvdata->logger = __log_buf_logger_creator(drvdata);
}
int __log_buf_logger_init(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
__log_buf_logger_factory(drvdata);
return drvdata->logger->probe(drvdata);
}
void __log_buf_logger_exit(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
drvdata->logger->remove(drvdata);
}

View File

@@ -0,0 +1,792 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2010-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/cacheflush.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/samsung/sec_of.h>
#include "sec_log_buf.h"
struct log_buf_drvdata *sec_log_buf __read_mostly;
static struct sec_log_buf_head *s_log_buf __read_mostly;
static size_t sec_log_buf_size __read_mostly;
static struct atomic_notifier_head *sync_list __read_mostly;
const char *block_str[] = {
"init: Loading module",
};
static void (*__log_buf_memcpy_fromio)(void *, const void *, size_t) __read_mostly;
static void (*__log_buf_memcpy_toio)(void *, const void *, size_t) __read_mostly;
static void notrace ____log_buf_memcpy_fromio(void *dst, const void *src, size_t cnt)
{
memcpy_fromio(dst, src, cnt);
}
static void notrace ____log_buf_memcpy_toio(void *dst, const void *src, size_t cnt)
{
memcpy_toio(dst, src, cnt);
}
static void notrace ____log_buf_memcpy(void *dst, const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
}
const struct sec_log_buf_head *__log_buf_get_header(void)
{
return s_log_buf;
}
const struct sec_log_buf_head *sec_log_buf_get_header(void)
{
if (!__log_buf_is_probed())
return ERR_PTR(-EBUSY);
return __log_buf_get_header();
}
EXPORT_SYMBOL_GPL(sec_log_buf_get_header);
ssize_t __log_buf_get_buf_size(void)
{
return sec_log_buf_size;
}
ssize_t sec_log_buf_get_buf_size(void)
{
if (!__log_buf_is_probed())
return -EBUSY;
return __log_buf_get_buf_size();
}
EXPORT_SYMBOL_GPL(sec_log_buf_get_buf_size);
static int __log_buf_register_sync_handler(struct log_buf_drvdata *drvdata,
struct notifier_block *nb)
{
return atomic_notifier_chain_register(&drvdata->sync_list, nb);
}
int sec_log_buf_register_sync_handler(struct notifier_block *nb)
{
if (!__log_buf_is_probed())
return -EBUSY;
return __log_buf_register_sync_handler(sec_log_buf, nb);
}
EXPORT_SYMBOL_GPL(sec_log_buf_register_sync_handler);
static int __log_buf_unregister_sync_handler(struct log_buf_drvdata *drvdata,
struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&drvdata->sync_list, nb);
}
int sec_log_buf_unregister_sync_handler(struct notifier_block *nb)
{
if (!__log_buf_is_probed())
return -EBUSY;
return __log_buf_unregister_sync_handler(sec_log_buf, nb);
}
EXPORT_SYMBOL_GPL(sec_log_buf_unregister_sync_handler);
bool __log_buf_is_acceptable(const char *s, size_t count)
{
static bool filter_en = !!IS_ENABLED(CONFIG_SAMSUNG_PRODUCT_SHIP);
const char *magic_str = "init: init second stage started!";
size_t i;
if (likely(!filter_en))
return true;
if (strnstr(s, magic_str, count)) {
filter_en = false;
return true;
}
for (i = 0; i < ARRAY_SIZE(block_str); i++) {
if (strnstr(s, block_str[i], count))
return false;
}
return true;
}
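/* Append 'count' bytes to the carve-out, treating it as a ring buffer:
 * write up to the end of the region first, wrap any remainder to the
 * start, advance the running index and notify registered sync handlers.
 */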
void notrace __log_buf_write(const char *s, size_t count)
{
size_t f_len, s_len, remain_space;
size_t idx;
idx = s_log_buf->idx % sec_log_buf_size;
remain_space = sec_log_buf_size - idx;
f_len = min(count, remain_space);
__log_buf_memcpy_toio(&(s_log_buf->buf[idx]), s, f_len);
s_len = count - f_len;
if (unlikely(s_len))
__log_buf_memcpy_toio(s_log_buf->buf, &s[f_len], s_len);
s_log_buf->idx += (uint32_t)count;
atomic_notifier_call_chain(sync_list, s_log_buf->idx, s_log_buf);
}
static __always_inline size_t __log_buf_print_kmsg(unsigned int cpu,
char *buf, size_t sz_buf)
{
struct kmsg_dump_iter *iter = &sec_log_buf->iter;
size_t len;
if (kmsg_dump_get_line(iter, true, buf, sz_buf, &len))
return len;
return 0;
}
#define SZ_TASK_BUF 32
#define SZ_KMSG_BUF 256
struct log_buf_kmsg_ctx {
char task[SZ_TASK_BUF];
size_t task_len;
char head[SZ_KMSG_BUF];
size_t head_len;
char *tail;
size_t tail_len;
};
static DEFINE_PER_CPU(struct log_buf_kmsg_ctx, kmsg_ctx);
static DEFINE_PER_CPU(struct log_buf_kmsg_ctx, kmsg_ctx_irq);
static bool __log_buf_kmsg_check_level_text(struct log_buf_kmsg_ctx *ctx)
{
char *head = ctx->head;
char *endp;
long l;
if (head[0] != '<')
return false;
/* NOTE: simple_strto{?} functions are not recommended for normal cases.
* Because the position of the next token is required here, kstrto{?}
* functions cannot be used without a more complex implementation.
*/
l = simple_strtol(&head[1], &endp, 10);
if (!endp || endp[0] != '>')
return false;
return true;
}
static void __log_buf_kmsg_split(struct log_buf_kmsg_ctx *ctx)
{
char *head = ctx->head;
char *tail;
const char *delim = "] ";
size_t head_len;
tail = strnstr(head, delim, SZ_KMSG_BUF);
if (!tail) {
ctx->tail = NULL;
return;
}
tail = &tail[2];
head_len = tail - head - 1;
head[head_len] = '\0';
ctx->tail = tail;
ctx->tail_len = ctx->head_len - head_len - 1;
ctx->head_len = head_len;
}
static __always_inline void __log_buf_kmsg_print(struct log_buf_kmsg_ctx *ctx)
{
if (__log_buf_kmsg_check_level_text(ctx))
__log_buf_kmsg_split(ctx);
__log_buf_write(ctx->head, ctx->head_len);
if (ctx->tail) {
__log_buf_write(ctx->task, ctx->task_len);
__log_buf_write(ctx->tail, ctx->tail_len);
}
}
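/* Linearize the ring buffer into '__buf': when the running index has
 * already wrapped, copy the oldest part (from the current index to the
 * end of the buffer) first and the newest part (from the start) after it.
 */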
size_t __log_buf_copy_to_buffer(void *__buf)
{
char *buf = (char *)__buf;
const struct sec_log_buf_head *log_buf_head = __log_buf_get_header();
const size_t log_buf_size = __log_buf_get_buf_size();
const size_t max_size = log_buf_size;
size_t head;
size_t total;
if (log_buf_head->idx > max_size) {
head = (size_t)log_buf_head->idx % log_buf_size;
__log_buf_memcpy_fromio(buf, &log_buf_head->buf[head],
log_buf_size - head);
if (head != 0)
__log_buf_memcpy_fromio(&buf[log_buf_size - head],
log_buf_head->buf, head);
total = max_size;
} else {
__log_buf_memcpy_fromio(buf, log_buf_head->buf, log_buf_head->idx);
total = log_buf_head->idx;
}
return total;
}
static noinline int __log_buf_parse_dt_strategy(struct builder *bd,
struct device_node *np)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct device *dev = bd->dev;
u32 strategy;
int err;
err = of_property_read_u32(np, "sec,strategy", &strategy);
if (err)
return -EINVAL;
if (IS_MODULE(CONFIG_SEC_LOG_BUF) &&
(strategy == SEC_LOG_BUF_STRATEGY_BUILTIN)) {
dev_err(dev, "BUILTIN strategy can't be used in the kernel module!\n");
return -EINVAL;
}
if (strategy >= SEC_LOG_BUF_NR_STRATEGIES) {
dev_err(dev, "invalid strategy (%u)!\n", strategy);
return -EINVAL;
}
drvdata->strategy = (unsigned int)strategy;
return 0;
}
static noinline int __log_buf_parse_dt_memory_region(struct builder *bd,
struct device_node *np)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct device *dev = bd->dev;
struct device_node *mem_np;
struct reserved_mem *rmem;
mem_np = of_parse_phandle(np, "memory-region", 0);
if (!mem_np)
return -EINVAL;
rmem = of_reserved_mem_lookup(mem_np);
if (!rmem) {
dev_warn(dev, "failed to get a reserved memory (%s)\n",
mem_np->name);
return -EFAULT;
}
drvdata->rmem = rmem;
return 0;
}
static bool __log_buf_is_in_reserved_mem_bound(
const struct reserved_mem *rmem,
phys_addr_t base, phys_addr_t size)
{
phys_addr_t rmem_base = rmem->base;
phys_addr_t rmem_end = rmem_base + rmem->size - 1;
phys_addr_t end = base + size - 1;
if ((base >= rmem_base) && (end <= rmem_end))
return true;
return false;
}
static int __log_buf_use_partial_reserved_mem(
struct log_buf_drvdata *drvdata, struct device_node *np)
{
struct reserved_mem *rmem = drvdata->rmem;
phys_addr_t base;
phys_addr_t size;
int err;
err = sec_of_parse_reg_prop(np, &base, &size);
if (err)
return err;
if (!__log_buf_is_in_reserved_mem_bound(rmem, base, size))
return -ERANGE;
drvdata->paddr = base;
drvdata->size = size;
return 0;
}
static int __log_buf_use_entire_reserved_mem(
struct log_buf_drvdata *drvdata)
{
struct reserved_mem *rmem = drvdata->rmem;
drvdata->paddr = rmem->base;
drvdata->size = rmem->size;
return 0;
}
static noinline int __log_buf_parse_dt_partial_reserved_mem(struct builder *bd,
struct device_node *np)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
int err;
if (of_property_read_bool(np, "sec,use-partial_reserved_mem"))
err = __log_buf_use_partial_reserved_mem(drvdata, np);
else
err = __log_buf_use_entire_reserved_mem(drvdata);
if (err)
return -EFAULT;
return 0;
}
static noinline int __log_buf_parse_dt_test_no_map(struct builder *bd,
struct device_node *np)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct device_node *mem_np;
mem_np = of_parse_phandle(np, "memory-region", 0);
if (!mem_np)
return -EINVAL;
if (!of_property_read_bool(mem_np, "no-map")) {
s_log_buf = phys_to_virt(drvdata->paddr);
__log_buf_memcpy_fromio = ____log_buf_memcpy;
__log_buf_memcpy_toio = ____log_buf_memcpy;
} else {
__log_buf_memcpy_fromio = ____log_buf_memcpy_fromio;
__log_buf_memcpy_toio = ____log_buf_memcpy_toio;
}
return 0;
}
static noinline int __last_kmsg_parse_dt_use_last_kmsg_compression(struct builder *bd,
struct device_node *np)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct last_kmsg_data *last_kmsg = &drvdata->last_kmsg;
last_kmsg->use_compression =
of_property_read_bool(np, "sec,use-last_kmsg_compression");
return 0;
}
static noinline int __last_kmsg_parse_dt_last_kmsg_compressor(struct builder *bd,
struct device_node *np)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct last_kmsg_data *last_kmsg = &drvdata->last_kmsg;
if (!last_kmsg->use_compression)
return 0;
return of_property_read_string(np, "sec,last_kmsg_compressor",
&last_kmsg->compressor);
}
static const struct dt_builder __log_buf_dt_builder[] = {
DT_BUILDER(__log_buf_parse_dt_strategy),
DT_BUILDER(__log_buf_parse_dt_memory_region),
DT_BUILDER(__log_buf_parse_dt_partial_reserved_mem),
DT_BUILDER(__log_buf_parse_dt_test_no_map),
DT_BUILDER(__last_kmsg_parse_dt_use_last_kmsg_compression),
DT_BUILDER(__last_kmsg_parse_dt_last_kmsg_compressor),
};
static noinline int __log_buf_parse_dt(struct builder *bd)
{
return sec_director_parse_dt(bd, __log_buf_dt_builder,
ARRAY_SIZE(__log_buf_dt_builder));
}
static void __iomem *__log_buf_ioremap(struct log_buf_drvdata *drvdata)
{
struct device *dev = drvdata->bd.dev;
if (s_log_buf)
return s_log_buf;
return devm_ioremap(dev, drvdata->paddr, drvdata->size);
}
static noinline int __log_buf_probe_prolog(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
ATOMIC_INIT_NOTIFIER_HEAD(&drvdata->sync_list);
sync_list = &drvdata->sync_list;
return 0;
}
static noinline void __log_buf_prepare_buffer_raw(struct log_buf_drvdata *drvdata)
{
struct device *dev = drvdata->bd.dev;
dev_warn(dev, "sec_log_magic is not valid : 0x%x at 0x%p\n",
s_log_buf->magic, &(s_log_buf->magic));
s_log_buf->magic = SEC_LOG_MAGIC;
s_log_buf->idx = 0;
s_log_buf->prev_idx = 0;
}
static noinline int __log_buf_prepare_buffer(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
s_log_buf = __log_buf_ioremap(drvdata);
if (!s_log_buf)
return -EFAULT;
if (s_log_buf->magic != SEC_LOG_MAGIC)
__log_buf_prepare_buffer_raw(drvdata);
sec_log_buf_size = drvdata->size -
offsetof(struct sec_log_buf_head, buf);
return 0;
}
static ssize_t __pull_early_buffer(struct log_buf_drvdata *drvdata, char *buf)
{
struct kmsg_dump_iter *iter = &drvdata->iter;
ssize_t copied;
char *line;
size_t len;
line = kvmalloc(PAGE_SIZE, GFP_KERNEL);
if (!line)
return -ENOMEM;
memset(buf, 0x0, drvdata->size);
copied = 0;
kmsg_dump_rewind(iter);
while (kmsg_dump_get_line(iter, true, line, PAGE_SIZE, &len)) {
memcpy_fromio(&buf[copied], line, len);
copied += len;
}
kvfree(line);
return copied;
}
static size_t __remove_till_end_of_line(char *substr)
{
size_t i = 0;
while (substr[i] != '\n' && substr[i] != '\0')
substr[i++] = ' ';
return i;
}
static void ____remove_block_str(char *buf, size_t len, const char *keyword)
{
size_t offset = 0;
char *substr;
while (offset < len) {
substr = strnstr(&buf[offset], keyword, len);
if (!substr)
break;
offset = substr - buf;
offset += __remove_till_end_of_line(substr);
}
}
static void __remove_block_str(char *buf, size_t len)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(block_str); i++)
____remove_block_str(buf, len, block_str[i]);
}
static noinline int __log_buf_pull_early_buffer(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
char *buf;
ssize_t copied;
buf = kvmalloc(drvdata->size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
copied = __pull_early_buffer(drvdata, buf);
if (copied < 0) {
kvfree(buf);
return copied;
}
if (IS_ENABLED(CONFIG_SAMSUNG_PRODUCT_SHIP))
__remove_block_str(buf, copied);
__log_buf_write(buf, copied);
kvfree(buf);
return 0;
}
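/* Drain pending kmsg lines into the carve-out. Per-CPU contexts (with a
 * separate set for hard-IRQ context) are used so that the scratch buffers
 * are not corrupted by nested callers on the same CPU.
 */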
void notrace __log_buf_store_from_kmsg_dumper(void)
{
unsigned int cpu;
struct log_buf_kmsg_ctx *ctx_pcpu;
cpu = get_cpu();
if (in_irq())
ctx_pcpu = this_cpu_ptr(&kmsg_ctx_irq);
else
ctx_pcpu = this_cpu_ptr(&kmsg_ctx);
ctx_pcpu->head_len = __log_buf_print_kmsg(cpu,
ctx_pcpu->head, SZ_KMSG_BUF);
if (!ctx_pcpu->head_len)
goto print_nothing;
ctx_pcpu->task_len = __log_buf_print_process(cpu,
ctx_pcpu->task, SZ_TASK_BUF);
do {
__log_buf_kmsg_print(ctx_pcpu);
ctx_pcpu->head_len = __log_buf_print_kmsg(cpu,
ctx_pcpu->head, SZ_KMSG_BUF);
} while (ctx_pcpu->head_len);
print_nothing:
put_cpu();
}
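/* Flush the CPU caches covering the carve-out on restart so that the boot
 * loader reads an up-to-date log after the reboot.
 */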
static int sec_log_buf_restart_handler(struct notifier_block *nb,
unsigned long l, void *msg)
{
struct log_buf_drvdata *drvdata =
container_of(nb, struct log_buf_drvdata, nb_restart);
const unsigned long pfn = PHYS_PFN(drvdata->paddr);
const unsigned long nr_pages = (unsigned long)(drvdata->size >> PAGE_SHIFT);
unsigned long i;
flush_cache_all(); /* NOTE: may not work on arm64 architecture */
for (i = 0; i < nr_pages; i++) {
struct page *page = pfn_to_page(pfn + i);
flush_dcache_page(page);
}
return NOTIFY_OK;
}
static int __log_buf_register_restart_handler(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_restart;
nb->priority = 255;
nb->notifier_call = sec_log_buf_restart_handler;
return register_restart_handler(nb);
}
static void __log_buf_unregister_restart_handler(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct notifier_block *nb = &drvdata->nb_restart;
unregister_restart_handler(nb);
}
static noinline int __log_buf_probe_epilog(struct builder *bd)
{
struct log_buf_drvdata *drvdata =
container_of(bd, struct log_buf_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
sec_log_buf = drvdata; /* set a singleton */
pr_debug("buf base virtual addrs 0x%p phy=%pa\n", s_log_buf,
&sec_log_buf->paddr);
return 0;
}
static noinline void __log_buf_remove_prolog(struct builder *bd)
{
/* FIXME: This is not a graceful exit.
* 'sec_log_buf' may still be dereferenced by contexts calling 'printk'.
*/
sec_log_buf = NULL;
}
static int __log_buf_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct log_buf_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __log_buf_probe_threaded(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct log_buf_drvdata *drvdata = platform_get_drvdata(pdev);
return sec_director_probe_dev_threaded(&drvdata->bd, builder, n,
"log_buf");
}
static int __log_buf_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct log_buf_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static int __log_buf_remove_threaded(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct log_buf_drvdata *drvdata = platform_get_drvdata(pdev);
struct director_threaded *drct = drvdata->bd.drct;
sec_director_destruct_dev_threaded(drct);
return 0;
}
static const struct dev_builder __log_buf_dev_builder[] = {
DEVICE_BUILDER(__log_buf_parse_dt, NULL),
DEVICE_BUILDER(__log_buf_probe_prolog, NULL),
DEVICE_BUILDER(__log_buf_prepare_buffer, NULL),
DEVICE_BUILDER(__last_kmsg_alloc_buffer, __last_kmsg_free_buffer),
DEVICE_BUILDER(__last_kmsg_pull_last_log, NULL),
DEVICE_BUILDER(__last_kmsg_procfs_create, __last_kmsg_procfs_remove),
DEVICE_BUILDER(__log_buf_pull_early_buffer, NULL),
DEVICE_BUILDER(__log_buf_logger_init, __log_buf_logger_exit),
DEVICE_BUILDER(__ap_klog_proc_init, __ap_klog_proc_exit),
DEVICE_BUILDER(__ap_klog_proc_create_symlink, __ap_klog_proc_remove_symlink),
DEVICE_BUILDER(__log_buf_register_restart_handler, __log_buf_unregister_restart_handler),
DEVICE_BUILDER(__log_buf_probe_epilog, __log_buf_remove_prolog),
};
static const struct dev_builder __log_buf_dev_builder_threaded[] = {
#if IS_ENABLED(CONFIG_DEBUG_FS)
DEVICE_BUILDER(__log_buf_debugfs_create, __log_buf_debugfs_remove),
#endif
DEVICE_BUILDER(__last_kmsg_init_compression, __last_kmsg_exit_compression),
};
static int sec_log_buf_probe(struct platform_device *pdev)
{
int err;
err = __log_buf_probe(pdev, __log_buf_dev_builder,
ARRAY_SIZE(__log_buf_dev_builder));
if (err)
return err;
return __log_buf_probe_threaded(pdev, __log_buf_dev_builder_threaded,
ARRAY_SIZE(__log_buf_dev_builder_threaded));
}
static int sec_log_buf_remove(struct platform_device *pdev)
{
__log_buf_remove_threaded(pdev, __log_buf_dev_builder_threaded,
ARRAY_SIZE(__log_buf_dev_builder_threaded));
__log_buf_remove(pdev, __log_buf_dev_builder,
ARRAY_SIZE(__log_buf_dev_builder));
return 0;
}
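/* Illustrative device tree node matching the bindings parsed above
 * (not taken from the source tree; the node/label names and the strategy
 * value are placeholders):
 *
 *   sec_log_buf: samsung_kernel_log_buf {
 *       compatible = "samsung,kernel_log_buf";
 *       memory-region = <&sec_log_rmem>;
 *       sec,strategy = <...>;   // one of the SEC_LOG_BUF_STRATEGY_* values
 *       // optional:
 *       // sec,use-partial_reserved_mem; reg = <...>;
 *       // sec,use-last_kmsg_compression; sec,last_kmsg_compressor = "...";
 *   };
 */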
static const struct of_device_id sec_log_buf_match_table[] = {
{ .compatible = "samsung,kernel_log_buf" },
{},
};
MODULE_DEVICE_TABLE(of, sec_log_buf_match_table);
static struct platform_driver sec_log_buf_driver = {
.driver = {
.name = "sec,log_buf",
.of_match_table = of_match_ptr(sec_log_buf_match_table),
},
.probe = sec_log_buf_probe,
.remove = sec_log_buf_remove,
};
static int __init sec_log_buf_init(void)
{
return platform_driver_register(&sec_log_buf_driver);
}
/* NOTE: all compression algorithms are registered at the 'subsys_initcall' stage. */
subsys_initcall_sync(sec_log_buf_init);
static void __exit sec_log_buf_exit(void)
{
platform_driver_unregister(&sec_log_buf_driver);
}
module_exit(sec_log_buf_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("Kernel log buffer shared with boot loader");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,83 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <trace/events/printk.h>
#include "sec_log_buf.h"
static __always_inline size_t __trace_console_print_prefix(char *buf, size_t sz_buf)
{
size_t len = 0;
u64 ts_nsec = local_clock(); /* NOTE: may have a skew... */
len += __log_buf_print_time(ts_nsec, buf + len);
len += __log_buf_print_process(smp_processor_id(), buf + len, sz_buf - len);
return len;
}
#define PREFIX_MAX 64
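/* Called with the local spinlock held. A time/process prefix is prepended
 * only when the previous chunk ended with a newline, so continuation
 * output is appended to the current line instead of starting a new one.
 */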
static __always_inline void __trace_console_locked(void *unused,
const char *text, size_t len)
{
static char last_char = '\n';
size_t text_len;
if (!__log_buf_is_acceptable(text, len))
return;
if (unlikely(len == 0 || text[0] == '\0'))
goto skip;
if (likely(last_char == '\n')) {
char prefix[PREFIX_MAX];
size_t prefix_len;
prefix_len = __trace_console_print_prefix(prefix, sizeof(prefix));
__log_buf_write(prefix, prefix_len);
}
__log_buf_write(text, len);
skip:
text_len = len + strnlen(&text[len], 16);
last_char = text[text_len - 1];
}
static void __trace_console(void *unused, const char *text, size_t len)
{
static DEFINE_SPINLOCK(lock);
unsigned long flags;
spin_lock_irqsave(&lock, flags);
__trace_console_locked(unused, text, len);
spin_unlock_irqrestore(&lock, flags);
}
static int log_buf_logger_tp_console_probe(struct log_buf_drvdata *drvdata)
{
return register_trace_console(__trace_console, NULL);
}
static void log_buf_logger_tp_console_remove(struct log_buf_drvdata *drvdata)
{
unregister_trace_console(__trace_console, NULL);
}
static const struct log_buf_logger log_buf_tp_console = {
.probe = log_buf_logger_tp_console_probe,
.remove = log_buf_logger_tp_console_remove,
};
const struct log_buf_logger *__log_buf_logger_tp_console_creator(void)
{
return &log_buf_tp_console;
}

View File

@@ -0,0 +1,104 @@
// SPDX-License-Identifier: GPL-2.0
/*
* COPYRIGHT(C) 2021-2024 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include <printk_ringbuffer.h>
#include <trace/hooks/logbuf.h>
#include "sec_log_buf.h"
static size_t print_time(u64 ts, char *buf)
{
unsigned long rem_nsec = do_div(ts, 1000000000);
return sprintf(buf, "\n[%5lu.%06lu]",
(unsigned long)ts, rem_nsec / 1000);
}
static size_t print_process(char *buf)
{
return sprintf(buf, "%c[%1d:%15s:%5d] ",
in_interrupt() ? 'I' : ' ',
smp_processor_id(),
current->comm,
task_pid_nr(current));
}
static size_t info_print_prefix(const struct printk_info *info, char *buf)
{
size_t len = 0;
len += print_time(info->ts_nsec, buf + len);
len += print_process(buf + len);
return len;
}
#define PREFIX_MAX 64
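/* Android vendor hook: copy each committed printk record into the
 * carve-out, rebuilding a timestamp/process prefix from the record's
 * printk_info.
 */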
static void __trace_android_vh_logbuf(void *unused,
struct printk_ringbuffer *rb, struct printk_record *r)
{
size_t text_len = r->info->text_len;
size_t buf_size = r->text_buf_size;
char *text = r->text_buf;
char prefix[PREFIX_MAX];
size_t prefix_len;
if (text_len > buf_size)
text_len = buf_size;
if (!__log_buf_is_acceptable(text, text_len))
return;
prefix_len = info_print_prefix(r->info, prefix);
__log_buf_write(prefix, prefix_len);
__log_buf_write(text, text_len);
}
static void __trace_android_vh_logbuf_pr_cont(void *unused,
struct printk_record *r, size_t text_len)
{
size_t offset = r->info->text_len - text_len;
char *text = &r->text_buf[offset];
if (!__log_buf_is_acceptable(text, text_len))
return;
__log_buf_write(text, text_len);
}
static int log_buf_logger_vh_logbuf_probe(struct log_buf_drvdata *drvdata)
{
int err;
err = register_trace_android_vh_logbuf(__trace_android_vh_logbuf,
NULL);
if (err)
return err;
return register_trace_android_vh_logbuf_pr_cont(
__trace_android_vh_logbuf_pr_cont, NULL);
}
static void log_buf_logger_vh_logbuf_remove(struct log_buf_drvdata *drvdata)
{
unregister_trace_android_vh_logbuf(__trace_android_vh_logbuf, NULL);
unregister_trace_android_vh_logbuf_pr_cont(
__trace_android_vh_logbuf_pr_cont, NULL);
}
static const struct log_buf_logger log_buf_logger_vh_logbuf = {
.probe = log_buf_logger_vh_logbuf_probe,
.remove = log_buf_logger_vh_logbuf_remove,
};
const struct log_buf_logger *__log_buf_logger_vh_logbuf_creator(void)
{
return &log_buf_logger_vh_logbuf;
}

View File

@@ -0,0 +1,21 @@
config SEC_PMSG
tristate "PSTORE backend for saving android platform log"
help
TODO: help is not ready.
config SEC_PMSG_TEST_FOR_ON_DEVICE
tristate "KUnit test for sec_pmsg_test"
depends on KUNIT
depends on SEC_PMSG
help
TODO: Describe config fully.
If you run this test driver on a device, this config SHOULD be set to 'm' to build the test driver as a module.
config SEC_PMSG_TEST_FOR_ONLY_UML
tristate "KUnit test for sec_pmsg_test"
depends on KUNIT
depends on UML
depends on SEC_PMSG
help
TODO: Describe config fully.
It is recommended to set this config to 'y'.

View File

@@ -0,0 +1,3 @@
obj-$(CONFIG_SEC_PMSG) += sec_pmsg.o
GCOV_PROFILE_sec_pmsg.o := $(CONFIG_KUNIT)

View File

@@ -0,0 +1,707 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* COPYRIGHT(C) 2016-2023 Samsung Electronics Co., Ltd. All Right Reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ":%s() " fmt, __func__
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pstore.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/samsung/sec_kunit.h>
#include <linux/samsung/sec_of.h>
#include <linux/samsung/debug/sec_boot_stat.h>
#include <linux/samsung/debug/sec_debug.h>
#include "sec_pmsg.h"
/* This defines are for PSTORE */
#define SS_LOGGER_LEVEL_HEADER (1)
#define SS_LOGGER_LEVEL_PREFIX (2)
#define SS_LOGGER_LEVEL_TEXT (3)
#define SS_LOGGER_LEVEL_MAX (4)
#define SS_LOGGER_SKIP_COUNT (4)
#define SS_LOGGER_STRING_PAD (1)
#define SS_LOGGER_HEADER_SIZE (80)
#define SS_LOG_ID_MAIN (0)
#define SS_LOG_ID_RADIO (1)
#define SS_LOG_ID_EVENTS (2)
#define SS_LOG_ID_SYSTEM (3)
#define SS_LOG_ID_CRASH (4)
#define SS_LOG_ID_KERNEL (5)
static struct pmsg_drvdata *sec_pmsg __read_mostly;
static char *pmsg_buf __read_mostly;
static size_t pmsg_size __read_mostly;
static size_t pmsg_idx;
static void (*__pmsg_memcpy_toio)(void *, const void *, size_t) __read_mostly;
static void notrace ____pmsg_memcpy_toio(void *dst, const void *src, size_t cnt)
{
memcpy_toio(dst, src, cnt);
}
static void notrace ____pmsg_memcpy(void *dst, const void *src, size_t cnt)
{
memcpy(dst, src, cnt);
}
static void notrace ____pmsg_memcpy_dummy(void *dst, const void *src, size_t cnt)
{
}
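/* Append to the pmsg carve-out, treating it as a ring buffer: fill up to
 * the end of the region first and wrap any remainder to the start.
 */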
static inline void __pmsg_logger(const char *buf, size_t size)
{
size_t f_len, s_len, remain_space;
size_t idx;
idx = pmsg_idx % pmsg_size;
remain_space = pmsg_size - idx;
f_len = min(size, remain_space);
__pmsg_memcpy_toio(&(pmsg_buf[idx]), buf, f_len);
s_len = size - f_len;
if (unlikely(s_len))
__pmsg_memcpy_toio(pmsg_buf, &buf[f_len], s_len);
pmsg_idx += size;
}
__ss_static __ss_always_inline
int ____logger_level_header(struct pmsg_logger *logger,
struct logger_level_header_ctx *llhc)
{
u64 tv_kernel = llhc->tv_kernel;
u64 rem_nsec;
struct tm tm_buf;
rem_nsec = do_div(tv_kernel, 1000000000);
time64_to_tm(logger->tv_sec, 0, &tm_buf);
return scnprintf(llhc->buffer, SS_LOGGER_HEADER_SIZE,
"\n[%5llu.%06llu][%d:%16s] %02d-%02d "
"%02d:%02d:%02d.%03d %5d %5d ",
(unsigned long long)tv_kernel,
(unsigned long long)rem_nsec / 1000,
llhc->cpu, llhc->comm,
tm_buf.tm_mon + 1, tm_buf.tm_mday,
tm_buf.tm_hour, tm_buf.tm_min,
tm_buf.tm_sec, logger->tv_nsec / 1000000,
logger->pid, logger->tid);
}
static inline void __logger_level_header(struct pmsg_logger *logger,
char *buffer, size_t count)
{
struct logger_level_header_ctx _llhc;
struct logger_level_header_ctx *llhc = &_llhc;
int buffer_len;
if (IS_ENABLED(CONFIG_SEC_PMSG_USE_EVENT_LOG)
&& logger->id == SS_LOG_ID_EVENTS)
return;
llhc->cpu = raw_smp_processor_id();
llhc->comm = current->comm;
llhc->tv_kernel = local_clock();
llhc->buffer = buffer;
llhc->count = count;
buffer_len = ____logger_level_header(logger, llhc);
__pmsg_logger(buffer, buffer_len - 1);
}
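/* Map the priority byte stored in logger->__msg to a single printable
 * character ('V', 'D', 'I', 'W', 'E', ...); out-of-range values become '?'.
 */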
__ss_static __ss_inline char ____logger_level_prefix(struct pmsg_logger *logger)
{
const char *prio_magic = "!.VDIWEFS";
const size_t prio_magic_len = sizeof("!.VDIWEFS") - 1;
size_t prio = (size_t)logger->__msg;
return prio < prio_magic_len ? prio_magic[prio] : '?';
}
static inline void __logger_level_prefix(struct pmsg_logger *logger,
char *buffer, size_t count)
{
if (IS_ENABLED(CONFIG_SEC_PMSG_USE_EVENT_LOG) &&
logger->id == SS_LOG_ID_EVENTS)
return;
buffer[0] = ____logger_level_prefix(logger);
if (IS_ENABLED(CONFIG_SEC_PMSG_USE_EVENT_LOG))
logger->__msg = 0xff;
__pmsg_logger(buffer, 1);
}
static inline void __ss_logger_level_text_event_log(struct pmsg_logger *logger,
char *buffer, size_t count)
{
/* TODO: CONFIG_SEC_PMSG_USE_EVENT_LOG (CONFIG_SEC_EVENT_LOG in
* the legacy implementation) is not used yet.
* It may be deprecated; it will be implemented if it is required.
*/
}
static inline void ____logger_level_text(struct pmsg_logger *logger,
char *buffer, size_t count)
{
char *eatnl = &buffer[count - SS_LOGGER_STRING_PAD];
if (count == SS_LOGGER_SKIP_COUNT && *eatnl != '\0')
return;
if (count > 1 && *(uint16_t*)buffer == *(uint16_t *)"!@") {
/* To prevent potential buffer overrun
* put a null at the end of the buffer.
*/
buffer[count - 1] = '\0';
/* FIXME: print without a module and a function name */
printk(KERN_INFO "%s\n", buffer);
sec_boot_stat_add(buffer);
}
__pmsg_logger(buffer, count - 1);
}
static inline void __logger_level_text(struct pmsg_logger *logger,
char *buffer, size_t count)
{
if (unlikely(logger->id == SS_LOG_ID_EVENTS)) {
__ss_logger_level_text_event_log(logger, buffer, count);
return;
}
____logger_level_text(logger, buffer, count);
}
static inline int __logger_combine_pmsg(struct pmsg_logger *logger,
char *buffer, size_t count, unsigned int level)
{
switch (level) {
case SS_LOGGER_LEVEL_HEADER:
__logger_level_header(logger, buffer, count);
break;
case SS_LOGGER_LEVEL_PREFIX:
__logger_level_prefix(logger, buffer, count);
break;
case SS_LOGGER_LEVEL_TEXT:
__logger_level_text(logger, buffer, count);
break;
default:
pr_warn("unknown logger level : %u\n", level);
break;
}
__pmsg_logger(" ", 1);
return 0;
}
static __always_inline void __logger_write_user_pmsg_log_header(
struct pmsg_logger *logger, char *buffer, size_t count)
{
struct ss_pmsg_log_header_t *pmsg_header =
(struct ss_pmsg_log_header_t *)buffer;
if (pmsg_header->magic != 'l') {
__logger_combine_pmsg(logger, buffer, count, SS_LOGGER_LEVEL_TEXT);
} else {
logger->pid = pmsg_header->pid;
logger->uid = pmsg_header->uid;
logger->len = pmsg_header->len;
}
}
static __always_inline void __logger_write_user_android_log_header(
struct pmsg_logger *logger, char *buffer, size_t count)
{
struct ss_android_log_header_t *header =
(struct ss_android_log_header_t *)buffer;
logger->id = header->id;
logger->tid = header->tid;
logger->tv_sec = header->tv_sec;
logger->tv_nsec = header->tv_nsec;
if (logger->id > 7)
__logger_combine_pmsg(logger, buffer, count, SS_LOGGER_LEVEL_TEXT);
else
__logger_combine_pmsg(logger, buffer, count, SS_LOGGER_LEVEL_HEADER);
}
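/* Copy a user-space pmsg chunk and dispatch on its size: chunks matching
 * one of the known header layouts update the logger state, a single byte
 * carries the priority, and everything else is treated as log text.
 */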
static __always_inline int __pmsg_write_user(struct pstore_record *record,
const char __user *buf, size_t count)
{
struct pmsg_drvdata *drvdata = record->psi->data;
struct pmsg_logger *logger = drvdata->logger;
char *big_buffer = NULL;
char *buffer;
int err;
if (unlikely(count > MAX_BUFFER_SIZE)) {
big_buffer = kmalloc(count, GFP_KERNEL);
if (unlikely(!big_buffer))
return -ENOMEM;
buffer = big_buffer;
} else {
struct pmsg_buffer *buf =
per_cpu_ptr(drvdata->buf, raw_smp_processor_id());
buffer = &buf->buffer[0];
}
err = __copy_from_user(buffer, buf, count);
if (unlikely(err))
return -EFAULT;
switch (count) {
case sizeof(struct ss_pmsg_log_header_t):
__logger_write_user_pmsg_log_header(logger, buffer, count);
break;
case sizeof(struct ss_android_log_header_t):
__logger_write_user_android_log_header(logger, buffer, count);
break;
case sizeof(unsigned char):
logger->__msg = buffer[0];
__logger_combine_pmsg(logger, buffer, count, SS_LOGGER_LEVEL_PREFIX);
break;
default:
__logger_combine_pmsg(logger, buffer, count, SS_LOGGER_LEVEL_TEXT);
break;
}
kfree(big_buffer);
return 0;
}
static int notrace sec_pmsg_write_user(struct pstore_record *record,
const char __user *buf)
{
if (unlikely(record->type != PSTORE_TYPE_PMSG))
return -EINVAL;
return __pmsg_write_user(record, buf, record->size);
}
static ssize_t notrace sec_pmsg_read(struct pstore_record *record)
{
/* FIXME: I don't do anything. */
return 0;
}
static int notrace sec_pmsg_write(struct pstore_record *record)
{
/* FIXME: I don't do anything. */
return 0;
}
static struct pstore_info sec_pmsg_pstore = {
.owner = THIS_MODULE,
.name = "sec,pstore_pmsg",
.read = sec_pmsg_read,
.write = sec_pmsg_write,
.write_user = sec_pmsg_write_user,
.flags = PSTORE_FLAGS_PMSG,
};
static noinline int __pmsg_parse_dt_memory_region(struct builder *bd,
struct device_node *np)
{
struct pmsg_drvdata *drvdata =
container_of(bd, struct pmsg_drvdata, bd);
struct device *dev = bd->dev;
struct device_node *mem_np;
struct reserved_mem *rmem;
mem_np = of_parse_phandle(np, "memory-region", 0);
if (!mem_np)
return -EINVAL;
rmem = of_reserved_mem_lookup(mem_np);
if (!rmem) {
dev_warn(dev, "failed to get a reserved memory (%s)\n",
mem_np->name);
return -EFAULT;
}
drvdata->rmem = rmem;
return 0;
}
static bool __pmsg_is_in_reserved_mem_bound(
const struct reserved_mem *rmem,
phys_addr_t base, phys_addr_t size)
{
phys_addr_t rmem_base = rmem->base;
phys_addr_t rmem_end = rmem_base + rmem->size - 1;
phys_addr_t end = base + size - 1;
if ((base >= rmem_base) && (end <= rmem_end))
return true;
return false;
}
static int __pmsg_use_partial_reserved_mem(
struct pmsg_drvdata *drvdata, struct device_node *np)
{
struct reserved_mem *rmem = drvdata->rmem;
phys_addr_t base;
phys_addr_t size;
int err;
err = sec_of_parse_reg_prop(np, &base, &size);
if (err)
return err;
if (!__pmsg_is_in_reserved_mem_bound(rmem, base, size))
return -ERANGE;
drvdata->paddr = base;
drvdata->size = size;
return 0;
}
static int __pmsg_use_entire_reserved_mem(
struct pmsg_drvdata *drvdata)
{
struct reserved_mem *rmem = drvdata->rmem;
drvdata->paddr = rmem->base;
drvdata->size = rmem->size;
return 0;
}
static noinline int __pmsg_parse_dt_splitted_reserved_mem(struct builder *bd,
struct device_node *np)
{
struct pmsg_drvdata *drvdata =
container_of(bd, struct pmsg_drvdata, bd);
int err;
if (of_property_read_bool(np, "sec,use-partial_reserved_mem"))
err = __pmsg_use_partial_reserved_mem(drvdata, np);
else
err = __pmsg_use_entire_reserved_mem(drvdata);
if (err)
return -EFAULT;
return 0;
}
static noinline int __pmsg_parse_dt_test_no_map(struct builder *bd,
struct device_node *np)
{
struct pmsg_drvdata *drvdata =
container_of(bd, struct pmsg_drvdata, bd);
struct device_node *mem_np;
mem_np = of_parse_phandle(np, "memory-region", 0);
if (!mem_np)
return -EINVAL;
if (!of_property_read_bool(mem_np, "no-map")) {
pmsg_buf = phys_to_virt(drvdata->paddr);
__pmsg_memcpy_toio = ____pmsg_memcpy;
drvdata->nomap = false;
} else {
__pmsg_memcpy_toio = ____pmsg_memcpy_toio;
drvdata->nomap = true;
}
return 0;
}
#if IS_BUILTIN(CONFIG_SEC_PMSG)
static __always_inline unsigned long __free_reserved_area(void *start, void *end, int poison, const char *s)
{
return free_reserved_area(start, end, poison, s);
}
#else
/* FIXME: this is a copy of 'free_reserved_area' from 'page_alloc.c' */
static unsigned long __free_reserved_area(void *start, void *end, int poison, const char *s)
{
void *pos;
unsigned long pages = 0;
start = (void *)PAGE_ALIGN((unsigned long)start);
end = (void *)((unsigned long)end & PAGE_MASK);
for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
struct page *page = virt_to_page(pos);
void *direct_map_addr;
direct_map_addr = page_address(page);
direct_map_addr = kasan_reset_tag(direct_map_addr);
if ((unsigned int)poison <= 0xFF)
memset(direct_map_addr, poison, PAGE_SIZE);
free_reserved_page(page);
}
if (pages && s)
pr_info("Freeing %s memory: %ldK\n",
s, pages << (PAGE_SHIFT - 10));
return pages;
}
#endif
static void __pmsg_free_reserved_area(struct pmsg_drvdata *drvdata)
{
struct device *dev = drvdata->bd.dev;
uint8_t *start;
if (drvdata->nomap) {
dev_warn(dev, "reserved_mem has 'no-map' and can't be freed\n");
return;
}
start = (uint8_t *)phys_to_virt(drvdata->paddr);
__free_reserved_area(start, start + drvdata->size, -1, "sec_pmsg");
}
__ss_static int __pmsg_handle_dt_debug_level(struct pmsg_drvdata *drvdata,
struct device_node *np, unsigned int sec_dbg_level)
{
int err;
err = sec_of_test_debug_level(np, "sec,debug_level", sec_dbg_level);
if (err == -EINVAL) {
__pmsg_free_reserved_area(drvdata);
__pmsg_memcpy_toio = ____pmsg_memcpy_dummy;
return -EPERM;
}
return 0;
}
static noinline int __pmsg_parse_dt_check_debug_level(struct builder *bd,
struct device_node *np)
{
struct pmsg_drvdata *drvdata =
container_of(bd, struct pmsg_drvdata, bd);
unsigned int sec_dbg_level = sec_debug_level();
int err;
err = __pmsg_handle_dt_debug_level(drvdata, np, sec_dbg_level);
if (err)
dev_warn(bd->dev, "pmsg will not be stored\n");
return 0;
}
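/*
 * DT parsing steps run in order by sec_director_parse_dt(): find the
 * reserved memory, choose the partial or entire range, pick the copy path
 * from "no-map", then gate the whole feature on the debug level.
 */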
static const struct dt_builder __pmsg_dt_builder[] = {
DT_BUILDER(__pmsg_parse_dt_memory_region),
DT_BUILDER(__pmsg_parse_dt_splitted_reserved_mem),
DT_BUILDER(__pmsg_parse_dt_test_no_map),
DT_BUILDER(__pmsg_parse_dt_check_debug_level),
};
static noinline int __pmsg_parse_dt(struct builder *bd)
{
return sec_director_parse_dt(bd, __pmsg_dt_builder,
ARRAY_SIZE(__pmsg_dt_builder));
}
static noinline int __pmsg_prepare_logger(struct builder *bd)
{
struct pmsg_drvdata *drvdata =
container_of(bd, struct pmsg_drvdata, bd);
struct device *dev = bd->dev;
struct pmsg_logger *logger;
logger = devm_kmalloc(dev, sizeof(*drvdata->logger), GFP_KERNEL);
if (!logger)
return -ENOMEM;
drvdata->logger = logger;
return 0;
}
static noinline int __pmsg_prepare_buffer(struct builder *bd)
{
struct pmsg_drvdata *drvdata =
container_of(bd, struct pmsg_drvdata, bd);
struct device *dev = bd->dev;
struct pmsg_buffer *buf;
buf = devm_alloc_percpu(dev, struct pmsg_buffer);
if (!buf)
return -ENOMEM;
drvdata->buf = buf;
return 0;
}
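/*
 * For mapped regions pmsg_buf was already set from phys_to_virt() while
 * parsing the DT; otherwise the carveout is ioremap()ed here (with a bare
 * ioremap() fallback when CONFIG_HAS_IOMEM is disabled).
 */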
static void *__pmsg_ioremap(struct pmsg_drvdata *drvdata)
{
struct device *dev = drvdata->bd.dev;
if (pmsg_buf)
return pmsg_buf;
#if IS_ENABLED(CONFIG_HAS_IOMEM)
return devm_ioremap(dev, drvdata->paddr, drvdata->size);
#else
dev = dev; /* silence 'unused variable' warning when devm_ioremap() is not available */
return ioremap(drvdata->paddr, drvdata->size);
#endif
}
static noinline int __pmsg_prepare_carveout(struct builder *bd)
{
struct pmsg_drvdata *drvdata =
container_of(bd, struct pmsg_drvdata, bd);
pmsg_buf = __pmsg_ioremap(drvdata);
if (!pmsg_buf)
return -EFAULT;
pmsg_size = drvdata->size;
pmsg_idx = 0;
return 0;
}
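/*
 * Attach the driver data to the pstore_info declared earlier in this file
 * and register it with the pstore core; the pointer is cleared again when
 * the backend is unregistered on remove.
 */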
static noinline int __pmsg_pstore_register(struct builder *bd)
{
struct pmsg_drvdata *drvdata =
container_of(bd, struct pmsg_drvdata, bd);
sec_pmsg_pstore.data = drvdata;
drvdata->pstore = &sec_pmsg_pstore;
return pstore_register(drvdata->pstore);
}
static noinline void __pmsg_pstore_unregister(struct builder *bd)
{
struct pmsg_drvdata *drvdata =
container_of(bd, struct pmsg_drvdata, bd);
sec_pmsg_pstore.data = NULL;
pstore_unregister(drvdata->pstore);
}
static noinline int __pmsg_probe_epilog(struct builder *bd)
{
struct pmsg_drvdata *drvdata =
container_of(bd, struct pmsg_drvdata, bd);
struct device *dev = bd->dev;
dev_set_drvdata(dev, drvdata);
sec_pmsg = drvdata;
return 0;
}
static noinline void __pmsg_remove_prolog(struct builder *bd)
{
sec_pmsg = NULL;
}
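/*
 * Common probe/remove helpers following the builder pattern used across
 * these drivers: sec_director_probe_dev() runs every dev_builder step in
 * order, and sec_director_destruct_dev() invokes the paired teardown
 * callbacks on remove.
 */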
static int __pmsg_probe(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct device *dev = &pdev->dev;
struct pmsg_drvdata *drvdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->bd.dev = dev;
return sec_director_probe_dev(&drvdata->bd, builder, n);
}
static int __pmsg_remove(struct platform_device *pdev,
const struct dev_builder *builder, ssize_t n)
{
struct pmsg_drvdata *drvdata = platform_get_drvdata(pdev);
sec_director_destruct_dev(&drvdata->bd, builder, n, n);
return 0;
}
static const struct dev_builder __pmsg_dev_builder[] = {
DEVICE_BUILDER(__pmsg_parse_dt, NULL),
DEVICE_BUILDER(__pmsg_prepare_logger, NULL),
DEVICE_BUILDER(__pmsg_prepare_buffer, NULL),
DEVICE_BUILDER(__pmsg_prepare_carveout, NULL),
DEVICE_BUILDER(__pmsg_pstore_register, __pmsg_pstore_unregister),
DEVICE_BUILDER(__pmsg_probe_epilog, __pmsg_remove_prolog),
};
static int sec_pmsg_probe(struct platform_device *pdev)
{
return __pmsg_probe(pdev, __pmsg_dev_builder,
ARRAY_SIZE(__pmsg_dev_builder));
}
static int sec_pmsg_remove(struct platform_device *pdev)
{
return __pmsg_remove(pdev, __pmsg_dev_builder,
ARRAY_SIZE(__pmsg_dev_builder));
}
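/*
 * Matched via the "samsung,pstore_pmsg" compatible. A minimal, purely
 * illustrative device tree node (label and phandle names are hypothetical)
 * might look like:
 *
 *	sec_pmsg: pstore_pmsg {
 *		compatible = "samsung,pstore_pmsg";
 *		memory-region = <&sec_pmsg_rmem>;
 *	};
 *
 * The optional "sec,use-partial_reserved_mem" and "sec,debug_level"
 * properties parsed above further constrain which part of the region is
 * used and whether pmsg is enabled at the current debug level.
 */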
static const struct of_device_id sec_pmsg_match_table[] = {
{ .compatible = "samsung,pstore_pmsg" },
{},
};
MODULE_DEVICE_TABLE(of, sec_pmsg_match_table);
static struct platform_driver sec_pmsg_driver = {
.driver = {
.name = "sec,pmsg",
.of_match_table = of_match_ptr(sec_pmsg_match_table),
},
.probe = sec_pmsg_probe,
.remove = sec_pmsg_remove,
};
static int __init sec_pmsg_init(void)
{
return platform_driver_register(&sec_pmsg_driver);
}
module_init(sec_pmsg_init);
static void __exit sec_pmsg_exit(void)
{
platform_driver_unregister(&sec_pmsg_driver);
}
module_exit(sec_pmsg_exit);
MODULE_AUTHOR("Samsung Electronics");
MODULE_DESCRIPTION("pstore backend for saving Android platform logs");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,61 @@
#ifndef __INTERNAL__SEC_PMSG_H__
#define __INTERNAL__SEC_PMSG_H__
#include <linux/of_reserved_mem.h>
#include <linux/samsung/builder_pattern.h>
#define MAX_BUFFER_SIZE 1024
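/*
 * Record headers as written by user space through the pstore pmsg device;
 * the layout appears to mirror the Android liblog pmsg format, so both
 * structs are kept packed to match it byte for byte.
 */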
struct ss_pmsg_log_header_t {
uint8_t magic;
uint16_t len;
uint16_t uid;
uint16_t pid;
} __attribute__((__packed__));
struct ss_android_log_header_t {
unsigned char id;
uint16_t tid;
int32_t tv_sec;
int32_t tv_nsec;
} __attribute__((__packed__));
struct pmsg_logger {
uint16_t len;
uint16_t id;
uint16_t pid;
uint16_t tid;
uint16_t uid;
uint16_t level;
int32_t tv_sec;
int32_t tv_nsec;
union {
char msg[0];
char __msg; /* reserve at least one byte for single-character writes from user space */
};
};
struct pmsg_buffer {
char buffer[MAX_BUFFER_SIZE];
};
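/*
 * Per-device state assembled during probe: the reserved-memory carveout
 * (rmem/paddr/size/nomap), the registered pstore backend, the record
 * header scratch area and the per-CPU staging buffers.
 */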
struct pmsg_drvdata {
struct builder bd;
struct reserved_mem *rmem;
phys_addr_t paddr;
size_t size;
bool nomap;
struct pstore_info *pstore;
struct pmsg_logger *logger;
struct pmsg_buffer __percpu *buf;
};
struct logger_level_header_ctx {
int cpu;
const char *comm;
u64 tv_kernel;
char *buffer;
size_t count;
};
#endif /* __INTERNAL__SEC_PMSG_H__ */

View File

@@ -0,0 +1,14 @@
source "drivers/samsung/debug/qcom/debug/Kconfig"
source "drivers/samsung/debug/qcom/reboot_cmd/Kconfig"
source "drivers/samsung/debug/qcom/dbg_partition/Kconfig"
source "drivers/samsung/debug/qcom/reboot_reason/Kconfig"
source "drivers/samsung/debug/qcom/upload_cause/Kconfig"
source "drivers/samsung/debug/qcom/logger/Kconfig"
source "drivers/samsung/debug/qcom/soc_id/Kconfig"
source "drivers/samsung/debug/qcom/wdt_core/Kconfig"
source "drivers/samsung/debug/qcom/summary/Kconfig"
source "drivers/samsung/debug/qcom/user_reset/Kconfig"
source "drivers/samsung/debug/qcom/smem/Kconfig"
source "drivers/samsung/debug/qcom/hw_param/Kconfig"
source "drivers/samsung/debug/qcom/rst_exinfo/Kconfig"
source "drivers/samsung/debug/qcom/mock/Kconfig"

View File

@@ -0,0 +1,14 @@
obj-$(CONFIG_SEC_QC_DEBUG) += debug/
obj-$(CONFIG_SEC_QC_RBCMD) += reboot_cmd/
obj-$(CONFIG_SEC_QC_DEBUG_PARTITION) += dbg_partition/
obj-$(CONFIG_SEC_QC_QCOM_REBOOT_REASON) += reboot_reason/
obj-$(CONFIG_SEC_QC_UPLOAD_CAUSE) += upload_cause/
obj-$(CONFIG_SEC_QC_LOGGER) += logger/
obj-$(CONFIG_SEC_QC_SOC_ID) += soc_id/
obj-$(CONFIG_SEC_QC_QCOM_WDT_CORE) += wdt_core/
obj-$(CONFIG_SEC_QC_SUMMARY) += summary/
obj-$(CONFIG_SEC_QC_USER_RESET) += user_reset/
obj-$(CONFIG_SEC_QC_SMEM) += smem/
obj-$(CONFIG_SEC_QC_HW_PARAM) += hw_param/
obj-$(CONFIG_SEC_QC_RST_EXINFO) += rst_exinfo/
obj-$(CONFIG_SEC_QC_MOCK) += mock/

Some files were not shown because too many files have changed in this diff.