Add Samsung-specific changes

2025-08-11 14:29:00 +02:00
parent c66122e619
commit 4d134a1294
2688 changed files with 1127995 additions and 11475 deletions


@@ -35,6 +35,7 @@ config CORESIGHT_LINK_AND_SINK_TMC
tristate "Coresight generic TMC driver"
depends on CORESIGHT_LINKS_AND_SINKS
select CORESIGHT_CSR
help
This enables support for the Trace Memory Controller driver.
Depending on its configuration the device can act as a link (embedded
@@ -188,6 +189,54 @@ config CORESIGHT_CTI_INTEGRATION_REGS
registers are not used in normal operation and can leave devices in
an inconsistent state.
config CORESIGHT_TRACE_NOC
tristate "Coresight Trace Noc driver"
help
This driver provides support for the Trace NoC component.
Trace NoC is an interconnect used to collect trace from various
subsystems and transport it to the QDSS trace sink. It sits in the
different tiles of the SoC, aggregates the trace local to each
tile, and transports it to another tile or eventually to the QDSS trace sink.
config CORESIGHT_TGU
tristate "CoreSight Trigger Generation Unit driver"
help
This driver provides support for the Trigger Generation Unit that is
used to detect patterns or sequences on a given set of signals.
TGU is used to monitor a particular bus within a given region to
detect illegal transaction sequences or slave responses. It is also
used to monitor a data stream to detect protocol violations and to
provide a trigger point for centering data around a specific event
within the trace data buffer.
config CORESIGHT_CSR
tristate "CoreSight Slave Register driver"
help
This driver provides support for the CoreSight Slave Register block
that hosts miscellaneous configuration registers.
Those configuration registers can be used to control various
CoreSight configurations.
config CORESIGHT_REMOTE_ETM
tristate "Remote processor ETM trace support"
depends on QCOM_QMI_HELPERS
help
Enables support for ETM trace collection on a remote processor using
the CoreSight framework. Enabling this will allow turning on ETM
tracing on the remote processor via sysfs by configuring the required
CoreSight components.
config CORESIGHT_UETM
tristate "CoreSight UETM driver"
depends on QTI_SCMI_VENDOR_PROTOCOL
help
This driver provides support for CoreSight UETM. It gathers
UETM configuration parameters from userspace via sysfs and sends them to
CPUCP via SCMI.
To compile this driver as a module, choose M here: the module will be
called coresight-uetm.
config CORESIGHT_TRBE
tristate "Trace Buffer Extension (TRBE) driver"
depends on ARM64 && CORESIGHT_SOURCE_ETM4X
@@ -245,6 +294,16 @@ config CORESIGHT_DUMMY
other subsystem and use Linux drivers to configure rest of trace
path.
To compile this driver as a module, choose M here: the module will be
called coresight-dummy.
config CORESIGHT_QMI
tristate "CORESIGHT QMI support"
help
Enables support for sending commands to a subsystem via QMI. This is
primarily used for enabling remote ETM, assigning the ETR, and assigning
the ATID for a subsystem source.
To compile this driver as a module, choose M here: the module will be
called coresight-qmi.
endif


@@ -2,6 +2,9 @@
#
# Makefile for CoreSight drivers.
#
CFLAGS_coresight-stm.o := -D__DISABLE_TRACE_MMIO__
obj-$(CONFIG_CORESIGHT) += coresight.o
coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \
coresight-sysfs.o coresight-syscfg.o coresight-config.o \
@@ -9,7 +12,8 @@ coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \
coresight-syscfg-configfs.o coresight-trace-id.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
coresight-tmc-y := coresight-tmc-core.o coresight-tmc-etf.o \
coresight-tmc-etr.o
coresight-tmc-etr.o coresight-byte-cntr.o \
coresight-tmc-usb.o
obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
@@ -30,4 +34,10 @@ obj-$(CONFIG_CORESIGHT_TPDA) += coresight-tpda.o
coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \
coresight-cti-sysfs.o
obj-$(CONFIG_ULTRASOC_SMB) += ultrasoc-smb.o
obj-$(CONFIG_CORESIGHT_TRACE_NOC) += coresight-trace-noc.o
obj-$(CONFIG_CORESIGHT_TGU) += coresight-tgu.o
obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
obj-$(CONFIG_CORESIGHT_DUMMY) += coresight-dummy.o
obj-$(CONFIG_CORESIGHT_REMOTE_ETM) += coresight-remote-etm.o coresight-tmc-sec.o
obj-$(CONFIG_CORESIGHT_QMI) += coresight-qmi.o
obj-$(CONFIG_CORESIGHT_UETM) += coresight-uetm.o


@@ -0,0 +1,401 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/of_irq.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/property.h>
#include "coresight-priv.h"
#include "coresight-byte-cntr.h"
#include "coresight-common.h"
#define CSR_BYTECNTVAL (0x06C)
static void tmc_etr_read_bytes(struct byte_cntr *byte_cntr_data, long offset,
size_t bytes, size_t *len, char **bufp)
{
struct tmc_drvdata *tmcdrvdata = byte_cntr_data->tmcdrvdata;
struct etr_buf *etr_buf = tmcdrvdata->sysfs_buf;
size_t actual;
if (*len >= bytes)
*len = bytes;
else if (((uint32_t)offset % bytes) + *len > bytes)
*len = bytes - ((uint32_t)offset % bytes);
actual = tmc_etr_buf_get_data(etr_buf, offset, *len, bufp);
*len = actual;
if (actual == bytes || (actual + (uint32_t)offset) % bytes == 0)
atomic_dec(&byte_cntr_data->irq_cnt);
}
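/*
 * A minimal standalone sketch of the clamping above (the helper name is
 * illustrative, not part of this patch): each read is limited so that it
 * never crosses a block_size boundary.
 */
static size_t clamp_to_block(size_t len, uint32_t offset, size_t block)
{
	if (len >= block)
		return block;			/* at most one whole block */
	if ((offset % block) + len > block)
		return block - (offset % block); /* stop at the block end */
	return len;
}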
static irqreturn_t etr_handler(int irq, void *data)
{
struct byte_cntr *byte_cntr_data = data;
struct tmc_drvdata *tmcdrvdata = byte_cntr_data->tmcdrvdata;
if (tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
atomic_inc(&byte_cntr_data->irq_cnt);
wake_up(&byte_cntr_data->usb_wait_wq);
} else if (tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
atomic_inc(&byte_cntr_data->irq_cnt);
wake_up(&byte_cntr_data->wq);
}
byte_cntr_data->total_irq++;
return IRQ_HANDLED;
}
static long tmc_etr_flush_remaining_bytes(struct tmc_drvdata *tmcdrvdata, long offset,
size_t len, char **bufpp)
{
long req_size, actual = 0;
struct etr_buf *etr_buf;
struct device *dev;
struct byte_cntr *byte_cntr_data;
if (!tmcdrvdata)
return -EINVAL;
byte_cntr_data = tmcdrvdata->byte_cntr;
if (!byte_cntr_data)
return -EINVAL;
etr_buf = tmcdrvdata->sysfs_buf;
dev = &tmcdrvdata->csdev->dev;
req_size = ((byte_cntr_data->rwp_offset < offset) ? tmcdrvdata->size : 0) +
byte_cntr_data->rwp_offset - offset;
if (req_size > len)
req_size = len;
if (req_size > 0)
actual = tmc_etr_buf_get_data(etr_buf, offset, req_size, bufpp);
return actual;
}
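/*
 * A sketch of just the req_size calculation above (hypothetical helper):
 * bytes still pending between the read offset and the last RWP snapshot,
 * with wrap-around in the circular ETR buffer. E.g. size = 0x1000,
 * offset = 0xF00, rwp = 0x100: the writer wrapped, 0x200 bytes pending.
 */
static long pending_bytes(unsigned long rwp, unsigned long offset,
			  unsigned long size)
{
	return ((rwp < offset) ? size : 0) + rwp - offset;
}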
static ssize_t tmc_etr_byte_cntr_read(struct file *fp, char __user *data,
size_t len, loff_t *ppos)
{
struct byte_cntr *byte_cntr_data = fp->private_data;
struct tmc_drvdata *tmcdrvdata = byte_cntr_data->tmcdrvdata;
char *bufp = NULL;
long actual;
int ret = 0;
if (!data)
return -EINVAL;
mutex_lock(&byte_cntr_data->byte_cntr_lock);
if (!byte_cntr_data->read_active) {
actual = tmc_etr_flush_remaining_bytes(tmcdrvdata,
byte_cntr_data->offset, len, &bufp);
if (actual > 0) {
len = actual;
goto copy;
} else {
ret = -EINVAL;
goto err0;
}
}
if (byte_cntr_data->enable) {
if (!atomic_read(&byte_cntr_data->irq_cnt)) {
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
if (wait_event_interruptible(byte_cntr_data->wq,
atomic_read(&byte_cntr_data->irq_cnt) > 0
|| !byte_cntr_data->enable))
return -ERESTARTSYS;
mutex_lock(&byte_cntr_data->byte_cntr_lock);
if (!byte_cntr_data->read_active) {
actual = tmc_etr_flush_remaining_bytes(tmcdrvdata,
byte_cntr_data->offset, len, &bufp);
if (actual > 0) {
len = actual;
goto copy;
} else {
ret = -EINVAL;
goto err0;
}
}
}
tmc_etr_read_bytes(byte_cntr_data, byte_cntr_data->offset,
byte_cntr_data->block_size, &len, &bufp);
} else {
actual = tmc_etr_flush_remaining_bytes(tmcdrvdata,
byte_cntr_data->offset, len, &bufp);
if (actual > 0) {
len = actual;
goto copy;
} else {
ret = -EINVAL;
goto err0;
}
}
copy:
if (copy_to_user(data, bufp, len)) {
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
dev_dbg(&tmcdrvdata->csdev->dev,
"%s: copy_to_user failed\n", __func__);
return -EFAULT;
}
byte_cntr_data->total_size += len;
if (byte_cntr_data->offset + len >= tmcdrvdata->size)
byte_cntr_data->offset = 0;
else
byte_cntr_data->offset += len;
goto out;
err0:
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
return ret;
out:
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
return len;
}
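/*
 * The offset bookkeeping at the end of the read path treats the ETR
 * buffer as circular; a minimal sketch of that advance (hypothetical
 * helper, mirroring the logic above).
 */
static unsigned long advance_offset(unsigned long offset, size_t len,
				    unsigned long buf_size)
{
	if (offset + len >= buf_size)
		return 0;		/* wrap to the start of the buffer */
	return offset + len;
}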
void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data)
{
if (!byte_cntr_data)
return;
mutex_lock(&byte_cntr_data->byte_cntr_lock);
if (byte_cntr_data->block_size == 0
|| byte_cntr_data->read_active) {
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
return;
}
atomic_set(&byte_cntr_data->irq_cnt, 0);
byte_cntr_data->enable = true;
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
}
EXPORT_SYMBOL(tmc_etr_byte_cntr_start);
void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data)
{
if (!byte_cntr_data)
return;
mutex_lock(&byte_cntr_data->byte_cntr_lock);
byte_cntr_data->rwp_offset =
tmc_get_rwp_offset(byte_cntr_data->tmcdrvdata);
byte_cntr_data->enable = false;
byte_cntr_data->read_active = false;
atomic_set(&byte_cntr_data->irq_cnt, 0);
wake_up(&byte_cntr_data->wq);
coresight_csr_set_byte_cntr(byte_cntr_data->csr,
byte_cntr_data->irqctrl_offset, 0);
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
}
EXPORT_SYMBOL(tmc_etr_byte_cntr_stop);
static int tmc_etr_byte_cntr_release(struct inode *in, struct file *fp)
{
struct byte_cntr *byte_cntr_data = fp->private_data;
struct device *dev = &byte_cntr_data->tmcdrvdata->csdev->dev;
mutex_lock(&byte_cntr_data->byte_cntr_lock);
byte_cntr_data->read_active = false;
atomic_set(&byte_cntr_data->irq_cnt, 0);
if (byte_cntr_data->enable)
coresight_csr_set_byte_cntr(byte_cntr_data->csr,
byte_cntr_data->irqctrl_offset, 0);
disable_irq_wake(byte_cntr_data->byte_cntr_irq);
dev_dbg(dev, "send data total size: %lld bytes, irq_cnt: %lld, offset: %lu, rwp_offset: %lu\n",
byte_cntr_data->total_size, byte_cntr_data->total_irq,
byte_cntr_data->offset, byte_cntr_data->rwp_offset);
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
return 0;
}
static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
{
struct byte_cntr *byte_cntr_data =
container_of(in->i_cdev, struct byte_cntr, dev);
struct tmc_drvdata *tmcdrvdata = byte_cntr_data->tmcdrvdata;
mutex_lock(&byte_cntr_data->byte_cntr_lock);
if (byte_cntr_data->read_active) {
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
return -EBUSY;
}
if (tmcdrvdata->mode != CS_MODE_SYSFS ||
!byte_cntr_data->block_size) {
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
return -EINVAL;
}
enable_irq_wake(byte_cntr_data->byte_cntr_irq);
/* The byte counter operates in units of 8 bytes, so program it to
 * raise an interrupt after every 'block_size' bytes of data
 * (e.g. block_size = 4096 programs a threshold of 512).
 */
coresight_csr_set_byte_cntr(byte_cntr_data->csr, byte_cntr_data->irqctrl_offset,
(byte_cntr_data->block_size) / 8);
fp->private_data = byte_cntr_data;
nonseekable_open(in, fp);
byte_cntr_data->enable = true;
byte_cntr_data->read_active = true;
byte_cntr_data->total_size = 0;
byte_cntr_data->offset = tmc_get_rwp_offset(tmcdrvdata);
byte_cntr_data->total_irq = 0;
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
return 0;
}
static const struct file_operations byte_cntr_fops = {
.owner = THIS_MODULE,
.open = tmc_etr_byte_cntr_open,
.read = tmc_etr_byte_cntr_read,
.release = tmc_etr_byte_cntr_release,
.llseek = no_llseek,
};
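/*
 * With these fops, userspace streams trace data by blocking in read()
 * until block_size bytes arrive. A minimal consumer sketch (illustrative,
 * not part of this patch; the node name comes from the "byte-cntr-name"
 * DT property, so /dev/byte-cntr and trace.bin are assumptions):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int in = open("/dev/byte-cntr", O_RDONLY);
 *		int out = open("trace.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);
 *
 *		if (in < 0 || out < 0)
 *			return 1;
 *		while ((n = read(in, buf, sizeof(buf))) > 0)
 *			write(out, buf, n);
 *		close(in);
 *		close(out);
 *		return 0;
 *	}
 */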
static int byte_cntr_register_chardev(struct byte_cntr *byte_cntr_data)
{
int ret;
unsigned int baseminor = 0;
unsigned int count = 1;
struct device *device;
dev_t dev;
ret = alloc_chrdev_region(&dev, baseminor, count, byte_cntr_data->name);
if (ret < 0) {
pr_err("alloc_chrdev_region failed %d\n", ret);
return ret;
}
cdev_init(&byte_cntr_data->dev, &byte_cntr_fops);
byte_cntr_data->dev.owner = THIS_MODULE;
byte_cntr_data->dev.ops = &byte_cntr_fops;
ret = cdev_add(&byte_cntr_data->dev, dev, 1);
if (ret)
goto exit_unreg_chrdev_region;
byte_cntr_data->driver_class = class_create(byte_cntr_data->class_name);
if (IS_ERR(byte_cntr_data->driver_class)) {
ret = PTR_ERR(byte_cntr_data->driver_class);
pr_err("class_create failed %d\n", ret);
goto exit_unreg_chrdev_region;
}
device = device_create(byte_cntr_data->driver_class, NULL,
byte_cntr_data->dev.dev, byte_cntr_data,
byte_cntr_data->name);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
pr_err("device_create failed %d\n", ret);
goto exit_destroy_class;
}
return 0;
exit_destroy_class:
class_destroy(byte_cntr_data->driver_class);
exit_unreg_chrdev_region:
unregister_chrdev_region(byte_cntr_data->dev.dev, 1);
return ret;
}
struct byte_cntr *byte_cntr_init(struct amba_device *adev,
struct tmc_drvdata *drvdata)
{
struct device *dev = &adev->dev;
struct device_node *np = adev->dev.of_node;
int byte_cntr_irq;
int ret;
struct byte_cntr *byte_cntr_data;
byte_cntr_irq = of_irq_get_byname(np, "byte-cntr-irq");
if (byte_cntr_irq < 0)
return NULL;
byte_cntr_data = devm_kzalloc(dev, sizeof(*byte_cntr_data), GFP_KERNEL);
if (!byte_cntr_data)
return NULL;
ret = devm_request_irq(dev, byte_cntr_irq, etr_handler,
IRQF_TRIGGER_RISING | IRQF_SHARED,
dev_name(dev), byte_cntr_data);
if (ret) {
dev_err(dev, "Byte_cntr interrupt registration failed\n");
return NULL;
}
ret = of_property_read_u32(dev->of_node, "csr-irqctrl-offset",
&byte_cntr_data->irqctrl_offset);
if (ret) {
dev_dbg(dev, "Get byte cntr csr irqctrl offset failed\n");
byte_cntr_data->irqctrl_offset = CSR_BYTECNTVAL;
}
ret = device_property_read_string(dev, "byte-cntr-name", &byte_cntr_data->name);
if (ret) {
dev_dbg(dev, "Get byte cntr name failed\n");
byte_cntr_data->name = "byte-cntr";
}
ret = device_property_read_string(dev, "byte-cntr-class-name",
&byte_cntr_data->class_name);
if (ret) {
dev_dbg(dev, "Get byte cntr class name failed\n");
byte_cntr_data->class_name = "coresight-tmc-etr-stream";
}
ret = byte_cntr_register_chardev(byte_cntr_data);
if (ret) {
dev_err(dev, "Byte_cntr char dev registration failed\n");
return NULL;
}
byte_cntr_data->byte_cntr_irq = byte_cntr_irq;
byte_cntr_data->csr = drvdata->csr;
byte_cntr_data->tmcdrvdata = drvdata;
atomic_set(&byte_cntr_data->irq_cnt, 0);
init_waitqueue_head(&byte_cntr_data->wq);
mutex_init(&byte_cntr_data->byte_cntr_lock);
return byte_cntr_data;
}
void byte_cntr_remove(struct byte_cntr *byte_cntr_data)
{
device_destroy(byte_cntr_data->driver_class,
byte_cntr_data->dev.dev);
class_destroy(byte_cntr_data->driver_class);
unregister_chrdev_region(byte_cntr_data->dev.dev, 1);
}


@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CORESIGHT_BYTE_CNTR_H
#define _CORESIGHT_BYTE_CNTR_H
#include <linux/cdev.h>
#include <linux/amba/bus.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include "coresight-tmc.h"
struct byte_cntr {
struct cdev dev;
struct class *driver_class;
bool enable;
bool read_active;
uint32_t byte_cntr_value;
uint32_t block_size;
int byte_cntr_irq;
atomic_t irq_cnt;
atomic_t usb_free_buf;
wait_queue_head_t wq;
wait_queue_head_t usb_wait_wq;
struct workqueue_struct *usb_wq;
struct qdss_request *usb_req;
struct work_struct read_work;
struct mutex usb_bypass_lock;
struct mutex byte_cntr_lock;
struct coresight_csr *csr;
struct tmc_drvdata *tmcdrvdata;
const char *name;
const char *class_name;
int irqctrl_offset;
unsigned long offset;
unsigned long rwp_offset;
uint64_t total_size;
uint64_t total_irq;
};
extern void tmc_etr_byte_cntr_start(struct byte_cntr *byte_cntr_data);
extern void tmc_etr_byte_cntr_stop(struct byte_cntr *byte_cntr_data);
#endif


@@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CORESIGHT_COMMON_H
#define _CORESIGHT_COMMON_H
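/*
 * BM(lsb, msb) builds a contiguous mask covering bits lsb..msb, e.g.
 * BM(2, 4) = (BIT(4) - BIT(2)) + BIT(4) = 0x1c. BVAL(val, n) extracts
 * bit n of val as 0 or 1.
 */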
#define BM(lsb, msb) ((BIT(msb) - BIT(lsb)) + BIT(msb))
#define BVAL(val, n) ((val & BIT(n)) >> n)
struct coresight_csr {
const char *name;
struct list_head link;
};
#if IS_ENABLED(CONFIG_CORESIGHT_CSR)
extern void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr);
extern void msm_qdss_csr_enable_flush(struct coresight_csr *csr);
extern void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr);
extern void msm_qdss_csr_disable_flush(struct coresight_csr *csr);
extern void msm_qdss_csr_enable_eth(struct coresight_csr *csr);
extern void msm_qdss_csr_disable_eth(struct coresight_csr *csr);
extern int coresight_csr_hwctrl_set(struct coresight_csr *csr, uint64_t addr,
uint32_t val);
extern void coresight_csr_set_byte_cntr(struct coresight_csr *csr, int irqctrl_offset,
uint32_t count);
extern struct coresight_csr *coresight_csr_get(const char *name);
extern int coresight_csr_set_etr_atid(struct coresight_device *csdev, int atid, bool enable);
#if IS_ENABLED(CONFIG_OF)
extern int of_get_coresight_csr_name(struct device_node *node,
const char **csr_name);
#else
static inline int of_get_coresight_csr_name(struct device_node *node,
const char **csr_name){ return -EINVAL; }
#endif
#else
static inline void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr) {}
static inline void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr) {}
static inline void msm_qdss_csr_disable_flush(struct coresight_csr *csr) {}
static inline void msm_qdss_csr_enable_eth(struct coresight_csr *csr) {}
static inline void msm_qdss_csr_disable_eth(struct coresight_csr *csr) {}
static inline int coresight_csr_hwctrl_set(struct coresight_csr *csr,
uint64_t addr, uint32_t val) { return -EINVAL; }
static inline void coresight_csr_set_byte_cntr(struct coresight_csr *csr, int irqctrl_offset,
uint32_t count) {}
static inline struct coresight_csr *coresight_csr_get(const char *name)
{ return NULL; }
static inline int coresight_csr_set_etr_atid(struct coresight_device *csdev, int atid, bool enable)
{return -EINVAL; }
static inline int of_get_coresight_csr_name(struct device_node *node,
const char **csr_name){ return -EINVAL; }
#endif
#if IS_ENABLED(CONFIG_CORESIGHT_CTI) && IS_ENABLED(CONFIG_OF)
extern struct coresight_cti_data *of_get_coresight_cti_data(
struct device *dev, struct device_node *node);
#else
static inline struct coresight_cti_data *of_get_coresight_cti_data(
struct device *dev, struct device_node *node) { return NULL; }
#endif
#endif


@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/build_bug.h>
@@ -17,14 +18,18 @@
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/coresight.h>
#include <linux/of_platform.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-common.h"
#include "coresight-syscfg.h"
#define MAX_SINK_NAME 25
static DEFINE_MUTEX(coresight_mutex);
static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
@@ -67,8 +72,12 @@ ssize_t coresight_simple_show_pair(struct device *_dev,
struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
struct cs_pair_attribute *cs_attr = container_of(attr, struct cs_pair_attribute, attr);
u64 val;
int ret;
ret = pm_runtime_resume_and_get(_dev->parent);
if (ret < 0)
return ret;
pm_runtime_get_sync(_dev->parent);
val = csdev_access_relaxed_read_pair(&csdev->access, cs_attr->lo_off, cs_attr->hi_off);
pm_runtime_put_sync(_dev->parent);
return sysfs_emit(buf, "0x%llx\n", val);
@@ -81,8 +90,12 @@ ssize_t coresight_simple_show32(struct device *_dev,
struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
struct cs_off_attribute *cs_attr = container_of(attr, struct cs_off_attribute, attr);
u64 val;
int ret;
ret = pm_runtime_resume_and_get(_dev->parent);
if (ret < 0)
return ret;
pm_runtime_get_sync(_dev->parent);
val = csdev_access_relaxed_read32(&csdev->access, cs_attr->off);
pm_runtime_put_sync(_dev->parent);
return sysfs_emit(buf, "0x%llx\n", val);
@@ -113,15 +126,57 @@ struct coresight_device *coresight_get_percpu_sink(int cpu)
}
EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
static struct coresight_device *coresight_get_source(struct list_head *path)
{
struct coresight_device *csdev;
if (!path)
return NULL;
csdev = list_first_entry(path, struct coresight_node, link)->csdev;
if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
return NULL;
return csdev;
}
/**
* coresight_source_filter - check whether the connection matches the source
* of the path when the connection is bound to a specific source.
* @path: The list of devices
* @conn: The connection of one outport
*
* Return zero if the connection is not bound to a source, or if the source
* of the path matches the source the connection is bound to.
*/
static int coresight_source_filter(struct list_head *path,
struct coresight_connection *conn)
{
int ret = 0;
struct coresight_device *source = NULL;
if (conn->source_name == NULL)
return ret;
source = coresight_get_source(path);
if (source == NULL)
return ret;
return strcmp(conn->source_name, dev_name(&source->dev));
}
static struct coresight_connection *
coresight_find_out_connection(struct coresight_device *src_dev,
struct coresight_device *dest_dev)
struct coresight_device *dest_dev,
struct list_head *path)
{
int i;
struct coresight_connection *conn;
for (i = 0; i < src_dev->pdata->nr_outconns; i++) {
conn = src_dev->pdata->out_conns[i];
if (coresight_source_filter(path, conn))
continue;
if (conn->dest_dev == dest_dev)
return conn;
}
@@ -312,7 +367,8 @@ static void coresight_disable_sink(struct coresight_device *csdev)
static int coresight_enable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
struct coresight_device *child,
struct list_head *path)
{
int ret = 0;
int link_subtype;
@@ -321,8 +377,8 @@ static int coresight_enable_link(struct coresight_device *csdev,
if (!parent || !child)
return -EINVAL;
inconn = coresight_find_out_connection(parent, csdev);
outconn = coresight_find_out_connection(csdev, child);
inconn = coresight_find_out_connection(parent, csdev, path);
outconn = coresight_find_out_connection(csdev, child, path);
link_subtype = csdev->subtype.link_subtype;
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG && IS_ERR(inconn))
@@ -341,7 +397,8 @@ static int coresight_enable_link(struct coresight_device *csdev,
static void coresight_disable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
struct coresight_device *child,
struct list_head *path)
{
int i;
int link_subtype;
@@ -350,8 +407,8 @@ static void coresight_disable_link(struct coresight_device *csdev,
if (!parent || !child)
return;
inconn = coresight_find_out_connection(parent, csdev);
outconn = coresight_find_out_connection(csdev, child);
inconn = coresight_find_out_connection(parent, csdev, path);
outconn = coresight_find_out_connection(csdev, child, path);
link_subtype = csdev->subtype.link_subtype;
if (link_ops(csdev)->disable) {
@@ -523,7 +580,7 @@ static void coresight_disable_path_from(struct list_head *path,
case CORESIGHT_DEV_TYPE_LINK:
parent = list_prev_entry(nd, link)->csdev;
child = list_next_entry(nd, link)->csdev;
coresight_disable_link(csdev, parent, child);
coresight_disable_link(csdev, parent, child, path);
break;
default:
break;
@@ -597,6 +654,7 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
*/
if (ret)
goto out;
break;
case CORESIGHT_DEV_TYPE_SOURCE:
/* sources are enabled from either sysFS or Perf */
@@ -604,7 +662,7 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
case CORESIGHT_DEV_TYPE_LINK:
parent = list_prev_entry(nd, link)->csdev;
child = list_next_entry(nd, link)->csdev;
ret = coresight_enable_link(csdev, parent, child);
ret = coresight_enable_link(csdev, parent, child, path);
if (ret)
goto err;
break;
@@ -634,6 +692,8 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
EXPORT_SYMBOL_GPL(coresight_get_sink);
static struct coresight_device *
coresight_find_enabled_sink(struct coresight_device *csdev)
@@ -677,6 +737,52 @@ coresight_get_enabled_sink(struct coresight_device *source)
return coresight_find_enabled_sink(source);
}
static int coresight_enabled_sink(struct device *dev, const void *data)
{
const bool *reset = data;
struct coresight_device *csdev = to_coresight_device(dev);
if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
csdev->activated) {
/*
* Now that we have a handle on the sink for this session,
* disable the sysFS "enable_sink" flag so that possible
* concurrent perf session that wish to use another sink don't
* trip on it. Doing so has no ramification for the current
* session.
*/
if (*reset)
csdev->activated = false;
return 1;
}
return 0;
}
/**
* coresight_get_enabled_sink_from_bus - returns the first enabled sink found on the bus
* @deactivate: Whether the 'enable_sink' flag should be reset
*
* When operated from perf the deactivate parameter should be set to 'true'.
* That way the "enabled_sink" flag of the sink that was selected can be reset,
* allowing for other concurrent perf sessions to choose a different sink.
*
* When operated from sysFS users have full control and as such the deactivate
* parameter should be set to 'false', hence mandating users to explicitly
* clear the flag.
*/
static struct coresight_device *coresight_get_enabled_sink_from_bus(bool deactivate)
{
struct device *dev = NULL;
dev = bus_find_device(&coresight_bustype, NULL, &deactivate,
coresight_enabled_sink);
return dev ? to_coresight_device(dev) : NULL;
}
static int coresight_sink_by_id(struct device *dev, const void *data)
{
struct coresight_device *csdev = to_coresight_device(dev);
@@ -750,7 +856,7 @@ static inline void coresight_put_ref(struct coresight_device *csdev)
{
struct device *dev = csdev->dev.parent;
pm_runtime_put(dev);
pm_runtime_put_sync(dev);
put_device(dev);
module_put(dev->driver->owner);
}
@@ -818,7 +924,8 @@ static void coresight_drop_device(struct coresight_device *csdev)
*/
static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink,
struct list_head *path)
struct list_head *path,
struct coresight_device *source)
{
int i, ret;
bool found = false;
@@ -830,7 +937,7 @@ static int _coresight_build_path(struct coresight_device *csdev,
if (coresight_is_percpu_source(csdev) && coresight_is_percpu_sink(sink) &&
sink == per_cpu(csdev_sink, source_ops(csdev)->cpu_id(csdev))) {
if (_coresight_build_path(sink, sink, path) == 0) {
if (_coresight_build_path(sink, sink, path, source) == 0) {
found = true;
goto out;
}
@@ -841,8 +948,12 @@ static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_device *child_dev;
child_dev = csdev->pdata->out_conns[i]->dest_dev;
if (csdev->pdata->out_conns[i]->source_name &&
strcmp(csdev->pdata->out_conns[i]->source_name,
dev_name(&source->dev)))
continue;
if (child_dev &&
_coresight_build_path(child_dev, sink, path) == 0) {
_coresight_build_path(child_dev, sink, path, source) == 0) {
found = true;
break;
}
@@ -887,7 +998,7 @@ struct list_head *coresight_build_path(struct coresight_device *source,
INIT_LIST_HEAD(path);
rc = _coresight_build_path(source, sink, path);
rc = _coresight_build_path(source, sink, path, source);
if (rc) {
kfree(path);
return ERR_PTR(rc);
@@ -917,6 +1028,7 @@ void coresight_release_path(struct list_head *path)
}
kfree(path);
path = NULL;
}
/* return true if the device is a suitable type for a default sink */
@@ -1117,54 +1229,29 @@ static int coresight_validate_source(struct coresight_device *csdev,
return 0;
}
int coresight_enable(struct coresight_device *csdev)
static int coresight_validate_sink(struct coresight_device *source,
struct coresight_device *sink)
{
int cpu, ret = 0;
struct coresight_device *sink;
struct list_head *path;
if (of_coresight_secure_node(sink) && !of_coresight_secure_node(source)) {
dev_err(&sink->dev, "dont support this source: %s\n",
dev_name(&source->dev));
return -EINVAL;
}
return 0;
}
static int coresight_store_path(struct coresight_device *csdev,
struct list_head *path)
{
int cpu, ret;
enum coresight_dev_subtype_source subtype;
u32 hash;
subtype = csdev->subtype.source_subtype;
mutex_lock(&coresight_mutex);
ret = coresight_validate_source(csdev, __func__);
if (ret)
goto out;
if (csdev->enable) {
/*
* There could be multiple applications driving the software
* source. So keep the refcount for each such user when the
* source is already enabled.
*/
if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
atomic_inc(&csdev->refcnt);
goto out;
}
sink = coresight_get_enabled_sink(csdev);
if (!sink) {
ret = -EINVAL;
goto out;
}
path = coresight_build_path(csdev, sink);
if (IS_ERR(path)) {
pr_err("building path(s) failed\n");
ret = PTR_ERR(path);
goto out;
}
ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
if (ret)
goto err_path;
ret = coresight_enable_source(csdev, CS_MODE_SYSFS, NULL);
if (ret)
goto err_source;
switch (subtype) {
case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
/*
@@ -1186,41 +1273,22 @@ int coresight_enable(struct coresight_device *csdev)
hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
ret = idr_alloc_u32(&path_idr, path, &hash, hash, GFP_KERNEL);
if (ret)
goto err_source;
return ret;
break;
default:
/* We can't be here */
break;
}
out:
mutex_unlock(&coresight_mutex);
return ret;
err_source:
coresight_disable_path(path);
err_path:
coresight_release_path(path);
goto out;
return 0;
}
EXPORT_SYMBOL_GPL(coresight_enable);
void coresight_disable(struct coresight_device *csdev)
static struct list_head *coresight_remove_path(struct coresight_device *csdev)
{
int cpu, ret;
struct list_head *path = NULL;
int cpu;
struct list_head *path;
u32 hash;
mutex_lock(&coresight_mutex);
ret = coresight_validate_source(csdev, __func__);
if (ret)
goto out;
if (!csdev->enable || !coresight_disable_source_sysfs(csdev, NULL))
goto out;
switch (csdev->subtype.source_subtype) {
case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
cpu = source_ops(csdev)->cpu_id(csdev);
@@ -1234,20 +1302,152 @@ void coresight_disable(struct coresight_device *csdev)
path = idr_find(&path_idr, hash);
if (path == NULL) {
pr_err("Path is not found for %s\n", dev_name(&csdev->dev));
goto out;
return NULL;
}
idr_remove(&path_idr, hash);
break;
default:
/* We can't be here */
path = NULL;
break;
}
coresight_disable_path(path);
coresight_release_path(path);
return path;
}
struct list_head *coresight_get_path(struct coresight_device *csdev)
{
int cpu;
struct list_head *path;
u32 hash;
switch (csdev->subtype.source_subtype) {
case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
cpu = source_ops(csdev)->cpu_id(csdev);
path = per_cpu(tracer_path, cpu);
break;
case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
/* Find the path by the hash. */
path = idr_find(&path_idr, hash);
if (path == NULL) {
pr_err("Path is not found for %s\n", dev_name(&csdev->dev));
return NULL;
}
break;
default:
path = NULL;
break;
}
return path;
}
EXPORT_SYMBOL_GPL(coresight_get_path);
int coresight_enable(struct coresight_device *csdev)
{
int ret = 0;
struct coresight_device *sink;
struct list_head *path;
enum coresight_dev_subtype_source subtype;
subtype = csdev->subtype.source_subtype;
mutex_lock(&coresight_mutex);
ret = coresight_validate_source(csdev, __func__);
if (ret)
goto out;
if (csdev->enable) {
/*
* There could be multiple applications driving the software
* source. So keep the refcount for each such user when the
* source is already enabled.
*/
if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
atomic_inc(&csdev->refcnt);
goto out;
}
if (csdev->def_sink) {
sink = csdev->def_sink;
sink->activated = true;
} else
sink = coresight_get_enabled_sink(csdev);
if (!sink) {
ret = -EINVAL;
goto out;
}
ret = coresight_validate_sink(csdev, sink);
if (ret)
goto out;
path = coresight_build_path(csdev, sink);
if (IS_ERR(path)) {
ret = PTR_ERR(path);
pr_err("building path(s) failed %d\n", ret);
goto out;
}
ret = coresight_store_path(csdev, path);
if (ret)
goto err_path;
ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
if (ret)
goto err_enable_path;
ret = coresight_enable_source(csdev, CS_MODE_SYSFS, NULL);
if (ret)
goto err_source;
out:
mutex_unlock(&coresight_mutex);
return ret;
err_source:
coresight_disable_path(path);
err_enable_path:
coresight_remove_path(csdev);
err_path:
coresight_release_path(path);
goto out;
}
EXPORT_SYMBOL_GPL(coresight_enable);
static void _coresight_disable(struct coresight_device *csdev)
{
int ret;
struct list_head *path = NULL;
ret = coresight_validate_source(csdev, __func__);
if (ret)
return;
if (csdev->def_sink)
csdev->def_sink = NULL;
if (!csdev->enable || !coresight_disable_source_sysfs(csdev, NULL))
return;
path = coresight_remove_path(csdev);
if (!path)
return;
coresight_disable_path(path);
coresight_release_path(path);
}
void coresight_disable(struct coresight_device *csdev)
{
mutex_lock(&coresight_mutex);
_coresight_disable(csdev);
mutex_unlock(&coresight_mutex);
}
EXPORT_SYMBOL_GPL(coresight_disable);
@@ -1265,15 +1465,22 @@ static ssize_t enable_sink_store(struct device *dev,
{
int ret;
unsigned long val;
struct coresight_device *sink;
struct coresight_device *csdev = to_coresight_device(dev);
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val)
if (val) {
sink = coresight_get_enabled_sink_from_bus(false);
if (sink && sink->type != csdev->type) {
dev_err(&csdev->dev,
"A sink of another type is already enabled.\n");
return -EINVAL;
}
csdev->activated = true;
else
} else
csdev->activated = false;
return size;
@@ -1313,6 +1520,59 @@ static ssize_t enable_source_store(struct device *dev,
}
static DEVICE_ATTR_RW(enable_source);
static ssize_t sink_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct coresight_device *csdev = to_coresight_device(dev);
if (csdev->def_sink)
return scnprintf(buf, PAGE_SIZE, "%s\n", dev_name(&csdev->def_sink->dev));
else
return scnprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t sink_name_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
u32 hash;
char *sink_name;
struct coresight_device *new_sink, *current_sink;
struct coresight_device *csdev = to_coresight_device(dev);
if (size >= MAX_SINK_NAME)
return -EINVAL;
if (size == 0) {
csdev->def_sink = NULL;
return size;
}
sink_name = kstrdup(buf, GFP_KERNEL);
if (!sink_name)
return -ENOMEM;
sink_name[size-1] = 0;
hash = hashlen_hash(hashlen_string(NULL, sink_name));
new_sink = coresight_get_sink_by_id(hash);
current_sink = coresight_get_enabled_sink_from_bus(false);
if (!new_sink || (current_sink &&
current_sink->type != new_sink->type)) {
dev_err(&csdev->dev,
"Sink name is invalid or a sink of another type is enabled.\n");
kfree(sink_name);
return -EINVAL;
}
csdev->def_sink = new_sink;
kfree(sink_name);
return size;
}
static DEVICE_ATTR_RW(sink_name);
static struct attribute *coresight_sink_attrs[] = {
&dev_attr_enable_sink.attr,
NULL,
@@ -1321,6 +1581,7 @@ ATTRIBUTE_GROUPS(coresight_sink);
static struct attribute *coresight_source_attrs[] = {
&dev_attr_enable_source.attr,
&dev_attr_sink_name.attr,
NULL,
};
ATTRIBUTE_GROUPS(coresight_source);
@@ -1784,28 +2045,32 @@ char *coresight_alloc_device_name(struct coresight_dev_list *dict,
{
int idx;
char *name = NULL;
const char *coresight_name = NULL;
struct fwnode_handle **list;
struct device_node *node = dev->of_node;
mutex_lock(&coresight_mutex);
idx = coresight_search_device_idx(dict, dev_fwnode(dev));
if (idx < 0) {
/* Make space for the new entry */
idx = dict->nr_idx;
list = krealloc_array(dict->fwnode_list,
idx + 1, sizeof(*dict->fwnode_list),
GFP_KERNEL);
if (ZERO_OR_NULL_PTR(list)) {
idx = -ENOMEM;
goto done;
if (!of_property_read_string(node, "coresight-name", &coresight_name))
name = devm_kasprintf(dev, GFP_KERNEL, "%s", coresight_name);
else {
idx = coresight_search_device_idx(dict, dev_fwnode(dev));
if (idx < 0) {
/* Make space for the new entry */
idx = dict->nr_idx;
list = krealloc_array(dict->fwnode_list,
idx + 1, sizeof(*dict->fwnode_list),
GFP_KERNEL);
if (ZERO_OR_NULL_PTR(list))
goto done;
list[idx] = dev_fwnode(dev);
dict->fwnode_list = list;
dict->nr_idx = idx + 1;
}
list[idx] = dev_fwnode(dev);
dict->fwnode_list = list;
dict->nr_idx = idx + 1;
name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", dict->pfx, idx);
}
name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", dict->pfx, idx);
done:
mutex_unlock(&coresight_mutex);
return name;

File diff suppressed because it is too large.


@@ -15,9 +15,12 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/spinlock.h>
#include <linux/pinctrl/consumer.h>
#include <linux/suspend.h>
#include "coresight-priv.h"
#include "coresight-cti.h"
@@ -69,16 +72,29 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata)
writel_relaxed(0, drvdata->base + CTICONTROL);
/* write the CTI trigger registers */
for (i = 0; i < config->nr_trig_max; i++) {
writel_relaxed(config->ctiinen[i], drvdata->base + CTIINEN(i));
writel_relaxed(config->ctiouten[i],
drvdata->base + CTIOUTEN(i));
}
if (drvdata->extended_cti) {
for (i = 0; i < config->nr_trig_max; i++) {
writel_relaxed(config->ctiinen[i], drvdata->base + CTIINEN_EXTENDED(i));
writel_relaxed(config->ctiouten[i],
drvdata->base + CTIOUTEN_EXTENDED(i));
}
/* other regs */
writel_relaxed(config->ctigate, drvdata->base + CTIGATE);
writel_relaxed(config->asicctl, drvdata->base + ASICCTL);
writel_relaxed(config->ctiappset, drvdata->base + CTIAPPSET);
/* other regs */
writel_relaxed(config->ctigate, drvdata->base + CTIGATE_EXTENDED);
writel_relaxed(config->asicctl, drvdata->base + ASICCTL_EXTENDED);
writel_relaxed(config->ctiappset, drvdata->base + CTIAPPSET_EXTENDED);
} else {
for (i = 0; i < config->nr_trig_max; i++) {
writel_relaxed(config->ctiinen[i], drvdata->base + CTIINEN(i));
writel_relaxed(config->ctiouten[i],
drvdata->base + CTIOUTEN(i));
}
/* other regs */
writel_relaxed(config->ctigate, drvdata->base + CTIGATE);
writel_relaxed(config->asicctl, drvdata->base + ASICCTL);
writel_relaxed(config->ctiappset, drvdata->base + CTIAPPSET);
}
/* re-enable CTI */
writel_relaxed(1, drvdata->base + CTICONTROL);
@@ -99,10 +115,12 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
if (config->hw_enabled || !config->hw_powered)
goto cti_state_unchanged;
/* claim the device */
rc = coresight_claim_device(drvdata->csdev);
if (rc)
goto cti_err_not_enabled;
if (!drvdata->extended_cti) {
/* claim the device */
rc = coresight_claim_device(drvdata->csdev);
if (rc)
goto cti_err_not_enabled;
}
cti_write_all_hw_regs(drvdata);
@@ -175,7 +193,8 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
writel_relaxed(0, drvdata->base + CTICONTROL);
config->hw_enabled = false;
coresight_disclaim_device_unlocked(csdev);
if (!drvdata->extended_cti)
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
spin_unlock(&drvdata->spinlock);
return ret;
@@ -270,8 +289,10 @@ int cti_add_connection_entry(struct device *dev, struct cti_drvdata *drvdata,
cti_dev->nr_trig_con++;
/* add connection usage bit info to overall info */
drvdata->config.trig_in_use |= tc->con_in->used_mask;
drvdata->config.trig_out_use |= tc->con_out->used_mask;
bitmap_or(drvdata->config.trig_in_use, drvdata->config.trig_in_use,
tc->con_in->used_mask, drvdata->config.nr_trig_max);
bitmap_or(drvdata->config.trig_out_use, drvdata->config.trig_out_use,
tc->con_out->used_mask, drvdata->config.nr_trig_max);
return 0;
}
@@ -314,7 +335,6 @@ int cti_add_default_connection(struct device *dev, struct cti_drvdata *drvdata)
{
int ret = 0;
int n_trigs = drvdata->config.nr_trig_max;
u32 n_trig_mask = GENMASK(n_trigs - 1, 0);
struct cti_trig_con *tc = NULL;
/*
@@ -325,12 +345,104 @@ int cti_add_default_connection(struct device *dev, struct cti_drvdata *drvdata)
if (!tc)
return -ENOMEM;
tc->con_in->used_mask = n_trig_mask;
tc->con_out->used_mask = n_trig_mask;
bitmap_fill(tc->con_in->used_mask, n_trigs);
bitmap_fill(tc->con_out->used_mask, n_trigs);
ret = cti_add_connection_entry(dev, drvdata, tc, NULL, "default");
return ret;
}
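/*
 * This hunk is part of a wider conversion of the CTI trigger masks from
 * u32 values to Linux bitmaps so more than 32 triggers can be supported.
 * A sketch of the idiom mapping (illustrative, not from this patch;
 * needs <linux/bitmap.h> and <linux/bitops.h>):
 */
static void cti_bitmap_idioms_demo(void)
{
	DECLARE_BITMAP(mask, 64);	/* was: u32 mask;             */

	bitmap_zero(mask, 64);		/* was: mask = 0;             */
	set_bit(5, mask);		/* was: mask |= BIT(5);       */
	if (test_bit(5, mask))		/* was: if (mask & BIT(5))    */
		clear_bit(5, mask);	/* was: mask &= ~BIT(5);      */
	bitmap_fill(mask, 8);		/* was: mask = GENMASK(7, 0); */
}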
static int cti_trigin_gpio_enable(struct cti_drvdata *drvdata)
{
int ret;
struct pinctrl *pctrl;
struct pinctrl_state *pctrl_state;
if (drvdata->gpio_trigin->pctrl)
return 0;
pctrl = devm_pinctrl_get(drvdata->csdev->dev.parent);
if (IS_ERR(pctrl)) {
dev_err(&drvdata->csdev->dev, "pinctrl get failed\n");
return PTR_ERR(pctrl);
}
pctrl_state = pinctrl_lookup_state(pctrl, "cti-trigin-pctrl");
if (IS_ERR(pctrl_state)) {
dev_err(&drvdata->csdev->dev,
"pinctrl get state failed\n");
ret = PTR_ERR(pctrl_state);
goto err;
}
ret = pinctrl_select_state(pctrl, pctrl_state);
if (ret) {
dev_err(&drvdata->csdev->dev,
"pinctrl enable state failed\n");
goto err;
}
drvdata->gpio_trigin->pctrl = pctrl;
return 0;
err:
devm_pinctrl_put(pctrl);
return ret;
}
static int cti_trigout_gpio_enable(struct cti_drvdata *drvdata)
{
int ret;
struct pinctrl *pctrl;
struct pinctrl_state *pctrl_state;
if (drvdata->gpio_trigout->pctrl)
return 0;
pctrl = devm_pinctrl_get(drvdata->csdev->dev.parent);
if (IS_ERR(pctrl)) {
dev_err(&drvdata->csdev->dev, "pinctrl get failed\n");
return PTR_ERR(pctrl);
}
pctrl_state = pinctrl_lookup_state(pctrl, "cti-trigout-pctrl");
if (IS_ERR(pctrl_state)) {
dev_err(&drvdata->csdev->dev,
"pinctrl get state failed\n");
ret = PTR_ERR(pctrl_state);
goto err;
}
ret = pinctrl_select_state(pctrl, pctrl_state);
if (ret) {
dev_err(&drvdata->csdev->dev,
"pinctrl enable state failed\n");
goto err;
}
drvdata->gpio_trigout->pctrl = pctrl;
return 0;
err:
devm_pinctrl_put(pctrl);
return ret;
}
void cti_trigin_gpio_disable(struct cti_drvdata *drvdata)
{
if (!drvdata->gpio_trigin->pctrl)
return;
devm_pinctrl_put(drvdata->gpio_trigin->pctrl);
drvdata->gpio_trigin->pctrl = NULL;
}
void cti_trigout_gpio_disable(struct cti_drvdata *drvdata)
{
if (!drvdata->gpio_trigout->pctrl)
return;
devm_pinctrl_put(drvdata->gpio_trigout->pctrl);
drvdata->gpio_trigout->pctrl = NULL;
}
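/*
 * cti_trigin_gpio_enable() and cti_trigout_gpio_enable() above differ
 * only in the pinctrl state name; a possible shared body (a sketch,
 * not part of this patch):
 */
static int cti_trig_gpio_enable_common(struct cti_drvdata *drvdata,
				       struct cti_pctrl *gpio,
				       const char *state_name)
{
	struct pinctrl *pctrl;
	struct pinctrl_state *state;
	int ret;

	if (gpio->pctrl)
		return 0;

	pctrl = devm_pinctrl_get(drvdata->csdev->dev.parent);
	if (IS_ERR(pctrl))
		return PTR_ERR(pctrl);

	state = pinctrl_lookup_state(pctrl, state_name);
	if (IS_ERR(state)) {
		ret = PTR_ERR(state);
		goto err;
	}

	ret = pinctrl_select_state(pctrl, state);
	if (ret)
		goto err;

	gpio->pctrl = pctrl;
	return 0;
err:
	devm_pinctrl_put(pctrl);
	return ret;
}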
/** cti channel api **/
/* attach/detach channel from trigger - write through if enabled. */
int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
@@ -339,7 +451,6 @@ int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
u32 trig_bitmask;
u32 chan_bitmask;
u32 reg_value;
int reg_offset;
@@ -349,25 +460,27 @@ int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
(trigger_idx >= config->nr_trig_max))
return -EINVAL;
trig_bitmask = BIT(trigger_idx);
/* ensure registered triggers and not out filtered */
if (direction == CTI_TRIG_IN) {
if (!(trig_bitmask & config->trig_in_use))
if (!(test_bit(trigger_idx, config->trig_in_use)))
return -EINVAL;
} else {
if (!(trig_bitmask & config->trig_out_use))
if (!(test_bit(trigger_idx, config->trig_out_use)))
return -EINVAL;
if ((config->trig_filter_enable) &&
(config->trig_out_filter & trig_bitmask))
test_bit(trigger_idx, config->trig_out_filter))
return -EINVAL;
}
/* update the local register values */
chan_bitmask = BIT(channel_idx);
reg_offset = (direction == CTI_TRIG_IN ? CTIINEN(trigger_idx) :
CTIOUTEN(trigger_idx));
if (drvdata->extended_cti)
reg_offset = (direction == CTI_TRIG_IN ? CTIINEN_EXTENDED(trigger_idx) :
CTIOUTEN_EXTENDED(trigger_idx));
else
reg_offset = (direction == CTI_TRIG_IN ? CTIINEN(trigger_idx) :
CTIOUTEN(trigger_idx));
spin_lock(&drvdata->spinlock);
@@ -385,6 +498,24 @@ int cti_channel_trig_op(struct device *dev, enum cti_chan_op op,
else
config->ctiouten[trigger_idx] = reg_value;
spin_unlock(&drvdata->spinlock);
if (op == CTI_CHAN_ATTACH) {
if (direction == CTI_TRIG_IN &&
drvdata->gpio_trigin->trig == trigger_idx)
cti_trigin_gpio_enable(drvdata);
else if (direction == CTI_TRIG_OUT &&
drvdata->gpio_trigout->trig == trigger_idx)
cti_trigout_gpio_enable(drvdata);
} else {
if (direction == CTI_TRIG_IN &&
drvdata->gpio_trigin->trig == trigger_idx)
cti_trigin_gpio_disable(drvdata);
else if (direction == CTI_TRIG_OUT &&
drvdata->gpio_trigout->trig == trigger_idx)
cti_trigout_gpio_disable(drvdata);
}
spin_lock(&drvdata->spinlock);
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, reg_offset, reg_value);
@@ -423,8 +554,12 @@ int cti_channel_gate_op(struct device *dev, enum cti_chan_gate_op op,
}
if (err == 0) {
config->ctigate = reg_value;
if (cti_active(config))
cti_write_single_reg(drvdata, CTIGATE, reg_value);
if (cti_active(config)) {
if (drvdata->extended_cti)
cti_write_single_reg(drvdata, CTIGATE_EXTENDED, reg_value);
else
cti_write_single_reg(drvdata, CTIGATE, reg_value);
}
}
spin_unlock(&drvdata->spinlock);
return err;
@@ -451,19 +586,28 @@ int cti_channel_setop(struct device *dev, enum cti_chan_set_op op,
case CTI_CHAN_SET:
config->ctiappset |= chan_bitmask;
reg_value = config->ctiappset;
reg_offset = CTIAPPSET;
if (drvdata->extended_cti)
reg_offset = CTIAPPSET_EXTENDED;
else
reg_offset = CTIAPPSET;
break;
case CTI_CHAN_CLR:
config->ctiappset &= ~chan_bitmask;
reg_value = chan_bitmask;
reg_offset = CTIAPPCLEAR;
if (drvdata->extended_cti)
reg_offset = CTIAPPCLEAR_EXTENDED;
else
reg_offset = CTIAPPCLEAR;
break;
case CTI_CHAN_PULSE:
config->ctiappset &= ~chan_bitmask;
reg_value = chan_bitmask;
reg_offset = CTIAPPPULSE;
if (drvdata->extended_cti)
reg_offset = CTIAPPPULSE_EXTENDED;
else
reg_offset = CTIAPPPULSE;
break;
default:
@@ -857,6 +1001,46 @@ static void cti_remove(struct amba_device *adev)
coresight_unregister(drvdata->csdev);
}
static bool is_extended_cti(struct device *dev)
{
return fwnode_property_present(dev->fwnode, "qcom,extended_cti");
}
static int cti_parse_gpio(struct cti_drvdata *drvdata, struct amba_device *adev)
{
int ret;
int trig;
drvdata->gpio_trigin = devm_kzalloc(&adev->dev,
sizeof(struct cti_pctrl), GFP_KERNEL);
if (!drvdata->gpio_trigin)
return -ENOMEM;
drvdata->gpio_trigin->trig = -1;
ret = of_property_read_u32(adev->dev.of_node,
"qcom,cti-gpio-trigin", &trig);
if (!ret)
drvdata->gpio_trigin->trig = trig;
else if (ret != -EINVAL)
return ret;
drvdata->gpio_trigout = devm_kzalloc(&adev->dev,
sizeof(struct cti_pctrl), GFP_KERNEL);
if (!drvdata->gpio_trigout)
return -ENOMEM;
drvdata->gpio_trigout->trig = -1;
ret = of_property_read_u32(adev->dev.of_node,
"qcom,cti-gpio-trigout", &trig);
if (!ret)
drvdata->gpio_trigout->trig = trig;
else if (ret != -EINVAL)
return ret;
return 0;
}
static int cti_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
@@ -872,6 +1056,15 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
if (!drvdata)
return -ENOMEM;
drvdata->dclk = devm_clk_get(dev, "dynamic_clk");
if (!IS_ERR(drvdata->dclk)) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret)
return ret == -ETIMEDOUT ? -EPROBE_DEFER : ret;
} else
drvdata->dclk = NULL;
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
@@ -911,6 +1104,10 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
if (!cti_desc.name)
return -ENOMEM;
ret = cti_parse_gpio(drvdata, adev);
if (ret)
return ret;
/* setup CPU power management handling for CPU bound CTI devices. */
ret = cti_pm_setup(drvdata);
if (ret)
@@ -948,8 +1145,11 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->csdev_release = drvdata->csdev->dev.release;
drvdata->csdev->dev.release = cti_device_release;
drvdata->extended_cti = is_extended_cti(dev);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
/* all done - dec pm refcount */
pm_runtime_put(&adev->dev);
pm_runtime_put_sync(&adev->dev);
dev_info(&drvdata->csdev->dev, "CTI initialized\n");
return 0;
@@ -958,6 +1158,119 @@ pm_release:
return ret;
}
#ifdef CONFIG_DEEPSLEEP
static int cti_suspend(struct device *dev)
{
int rc = 0;
struct cti_drvdata *drvdata = dev_get_drvdata(dev);
if ((pm_suspend_target_state == PM_SUSPEND_MEM)
&& drvdata->config.hw_enabled) {
drvdata->config.hw_enabled_store = drvdata->config.hw_enabled;
do {
rc = cti_disable(drvdata->csdev, NULL);
if (!rc)
pm_runtime_put_sync(dev);
else
return rc;
} while (drvdata->config.enable_req_count);
}
return rc;
}
static int cti_resume(struct device *dev)
{
int rc = 0;
struct cti_drvdata *drvdata = dev_get_drvdata(dev);
if ((pm_suspend_target_state == PM_SUSPEND_MEM)
&& drvdata->config.hw_enabled_store) {
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
rc = cti_enable(drvdata->csdev, CS_MODE_SYSFS, NULL);
if (rc)
pm_runtime_put_sync(dev);
drvdata->config.hw_enabled_store = false;
}
return rc;
}
#else
static int cti_suspend(struct device *dev)
{
return 0;
}
static int cti_resume(struct device *dev)
{
return 0;
}
#endif
#ifdef CONFIG_HIBERNATION
static int cti_freeze(struct device *dev)
{
int rc = 0;
struct cti_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata->config.hw_enabled) {
drvdata->config.hw_enabled_store = drvdata->config.hw_enabled;
do {
rc = cti_disable(drvdata->csdev, NULL);
if (!rc)
pm_runtime_put_sync(dev);
else
return rc;
} while (drvdata->config.enable_req_count);
}
return rc;
}
static int cti_restore(struct device *dev)
{
int rc = 0;
struct cti_drvdata *drvdata = dev_get_drvdata(dev);
if (drvdata->config.hw_enabled_store) {
rc = pm_runtime_resume_and_get(dev);
if (rc)
return rc;
rc = cti_enable(drvdata->csdev, CS_MODE_SYSFS, NULL);
if (rc)
pm_runtime_put_sync(dev);
drvdata->config.hw_enabled_store = false;
}
return rc;
}
#else
static int cti_freeze(struct device *dev)
{
return 0;
}
static int cti_restore(struct device *dev)
{
return 0;
}
#endif
static const struct dev_pm_ops cti_dev_pm_ops = {
.suspend = cti_suspend,
.resume = cti_resume,
.freeze = cti_freeze,
.restore = cti_restore,
};
static struct amba_cs_uci_id uci_id_cti[] = {
{
/* CTI UCI data */
@@ -983,6 +1296,7 @@ static struct amba_driver cti_driver = {
.drv = {
.name = "coresight-cti",
.owner = THIS_MODULE,
.pm = pm_ptr(&cti_dev_pm_ops),
.suppress_bind_attrs = true,
},
.probe = cti_probe,


@@ -136,8 +136,8 @@ static int cti_plat_create_v8_etm_connection(struct device *dev,
goto create_v8_etm_out;
/* build connection data */
tc->con_in->used_mask = 0xF0; /* sigs <4,5,6,7> */
tc->con_out->used_mask = 0xF0; /* sigs <4,5,6,7> */
bitmap_set(tc->con_in->used_mask, 4, 4); /* sigs <4,5,6,7> */
bitmap_set(tc->con_out->used_mask, 4, 4); /* sigs <4,5,6,7> */
/*
* The EXTOUT type signals from the ETM are connected to a set of input
@@ -194,10 +194,10 @@ static int cti_plat_create_v8_connections(struct device *dev,
goto of_create_v8_out;
/* Set the v8 PE CTI connection data */
tc->con_in->used_mask = 0x3; /* sigs <0 1> */
bitmap_set(tc->con_in->used_mask, 0, 2); /* sigs <0 1> */
tc->con_in->sig_types[0] = PE_DBGTRIGGER;
tc->con_in->sig_types[1] = PE_PMUIRQ;
tc->con_out->used_mask = 0x7; /* sigs <0 1 2 > */
bitmap_set(tc->con_out->used_mask, 0, 3); /* sigs <0 1 2 > */
tc->con_out->sig_types[0] = PE_EDBGREQ;
tc->con_out->sig_types[1] = PE_DBGRESTART;
tc->con_out->sig_types[2] = PE_CTIIRQ;
@@ -213,7 +213,7 @@ static int cti_plat_create_v8_connections(struct device *dev,
goto of_create_v8_out;
/* filter pe_edbgreq - PE trigout sig <0> */
drvdata->config.trig_out_filter |= 0x1;
set_bit(0, drvdata->config.trig_out_filter);
of_create_v8_out:
return ret;
@@ -257,7 +257,7 @@ static int cti_plat_read_trig_group(struct cti_trig_grp *tgrp,
if (!err) {
/* set the signal usage mask */
for (idx = 0; idx < tgrp->nr_sigs; idx++)
tgrp->used_mask |= BIT(values[idx]);
set_bit(values[idx], tgrp->used_mask);
}
kfree(values);
@@ -331,7 +331,9 @@ static int cti_plat_process_filter_sigs(struct cti_drvdata *drvdata,
err = cti_plat_read_trig_group(tg, fwnode, CTI_DT_FILTER_OUT_SIGS);
if (!err)
drvdata->config.trig_out_filter |= tg->used_mask;
bitmap_or(drvdata->config.trig_out_filter,
drvdata->config.trig_out_filter,
tg->used_mask, drvdata->config.nr_trig_max);
kfree(tg);
return err;


@@ -112,13 +112,27 @@ static ssize_t enable_store(struct device *dev,
ret = pm_runtime_resume_and_get(dev->parent);
if (ret)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
ret = cti_enable(drvdata->csdev, CS_MODE_SYSFS, NULL);
if (ret)
pm_runtime_put(dev->parent);
if (ret) {
pm_runtime_put_sync(dev->parent);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
}
} else {
ret = cti_disable(drvdata->csdev, NULL);
if (!ret)
pm_runtime_put(dev->parent);
if (!ret) {
pm_runtime_put_sync(dev->parent);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
}
}
if (ret)
@@ -179,13 +193,25 @@ static ssize_t coresight_cti_reg_show(struct device *dev,
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cs_off_attribute *cti_attr = container_of(attr, struct cs_off_attribute, attr);
u32 val = 0;
int ret;
pm_runtime_get_sync(dev->parent);
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
spin_lock(&drvdata->spinlock);
if (drvdata->config.hw_powered)
val = readl_relaxed(drvdata->base + cti_attr->off);
spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(dev->parent);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
return sysfs_emit(buf, "0x%x\n", val);
}
@@ -197,15 +223,29 @@ static __maybe_unused ssize_t coresight_cti_reg_store(struct device *dev,
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cs_off_attribute *cti_attr = container_of(attr, struct cs_off_attribute, attr);
unsigned long val = 0;
int ret;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
pm_runtime_get_sync(dev->parent);
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
spin_lock(&drvdata->spinlock);
if (drvdata->config.hw_powered)
cti_write_single_reg(drvdata, cti_attr->off, val);
spin_unlock(&drvdata->spinlock);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
pm_runtime_put_sync(dev->parent);
return size;
}
@@ -251,60 +291,6 @@ static struct attribute *coresight_cti_mgmt_attrs[] = {
NULL,
};
/* CTI low level programming registers */
/*
* Show a simple 32 bit value if enabled and powered.
* If inaccessible & pcached_val not NULL then show cached value.
*/
static ssize_t cti_reg32_show(struct device *dev, char *buf,
u32 *pcached_val, int reg_offset)
{
u32 val = 0;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
if ((reg_offset >= 0) && cti_active(config)) {
CS_UNLOCK(drvdata->base);
val = readl_relaxed(drvdata->base + reg_offset);
if (pcached_val)
*pcached_val = val;
CS_LOCK(drvdata->base);
} else if (pcached_val) {
val = *pcached_val;
}
spin_unlock(&drvdata->spinlock);
return sprintf(buf, "%#x\n", val);
}
/*
* Store a simple 32 bit value.
* If pcached_val not NULL, then copy to here too,
* if reg_offset >= 0 then write through if enabled.
*/
static ssize_t cti_reg32_store(struct device *dev, const char *buf,
size_t size, u32 *pcached_val, int reg_offset)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
/* local store */
if (pcached_val)
*pcached_val = (u32)val;
/* write through if offset and enabled */
if ((reg_offset >= 0) && cti_active(config))
cti_write_single_reg(drvdata, reg_offset, val);
spin_unlock(&drvdata->spinlock);
return size;
}
/* Standard macro for simple rw cti config registers */
#define cti_config_reg32_rw(name, cfgname, offset) \
static ssize_t name##_show(struct device *dev, \
@@ -388,8 +374,13 @@ static ssize_t inen_store(struct device *dev,
config->ctiinen[index] = val;
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIINEN(index), val);
if (cti_active(config)) {
if (drvdata->extended_cti)
cti_write_single_reg(drvdata, CTIINEN_EXTENDED(index), val);
else
cti_write_single_reg(drvdata, CTIINEN(index), val);
}
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -427,8 +418,13 @@ static ssize_t outen_store(struct device *dev,
config->ctiouten[index] = val;
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIOUTEN(index), val);
if (cti_active(config)) {
if (drvdata->extended_cti)
cti_write_single_reg(drvdata, CTIOUTEN_EXTENDED(index), val);
else
cti_write_single_reg(drvdata, CTIOUTEN(index), val);
}
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -448,9 +444,143 @@ static ssize_t intack_store(struct device *dev,
}
static DEVICE_ATTR_WO(intack);
cti_config_reg32_rw(gate, ctigate, CTIGATE);
cti_config_reg32_rw(asicctl, asicctl, ASICCTL);
cti_config_reg32_rw(appset, ctiappset, CTIAPPSET);
static ssize_t gate_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u32 val = 0;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
if (cti_active(config)) {
CS_UNLOCK(drvdata->base);
if (drvdata->extended_cti)
val = readl_relaxed(drvdata->base + CTIGATE_EXTENDED);
else
val = readl_relaxed(drvdata->base + CTIGATE);
CS_LOCK(drvdata->base);
}
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#x\n", val);
}
static ssize_t gate_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
/* write through if enabled */
if (cti_active(config)) {
if (drvdata->extended_cti)
cti_write_single_reg(drvdata, CTIGATE_EXTENDED, val);
else
cti_write_single_reg(drvdata, CTIGATE, val);
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(gate);
static ssize_t asicctl_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u32 val = 0;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
if (cti_active(config)) {
CS_UNLOCK(drvdata->base);
if (drvdata->extended_cti)
val = readl_relaxed(drvdata->base + ASICCTL_EXTENDED);
else
val = readl_relaxed(drvdata->base + ASICCTL);
CS_LOCK(drvdata->base);
}
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#x\n", val);
}
static ssize_t asicctl_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
/* write through if enabled */
if (cti_active(config)) {
if (drvdata->extended_cti)
cti_write_single_reg(drvdata, ASICCTL_EXTENDED, val);
else
cti_write_single_reg(drvdata, ASICCTL, val);
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(asicctl);
static ssize_t appset_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
u32 val = 0;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
spin_lock(&drvdata->spinlock);
if (cti_active(config)) {
CS_UNLOCK(drvdata->base);
if (drvdata->extended_cti)
val = readl_relaxed(drvdata->base + CTIAPPSET_EXTENDED);
else
val = readl_relaxed(drvdata->base + CTIAPPSET);
CS_LOCK(drvdata->base);
}
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%#x\n", val);
}
static ssize_t appset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
/* write through if enabled */
if (cti_active(config)) {
if (drvdata->extended_cti)
cti_write_single_reg(drvdata, CTIAPPSET_EXTENDED, val);
else
cti_write_single_reg(drvdata, CTIAPPSET, val);
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(appset);
static ssize_t appclear_store(struct device *dev,
struct device_attribute *attr,
@@ -469,8 +599,12 @@ static ssize_t appclear_store(struct device *dev,
config->ctiappset &= ~val;
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIAPPCLEAR, val);
if (cti_active(config)) {
if (drvdata->extended_cti)
cti_write_single_reg(drvdata, CTIAPPCLEAR_EXTENDED, val);
else
cti_write_single_reg(drvdata, CTIAPPCLEAR, val);
}
spin_unlock(&drvdata->spinlock);
return size;
}
@@ -490,13 +624,177 @@ static ssize_t apppulse_store(struct device *dev,
spin_lock(&drvdata->spinlock);
/* write through if enabled */
if (cti_active(config))
cti_write_single_reg(drvdata, CTIAPPPULSE, val);
if (cti_active(config)) {
if (drvdata->extended_cti)
cti_write_single_reg(drvdata, CTIAPPPULSE_EXTENDED, val);
else
cti_write_single_reg(drvdata, CTIAPPPULSE, val);
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(apppulse);
static ssize_t triginstatus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
u32 val = 0;
u32 n, i = 0;
ssize_t len = 0;
int ret;
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
spin_lock(&drvdata->spinlock);
if (drvdata->config.hw_powered) {
if (drvdata->extended_cti) {
i = (config->nr_trig_max - 1) / 32;
for (n = 0; n <= i; n++) {
val = readl_relaxed(drvdata->base + CTITRIGINSTATUS_EXTENDED(n));
len += scnprintf(buf + len, PAGE_SIZE - len, "%u - %u : 0x%x\n", n * 32,
((n + 1) * 32) - 1, val);
}
} else
val = readl_relaxed(drvdata->base + CTITRIGINSTATUS);
}
spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(dev->parent);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
if (drvdata->extended_cti)
return len;
else
return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
}
static DEVICE_ATTR_RO(triginstatus);
static ssize_t trigoutstatus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *config = &drvdata->config;
u32 val = 0;
u32 n, i = 0;
ssize_t len = 0;
int ret;
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
spin_lock(&drvdata->spinlock);
if (drvdata->config.hw_powered) {
if (drvdata->extended_cti) {
i = (config->nr_trig_max - 1) / 32;
for (n = 0; n <= i; n++) {
val = readl_relaxed(drvdata->base + CTITRIGOUTSTATUS_EXTENDED(n));
len += scnprintf(buf + len, PAGE_SIZE - len, "%u - %u : 0x%x\n", n * 32,
((n + 1) * 32) - 1, val);
}
} else
val = readl_relaxed(drvdata->base + CTITRIGOUTSTATUS);
}
spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(dev->parent);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
if (drvdata->extended_cti)
return len;
else
return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
}
static DEVICE_ATTR_RO(trigoutstatus);
static ssize_t chinstatus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
u32 val = 0;
int ret;
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
spin_lock(&drvdata->spinlock);
if (drvdata->config.hw_powered) {
if (drvdata->extended_cti)
val = readl_relaxed(drvdata->base + CTICHINSTATUS_EXTENDED);
else
val = readl_relaxed(drvdata->base + CTICHINSTATUS);
}
spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(dev->parent);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
}
static DEVICE_ATTR_RO(chinstatus);
static ssize_t choutstatus_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
u32 val = 0;
int ret;
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
spin_lock(&drvdata->spinlock);
if (drvdata->config.hw_powered) {
if (drvdata->extended_cti)
val = readl_relaxed(drvdata->base + CTICHOUTSTATUS_EXTENDED);
else
val = readl_relaxed(drvdata->base + CTICHOUTSTATUS);
}
spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(dev->parent);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
}
static DEVICE_ATTR_RO(choutstatus);
/*
* Define CONFIG_CORESIGHT_CTI_INTEGRATION_REGS to enable the access to the
* integration control registers. Normally only used to investigate connection
@@ -512,10 +810,10 @@ static struct attribute *coresight_cti_regs_attrs[] = {
&dev_attr_appset.attr,
&dev_attr_appclear.attr,
&dev_attr_apppulse.attr,
coresight_cti_reg(triginstatus, CTITRIGINSTATUS),
coresight_cti_reg(trigoutstatus, CTITRIGOUTSTATUS),
coresight_cti_reg(chinstatus, CTICHINSTATUS),
coresight_cti_reg(choutstatus, CTICHOUTSTATUS),
&dev_attr_triginstatus.attr,
&dev_attr_trigoutstatus.attr,
&dev_attr_chinstatus.attr,
&dev_attr_choutstatus.attr,
#ifdef CONFIG_CORESIGHT_CTI_INTEGRATION_REGS
coresight_cti_reg_rw(itctrl, CORESIGHT_ITCTRL),
coresight_cti_reg(ittrigin, ITTRIGIN),
@@ -711,10 +1009,8 @@ static ssize_t trigout_filtered_show(struct device *dev,
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *cfg = &drvdata->config;
int size = 0, nr_trig_max = cfg->nr_trig_max;
unsigned long mask = cfg->trig_out_filter;
if (mask)
size = bitmap_print_to_pagebuf(true, buf, &mask, nr_trig_max);
size = bitmap_print_to_pagebuf(true, buf, cfg->trig_out_filter, nr_trig_max);
return size;
}
static DEVICE_ATTR_RO(trigout_filtered);
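trig_out_filter is now a full bitmap (see the DECLARE_BITMAP(..., CTIINOUTEN_MAX) change later in this commit), and bitmap_print_to_pagebuf() takes a const unsigned long * directly, so the old copy into a single unsigned long, which would drop any trigger bit above BITS_PER_LONG - 1, is no longer needed. A minimal sketch of the resulting pattern, assuming only <linux/bitmap.h>; filter_sketch_show() is illustrative and not part of the patch:

#include <linux/bitmap.h>

static ssize_t filter_sketch_show(char *buf)
{
	DECLARE_BITMAP(filter, CTIINOUTEN_MAX);

	bitmap_zero(filter, CTIINOUTEN_MAX);
	set_bit(40, filter);	/* a u32 copy would silently drop bit 40 */

	/* prints the set bits of the whole 128-bit array */
	return bitmap_print_to_pagebuf(true, buf, filter, CTIINOUTEN_MAX);
}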
@@ -734,6 +1030,11 @@ static ssize_t chan_xtrigs_reset_store(struct device *dev,
for (i = 0; i < config->nr_trig_max; i++) {
config->ctiinen[i] = 0;
config->ctiouten[i] = 0;
if (drvdata->gpio_trigin->trig == i)
cti_trigin_gpio_disable(drvdata);
if (drvdata->gpio_trigout->trig == i)
cti_trigout_gpio_disable(drvdata);
}
/* clear the other regs */
@@ -926,9 +1227,8 @@ static ssize_t trigin_sig_show(struct device *dev,
struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *cfg = &drvdata->config;
unsigned long mask = con->con_in->used_mask;
return bitmap_print_to_pagebuf(true, buf, &mask, cfg->nr_trig_max);
return bitmap_print_to_pagebuf(true, buf, con->con_in->used_mask, cfg->nr_trig_max);
}
static ssize_t trigout_sig_show(struct device *dev,
@@ -940,9 +1240,8 @@ static ssize_t trigout_sig_show(struct device *dev,
struct cti_trig_con *con = (struct cti_trig_con *)ext_attr->var;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cti_config *cfg = &drvdata->config;
unsigned long mask = con->con_out->used_mask;
return bitmap_print_to_pagebuf(true, buf, &mask, cfg->nr_trig_max);
return bitmap_print_to_pagebuf(true, buf, con->con_out->used_mask, cfg->nr_trig_max);
}
/* convert a sig type id to a name */


@@ -37,6 +37,20 @@
#define CTICHOUTSTATUS 0x13C
#define CTIGATE 0x140
#define ASICCTL 0x144
#define CTIINTACK_EXTENDED(n) (0x020 + (4 * n))
#define CTIAPPSET_EXTENDED 0x004
#define CTIAPPCLEAR_EXTENDED 0x008
#define CTIAPPPULSE_EXTENDED 0x00C
#define CTIINEN_EXTENDED(n) (0x400 + (4 * n))
#define CTIOUTEN_EXTENDED(n) (0x800 + (4 * n))
#define CTITRIGINSTATUS_EXTENDED(n) (0x040 + (4 * n))
#define CTITRIGOUTSTATUS_EXTENDED(n) (0x060 + (4 * n))
#define CTICHINSTATUS_EXTENDED 0x080
#define CTICHOUTSTATUS_EXTENDED 0x084
#define CTIGATE_EXTENDED 0x088
#define ASICCTL_EXTENDED 0x08c
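These macros imply a layout where each trigger gets its own enable register (CTIINEN_EXTENDED/CTIOUTEN_EXTENDED at 0x400/0x800 + 4 * n) while the status registers pack 32 triggers per word, so CTITRIGINSTATUS_EXTENDED(n) covers triggers n*32 to n*32 + 31. A hedged helper sketch under that assumption; cti_ext_trigin_asserted() is illustrative and not part of the patch:

static inline bool cti_ext_trigin_asserted(void __iomem *base,
					   unsigned int trig)
{
	/* status word n carries triggers n*32 .. n*32 + 31 */
	u32 word = readl_relaxed(base + CTITRIGINSTATUS_EXTENDED(trig / 32));

	return word & BIT(trig % 32);
}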
/* Integration test registers */
#define ITCHINACK 0xEDC /* WO CTI CSSoc 400 only*/
#define ITTRIGINACK 0xEE0 /* WO CTI CSSoc 400 only*/
@@ -46,6 +60,16 @@
#define ITTRIGOUTACK 0xEF0 /* RO CTI CSSoc 400 only*/
#define ITCHIN 0xEF4 /* RO */
#define ITTRIGIN 0xEF8 /* RO */
#define ITCHINACK_EXTENDED 0xE70 /* WO CTI CSSoc 400 only*/
#define ITTRIGINACK_EXTENDED(n) (0xE80 + (4 * n)) /* WO CTI CSSoc 400 only*/
#define ITCHOUT_EXTENDED 0xE74 /* WO RW-600 */
#define ITTRIGOUT_EXTENDED(n) (0xEA0 + (4 * n)) /* WO RW-600 */
#define ITCHOUTACK_EXTENDED 0xE78 /* RO CTI CSSoc 400 only*/
#define ITTRIGOUTACK_EXTENDED(n) (0xEC0 + (4 * n)) /* RO CTI CSSoc 400 only*/
#define ITCHIN_EXTENDED 0xE7C /* RO */
#define ITTRIGIN_EXTENDED(n) (0xEE0 + (4 * n)) /* RO */
/* management registers */
#define CTIDEVAFF0 0xFA8
#define CTIDEVAFF1 0xFAC
@@ -56,7 +80,7 @@
* Max of in and out defined in the DEVID register.
* - pick up actual number used from .dts parameters if present.
*/
#define CTIINOUTEN_MAX 32
#define CTIINOUTEN_MAX 128
/**
* Group of related trigger signals
@@ -67,7 +91,7 @@
*/
struct cti_trig_grp {
int nr_sigs;
u32 used_mask;
DECLARE_BITMAP(used_mask, CTIINOUTEN_MAX);
int sig_types[];
};
@@ -144,11 +168,14 @@ struct cti_config {
int enable_req_count;
bool hw_enabled;
bool hw_powered;
#if defined(CONFIG_DEEPSLEEP) || defined(CONFIG_HIBERNATION)
bool hw_enabled_store;
#endif
/* registered triggers and filtering */
u32 trig_in_use;
u32 trig_out_use;
u32 trig_out_filter;
DECLARE_BITMAP(trig_in_use, CTIINOUTEN_MAX);
DECLARE_BITMAP(trig_out_use, CTIINOUTEN_MAX);
DECLARE_BITMAP(trig_out_filter, CTIINOUTEN_MAX);
bool trig_filter_enable;
u8 xtrig_rchan_sel;
@@ -161,6 +188,11 @@ struct cti_config {
u32 asicctl;
};
struct cti_pctrl {
struct pinctrl *pctrl;
int trig;
};
/**
* struct cti_drvdata - specifics for the CTI device
* @base: Memory mapped base address for this component..
@@ -170,6 +202,7 @@ struct cti_config {
* @config: Configuration data for this CTI device.
* @node: List entry of this device in the list of CTI devices.
* @csdev_release: release function for underlying coresight_device.
* @dclk: optional clock to be dynamically enabled when CTI device is enabled.
*/
struct cti_drvdata {
void __iomem *base;
@@ -179,6 +212,10 @@ struct cti_drvdata {
struct cti_config config;
struct list_head node;
void (*csdev_release)(struct device *dev);
bool extended_cti;
struct cti_pctrl *gpio_trigin;
struct cti_pctrl *gpio_trigout;
struct clk *dclk;
};
/*
@@ -217,6 +254,8 @@ struct cti_trig_con *cti_allocate_trig_con(struct device *dev, int in_sigs,
int out_sigs);
int cti_enable(struct coresight_device *csdev, enum cs_mode mode, void *data);
int cti_disable(struct coresight_device *csdev, void *data);
void cti_trigin_gpio_disable(struct cti_drvdata *drvdata);
void cti_trigout_gpio_disable(struct cti_drvdata *drvdata);
void cti_write_all_hw_regs(struct cti_drvdata *drvdata);
void cti_write_intack(struct device *dev, u32 ackval);
void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value);


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/coresight.h>
@@ -11,18 +11,107 @@
#include <linux/pm_runtime.h>
#include "coresight-priv.h"
#include "coresight-trace-id.h"
#include "coresight-common.h"
#include "coresight-qmi.h"
struct dummy_drvdata {
struct device *dev;
struct coresight_device *csdev;
u8 traceid;
bool static_atid;
};
DEFINE_CORESIGHT_DEVLIST(source_devs, "dummy_source");
DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink");
/*
* A dummy source can be connected to a QMI device, which can send commands
* to a subsystem via QMI. This is represented by the output port of the dummy
* source connected to the input port of the QMI device.
*
* Returns : coresight_device ptr for the QMI device if a QMI is found.
* : NULL otherwise.
*/
static struct coresight_device *
dummy_source_get_qmi_device(struct dummy_drvdata *drvdata)
{
int i;
struct coresight_device *tmp, *dummy = drvdata->csdev;
if (!IS_ENABLED(CONFIG_CORESIGHT_QMI))
return NULL;
for (i = 0; i < dummy->pdata->nr_outconns; i++) {
tmp = dummy->pdata->out_conns[i]->dest_dev;
if (tmp && coresight_is_qmi_device(tmp))
return tmp;
}
return NULL;
}
/*
* qmi_assign_dummy_source_atid: assign the ATID to the subsystem via the QMI
* device. If there is no QMI helper device connection, return 0 and exit.
*
* Returns : 0 on success
*/
static int qmi_assign_dummy_source_atid(struct dummy_drvdata *drvdata)
{
struct coresight_device *qmi = dummy_source_get_qmi_device(drvdata);
struct coresight_atid_assign_req_msg_v01 *atid_data;
const char *trace_name;
int ret;
ret = of_property_read_string(drvdata->dev->of_node,
"trace-name", &trace_name);
if (ret)
return -EINVAL;
atid_data = kzalloc(sizeof(*atid_data), GFP_KERNEL);
if (!atid_data)
return -ENOMEM;
strscpy(atid_data->name, trace_name, CORESIGHT_QMI_TRACE_NAME_MAX_LEN);
atid_data->atids[0] = drvdata->traceid;
atid_data->num_atids = 1;
if (qmi)
ret = coresight_qmi_assign_atid(qmi, atid_data);
else
ret = 0;
kfree(atid_data);
return ret;
}
static int dummy_source_enable(struct coresight_device *csdev,
struct perf_event *event, enum cs_mode mode)
{
int ret;
int trace_id;
struct dummy_drvdata *drvdata =
dev_get_drvdata(csdev->dev.parent);
if (!drvdata->static_atid) {
trace_id = coresight_trace_id_get_system_id();
if (trace_id < 0)
return trace_id;
drvdata->traceid = (u8)trace_id;
ret = qmi_assign_dummy_source_atid(drvdata);
if (ret) {
coresight_trace_id_put_system_id(trace_id);
dev_err(drvdata->dev, "Assign dummy source atid fail\n");
return ret;
}
} else {
ret = coresight_trace_id_reserve_id(drvdata->traceid);
if (ret) {
dev_err(drvdata->dev, "Reserve atid: %d fail\n", drvdata->traceid);
return ret;
}
}
coresight_csr_set_etr_atid(csdev, drvdata->traceid, true);
dev_dbg(csdev->dev.parent, "Dummy source enabled\n");
return 0;
@@ -31,6 +120,13 @@ static int dummy_source_enable(struct coresight_device *csdev,
static void dummy_source_disable(struct coresight_device *csdev,
struct perf_event *event)
{
struct dummy_drvdata *drvdata =
dev_get_drvdata(csdev->dev.parent);
coresight_csr_set_etr_atid(csdev, drvdata->traceid, false);
if (drvdata->static_atid)
coresight_trace_id_free_reserved_id(drvdata->traceid);
else
coresight_trace_id_put_system_id(drvdata->traceid);
dev_dbg(csdev->dev.parent, "Dummy source disabled\n");
}
@@ -67,6 +163,31 @@ static const struct coresight_ops dummy_sink_cs_ops = {
.sink_ops = &dummy_sink_ops,
};
static ssize_t traceid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct dummy_drvdata *drvdata = dev_get_drvdata(dev->parent);
val = drvdata->traceid;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(traceid);
static struct attribute *dummy_source_attrs[] = {
&dev_attr_traceid.attr,
NULL,
};
static struct attribute_group dummy_source_attr_grp = {
.attrs = dummy_source_attrs,
};
static const struct attribute_group *dummy_source_attr_grps[] = {
&dummy_source_attr_grp,
NULL,
};
static int dummy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -74,6 +195,7 @@ static int dummy_probe(struct platform_device *pdev)
struct coresight_platform_data *pdata;
struct dummy_drvdata *drvdata;
struct coresight_desc desc = { 0 };
int trace_id;
if (of_device_is_compatible(node, "arm,coresight-dummy-source")) {
@@ -85,6 +207,7 @@ static int dummy_probe(struct platform_device *pdev)
desc.subtype.source_subtype =
CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS;
desc.ops = &dummy_source_cs_ops;
desc.groups = dummy_source_attr_grps;
} else if (of_device_is_compatible(node, "arm,coresight-dummy-sink")) {
desc.name = coresight_alloc_device_name(&sink_devs, dev);
if (!desc.name)
@@ -117,6 +240,16 @@ static int dummy_probe(struct platform_device *pdev)
return PTR_ERR(drvdata->csdev);
pm_runtime_enable(dev);
if (of_device_is_compatible(node, "arm,coresight-dummy-source")) {
if (!of_property_read_u32(pdev->dev.of_node, "atid", &trace_id)) {
drvdata->static_atid = true;
drvdata->traceid = (u8)trace_id;
}
}
dev_dbg(dev, "Dummy device initialized\n");
return 0;
@@ -128,6 +261,7 @@ static int dummy_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
pm_runtime_disable(dev);
coresight_unregister(drvdata->csdev);
return 0;
}


@@ -736,7 +736,7 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
return ret == -ETIMEDOUT ? -EPROBE_DEFER : ret;
}
dev_set_drvdata(dev, drvdata);


@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Description: CoreSight Program Flow Trace driver
*/
@@ -33,6 +34,7 @@
#include "coresight-etm.h"
#include "coresight-etm-perf.h"
#include "coresight-trace-id.h"
#include "coresight-common.h"
/*
* Not really modular but using module_param is the easiest way to
@@ -484,7 +486,7 @@ static int etm_enable_perf(struct coresight_device *csdev,
struct perf_event *event)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int trace_id;
int trace_id, ret = 0;
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
return -EINVAL;
@@ -508,8 +510,14 @@ static int etm_enable_perf(struct coresight_device *csdev,
}
drvdata->traceid = (u8)trace_id;
coresight_csr_set_etr_atid(csdev, drvdata->traceid, true);
/* And enable it */
return etm_enable_hw(drvdata);
ret = etm_enable_hw(drvdata);
if (ret)
coresight_csr_set_etr_atid(csdev, drvdata->traceid, false);
return ret;
}
static int etm_enable_sysfs(struct coresight_device *csdev)
@@ -525,6 +533,8 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
if (ret < 0)
goto unlock_enable_sysfs;
coresight_csr_set_etr_atid(csdev, drvdata->traceid, true);
/*
* Configure the ETM only if the CPU is online. If it isn't online
* hw configuration will take place on the local CPU during bring up.
@@ -541,8 +551,10 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
ret = -ENODEV;
}
if (ret)
if (ret) {
coresight_csr_set_etr_atid(csdev, drvdata->traceid, false);
etm_release_trace_id(drvdata);
}
unlock_enable_sysfs:
spin_unlock(&drvdata->spinlock);
@@ -694,7 +706,7 @@ static void etm_disable(struct coresight_device *csdev,
WARN_ON_ONCE(mode);
return;
}
coresight_csr_set_etr_atid(csdev, drvdata->traceid, false);
if (mode)
local_set(&drvdata->mode, CS_MODE_DISABLED);
}
@@ -921,7 +933,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
etmdrvdata[drvdata->cpu] = drvdata;
pm_runtime_put(&adev->dev);
pm_runtime_put_sync(&adev->dev);
dev_info(&drvdata->csdev->dev,
"%s initialized\n", (char *)coresight_get_uci_data(id));
if (boot_enable) {


@@ -47,8 +47,12 @@ static ssize_t etmsr_show(struct device *dev,
{
unsigned long flags, val;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
int ret;
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
pm_runtime_get_sync(dev->parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -56,7 +60,7 @@ static ssize_t etmsr_show(struct device *dev,
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
pm_runtime_put(dev->parent);
pm_runtime_put_sync(dev->parent);
return sprintf(buf, "%#lx\n", val);
}
@@ -940,13 +944,16 @@ static ssize_t seq_curr_state_show(struct device *dev,
unsigned long val, flags;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
int ret;
if (!local_read(&drvdata->mode)) {
val = config->seq_curr_state;
goto out;
}
pm_runtime_get_sync(dev->parent);
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
spin_lock_irqsave(&drvdata->spinlock, flags);
CS_UNLOCK(drvdata->base);
@@ -954,7 +961,7 @@ static ssize_t seq_curr_state_show(struct device *dev,
CS_LOCK(drvdata->base);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
pm_runtime_put(dev->parent);
pm_runtime_put_sync(dev->parent);
out:
return sprintf(buf, "%#lx\n", val);
}


@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2014, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/acpi.h>
@@ -31,7 +32,9 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/of.h>
#include <linux/clk/clk-conf.h>
#include <linux/suspend.h>
#include <asm/barrier.h>
#include <asm/sections.h>
@@ -45,6 +48,7 @@
#include "coresight-self-hosted-trace.h"
#include "coresight-syscfg.h"
#include "coresight-trace-id.h"
#include "coresight-common.h"
static int boot_enable;
module_param(boot_enable, int, 0444);
@@ -458,7 +462,8 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
}
etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
if (drvdata->ext_inp_sel)
etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
etm4x_relaxed_write32(csa, config->cntr_ctrl[i], TRCCNTCTLRn(i));
@@ -779,9 +784,14 @@ static int etm4_enable_perf(struct coresight_device *csdev,
}
drvdata->trcid = (u8)trace_id;
coresight_csr_set_etr_atid(csdev, drvdata->trcid, true);
/* And enable it */
ret = etm4_enable_hw(drvdata);
if (ret)
coresight_csr_set_etr_atid(csdev, drvdata->trcid, false);
out:
return ret;
}
@@ -808,6 +818,8 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
if (ret < 0)
goto unlock_sysfs_enable;
coresight_csr_set_etr_atid(csdev, drvdata->trcid, true);
/*
* Executing etm4_enable_hw on the cpu whose ETM is being enabled
* ensures that register writes occur when cpu is powered.
@@ -820,8 +832,10 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
if (!ret)
drvdata->sticky_enable = true;
if (ret)
if (ret) {
coresight_csr_set_etr_atid(csdev, drvdata->trcid, false);
etm4_release_trace_id(drvdata);
}
unlock_sysfs_enable:
spin_unlock(&drvdata->spinlock);
@@ -903,6 +917,7 @@ static void etm4_disable_hw(void *info)
tsb_csync();
etm4x_relaxed_write32(csa, control, TRCPRGCTLR);
isb();
/* wait for TRCSTATR.PMSTABLE to go to '1' */
if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1))
dev_err(etm_dev,
@@ -1019,6 +1034,7 @@ static void etm4_disable(struct coresight_device *csdev,
break;
}
coresight_csr_set_etr_atid(csdev, drvdata->trcid, false);
if (mode)
local_set(&drvdata->mode, CS_MODE_DISABLED);
}
@@ -1285,6 +1301,8 @@ static void etm4_init_arch_data(void *info)
etmidr5 = etm4x_relaxed_read32(csa, TRCIDR5);
/* NUMEXTIN, bits[8:0] number of external inputs implemented */
drvdata->nr_ext_inp = FIELD_GET(TRCIDR5_NUMEXTIN_MASK, etmidr5);
/* NUMEXTINSEL, bits[11:9] number of external inputs implemented */
drvdata->ext_inp_sel = FIELD_GET(TRCIDR5_NUMEXTINSEL_MASK, etmidr5);
/* TRACEIDSIZE, bits[21:16] indicates the trace ID width */
drvdata->trcid_size = FIELD_GET(TRCIDR5_TRACEIDSIZE_MASK, etmidr5);
/* ATBTRIG, bit[22] implementation can support ATB triggers? */
@@ -1712,7 +1730,8 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
}
state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
if (drvdata->ext_inp_sel)
state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
state->trccntrldvr[i] = etm4x_read32(csa, TRCCNTRLDVRn(i));
@@ -1844,7 +1863,8 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
}
etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
if (drvdata->ext_inp_sel)
etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, state->trccntrldvr[i], TRCCNTRLDVRn(i));
@@ -2016,7 +2036,8 @@ static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
type_name = "etm";
}
desc.name = devm_kasprintf(dev, GFP_KERNEL,
if (of_property_read_string(dev->of_node, "coresight-name", &desc.name))
desc.name = devm_kasprintf(dev, GFP_KERNEL,
"%s%d", type_name, drvdata->cpu);
if (!desc.name)
return -ENOMEM;
@@ -2142,7 +2163,7 @@ static int etm4_probe_amba(struct amba_device *adev, const struct amba_id *id)
dev_set_drvdata(dev, drvdata);
ret = etm4_probe(dev);
if (!ret)
pm_runtime_put(&adev->dev);
pm_runtime_put_sync(&adev->dev);
return ret;
}
@@ -2176,7 +2197,7 @@ static int etm4_probe_platform_dev(struct platform_device *pdev)
ret = etm4_probe(&pdev->dev);
pm_runtime_put(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
return ret;
}
@@ -2205,7 +2226,7 @@ static int etm4_probe_cpu(unsigned int cpu)
etm4_add_coresight_dev(&init_arg);
pm_runtime_put(init_arg.dev);
pm_runtime_put_sync(init_arg.dev);
return 0;
}
@@ -2340,8 +2361,43 @@ static int etm4_runtime_resume(struct device *dev)
}
#endif
#ifdef CONFIG_DEEPSLEEP
static int etm_suspend(struct device *dev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
if (pm_suspend_target_state == PM_SUSPEND_MEM)
coresight_disable(drvdata->csdev);
return 0;
}
#else
static int etm_suspend(struct device *dev)
{
return 0;
}
#endif
#ifdef CONFIG_HIBERNATION
static int etm_freeze(struct device *dev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev);
coresight_disable(drvdata->csdev);
return 0;
}
#else
static int etm_freeze(struct device *dev)
{
return 0;
}
#endif
static const struct dev_pm_ops etm4_dev_pm_ops = {
SET_RUNTIME_PM_OPS(etm4_runtime_suspend, etm4_runtime_resume, NULL)
.suspend = etm_suspend,
.freeze = etm_freeze,
};
static const struct of_device_id etm4_sysreg_match[] = {


@@ -2454,10 +2454,14 @@ static ssize_t coresight_etm4x_reg_show(struct device *dev,
{
u32 val, offset;
struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
int ret;
offset = coresight_etm4x_attr_to_offset(d_attr);
pm_runtime_get_sync(dev->parent);
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
val = etmv4_cross_read(drvdata, offset);
pm_runtime_put_sync(dev->parent);


@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CORESIGHT_CORESIGHT_ETM_H
@@ -162,6 +163,7 @@
#define TRCIDR4_NUMVMIDC_MASK GENMASK(31, 28)
#define TRCIDR5_NUMEXTIN_MASK GENMASK(8, 0)
#define TRCIDR5_NUMEXTINSEL_MASK GENMASK(11, 9)
#define TRCIDR5_TRACEIDSIZE_MASK GENMASK(21, 16)
#define TRCIDR5_ATBTRIG BIT(22)
#define TRCIDR5_LPOVERRIDE BIT(23)
@@ -997,6 +999,7 @@ struct etmv4_drvdata {
u8 nr_pe_cmp;
u8 nr_addr_cmp;
u8 nr_cntr;
u8 ext_inp_sel;
u8 nr_ext_inp;
u8 numcidc;
u8 numvmidc;


@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
* Copyright (c) 2011-2012, 2017, The Linux Foundation. All rights reserved.
*
* Description: CoreSight Funnel driver
*/
@@ -19,6 +19,9 @@
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/cpu_pm.h>
#include <linux/pm_domain.h>
#include "coresight-priv.h"
@@ -31,7 +34,9 @@
#define FUNNEL_ENSx_MASK 0xff
DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel");
static enum cpuhp_state hp_online;
static LIST_HEAD(cpu_pm_list);
static DEFINE_SPINLOCK(delay_lock);
/**
* struct funnel_drvdata - specifics associated to a funnel component
* @base: memory mapped base address for this component.
@@ -39,6 +44,7 @@ DEFINE_CORESIGHT_DEVLIST(funnel_devs, "funnel");
* @csdev: component vitals needed by the framework.
* @priority: port selection order.
* @spinlock: serialize enable/disable operations.
* @dclk: optional clock to be dynamically enabled when this device is enabled.
*/
struct funnel_drvdata {
void __iomem *base;
@@ -46,6 +52,9 @@ struct funnel_drvdata {
struct coresight_device *csdev;
unsigned long priority;
spinlock_t spinlock;
struct clk *dclk;
struct pm_config pm_config;
struct list_head link;
};
static int dynamic_funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
@@ -83,7 +92,19 @@ static int funnel_enable(struct coresight_device *csdev,
unsigned long flags;
bool first_enable = false;
if (drvdata->dclk) {
rc = clk_prepare_enable(drvdata->dclk);
if (rc)
return rc;
}
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->pm_config.hw_powered) {
rc = -EINVAL;
goto out;
}
if (atomic_read(&in->dest_refcnt) == 0) {
if (drvdata->base)
rc = dynamic_funnel_enable_hw(drvdata, in->dest_port);
@@ -92,8 +113,12 @@ static int funnel_enable(struct coresight_device *csdev,
}
if (!rc)
atomic_inc(&in->dest_refcnt);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (rc && drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
if (first_enable)
dev_dbg(&csdev->dev, "FUNNEL inport %d enabled\n",
in->dest_port);
@@ -128,13 +153,22 @@ static void funnel_disable(struct coresight_device *csdev,
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->pm_config.hw_powered)
goto out;
if (atomic_dec_return(&in->dest_refcnt) == 0) {
if (drvdata->base)
dynamic_funnel_disable_hw(drvdata, in->dest_port);
last_disable = true;
}
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
if (last_disable)
dev_dbg(&csdev->dev, "FUNNEL inport %d disabled\n",
in->dest_port);
@@ -191,14 +225,35 @@ static ssize_t funnel_ctrl_show(struct device *dev,
{
u32 val;
struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
int ret;
unsigned long flags;
pm_runtime_get_sync(dev->parent);
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->pm_config.hw_powered) {
ret = -EINVAL;
goto out;
}
val = get_funnel_ctrl_hw(drvdata);
pm_runtime_put(dev->parent);
return sprintf(buf, "%#x\n", val);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
pm_runtime_put_sync(dev->parent);
if (ret)
return ret;
else
return scnprintf(buf, PAGE_SIZE, "%#x\n", val);
}
static DEVICE_ATTR_RO(funnel_ctrl);
@@ -209,13 +264,75 @@ static struct attribute *coresight_funnel_attrs[] = {
};
ATTRIBUTE_GROUPS(coresight_funnel);
static int funnel_get_resource_byname(struct device_node *np,
char *ch_base, struct resource *res)
{
const char *name = NULL;
int index = 0, found = 0;
while (!of_property_read_string_index(np, "reg-names", index, &name)) {
if (strcmp(ch_base, name)) {
index++;
continue;
}
/* We have a match and @index is where it's at */
found = 1;
break;
}
if (!found)
return -EINVAL;
return of_address_to_resource(np, index, res);
}
static void funnel_init_power_state(struct device *dev, struct funnel_drvdata *drvdata)
{
int cpu;
struct cpumask *cpumask;
struct pm_config *pm_config = &drvdata->pm_config;
struct generic_pm_domain *pd;
if (dev->pm_domain) {
pd = pd_to_genpd(dev->pm_domain);
cpumask = pd->cpus;
if (cpumask_empty(cpumask)) {
pm_config->hw_powered = true;
return;
}
cpus_read_lock();
for_each_online_cpu(cpu) {
if (cpumask_test_cpu(cpu, cpumask)) {
pm_config->hw_powered = true;
break;
}
}
pm_config->pd_cpumask = cpumask;
cpumask_and(&pm_config->powered_cpus, cpumask, cpu_online_mask);
cpumask_copy(&pm_config->online_cpus, &pm_config->powered_cpus);
spin_lock(&delay_lock);
list_add(&drvdata->link, &cpu_pm_list);
spin_unlock(&delay_lock);
cpus_read_unlock();
return;
}
pm_config->hw_powered = true;
}
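Under the assumption that the hotplug and CPU PM callbacks below are the only writers of powered_cpus, the invariant this function establishes can be stated as a one-line predicate; funnel_domain_powered() is an illustrative sketch, not part of the patch:

static inline bool funnel_domain_powered(struct pm_config *pm_config)
{
	/* hw_powered tracks whether any CPU of the power domain is up */
	return !cpumask_empty(&pm_config->powered_cpus);
}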
static int funnel_probe(struct device *dev, struct resource *res)
{
int ret;
void __iomem *base;
struct coresight_platform_data *pdata = NULL;
struct funnel_drvdata *drvdata;
struct resource res_real;
struct coresight_desc desc = { 0 };
struct device_node *np = dev->of_node;
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-funnel"))
@@ -233,14 +350,36 @@ static int funnel_probe(struct device *dev, struct resource *res)
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
return ret == -ETIMEDOUT ? -EPROBE_DEFER : ret;
}
/*
* Map the device base for dynamic-funnel, which has been
* validated by AMBA core.
*/
if (res) {
drvdata->dclk = devm_clk_get(dev, "dynamic_clk");
if (!IS_ERR(drvdata->dclk)) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret)
return ret == -ETIMEDOUT ? -EPROBE_DEFER : ret;
} else
drvdata->dclk = NULL;
if (of_property_read_bool(np, "qcom,duplicate-funnel")) {
ret = funnel_get_resource_byname(np, "funnel-base-real",
&res_real);
if (ret)
return ret;
res = &res_real;
base = devm_ioremap(dev, res->start, resource_size(res));
if (!base) {
ret = -ENOMEM;
goto out_disable_clk;
}
drvdata->base = base;
desc.groups = coresight_funnel_groups;
} else if (res) {
/*
* Map the device base for dynamic-funnel, which has been
* validated by AMBA core.
*/
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
@@ -271,10 +410,13 @@ static int funnel_probe(struct device *dev, struct resource *res)
ret = PTR_ERR(drvdata->csdev);
goto out_disable_clk;
}
pm_runtime_put(dev);
funnel_init_power_state(dev, drvdata);
pm_runtime_put_sync(dev);
ret = 0;
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
out_disable_clk:
if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
clk_disable_unprepare(drvdata->atclk);
@@ -285,6 +427,11 @@ static int funnel_remove(struct device *dev)
{
struct funnel_drvdata *drvdata = dev_get_drvdata(dev);
spin_lock(&delay_lock);
if (drvdata->pm_config.pm_enable)
list_del(&drvdata->link);
spin_unlock(&delay_lock);
coresight_unregister(drvdata->csdev);
return 0;
@@ -382,6 +529,127 @@ static void dynamic_funnel_remove(struct amba_device *adev)
funnel_remove(&adev->dev);
}
static int funnel_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
void *v)
{
unsigned int cpu = smp_processor_id();
struct funnel_drvdata *drvdata, *tmp;
struct pm_config *pm_config;
unsigned long flags;
switch (cmd) {
case CPU_PM_ENTER:
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!cpumask_test_cpu(cpu, &pm_config->online_cpus)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
continue;
}
cpumask_clear_cpu(cpu, &pm_config->powered_cpus);
if (cpumask_empty(&pm_config->powered_cpus))
pm_config->hw_powered = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
break;
case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED:
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!cpumask_test_cpu(cpu, &pm_config->online_cpus)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
continue;
}
pm_config->hw_powered = true;
cpumask_set_cpu(cpu, &pm_config->powered_cpus);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
break;
}
return NOTIFY_OK;
}
static struct notifier_block funnel_cpu_pm_nb = {
.notifier_call = funnel_cpu_pm_notify,
};
static int funnel_offline_cpu(unsigned int cpu)
{
struct funnel_drvdata *drvdata, *tmp;
struct pm_config *pm_config;
unsigned long flags;
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
cpumask_clear_cpu(cpu, &pm_config->online_cpus);
cpumask_clear_cpu(cpu, &pm_config->powered_cpus);
if (cpumask_empty(&pm_config->powered_cpus))
pm_config->hw_powered = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
return 0;
}
static int funnel_online_cpu(unsigned int cpu)
{
struct funnel_drvdata *drvdata, *tmp;
struct pm_config *pm_config;
unsigned long flags;
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
cpumask_set_cpu(cpu, &pm_config->powered_cpus);
cpumask_set_cpu(cpu, &pm_config->online_cpus);
pm_config->hw_powered = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
return 0;
}
static int __init funnel_pm_setup(void)
{
int ret;
ret = cpu_pm_register_notifier(&funnel_cpu_pm_nb);
if (ret)
return ret;
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"arm/coresight-funnel:online",
funnel_online_cpu, funnel_offline_cpu);
if (ret > 0) {
hp_online = ret;
return 0;
}
cpu_pm_unregister_notifier(&funnel_cpu_pm_nb);
return ret;
}
static void funnel_pm_clear(void)
{
cpu_pm_unregister_notifier(&funnel_cpu_pm_nb);
if (hp_online) {
cpuhp_remove_state_nocalls(hp_online);
hp_online = 0;
}
}
static const struct amba_id dynamic_funnel_ids[] = {
{
.id = 0x000bb908,
@@ -413,18 +681,25 @@ static int __init funnel_init(void)
{
int ret;
ret = funnel_pm_setup();
if (ret)
return ret;
ret = platform_driver_register(&static_funnel_driver);
if (ret) {
pr_info("Error registering platform driver\n");
return ret;
goto pm_clear;
}
ret = amba_driver_register(&dynamic_funnel_driver);
if (ret) {
pr_info("Error registering amba driver\n");
platform_driver_unregister(&static_funnel_driver);
goto pm_clear;
}
return ret;
pm_clear:
funnel_pm_clear();
return ret;
}
@@ -432,6 +707,7 @@ static void __exit funnel_exit(void)
{
platform_driver_unregister(&static_funnel_driver);
amba_driver_unregister(&dynamic_funnel_driver);
funnel_pm_clear();
}
module_init(funnel_init);


@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
* Copyright (c) 2012, 2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/acpi.h>
@@ -183,6 +184,38 @@ static int of_coresight_get_cpu(struct device *dev)
return cpu;
}
/*
* of_coresight_get_atid_number: Get the number of ATIDs of a source device.
*
* Returns the number of ATIDs; a negative value indicates failure.
*/
int of_coresight_get_atid_number(struct coresight_device *csdev)
{
return of_property_count_u32_elems(csdev->dev.parent->of_node, "atid");
}
/*
* of_coresight_get_atid: Get the atid array of a source device.
*
* Returns 0 on success.
*/
int of_coresight_get_atid(struct coresight_device *csdev, u32 *atid, int atid_num)
{
return of_property_read_u32_array(csdev->dev.parent->of_node, "atid", atid, atid_num);
}
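A hypothetical caller would pair the two helpers like this; sketch_read_atids(), the kcalloc() sizing, and the error policy are illustrative assumptions, not part of the patch:

static int sketch_read_atids(struct coresight_device *csdev)
{
	int ret, n = of_coresight_get_atid_number(csdev);
	u32 *atids;

	if (n <= 0)
		return n ? n : -ENODATA;

	atids = kcalloc(n, sizeof(*atids), GFP_KERNEL);	/* needs <linux/slab.h> */
	if (!atids)
		return -ENOMEM;

	ret = of_coresight_get_atid(csdev, atids, n);
	kfree(atids);
	return ret;
}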
/*
* of_coresight_secure_node: Check whether the device is a secure node.
*
* Returns true if this is a secure node.
*/
bool of_coresight_secure_node(struct coresight_device *csdev)
{
return of_property_read_bool(csdev->dev.parent->of_node,
"qcom,secure-component");
}
/*
* of_coresight_parse_endpoint : Parse the given output endpoint @ep
* and fill the connection information in @pdata->out_conns
@@ -205,6 +238,7 @@ static int of_coresight_parse_endpoint(struct device *dev,
struct fwnode_handle *rdev_fwnode;
struct coresight_connection conn = {};
struct coresight_connection *new_conn;
struct device_node *sn = NULL;
do {
/* Parse the local port details */
@@ -242,7 +276,13 @@ static int of_coresight_parse_endpoint(struct device *dev,
*/
conn.dest_fwnode = fwnode_handle_get(rdev_fwnode);
conn.dest_port = rendpoint.port;
conn.source_name = NULL;
sn = of_parse_phandle(ep, "source", 0);
if (sn) {
ret = of_property_read_string(sn,
"coresight-name", &conn.source_name);
of_node_put(sn);
}
new_conn = coresight_add_out_conn(dev, pdata, &conn);
if (IS_ERR_VALUE(new_conn)) {
fwnode_handle_put(conn.dest_fwnode);


@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
* Copyright (c) 2011-2012, 2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CORESIGHT_PRIV_H
@@ -74,6 +75,22 @@ extern ssize_t coresight_simple_show_pair(struct device *_dev,
extern const u32 coresight_barrier_pkt[4];
#define CORESIGHT_BARRIER_PKT_SIZE (sizeof(coresight_barrier_pkt))
struct pm_config {
struct cpumask powered_cpus;
struct cpumask *pd_cpumask;
struct cpumask online_cpus;
struct list_head link;
bool hw_powered;
bool pm_enable;
};
struct delay_probe_arg {
struct amba_device *adev;
struct cpumask *cpumask;
struct list_head link;
const struct amba_id *id;
};
enum etm_addr_type {
ETM_ADDR_TYPE_NONE,
ETM_ADDR_TYPE_SINGLE,
@@ -132,6 +149,7 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *
coresight_get_enabled_sink(struct coresight_device *source);
struct list_head *coresight_get_path(struct coresight_device *csdev);
struct coresight_device *coresight_get_sink_by_id(u32 id);
struct coresight_device *
coresight_find_default_sink(struct coresight_device *csdev);
@@ -164,6 +182,8 @@ struct cti_assoc_op {
extern void coresight_set_cti_ops(const struct cti_assoc_op *cti_op);
extern void coresight_remove_cti_ops(void);
bool of_coresight_secure_node(struct coresight_device *csdev);
/*
* Macros and inline functions to handle CoreSight UCI data and driver
* private data in AMBA ID table entries, and extract data values.


@@ -0,0 +1,439 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include "coresight-qmi.h"
#include "coresight-priv.h"
DEFINE_CORESIGHT_DEVLIST(qmi_devs, "qmi");
static int service_coresight_qmi_new_server(struct qmi_handle *qmi,
struct qmi_service *svc)
{
struct qmi_drvdata *drvdata = container_of(qmi,
struct qmi_drvdata, handle);
drvdata->s_addr.sq_family = AF_QIPCRTR;
drvdata->s_addr.sq_node = svc->node;
drvdata->s_addr.sq_port = svc->port;
drvdata->service_connected = true;
dev_info(drvdata->dev,
"Connection established between QMI handle and %d service\n",
drvdata->inst_id);
return 0;
}
static void service_coresight_qmi_del_server(struct qmi_handle *qmi,
struct qmi_service *svc)
{
struct qmi_drvdata *drvdata = container_of(qmi,
struct qmi_drvdata, handle);
drvdata->service_connected = false;
dev_info(drvdata->dev,
"Connection disconnected between QMI handle and %d service\n",
drvdata->inst_id);
}
static struct qmi_ops server_ops = {
.new_server = service_coresight_qmi_new_server,
.del_server = service_coresight_qmi_del_server,
};
int coresight_qmi_remote_etm_enable(struct coresight_device *csdev)
{
struct qmi_drvdata *drvdata =
dev_get_drvdata(csdev->dev.parent);
struct coresight_set_etm_req_msg_v01 req;
struct coresight_set_etm_resp_msg_v01 resp = { { 0, 0 } };
struct qmi_txn txn;
int ret;
mutex_lock(&drvdata->mutex);
if (!drvdata->service_connected) {
dev_err(drvdata->dev, "QMI service not connected!\n");
ret = -EINVAL;
goto err;
}
/*
* The QMI handle may be NULL in the following scenarios:
* 1. QMI service is not present
* 2. QMI service is present, but the attempt to enable the remote ETM
* comes before the service is ready to handle requests
* 3. Connection between QMI client and QMI service failed
*
* Enable CoreSight without processing further QMI commands, which
* provides the option to enable the remote ETM by other means.
*/
req.state = CORESIGHT_ETM_STATE_ENABLED_V01;
ret = qmi_txn_init(&drvdata->handle, &txn,
coresight_set_etm_resp_msg_v01_ei,
&resp);
if (ret < 0) {
dev_err(drvdata->dev, "QMI tx init failed , ret:%d\n",
ret);
goto err;
}
ret = qmi_send_request(&drvdata->handle, &drvdata->s_addr,
&txn, CORESIGHT_QMI_SET_ETM_REQ_V01,
CORESIGHT_QMI_SET_ETM_REQ_MAX_LEN,
coresight_set_etm_req_msg_v01_ei,
&req);
if (ret < 0) {
dev_err(drvdata->dev, "QMI send ACK failed, ret:%d\n",
ret);
qmi_txn_cancel(&txn);
goto err;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(TIMEOUT_MS));
if (ret < 0) {
dev_err(drvdata->dev, "QMI qmi txn wait failed, ret:%d\n",
ret);
goto err;
}
/* Check the response */
if (resp.resp.result != QMI_RESULT_SUCCESS_V01)
dev_err(drvdata->dev, "QMI request failed 0x%x\n",
resp.resp.error);
mutex_unlock(&drvdata->mutex);
dev_info(drvdata->dev, "Remote ETM tracing enabled for instance %d\n",
drvdata->inst_id);
return 0;
err:
mutex_unlock(&drvdata->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(coresight_qmi_remote_etm_enable);
void coresight_qmi_remote_etm_disable(struct coresight_device *csdev)
{
struct qmi_drvdata *drvdata =
dev_get_drvdata(csdev->dev.parent);
struct coresight_set_etm_req_msg_v01 req;
struct coresight_set_etm_resp_msg_v01 resp = { { 0, 0 } };
struct qmi_txn txn;
int ret;
mutex_lock(&drvdata->mutex);
if (!drvdata->service_connected) {
dev_err(drvdata->dev, "QMI service not connected!\n");
goto err;
}
req.state = CORESIGHT_ETM_STATE_DISABLED_V01;
ret = qmi_txn_init(&drvdata->handle, &txn,
coresight_set_etm_resp_msg_v01_ei,
&resp);
if (ret < 0) {
dev_err(drvdata->dev, "QMI tx init failed , ret:%d\n",
ret);
goto err;
}
ret = qmi_send_request(&drvdata->handle, &drvdata->s_addr,
&txn, CORESIGHT_QMI_SET_ETM_REQ_V01,
CORESIGHT_QMI_SET_ETM_REQ_MAX_LEN,
coresight_set_etm_req_msg_v01_ei,
&req);
if (ret < 0) {
dev_err(drvdata->dev, "QMI send req failed, ret:%d\n",
ret);
qmi_txn_cancel(&txn);
goto err;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(TIMEOUT_MS));
if (ret < 0) {
dev_err(drvdata->dev, "QMI qmi txn wait failed, ret:%d\n",
ret);
goto err;
}
/* Check the response */
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
dev_err(drvdata->dev, "QMI request failed 0x%x\n",
resp.resp.error);
goto err;
}
dev_info(drvdata->dev, "Remote ETM tracing disabled for instance %d\n",
drvdata->inst_id);
err:
mutex_unlock(&drvdata->mutex);
}
EXPORT_SYMBOL_GPL(coresight_qmi_remote_etm_disable);
/*
* coresight_qmi_etr_assign - reassign ownership of an ETR instance to the
* specified subsystem.
*/
int coresight_qmi_etr_assign(struct coresight_device *csdev,
struct coresight_etr_assign_req_msg_v01 *req)
{
struct qmi_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct coresight_etr_assign_resp_msg_v01 resp = { { 0, 0 } };
struct qmi_txn txn;
int ret = 0;
if (!drvdata)
return -EINVAL;
mutex_lock(&drvdata->mutex);
if (!drvdata->service_connected) {
dev_err(drvdata->dev, "QMI service not connected!\n");
ret = -EINVAL;
goto err;
}
/*
* @subsys_id: id of the subsystem which ownership of etr be assigned.
* @etr_id: ETR instance ID.
* @buffer_base: Base address of the DDR buffer to be used by this ETR.
* @buffer_size: Size in bytes of the DDR buffer to be used by this ETR.
*/
ret = qmi_txn_init(&drvdata->handle, &txn,
coresight_etr_assign_resp_msg_v01_ei,
&resp);
if (ret < 0) {
dev_err(drvdata->dev, "QMI tx init failed , ret:%d\n",
ret);
goto err;
}
ret = qmi_send_request(&drvdata->handle, &drvdata->s_addr,
&txn, CORESIGHT_QMI_ETR_ASSIGN_REQ_V01,
CORESIGHT_QMI_ETR_ASSIGN_REQ_MAX_LEN,
coresight_etr_assign_req_msg_v01_ei,
req);
if (ret < 0) {
dev_err(drvdata->dev, "QMI send req failed, ret:%d\n",
ret);
qmi_txn_cancel(&txn);
goto err;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(TIMEOUT_MS));
if (ret < 0) {
dev_err(drvdata->dev, "QMI qmi txn wait failed, ret:%d\n",
ret);
goto err;
}
/* Check the response */
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
dev_err(drvdata->dev, "QMI request failed 0x%x\n",
resp.resp.error);
goto err;
}
dev_info(drvdata->dev, "Assign etr success\n");
ret = 0;
err:
mutex_unlock(&drvdata->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(coresight_qmi_etr_assign);
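For illustration, a caller holding the QMI helper's coresight_device would build the request as below; qmi_csdev, the IDs, and the buffer address/size (SZ_1M is from <linux/sizes.h>) are placeholders, not values taken from the patch:

struct coresight_etr_assign_req_msg_v01 req = {
	.etr_id      = 0,		/* hypothetical ETR instance */
	.subsys_id   = 3,		/* hypothetical subsystem id */
	.buffer_base = 0x80000000ULL,	/* placeholder DDR base */
	.buffer_size = SZ_1M,		/* placeholder size */
};
int ret = coresight_qmi_etr_assign(qmi_csdev, &req);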
int coresight_qmi_assign_atid(struct coresight_device *csdev,
struct coresight_atid_assign_req_msg_v01 *req)
{
struct qmi_drvdata *drvdata =
dev_get_drvdata(csdev->dev.parent);
struct coresight_atid_assign_resp_msg_v01 resp = { { 0, 0 } };
struct qmi_txn txn;
int ret;
mutex_lock(&drvdata->mutex);
if (!drvdata->service_connected) {
dev_err(drvdata->dev, "QMI service not connected!\n");
ret = -EINVAL;
goto err;
}
/*
* The QMI handle may be NULL in the following scenarios:
* 1. QMI service is not present
* 2. QMI service is present, but the attempt to enable the remote ETM
* comes before the service is ready to handle requests
* 3. Connection between QMI client and QMI service failed
*/
ret = qmi_txn_init(&drvdata->handle, &txn,
coresight_atid_assign_resp_msg_v01_ei,
&resp);
if (ret < 0) {
dev_err(drvdata->dev, "QMI tx init failed , ret:%d\n",
ret);
goto err;
}
ret = qmi_send_request(&drvdata->handle, &drvdata->s_addr,
&txn, CORESIGHT_QMI_ATID_ASSIGN_V01,
CORESIGHT_QMI_ATID_ASSIGN_REQ_MAX_LEN,
coresight_atid_assign_req_msg_v01_ei,
req);
if (ret < 0) {
dev_err(drvdata->dev, "QMI send ACK failed, ret:%d\n",
ret);
qmi_txn_cancel(&txn);
goto err;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(TIMEOUT_MS));
if (ret < 0) {
dev_err(drvdata->dev, "QMI qmi txn wait failed, ret:%d\n",
ret);
goto err;
}
/* Check the response */
if (resp.resp.result != QMI_RESULT_SUCCESS_V01)
dev_err(drvdata->dev, "QMI request failed 0x%x\n",
resp.resp.error);
mutex_unlock(&drvdata->mutex);
dev_info(drvdata->dev, "ATID assign for instance %d\n",
drvdata->inst_id);
return 0;
err:
mutex_unlock(&drvdata->mutex);
return ret;
}
EXPORT_SYMBOL_GPL(coresight_qmi_assign_atid);
static const struct coresight_ops_helper qmi_helper_ops = {
.enable = NULL,
.disable = NULL,
};
static const struct coresight_ops qmi_ops = {
.helper_ops = &qmi_helper_ops,
};
static int coresight_qmi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct coresight_platform_data *pdata;
struct qmi_drvdata *drvdata;
struct coresight_desc desc = { 0 };
int ret;
desc.name = coresight_alloc_device_name(&qmi_devs, dev);
if (!desc.name)
return -ENOMEM;
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
pdev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
ret = of_property_read_u32(pdev->dev.of_node, "qcom,inst-id",
&drvdata->inst_id);
if (ret)
return ret;
mutex_init(&drvdata->mutex);
ret = qmi_handle_init(&drvdata->handle,
CORESIGHT_QMI_SET_ETM_REQ_MAX_LEN,
&server_ops, NULL);
if (ret < 0) {
dev_err(dev, "qmi client init failed ret:%d\n", ret);
return ret;
}
qmi_add_lookup(&drvdata->handle,
CORESIGHT_QMI_SVC_ID,
CORESIGHT_QMI_VERSION,
drvdata->inst_id);
desc.type = CORESIGHT_DEV_TYPE_HELPER;
desc.pdata = pdev->dev.platform_data;
desc.dev = &pdev->dev;
desc.ops = &qmi_ops;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto err;
}
dev_info(dev, "qmi initialized\n");
return 0;
err:
qmi_handle_release(&drvdata->handle);
return ret;
}
static int coresight_qmi_remove(struct platform_device *pdev)
{
struct qmi_drvdata *drvdata = platform_get_drvdata(pdev);
qmi_handle_release(&drvdata->handle);
coresight_unregister(drvdata->csdev);
return 0;
}
static const struct of_device_id coresight_qmi_match[] = {
{.compatible = "qcom,coresight-qmi"},
{}
};
static struct platform_driver coresight_qmi_driver = {
.probe = coresight_qmi_probe,
.remove = coresight_qmi_remove,
.driver = {
.name = "coresight-qmi",
.of_match_table = coresight_qmi_match,
},
};
int __init coresight_qmi_init(void)
{
return platform_driver_register(&coresight_qmi_driver);
}
module_init(coresight_qmi_init);
void __exit coresight_qmi_exit(void)
{
platform_driver_unregister(&coresight_qmi_driver);
}
module_exit(coresight_qmi_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CoreSight QMI driver");

View File

@@ -0,0 +1,330 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CORESIGHT_QMI_H
#define _CORESIGHT_QMI_H
#include <linux/soc/qcom/qmi.h>
#define CORESIGHT_QMI_SVC_ID (0x33)
#define CORESIGHT_QMI_VERSION (1)
#define CORESIGHT_QMI_GET_ETM_REQ_V01 (0x002B)
#define CORESIGHT_QMI_GET_ETM_RESP_V01 (0x002B)
#define CORESIGHT_QMI_SET_ETM_REQ_V01 (0x002C)
#define CORESIGHT_QMI_SET_ETM_RESP_V01 (0x002C)
#define CORESIGHT_QMI_ETR_ASSIGN_REQ_V01 (0x0042)
#define CORESIGHT_QMI_ETR_ASSIGN_RESP_V01 (0x0042)
#define CORESIGHT_QMI_ATID_ASSIGN_V01 (0x0044)
#define CORESIGHT_QMI_GET_ETM_REQ_MAX_LEN (0)
#define CORESIGHT_QMI_GET_ETM_RESP_MAX_LEN (14)
#define CORESIGHT_QMI_SET_ETM_REQ_MAX_LEN (7)
#define CORESIGHT_QMI_SET_ETM_RESP_MAX_LEN (7)
#define CORESIGHT_QMI_ETR_ASSIGN_REQ_MAX_LEN (36)
#define CORESIGHT_QMI_ETR_ASSIGN_RESP_MAX_LEN (7)
#define CORESIGHT_QMI_ATID_ASSIGN_REQ_MAX_LEN (34)
#define CORESIGHT_QMI_ATID_ASSIGN_RESP_MAX_LEN (7)
#define CORESIGHT_QMI_TRACE_NAME_MAX_LEN (25)
#define TIMEOUT_MS (10000)
struct qmi_drvdata {
struct device *dev;
struct coresight_device *csdev;
struct mutex mutex;
struct qmi_handle handle;
uint32_t inst_id;
bool service_connected;
bool security;
struct sockaddr_qrtr s_addr;
};
enum cs_qmi_command {
CS_QMI_ENABLE_REMOTE_ETM,
CS_QMI_DISABLE_REMOTE_ETM,
CS_QMI_ASSIGN_ETR_TO_MPSS,
CS_QMI_ASSIGN_ETR_TO_APSS,
CS_QMI_ASSIGN_ATID,
};
enum coresight_etm_state_enum_type_v01 {
/* To force a 32 bit signed enum. Do not change or use */
CORESIGHT_ETM_STATE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
CORESIGHT_ETM_STATE_DISABLED_V01 = 0,
CORESIGHT_ETM_STATE_ENABLED_V01 = 1,
CORESIGHT_ETM_STATE_ENUM_TYPE_MAX_ENUM_VAL_01 = INT_MAX,
};
struct coresight_get_etm_req_msg_v01 {
/*
* This element is a placeholder to prevent declaration of
* empty struct. Do not change.
*/
char __placeholder;
};
struct coresight_get_etm_resp_msg_v01 {
/* Mandatory */
/* QMI result Code */
struct qmi_response_type_v01 resp;
/* Optional */
/* ETM output state, must be set to true if state is being passed */
uint8_t state_valid;
/* Present when result code is QMI_RESULT_SUCCESS */
enum coresight_etm_state_enum_type_v01 state;
};
struct coresight_set_etm_req_msg_v01 {
/* Mandatory */
/* ETM output state */
enum coresight_etm_state_enum_type_v01 state;
};
struct coresight_set_etm_resp_msg_v01 {
/* Mandatory */
struct qmi_response_type_v01 resp;
};
struct coresight_etr_assign_req_msg_v01 {
u32 etr_id;
u32 subsys_id;
u64 buffer_base;
u64 buffer_size;
};
struct coresight_etr_assign_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
struct coresight_atid_assign_req_msg_v01 {
char name[CORESIGHT_QMI_TRACE_NAME_MAX_LEN];
u8 atids[8];
u8 num_atids;
};
struct coresight_atid_assign_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
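/*
 * Envelope handed to the QMI helper: @command selects which request is
 * sent, and only the payload pointer matching that command needs to be
 * valid.
 */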
struct cs_qmi_data {
enum cs_qmi_command command;
struct coresight_etr_assign_req_msg_v01 *etr_data;
struct coresight_atid_assign_req_msg_v01 *atid_data;
};
static struct qmi_elem_info coresight_etr_assign_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = 4,
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct coresight_etr_assign_req_msg_v01,
etr_id),
.ei_array = NULL,
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = 4,
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct coresight_etr_assign_req_msg_v01,
subsys_id),
.ei_array = NULL,
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = 8,
.array_type = NO_ARRAY,
.tlv_type = 0x03,
.offset = offsetof(struct coresight_etr_assign_req_msg_v01,
buffer_base),
.ei_array = NULL,
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = 8,
.array_type = NO_ARRAY,
.tlv_type = 0x04,
.offset = offsetof(struct coresight_etr_assign_req_msg_v01,
buffer_size),
.ei_array = NULL,
},
{
.data_type = QMI_EOTI,
.elem_len = 0,
.elem_size = 0,
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = 0,
.ei_array = NULL,
},
};
static struct qmi_elem_info coresight_etr_assign_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct coresight_etr_assign_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.elem_len = 0,
.elem_size = 0,
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = 0,
.ei_array = NULL,
},
};
static struct qmi_elem_info coresight_set_etm_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(enum coresight_etm_state_enum_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct coresight_set_etm_req_msg_v01,
state),
.ei_array = NULL,
},
{
.data_type = QMI_EOTI,
.elem_len = 0,
.elem_size = 0,
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = 0,
.ei_array = NULL,
},
};
static struct qmi_elem_info coresight_set_etm_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct coresight_set_etm_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.elem_len = 0,
.elem_size = 0,
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = 0,
.ei_array = NULL,
},
};
static struct qmi_elem_info coresight_atid_assign_req_msg_v01_ei[] = {
{
.data_type = QMI_STRING,
.elem_len = CORESIGHT_QMI_TRACE_NAME_MAX_LEN,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct coresight_atid_assign_req_msg_v01,
name),
.ei_array = NULL,
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 8,
.elem_size = sizeof(u8),
.array_type = STATIC_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct coresight_atid_assign_req_msg_v01,
atids),
.ei_array = NULL,
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
.offset = offsetof(struct coresight_atid_assign_req_msg_v01,
num_atids),
.ei_array = NULL,
},
{
.data_type = QMI_EOTI,
.elem_len = 0,
.elem_size = 0,
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = 0,
.ei_array = NULL,
},
};
static struct qmi_elem_info coresight_atid_assign_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct coresight_atid_assign_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_EOTI,
.elem_len = 0,
.elem_size = 0,
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = 0,
.ei_array = NULL,
},
};
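/*
 * Usage sketch (illustrative, not part of this interface): how the
 * message tables above typically drive the in-kernel QMI API. The
 * handle and server address are assumed to have been populated by the
 * driver's qmi_ops callbacks, and error handling is trimmed.
 */
static inline int coresight_qmi_example_set_etm(struct qmi_drvdata *drvdata,
		enum coresight_etm_state_enum_type_v01 state)
{
	struct coresight_set_etm_req_msg_v01 req = { .state = state };
	struct coresight_set_etm_resp_msg_v01 resp = { };
	struct qmi_txn txn;
	int ret;

	/* Register interest in the response before sending the request */
	ret = qmi_txn_init(&drvdata->handle, &txn,
			   coresight_set_etm_resp_msg_v01_ei, &resp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(&drvdata->handle, &drvdata->s_addr, &txn,
			       CORESIGHT_QMI_SET_ETM_REQ_V01,
			       CORESIGHT_QMI_SET_ETM_REQ_MAX_LEN,
			       coresight_set_etm_req_msg_v01_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	/* Block until the subsystem answers or TIMEOUT_MS expires */
	ret = qmi_txn_wait(&txn, msecs_to_jiffies(TIMEOUT_MS));
	if (ret < 0)
		return ret;

	return resp.resp.result == QMI_RESULT_SUCCESS_V01 ? 0 : -EREMOTEIO;
}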
static inline bool coresight_is_qmi_device(struct coresight_device *csdev)
{
if (!IS_ENABLED(CONFIG_CORESIGHT_QMI))
return false;
if (csdev->type != CORESIGHT_DEV_TYPE_HELPER)
return false;
return true;
}
#if IS_ENABLED(CONFIG_CORESIGHT_QMI)
extern int coresight_qmi_remote_etm_enable(struct coresight_device *csdev);
extern void coresight_qmi_remote_etm_disable(struct coresight_device *csdev);
extern int coresight_qmi_etr_assign(struct coresight_device *csdev,
struct coresight_etr_assign_req_msg_v01 *req);
extern int coresight_qmi_assign_atid(struct coresight_device *csdev,
struct coresight_atid_assign_req_msg_v01 *req);
#else
static inline int coresight_qmi_remote_etm_enable(
struct coresight_device *csdev) {return -EINVAL; }
static inline int coresight_qmi_etr_assign(struct coresight_device *csdev,
struct coresight_etr_assign_req_msg_v01 *req) {return -EINVAL; }
static inline int coresight_qmi_assign_atid(struct coresight_device *csdev,
struct coresight_atid_assign_req_msg_v01 *req) {return -EINVAL; }
static inline void coresight_qmi_remote_etm_disable(
struct coresight_device *csdev) {}
#endif
#endif

View File

@@ -0,0 +1,417 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/suspend.h>
#include "coresight-qmi.h"
#include "coresight-trace-id.h"
#include "coresight-common.h"
#ifdef CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE
static int boot_enable = CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE;
#else
static int boot_enable;
#endif
DEFINE_CORESIGHT_DEVLIST(remote_etm_devs, "remote-etm");
struct remote_etm_drvdata {
struct device *dev;
struct coresight_device *csdev;
struct mutex mutex;
bool enable;
u8 *traceids;
u32 num_trcid;
bool static_atid;
};
/*
 * A remote ETM may be connected to a QMI device, which can send commands
 * to the subsystem via QMI. This is represented by the output port of the
 * remote ETM connected to the input port of the QMI device.
 *
 * Returns : coresight_device ptr for the QMI device if a QMI is found.
 *         : NULL otherwise.
 */
static struct coresight_device *
remote_etm_get_qmi_device(struct remote_etm_drvdata *drvdata)
{
int i;
struct coresight_device *tmp, *etm = drvdata->csdev;
if (!IS_ENABLED(CONFIG_CORESIGHT_QMI))
return NULL;
for (i = 0; i < etm->pdata->nr_outconns; i++) {
tmp = etm->pdata->out_conns[i]->dest_dev;
if (tmp && coresight_is_qmi_device(tmp))
return tmp;
}
return NULL;
}
static int qmi_assign_remote_etm_atid(struct remote_etm_drvdata *drvdata)
{
struct coresight_device *qmi = remote_etm_get_qmi_device(drvdata);
struct coresight_atid_assign_req_msg_v01 *atid_data;
const char *trace_name;
int i, ret = 0;
if (of_property_read_string(drvdata->dev->of_node,
"trace-name", &trace_name))
return -EINVAL;
atid_data = kzalloc(sizeof(*atid_data), GFP_KERNEL);
if (!atid_data)
return -ENOMEM;
strscpy(atid_data->name, trace_name, CORESIGHT_QMI_TRACE_NAME_MAX_LEN);
for (i = 0; i < drvdata->num_trcid; i++)
atid_data->atids[i] = drvdata->traceids[i];
atid_data->num_atids = drvdata->num_trcid;
if (qmi)
ret = coresight_qmi_assign_atid(qmi, atid_data);
/* The request is encoded by the QMI stack, so our copy can be freed */
kfree(atid_data);
return ret;
}
static int qmi_enable_remote_etm(struct remote_etm_drvdata *drvdata)
{
struct coresight_device *qmi = remote_etm_get_qmi_device(drvdata);
if (qmi)
return coresight_qmi_remote_etm_enable(qmi);
return 0;
}
static int qmi_disable_remote_etm(struct remote_etm_drvdata *drvdata)
{
struct coresight_device *qmi = remote_etm_get_qmi_device(drvdata);
if (qmi)
coresight_qmi_remote_etm_disable(qmi);
return 0;
}
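/*
 * Enable sequence below: claim (or reserve) the trace IDs, program them
 * into the CSR ATID filters, then ask the remote side to start its ETM
 * over QMI. The error path unwinds in the reverse order.
 */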
static int remote_etm_enable(struct coresight_device *csdev,
struct perf_event *event, enum cs_mode mode)
{
struct remote_etm_drvdata *drvdata =
dev_get_drvdata(csdev->dev.parent);
int i, ret;
mutex_lock(&drvdata->mutex);
if (!drvdata->static_atid) {
ret = qmi_assign_remote_etm_atid(drvdata);
if (ret) {
dev_err(drvdata->dev, "Assign remote etm atid fail\n");
goto unlock_mutex;
}
} else {
for (i = 0; i < drvdata->num_trcid; i++) {
ret = coresight_trace_id_reserve_id(drvdata->traceids[i]);
if (ret) {
dev_err(drvdata->dev, "reserve atid: %d fail\n",
drvdata->traceids[i]);
break;
}
}
if (i < drvdata->num_trcid) {
for (; i > 0; i--)
coresight_trace_id_free_reserved_id(drvdata->traceids[i - 1]);
goto unlock_mutex;
}
}
for (i = 0; i < drvdata->num_trcid; i++)
coresight_csr_set_etr_atid(csdev, drvdata->traceids[i], true);
ret = qmi_enable_remote_etm(drvdata);
if (ret) {
dev_err(drvdata->dev, "Enable remote etm fail\n");
goto error;
}
dev_info(drvdata->dev, "Enable remote etm success\n");
mutex_unlock(&drvdata->mutex);
return 0;
error:
for (i = 0; i < drvdata->num_trcid; i++) {
coresight_csr_set_etr_atid(csdev, drvdata->traceids[i], false);
if (drvdata->static_atid)
coresight_trace_id_free_reserved_id(drvdata->traceids[i]);
}
unlock_mutex:
mutex_unlock(&drvdata->mutex);
return ret;
}
static void remote_etm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
struct remote_etm_drvdata *drvdata =
dev_get_drvdata(csdev->dev.parent);
int i;
mutex_lock(&drvdata->mutex);
qmi_disable_remote_etm(drvdata);
for (i = 0; i < drvdata->num_trcid; i++)
coresight_csr_set_etr_atid(csdev, drvdata->traceids[i], false);
/* Dynamically allocated system IDs stay claimed until remove() */
if (drvdata->static_atid)
for (i = 0; i < drvdata->num_trcid; i++)
coresight_trace_id_free_reserved_id(drvdata->traceids[i]);
mutex_unlock(&drvdata->mutex);
}
static const struct coresight_ops_source remote_etm_source_ops = {
.enable = remote_etm_enable,
.disable = remote_etm_disable,
};
static const struct coresight_ops remote_cs_ops = {
.source_ops = &remote_etm_source_ops,
};
static ssize_t traceid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct remote_etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
ssize_t len = 0;
int i;
for (i = 0; i < drvdata->num_trcid; i++)
len += scnprintf(buf + len, PAGE_SIZE - len, "%#x%c",
drvdata->traceids[i],
(i == drvdata->num_trcid - 1) ? '\n' : ' ');
return len;
}
static DEVICE_ATTR_RO(traceid);
static struct attribute *remote_etm_attrs[] = {
&dev_attr_traceid.attr,
NULL,
};
static struct attribute_group remote_etm_attr_grp = {
.attrs = remote_etm_attrs,
};
static const struct attribute_group *remote_etm_attr_grps[] = {
&remote_etm_attr_grp,
NULL,
};
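/*
 * Trace IDs come from one of two places: a fixed "atid" list in the
 * device tree (static ATIDs, re-reserved against the trace-ID allocator
 * on every enable), or, when only "qcom,atid-num" is present, system IDs
 * allocated dynamically at probe time.
 */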
static int remote_etm_get_traceid(struct remote_etm_drvdata *drvdata)
{
int ret, i, trace_id;
struct device *dev = drvdata->dev;
u32 *atid;
ret = of_property_count_u32_elems(dev->of_node, "atid");
if (ret < 0) {
ret = of_property_read_u32(dev->of_node, "qcom,atid-num",
&drvdata->num_trcid);
if (ret)
return -EINVAL;
} else {
drvdata->num_trcid = ret;
drvdata->static_atid = true;
}
atid = devm_kcalloc(dev, drvdata->num_trcid, sizeof(*atid), GFP_KERNEL);
if (!atid)
return -ENOMEM;
if (drvdata->static_atid) {
ret = of_property_read_u32_array(dev->of_node, "atid",
atid, drvdata->num_trcid);
if (ret)
return ret;
} else {
for (i = 0; i < drvdata->num_trcid; i++) {
trace_id = coresight_trace_id_get_system_id();
if (trace_id < 0)
return trace_id;
atid[i] = trace_id;
}
}
drvdata->traceids = devm_kcalloc(dev, drvdata->num_trcid,
sizeof(u8), GFP_KERNEL);
if (!drvdata->traceids)
return -ENOMEM;
for (i = 0; i < drvdata->num_trcid; i++)
drvdata->traceids[i] = (u8)atid[i];
return 0;
}
static int remote_etm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct coresight_platform_data *pdata;
struct remote_etm_drvdata *drvdata;
struct coresight_desc desc = { 0 };
int ret;
desc.name = coresight_alloc_device_name(&remote_etm_devs, dev);
if (!desc.name)
return -ENOMEM;
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
pdev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->dev = &pdev->dev;
platform_set_drvdata(pdev, drvdata);
mutex_init(&drvdata->mutex);
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
desc.ops = &remote_cs_ops;
desc.pdata = pdev->dev.platform_data;
desc.dev = &pdev->dev;
desc.groups = remote_etm_attr_grps;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
ret = remote_etm_get_traceid(drvdata);
if (ret) {
coresight_unregister(drvdata->csdev);
return ret;
}
dev_info(dev, "Remote ETM initialized\n");
if (boot_enable)
coresight_enable(drvdata->csdev);
return 0;
}
}
static int remote_etm_remove(struct platform_device *pdev)
{
struct remote_etm_drvdata *drvdata = platform_get_drvdata(pdev);
int i;
if (!drvdata->static_atid)
for (i = 0; i < drvdata->num_trcid; i++)
coresight_trace_id_put_system_id(drvdata->traceids[i]);
coresight_unregister(drvdata->csdev);
return 0;
}
static const struct of_device_id remote_etm_match[] = {
{.compatible = "qcom,coresight-remote-etm"},
{}
};
#ifdef CONFIG_DEEPSLEEP
static int remote_etm_suspend(struct device *dev)
{
struct remote_etm_drvdata *drvdata = dev_get_drvdata(dev);
struct coresight_device *csdev = drvdata->csdev;
if (pm_suspend_target_state == PM_SUSPEND_MEM) {
do {
coresight_disable(csdev);
} while (atomic_read(&csdev->refcnt));
}
return 0;
}
#else
static int remote_etm_suspend(struct device *dev)
{
return 0;
}
#endif
#ifdef CONFIG_HIBERNATION
static int remote_etm_freeze(struct device *dev)
{
struct remote_etm_drvdata *drvdata = dev_get_drvdata(dev);
struct coresight_device *csdev = drvdata->csdev;
do {
coresight_disable(csdev);
} while (atomic_read(&csdev->refcnt));
return 0;
}
#else
static int remote_etm_freeze(struct device *dev)
{
return 0;
}
#endif
static const struct dev_pm_ops remote_etm_dev_pm_ops = {
.suspend = remote_etm_suspend,
.freeze = remote_etm_freeze,
};
static struct platform_driver remote_etm_driver = {
.probe = remote_etm_probe,
.remove = remote_etm_remove,
.driver = {
.name = "coresight-remote-etm",
.of_match_table = remote_etm_match,
.pm = pm_ptr(&remote_etm_dev_pm_ops),
},
};
static int __init remote_etm_init(void)
{
return platform_driver_register(&remote_etm_driver);
}
module_init(remote_etm_init);
static void __exit remote_etm_exit(void)
{
platform_driver_unregister(&remote_etm_driver);
}
module_exit(remote_etm_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CoreSight Remote ETM driver");

View File

@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Description: CoreSight Replicator driver
*/
@@ -18,6 +19,8 @@
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/pm_domain.h>
#include <linux/cpu_pm.h>
#include "coresight-priv.h"
@@ -26,6 +29,11 @@
DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
static LIST_HEAD(delay_probe_list);
static LIST_HEAD(cpu_pm_list);
static enum cpuhp_state hp_online;
static DEFINE_SPINLOCK(delay_lock);
/**
* struct replicator_drvdata - specifics associated to a replicator component
* @base: memory mapped base address for this component. Also indicates
@@ -34,6 +42,8 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
* @csdev: component vitals needed by the framework
* @spinlock: serialize enable/disable operations.
* @check_idfilter_val: check if the context is lost upon clock removal.
* @delayed: parameter for delayed probe.
* @dclk: optional clock to be dynamically enabled when this device is enabled.
*/
struct replicator_drvdata {
void __iomem *base;
@@ -41,6 +51,10 @@ struct replicator_drvdata {
struct coresight_device *csdev;
spinlock_t spinlock;
bool check_idfilter_val;
struct delay_probe_arg *delayed;
struct clk *dclk;
struct pm_config pm_config;
struct list_head link;
};
static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
@@ -123,7 +137,19 @@ static int replicator_enable(struct coresight_device *csdev,
unsigned long flags;
bool first_enable = false;
if (drvdata->dclk) {
rc = clk_prepare_enable(drvdata->dclk);
if (rc)
return rc;
}
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->pm_config.hw_powered) {
rc = -EINVAL;
goto out;
}
if (atomic_read(&out->src_refcnt) == 0) {
if (drvdata->base)
rc = dynamic_replicator_enable(drvdata, in->dest_port,
@@ -133,10 +159,15 @@ static int replicator_enable(struct coresight_device *csdev,
}
if (!rc)
atomic_inc(&out->src_refcnt);
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
dev_dbg(&csdev->dev, "REPLICATOR enabled\n");
if (rc && drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
return rc;
}
@@ -178,13 +209,20 @@ static void replicator_disable(struct coresight_device *csdev,
bool last_disable = false;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->pm_config.hw_powered)
goto out;
if (atomic_dec_return(&out->src_refcnt) == 0) {
if (drvdata->base)
dynamic_replicator_disable(drvdata, in->dest_port,
out->src_port);
last_disable = true;
}
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
if (last_disable)
dev_dbg(&csdev->dev, "REPLICATOR disabled\n");
@@ -199,9 +237,61 @@ static const struct coresight_ops replicator_cs_ops = {
.link_ops = &replicator_link_ops,
};
static ssize_t replicator_reg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cs_off_attribute *cs_attr = container_of(attr, struct cs_off_attribute, attr);
int ret;
u32 val;
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
spin_lock(&drvdata->spinlock);
if (!drvdata->pm_config.hw_powered) {
ret = -EINVAL;
goto out;
}
val = readl_relaxed(drvdata->base + cs_attr->off);
out:
spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(dev->parent);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
if (ret)
return ret;
return sysfs_emit(buf, "0x%x\n", val);
}
#define coresight_replicator_reg(name, offset) \
(&((struct cs_off_attribute[]) { \
{ \
__ATTR(name, 0444, replicator_reg_show, NULL), \
offset \
} \
})[0].attr.attr)
static struct attribute *replicator_mgmt_attrs[] = {
coresight_replicator_reg(idfilter0, REPLICATOR_IDFILTER0),
coresight_replicator_reg(idfilter1, REPLICATOR_IDFILTER1),
NULL,
};
@@ -215,7 +305,7 @@ static const struct attribute_group *replicator_groups[] = {
NULL,
};
static int replicator_add_coresight_dev(struct device *dev, struct resource *res)
{
int ret = 0;
struct coresight_platform_data *pdata = NULL;
@@ -232,7 +322,7 @@ static int replicator_probe(struct device *dev, struct resource *res)
if (!desc.name)
return -ENOMEM;
drvdata = dev_get_drvdata(dev);
if (!drvdata)
return -ENOMEM;
@@ -240,9 +330,17 @@ static int replicator_probe(struct device *dev, struct resource *res)
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret == -ETIMEDOUT ? -EPROBE_DEFER : ret;
}
drvdata->dclk = devm_clk_get(dev, "dynamic_clk");
if (!IS_ERR(drvdata->dclk)) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret)
return ret == -ETIMEDOUT ? -EPROBE_DEFER : ret;
} else {
drvdata->dclk = NULL;
}
/*
* Map the device base for dynamic-replicator, which has been
* validated by AMBA core
@@ -262,7 +360,6 @@ static int replicator_probe(struct device *dev, struct resource *res)
"qcom,replicator-loses-context"))
drvdata->check_idfilter_val = true;
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata)) {
@@ -285,7 +382,11 @@ static int replicator_probe(struct device *dev, struct resource *res)
}
replicator_reset(drvdata);
pm_runtime_put_sync(dev);
drvdata->pm_config.hw_powered = true;
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
out_disable_clk:
if (ret && !IS_ERR_OR_NULL(drvdata->atclk))
@@ -297,6 +398,8 @@ static int replicator_remove(struct device *dev)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
if (!drvdata->csdev)
return 0;
coresight_unregister(drvdata->csdev);
return 0;
}
@@ -304,13 +407,19 @@ static int replicator_remove(struct device *dev)
static int static_replicator_probe(struct platform_device *pdev)
{
int ret;
struct replicator_drvdata *drvdata;
drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, drvdata);
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
/* Static replicators do not have programming base */
ret = replicator_add_coresight_dev(&pdev->dev, NULL);
if (ret) {
pm_runtime_put_noidle(&pdev->dev);
@@ -384,17 +493,243 @@ static struct platform_driver static_replicator_driver = {
},
};
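/*
 * Replicators inside a CPU power domain lose state with the domain.
 * The CPU PM notifier below tracks which of the domain's CPUs are
 * powered and flips pm_config.hw_powered so register accesses are
 * refused while the hardware is down.
 */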
static int replicator_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
void *v)
{
unsigned int cpu = smp_processor_id();
struct replicator_drvdata *drvdata, *tmp;
struct pm_config *pm_config;
unsigned long flags;
switch (cmd) {
case CPU_PM_ENTER:
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!cpumask_test_cpu(cpu, &pm_config->online_cpus)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
continue;
}
cpumask_clear_cpu(cpu, &pm_config->powered_cpus);
if (cpumask_empty(&pm_config->powered_cpus))
pm_config->hw_powered = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
break;
case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED:
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!cpumask_test_cpu(cpu, &pm_config->online_cpus)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
continue;
}
pm_config->hw_powered = true;
cpumask_set_cpu(cpu, &pm_config->powered_cpus);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
break;
}
return NOTIFY_OK;
}
static struct notifier_block replicator_cpu_pm_nb = {
.notifier_call = replicator_cpu_pm_notify,
};
static int replicator_offline_cpu(unsigned int cpu)
{
struct replicator_drvdata *drvdata, *tmp;
struct pm_config *pm_config;
unsigned long flags;
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
cpumask_clear_cpu(cpu, &pm_config->online_cpus);
cpumask_clear_cpu(cpu, &pm_config->powered_cpus);
if (cpumask_empty(&pm_config->powered_cpus))
pm_config->hw_powered = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
return 0;
}
static int replicator_online_cpu(unsigned int cpu)
{
struct delay_probe_arg *init_arg, *tmp;
int ret;
unsigned long flags;
struct replicator_drvdata *drvdata, *tmp_drv;
struct pm_config *pm_config;
list_for_each_entry_safe(drvdata, tmp_drv, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
cpumask_set_cpu(cpu, &pm_config->powered_cpus);
cpumask_set_cpu(cpu, &pm_config->online_cpus);
pm_config->hw_powered = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
list_for_each_entry_safe(init_arg, tmp, &delay_probe_list, link) {
if (cpumask_test_cpu(cpu, init_arg->cpumask)) {
drvdata = amba_get_drvdata(init_arg->adev);
pm_config = &drvdata->pm_config;
spin_lock(&delay_lock);
drvdata->delayed = NULL;
list_del(&init_arg->link);
spin_unlock(&delay_lock);
ret = pm_runtime_resume_and_get(&init_arg->adev->dev);
if (ret < 0)
return ret;
ret = replicator_add_coresight_dev(&init_arg->adev->dev,
&init_arg->adev->res);
if (ret) {
pm_runtime_put_sync(&init_arg->adev->dev);
} else {
pm_config->pd_cpumask = init_arg->cpumask;
cpumask_set_cpu(cpu, &pm_config->powered_cpus);
cpumask_set_cpu(cpu, &pm_config->online_cpus);
pm_config->pm_enable = true;
spin_lock(&delay_lock);
list_add(&drvdata->link, &cpu_pm_list);
spin_unlock(&delay_lock);
}
}
}
return 0;
}
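/*
 * A replicator in a CPU power domain can only be registered while one of
 * those CPUs is online; if none is at probe time, the probe is parked on
 * delay_probe_list and completed from replicator_online_cpu() above.
 */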
static int dynamic_replicator_probe(struct amba_device *adev,
const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct generic_pm_domain *pd;
struct delay_probe_arg *init_arg;
int cpu, ret;
struct cpumask *cpumask;
struct replicator_drvdata *drvdata;
struct pm_config *pm_config;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
dev_set_drvdata(dev, drvdata);
pm_config = &drvdata->pm_config;
if (dev->pm_domain) {
pd = pd_to_genpd(dev->pm_domain);
cpumask = pd->cpus;
if (cpumask_empty(cpumask))
return replicator_add_coresight_dev(dev, &adev->res);
cpus_read_lock();
for_each_online_cpu(cpu) {
if (cpumask_test_cpu(cpu, cpumask)) {
ret = replicator_add_coresight_dev(dev, &adev->res);
if (ret) {
dev_dbg(dev, "add coresight_dev fail:%d\n", ret);
} else {
pm_config->pd_cpumask = cpumask;
cpumask_and(&pm_config->powered_cpus,
cpumask, cpu_online_mask);
cpumask_copy(&pm_config->online_cpus,
&pm_config->powered_cpus);
pm_config->pm_enable = true;
spin_lock(&delay_lock);
list_add(&drvdata->link, &cpu_pm_list);
spin_unlock(&delay_lock);
}
cpus_read_unlock();
return ret;
}
}
init_arg = devm_kzalloc(dev, sizeof(*init_arg), GFP_KERNEL);
if (!init_arg) {
cpus_read_unlock();
return -ENOMEM;
}
spin_lock(&delay_lock);
init_arg->adev = adev;
init_arg->cpumask = pd->cpus;
list_add(&init_arg->link, &delay_probe_list);
drvdata->delayed = init_arg;
spin_unlock(&delay_lock);
pm_runtime_put_sync(&adev->dev);
cpus_read_unlock();
return 0;
}
return replicator_add_coresight_dev(dev, &adev->res);
}
static void dynamic_replicator_remove(struct amba_device *adev)
{
struct replicator_drvdata *drvdata = amba_get_drvdata(adev);
spin_lock(&delay_lock);
if (drvdata->delayed) {
list_del(&drvdata->delayed->link);
spin_unlock(&delay_lock);
return;
}
if (drvdata->pm_config.pm_enable)
list_del(&drvdata->link);
spin_unlock(&delay_lock);
replicator_remove(&adev->dev);
}
static int __init replicator_pm_setup(void)
{
int ret;
ret = cpu_pm_register_notifier(&replicator_cpu_pm_nb);
if (ret)
return ret;
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"arm/coresight-replicator:online",
replicator_online_cpu, replicator_offline_cpu);
if (ret > 0) {
hp_online = ret;
return 0;
}
cpu_pm_unregister_notifier(&replicator_cpu_pm_nb);
return ret;
}
static void replicator_pm_clear(void)
{
cpu_pm_unregister_notifier(&replicator_cpu_pm_nb);
if (hp_online) {
cpuhp_remove_state_nocalls(hp_online);
hp_online = 0;
}
}
static const struct amba_id dynamic_replicator_ids[] = {
CS_AMBA_ID(0x000bb909),
CS_AMBA_ID(0x000bb9ec), /* Coresight SoC-600 */
@@ -419,18 +754,26 @@ static int __init replicator_init(void)
{
int ret;
ret = replicator_pm_setup();
if (ret)
return ret;
ret = platform_driver_register(&static_replicator_driver);
if (ret) {
pr_info("Error registering platform driver\n");
goto clear_pm;
}
ret = amba_driver_register(&dynamic_replicator_driver);
if (ret) {
pr_info("Error registering amba driver\n");
platform_driver_unregister(&static_replicator_driver);
goto clear_pm;
}
return ret;
clear_pm:
replicator_pm_clear();
return ret;
}
@@ -438,6 +781,7 @@ static void __exit replicator_exit(void)
{
platform_driver_unregister(&static_replicator_driver);
amba_driver_unregister(&dynamic_replicator_driver);
replicator_pm_clear();
}
module_init(replicator_init);

View File

@@ -2,6 +2,8 @@
/*
* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Description: CoreSight System Trace Macrocell driver
*
* Initial implementation by Pratik Patel
@@ -29,9 +31,12 @@
#include <linux/perf_event.h>
#include <linux/pm_runtime.h>
#include <linux/stm.h>
#include <linux/suspend.h>
#include "coresight-priv.h"
#include "coresight-trace-id.h"
#include "coresight-common.h"
#include "../stm/stm.h"
#define STMDMASTARTR 0xc04
#define STMDMASTOPR 0xc08
@@ -145,6 +150,7 @@ struct stm_drvdata {
u32 stmheer;
u32 stmheter;
u32 stmhebsr;
bool static_atid;
};
static void stm_hwevent_enable_hw(struct stm_drvdata *drvdata)
@@ -196,6 +202,7 @@ static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
{
u32 val;
int ret;
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (mode != CS_MODE_SYSFS)
@@ -206,8 +213,22 @@ static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
/* Someone is already using the tracer */
if (val)
return -EBUSY;
if (drvdata->static_atid) {
ret = coresight_trace_id_reserve_id(drvdata->traceid);
if (ret) {
local_set(&drvdata->mode, CS_MODE_DISABLED);
dev_err(&csdev->dev, "reserve ATID: %d fail\n", drvdata->traceid);
return ret;
}
}
coresight_csr_set_etr_atid(csdev, drvdata->traceid, true);
ret = pm_runtime_resume_and_get(csdev->dev.parent);
if (ret < 0) {
coresight_csr_set_etr_atid(csdev, drvdata->traceid, false);
if (drvdata->static_atid)
coresight_trace_id_free_reserved_id(drvdata->traceid);
local_set(&drvdata->mode, CS_MODE_DISABLED);
return ret;
}
spin_lock(&drvdata->spinlock);
stm_enable_hw(drvdata);
@@ -274,8 +295,11 @@ static void stm_disable(struct coresight_device *csdev,
/* Wait until the engine has completely stopped */
coresight_timeout(csa, STMTCSR, STMTCSR_BUSY_BIT, 0);
pm_runtime_put_sync(csdev->dev.parent);
coresight_csr_set_etr_atid(csdev, drvdata->traceid, false);
if (drvdata->static_atid)
coresight_trace_id_free_reserved_id(drvdata->traceid);
local_set(&drvdata->mode, CS_MODE_DISABLED);
dev_dbg(&csdev->dev, "STM tracing disabled\n");
}
@@ -827,7 +851,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret == -ETIMEDOUT ? -EPROBE_DEFER : ret;
}
dev_set_drvdata(dev, drvdata);
@@ -890,14 +914,18 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
goto stm_unregister;
}
if (!of_property_read_u32(adev->dev.of_node, "atid", &trace_id)) {
drvdata->static_atid = true;
} else {
trace_id = coresight_trace_id_get_system_id();
if (trace_id < 0) {
ret = trace_id;
goto cs_unregister;
}
}
drvdata->traceid = (u8)trace_id;
pm_runtime_put_sync(&adev->dev);
dev_info(&drvdata->csdev->dev, "%s initialized\n",
(char *)coresight_get_uci_data(id));
@@ -915,7 +943,8 @@ static void stm_remove(struct amba_device *adev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
if (!drvdata->static_atid)
coresight_trace_id_put_system_id(drvdata->traceid);
coresight_unregister(drvdata->csdev);
stm_unregister_device(&drvdata->stm);
@@ -943,8 +972,115 @@ static int stm_runtime_resume(struct device *dev)
}
#endif
#ifdef CONFIG_DEEPSLEEP
static int stm_suspend(struct device *dev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev);
struct coresight_device *csdev = drvdata->csdev;
struct stm_device *stm_dev;
struct list_head *head, *p;
if (pm_suspend_target_state == PM_SUSPEND_MEM) {
do {
coresight_disable(csdev);
} while (atomic_read(&csdev->refcnt));
stm_dev = drvdata->stm.stm;
if (stm_dev) {
head = &stm_dev->link_list;
list_for_each(p, head)
pm_runtime_put_autosuspend(&stm_dev->dev);
}
}
return 0;
}
static int stm_resume(struct device *dev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev);
struct stm_device *stm_dev;
struct list_head *head, *p;
if (pm_suspend_target_state == PM_SUSPEND_MEM) {
stm_dev = drvdata->stm.stm;
if (stm_dev) {
head = &stm_dev->link_list;
list_for_each(p, head)
pm_runtime_get(&stm_dev->dev);
}
}
return 0;
}
#else
static int stm_suspend(struct device *dev)
{
return 0;
}
static int stm_resume(struct device *dev)
{
return 0;
}
#endif
#ifdef CONFIG_HIBERNATION
static int stm_freeze(struct device *dev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev);
struct coresight_device *csdev = drvdata->csdev;
struct stm_device *stm_dev;
struct list_head *head, *p;
do {
coresight_disable(csdev);
} while (atomic_read(&csdev->refcnt));
stm_dev = drvdata->stm.stm;
if (stm_dev) {
head = &stm_dev->link_list;
list_for_each(p, head)
pm_runtime_put_autosuspend(&stm_dev->dev);
}
return 0;
}
static int stm_restore(struct device *dev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(dev);
struct stm_device *stm_dev;
struct list_head *head, *p;
stm_dev = drvdata->stm.stm;
if (stm_dev) {
head = &stm_dev->link_list;
list_for_each(p, head)
pm_runtime_get(&stm_dev->dev);
}
return 0;
}
#else
static int stm_freeze(struct device *dev)
{
return 0;
}
static int stm_restore(struct device *dev)
{
return 0;
}
#endif
static const struct dev_pm_ops stm_dev_pm_ops = {
SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL)
.suspend = stm_suspend,
.resume = stm_resume,
.freeze = stm_freeze,
.restore = stm_restore,
};
static const struct amba_id stm_ids[] = {

View File

@@ -0,0 +1,527 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/amba/bus.h>
#include <linux/topology.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include "coresight-priv.h"
#define tgu_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
#define tgu_readl(drvdata, off) __raw_readl(drvdata->base + off)
#define TGU_LOCK(drvdata) \
do { \
mb(); /* ensure configuration take effect before we lock it */ \
tgu_writel(drvdata, 0x0, CORESIGHT_LAR); \
} while (0)
#define TGU_UNLOCK(drvdata) \
do { \
tgu_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
mb(); /* ensure unlock take effect before we configure */ \
} while (0)
#define TGU_CONTROL 0x0000
#define TIMER0_STATUS 0x0004
#define COUNTER0_STATUS 0x000C
#define TGU_STATUS 0x0014
#define TIMER0_COMPARE_STEP(n) (0x0040 + 0x1D8 * (n))
#define COUNTER0_COMPARE_STEP(n) (0x0048 + 0x1D8 * (n))
#define GROUP_REG_STEP(grp, reg, step) (0x0074 + 0x60 * (grp) + 0x4 * (reg) + \
0x1D8 * (step))
#define CONDITION_DECODE_STEP(m, n) (0x0050 + 0x4 * (m) + 0x1D8 * (n))
#define CONDITION_SELECT_STEP(m, n) (0x0060 + 0x4 * (m) + 0x1D8 * (n))
#define GROUP0 0x0074
#define GROUP1 0x00D4
#define GROUP2 0x0134
#define GROUP3 0x0194
#define TGU_LAR 0x0FB0
#define MAX_GROUP_SETS 256
#define MAX_GROUPS 4
#define MAX_CONDITION_SETS 64
#define MAX_TIMER_COUNTER_SETS 8
#define to_tgu_drvdata(c) container_of(c, struct tgu_drvdata, tgu)
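/*
 * The TGU register map is stepped: each trigger step owns a 0x1D8-wide
 * window, and each group within a step a 0x60-wide one. As a worked
 * example, GROUP_REG_STEP(1, 2, 0) = 0x0074 + 0x60 * 1 + 0x4 * 2 = 0xDC,
 * i.e. register 2 of GROUP1 in step 0.
 */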
struct Trigger_group_data {
unsigned long grpaddr;
unsigned long value;
};
struct Trigger_condition_data {
unsigned long condaddr;
unsigned long value;
};
struct Trigger_select_data {
unsigned long selectaddr;
unsigned long value;
};
struct Trigger_timer_data {
unsigned long timeraddr;
unsigned long value;
};
struct Trigger_counter_data {
unsigned long counteraddr;
unsigned long value;
};
struct tgu_drvdata {
void __iomem *base;
struct device *dev;
struct coresight_device *csdev;
struct clk *clk;
spinlock_t spinlock;
u32 max_steps;
u32 max_conditions;
u32 max_regs;
u32 max_timer_counter;
struct Trigger_group_data *grp_data;
struct Trigger_condition_data *condition_data;
struct Trigger_select_data *select_data;
struct Trigger_timer_data *timer_data;
struct Trigger_counter_data *counter_data;
int grp_refcnt;
int cond_refcnt;
int select_refcnt;
int timer_refcnt;
int counter_refcnt;
bool enable;
};
DEFINE_CORESIGHT_DEVLIST(tgu_devs, "tgu");
static ssize_t enable_tgu_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long value;
struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
int ret, i, j;
if (kstrtoul(buf, 16, &value))
return -EINVAL;
/* Enable clock */
ret = pm_runtime_resume_and_get(drvdata->dev);
if (ret < 0)
return ret;
spin_lock(&drvdata->spinlock);
/* Unlock the TGU LAR */
TGU_UNLOCK(drvdata);
if (value) {
/* Disable TGU to program the triggers */
tgu_writel(drvdata, 0, TGU_CONTROL);
/* program the TGU group data for the desired use case */
for (i = 0; i < drvdata->grp_refcnt; i++)
tgu_writel(drvdata, drvdata->grp_data[i].value,
drvdata->grp_data[i].grpaddr);
/* set the NOT bits of the unused condition decode registers to 1 */
for (i = 0; i <= drvdata->max_conditions; i++) {
for (j = 0; j <= drvdata->max_steps; j++)
tgu_writel(drvdata, 0x1000000,
CONDITION_DECODE_STEP(i, j));
}
/* program the TGU condition decode for the desired use case */
for (i = 0; i < drvdata->cond_refcnt; i++)
tgu_writel(drvdata, drvdata->condition_data[i].value,
drvdata->condition_data[i].condaddr);
/* program the TGU condition select for the desired use case */
for (i = 0; i < drvdata->select_refcnt; i++)
tgu_writel(drvdata, drvdata->select_data[i].value,
drvdata->select_data[i].selectaddr);
/* program any configured timer and counter compare values */
for (i = 0; i < drvdata->timer_refcnt; i++)
tgu_writel(drvdata, drvdata->timer_data[i].value,
drvdata->timer_data[i].timeraddr);
for (i = 0; i < drvdata->counter_refcnt; i++)
tgu_writel(drvdata, drvdata->counter_data[i].value,
drvdata->counter_data[i].counteraddr);
/* enable the TGU once the triggers are programmed */
tgu_writel(drvdata, 1, TGU_CONTROL);
drvdata->enable = true;
dev_dbg(dev, "Coresight-TGU enabled\n");
} else {
/* Disable the TGU and drop the reference taken at enable */
tgu_writel(drvdata, 0, TGU_CONTROL);
drvdata->enable = false;
pm_runtime_put_sync(drvdata->dev);
dev_dbg(dev, "Coresight-TGU disabled\n");
}
TGU_LOCK(drvdata);
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(enable_tgu);
static ssize_t reset_tgu_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long value;
struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
int ret;
if (kstrtoul(buf, 16, &value))
return -EINVAL;
if (!drvdata->enable) {
/* Enable clock */
ret = pm_runtime_resume_and_get(drvdata->dev);
if (ret < 0)
return ret;
}
spin_lock(&drvdata->spinlock);
/* Unlock the TGU LAR */
TGU_UNLOCK(drvdata);
if (value) {
/* Disable TGU to program the triggers */
tgu_writel(drvdata, 0, TGU_CONTROL);
/* Reset the reference counters */
drvdata->grp_refcnt = 0;
drvdata->cond_refcnt = 0;
drvdata->select_refcnt = 0;
drvdata->timer_refcnt = 0;
drvdata->counter_refcnt = 0;
dev_dbg(dev, "Coresight-TGU reset\n");
} else {
dev_dbg(dev, "Invalid input to reset the TGU\n");
}
TGU_LOCK(drvdata);
spin_unlock(&drvdata->spinlock);
pm_runtime_put_sync(drvdata->dev);
return size;
}
static DEVICE_ATTR_WO(reset_tgu);
static ssize_t set_group_store(struct device *dev, struct device_attribute
*attr, const char *buf, size_t size)
{
struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
int grp, reg, step;
unsigned long value;
if (sscanf(buf, "%d %d %d %lx", &grp, &reg, &step, &value) != 4)
return -EINVAL;
spin_lock(&drvdata->spinlock);
if (drvdata->grp_refcnt >= MAX_GROUP_SETS) {
dev_err(drvdata->dev, "Too many groups are being configured\n");
spin_unlock(&drvdata->spinlock);
return -EINVAL;
}
if ((grp >= 0) && (grp < MAX_GROUPS) && (reg <= drvdata->max_regs) &&
(step <= drvdata->max_steps)) {
drvdata->grp_data[drvdata->grp_refcnt].grpaddr =
GROUP_REG_STEP(grp, reg, step);
drvdata->grp_data[drvdata->grp_refcnt].value = value;
drvdata->grp_refcnt++;
} else {
dev_err(drvdata->dev, "Invalid group data\n");
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(set_group);
static ssize_t set_condition_store(struct device *dev, struct device_attribute
*attr, const char *buf, size_t size)
{
struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long value;
int cond, step;
if (drvdata->cond_refcnt >= MAX_CONDITION_SETS) {
dev_err(drvdata->dev, "Too many conditions are being configured\n");
return -EINVAL;
}
if (sscanf(buf, "%d %d %lx", &cond, &step, &value) != 3)
return -EINVAL;
spin_lock(&drvdata->spinlock);
if ((cond <= drvdata->max_conditions) && (step <=
drvdata->max_steps)) {
drvdata->condition_data[drvdata->cond_refcnt].condaddr =
CONDITION_DECODE_STEP(cond, step);
drvdata->condition_data[drvdata->cond_refcnt].value = value;
drvdata->cond_refcnt++;
} else {
dev_err(drvdata->dev, "Invalid condition decode data\n");
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(set_condition);
static ssize_t set_select_store(struct device *dev, struct device_attribute
*attr, const char *buf, size_t size)
{
struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long value;
int select, step;
if (drvdata->select_refcnt >= MAX_CONDITION_SETS) {
dev_err(drvdata->dev, "Too many selects are being configured\n");
return -EINVAL;
}
if (sscanf(buf, "%d %d %lx", &select, &step, &value) != 3)
return -EINVAL;
spin_lock(&drvdata->spinlock);
if ((select <= drvdata->max_conditions) && (step <=
drvdata->max_steps)) {
drvdata->select_data[drvdata->select_refcnt].selectaddr =
CONDITION_SELECT_STEP(select, step);
drvdata->select_data[drvdata->select_refcnt].value = value;
drvdata->select_refcnt++;
} else {
dev_err(drvdata->dev, "Invalid select decode data\n");
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(set_select);
static ssize_t set_timer_store(struct device *dev, struct device_attribute
*attr, const char *buf, size_t size)
{
struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long value;
int step;
if (drvdata->timer_refcnt >= MAX_TIMER_COUNTER_SETS) {
dev_err(drvdata->dev, "Too many timers are being configured\n");
return -EINVAL;
}
if (sscanf(buf, "%d %lx", &step, &value) != 2)
return -EINVAL;
spin_lock(&drvdata->spinlock);
if (step <= drvdata->max_timer_counter) {
drvdata->timer_data[drvdata->timer_refcnt].timeraddr =
TIMER0_COMPARE_STEP(step);
drvdata->timer_data[drvdata->timer_refcnt].value = value;
drvdata->timer_refcnt++;
} else {
dev_err(drvdata->dev, "Invalid TGU timer data\n");
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(set_timer);
static ssize_t set_counter_store(struct device *dev, struct device_attribute
*attr, const char *buf, size_t size)
{
struct tgu_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long value;
int step;
if (drvdata->counter_refcnt >= MAX_TIMER_COUNTER_SETS) {
dev_err(drvdata->dev, "Too many counters are being configured\n");
return -EINVAL;
}
if (sscanf(buf, "%d %lx", &step, &value) != 2)
return -EINVAL;
spin_lock(&drvdata->spinlock);
if (step <= drvdata->max_timer_counter) {
drvdata->counter_data[drvdata->counter_refcnt].counteraddr =
COUNTER0_COMPARE_STEP(step);
drvdata->counter_data[drvdata->counter_refcnt].value = value;
drvdata->counter_refcnt++;
} else {
dev_err(drvdata->dev, "Invalid TGU counter data\n");
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(set_counter);
static struct attribute *tgu_attrs[] = {
&dev_attr_enable_tgu.attr,
&dev_attr_reset_tgu.attr,
&dev_attr_set_group.attr,
&dev_attr_set_condition.attr,
&dev_attr_set_select.attr,
&dev_attr_set_timer.attr,
&dev_attr_set_counter.attr,
NULL,
};
static struct attribute_group tgu_attr_grp = {
.attrs = tgu_attrs,
};
static const struct attribute_group *tgu_attr_grps[] = {
&tgu_attr_grp,
NULL,
};
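/*
 * Expected (illustrative) programming flow from userspace, matching the
 * sscanf formats above: write "grp reg step value" to set_group,
 * "cond step value" to set_condition/set_select and "step value" to
 * set_timer/set_counter as needed, then write 1 to enable_tgu; writing
 * 1 to reset_tgu discards the staged configuration.
 */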
static int tgu_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct tgu_drvdata *drvdata;
struct coresight_desc desc = { 0 };
desc.name = coresight_alloc_device_name(&tgu_devs, dev);
if (!desc.name)
return -ENOMEM;
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
adev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->dev = &adev->dev;
dev_set_drvdata(dev, drvdata);
drvdata->base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
spin_lock_init(&drvdata->spinlock);
ret = of_property_read_u32(adev->dev.of_node, "tgu-steps",
&drvdata->max_steps);
if (ret)
return -EINVAL;
ret = of_property_read_u32(adev->dev.of_node, "tgu-conditions",
&drvdata->max_conditions);
if (ret)
return -EINVAL;
ret = of_property_read_u32(adev->dev.of_node, "tgu-regs",
&drvdata->max_regs);
if (ret)
return -EINVAL;
ret = of_property_read_u32(adev->dev.of_node, "tgu-timer-counters",
&drvdata->max_timer_counter);
if (ret)
return -EINVAL;
/* Alloc memory for Grps, Conditions and Steps */
drvdata->grp_data = devm_kzalloc(dev, MAX_GROUP_SETS *
sizeof(*drvdata->grp_data),
GFP_KERNEL);
if (!drvdata->grp_data)
return -ENOMEM;
drvdata->condition_data = devm_kzalloc(dev, MAX_CONDITION_SETS *
sizeof(*drvdata->condition_data),
GFP_KERNEL);
if (!drvdata->condition_data)
return -ENOMEM;
drvdata->select_data = devm_kzalloc(dev, MAX_CONDITION_SETS *
sizeof(*drvdata->select_data),
GFP_KERNEL);
if (!drvdata->select_data)
return -ENOMEM;
drvdata->timer_data = devm_kzalloc(dev, MAX_TIMER_COUNTER_SETS *
sizeof(*drvdata->timer_data),
GFP_KERNEL);
if (!drvdata->timer_data)
return -ENOMEM;
drvdata->counter_data = devm_kzalloc(dev, MAX_TIMER_COUNTER_SETS *
sizeof(*drvdata->counter_data),
GFP_KERNEL);
if (!drvdata->counter_data)
return -ENOMEM;
drvdata->enable = false;
desc.type = CORESIGHT_DEV_TYPE_HELPER;
desc.pdata = adev->dev.platform_data;
desc.dev = &adev->dev;
desc.groups = tgu_attr_grps;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
goto err;
}
pm_runtime_put_sync(&adev->dev);
dev_dbg(dev, "TGU initialized\n");
return 0;
err:
pm_runtime_put_sync(&adev->dev);
return ret;
}
static const struct amba_id tgu_ids[] = {
{
.id = 0x0003b999,
.mask = 0x0003ffff,
.data = "TGU",
},
{ 0, 0},
};
MODULE_DEVICE_TABLE(amba, tgu_ids);
static struct amba_driver tgu_driver = {
.drv = {
.name = "coresight-tgu",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = tgu_probe,
.id_table = tgu_ids,
};
module_amba_driver(tgu_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CoreSight TGU driver");

View File

@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012, 2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Description: CoreSight Trace Memory Controller driver
*/
@@ -23,14 +24,22 @@
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/amba/bus.h>
#include <linux/cpu_pm.h>
#include <linux/pm_domain.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-common.h"
DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
static LIST_HEAD(delay_probe_list);
static LIST_HEAD(cpu_pm_list);
static enum cpuhp_state hp_online;
static DEFINE_SPINLOCK(delay_lock);
int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
struct coresight_device *csdev = drvdata->csdev;
@@ -65,6 +74,11 @@ void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
tmc_wait_for_tmcready(drvdata);
}
void tmc_disable_stop_on_flush(struct tmc_drvdata *drvdata)
{
drvdata->stop_on_flush = false;
}
void tmc_enable_hw(struct tmc_drvdata *drvdata)
{
writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
@@ -182,19 +196,25 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
ssize_t actual;
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
mutex_lock(&drvdata->mem_lock);
actual = tmc_get_sysfs_trace(drvdata, *ppos, len, &bufp);
if (actual <= 0) {
mutex_unlock(&drvdata->mem_lock);
return 0;
}
if (copy_to_user(data, bufp, actual)) {
dev_dbg(&drvdata->csdev->dev,
"%s: copy_to_user failed\n", __func__);
mutex_unlock(&drvdata->mem_lock);
return -EFAULT;
}
*ppos += actual;
dev_dbg(&drvdata->csdev->dev, "%zu bytes copied\n", actual);
mutex_unlock(&drvdata->mem_lock);
return actual;
}
@@ -253,21 +273,114 @@ static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
return memwidth;
}
static ssize_t coresight_tmc_reg32_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cs_off_attribute *cs_attr = container_of(attr, struct cs_off_attribute, attr);
int ret;
u32 val;
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
spin_lock(&drvdata->spinlock);
if (!drvdata->pm_config.hw_powered) {
ret = -EINVAL;
goto out;
}
val = readl_relaxed(drvdata->base + cs_attr->off);
out:
spin_unlock(&drvdata->spinlock);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
pm_runtime_put_sync(dev->parent);
if (ret)
return ret;
return sysfs_emit(buf, "0x%x\n", val);
}
static ssize_t coresight_tmc_reg64_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct cs_pair_attribute *cs_attr = container_of(attr, struct cs_pair_attribute, attr);
int ret;
u64 val;
ret = pm_runtime_resume_and_get(dev->parent);
if (ret < 0)
return ret;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
pm_runtime_put_sync(dev->parent);
return ret;
}
}
spin_lock(&drvdata->spinlock);
if (!drvdata->pm_config.hw_powered) {
ret = -EINVAL;
goto out;
}
val = readl_relaxed(drvdata->base + cs_attr->lo_off) |
((u64)readl_relaxed(drvdata->base + cs_attr->hi_off) << 32);
out:
spin_unlock(&drvdata->spinlock);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
pm_runtime_put_sync(dev->parent);
if (ret)
return ret;
return sysfs_emit(buf, "0x%llx\n", val);
}
#define coresight_tmc_reg32(name, offset) \
(&((struct cs_off_attribute[]) { \
{ \
__ATTR(name, 0444, coresight_tmc_reg32_show, NULL), \
offset \
} \
})[0].attr.attr)
#define coresight_tmc_reg64(name, lo_off, hi_off) \
(&((struct cs_pair_attribute[]) { \
{ \
__ATTR(name, 0444, coresight_tmc_reg64_show, NULL), \
lo_off, hi_off \
} \
})[0].attr.attr)
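/*
 * Each macro above wraps one register in an anonymous compound-literal
 * attribute, so the table below can be built from offsets alone without
 * declaring a named cs_off_attribute/cs_pair_attribute per register.
 */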
static struct attribute *coresight_tmc_mgmt_attrs[] = {
coresight_tmc_reg32(rsz, TMC_RSZ),
coresight_tmc_reg32(sts, TMC_STS),
coresight_tmc_reg64(rrp, TMC_RRP, TMC_RRPHI),
coresight_tmc_reg64(rwp, TMC_RWP, TMC_RWPHI),
coresight_tmc_reg32(trg, TMC_TRG),
coresight_tmc_reg32(ctl, TMC_CTL),
coresight_tmc_reg32(ffsr, TMC_FFSR),
coresight_tmc_reg32(ffcr, TMC_FFCR),
coresight_tmc_reg32(mode, TMC_MODE),
coresight_tmc_reg32(pscr, TMC_PSCR),
coresight_tmc_reg32(devid, CORESIGHT_DEVID),
coresight_tmc_reg64(dba, TMC_DBALO, TMC_DBAHI),
coresight_tmc_reg32(axictl, TMC_AXICTL),
coresight_tmc_reg32(authstatus, TMC_AUTHSTATUS),
NULL,
};
@@ -277,7 +390,7 @@ static ssize_t trigger_cntr_show(struct device *dev,
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->trigger_cntr;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t trigger_cntr_store(struct device *dev,
@@ -302,7 +415,7 @@ static ssize_t buffer_size_show(struct device *dev,
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%#x\n", drvdata->size);
}
static ssize_t buffer_size_store(struct device *dev,
@@ -329,23 +442,144 @@ static ssize_t buffer_size_store(struct device *dev,
static DEVICE_ATTR_RW(buffer_size);
static ssize_t block_size_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
u32 val = 0;
if (drvdata->byte_cntr)
val = drvdata->byte_cntr->block_size;
return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t block_size_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (!drvdata->byte_cntr)
return -EINVAL;
if (val && val < 4096) {
pr_err("Block size must be at least 4096 bytes\n");
return -EINVAL;
}
mutex_lock(&drvdata->byte_cntr->byte_cntr_lock);
drvdata->byte_cntr->block_size = val;
mutex_unlock(&drvdata->byte_cntr->byte_cntr_lock);
return size;
}
static DEVICE_ATTR_RW(block_size);
static ssize_t out_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%s\n",
str_tmc_etr_out_mode[drvdata->out_mode]);
}
static ssize_t out_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
char str[10] = "";
int ret;
if (strlen(buf) >= 10)
return -EINVAL;
if (sscanf(buf, "%9s", str) != 1)
return -EINVAL;
ret = tmc_etr_switch_mode(drvdata, str);
return ret ? ret : size;
}
static DEVICE_ATTR_RW(out_mode);
static ssize_t stop_on_flush_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%x\n", drvdata->stop_on_flush ? 1 : 0);
}
static ssize_t stop_on_flush_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
unsigned long val;
struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
return -EINVAL;
if (val)
drvdata->stop_on_flush = true;
else
drvdata->stop_on_flush = false;
return size;
}
static DEVICE_ATTR_RW(stop_on_flush);
static struct attribute *coresight_tmc_etr_attrs[] = {
&dev_attr_trigger_cntr.attr,
&dev_attr_buffer_size.attr,
&dev_attr_block_size.attr,
&dev_attr_out_mode.attr,
&dev_attr_stop_on_flush.attr,
NULL,
};
static struct attribute *coresight_tmc_etf_attrs[] = {
&dev_attr_trigger_cntr.attr,
&dev_attr_stop_on_flush.attr,
NULL,
};
static const struct attribute_group coresight_tmc_etr_group = {
.attrs = coresight_tmc_etr_attrs,
};
static const struct attribute_group coresight_tmc_etf_group = {
.attrs = coresight_tmc_etf_attrs,
};
static const struct attribute_group coresight_tmc_mgmt_group = {
.attrs = coresight_tmc_mgmt_attrs,
.name = "mgmt",
};
static const struct attribute_group *coresight_tmc_etr_groups[] = {
&coresight_tmc_etr_group,
&coresight_tmc_mgmt_group,
NULL,
};
static const struct attribute_group *coresight_tmc_etf_groups[] = {
&coresight_tmc_etf_group,
&coresight_tmc_mgmt_group,
NULL,
};
@@ -429,7 +663,7 @@ static u32 tmc_etr_get_max_burst_size(struct device *dev)
return burst_size;
}
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
static int tmc_add_coresight_dev(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
u32 devid;
@@ -442,12 +676,10 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_dev_list *dev_list = NULL;
ret = -ENOMEM;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
drvdata = dev_get_drvdata(dev);
if (!drvdata)
goto out;
dev_set_drvdata(dev, drvdata);
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base)) {
@@ -455,10 +687,19 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
goto out;
}
drvdata->dclk = devm_clk_get(dev, "dynamic_clk");
if (!IS_ERR(drvdata->dclk)) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret)
return ret == -ETIMEDOUT ? -EPROBE_DEFER : ret;
} else
drvdata->dclk = NULL;
drvdata->base = base;
desc.access = CSDEV_ACCESS_IOMEM(base);
spin_lock_init(&drvdata->spinlock);
mutex_init(&drvdata->mem_lock);
devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
drvdata->config_type = BMVAL(devid, 6, 7);
@@ -467,39 +708,62 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->pid = -1;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
drvdata->out_mode = TMC_ETR_OUT_MODE_MEM;
drvdata->size = tmc_etr_get_default_buffer_size(dev);
drvdata->max_burst_size = tmc_etr_get_max_burst_size(dev);
} else {
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
ret = of_get_coresight_csr_name(adev->dev.of_node, &drvdata->csr_name);
if (ret)
dev_dbg(dev, "No csr data\n");
else {
drvdata->csr = coresight_csr_get(drvdata->csr_name);
if (IS_ERR(drvdata->csr)) {
dev_dbg(dev, "failed to get csr, defer probe\n");
return -EPROBE_DEFER;
}
}
desc.dev = dev;
desc.groups = coresight_tmc_groups;
drvdata->stop_on_flush = false;
switch (drvdata->config_type) {
case TMC_CONFIG_TYPE_ETB:
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.ops = &tmc_etb_cs_ops;
desc.groups = coresight_tmc_etf_groups;
dev_list = &etb_devs;
break;
case TMC_CONFIG_TYPE_ETR:
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
desc.ops = &tmc_etr_cs_ops;
desc.groups = coresight_tmc_etr_groups;
ret = tmc_etr_setup_caps(dev, devid,
coresight_get_uci_data(id));
if (ret)
goto out;
idr_init(&drvdata->idr);
mutex_init(&drvdata->idr_mutex);
dev_list = &etr_devs;
drvdata->byte_cntr = byte_cntr_init(adev, drvdata);
ret = tmc_etr_usb_init(adev, drvdata);
if (ret)
goto out;
break;
case TMC_CONFIG_TYPE_ETF:
desc.type = CORESIGHT_DEV_TYPE_LINKSINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
desc.ops = &tmc_etf_cs_ops;
desc.groups = coresight_tmc_etf_groups;
dev_list = &etf_devs;
break;
default:
@@ -534,12 +798,206 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
ret = misc_register(&drvdata->miscdev);
if (ret)
coresight_unregister(drvdata->csdev);
else
pm_runtime_put(&adev->dev);
else {
drvdata->pm_config.hw_powered = true;
pm_runtime_put_sync(&adev->dev);
}
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
out:
return ret;
}
static int tmc_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
void *v)
{
unsigned int cpu = smp_processor_id();
struct tmc_drvdata *drvdata, *tmp;
struct pm_config *pm_config;
unsigned long flags;
switch (cmd) {
case CPU_PM_ENTER:
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!cpumask_test_cpu(cpu, &pm_config->online_cpus)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
continue;
}
cpumask_clear_cpu(cpu, &pm_config->powered_cpus);
if (cpumask_empty(&pm_config->powered_cpus))
pm_config->hw_powered = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
break;
case CPU_PM_EXIT:
case CPU_PM_ENTER_FAILED:
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!cpumask_test_cpu(cpu, &pm_config->online_cpus)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
continue;
}
pm_config->hw_powered = true;
cpumask_set_cpu(cpu, &pm_config->powered_cpus);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
break;
}
return NOTIFY_OK;
}
static struct notifier_block tmc_cpu_pm_nb = {
.notifier_call = tmc_cpu_pm_notify,
};
static int tmc_offline_cpu(unsigned int cpu)
{
struct tmc_drvdata *drvdata, *tmp;
struct pm_config *pm_config;
unsigned long flags;
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
cpumask_clear_cpu(cpu, &pm_config->online_cpus);
cpumask_clear_cpu(cpu, &pm_config->powered_cpus);
if (cpumask_empty(&pm_config->powered_cpus))
pm_config->hw_powered = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
return 0;
}
static int tmc_online_cpu(unsigned int cpu)
{
int ret;
struct tmc_drvdata *drvdata, *tmp;
struct pm_config *pm_config;
unsigned long flags;
struct delay_probe_arg *init_arg, *arg_tmp;
list_for_each_entry_safe(drvdata, tmp, &cpu_pm_list, link) {
pm_config = &drvdata->pm_config;
if (!cpumask_test_cpu(cpu, pm_config->pd_cpumask))
continue;
spin_lock_irqsave(&drvdata->spinlock, flags);
cpumask_set_cpu(cpu, &pm_config->powered_cpus);
cpumask_set_cpu(cpu, &pm_config->online_cpus);
pm_config->hw_powered = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
}
list_for_each_entry_safe(init_arg, arg_tmp, &delay_probe_list, link) {
if (cpumask_test_cpu(cpu, init_arg->cpumask)) {
drvdata = amba_get_drvdata(init_arg->adev);
pm_config = &drvdata->pm_config;
spin_lock(&delay_lock);
drvdata->delayed = NULL;
list_del(&init_arg->link);
spin_unlock(&delay_lock);
ret = pm_runtime_resume_and_get(&init_arg->adev->dev);
if (ret < 0)
return ret;
ret = tmc_add_coresight_dev(init_arg->adev, init_arg->id);
if (ret)
pm_runtime_put_sync(&init_arg->adev->dev);
else {
pm_config->pd_cpumask = init_arg->cpumask;
cpumask_set_cpu(cpu, &pm_config->powered_cpus);
cpumask_set_cpu(cpu, &pm_config->online_cpus);
pm_config->pm_enable = true;
spin_lock(&delay_lock);
list_add(&drvdata->link, &cpu_pm_list);
spin_unlock(&delay_lock);
}
}
}
return 0;
}
static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct generic_pm_domain *pd;
struct delay_probe_arg *init_arg;
struct tmc_drvdata *drvdata;
int cpu, ret;
struct cpumask *cpumask;
struct pm_config *pm_config;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
dev_set_drvdata(dev, drvdata);
pm_config = &drvdata->pm_config;
if (dev->pm_domain) {
pd = pd_to_genpd(dev->pm_domain);
cpumask = pd->cpus;
if (cpumask_empty(cpumask))
return tmc_add_coresight_dev(adev, id);
cpus_read_lock();
for_each_online_cpu(cpu) {
if (cpumask_test_cpu(cpu, cpumask)) {
ret = tmc_add_coresight_dev(adev, id);
if (ret)
dev_dbg(dev, "add coresight_dev fail:%d\n", ret);
else {
pm_config->pd_cpumask = cpumask;
cpumask_and(&pm_config->powered_cpus,
cpumask, cpu_online_mask);
cpumask_copy(&pm_config->online_cpus,
&pm_config->powered_cpus);
pm_config->pm_enable = true;
spin_lock(&delay_lock);
list_add(&drvdata->link, &cpu_pm_list);
spin_unlock(&delay_lock);
}
cpus_read_unlock();
return ret;
}
}
init_arg = devm_kzalloc(dev, sizeof(*init_arg), GFP_KERNEL);
if (!init_arg) {
cpus_read_unlock();
return -ENOMEM;
}
spin_lock(&delay_lock);
init_arg->adev = adev;
init_arg->cpumask = pd->cpus;
init_arg->id = id;
list_add(&init_arg->link, &delay_probe_list);
drvdata->delayed = init_arg;
spin_unlock(&delay_lock);
cpus_read_unlock();
pm_runtime_put_sync(&adev->dev);
return 0;
}
return tmc_add_coresight_dev(adev, id);
}
static void tmc_shutdown(struct amba_device *adev)
{
unsigned long flags;
@@ -550,7 +1008,10 @@ static void tmc_shutdown(struct amba_device *adev)
if (drvdata->mode == CS_MODE_DISABLED)
goto out;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR &&
(drvdata->out_mode == TMC_ETR_OUT_MODE_MEM ||
(drvdata->out_mode == TMC_ETR_OUT_MODE_USB &&
drvdata->usb_data->usb_mode == TMC_ETR_USB_SW)))
tmc_etr_disable_hw(drvdata);
/*
@@ -566,15 +1027,63 @@ static void tmc_remove(struct amba_device *adev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
spin_lock(&delay_lock);
if (drvdata->delayed) {
list_del(&drvdata->delayed->link);
spin_unlock(&delay_lock);
return;
}
if (drvdata->pm_config.pm_enable)
list_del(&drvdata->delayed->link);
spin_unlock(&delay_lock);
if (!drvdata->csdev)
return;
/*
* Since misc_open() holds a refcount on the f_ops, which is
* etb fops in this case, device is there until last file
* handler to this device is closed.
*/
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR
&& drvdata->byte_cntr)
byte_cntr_remove(drvdata->byte_cntr);
misc_deregister(&drvdata->miscdev);
coresight_unregister(drvdata->csdev);
}
static int __init tmc_pm_setup(void)
{
int ret;
ret = cpu_pm_register_notifier(&tmc_cpu_pm_nb);
if (ret)
return ret;
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
"arm/coresight-tmc:online",
tmc_online_cpu, tmc_offline_cpu);
if (ret > 0) {
hp_online = ret;
return 0;
}
cpu_pm_unregister_notifier(&tmc_cpu_pm_nb);
return ret;
}
static void tmc_pm_clear(void)
{
cpu_pm_unregister_notifier(&tmc_cpu_pm_nb);
if (hp_online) {
cpuhp_remove_state_nocalls(hp_online);
hp_online = 0;
}
}
static const struct amba_id tmc_ids[] = {
CS_AMBA_ID(0x000bb961),
/* Coresight SoC 600 TMC-ETR/ETS */
@@ -600,7 +1109,33 @@ static struct amba_driver tmc_driver = {
.id_table = tmc_ids,
};
module_amba_driver(tmc_driver);
static int __init tmc_init(void)
{
int ret;
ret = tmc_pm_setup();
if (ret)
return ret;
ret = amba_driver_register(&tmc_driver);
if (ret) {
pr_err("Error registering tmc AMBA driver\n");
tmc_pm_clear();
return ret;
}
return ret;
}
static void __exit tmc_exit(void)
{
amba_driver_unregister(&tmc_driver);
tmc_pm_clear();
}
module_init(tmc_init);
module_exit(tmc_exit);
MODULE_AUTHOR("Pratik Patel <pratikp@codeaurora.org>");
MODULE_DESCRIPTION("Arm CoreSight Trace Memory Controller driver");

View File

@@ -2,6 +2,7 @@
/*
* Copyright(C) 2016 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/atomic.h>
@@ -32,10 +33,18 @@ static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
}
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN,
drvdata->base + TMC_FFCR);
if (drvdata->stop_on_flush) {
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN | TMC_FFCR_STOP_ON_FLUSH,
drvdata->base + TMC_FFCR);
} else {
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN,
drvdata->base + TMC_FFCR);
}
writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
tmc_enable_hw(drvdata);
@@ -84,7 +93,14 @@ static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
/* Check if the ETF is already disabled */
if (!(readl_relaxed(drvdata->base + TMC_CTL) & TMC_CTL_CAPT_EN)) {
CS_LOCK(drvdata->base);
return;
}
tmc_flush_and_stop(drvdata);
tmc_disable_stop_on_flush(drvdata);
/*
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
@@ -178,6 +194,14 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret)
return ret;
}
/*
* If we don't have a buffer release the lock and allocate memory.
* Otherwise keep the lock and move along.
@@ -188,8 +212,10 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
/* Allocating the memory here while outside of the spinlock */
buf = kzalloc(drvdata->size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (!buf) {
ret = -ENOMEM;
goto out;
}
/* Let's try again */
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -200,6 +226,10 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
goto out;
}
if (!drvdata->pm_config.hw_powered) {
ret = -EINVAL;
goto out;
}
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
@@ -241,6 +271,9 @@ out:
if (!used)
kfree(buf);
if (ret && drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
return ret;
}
@@ -253,9 +286,22 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
struct perf_output_handle *handle = data;
struct cs_buffers *buf = etm_perf_sink_config(handle);
if (buf == NULL)
return -EINVAL;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret)
return ret;
}
spin_lock_irqsave(&drvdata->spinlock, flags);
do {
ret = -EINVAL;
if (!drvdata->pm_config.hw_powered)
break;
if (drvdata->reading)
break;
/*
@@ -298,6 +344,9 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
} while (0);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (ret && drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
return ret;
}
@@ -329,31 +378,44 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev,
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
unsigned long flags;
int ret = 0;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->pm_config.hw_powered) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
ret = -EINVAL;
goto disable_clk;
}
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
ret = -EBUSY;
goto disable_clk;
}
if (atomic_dec_return(&csdev->refcnt)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
ret = -EBUSY;
goto disable_clk;
} else {
/* Complain if we (somehow) got out of sync */
WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
tmc_etb_disable_hw(drvdata);
/* Dissociate from monitored process. */
drvdata->pid = -1;
drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
}
/* Complain if we (somehow) got out of sync */
WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
tmc_etb_disable_hw(drvdata);
/* Dissociate from monitored process. */
drvdata->pid = -1;
drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
disable_clk:
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
return 0;
return ret;
}
static int tmc_enable_etf_link(struct coresight_device *csdev,
@@ -365,10 +427,24 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
bool first_enable = false;
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret)
return ret;
}
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->pm_config.hw_powered) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
ret = -EINVAL;
goto disable_clk;
}
if (drvdata->reading) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
ret = -EBUSY;
goto disable_clk;
}
if (atomic_read(&csdev->refcnt) == 0) {
@@ -384,6 +460,9 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
if (first_enable)
dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
disable_clk:
if (ret && drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
return ret;
}
@@ -401,13 +480,21 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
return;
}
if (!drvdata->pm_config.hw_powered)
goto disable_clk;
if (atomic_dec_return(&csdev->refcnt) == 0) {
tmc_etf_disable_hw(drvdata);
drvdata->mode = CS_MODE_DISABLED;
last_disable = true;
}
disable_clk:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
if (last_disable)
dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}
@@ -488,6 +575,9 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->pm_config.hw_powered)
goto out;
/* Don't do anything if another tracer is using this sink */
if (atomic_read(&csdev->refcnt) != 1)
goto out;
@@ -642,6 +732,10 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
/* Disable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS) {
if (!drvdata->pm_config.hw_powered) {
ret = -EINVAL;
goto out;
}
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
@@ -663,7 +757,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
char *buf = NULL;
enum tmc_mode mode;
unsigned long flags;
int rc = 0;
int ret = 0;
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
@@ -674,6 +768,10 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
/* Re-enable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS) {
if (!drvdata->pm_config.hw_powered) {
ret = -EINVAL;
goto out;
}
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
@@ -689,11 +787,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
* can't be NULL.
*/
memset(drvdata->buf, 0, drvdata->size);
rc = __tmc_etb_enable_hw(drvdata);
if (rc) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
}
__tmc_etb_enable_hw(drvdata);
} else {
/*
* The ETB/ETF is not tracing and the buffer was just read.
@@ -702,7 +796,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
buf = drvdata->buf;
drvdata->buf = NULL;
}
out:
drvdata->reading = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -712,5 +806,5 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
*/
kfree(buf);
return 0;
return ret;
}

View File

@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright(C) 2016 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
@@ -10,6 +11,7 @@
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/qcom-iommu-util.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -18,6 +20,7 @@
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-common.h"
struct etr_flat_buf {
struct device *dev;
@@ -171,8 +174,15 @@ static void tmc_pages_free(struct tmc_pages *tmc_pages,
__free_page(tmc_pages->pages[i]);
}
kfree(tmc_pages->pages);
kfree(tmc_pages->daddrs);
if (is_vmalloc_addr(tmc_pages->pages))
vfree(tmc_pages->pages);
else
kfree(tmc_pages->pages);
if (is_vmalloc_addr(tmc_pages->daddrs))
vfree(tmc_pages->daddrs);
else
kfree(tmc_pages->daddrs);
tmc_pages->pages = NULL;
tmc_pages->daddrs = NULL;
tmc_pages->nr_pages = 0;
@@ -198,14 +208,24 @@ static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
nr_pages = tmc_pages->nr_pages;
tmc_pages->daddrs = kcalloc(nr_pages, sizeof(*tmc_pages->daddrs),
GFP_KERNEL);
if (!tmc_pages->daddrs)
return -ENOMEM;
if (!tmc_pages->daddrs) {
tmc_pages->daddrs = vmalloc(sizeof(*tmc_pages->daddrs) * nr_pages);
if (!tmc_pages->daddrs)
return -ENOMEM;
}
tmc_pages->pages = kcalloc(nr_pages, sizeof(*tmc_pages->pages),
GFP_KERNEL);
if (!tmc_pages->pages) {
kfree(tmc_pages->daddrs);
tmc_pages->daddrs = NULL;
return -ENOMEM;
tmc_pages->pages = vmalloc(sizeof(*tmc_pages->pages) * nr_pages);
if (!tmc_pages->pages) {
if (is_vmalloc_addr(tmc_pages->daddrs))
vfree(tmc_pages->daddrs);
else
kfree(tmc_pages->daddrs);
tmc_pages->daddrs = NULL;
return -ENOMEM;
}
}
for (i = 0; i < nr_pages; i++) {
@@ -258,6 +278,39 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
}
EXPORT_SYMBOL_GPL(tmc_free_sg_table);
static long tmc_flat_get_rwp_offset(struct tmc_drvdata *drvdata)
{
dma_addr_t paddr = drvdata->sysfs_buf->hwaddr;
u64 rwp;
rwp = tmc_read_rwp(drvdata);
return rwp - paddr;
}
static long tmc_sg_get_rwp_offset(struct tmc_drvdata *drvdata)
{
struct etr_buf *etr_buf = drvdata->sysfs_buf;
struct etr_sg_table *etr_table = etr_buf->private;
struct tmc_sg_table *table = etr_table->sg_table;
u64 rwp;
long w_offset;
rwp = tmc_read_rwp(drvdata);
w_offset = tmc_sg_get_data_page_offset(table, rwp);
return w_offset;
}
long tmc_get_rwp_offset(struct tmc_drvdata *drvdata)
{
struct etr_buf *etr_buf = drvdata->sysfs_buf;
if (etr_buf->mode == ETR_MODE_FLAT)
return tmc_flat_get_rwp_offset(drvdata);
else
return tmc_sg_get_rwp_offset(drvdata);
}
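/*
 * Worked example (illustrative, made-up addresses): for a flat DMA
 * buffer based at physical address 0x80000000, a hardware write pointer
 * (RWP) of 0x80001000 makes tmc_flat_get_rwp_offset() return 0x1000,
 * the number of bytes written since the buffer base. For a scatter-
 * gather buffer the RWP points into one of the table's data pages, so
 * tmc_sg_get_data_page_offset() walks the page table to translate it
 * into the equivalent linear offset.
 */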
/*
* Alloc pages for the table. Since this will be used by the device,
* allocate the pages closer to the device (i.e, dev_to_node(dev)
@@ -847,9 +900,23 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata,
bool has_sg, has_catu;
struct etr_buf *etr_buf;
struct device *dev = &drvdata->csdev->dev;
int mapping_config = 0;
struct iommu_domain *domain;
has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG);
has_iommu = iommu_get_domain_for_dev(dev->parent);
domain = iommu_get_domain_for_dev(dev->parent);
if (domain) {
mapping_config = qcom_iommu_get_mappings_configuration(domain);
if (mapping_config < 0)
return ERR_PTR(-ENOMEM);
if (mapping_config & QCOM_IOMMU_MAPPING_CONF_S1_BYPASS)
has_iommu = false;
else
has_iommu = true;
} else {
has_iommu = false;
}
has_catu = !!tmc_etr_get_catu_device(drvdata);
has_sg = has_catu || has_etr_sg;
@@ -906,7 +973,7 @@ static void tmc_free_etr_buf(struct etr_buf *etr_buf)
* Returns: The size of the linear data available @pos, with *bufpp
* updated to point to the buffer.
*/
static ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
u64 offset, size_t len, char **bufpp)
{
/* Adjust the length to limit this transaction to end of buffer */
@@ -957,7 +1024,7 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
return;
}
etr_buf->full = !!(status & TMC_STS_FULL);
etr_buf->full = status & TMC_STS_FULL;
WARN_ON(!etr_buf->ops || !etr_buf->ops->sync);
@@ -1012,10 +1079,18 @@ static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
writel_relaxed(sts, drvdata->base + TMC_STS);
}
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN,
drvdata->base + TMC_FFCR);
if (drvdata->stop_on_flush) {
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN | TMC_FFCR_STOP_ON_FLUSH,
drvdata->base + TMC_FFCR);
} else {
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
TMC_FFCR_TRIGON_TRIGIN,
drvdata->base + TMC_FFCR);
}
writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
tmc_enable_hw(drvdata);
@@ -1084,7 +1159,12 @@ ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
static struct etr_buf *
tmc_etr_setup_sysfs_buf(struct tmc_drvdata *drvdata)
{
return tmc_alloc_etr_buf(drvdata, drvdata->size,
if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB &&
drvdata->usb_data->usb_mode == TMC_ETR_USB_SW)
return tmc_alloc_etr_buf(drvdata, TMC_ETR_SW_USB_BUF_SIZE,
0, cpu_to_node(0), NULL);
else
return tmc_alloc_etr_buf(drvdata, drvdata->size,
0, cpu_to_node(0), NULL);
}
@@ -1119,6 +1199,7 @@ static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
tmc_disable_stop_on_flush(drvdata);
/*
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
@@ -1143,7 +1224,6 @@ void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
{
int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;
@@ -1155,41 +1235,29 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
* buffer, provided the size matches. Any allocation has to be done
* with the lock released.
*/
spin_lock_irqsave(&drvdata->spinlock, flags);
sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if ((drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
|| (drvdata->out_mode == TMC_ETR_OUT_MODE_USB &&
drvdata->usb_data->usb_mode ==
TMC_ETR_USB_SW)) {
sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
if (!sysfs_buf || (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM
&& sysfs_buf->size != drvdata->size)
|| (drvdata->out_mode == TMC_ETR_OUT_MODE_USB
&& drvdata->usb_data->usb_mode == TMC_ETR_USB_SW
&& sysfs_buf->size != TMC_ETR_SW_USB_BUF_SIZE)) {
/* Allocate memory with the locks released */
free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
if (IS_ERR(new_buf))
return new_buf;
/* Let's try again */
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Allocate memory with the locks released */
free_buf = new_buf = tmc_etr_setup_sysfs_buf(drvdata);
if (IS_ERR(new_buf))
return new_buf;
free_buf = sysfs_buf;
drvdata->sysfs_buf = new_buf;
}
}
if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
ret = -EBUSY;
goto out;
}
/*
* If we don't have a buffer or it doesn't match the requested size,
* use the buffer allocated above. Otherwise reuse the existing buffer.
*/
sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
free_buf = sysfs_buf;
drvdata->sysfs_buf = new_buf;
}
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free memory outside the spinlock if need be */
if (free_buf)
tmc_etr_free_sysfs_buf(free_buf);
return ret ? ERR_PTR(ret) : drvdata->sysfs_buf;
}
@@ -1198,13 +1266,10 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etr_buf *sysfs_buf = tmc_etr_get_sysfs_buffer(csdev);
struct etr_buf *sysfs_buf = NULL;
if (IS_ERR(sysfs_buf))
return PTR_ERR(sysfs_buf);
spin_lock_irqsave(&drvdata->spinlock, flags);
/*
* In sysFS mode we can have multiple writers per sink. Since this
* sink is already enabled no memory is needed and the HW need not be
@@ -1212,20 +1277,50 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
*/
if (drvdata->mode == CS_MODE_SYSFS) {
atomic_inc(&csdev->refcnt);
goto out;
goto unlock_out;
}
if (drvdata->reading || drvdata->mode == CS_MODE_PERF ||
drvdata->busy) {
ret = -EBUSY;
goto unlock_out;
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
sysfs_buf = tmc_etr_get_sysfs_buffer(csdev);
if (IS_ERR(sysfs_buf))
return PTR_ERR(sysfs_buf);
spin_lock_irqsave(&drvdata->spinlock, flags);
if ((drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) ||
(drvdata->out_mode == TMC_ETR_OUT_MODE_USB &&
drvdata->usb_data->usb_mode ==
TMC_ETR_USB_SW)) {
ret = tmc_etr_enable_hw(drvdata, sysfs_buf);
if (ret)
goto unlock_out;
}
ret = tmc_etr_enable_hw(drvdata, sysfs_buf);
if (!ret) {
drvdata->mode = CS_MODE_SYSFS;
atomic_inc(&csdev->refcnt);
}
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (!ret)
dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
tmc_etr_byte_cntr_start(drvdata->byte_cntr);
if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
tmc_usb_enable(drvdata->usb_data);
dev_dbg(&csdev->dev, "TMC-ETR enabled\n");
return 0;
unlock_out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
@@ -1632,7 +1727,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't use this sink if it is already claimed by sysFS */
if (drvdata->mode == CS_MODE_SYSFS) {
if (drvdata->mode == CS_MODE_SYSFS || drvdata->busy) {
rc = -EBUSY;
goto unlock_out;
}
@@ -1677,9 +1772,15 @@ unlock_out:
static int tmc_enable_etr_sink(struct coresight_device *csdev,
enum cs_mode mode, void *data)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret;
switch (mode) {
case CS_MODE_SYSFS:
return tmc_enable_etr_sink_sysfs(csdev);
mutex_lock(&drvdata->mem_lock);
ret = tmc_enable_etr_sink_sysfs(csdev);
mutex_unlock(&drvdata->mem_lock);
return ret;
case CS_MODE_PERF:
return tmc_enable_etr_sink_perf(csdev, data);
default:
@@ -1687,10 +1788,12 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev,
}
}
static int tmc_disable_etr_sink(struct coresight_device *csdev)
static int _tmc_disable_etr_sink(struct coresight_device *csdev,
bool mode_switch)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
u32 previous_mode;
spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -1699,14 +1802,30 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
return -EBUSY;
}
if (atomic_dec_return(&csdev->refcnt)) {
if (atomic_dec_return(&csdev->refcnt) && !mode_switch) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
/* Complain if we (somehow) got out of sync */
WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
tmc_etr_disable_hw(drvdata);
if (drvdata->out_mode == TMC_ETR_OUT_MODE_MEM ||
(drvdata->out_mode == TMC_ETR_OUT_MODE_USB &&
drvdata->usb_data->usb_mode == TMC_ETR_USB_SW)) {
if (drvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
tmc_usb_disable(drvdata->usb_data);
spin_lock_irqsave(&drvdata->spinlock, flags);
}
tmc_etr_disable_hw(drvdata);
} else {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
tmc_usb_disable(drvdata->usb_data);
spin_lock_irqsave(&drvdata->spinlock, flags);
}
/* Presave mode to ensure if it's need to stop byte_cntr. */
previous_mode = drvdata->mode;
/* Dissociate from monitored process. */
drvdata->pid = -1;
drvdata->mode = CS_MODE_DISABLED;
@@ -1714,11 +1833,93 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
drvdata->perf_buf = NULL;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (previous_mode == CS_MODE_SYSFS && drvdata->out_mode == TMC_ETR_OUT_MODE_MEM)
tmc_etr_byte_cntr_stop(drvdata->byte_cntr);
dev_dbg(&csdev->dev, "TMC-ETR disabled\n");
return 0;
}
static int tmc_disable_etr_sink(struct coresight_device *csdev)
{
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret;
uint32_t mode;
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
mode = drvdata->mode;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
switch (mode) {
/* mem_lock is used to support USB mode for protection */
case CS_MODE_SYSFS:
mutex_lock(&drvdata->mem_lock);
ret = _tmc_disable_etr_sink(csdev, false);
mutex_unlock(&drvdata->mem_lock);
return ret;
case CS_MODE_PERF:
return _tmc_disable_etr_sink(csdev, false);
}
/* We shouldn't be here */
return -EINVAL;
}
int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
{
enum tmc_etr_out_mode new_mode, old_mode;
mutex_lock(&drvdata->mem_lock);
if (!strcmp(out_mode, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_MEM]))
new_mode = TMC_ETR_OUT_MODE_MEM;
else if (!strcmp(out_mode, str_tmc_etr_out_mode[TMC_ETR_OUT_MODE_USB])) {
if (drvdata->usb_data->usb_mode == TMC_ETR_USB_NONE) {
dev_err(&drvdata->csdev->dev,
"USB mode is not supported.\n");
mutex_unlock(&drvdata->mem_lock);
return -EINVAL;
}
new_mode = TMC_ETR_OUT_MODE_USB;
} else {
mutex_unlock(&drvdata->mem_lock);
return -EINVAL;
}
if (drvdata->mode == CS_MODE_PERF) {
mutex_unlock(&drvdata->mem_lock);
return -EINVAL;
}
if (new_mode == drvdata->out_mode) {
mutex_unlock(&drvdata->mem_lock);
return 0;
}
if (drvdata->mode == CS_MODE_DISABLED) {
drvdata->out_mode = new_mode;
mutex_unlock(&drvdata->mem_lock);
return 0;
}
_tmc_disable_etr_sink(drvdata->csdev, true);
old_mode = drvdata->out_mode;
drvdata->out_mode = new_mode;
if (tmc_enable_etr_sink_sysfs(drvdata->csdev)) {
drvdata->out_mode = old_mode;
tmc_enable_etr_sink_sysfs(drvdata->csdev);
dev_err(&drvdata->csdev->dev,
"Switch to %s failed. Fall back to %s.\n",
str_tmc_etr_out_mode[new_mode],
str_tmc_etr_out_mode[old_mode]);
mutex_unlock(&drvdata->mem_lock);
return -EINVAL;
}
mutex_unlock(&drvdata->mem_lock);
return 0;
}
static const struct coresight_ops_sink tmc_etr_sink_ops = {
.enable = tmc_enable_etr_sink,
.disable = tmc_disable_etr_sink,
@@ -1740,6 +1941,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (drvdata->reading) {
ret = -EBUSY;
@@ -1756,6 +1958,11 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
goto out;
}
if (drvdata->byte_cntr && drvdata->byte_cntr->enable) {
ret = -EINVAL;
goto out;
}
/* Disable the TMC if we are trying to read from a running session. */
if (drvdata->mode == CS_MODE_SYSFS)
__tmc_etr_disable_hw(drvdata);
@@ -1763,6 +1970,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
drvdata->reading = true;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
mutex_unlock(&drvdata->mem_lock);
return ret;
}
@@ -1776,6 +1984,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
return -EINVAL;
mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
/* RE-enable the TMC if need be */
@@ -1802,5 +2011,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
if (sysfs_buf)
tmc_etr_free_sysfs_buf(sysfs_buf);
mutex_unlock(&drvdata->mem_lock);
return 0;
}

View File

@@ -0,0 +1,705 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/of_address.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/dma-mapping.h>
#include <linux/of_reserved_mem.h>
#include "coresight-priv.h"
#include "coresight-common.h"
#include "coresight-tmc.h"
#include "coresight-qmi.h"
#define APSS 1
#define MPSS 2
#define ETR1 1
/*
* struct secure_etr_buf - description of a secure ETR buffer
* @base: virtual address of the buffer.
* @paddr: physical address of the buffer.
* @size: size of the buffer.
*/
struct secure_etr_buf {
void __iomem *base;
phys_addr_t paddr;
size_t size;
};
/*
* struct secure_etr_drvdata - description of secure ETR driver data
* @dev: the device entity associated to this component.
* @csdev: standard CoreSight device information.
* @real_name: name of the real ETR device associated with it.
* @real_sink: drvdata of the real ETR sink.
* @sram_node: name of the "/dev/xxx.tmc" device node.
* @etm_inst_id: supported secure ETM inst_id.
* @etr_buf: secure ETR buffer.
* @mode: how this TMC is being used.
* @csr: related CSR.
* @csr_name: name of the related CSR.
* @atid_offset: ATID register offset within the CSR.
* @mem_size: size of the reserved memory region.
* @clk: ETR clock.
*/
struct secure_etr_drvdata {
struct device *dev;
struct coresight_device *csdev;
const char *sram_node;
struct cdev sram_dev;
struct class *sram_class;
const char *real_name;
struct tmc_drvdata *real_sink;
uint32_t etm_inst_id;
struct mutex mem_lock;
spinlock_t spinlock;
bool reading;
u32 mode;
struct secure_etr_buf *etr_buf;
struct coresight_csr *csr;
const char *csr_name;
u32 atid_offset;
u32 mem_size;
struct clk *clk;
};
DEFINE_CORESIGHT_DEVLIST(secure_etr_devs, "secure_etr");
/*
* TMC SECURE ETR could be connected to a QMI device, which can send commands
* to a subsystem via QMI. This is represented by the output port of the TMC
* (ETR) connected to the input port of the QMI device.
*
* Returns : coresight_device ptr for the QMI device if a QMI is found.
* : NULL otherwise.
*/
static struct coresight_device *
secure_etr_get_qmi_device(struct secure_etr_drvdata *drvdata)
{
int i;
struct coresight_device *tmp, *etr = drvdata->csdev;
if (!IS_ENABLED(CONFIG_CORESIGHT_QMI))
return NULL;
for (i = 0; i < etr->pdata->nr_outconns; i++) {
tmp = etr->pdata->out_conns[i]->dest_dev;
if (tmp && coresight_is_qmi_device(tmp))
return tmp;
}
return NULL;
}
static int secure_etr_assign_to_mpss(struct secure_etr_drvdata *drvdata)
{
struct coresight_device *qmi = secure_etr_get_qmi_device(drvdata);
struct coresight_etr_assign_req_msg_v01 *etr_data;
etr_data = kzalloc(sizeof(*etr_data), GFP_KERNEL);
if (!etr_data)
return -ENOMEM;
etr_data->subsys_id = MPSS;
etr_data->etr_id = ETR1;
etr_data->buffer_base = drvdata->etr_buf->paddr;
etr_data->buffer_size = drvdata->etr_buf->size;
if (qmi)
return coresight_qmi_etr_assign(qmi, etr_data);
return 0;
}
static int secure_etr_assign_to_apss(struct secure_etr_drvdata *drvdata)
{
struct coresight_device *qmi = secure_etr_get_qmi_device(drvdata);
struct coresight_etr_assign_req_msg_v01 *etr_data;
etr_data = kzalloc(sizeof(*etr_data), GFP_KERNEL);
if (!etr_data)
return -ENOMEM;
etr_data->subsys_id = APSS;
etr_data->etr_id = ETR1;
etr_data->buffer_base = drvdata->etr_buf->paddr;
etr_data->buffer_size = drvdata->etr_buf->size;
if (qmi)
return coresight_qmi_etr_assign(qmi, etr_data);
return 0;
}
static int secure_etr_reenable_remote_source(struct secure_etr_drvdata *drvdata)
{
struct coresight_device *qmi = secure_etr_get_qmi_device(drvdata);
if (qmi)
return coresight_qmi_remote_etm_enable(qmi);
return 0;
}
static int coresight_sink_by_id(struct device *dev, const void *data)
{
struct coresight_device *csdev = to_coresight_device(dev);
unsigned long hash;
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
if (!csdev->ea)
return 0;
/*
* See function etm_perf_add_symlink_sink() to know where
* this comes from.
*/
hash = (unsigned long)csdev->ea->var;
if ((u32)hash == *(u32 *)data)
return 1;
}
return 0;
}
static struct tmc_drvdata *coresight_get_real_dev(
struct secure_etr_drvdata *drvdata, u32 id)
{
struct device *dev = NULL;
dev = bus_find_device(drvdata->csdev->dev.bus, NULL, &id,
coresight_sink_by_id);
return dev ? dev_get_drvdata(dev->parent) : NULL;
}
static ssize_t buffer_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct secure_etr_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%#x\n", drvdata->mem_size);
}
static DEVICE_ATTR_RO(buffer_size);
static struct attribute *coresight_secure_etr_attrs[] = {
&dev_attr_buffer_size.attr,
NULL,
};
static const struct attribute_group coresight_secure_etr_group = {
.attrs = coresight_secure_etr_attrs,
};
static const struct attribute_group *coresight_secure_etr_groups[] = {
&coresight_secure_etr_group,
NULL,
};
/*
* secure_etr_map_mem_permission - assign the reserved memory region to MPSS
*/
static int secure_etr_map_mem_permission(struct secure_etr_buf *etr_buf)
{
struct qcom_scm_vmperm dst_perms;
u64 src_perms;
int ret;
src_perms = BIT(QCOM_SCM_VMID_HLOS);
dst_perms.vmid = QCOM_SCM_VMID_MSS_MSA;
dst_perms.perm = QCOM_SCM_PERM_RW;
ret = qcom_scm_assign_mem(etr_buf->paddr, etr_buf->size,
&src_perms, &dst_perms, 1);
return ret;
}
/*
*secure_etr_unmap_mem_permission - unmap the reserved memory region
*/
static int secure_etr_unmap_mem_permission(struct secure_etr_buf *etr_buf)
{
struct qcom_scm_vmperm dst_perms;
u64 src_perms;
int ret;
src_perms = BIT(QCOM_SCM_VMID_MSS_MSA);
dst_perms.vmid = QCOM_SCM_VMID_HLOS;
dst_perms.perm = QCOM_SCM_PERM_RWX;
ret = qcom_scm_assign_mem(etr_buf->paddr, etr_buf->size,
&src_perms, &dst_perms, 1);
return ret;
}
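/*
 * Note on the two helpers above: qcom_scm_assign_mem() transfers
 * ownership of the [paddr, paddr + size) region between VMs through a
 * secure-world call. Mapping hands the HLOS-owned buffer to the modem
 * (QCOM_SCM_VMID_MSS_MSA) read-write; unmapping returns it to HLOS
 * read-write-execute. On success the call also updates the source
 * permission bitmask through the pointer, which is why src_perms is a
 * local passed by reference rather than a constant.
 */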
static int secure_etr_allocate_mem(struct secure_etr_drvdata *drvdata)
{
dma_addr_t dma_handle;
phys_addr_t phys_addr;
void *mem_vaddr;
struct sg_table mem_dump_sgt;
struct secure_etr_buf *etr_buf;
mem_vaddr = dmam_alloc_coherent(drvdata->dev, drvdata->mem_size,
&dma_handle, GFP_KERNEL);
if (!mem_vaddr)
return -ENOMEM;
dma_get_sgtable(drvdata->dev, &mem_dump_sgt, mem_vaddr,
dma_handle, drvdata->mem_size);
phys_addr = page_to_phys(sg_page(mem_dump_sgt.sgl));
sg_free_table(&mem_dump_sgt);
memset(mem_vaddr, 0x0, drvdata->mem_size);
etr_buf = devm_kzalloc(drvdata->dev, sizeof(*etr_buf), GFP_KERNEL);
if (!etr_buf)
return -ENOMEM;
etr_buf->base = mem_vaddr;
etr_buf->size = drvdata->mem_size;
etr_buf->paddr = phys_addr;
drvdata->etr_buf = etr_buf;
return 0;
}
static void secure_etr_free_mem(struct secure_etr_drvdata *drvdata)
{
struct secure_etr_buf *etr_buf = drvdata->etr_buf;
dmam_free_coherent(drvdata->dev, etr_buf->size, etr_buf->base,
etr_buf->paddr);
}
static int secure_etr_open(struct inode *inode, struct file *file)
{
int ret = 0;
struct secure_etr_drvdata *drvdata = container_of(inode->i_cdev,
struct secure_etr_drvdata, sram_dev);
mutex_lock(&drvdata->mem_lock);
if (drvdata->reading) {
ret = -EBUSY;
goto out;
}
if (drvdata->mode != CS_MODE_SYSFS) {
ret = -EINVAL;
goto out;
}
/*
* reclaim ownership of ETR.
* assign the reserved memory region to apss.
*/
ret = secure_etr_assign_to_apss(drvdata);
if (ret)
goto out;
secure_etr_unmap_mem_permission(drvdata->etr_buf);
drvdata->reading = true;
file->private_data = drvdata;
out:
mutex_unlock(&drvdata->mem_lock);
if (!ret)
nonseekable_open(inode, file);
return ret;
}
static ssize_t secure_etr_read(struct file *file, char __user *data,
size_t len, loff_t *ppos)
{
struct secure_etr_drvdata *drvdata = file->private_data;
struct secure_etr_buf *etr_buf = drvdata->etr_buf;
mutex_lock(&drvdata->mem_lock);
if ((*ppos + len) > etr_buf->size || (*ppos + len) < len)
len = etr_buf->size - *ppos;
if (len <= 0) {
mutex_unlock(&drvdata->mem_lock);
return len;
}
if (copy_to_user(data, (etr_buf->base + *ppos), len)) {
dev_dbg(drvdata->dev,
"%s: copy_to_user failed\n", __func__);
mutex_unlock(&drvdata->mem_lock);
return -EFAULT;
}
*ppos += len;
dev_dbg(drvdata->dev, "%zu bytes copied\n", len);
mutex_unlock(&drvdata->mem_lock);
return len;
}
static int secure_etr_release(struct inode *inode, struct file *file)
{
int ret = 0;
struct secure_etr_drvdata *drvdata = file->private_data;
mutex_lock(&drvdata->mem_lock);
/*
* assign the reserved memory region to mpss.
* assign ownership of ETR to mpss.
* re-enable secure remote etm.
*/
secure_etr_map_mem_permission(drvdata->etr_buf);
ret = secure_etr_assign_to_mpss(drvdata);
if (ret)
goto out;
ret = secure_etr_reenable_remote_source(drvdata);
if (ret)
goto out;
drvdata->reading = false;
out:
mutex_unlock(&drvdata->mem_lock);
return ret;
}
static const struct file_operations secure_etr_fops = {
.owner = THIS_MODULE,
.open = secure_etr_open,
.read = secure_etr_read,
.release = secure_etr_release,
.llseek = no_llseek,
};
static int sec_etr_sram_dev_register(struct secure_etr_drvdata *drvdata)
{
int ret;
struct device *device;
dev_t dev;
ret = alloc_chrdev_region(&dev, 0, 1, drvdata->sram_node);
if (ret)
goto err_alloc;
cdev_init(&drvdata->sram_dev, &secure_etr_fops);
drvdata->sram_dev.owner = THIS_MODULE;
ret = cdev_add(&drvdata->sram_dev, dev, 1);
if (ret)
goto err_cdev_add;
drvdata->sram_class = class_create(drvdata->sram_node);
if (IS_ERR(drvdata->sram_class)) {
ret = PTR_ERR(drvdata->sram_class);
goto err_class_create;
}
device = device_create(drvdata->sram_class, NULL,
drvdata->sram_dev.dev, drvdata,
drvdata->sram_node);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
goto err_dev_create;
}
return 0;
err_dev_create:
class_destroy(drvdata->sram_class);
err_class_create:
cdev_del(&drvdata->sram_dev);
err_cdev_add:
unregister_chrdev_region(drvdata->sram_dev.dev, 1);
err_alloc:
return ret;
}
static void sec_etr_sram_dev_deregister(struct secure_etr_drvdata *drvdata)
{
device_destroy(drvdata->sram_class, drvdata->sram_dev.dev);
class_destroy(drvdata->sram_class);
cdev_del(&drvdata->sram_dev);
unregister_chrdev_region(drvdata->sram_dev.dev, 1);
}
static int enable_secure_etr_sink(struct coresight_device *csdev,
enum cs_mode mode, void *data)
{
struct secure_etr_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret = 0;
u32 hash;
struct tmc_drvdata *real_sink;
mutex_lock(&drvdata->mem_lock);
if (drvdata->reading) {
ret = -EBUSY;
goto unlock_out;
}
if (drvdata->mode == CS_MODE_SYSFS) {
atomic_inc(&csdev->refcnt);
goto unlock_out;
}
/*
* check the status of real ETR. If real ETR is enabled,
* secure ETR cannot be enabled.
*/
hash = hashlen_hash(hashlen_string(NULL, drvdata->real_name));
real_sink = coresight_get_real_dev(drvdata, hash);
if (!real_sink) {
dev_info(drvdata->dev, "real_sink config error\n");
ret = -EINVAL;
goto unlock_out;
}
if (real_sink->mode == CS_MODE_SYSFS ||
real_sink->mode == CS_MODE_PERF) {
dev_info(drvdata->dev, "%s is enabled, please disable it\n",
drvdata->real_name);
ret = -EBUSY;
goto unlock_out;
}
ret = clk_prepare_enable(drvdata->clk);
if (ret)
goto unlock_out;
ret = secure_etr_allocate_mem(drvdata);
if (ret) {
clk_disable_unprepare(drvdata->clk);
goto unlock_out;
}
/*
* assign the reserved memory region to mpss.
* assign ownership of ETR to mpss.
*/
secure_etr_map_mem_permission(drvdata->etr_buf);
ret = secure_etr_assign_to_mpss(drvdata);
if (ret)
goto err;
dev_info(drvdata->dev, "modem etr enable\n");
drvdata->mode = CS_MODE_SYSFS;
/*
* set real etr busy to true. This ensures that
* real ETR cannot be enabled when secure etr is used.
*/
real_sink->busy = true;
drvdata->real_sink = real_sink;
atomic_inc(&csdev->refcnt);
goto unlock_out;
err:
secure_etr_unmap_mem_permission(drvdata->etr_buf);
secure_etr_free_mem(drvdata);
clk_disable_unprepare(drvdata->clk);
unlock_out:
mutex_unlock(&drvdata->mem_lock);
return ret;
}
static int disable_secure_etr_sink(struct coresight_device *csdev)
{
struct secure_etr_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret = 0;
mutex_lock(&drvdata->mem_lock);
if (drvdata->reading) {
ret = -EBUSY;
goto unlock_out;
}
if (atomic_dec_return(&csdev->refcnt)) {
ret = -EBUSY;
goto unlock_out;
}
/*
* reclaim ownership of ETR.
* assign the reserved memory region to apss.
*/
ret = secure_etr_assign_to_apss(drvdata);
if (ret)
dev_err(drvdata->dev, "assign etr to apss fail\n");
secure_etr_unmap_mem_permission(drvdata->etr_buf);
secure_etr_free_mem(drvdata);
dev_info(drvdata->dev, "disable modem etr\n");
drvdata->mode = CS_MODE_DISABLED;
if (drvdata->real_sink) {
drvdata->real_sink->mode = CS_MODE_DISABLED;
drvdata->real_sink = NULL;
}
unlock_out:
clk_disable_unprepare(drvdata->clk);
mutex_unlock(&drvdata->mem_lock);
return ret;
}
static const struct coresight_ops_sink secure_etr_sink_ops = {
.enable = enable_secure_etr_sink,
.disable = disable_secure_etr_sink,
};
const struct coresight_ops secure_etr_cs_ops = {
.sink_ops = &secure_etr_sink_ops,
};
/*
* secure_etr_map_memory - initialize reserved memory region.
*/
static int secure_etr_map_memory(struct secure_etr_drvdata *drvdata)
{
struct device_node *mem_node;
int ret;
struct device *dev = drvdata->dev;
mem_node = of_parse_phandle(dev->of_node, "memory-region", 0);
if (mem_node) {
of_node_put(dev->of_node);
ret = of_reserved_mem_device_init_by_idx(dev,
dev->of_node, 0);
if (ret) {
dev_err(dev,
"Failed to initialize reserved mem, ret %d\n",
ret);
return ret;
}
}
return 0;
}
static int secure_etr_probe(struct platform_device *pdev)
{
int ret;
struct device *dev = &pdev->dev;
struct coresight_platform_data *pdata;
struct secure_etr_drvdata *drvdata;
struct coresight_desc desc = { 0 };
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->dev = dev;
platform_set_drvdata(pdev, drvdata);
pdata = coresight_get_platform_data(dev);
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret < 0)
return ret;
if (IS_ERR(pdata))
return PTR_ERR(pdata);
dev->platform_data = pdata;
spin_lock_init(&drvdata->spinlock);
mutex_init(&drvdata->mem_lock);
drvdata->clk = devm_clk_get(dev, "apb_pclk");
if (IS_ERR(drvdata->clk))
dev_err(dev, "not config clk\n");
ret = of_property_read_string(dev->of_node, "real-name",
&drvdata->real_name);
if (ret)
return ret;
ret = of_property_read_u32(dev->of_node, "qdss,buffer-size",
&drvdata->mem_size);
if (ret)
return ret;
ret = of_get_coresight_csr_name(dev->of_node, &drvdata->csr_name);
if (ret)
dev_err(dev, "No csr data\n");
else {
drvdata->csr = coresight_csr_get(drvdata->csr_name);
if (IS_ERR(drvdata->csr)) {
dev_err(dev, "failed to get csr, defer probe\n");
return -EPROBE_DEFER;
}
}
of_property_read_u32(dev->of_node, "csr-atid-offset",
&drvdata->atid_offset);
ret = secure_etr_map_memory(drvdata);
if (ret)
return ret;
desc.name = coresight_alloc_device_name(&secure_etr_devs, dev);
if (!desc.name)
return -ENOMEM;
desc.dev = dev;
desc.pdata = pdata;
desc.type = CORESIGHT_DEV_TYPE_SINK;
desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM;
desc.ops = &secure_etr_cs_ops;
desc.groups = coresight_secure_etr_groups;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
return ret;
}
drvdata->sram_node = desc.name;
ret = sec_etr_sram_dev_register(drvdata);
if (ret) {
coresight_unregister(drvdata->csdev);
return ret;
}
pm_runtime_enable(dev);
return 0;
}
static int secure_etr_remove(struct platform_device *pdev)
{
struct secure_etr_drvdata *drvdata = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
sec_etr_sram_dev_deregister(drvdata);
coresight_unregister(drvdata->csdev);
return 0;
}
static const struct of_device_id secure_etr_match[] = {
{.compatible = "qcom,coresight-secure-etr"},
{}
};
static struct platform_driver secure_etr_driver = {
.probe = secure_etr_probe,
.remove = secure_etr_remove,
.driver = {
.name = "coresight-secure-etr",
.of_match_table = secure_etr_match,
.suppress_bind_attrs = true,
},
};
module_platform_driver(secure_etr_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CoreSight Secure ETR driver");

View File

@@ -0,0 +1,498 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Description: CoreSight TMC USB driver
*/
#include <linux/of_address.h>
#include <linux/delay.h>
#include <linux/qcom-iommu-util.h>
#include <linux/usb/usb_qdss.h>
#include <linux/time.h>
#include <linux/slab.h>
#include "coresight-tmc-usb.h"
#include "coresight-priv.h"
#include "coresight-common.h"
#include "coresight-tmc.h"
#define USB_BLK_SIZE 65536
#define USB_TOTAL_IRQ (TMC_ETR_SW_USB_BUF_SIZE/USB_BLK_SIZE)
#define USB_SG_NUM (USB_BLK_SIZE / PAGE_SIZE)
#define USB_BUF_NUM 255
#define USB_TIME_OUT (5 * HZ)
#define TMC_AXICTL_VALUE (0xf02)
#define TMC_FFCR_VALUE (0x133)
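/*
 * Sizing sanity check (illustrative): each USB block is USB_BLK_SIZE =
 * 64 KiB, i.e. USB_SG_NUM = 16 pages on a 4 KiB-page system, and one
 * byte-counter interrupt is expected per block. If, for example,
 * TMC_ETR_SW_USB_BUF_SIZE were 32 MiB (its actual value lives in
 * coresight-tmc.h and is not shown here), USB_TOTAL_IRQ would be 512:
 * the number of blocks the ETR buffer can hold before unread data is
 * overwritten.
 */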
static int usb_bypass_start(struct byte_cntr *byte_cntr_data)
{
long offset;
struct tmc_drvdata *tmcdrvdata;
if (!byte_cntr_data)
return -ENOMEM;
tmcdrvdata = byte_cntr_data->tmcdrvdata;
mutex_lock(&byte_cntr_data->usb_bypass_lock);
dev_info(&tmcdrvdata->csdev->dev,
"%s: Start usb bypass\n", __func__);
if (tmcdrvdata->mode != CS_MODE_SYSFS) {
mutex_unlock(&byte_cntr_data->usb_bypass_lock);
return -EINVAL;
}
offset = tmc_get_rwp_offset(tmcdrvdata);
if (offset < 0) {
dev_err(&tmcdrvdata->csdev->dev,
"%s: invalid rwp offset value\n", __func__);
mutex_unlock(&byte_cntr_data->usb_bypass_lock);
return offset;
}
byte_cntr_data->offset = offset;
byte_cntr_data->total_irq = 0;
tmcdrvdata->usb_data->drop_data_size = 0;
tmcdrvdata->usb_data->data_overwritten = false;
/* Ensure the USB channel (usbch) is ready */
if (!tmcdrvdata->usb_data->usbch) {
int i;
for (i = TIMEOUT_US; i > 0; i--) {
if (tmcdrvdata->usb_data->usbch)
break;
if (i - 1)
udelay(1);
else {
dev_err(&tmcdrvdata->csdev->dev,
"timeout while waiting usbch to be ready\n");
mutex_unlock(&byte_cntr_data->usb_bypass_lock);
return -EAGAIN;
}
}
}
atomic_set(&byte_cntr_data->usb_free_buf, USB_BUF_NUM);
byte_cntr_data->read_active = true;
/*
* The byte counter counts 8-byte words, so program it to raise an
* interrupt after every USB_BLK_SIZE bytes of trace data, i.e.
* USB_BLK_SIZE / 8 = 8192 counts for the 64 KiB block used here.
*/
coresight_csr_set_byte_cntr(byte_cntr_data->csr,
byte_cntr_data->irqctrl_offset,
USB_BLK_SIZE / 8);
atomic_set(&byte_cntr_data->irq_cnt, 0);
byte_cntr_data->total_size = 0;
mutex_unlock(&byte_cntr_data->usb_bypass_lock);
return 0;
}
static void usb_bypass_stop(struct byte_cntr *byte_cntr_data)
{
if (!byte_cntr_data)
return;
mutex_lock(&byte_cntr_data->usb_bypass_lock);
if (byte_cntr_data->read_active)
byte_cntr_data->read_active = false;
else {
mutex_unlock(&byte_cntr_data->usb_bypass_lock);
return;
}
wake_up(&byte_cntr_data->usb_wait_wq);
pr_info("coresight: stop usb bypass\n");
byte_cntr_data->rwp_offset = tmc_get_rwp_offset(byte_cntr_data->tmcdrvdata);
coresight_csr_set_byte_cntr(byte_cntr_data->csr, byte_cntr_data->irqctrl_offset, 0);
dev_dbg(&byte_cntr_data->tmcdrvdata->csdev->dev,
"USB total size: %lld, total irq: %lld,current irq:%d, offset: %ld, rwp_offset: %ld, drop_data: %lld\n",
byte_cntr_data->total_size, byte_cntr_data->total_irq,
atomic_read(&byte_cntr_data->irq_cnt),
byte_cntr_data->offset,
byte_cntr_data->rwp_offset,
byte_cntr_data->tmcdrvdata->usb_data->drop_data_size);
mutex_unlock(&byte_cntr_data->usb_bypass_lock);
}
static int usb_transfer_small_packet(struct byte_cntr *drvdata, size_t *small_size)
{
int ret = 0;
struct tmc_drvdata *tmcdrvdata = drvdata->tmcdrvdata;
struct etr_buf *etr_buf = tmcdrvdata->sysfs_buf;
struct qdss_request *usb_req = NULL;
size_t req_size;
long actual;
long w_offset;
w_offset = tmc_get_rwp_offset(tmcdrvdata);
if (w_offset < 0) {
ret = w_offset;
dev_err_ratelimited(&tmcdrvdata->csdev->dev,
"%s: RWP offset is invalid\n", __func__);
goto out;
}
if (unlikely(atomic_read(&drvdata->irq_cnt) > USB_TOTAL_IRQ)) {
tmcdrvdata->usb_data->data_overwritten = true;
dev_err_ratelimited(&tmcdrvdata->csdev->dev, "ETR data is overwritten.\n");
}
req_size = ((w_offset < drvdata->offset) ? etr_buf->size : 0) +
w_offset - drvdata->offset;
/*
* The byte-counter interrupt count may not match the amount of data
* actually pending in the ETR sink. When irq_cnt reads 0 but the
* pending data exceeds one block, derive irq_cnt in software.
*/
if (req_size + *small_size >= USB_BLK_SIZE
&& atomic_read(&drvdata->irq_cnt) == 0) {
atomic_set(&drvdata->irq_cnt, (req_size + *small_size)/USB_BLK_SIZE);
goto out;
}
while (req_size > 0) {
usb_req = kzalloc(sizeof(*usb_req), GFP_KERNEL);
if (!usb_req) {
ret = -ENOMEM;
goto out;
}
actual = tmc_etr_buf_get_data(etr_buf, drvdata->offset,
req_size, &usb_req->buf);
if (actual <= 0 || actual > req_size) {
kfree(usb_req);
usb_req = NULL;
dev_err_ratelimited(&tmcdrvdata->csdev->dev,
"%s: Invalid data in ETR\n", __func__);
ret = -EINVAL;
goto out;
}
usb_req->length = actual;
drvdata->usb_req = usb_req;
req_size -= actual;
if ((drvdata->offset + actual) >=
tmcdrvdata->sysfs_buf->size)
drvdata->offset = 0;
else
drvdata->offset += actual;
*small_size += actual;
if (atomic_read(&drvdata->usb_free_buf) > 0) {
ret = usb_qdss_write(tmcdrvdata->usb_data->usbch, usb_req);
if (ret) {
kfree(usb_req);
usb_req = NULL;
drvdata->usb_req = NULL;
dev_err_ratelimited(&tmcdrvdata->csdev->dev,
"Write data failed:%d\n", ret);
goto out;
}
drvdata->total_size += actual;
atomic_dec(&drvdata->usb_free_buf);
} else {
dev_err_ratelimited(&tmcdrvdata->csdev->dev,
"Drop data, offset = %lu, len = %zu\n",
drvdata->offset, req_size);
tmcdrvdata->usb_data->drop_data_size += actual;
kfree(usb_req);
drvdata->usb_req = NULL;
}
}
out:
return ret;
}
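/*
 * Illustrative numbers for the software irq_cnt fallback above (values
 * made up): if the write pointer leads drvdata->offset by 150 KiB while
 * the byte-counter interrupt count reads 0, then with USB_BLK_SIZE =
 * 64 KiB the computed (req_size + *small_size) / USB_BLK_SIZE is 2, so
 * two full blocks are handed back to the regular block-sized read path
 * and only a genuine sub-block remainder is drained here as small
 * packets.
 */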
static void usb_read_work_fn(struct work_struct *work)
{
int ret, i, seq = 0;
struct qdss_request *usb_req = NULL;
size_t req_size, req_sg_num, small_size = 0;
long actual;
ssize_t actual_total = 0;
char *buf;
struct byte_cntr *drvdata =
container_of(work, struct byte_cntr, read_work);
struct tmc_drvdata *tmcdrvdata = drvdata->tmcdrvdata;
struct etr_buf *etr_buf = tmcdrvdata->sysfs_buf;
while (tmcdrvdata->mode == CS_MODE_SYSFS
&& tmcdrvdata->out_mode == TMC_ETR_OUT_MODE_USB) {
if (!atomic_read(&drvdata->irq_cnt)) {
ret = wait_event_interruptible_timeout(
drvdata->usb_wait_wq,
atomic_read(&drvdata->irq_cnt) > 0
|| tmcdrvdata->mode != CS_MODE_SYSFS || tmcdrvdata->out_mode
!= TMC_ETR_OUT_MODE_USB
|| !drvdata->read_active, USB_TIME_OUT);
if (ret == -ERESTARTSYS || tmcdrvdata->mode != CS_MODE_SYSFS
|| tmcdrvdata->out_mode != TMC_ETR_OUT_MODE_USB
|| !drvdata->read_active)
break;
if (ret == 0) {
ret = usb_transfer_small_packet(drvdata, &small_size);
if (ret && ret != -EAGAIN)
return;
continue;
}
}
if (unlikely(atomic_read(&drvdata->irq_cnt) > USB_TOTAL_IRQ)) {
tmcdrvdata->usb_data->data_overwritten = true;
dev_err_ratelimited(&tmcdrvdata->csdev->dev, "ETR data is overwritten.\n");
}
req_size = USB_BLK_SIZE - small_size;
small_size = 0;
actual_total = 0;
if (req_size > 0) {
seq++;
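/* Round up to the number of PAGE_SIZE sg entries needed for this block. */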
req_sg_num = (req_size - 1) / PAGE_SIZE + 1;
usb_req = kzalloc(sizeof(*usb_req), GFP_KERNEL);
if (!usb_req)
return;
usb_req->sg = kcalloc(req_sg_num,
sizeof(*(usb_req->sg)), GFP_KERNEL);
if (!usb_req->sg) {
kfree(usb_req);
usb_req = NULL;
return;
}
for (i = 0; i < req_sg_num; i++) {
actual = tmc_etr_buf_get_data(etr_buf,
drvdata->offset,
PAGE_SIZE, &buf);
if (actual <= 0 || actual > PAGE_SIZE) {
kfree(usb_req->sg);
kfree(usb_req);
usb_req = NULL;
dev_err_ratelimited(
&tmcdrvdata->csdev->dev,
"Invalid data in ETR\n");
return;
}
sg_set_buf(&usb_req->sg[i], buf, actual);
if (i == 0)
usb_req->buf = buf;
if (i == req_sg_num - 1)
sg_mark_end(&usb_req->sg[i]);
if ((drvdata->offset + actual) >=
tmcdrvdata->sysfs_buf->size)
drvdata->offset = 0;
else
drvdata->offset += actual;
actual_total += actual;
}
usb_req->length = actual_total;
drvdata->usb_req = usb_req;
usb_req->num_sgs = i;
if (atomic_read(&drvdata->usb_free_buf) > 0) {
ret = usb_qdss_write(tmcdrvdata->usb_data->usbch,
drvdata->usb_req);
if (ret) {
kfree(usb_req->sg);
kfree(usb_req);
usb_req = NULL;
drvdata->usb_req = NULL;
dev_err_ratelimited(
&tmcdrvdata->csdev->dev,
"Write data failed:%d\n", ret);
if (ret == -EAGAIN)
continue;
return;
}
drvdata->total_size += actual_total;
atomic_dec(&drvdata->usb_free_buf);
} else {
dev_err_ratelimited(&tmcdrvdata->csdev->dev,
"Drop data, offset = %lu, seq = %d, irq = %d\n",
drvdata->offset, seq,
atomic_read(&drvdata->irq_cnt));
tmcdrvdata->usb_data->drop_data_size += actual_total;
kfree(usb_req->sg);
kfree(usb_req);
drvdata->usb_req = NULL;
}
}
if (atomic_read(&drvdata->irq_cnt) > 0)
atomic_dec(&drvdata->irq_cnt);
}
dev_err(&tmcdrvdata->csdev->dev, "TMC has been stopped.\n");
}
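/*
 * Completion callback for a queued qdss_request: return the buffer
 * credit taken on the submit path and free the request.
 */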
static void usb_write_done(struct byte_cntr *drvdata,
struct qdss_request *d_req)
{
atomic_inc(&drvdata->usb_free_buf);
if (d_req->status)
pr_err_ratelimited("USB write failed err:%d\n", d_req->status);
kfree(d_req->sg);
kfree(d_req);
}
static int usb_bypass_init(struct byte_cntr *byte_cntr_data)
{
byte_cntr_data->usb_wq = create_singlethread_workqueue("byte-cntr");
if (!byte_cntr_data->usb_wq)
return -ENOMEM;
byte_cntr_data->offset = 0;
mutex_init(&byte_cntr_data->usb_bypass_lock);
init_waitqueue_head(&byte_cntr_data->usb_wait_wq);
atomic_set(&byte_cntr_data->usb_free_buf, USB_BUF_NUM);
INIT_WORK(&(byte_cntr_data->read_work), usb_read_work_fn);
return 0;
}
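/*
 * USB QDSS event callback: CONNECT arms the SW bypass path and queues
 * the read worker, DISCONNECT tears it down, DATA_WRITE_DONE recycles
 * the completed request.
 */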
void usb_notifier(void *priv, unsigned int event, struct qdss_request *d_req,
struct usb_qdss_ch *ch)
{
struct tmc_drvdata *drvdata = priv;
int ret = 0;
if (!drvdata)
return;
if (drvdata->out_mode != TMC_ETR_OUT_MODE_USB) {
dev_err(&drvdata->csdev->dev,
"%s: ETR is not USB mode.\n", __func__);
return;
}
switch (event) {
case USB_QDSS_CONNECT:
if (drvdata->mode == CS_MODE_DISABLED) {
dev_err_ratelimited(&drvdata->csdev->dev,
"%s: ETR is disabled.\n", __func__);
return;
}
if (drvdata->usb_data->usb_mode == TMC_ETR_USB_SW) {
ret = usb_bypass_start(drvdata->byte_cntr);
if (ret < 0)
return;
usb_qdss_alloc_req(ch, USB_BUF_NUM);
queue_work(drvdata->byte_cntr->usb_wq, &(drvdata->byte_cntr->read_work));
}
break;
case USB_QDSS_DISCONNECT:
if (drvdata->mode == CS_MODE_DISABLED) {
dev_err_ratelimited(&drvdata->csdev->dev,
"%s: ETR is disabled.\n", __func__);
return;
}
if (drvdata->usb_data->usb_mode == TMC_ETR_USB_SW) {
usb_bypass_stop(drvdata->byte_cntr);
flush_work(&((drvdata->byte_cntr->read_work)));
usb_qdss_free_req(drvdata->usb_data->usbch);
}
break;
case USB_QDSS_DATA_WRITE_DONE:
if (drvdata->usb_data->usb_mode == TMC_ETR_USB_SW)
usb_write_done(drvdata->byte_cntr, d_req);
break;
default:
break;
}
}
static bool tmc_etr_support_usb_bypass(struct device *dev)
{
return fwnode_property_present(dev->fwnode, "qcom,sw-usb");
}
int tmc_usb_enable(struct tmc_usb_data *usb_data)
{
struct tmc_drvdata *tmcdrvdata;
if (!usb_data)
return -EINVAL;
tmcdrvdata = usb_data->tmcdrvdata;
if (usb_data->usb_mode == TMC_ETR_USB_SW)
usb_data->usbch = usb_qdss_open(USB_QDSS_CH_SW, tmcdrvdata, usb_notifier);
if (IS_ERR_OR_NULL(usb_data->usbch)) {
dev_err(&tmcdrvdata->csdev->dev, "usb_qdss_open failed for qdss.\n");
return -ENODEV;
}
return 0;
}
void tmc_usb_disable(struct tmc_usb_data *usb_data)
{
struct tmc_drvdata *tmcdrvdata = usb_data->tmcdrvdata;
if (usb_data->usb_mode == TMC_ETR_USB_SW)
usb_bypass_stop(tmcdrvdata->byte_cntr);
if (usb_data->usbch)
usb_qdss_close(usb_data->usbch);
else
dev_err(&tmcdrvdata->csdev->dev, "usb channel is null.\n");
}
int tmc_etr_usb_init(struct amba_device *adev,
struct tmc_drvdata *drvdata)
{
struct device *dev = &adev->dev;
struct tmc_usb_data *usb_data;
struct byte_cntr *byte_cntr_data;
int ret;
usb_data = devm_kzalloc(dev, sizeof(*usb_data), GFP_KERNEL);
if (!usb_data)
return -ENOMEM;
drvdata->usb_data = usb_data;
drvdata->usb_data->tmcdrvdata = drvdata;
byte_cntr_data = drvdata->byte_cntr;
if (tmc_etr_support_usb_bypass(dev)) {
usb_data->usb_mode = TMC_ETR_USB_SW;
usb_data->drop_data_size = 0;
usb_data->data_overwritten = false;
if (!byte_cntr_data)
return -EINVAL;
ret = usb_bypass_init(byte_cntr_data);
if (ret)
return -EINVAL;
return 0;
}
usb_data->usb_mode = TMC_ETR_USB_NONE;
pr_err("%s: ETR usb property is not configured!\n", dev_name(dev));
return 0;
}

View File

@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CORESIGHT_TMC_USB_H
#define _CORESIGHT_TMC_USB_H
#include <linux/amba/bus.h>
#include <linux/usb/usb_qdss.h>
#define TMC_USB_BAM_PIPE_INDEX 0
#define TMC_USB_BAM_NR_PIPES 2
enum tmc_etr_usb_mode {
TMC_ETR_USB_NONE,
TMC_ETR_USB_SW,
};
struct tmc_usb_data {
struct usb_qdss_ch *usbch;
enum tmc_etr_usb_mode usb_mode;
struct tmc_drvdata *tmcdrvdata;
bool data_overwritten;
u64 drop_data_size;
};
extern int tmc_usb_enable(struct tmc_usb_data *usb_data);
extern void tmc_usb_disable(struct tmc_usb_data *usb_data);
#endif
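A minimal sketch of how a sink driver might drive this API when switching the ETR output to USB; the fallback to memory mode on failure is an assumption for illustration, not taken from this diff:

/* Hedged sketch, not part of the driver: switch the ETR output to USB. */
static int example_switch_to_usb(struct tmc_drvdata *drvdata)
{
int ret;

drvdata->out_mode = TMC_ETR_OUT_MODE_USB;
ret = tmc_usb_enable(drvdata->usb_data); /* opens the QDSS channel */
if (ret)
drvdata->out_mode = TMC_ETR_OUT_MODE_MEM; /* assumed fallback */
return ret;
}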

View File

@@ -2,6 +2,7 @@
/*
* Copyright(C) 2015 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CORESIGHT_TMC_H
@@ -12,6 +13,9 @@
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include "coresight-priv.h"
#include "coresight-byte-cntr.h"
#include "coresight-tmc-usb.h"
#define TMC_RSZ 0x004
#define TMC_STS 0x00c
@@ -131,6 +135,9 @@ enum tmc_mem_intf_width {
#define CORESIGHT_SOC_600_ETR_CAPS \
(TMC_ETR_SAVE_RESTORE | TMC_ETR_AXI_ARCACHE)
/* SW USB reserved memory size */
#define TMC_ETR_SW_USB_BUF_SIZE SZ_64M
enum etr_mode {
ETR_MODE_FLAT, /* Uses contiguous flat buffer */
ETR_MODE_ETR_SG, /* Uses in-built TMC ETR SG mechanism */
@@ -139,6 +146,18 @@ enum etr_mode {
struct etr_buf_operations;
enum tmc_etr_out_mode {
TMC_ETR_OUT_MODE_NONE,
TMC_ETR_OUT_MODE_MEM,
TMC_ETR_OUT_MODE_USB,
};
static const char * const str_tmc_etr_out_mode[] = {
[TMC_ETR_OUT_MODE_NONE] = "none",
[TMC_ETR_OUT_MODE_MEM] = "mem",
[TMC_ETR_OUT_MODE_USB] = "usb",
};
/**
* struct etr_buf - Details of the buffer used by ETR
* @refcount: Number of sources currently using this etr_buf.
@@ -187,6 +206,15 @@ struct etr_buf {
* @idr_mutex: Access serialisation for idr.
* @sysfs_buf: SYSFS buffer for ETR.
* @perf_buf: PERF buffer for ETR.
* @byte_cntr: byte_cntr for ETR.
* @csr: CSR for ETR.
* @csr_name: name for CSR.
* @atid_offset: atid register offset for CSR.
* @out_mode: out mode for ETR.
* @usb_data: usb data for ETR.
* @stop_on_flush: flag of stop_on_flush for ETR.
* @delayed: parameter for delayed probe.
* @dclk: optional clock to be dynamically enabled when this device is enabled.
*/
struct tmc_drvdata {
void __iomem *base;
@@ -195,6 +223,7 @@ struct tmc_drvdata {
spinlock_t spinlock;
pid_t pid;
bool reading;
bool busy;
union {
char *buf; /* TMC ETB */
struct etr_buf *etr_buf; /* TMC ETR */
@@ -205,12 +234,24 @@ struct tmc_drvdata {
u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
struct mutex mem_lock;
u32 trigger_cntr;
u32 etr_caps;
struct idr idr;
struct mutex idr_mutex;
struct etr_buf *sysfs_buf;
struct etr_buf *perf_buf;
struct byte_cntr *byte_cntr;
struct coresight_csr *csr;
const char *csr_name;
u32 atid_offset;
enum tmc_etr_out_mode out_mode;
struct tmc_usb_data *usb_data;
bool stop_on_flush;
struct delay_probe_arg *delayed;
struct clk *dclk;
struct pm_config pm_config;
struct list_head link;
};
struct etr_buf_operations {
@@ -257,7 +298,10 @@ struct tmc_sg_table {
/* Generic functions */
int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
void tmc_disable_stop_on_flush(struct tmc_drvdata *drvdata);
void tmc_enable_hw(struct tmc_drvdata *drvdata);
extern int tmc_etr_usb_init(struct amba_device *adev,
struct tmc_drvdata *drvdata);
void tmc_disable_hw(struct tmc_drvdata *drvdata);
u32 tmc_get_memwidth_mask(struct tmc_drvdata *drvdata);
@@ -269,14 +313,20 @@ extern const struct coresight_ops tmc_etf_cs_ops;
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
loff_t pos, size_t len, char **bufpp);
ssize_t tmc_etr_buf_get_data(struct etr_buf *etr_buf,
u64 offset, size_t len, char **bufpp);
/* ETR functions */
int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
struct byte_cntr *byte_cntr_init(struct amba_device *adev,
struct tmc_drvdata *drvdata);
void byte_cntr_remove(struct byte_cntr *byte_cntr);
extern const struct coresight_ops tmc_etr_cs_ops;
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
loff_t pos, size_t len, char **bufpp);
long tmc_get_rwp_offset(struct tmc_drvdata *drvdata);
int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode);
#define TMC_REG_PAIR(name, lo_off, hi_off) \
static inline u64 \

View File

@@ -1,87 +1,237 @@
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/amba/bus.h>
#include <linux/bitfield.h>
#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/bitmap.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include "coresight-priv.h"
#include "coresight-tpda.h"
#include "coresight-common.h"
#include "coresight-trace-id.h"
#include "coresight-tpda.h"
#define tpda_writel(drvdata, val, off) __raw_writel((val), drvdata->base + off)
#define tpda_readl(drvdata, off) __raw_readl(drvdata->base + off)
#define TPDA_LOCK(drvdata) \
do { \
mb(); /* ensure configuration takes effect before we lock it */ \
tpda_writel(drvdata, 0x0, CORESIGHT_LAR); \
} while (0)
#define TPDA_UNLOCK(drvdata) \
do { \
tpda_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \
mb(); /* ensure unlock takes effect before we configure */ \
} while (0)
DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
/* Settings programmed before the port control register is enabled */
static void tpda_enable_pre_port(struct tpda_drvdata *drvdata)
static void __tpda_enable_pre_port(struct tpda_drvdata *drvdata)
{
u32 val;
uint32_t val;
val = readl_relaxed(drvdata->base + TPDA_CR);
val &= ~TPDA_CR_ATID;
val |= FIELD_PREP(TPDA_CR_ATID, drvdata->atid);
writel_relaxed(val, drvdata->base + TPDA_CR);
val = tpda_readl(drvdata, TPDA_CR);
/* Set the master id */
val = val & ~(0x7F << 13);
val = val & ~(0x7F << 6);
val |= (drvdata->atid << 6);
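/*
 * TPDA_CR feature bits programmed below: BIT(5) trigger async,
 * BIT(4) trigger flag+timestamp, BIT(3) trigger freq, BIT(2) freq
 * timestamp, BIT(20) CMB channel mode.
 */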
if (drvdata->trig_async)
val = val | BIT(5);
else
val = val & ~BIT(5);
if (drvdata->trig_flag_ts)
val = val | BIT(4);
else
val = val & ~BIT(4);
if (drvdata->trig_freq)
val = val | BIT(3);
else
val = val & ~BIT(3);
if (drvdata->freq_ts)
val = val | BIT(2);
else
val = val & ~BIT(2);
if (drvdata->cmbchan_mode)
val = val | BIT(20);
else
val = val & ~BIT(20);
tpda_writel(drvdata, val, TPDA_CR);
/*
* If the FLRIE bit is set, set the master and channel
* IDs to zero.
*/
if (BVAL(tpda_readl(drvdata, TPDA_CR), 4))
tpda_writel(drvdata, 0x0, TPDA_FPID_CR);
}
static void tpda_enable_port(struct tpda_drvdata *drvdata, int port)
static void __tpda_enable_port(struct tpda_drvdata *drvdata, int port)
{
u32 val;
uint32_t val;
val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port));
val = tpda_readl(drvdata, TPDA_Pn_CR(port));
if (drvdata->bc_esize[port] == 32)
val = val & ~BIT(4);
else if (drvdata->bc_esize[port] == 64)
val = val | BIT(4);
if (drvdata->tc_esize[port] == 32)
val = val & ~BIT(5);
else if (drvdata->tc_esize[port] == 64)
val = val | BIT(5);
if (drvdata->dsb_esize[port] == 32)
val = val & ~BIT(8);
else if (drvdata->dsb_esize[port] == 64)
val = val | BIT(8);
val = val & ~(0x3 << 6);
if (drvdata->cmb_esize[port] == 8)
val &= ~(0x3 << 6);
else if (drvdata->cmb_esize[port] == 32)
val |= (0x1 << 6);
else if (drvdata->cmb_esize[port] == 64)
val |= (0x2 << 6);
/* Set the hold time */
val = val & ~(0x7 << 1);
val |= (0x5 << 1);
tpda_writel(drvdata, val, TPDA_Pn_CR(port));
/* Enable the port */
val |= TPDA_Pn_CR_ENA;
writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
val = val | BIT(0);
tpda_writel(drvdata, val, TPDA_Pn_CR(port));
}
static void __tpda_enable_post_port(struct tpda_drvdata *drvdata)
{
uint32_t val;
val = tpda_readl(drvdata, TPDA_SYNCR);
/* Clear the mode */
val = val & ~BIT(12);
/* Program the counter value */
val = val | 0xFFF;
tpda_writel(drvdata, val, TPDA_SYNCR);
if (drvdata->freq_req_val)
tpda_writel(drvdata, drvdata->freq_req_val, TPDA_FREQREQ_VAL);
else
tpda_writel(drvdata, 0x0, TPDA_FREQREQ_VAL);
val = tpda_readl(drvdata, TPDA_CR);
if (drvdata->freq_req)
val = val | BIT(1);
else
val = val & ~BIT(1);
tpda_writel(drvdata, val, TPDA_CR);
}
static void __tpda_enable(struct tpda_drvdata *drvdata, int port)
{
CS_UNLOCK(drvdata->base);
TPDA_UNLOCK(drvdata);
if (!drvdata->csdev->enable)
tpda_enable_pre_port(drvdata);
if (!drvdata->enable)
__tpda_enable_pre_port(drvdata);
tpda_enable_port(drvdata, port);
__tpda_enable_port(drvdata, port);
CS_LOCK(drvdata->base);
if (!drvdata->enable)
__tpda_enable_post_port(drvdata);
TPDA_LOCK(drvdata);
}
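/*
 * A system trace ID is allocated only when the first input connection
 * comes up; while any in_conn still holds a reference the existing
 * ATID is reused.
 */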
static int tpda_alloc_trace_id(struct coresight_device *csdev)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int trace_id;
int i, nr_conns;
nr_conns = csdev->pdata->nr_inconns;
for (i = 0; i < nr_conns; i++)
if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) != 0)
return 0;
trace_id = coresight_trace_id_get_system_id();
if (trace_id < 0)
return trace_id;
drvdata->atid = trace_id;
return 0;
}
static void tpda_release_trace_id(struct coresight_device *csdev)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int i, nr_conns;
nr_conns = csdev->pdata->nr_inconns;
for (i = 0; i < nr_conns; i++)
if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) != 0)
return;
coresight_trace_id_put_system_id(drvdata->atid);
drvdata->atid = 0;
}
static int tpda_enable(struct coresight_device *csdev,
struct coresight_connection *in,
struct coresight_connection *out)
{
int ret;
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
if (atomic_read(&in->dest_refcnt) == 0)
__tpda_enable(drvdata, in->dest_port);
mutex_lock(&drvdata->lock);
if (drvdata->dclk) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret) {
mutex_unlock(&drvdata->lock);
return ret;
}
}
ret = tpda_alloc_trace_id(csdev);
if (ret < 0) {
mutex_unlock(&drvdata->lock);
return ret;
}
__tpda_enable(drvdata, in->dest_port);
drvdata->enable = true;
atomic_inc(&in->dest_refcnt);
spin_unlock(&drvdata->spinlock);
mutex_unlock(&drvdata->lock);
dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
dev_info(drvdata->dev, "TPDA inport %d enabled\n", in->dest_port);
return 0;
}
static void __tpda_disable(struct tpda_drvdata *drvdata, int port)
{
u32 val;
uint32_t val;
CS_UNLOCK(drvdata->base);
TPDA_UNLOCK(drvdata);
val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port));
val &= ~TPDA_Pn_CR_ENA;
writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
val = tpda_readl(drvdata, TPDA_Pn_CR(port));
val = val & ~BIT(0);
tpda_writel(drvdata, val, TPDA_Pn_CR(port));
CS_LOCK(drvdata->base);
TPDA_LOCK(drvdata);
}
static void tpda_disable(struct coresight_device *csdev,
@@ -90,13 +240,16 @@ static void tpda_disable(struct coresight_device *csdev,
{
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
if (atomic_dec_return(&in->dest_refcnt) == 0)
__tpda_disable(drvdata, in->dest_port);
mutex_lock(&drvdata->lock);
__tpda_disable(drvdata, in->dest_port);
drvdata->enable = false;
atomic_dec(&in->dest_refcnt);
tpda_release_trace_id(csdev);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
mutex_unlock(&drvdata->lock);
spin_unlock(&drvdata->spinlock);
dev_dbg(drvdata->dev, "TPDA inport %d disabled\n", in->dest_port);
dev_info(drvdata->dev, "TPDA inport %d disabled\n", in->dest_port);
}
static const struct coresight_ops_link tpda_link_ops = {
@@ -108,24 +261,519 @@ static const struct coresight_ops tpda_cs_ops = {
.link_ops = &tpda_link_ops,
};
static int tpda_init_default_data(struct tpda_drvdata *drvdata)
static ssize_t trig_async_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int atid;
/*
* The TPDA must have a unique atid, which identifies the TPDM trace
* sources connected to it. TPDMs connected to the same TPDA share one
* trace ID; when the TPDA packetizes, each port gets a unique channel
* number for decoding.
*/
atid = coresight_trace_id_get_system_id();
if (atid < 0)
return atid;
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%u\n",
(unsigned int)drvdata->trig_async);
}
static ssize_t trig_async_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
mutex_lock(&drvdata->lock);
if (val)
drvdata->trig_async = true;
else
drvdata->trig_async = false;
mutex_unlock(&drvdata->lock);
return size;
}
static DEVICE_ATTR_RW(trig_async_enable);
static ssize_t trig_flag_ts_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%u\n",
(unsigned int)drvdata->trig_flag_ts);
}
static ssize_t trig_flag_ts_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
mutex_lock(&drvdata->lock);
if (val)
drvdata->trig_flag_ts = true;
else
drvdata->trig_flag_ts = false;
mutex_unlock(&drvdata->lock);
return size;
}
static DEVICE_ATTR_RW(trig_flag_ts_enable);
static ssize_t trig_freq_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%u\n",
(unsigned int)drvdata->trig_freq);
}
static ssize_t trig_freq_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
mutex_lock(&drvdata->lock);
if (val)
drvdata->trig_freq = true;
else
drvdata->trig_freq = false;
mutex_unlock(&drvdata->lock);
return size;
}
static DEVICE_ATTR_RW(trig_freq_enable);
static ssize_t freq_ts_enable_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%u\n",
(unsigned int)drvdata->freq_ts);
}
static ssize_t freq_ts_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
mutex_lock(&drvdata->lock);
if (val)
drvdata->freq_ts = true;
else
drvdata->freq_ts = false;
mutex_unlock(&drvdata->lock);
return size;
}
static DEVICE_ATTR_RW(freq_ts_enable);
static ssize_t freq_req_val_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val = drvdata->freq_req_val;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t freq_req_val_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
mutex_lock(&drvdata->lock);
drvdata->freq_req_val = val;
mutex_unlock(&drvdata->lock);
return size;
}
static DEVICE_ATTR_RW(freq_req_val);
static ssize_t freq_req_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%u\n",
(unsigned int)drvdata->freq_req);
}
static ssize_t freq_req_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
mutex_lock(&drvdata->lock);
if (val)
drvdata->freq_req = true;
else
drvdata->freq_req = false;
mutex_unlock(&drvdata->lock);
return size;
}
static DEVICE_ATTR_RW(freq_req);
static ssize_t global_flush_req_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
mutex_lock(&drvdata->lock);
if (!drvdata->enable) {
mutex_unlock(&drvdata->lock);
return -EPERM;
}
TPDA_UNLOCK(drvdata);
val = tpda_readl(drvdata, TPDA_CR);
TPDA_LOCK(drvdata);
mutex_unlock(&drvdata->lock);
return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
}
static ssize_t global_flush_req_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
mutex_lock(&drvdata->lock);
if (!drvdata->enable) {
mutex_unlock(&drvdata->lock);
return -EPERM;
}
if (val) {
TPDA_UNLOCK(drvdata);
val = tpda_readl(drvdata, TPDA_CR);
val = val | BIT(0);
tpda_writel(drvdata, val, TPDA_CR);
TPDA_LOCK(drvdata);
}
mutex_unlock(&drvdata->lock);
return size;
}
static DEVICE_ATTR_RW(global_flush_req);
static ssize_t port_flush_req_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
mutex_lock(&drvdata->lock);
if (!drvdata->enable) {
mutex_unlock(&drvdata->lock);
return -EPERM;
}
TPDA_UNLOCK(drvdata);
val = tpda_readl(drvdata, TPDA_FLUSH_CR);
TPDA_LOCK(drvdata);
mutex_unlock(&drvdata->lock);
return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
}
static ssize_t port_flush_req_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
mutex_lock(&drvdata->lock);
if (!drvdata->enable) {
mutex_unlock(&drvdata->lock);
return -EPERM;
}
if (val) {
TPDA_UNLOCK(drvdata);
tpda_writel(drvdata, val, TPDA_FLUSH_CR);
TPDA_LOCK(drvdata);
}
mutex_unlock(&drvdata->lock);
return size;
}
static DEVICE_ATTR_RW(port_flush_req);
static ssize_t cmbchan_mode_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%u\n",
(unsigned int)drvdata->cmbchan_mode);
}
static ssize_t cmbchan_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
bool val;
if (kstrtobool(buf, &val))
return -EINVAL;
mutex_lock(&drvdata->lock);
if (val)
drvdata->cmbchan_mode = true;
else
drvdata->cmbchan_mode = false;
mutex_unlock(&drvdata->lock);
return size;
}
static DEVICE_ATTR_RW(cmbchan_mode);
static struct attribute *tpda_attrs[] = {
&dev_attr_trig_async_enable.attr,
&dev_attr_trig_flag_ts_enable.attr,
&dev_attr_trig_freq_enable.attr,
&dev_attr_freq_ts_enable.attr,
&dev_attr_freq_req_val.attr,
&dev_attr_freq_req.attr,
&dev_attr_global_flush_req.attr,
&dev_attr_port_flush_req.attr,
&dev_attr_cmbchan_mode.attr,
NULL,
};
static struct attribute_group tpda_attr_grp = {
.attrs = tpda_attrs,
};
static const struct attribute_group *tpda_attr_grps[] = {
&tpda_attr_grp,
NULL,
};
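/*
 * Most of the attributes above only stage values in tpda_drvdata and
 * take effect on the next enable; the two flush attributes poke the
 * hardware directly and therefore require the TPDA to be enabled.
 */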
static int tpda_parse_tc(struct tpda_drvdata *drvdata)
{
int len, port, i;
const __be32 *prop;
struct device_node *node = drvdata->dev->of_node;
prop = of_get_property(node, "qcom,tc-elem-size", &len);
if (prop) {
len /= sizeof(__be32);
if (len < 2 || len > 63 || len % 2 != 0) {
dev_err(drvdata->dev,
"Dataset TC width entries are wrong\n");
return -EINVAL;
}
for (i = 0; i < len; i++) {
port = be32_to_cpu(prop[i++]);
if (port >= TPDA_MAX_INPORTS) {
dev_err(drvdata->dev,
"Wrong port specified for TC\n");
return -EINVAL;
}
drvdata->tc_esize[port] = be32_to_cpu(prop[i]);
}
}
drvdata->atid = atid;
return 0;
}
static int tpda_parse_bc(struct tpda_drvdata *drvdata)
{
int len, port, i;
const __be32 *prop;
struct device_node *node = drvdata->dev->of_node;
prop = of_get_property(node, "qcom,bc-elem-size", &len);
if (prop) {
len /= sizeof(__be32);
if (len < 2 || len > 63 || len % 2 != 0) {
dev_err(drvdata->dev,
"Dataset BC width entries are wrong\n");
return -EINVAL;
}
for (i = 0; i < len; i++) {
port = be32_to_cpu(prop[i++]);
if (port >= TPDA_MAX_INPORTS) {
dev_err(drvdata->dev,
"Wrong port specified for BC\n");
return -EINVAL;
}
drvdata->bc_esize[port] = be32_to_cpu(prop[i]);
}
}
return 0;
}
static int tpda_parse_dsb(struct tpda_drvdata *drvdata)
{
int len, port, i;
const __be32 *prop;
struct device_node *node = drvdata->dev->of_node;
prop = of_get_property(node, "qcom,dsb-elem-size", &len);
if (prop) {
len /= sizeof(__be32);
if (len < 2 || len > 63 || len % 2 != 0) {
dev_err(drvdata->dev,
"Dataset DSB width entries are wrong\n");
return -EINVAL;
}
for (i = 0; i < len; i++) {
port = be32_to_cpu(prop[i++]);
if (port >= TPDA_MAX_INPORTS) {
dev_err(drvdata->dev,
"Wrong port specified for DSB\n");
return -EINVAL;
}
drvdata->dsb_esize[port] = be32_to_cpu(prop[i]);
}
}
return 0;
}
static int tpda_parse_cmb(struct tpda_drvdata *drvdata)
{
int len, port, i;
const __be32 *prop;
struct device_node *node = drvdata->dev->of_node;
prop = of_get_property(node, "qcom,cmb-elem-size", &len);
if (prop) {
len /= sizeof(__be32);
if (len < 2 || len > 63 || len % 2 != 0) {
dev_err(drvdata->dev,
"Dataset CMB width entries are wrong\n");
return -EINVAL;
}
for (i = 0; i < len; i++) {
port = be32_to_cpu(prop[i++]);
if (port >= TPDA_MAX_INPORTS) {
dev_err(drvdata->dev,
"Wrong port specified for CMB\n");
return -EINVAL;
}
drvdata->cmb_esize[port] = be32_to_cpu(prop[i]);
}
}
return 0;
}
static int tpda_parse_of_data(struct tpda_drvdata *drvdata)
{
int ret;
ret = tpda_parse_tc(drvdata);
if (ret) {
dev_err(drvdata->dev, "Dataset TC width entries are wrong\n");
return -EINVAL;
}
ret = tpda_parse_bc(drvdata);
if (ret) {
dev_err(drvdata->dev, "Dataset BC width entries are wrong\n");
return -EINVAL;
}
ret = tpda_parse_dsb(drvdata);
if (ret) {
dev_err(drvdata->dev, "Dataset DSB width entries are wrong\n");
return -EINVAL;
}
ret = tpda_parse_cmb(drvdata);
if (ret) {
dev_err(drvdata->dev, "Dataset CMB width entries are wrong\n");
return -EINVAL;
}
return 0;
}
static void tpda_init_default_data(struct tpda_drvdata *drvdata)
{
drvdata->freq_ts = true;
}
static bool coresight_authstatus_enabled(void __iomem *addr)
{
int ret;
unsigned int auth_val;
if (!addr)
return false;
auth_val = readl_relaxed(addr + CORESIGHT_AUTHSTATUS);
if ((BMVAL(auth_val, 0, 1) == 0x2) ||
(BMVAL(auth_val, 2, 3) == 0x2) ||
(BMVAL(auth_val, 4, 5) == 0x2) ||
(BMVAL(auth_val, 6, 7) == 0x2))
ret = false;
else
ret = true;
return ret;
}
static int tpda_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
@@ -133,8 +781,10 @@ static int tpda_probe(struct amba_device *adev, const struct amba_id *id)
struct coresight_platform_data *pdata;
struct tpda_drvdata *drvdata;
struct coresight_desc desc = { 0 };
void __iomem *base;
desc.name = coresight_alloc_device_name(&tpda_devs, dev);
if (!desc.name)
return -ENOMEM;
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
@@ -147,55 +797,66 @@ static int tpda_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->dev = &adev->dev;
dev_set_drvdata(dev, drvdata);
base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
spin_lock_init(&drvdata->spinlock);
ret = tpda_init_default_data(drvdata);
drvdata->dclk = devm_clk_get(dev, "dynamic_clk");
if (!IS_ERR(drvdata->dclk)) {
ret = clk_prepare_enable(drvdata->dclk);
if (ret)
return ret == -ETIMEDOUT ? -EPROBE_DEFER : ret;
} else
drvdata->dclk = NULL;
drvdata->base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
mutex_init(&drvdata->lock);
ret = tpda_parse_of_data(drvdata);
if (ret)
return ret;
desc.name = coresight_alloc_device_name(&tpda_devs, dev);
if (!desc.name)
return -ENOMEM;
if (!coresight_authstatus_enabled(drvdata->base))
goto err;
tpda_init_default_data(drvdata);
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.ops = &tpda_cs_ops;
desc.pdata = adev->dev.platform_data;
desc.dev = &adev->dev;
desc.access = CSDEV_ACCESS_IOMEM(base);
desc.groups = tpda_attr_grps;
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
pm_runtime_put(&adev->dev);
pm_runtime_put_sync(&adev->dev);
if (drvdata->dclk)
clk_disable_unprepare(drvdata->dclk);
dev_dbg(drvdata->dev, "TPDA initialized\n");
return 0;
err:
return -EPERM;
}
static void tpda_remove(struct amba_device *adev)
static void __exit tpda_remove(struct amba_device *adev)
{
struct tpda_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_trace_id_put_system_id(drvdata->atid);
coresight_unregister(drvdata->csdev);
}
/*
* Different TPDAs have different peripheral IDs; only bits 0-7
* differ, so those bits are ignored.
*/
static struct amba_id tpda_ids[] = {
{
.id = 0x000f0f00,
.mask = 0x000fff00,
.id = 0x0003b969,
.mask = 0x0003ffff,
.data = "TPDA",
},
{ 0, 0},
};
MODULE_DEVICE_TABLE(amba, tpda_ids);
static struct amba_driver tpda_driver = {
.drv = {

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _CORESIGHT_CORESIGHT_TPDA_H
@@ -8,28 +8,35 @@
#define TPDA_CR (0x000)
#define TPDA_Pn_CR(n) (0x004 + (n * 4))
/* Aggregator port enable bit */
#define TPDA_Pn_CR_ENA BIT(0)
#define TPDA_FPID_CR (0x084)
#define TPDA_FREQREQ_VAL (0x088)
#define TPDA_SYNCR (0x08C)
#define TPDA_FLUSH_CR (0x090)
#define TPDA_FLUSH_SR (0x094)
#define TPDA_FLUSH_ERR (0x098)
#define TPDA_MAX_INPORTS 32
/* Bits 6 ~ 12 is for atid value */
#define TPDA_CR_ATID GENMASK(12, 6)
/**
* struct tpda_drvdata - specifics associated to an TPDA component
* @base: memory mapped base address for this component.
* @dev: The device entity associated to this component.
* @csdev: component vitals needed by the framework.
* @spinlock: lock for the drvdata value.
* @enable: enable status of the component.
*/
struct tpda_drvdata {
void __iomem *base;
struct device *dev;
struct coresight_device *csdev;
spinlock_t spinlock;
u8 atid;
struct mutex lock;
bool enable;
uint32_t atid;
uint32_t bc_esize[TPDA_MAX_INPORTS];
uint32_t tc_esize[TPDA_MAX_INPORTS];
uint32_t dsb_esize[TPDA_MAX_INPORTS];
uint32_t cmb_esize[TPDA_MAX_INPORTS];
bool trig_async;
bool trig_flag_ts;
bool trig_freq;
bool freq_ts;
uint32_t freq_req_val;
bool freq_req;
bool cmbchan_mode;
struct clk *dclk;
};
#endif /* _CORESIGHT_CORESIGHT_TPDA_H */

File diff suppressed because it is too large

View File

@@ -136,7 +136,7 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
if (ret)
return ret;
return ret == -ETIMEDOUT ? -EPROBE_DEFER : ret;
}
dev_set_drvdata(dev, drvdata);

View File

@@ -249,8 +249,47 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
DUMP_ID_MAP(id_map);
}
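/*
 * Reservation lets a caller pin a specific trace ID (for instance one
 * fixed in hardware) so the dynamic allocator will not hand it out.
 */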
static int coresight_trace_id_map_reserve_id(struct coresight_trace_id_map *id_map,
int reserved_id)
{
unsigned long flags;
spin_lock_irqsave(&id_map_lock, flags);
if (IS_VALID_CS_TRACE_ID(reserved_id) &&
!test_bit(reserved_id, id_map->used_ids)) {
set_bit(reserved_id, id_map->used_ids);
spin_unlock_irqrestore(&id_map_lock, flags);
return 0;
}
spin_unlock_irqrestore(&id_map_lock, flags);
return -EBUSY;
}
static void coresight_trace_id_map_free_reserved_id(struct coresight_trace_id_map *id_map,
int reserved_id)
{
unsigned long flags;
spin_lock_irqsave(&id_map_lock, flags);
coresight_trace_id_free(reserved_id, id_map);
spin_unlock_irqrestore(&id_map_lock, flags);
}
/* API functions */
int coresight_trace_id_reserve_id(int id)
{
return coresight_trace_id_map_reserve_id(&id_map_default, id);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_reserve_id);
void coresight_trace_id_free_reserved_id(int id)
{
coresight_trace_id_map_free_reserved_id(&id_map_default, id);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_free_reserved_id);
int coresight_trace_id_get_cpu_id(int cpu)
{
return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default);

View File

@@ -153,4 +153,7 @@ void coresight_trace_id_perf_start(void);
*/
void coresight_trace_id_perf_stop(void);
int coresight_trace_id_reserve_id(int id);
void coresight_trace_id_free_reserved_id(int id);
#endif /* _CORESIGHT_TRACE_ID_H */
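A minimal sketch of the intended call pattern for the reserve API above, assuming a driver whose ATID is fixed in hardware; the value 0x40 is purely illustrative:

/* Hedged sketch: pin a fixed ATID so the allocator skips it. */
int example_pin_atid(void)
{
int ret = coresight_trace_id_reserve_id(0x40);

if (ret)
return ret; /* -EBUSY: invalid or already in use */
/* ... trace with the pinned ID ... */
coresight_trace_id_free_reserved_id(0x40);
return 0;
}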

View File

@@ -0,0 +1,498 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/coresight.h>
#include <linux/of.h>
#include "coresight-priv.h"
#include "coresight-common.h"
#include "coresight-trace-noc.h"
#include "coresight-trace-id.h"
static ssize_t flush_req_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
u32 reg;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
if (!drvdata->enable) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (val) {
reg = readl_relaxed(drvdata->base + TRACE_NOC_CTRL);
reg = reg | TRACE_NOC_CTRL_FLUSHREQ;
writel_relaxed(reg, drvdata->base + TRACE_NOC_CTRL);
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(flush_req);
static ssize_t flush_status_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
u32 val;
spin_lock(&drvdata->spinlock);
if (!drvdata->enable) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
val = readl_relaxed(drvdata->base + TRACE_NOC_CTRL);
spin_unlock(&drvdata->spinlock);
return scnprintf(buf, PAGE_SIZE, "%lx\n", BMVAL(val, 2, 2));
}
static DEVICE_ATTR_RO(flush_status);
static ssize_t flag_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%x\n", drvdata->flagType);
}
static ssize_t flag_type_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
if (val)
drvdata->flagType = FLAG_TS;
else
drvdata->flagType = FLAG;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(flag_type);
static ssize_t freq_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%x\n", drvdata->freqType);
}
static ssize_t freq_type_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
if (val)
drvdata->freqType = FREQ_TS;
else
drvdata->freqType = FREQ;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(freq_type);
static ssize_t freq_req_val_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%x\n", drvdata->freq_req_val);
}
static ssize_t freq_req_val_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val) {
spin_lock(&drvdata->spinlock);
drvdata->freq_req_val = val;
spin_unlock(&drvdata->spinlock);
}
return size;
}
static DEVICE_ATTR_RW(freq_req_val);
static ssize_t freq_ts_req_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
u32 reg;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
if (!drvdata->enable) {
spin_unlock(&drvdata->spinlock);
return -EPERM;
}
if (val) {
reg = readl_relaxed(drvdata->base + TRACE_NOC_CTRL);
if (drvdata->version == TRACE_NOC_VERSION_V2)
reg = reg | TRACE_NOC_CTRL_FREQTSREQ_V2;
else
reg = reg | TRACE_NOC_CTRL_FREQTSREQ;
writel_relaxed(reg, drvdata->base + TRACE_NOC_CTRL);
}
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_WO(freq_ts_req);
static struct attribute *trace_noc_attrs[] = {
&dev_attr_flush_req.attr,
&dev_attr_flush_status.attr,
&dev_attr_flag_type.attr,
&dev_attr_freq_type.attr,
&dev_attr_freq_req_val.attr,
&dev_attr_freq_ts_req.attr,
NULL,
};
static struct attribute_group trace_noc_attr_grp = {
.attrs = trace_noc_attrs,
};
static const struct attribute_group *trace_noc_attr_grps[] = {
&trace_noc_attr_grp,
NULL,
};
static int trace_noc_alloc_trace_id(struct coresight_device *csdev)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int trace_id;
int i, nr_conns;
nr_conns = csdev->pdata->nr_inconns;
for (i = 0; i < nr_conns; i++)
if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) != 0)
return 0;
trace_id = coresight_trace_id_get_system_id();
if (trace_id < 0)
return trace_id;
drvdata->atid = trace_id;
return 0;
}
static void trace_noc_release_trace_id(struct coresight_device *csdev)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int i, nr_conns;
nr_conns = csdev->pdata->nr_inconns;
for (i = 0; i < nr_conns; i++)
if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) != 0)
return;
coresight_trace_id_put_system_id(drvdata->atid);
drvdata->atid = 0;
}
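/*
 * Only the first enabled input connection programs ATID, sync interval
 * and frequency and sets PORTEN; later connections just take a
 * reference on the inport.
 */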
static int trace_noc_enable(struct coresight_device *csdev, struct coresight_connection *inport,
struct coresight_connection *outport)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int ret;
u32 val;
int i, nr_conns;
spin_lock(&drvdata->spinlock);
nr_conns = csdev->pdata->nr_inconns;
for (i = 0; i < nr_conns; i++) {
if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) != 0) {
atomic_inc(&inport->dest_refcnt);
spin_unlock(&drvdata->spinlock);
return 0;
}
}
ret = trace_noc_alloc_trace_id(csdev);
if (ret < 0) {
spin_unlock(&drvdata->spinlock);
return ret;
}
/* Set ATID */
writel_relaxed(drvdata->atid, drvdata->base + TRACE_NOC_XLD);
/* Config sync CR */
writel_relaxed(0xffff, drvdata->base + TRACE_NOC_SYNCR);
/* Set frequency value */
if (drvdata->freq_req_val)
writel_relaxed(drvdata->freq_req_val,
drvdata->base + TRACE_NOC_FREQVAL);
/* Set Ctrl register */
val = readl_relaxed(drvdata->base + TRACE_NOC_CTRL);
if (drvdata->version == TRACE_NOC_VERSION_V2) {
if (drvdata->flagType == FLAG_TS)
val = val | TRACE_NOC_CTRL_FLAGTYPE_V2;
else
val = val & ~TRACE_NOC_CTRL_FLAGTYPE_V2;
if (drvdata->freqType == FREQ_TS)
val = val | TRACE_NOC_CTRL_FREQTYPE_V2;
else
val = val & ~TRACE_NOC_CTRL_FREQTYPE_V2;
} else {
if (drvdata->flagType == FLAG_TS)
val = val | TRACE_NOC_CTRL_FLAGTYPE;
else
val = val & ~TRACE_NOC_CTRL_FLAGTYPE;
if (drvdata->freqType == FREQ_TS)
val = val | TRACE_NOC_CTRL_FREQTYPE;
else
val = val & ~TRACE_NOC_CTRL_FREQTYPE;
}
val = val | TRACE_NOC_CTRL_PORTEN;
writel_relaxed(val, drvdata->base + TRACE_NOC_CTRL);
atomic_inc(&inport->dest_refcnt);
drvdata->enable = true;
spin_unlock(&drvdata->spinlock);
dev_info(drvdata->dev, "Trace NOC is enabled\n");
return 0;
}
static void trace_noc_disable(struct coresight_device *csdev, struct coresight_connection *inport,
struct coresight_connection *outport)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int i, nr_conns;
spin_lock(&drvdata->spinlock);
atomic_dec(&inport->dest_refcnt);
nr_conns = csdev->pdata->nr_inconns;
for (i = 0; i < nr_conns; i++) {
if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) != 0) {
spin_unlock(&drvdata->spinlock);
return;
}
}
writel_relaxed(0x0, drvdata->base + TRACE_NOC_CTRL);
drvdata->enable = false;
trace_noc_release_trace_id(csdev);
spin_unlock(&drvdata->spinlock);
dev_info(drvdata->dev, "Trace NOC is disabled\n");
}
static const struct coresight_ops_link trace_noc_link_ops = {
.enable = trace_noc_enable,
.disable = trace_noc_disable,
};
static const struct coresight_ops trace_noc_cs_ops = {
.link_ops = &trace_noc_link_ops,
};
static int interconnect_trace_noc_enable(struct coresight_device *csdev,
struct coresight_connection *inport, struct coresight_connection *outport)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
u32 val;
int i, nr_conns;
spin_lock(&drvdata->spinlock);
nr_conns = csdev->pdata->nr_inconns;
for (i = 0; i < nr_conns; i++) {
if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) != 0) {
atomic_inc(&inport->dest_refcnt);
spin_unlock(&drvdata->spinlock);
return 0;
}
}
/* Set Ctrl register */
val = readl_relaxed(drvdata->base + TRACE_NOC_CTRL);
val = val | TRACE_NOC_CTRL_PORTEN;
writel_relaxed(val, drvdata->base + TRACE_NOC_CTRL);
drvdata->enable = true;
atomic_inc(&inport->dest_refcnt);
spin_unlock(&drvdata->spinlock);
dev_info(drvdata->dev, "Trace NOC is enabled\n");
return 0;
}
static void interconnect_trace_noc_disable(struct coresight_device *csdev,
struct coresight_connection *inport, struct coresight_connection *outport)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int i, nr_conns;
spin_lock(&drvdata->spinlock);
atomic_dec(&inport->dest_refcnt);
nr_conns = csdev->pdata->nr_inconns;
for (i = 0; i < nr_conns; i++) {
if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) != 0) {
spin_unlock(&drvdata->spinlock);
return;
}
}
writel_relaxed(0x0, drvdata->base + TRACE_NOC_CTRL);
drvdata->enable = false;
spin_unlock(&drvdata->spinlock);
dev_info(drvdata->dev, "Trace NOC is disabled\n");
}
static const struct coresight_ops_link interconnect_trace_noc_link_ops = {
.enable = interconnect_trace_noc_enable,
.disable = interconnect_trace_noc_disable,
};
static const struct coresight_ops interconnect_trace_noc_cs_ops = {
.link_ops = &interconnect_trace_noc_link_ops,
};
static void trace_noc_init_default_data(struct trace_noc_drvdata *drvdata)
{
drvdata->freqType = FREQ_TS;
drvdata->freqTsReq = true;
}
static int trace_noc_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata;
struct trace_noc_drvdata *drvdata;
struct coresight_desc desc = { 0 };
desc.name = coresight_alloc_device_name(&trace_noc_devs, dev);
if (!desc.name)
return -ENOMEM;
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
adev->dev.platform_data = pdata;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
drvdata->dev = &adev->dev;
dev_set_drvdata(dev, drvdata);
drvdata->base = devm_ioremap_resource(dev, &adev->res);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
if (of_property_read_bool(dev->of_node, "qcom,trace-noc-v2"))
drvdata->version = TRACE_NOC_VERSION_V2;
if (of_property_read_bool(dev->of_node, "qcom,interconnect-trace-noc")) {
drvdata->atid = 0;
desc.ops = &interconnect_trace_noc_cs_ops;
} else {
trace_noc_init_default_data(drvdata);
desc.ops = &trace_noc_cs_ops;
desc.groups = trace_noc_attr_grps;
}
desc.type = CORESIGHT_DEV_TYPE_LINK;
desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
desc.pdata = adev->dev.platform_data;
desc.dev = &adev->dev;
spin_lock_init(&drvdata->spinlock);
drvdata->csdev = coresight_register(&desc);
if (IS_ERR(drvdata->csdev))
return PTR_ERR(drvdata->csdev);
pm_runtime_put_sync(&adev->dev);
dev_dbg(drvdata->dev, "Trace NOC initialized\n");
return 0;
}
static void __exit trace_noc_remove(struct amba_device *adev)
{
struct trace_noc_drvdata *drvdata = dev_get_drvdata(&adev->dev);
coresight_unregister(drvdata->csdev);
}
static struct amba_id trace_noc_ids[] = {
{
.id = 0x000f0c00,
.mask = 0x000fff00,
},
{ 0, 0},
};
MODULE_DEVICE_TABLE(amba, trace_noc_ids);
static struct amba_driver trace_noc_driver = {
.drv = {
.name = "coresight-trace-noc",
.owner = THIS_MODULE,
.suppress_bind_attrs = true,
},
.probe = trace_noc_probe,
.remove = trace_noc_remove,
.id_table = trace_noc_ids,
};
module_amba_driver(trace_noc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Trace NOC driver");

View File

@@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define TRACE_NOC_CTRL 0x008
#define TRACE_NOC_XLD 0x010
#define TRACE_NOC_FREQVAL 0x018
#define TRACE_NOC_SYNCR 0x020
/* Enable generation of output ATB traffic. */
#define TRACE_NOC_CTRL_PORTEN BIT(0)
/* Write 1 to initiate a flush sequence. */
#define TRACE_NOC_CTRL_FLUSHREQ BIT(1)
/* 0: sequence in progress; 1: sequence has been completed. */
#define TRACE_NOC_CTRL_FLUSHSTATUS BIT(2)
/* Write 1 to issue a FREQ or FREQ_TS packet. */
#define TRACE_NOC_CTRL_FREQTSREQ BIT(5)
#define TRACE_NOC_CTRL_FREQTSREQ_V2 BIT(6)
/* Sets the type of issued ATB FLAG packets. 0: 'FLAG' packets; 1: 'FLAG_TS' packets. */
#define TRACE_NOC_CTRL_FLAGTYPE BIT(7)
#define TRACE_NOC_CTRL_FLAGTYPE_V2 BIT(8)
/* Sets the type of issued ATB FREQ packets. 0: 'FREQ' packets; 1: 'FREQ_TS' packets. */
#define TRACE_NOC_CTRL_FREQTYPE BIT(8)
#define TRACE_NOC_CTRL_FREQTYPE_V2 BIT(9)
DEFINE_CORESIGHT_DEVLIST(trace_noc_devs, "traceNoc");
enum trace_noc_version {
TRACE_NOC_VERSION_V1,
TRACE_NOC_VERSION_V2,
};
/**
* struct trace_noc_drvdata - specifics associated to a trace noc component
* @base: memory mapped base address for this component.
* @dev: device node for trace_noc_drvdata.
* @csdev: component vitals needed by the framework.
* @spinlock: lock for the drvdata.
* @enable: status of the component.
* @flushReq: Issue a flush request or not.
* @freqTsReq: Issue a freq_ts request or not.
* @atid: id for the trace packet.
* @freq_req_val: set frequency values carried by 'FREQ' and 'FREQ_TS' packets.
* @flushStatus: 0: sequence in progress; 1: sequence has been completed.
* @freqType: 0: 'FREQ' packets; 1: 'FREQ_TS' packets.
* @flagType: 0: 'FLAG' packets; 1: 'FLAG_TS' packets.
*/
struct trace_noc_drvdata {
void __iomem *base;
struct device *dev;
struct coresight_device *csdev;
spinlock_t spinlock;
enum trace_noc_version version;
bool enable;
bool flushReq;
bool freqTsReq;
u32 atid;
u32 freq_req_val;
u32 flushStatus;
u32 freqType;
u32 flagType;
};
/* freq type */
enum freq_type {
FREQ,
FREQ_TS,
};
/* flag type */
enum flag_type {
FLAG,
FLAG_TS,
};

View File

@@ -0,0 +1,930 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/bitmap.h>
#include <linux/of.h>
#include <linux/coresight.h>
#include <linux/scmi_protocol.h>
#include <linux/qcom_scmi_vendor.h>
#include <linux/platform_device.h>
#include "coresight-priv.h"
#include "coresight-common.h"
#include "coresight-trace-noc.h"
#include "coresight-trace-id.h"
#define PARAM_GET_PLATFORM_CONFIG 0x1
#define PARAM_GET_UETM_CONFIG 0x2
#define PARAM_SET_UETM_CONFIG 0x1
#define START_UETM_TRACE 0x1
#define STOP_UETM_TRACE 0x1
#define SCMI_UETM_ALGO_STR (0x5545544d) /* "UETM" in ASCII */
#define UETM_MAX_STATE 4
#define UETM_MAX_CFG 2
#define UETM_UNCORE_LANE 4
#define LANE_IDX(n) ((n) / 2)
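/* Two adjacent lanes share one copy of each per-lane config register. */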
#define UETM_ATB_CFG_ATID_MASK GENMASK(6, 0)
static struct scmi_device *sdev;
static const struct qcom_scmi_vendor_ops *ops;
static struct scmi_protocol_handle *ph;
static uint32_t uetm_cnt;
struct __packed uetm_platform_config {
uint32_t uetm_cnt;
};
struct __packed uetm_config {
uint8_t lane;
uint64_t base_address;
uint32_t size;
uint8_t cluster_id;
uint8_t core_id;
};
struct __packed uetm_idx {
uint32_t idx;
};
struct uetm_reg_config {
u64 ocla_cfg1;
u64 atb_cfg;
u64 uetm_cfg;
u64 dmask_cfg[UETM_MAX_STATE][UETM_MAX_CFG];
u64 tmask_cfg[UETM_MAX_STATE][UETM_MAX_CFG];
u64 dmatch_cfg[UETM_MAX_STATE][UETM_MAX_CFG];
u64 tmatch_cfg[UETM_MAX_STATE][UETM_MAX_CFG];
u64 st_cfg[UETM_MAX_STATE];
u64 cntr_cfg[UETM_MAX_STATE][UETM_MAX_CFG];
u64 ocla_cfg2;
u64 ocla_cfg;
u64 diff_dmask_cfg[UETM_MAX_CFG];
};
struct uetm_drvdata {
void __iomem *base;
struct coresight_device *csdev;
spinlock_t spinlock;
uint8_t lane;
uint64_t base_address;
uint32_t uetm_id;
u8 traceid;
uint32_t size;
uint8_t core_id;
uint8_t cluster_id;
uint8_t state_idx;
uint8_t lane_idx;
bool enable;
bool uncore_uetm;
struct uetm_reg_config *config;
};
DEFINE_CORESIGHT_DEVLIST(uetm_devs, "uetm");
static int uetm_scmi_get_uetm_platform_config(void)
{
struct uetm_platform_config rx_value;
int ret;
ret = ops->get_param(ph, &rx_value, SCMI_UETM_ALGO_STR,
PARAM_GET_PLATFORM_CONFIG, 0,
sizeof(struct uetm_platform_config));
if (ret)
return ret;
return rx_value.uetm_cnt;
}
static int uetm_scmi_get_uetm_config(struct uetm_drvdata *drvdata)
{
int ret = 0;
uint32_t idx;
struct uetm_config rx_value;
for (idx = 0; idx < uetm_cnt; idx++) {
memcpy(&rx_value, (void *)&idx, sizeof(uint32_t));
ret = ops->get_param(ph, &rx_value, SCMI_UETM_ALGO_STR,
PARAM_GET_UETM_CONFIG, sizeof(uint32_t),
sizeof(struct uetm_config));
if (ret)
return ret;
if (drvdata->uncore_uetm) {
if (rx_value.cluster_id == drvdata->cluster_id
&& rx_value.lane == UETM_UNCORE_LANE)
break;
} else {
if (rx_value.cluster_id == drvdata->cluster_id
&& rx_value.core_id == drvdata->core_id)
break;
}
}
if (idx == uetm_cnt)
return -EINVAL;
drvdata->base_address = rx_value.base_address;
drvdata->size = rx_value.size;
drvdata->lane = rx_value.lane;
drvdata->uetm_id = idx;
return 0;
}
static int uetm_scmi_set_uetm_config(uint32_t uetm_id)
{
struct uetm_idx uetm_idx;
uetm_idx.idx = uetm_id;
return ops->set_param(ph, &uetm_idx, SCMI_UETM_ALGO_STR,
PARAM_SET_UETM_CONFIG, sizeof(struct uetm_idx));
}
static int uetm_scmi_start_uetm_trace(uint32_t uetm_id)
{
struct uetm_idx uetm_idx;
uetm_idx.idx = uetm_id;
return ops->start_activity(ph, &uetm_idx, SCMI_UETM_ALGO_STR,
START_UETM_TRACE, sizeof(struct uetm_idx));
}
static int uetm_scmi_stop_uetm_trace(uint32_t uetm_id)
{
struct uetm_idx uetm_idx;
uetm_idx.idx = uetm_id;
return ops->stop_activity(ph, &uetm_idx, SCMI_UETM_ALGO_STR,
STOP_UETM_TRACE, sizeof(struct uetm_idx));
}
static int uetm_scmi_init(void)
{
int val, ret = 0;
sdev = get_qcom_scmi_device();
if (IS_ERR(sdev)) {
ret = PTR_ERR(sdev);
pr_err("Error getting scmi_dev ret = %d\n", ret);
return ret;
}
ops = sdev->handle->devm_protocol_get(sdev,
QCOM_SCMI_VENDOR_PROTOCOL, &ph);
if (IS_ERR(ops)) {
pr_err("Error getting qcom_smci_vendor_protocal\n");
return -EFAULT;
}
val = uetm_scmi_get_uetm_platform_config();
if (val > 0)
uetm_cnt = val;
else
uetm_cnt = 0;
return ret;
}
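/*
 * All UETM programming flows through the QTI SCMI vendor protocol: the
 * driver stages register values locally and CPUCP applies them when
 * the configuration is pushed or trace is started.
 */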
static ssize_t reset_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
spin_lock(&drvdata->spinlock);
memset(config, 0, sizeof(struct uetm_reg_config));
drvdata->lane_idx = 0;
drvdata->state_idx = 0;
spin_unlock(&drvdata->spinlock);
return size;
};
static DEVICE_ATTR_WO(reset);
static ssize_t diff_dmask_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
lane_idx = LANE_IDX(drvdata->lane_idx);
val = config->diff_dmask_cfg[lane_idx];
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t diff_dmask_cfg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
lane_idx = LANE_IDX(drvdata->lane_idx);
config->diff_dmask_cfg[lane_idx] = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(diff_dmask_cfg);
static ssize_t ocla_cfg1_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
val = config->ocla_cfg1;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t ocla_cfg1_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
config->ocla_cfg1 = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ocla_cfg1);
static ssize_t ocla_cfg2_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
val = config->ocla_cfg2;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t ocla_cfg2_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
config->ocla_cfg2 = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ocla_cfg2);
static ssize_t cntr_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
lane_idx = LANE_IDX(drvdata->lane_idx);
val = config->cntr_cfg[drvdata->state_idx][lane_idx];
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t cntr_cfg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
lane_idx = LANE_IDX(drvdata->lane_idx);
config->cntr_cfg[drvdata->state_idx][lane_idx] = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(cntr_cfg);
static ssize_t st_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
val = config->st_cfg[drvdata->state_idx];
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t st_cfg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
config->st_cfg[drvdata->state_idx] = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(st_cfg);
static ssize_t tmatch_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
lane_idx = LANE_IDX(drvdata->lane_idx);
val = config->tmatch_cfg[drvdata->state_idx][lane_idx];
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t tmatch_cfg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
lane_idx = LANE_IDX(drvdata->lane_idx);
config->tmatch_cfg[drvdata->state_idx][lane_idx] = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(tmatch_cfg);
static ssize_t dmatch_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
lane_idx = LANE_IDX(drvdata->lane_idx);
val = config->dmatch_cfg[drvdata->state_idx][lane_idx];
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t dmatch_cfg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
lane_idx = LANE_IDX(drvdata->lane_idx);
config->dmatch_cfg[drvdata->state_idx][lane_idx] = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(dmatch_cfg);
static ssize_t tmask_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
lane_idx = LANE_IDX(drvdata->lane_idx);
val = config->tmask_cfg[drvdata->state_idx][lane_idx];
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t tmask_cfg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
lane_idx = LANE_IDX(drvdata->lane_idx);
config->tmask_cfg[drvdata->state_idx][lane_idx] = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(tmask_cfg);
static ssize_t dmask_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
lane_idx = LANE_IDX(drvdata->lane_idx);
val = config->dmask_cfg[drvdata->state_idx][lane_idx];
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t dmask_cfg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
int lane_idx;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
lane_idx = LANE_IDX(drvdata->lane_idx);
config->dmask_cfg[drvdata->state_idx][lane_idx] = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(dmask_cfg);
static ssize_t uetm_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
val = config->uetm_cfg;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t uetm_cfg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
config->uetm_cfg = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(uetm_cfg);
static ssize_t atb_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
val = config->atb_cfg;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t atb_cfg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
config->atb_cfg = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(atb_cfg);
static ssize_t ocla_cfg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
val = config->ocla_cfg;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static ssize_t ocla_cfg_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct uetm_reg_config *config = drvdata->config;
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
spin_lock(&drvdata->spinlock);
config->ocla_cfg = (u64)val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(ocla_cfg);
static ssize_t lane_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%#hhx\n", drvdata->lane_idx);
}
static ssize_t lane_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
/* lane indices run 0 .. lane - 1 */
if (val >= drvdata->lane)
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->lane_idx = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(lane);
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
return scnprintf(buf, PAGE_SIZE, "%#hhx\n", drvdata->state_idx);
}
static ssize_t state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
if (kstrtoul(buf, 16, &val))
return -EINVAL;
if (val >= UETM_MAX_STATE)
return -EINVAL;
spin_lock(&drvdata->spinlock);
drvdata->state_idx = val;
spin_unlock(&drvdata->spinlock);
return size;
}
static DEVICE_ATTR_RW(state);
static ssize_t traceid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
unsigned long val;
struct uetm_drvdata *drvdata = dev_get_drvdata(dev->parent);
val = drvdata->traceid;
return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}
static DEVICE_ATTR_RO(traceid);
static struct attribute *uetm_attrs[] = {
&dev_attr_reset.attr,
&dev_attr_diff_dmask_cfg.attr,
&dev_attr_ocla_cfg1.attr,
&dev_attr_ocla_cfg2.attr,
&dev_attr_cntr_cfg.attr,
&dev_attr_st_cfg.attr,
&dev_attr_tmatch_cfg.attr,
&dev_attr_dmatch_cfg.attr,
&dev_attr_tmask_cfg.attr,
&dev_attr_dmask_cfg.attr,
&dev_attr_uetm_cfg.attr,
&dev_attr_atb_cfg.attr,
&dev_attr_ocla_cfg.attr,
&dev_attr_lane.attr,
&dev_attr_state.attr,
&dev_attr_traceid.attr,
NULL,
};
static const struct attribute_group uetm_group = {
.attrs = uetm_attrs,
};
const struct attribute_group *uetm_groups[] = {
&uetm_group,
NULL,
};
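/*
 * All of the attributes above parse their input as hex (kstrtoul with
 * base 16). The lane and state attributes only select which slot of
 * the per-state, per-lane config arrays the other attributes address;
 * a typical flow (device name illustrative) is:
 *   echo 0x1  > /sys/bus/coresight/devices/uetm0/state
 *   echo 0x2  > /sys/bus/coresight/devices/uetm0/lane
 *   echo 0xff > /sys/bus/coresight/devices/uetm0/tmatch_cfg
 */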
static void uetm_store_config(struct uetm_drvdata *drvdata)
{
int i, j;
u64 *base = drvdata->base;
int cfg_num;
struct uetm_reg_config *config = drvdata->config;
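/*
 * Each 64-bit config word appears to cover a pair of lanes (hence
 * lane / 2 words per state, matching the LANE_IDX() mapping used by
 * the sysfs handlers above); this is inferred from the indexing, not
 * a documented layout.
 */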
cfg_num = drvdata->lane / 2;
if (drvdata->uncore_uetm)
*base++ = config->ocla_cfg1;
else
*base++ = config->ocla_cfg;
*base++ = config->atb_cfg;
*base++ = config->uetm_cfg;
for (i = 0; i < UETM_MAX_STATE; i++)
for (j = 0; j < cfg_num; j++)
*base++ = config->dmask_cfg[i][j];
for (i = 0; i < UETM_MAX_STATE; i++)
for (j = 0; j < cfg_num; j++)
*base++ = config->tmask_cfg[i][j];
for (i = 0; i < UETM_MAX_STATE; i++)
for (j = 0; j < cfg_num; j++)
*base++ = config->dmatch_cfg[i][j];
for (i = 0; i < UETM_MAX_STATE; i++)
for (j = 0; j < cfg_num; j++)
*base++ = config->tmatch_cfg[i][j];
for (i = 0; i < UETM_MAX_STATE; i++)
*base++ = config->st_cfg[i];
for (i = 0; i < UETM_MAX_STATE; i++)
for (j = 0; j < cfg_num; j++)
*base++ = config->cntr_cfg[i][j];
if (drvdata->uncore_uetm) {
*base++ = config->ocla_cfg2;
*base++ = config->ocla_cfg;
}
for (j = 0; j < cfg_num; j++)
*base++ = config->diff_dmask_cfg[j];
/* Ensure the config writes complete before CPUCP is told to read them */
mb();
}
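/*
 * Enable sequence: reserve a system trace ID, stamp it into the ATB
 * config, write the full register image to the shared buffer, then
 * ask CPUCP over SCMI to apply the config and start tracing.
 */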
static int uetm_enable(struct coresight_device *csdev,
struct perf_event *event, enum cs_mode mode)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct uetm_reg_config *config = drvdata->config;
int ret, trace_id;
if (drvdata->enable) {
dev_err(&csdev->dev,
"uetm %d already enabled,Skipping enable\n",
drvdata->uetm_id);
return -EBUSY;
}
trace_id = coresight_trace_id_get_system_id();
if (trace_id < 0)
return trace_id;
drvdata->traceid = (u8)trace_id;
config->atb_cfg &= ~UETM_ATB_CFG_ATID_MASK;
config->atb_cfg |= drvdata->traceid;
spin_lock(&drvdata->spinlock);
uetm_store_config(drvdata);
spin_unlock(&drvdata->spinlock);
coresight_csr_set_etr_atid(csdev, drvdata->traceid, true);
ret = uetm_scmi_set_uetm_config(drvdata->uetm_id);
if (ret)
goto release_atid;
ret = uetm_scmi_start_uetm_trace(drvdata->uetm_id);
if (ret)
goto release_atid;
drvdata->enable = true;
return 0;
release_atid:
coresight_trace_id_put_system_id(drvdata->traceid);
coresight_csr_set_etr_atid(csdev, drvdata->traceid, false);
return ret;
}
static void uetm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
struct uetm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
uetm_scmi_stop_uetm_trace(drvdata->uetm_id);
coresight_trace_id_put_system_id(drvdata->traceid);
coresight_csr_set_etr_atid(csdev, drvdata->traceid, false);
drvdata->enable = false;
}
static const struct coresight_ops_source uetm_source_ops = {
.enable = uetm_enable,
.disable = uetm_disable,
};
static const struct coresight_ops uetm_cs_ops = {
.source_ops = &uetm_source_ops,
};
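/*
 * Probe reads the cluster (and, for core UETMs, the core) identity
 * from DT, fetches the shared config buffer's address and size from
 * CPUCP over SCMI, maps it, and registers the UETM as a software
 * CoreSight source.
 */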
static int uetm_probe(struct platform_device *pdev)
{
int ret;
struct device *dev = &pdev->dev;
struct coresight_platform_data *pdata = NULL;
struct uetm_drvdata *drvdata;
struct uetm_reg_config *config;
uint32_t value;
struct coresight_desc desc = { 0 };
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
if (!config)
return -ENOMEM;
drvdata->config = config;
dev_set_drvdata(dev, drvdata);
ret = of_property_read_u32(pdev->dev.of_node, "cluster",
&value);
if (ret)
return ret;
drvdata->cluster_id = (uint8_t)value;
drvdata->uncore_uetm = of_property_read_bool(pdev->dev.of_node,
"qcom,uncore_uetm");
if (!drvdata->uncore_uetm) {
ret = of_property_read_u32(pdev->dev.of_node, "core",
&value);
if (ret)
return ret;
drvdata->core_id = (uint8_t)value;
}
ret = uetm_scmi_get_uetm_config(drvdata);
if (ret)
return ret;
spin_lock_init(&drvdata->spinlock);
drvdata->base = devm_ioremap(dev, drvdata->base_address, drvdata->size);
if (!drvdata->base)
return -ENOMEM;
pdata = coresight_get_platform_data(dev);
if (IS_ERR(pdata))
return PTR_ERR(pdata);
desc.name = coresight_alloc_device_name(&uetm_devs, dev);
if (!desc.name)
return -ENOMEM;
desc.dev = dev;
desc.pdata = pdata;
desc.ops = &uetm_cs_ops;
desc.groups = uetm_groups;
desc.type = CORESIGHT_DEV_TYPE_SOURCE;
desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
drvdata->csdev = coresight_register(&desc);
ret = PTR_ERR_OR_ZERO(drvdata->csdev);
if (ret)
return ret;
return 0;
}
static int uetm_remove(struct platform_device *pdev)
{
struct uetm_drvdata *drvdata = platform_get_drvdata(pdev);
coresight_unregister(drvdata->csdev);
return 0;
}
static const struct of_device_id uetm_match[] = {
{.compatible = "qcom,coresight-uetm"},
{}
};
static struct platform_driver uetm_driver = {
.probe = uetm_probe,
.remove = uetm_remove,
.driver = {
.name = "coresight-uetm",
.of_match_table = uetm_match,
.suppress_bind_attrs = true,
},
};
static int __init uetm_init(void)
{
int ret;
ret = uetm_scmi_init();
if (ret)
return ret;
return platform_driver_register(&uetm_driver);
}
static void __exit uetm_exit(void)
{
platform_driver_unregister(&uetm_driver);
}
module_init(uetm_init);
module_exit(uetm_exit);
MODULE_DESCRIPTION("CoreSight UETM driver");
MODULE_LICENSE("GPL");

View File

@@ -40,6 +40,20 @@ config STM_PROTO_SYS_T
If you don't know what this is, say N.
config STM_PROTO_OST
tristate "MIPI OST STM framing protocol driver"
default STM
help
This is an implementation of the MIPI OST protocol to be used
over the STP transport. In addition to the data payload, it
carries entity metadata, giving a better means of identifying
the trace source.
The receiving side must be able to decode this protocol in
addition to the MIPI STP, in order to extract the data.
If you don't know what this is, say N.
config STM_DUMMY
tristate "Dummy STM driver"
help

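For reference, a minimal host-side sketch (not part of the patch) of how the 32-bit OST header word emitted by the p_ost driver later in this commit could be decoded; the field positions follow the driver's macros, while the program itself is hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Header as emitted for an ftrace frame: token | version | entity. */
	uint32_t header = 0x00011010;

	printf("token    %#x\n", (unsigned)(header & 0xff));         /* 0x10 start-simple */
	printf("version  %#x\n", (unsigned)((header >> 8) & 0xff));  /* 0x10 MIPI v1 */
	printf("entity   %#x\n", (unsigned)((header >> 16) & 0xff)); /* 0x01 ftrace, 0xEE diag */
	printf("protocol %#x\n", (unsigned)((header >> 24) & 0xff)); /* 0x0 control */
	return 0;
}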
View File

@@ -5,9 +5,11 @@ stm_core-y := core.o policy.o
obj-$(CONFIG_STM_PROTO_BASIC) += stm_p_basic.o
obj-$(CONFIG_STM_PROTO_SYS_T) += stm_p_sys-t.o
obj-$(CONFIG_STM_PROTO_OST) += stm_p_ost.o
stm_p_basic-y := p_basic.o
stm_p_sys-t-y := p_sys-t.o
stm_p_ost-y := p_ost.o
obj-$(CONFIG_STM_DUMMY) += dummy_stm.o

View File

@@ -27,7 +27,7 @@ static struct stm_console {
},
};
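/*
 * notrace keeps this path from recursing into the tracer when trace
 * data is itself routed over STM; __nocfi exempts the indirect
 * ->packet() call chain from kernel CFI checking (rationale inferred
 * from the pattern of these changes, not stated in the patch).
 */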
static void notrace __nocfi
stm_console_write(struct console *con, const char *buf, unsigned len)
{
struct stm_console *sc = container_of(con, struct stm_console, console);

View File

@@ -573,7 +573,7 @@ stm_assign_first_policy(struct stm_device *stm, struct stm_output *output,
* @buf: data payload buffer
* @count: data payload size
*/
ssize_t notrace __nocfi stm_data_write(struct stm_data *data, unsigned int m,
unsigned int c, bool ts_first, const void *buf,
size_t count)
{
@@ -598,7 +598,7 @@ ssize_t notrace stm_data_write(struct stm_data *data, unsigned int m,
}
EXPORT_SYMBOL_GPL(stm_data_write);
static ssize_t notrace __nocfi
stm_write(struct stm_device *stm, struct stm_output *output,
unsigned int chan, const char *buf, size_t count)
{
@@ -1124,13 +1124,13 @@ static void stm_source_link_drop(struct stm_source_device *src)
int idx, ret;
retry:
idx = srcu_read_lock_notrace(&stm_source_srcu);
/*
* The stm device will be valid for the duration of this
* read section, but the link may change before we grab
* the src::link_lock in __stm_source_link_drop().
*/
stm = srcu_dereference_notrace(src->link, &stm_source_srcu);
ret = 0;
if (stm) {
@@ -1139,7 +1139,7 @@ retry:
mutex_unlock(&stm->link_mutex);
}
srcu_read_unlock_notrace(&stm_source_srcu, idx);
/* if it did change, retry */
if (ret == -EAGAIN)
@@ -1154,11 +1154,11 @@ static ssize_t stm_source_link_show(struct device *dev,
struct stm_device *stm;
int idx, ret;
idx = srcu_read_lock_notrace(&stm_source_srcu);
stm = srcu_dereference_notrace(src->link, &stm_source_srcu);
ret = sprintf(buf, "%s\n",
stm ? dev_name(&stm->dev) : "<none>");
srcu_read_unlock_notrace(&stm_source_srcu, idx);
return ret;
}
@@ -1280,7 +1280,7 @@ void stm_source_unregister_device(struct stm_source_data *data)
}
EXPORT_SYMBOL_GPL(stm_source_unregister_device);
int notrace __nocfi stm_source_write(struct stm_source_data *data,
unsigned int chan,
const char *buf, size_t count)
{
@@ -1294,15 +1294,15 @@ int notrace stm_source_write(struct stm_source_data *data,
if (chan >= src->output.nr_chans)
return -EINVAL;
idx = srcu_read_lock_notrace(&stm_source_srcu);
stm = srcu_dereference_notrace(src->link, &stm_source_srcu);
if (stm)
count = stm_write(stm, &src->output, chan, buf, count);
else
count = -ENODEV;
srcu_read_unlock_notrace(&stm_source_srcu, idx);
return count;
}

View File

@@ -33,7 +33,7 @@ static struct stm_ftrace {
* @buf: buffer containing the data packet
* @len: length of the data packet
*/
static void notrace __nocfi
stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len)
{
struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace);

View File

@@ -0,0 +1,187 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copied from drivers/hwtracing/stm/p_sys-t.c as of commit d69d5e83110f
* ("stm class: Add MIPI SyS-T protocol support").
*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2018, Intel Corporation.
*
* MIPI OST framing protocol for STM devices.
*/
#include <linux/configfs.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/stm.h>
#include <linux/sched/clock.h>
#include "stm.h"
#define OST_TOKEN_STARTSIMPLE (0x10)
#define OST_VERSION_MIPI1 (0x10 << 8)
#define OST_ENTITY_FTRACE (0x01 << 16)
#define OST_ENTITY_DIAG (0xEE << 16)
#define OST_CONTROL_PROTOCOL (0x0 << 24)
/* Entity bits are OR'd in per frame in ost_write(); keeping them out of
 * the base header avoids mixing the ftrace and diag entity values. */
#define DATA_HEADER (OST_TOKEN_STARTSIMPLE | OST_VERSION_MIPI1 | \
OST_CONTROL_PROTOCOL)
#define STM_MAKE_VERSION(ma, mi) (((ma) << 8) | (mi))
#define STM_HEADER_MAGIC (0x5953)
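/*
 * 32-bit OST header word layout (entity OR'd in per frame):
 * [7:0] token, [15:8] version, [23:16] entity, [31:24] protocol;
 * e.g. 0x00011010 for an ftrace frame.
 */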
enum ost_entity_type {
OST_ENTITY_TYPE_NONE,
OST_ENTITY_TYPE_FTRACE,
OST_ENTITY_TYPE_DIAG,
};
static const char * const str_ost_entity_type[] = {
[OST_ENTITY_TYPE_NONE] = "none",
[OST_ENTITY_TYPE_FTRACE] = "ftrace",
[OST_ENTITY_TYPE_DIAG] = "diag",
};
struct ost_t_policy_node {
enum ost_entity_type entity_type;
};
struct ost_t_output {
struct ost_t_policy_node node;
};
static int ost_t_output_open(void *priv, struct stm_output *output)
{
struct ost_t_policy_node *pn = priv;
struct ost_t_output *opriv;
opriv = kzalloc(sizeof(*opriv), GFP_ATOMIC);
if (!opriv)
return -ENOMEM;
memcpy(&opriv->node, pn, sizeof(opriv->node));
output->pdrv_private = opriv;
return 0;
}
static void ost_t_output_close(struct stm_output *output)
{
kfree(output->pdrv_private);
}
static ssize_t ost_t_policy_entity_show(struct config_item *item,
char *page)
{
struct ost_t_policy_node *pn = to_pdrv_policy_node(item);
return scnprintf(page, PAGE_SIZE, "%s\n",
str_ost_entity_type[pn->entity_type]);
}
static ssize_t
ost_t_policy_entity_store(struct config_item *item, const char *page,
size_t count)
{
struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
struct ost_t_policy_node *pn = to_pdrv_policy_node(item);
char str[10] = "";
mutex_lock(mutexp);
if (sscanf(page, "%9s", str) != 1) {
mutex_unlock(mutexp);
return -EINVAL;
}
/* Update the policy node while still holding the configfs mutex */
if (!strcmp(str, str_ost_entity_type[OST_ENTITY_TYPE_FTRACE])) {
pn->entity_type = OST_ENTITY_TYPE_FTRACE;
} else if (!strcmp(str, str_ost_entity_type[OST_ENTITY_TYPE_DIAG])) {
pn->entity_type = OST_ENTITY_TYPE_DIAG;
} else {
mutex_unlock(mutexp);
return -EINVAL;
}
mutex_unlock(mutexp);
return count;
}
CONFIGFS_ATTR(ost_t_policy_, entity);
static struct configfs_attribute *ost_t_policy_attrs[] = {
&ost_t_policy_attr_entity,
NULL,
};
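/*
 * The entity is chosen per stp-policy node through configfs, e.g.
 * (policy path illustrative):
 *   echo ftrace > /config/stp-policy/<policy>/<node>/entity
 */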
static ssize_t notrace __nocfi ost_write(struct stm_data *data,
struct stm_output *output, unsigned int chan,
const char *buf, size_t count)
{
unsigned int c = output->channel + chan;
unsigned int m = output->master;
const unsigned char nil = 0;
u32 header = DATA_HEADER;
u8 trc_hdr[24];
ssize_t sz;
struct ost_t_output *op = output->pdrv_private;
if (op->node.entity_type == OST_ENTITY_TYPE_FTRACE)
header |= OST_ENTITY_FTRACE;
else
header |= OST_ENTITY_DIAG;
/*
* STP framing rules for OST frames:
* * the first packet of the OST frame is marked;
* * the last packet is a FLAG.
* Message layout: HEADER / DATA / TAIL
*/
/* HEADER */
sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_MARKED,
4, (u8 *)&header);
if (sz <= 0)
return sz;
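/* 24-byte trace header: version(2) magic(2) cpu(4) sched_clock(8) tgid(8) */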
*(uint16_t *)(trc_hdr) = STM_MAKE_VERSION(0, 3);
*(uint16_t *)(trc_hdr + 2) = STM_HEADER_MAGIC;
*(uint32_t *)(trc_hdr + 4) = raw_smp_processor_id();
*(uint64_t *)(trc_hdr + 8) = sched_clock();
*(uint64_t *)(trc_hdr + 16) = task_tgid_nr(get_current());
if (op->node.entity_type != OST_ENTITY_TYPE_DIAG) {
sz = stm_data_write(data, m, c, false, trc_hdr, sizeof(trc_hdr));
if (sz <= 0)
return sz;
}
/* DATA */
sz = stm_data_write(data, m, c, false, buf, count);
/* TAIL */
if (sz > 0)
data->packet(data, m, c, STP_PACKET_FLAG,
STP_PACKET_TIMESTAMPED, 0, &nil);
return sz;
}
static const struct stm_protocol_driver ost_pdrv = {
.owner = THIS_MODULE,
.name = "p_ost",
.priv_sz = sizeof(struct ost_t_policy_node),
.write = ost_write,
.policy_attr = ost_t_policy_attrs,
.output_open = ost_t_output_open,
.output_close = ost_t_output_close,
};
static int ost_stm_init(void)
{
return stm_register_protocol(&ost_pdrv);
}
static void ost_stm_exit(void)
{
stm_unregister_protocol(&ost_pdrv);
}
module_init(ost_stm_init);
module_exit(ost_stm_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MIPI Open System Trace STM framing protocol driver");