sm8750: init kernel modules repo

This commit is contained in:
2025-08-11 12:21:01 +02:00
parent 2681143b87
commit facad83b01
8851 changed files with 6894561 additions and 0 deletions

View File

@@ -0,0 +1,304 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2012-2014, 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/sysfs.h>
#include <linux/remoteproc.h>
#define BOOT_CMD 1
#define IMAGE_UNLOAD_CMD 0
#define CDSP_SUBSYS_DOWN 0
#define CDSP_SUBSYS_LOADED 1
static ssize_t cdsp_boot_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count);
struct cdsp_loader_private {
void *pil_h;
struct kobject *boot_cdsp_obj;
struct attribute_group *attr_group;
};
static struct kobj_attribute cdsp_boot_attribute =
__ATTR(boot, 0220, NULL, cdsp_boot_store);
static struct attribute *attrs[] = {
&cdsp_boot_attribute.attr,
NULL,
};
static u32 cdsp_state = CDSP_SUBSYS_DOWN;
static struct platform_device *cdsp_private;
static void cdsp_loader_unload(struct platform_device *pdev);
static int cdsp_loader_do(struct platform_device *pdev)
{
struct cdsp_loader_private *priv = NULL;
phandle rproc_phandle;
int rc = 0, sz = 0;
const char *img_name;
if (!pdev) {
pr_err("%s: Platform device null\n", __func__);
goto fail;
}
if (!pdev->dev.of_node) {
dev_err(&pdev->dev,
"%s: Device tree information missing\n", __func__);
goto fail;
}
rc = of_property_read_string(pdev->dev.of_node,
"qcom,proc-img-to-load",
&img_name);
if (rc)
goto fail;
if (!strcmp(img_name, "cdsp")) {
/* cdsp_state always returns "0".*/
if (cdsp_state == CDSP_SUBSYS_DOWN) {
priv = platform_get_drvdata(pdev);
if (!priv) {
dev_err(&pdev->dev,
"%s: Private data get failed\n", __func__);
goto fail;
}
sz = of_property_read_u32(pdev->dev.of_node, "qcom,rproc-handle",
&rproc_phandle);
if (sz) {
pr_err("%s: of_property_read failed, returned value %d\n",
__func__, sz);
dev_err(&pdev->dev, "error reading rproc phandle\n");
goto fail;
}
priv->pil_h = rproc_get_by_phandle(rproc_phandle);
if (!priv->pil_h) {
dev_err(&pdev->dev, "rproc not found\n");
goto fail;
}
dev_dbg(&pdev->dev, "%s: calling rproc_boot on %s\n",
__func__, img_name);
rc = rproc_boot(priv->pil_h);
if (rc) {
dev_err(&pdev->dev, "%s: rproc_boot failed with error %d\n",
__func__, rc);
goto fail;
}
/* Set the state of the CDSP.*/
cdsp_state = CDSP_SUBSYS_LOADED;
} else if (cdsp_state == CDSP_SUBSYS_LOADED) {
dev_dbg(&pdev->dev,
"%s: CDSP state = 0x%x\n", __func__, cdsp_state);
}
dev_dbg(&pdev->dev, "%s: CDSP image is loaded\n", __func__);
return rc;
}
fail:
if (pdev)
dev_err(&pdev->dev,
"%s: CDSP image loading failed\n", __func__);
return rc;
}
/*
 * sysfs "boot" attribute store handler: "1" boots the CDSP image,
 * "0" unloads it. Any other value is accepted but ignored.
 */
static ssize_t cdsp_boot_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf,
			       size_t count)
{
	uint32_t cmd = 0;
	int rc;

	rc = kstrtou32(buf, 0, &cmd);
	if (rc) {
		pr_debug("%s: invalid arguments for cdsp_loader.\n", __func__);
		return rc;
	}

	switch (cmd) {
	case BOOT_CMD:
		pr_debug("%s: going to call cdsp_loader_do\n", __func__);
		cdsp_loader_do(cdsp_private);
		break;
	case IMAGE_UNLOAD_CMD:
		pr_debug("%s: going to call cdsp_unloader\n", __func__);
		cdsp_loader_unload(cdsp_private);
		break;
	default:
		break;
	}

	return count;
}
/*
 * Shut down the CDSP rproc if it was booted via this driver and drop
 * the reference taken by rproc_get_by_phandle() in cdsp_loader_do()
 * (previously the reference was leaked on every unload).
 */
static void cdsp_loader_unload(struct platform_device *pdev)
{
	struct cdsp_loader_private *priv = NULL;

	priv = platform_get_drvdata(pdev);
	if (!priv)
		return;

	if (priv->pil_h && cdsp_state == CDSP_SUBSYS_LOADED) {
		dev_dbg(&pdev->dev, "%s: calling subsystem_put\n", __func__);
		rproc_shutdown(priv->pil_h);
		rproc_put(priv->pil_h);
		priv->pil_h = NULL;
		cdsp_state = CDSP_SUBSYS_DOWN;
	}
}
/*
 * Create the /sys/kernel/boot_cdsp node and its "boot" attribute used
 * to trigger CDSP boot/unload from user space.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int cdsp_loader_init_sysfs(struct platform_device *pdev)
{
	int ret = -EINVAL;
	struct cdsp_loader_private *priv = NULL;

	cdsp_private = NULL;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	priv->pil_h = NULL;
	priv->boot_cdsp_obj = NULL;
	priv->attr_group = devm_kzalloc(&pdev->dev,
					sizeof(*(priv->attr_group)),
					GFP_KERNEL);
	if (!priv->attr_group) {
		ret = -ENOMEM;
		goto error_return;
	}

	priv->attr_group->attrs = attrs;

	priv->boot_cdsp_obj = kobject_create_and_add("boot_cdsp", kernel_kobj);
	if (!priv->boot_cdsp_obj) {
		dev_err(&pdev->dev, "%s: sysfs create and add failed\n",
			__func__);
		ret = -ENOMEM;
		goto error_return;
	}

	ret = sysfs_create_group(priv->boot_cdsp_obj, priv->attr_group);
	if (ret) {
		dev_err(&pdev->dev, "%s: sysfs create group failed %d\n",
			__func__, ret);
		goto error_return;
	}

	cdsp_private = pdev;

	return 0;

error_return:
	if (priv->boot_cdsp_obj) {
		/*
		 * kobject_put() (not kobject_del()) releases the reference
		 * taken by kobject_create_and_add(); kobject_del() alone
		 * leaks the kobject memory.
		 */
		kobject_put(priv->boot_cdsp_obj);
		priv->boot_cdsp_obj = NULL;
	}
	/* ret is always non-zero here */
	dev_err(&pdev->dev, "%s failed with ret %d\n",
		__func__, ret);
	return ret;
}
static int cdsp_loader_remove(struct platform_device *pdev)
{
struct cdsp_loader_private *priv = NULL;
priv = platform_get_drvdata(pdev);
if (!priv)
return 0;
if (priv->pil_h) {
rproc_shutdown(priv->pil_h);
priv->pil_h = NULL;
cdsp_state = CDSP_SUBSYS_DOWN;
}
if (priv->boot_cdsp_obj) {
sysfs_remove_group(priv->boot_cdsp_obj, priv->attr_group);
kobject_del(priv->boot_cdsp_obj);
priv->boot_cdsp_obj = NULL;
}
return 0;
}
/*
 * Platform driver probe: verify the DT-referenced rproc is registered
 * (defer probe until it is), then create the sysfs boot interface.
 *
 * Return: 0 on success, -EPROBE_DEFER if the rproc is not yet
 * available, other negative errno on failure.
 */
static int cdsp_loader_probe(struct platform_device *pdev)
{
	phandle rproc_phandle;
	struct property *prop = NULL;
	int size = 0;
	struct rproc *cdsp = NULL;
	int ret = 0;

	prop = of_find_property(pdev->dev.of_node, "qcom,rproc-handle", &size);
	if (!prop) {
		dev_err(&pdev->dev, "%s: error reading rproc phandle\n", __func__);
		return -ENOPARAM;
	}

	rproc_phandle = be32_to_cpup(prop->value);
	cdsp = rproc_get_by_phandle(rproc_phandle);
	if (!cdsp) {
		dev_err(&pdev->dev, "%s: rproc not found\n", __func__);
		return -EPROBE_DEFER;
	}
	/*
	 * The handle was acquired only to confirm the rproc exists; release
	 * it here (was leaked). cdsp_loader_do() takes its own reference
	 * at boot time.
	 */
	rproc_put(cdsp);

	ret = cdsp_loader_init_sysfs(pdev);
	if (ret != 0) {
		dev_err(&pdev->dev, "%s: Error in initing sysfs\n", __func__);
		return ret;
	}

	return 0;
}
static const struct of_device_id cdsp_loader_dt_match[] = {
{ .compatible = "qcom,cdsp-loader" },
{ }
};
MODULE_DEVICE_TABLE(of, cdsp_loader_dt_match);
static struct platform_driver cdsp_loader_driver = {
.driver = {
.name = "cdsp-loader",
.of_match_table = cdsp_loader_dt_match,
},
.probe = cdsp_loader_probe,
.remove = cdsp_loader_remove,
};
/* Module entry point: register the cdsp-loader platform driver. */
static int __init cdsp_loader_init(void)
{
return platform_driver_register(&cdsp_loader_driver);
}
module_init(cdsp_loader_init);
/* Module exit point: unregister the platform driver. */
static void __exit cdsp_loader_exit(void)
{
platform_driver_unregister(&cdsp_loader_driver);
}
module_exit(cdsp_loader_exit);
MODULE_DESCRIPTION("CDSP Loader module");
MODULE_LICENSE("GPL v2");

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,333 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018, Linaro Limited
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/pm_qos.h>
#include "../include/uapi/misc/fastrpc.h"
#include <linux/of_reserved_mem.h>
#include "fastrpc_shared.h"
#include <linux/soc/qcom/pdr.h>
#include <linux/delay.h>
void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx);
void fastrpc_update_gctx(struct fastrpc_channel_ctx *cctx, int flag);
void fastrpc_lowest_capacity_corecount(struct device *dev, struct fastrpc_channel_ctx *cctx);
int fastrpc_init_privileged_gids(struct device *dev, char *prop_name,
struct gid_list *gidlist);
int fastrpc_setup_service_locator(struct fastrpc_channel_ctx *cctx, char *client_name,
char *service_name, char *service_path, int spd_session);
void fastrpc_register_wakeup_source(struct device *dev,
const char *client_name, struct wakeup_source **device_wake_source);
int fastrpc_mmap_remove_ssr(struct fastrpc_channel_ctx *cctx);
void fastrpc_queue_pd_status(struct fastrpc_user *fl, int domain, int status, int sessionid);
/*
 * Return the fastrpc channel context stored as drvdata on the given
 * device's parent (set on the rpmsg device in fastrpc_rpmsg_probe()).
 */
struct fastrpc_channel_ctx* get_current_channel_ctx(struct device *dev)
{
return dev_get_drvdata(dev->parent);
}
/*
 * fastrpc_rpmsg_probe() - Bring up a fastrpc channel when the rpmsg
 * channel for a DSP domain appears.
 *
 * Resolves the DT "label" to a domain id, allocates and initializes the
 * channel context, populates child platform devices (SMMU context
 * banks), registers the fastrpc char device(s), sets up static-PD
 * service locators, and registers wakeup sources.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
	struct device *rdev = &rpdev->dev;
	struct fastrpc_channel_ctx *data;
	int i, err, domain_id = -1, vmcount;
	const char *domain;
	bool secure_dsp;
	unsigned int vmids[FASTRPC_MAX_VMIDS];

	dev_info(rdev, "%s started\n", __func__);
	err = of_property_read_string(rdev->of_node, "label", &domain);
	if (err) {
		dev_info(rdev, "FastRPC Domain not specified in DT\n");
		return err;
	}

	/* Map the DT label to a numeric domain id */
	for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
		if (!strcmp(domains[i], domain)) {
			domain_id = i;
			break;
		}
	}

	if (domain_id < 0) {
		dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
		return -EINVAL;
	}

	if (of_reserved_mem_device_init_by_idx(rdev, rdev->of_node, 0))
		dev_info(rdev, "no reserved DMA memory for FASTRPC\n");

	vmcount = of_property_read_variable_u32_array(rdev->of_node,
		"qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
	if (vmcount < 0)
		vmcount = 0;
	else if (!qcom_scm_is_available())
		return -EPROBE_DEFER;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* best-effort: failure is logged but does not abort probe */
	err = fastrpc_init_privileged_gids(rdev, "qcom,fastrpc-gids", &data->gidlist);
	if (err)
		dev_err(rdev, "Privileged gids init failed.\n");

	if (vmcount) {
		data->vmcount = vmcount;
		data->perms = BIT(QCOM_SCM_VMID_HLOS);
		for (i = 0; i < data->vmcount; i++) {
			data->vmperms[i].vmid = vmids[i];
			data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
		}
	}

	atomic_set(&data->teardown, 0);
	secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
	data->secure = secure_dsp;
	of_property_read_u32(rdev->of_node, "qcom,rpc-latency-us",
			     &data->qos_latency);
	fastrpc_lowest_capacity_corecount(rdev, data);
	if (data->lowest_capacity_core_count > 0 &&
	    of_property_read_bool(rdev->of_node, "qcom,single-core-latency-vote"))
		data->lowest_capacity_core_count = 1;

	kref_init(&data->refcount);
	dev_set_drvdata(&rpdev->dev, data);
	rdev->dma_mask = &data->dma_mask;
	dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
	INIT_LIST_HEAD(&data->users);
	INIT_LIST_HEAD(&data->gmaps);
	INIT_LIST_HEAD(&data->rootheap_bufs.list);
	mutex_init(&data->wake_mutex);
	spin_lock_init(&data->lock);
	spin_lock_init(&(data->gmsg_log.tx_lock));
	spin_lock_init(&(data->gmsg_log.rx_lock));
	idr_init(&data->ctx_idr);
	ida_init(&data->tgid_frpc_ida);
	init_completion(&data->ssr_complete);
	init_waitqueue_head(&data->ssr_wait_queue);
	data->domain_id = domain_id;
	data->max_sess_per_proc = FASTRPC_MAX_SESSIONS_PER_PROCESS;
	data->rpdev = rpdev;

	err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
	if (err)
		goto populate_error;

	switch (domain_id) {
	case ADSP_DOMAIN_ID:
	case MDSP_DOMAIN_ID:
	case SDSP_DOMAIN_ID:
		/* Unsigned PD offloading is only supported on CDSP*/
		data->unsigned_support = false;
		err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
		if (err)
			goto fdev_error;
		data->cpuinfo_todsp = FASTRPC_CPUINFO_DEFAULT;
		break;
	case CDSP_DOMAIN_ID:
		data->unsigned_support = true;
		/* Create both device nodes so that we can allow both Signed and Unsigned PD */
		err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
		if (err)
			goto fdev_error;
		err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
		if (err)
			goto fdev_error;
		data->cpuinfo_todsp = FASTRPC_CPUINFO_EARLY_WAKEUP;
		break;
	default:
		err = -EINVAL;
		goto fdev_error;
	}

	if (domain_id == ADSP_DOMAIN_ID) {
		err = fastrpc_setup_service_locator(data, AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME,
			AUDIO_PDR_ADSP_SERVICE_NAME, ADSP_AUDIOPD_NAME, 0);
		if (err)
			goto fdev_error;
		err = fastrpc_setup_service_locator(data, SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME,
			SENSORS_PDR_ADSP_SERVICE_NAME, ADSP_SENSORPD_NAME, 1);
		if (err)
			goto fdev_error;
		err = fastrpc_setup_service_locator(data, OIS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME,
			OIS_PDR_ADSP_SERVICE_NAME, ADSP_OISPD_NAME, 2);
		if (err)
			goto fdev_error;
	} else if (domain_id == SDSP_DOMAIN_ID) {
		err = fastrpc_setup_service_locator(data, SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME,
			SENSORS_PDR_SLPI_SERVICE_NAME, SLPI_SENSORPD_NAME, 0);
		if (err)
			goto fdev_error;
	}

	mutex_lock(&data->wake_mutex);
	if (data->fdevice)
		fastrpc_register_wakeup_source(data->fdevice->miscdev.this_device,
			FASTRPC_NON_SECURE_WAKE_SOURCE_CLIENT_NAME, &data->wake_source);
	if (data->secure_fdevice)
		fastrpc_register_wakeup_source(data->secure_fdevice->miscdev.this_device,
			FASTRPC_SECURE_WAKE_SOURCE_CLIENT_NAME, &data->wake_source_secure);
	mutex_unlock(&data->wake_mutex);

	fastrpc_update_gctx(data, 1);
	dev_info(rdev, "Opened rpmsg channel for %s", domain);

	return 0;

fdev_error:
	if (data->fdevice)
		misc_deregister(&data->fdevice->miscdev);
	if (data->secure_fdevice)
		misc_deregister(&data->secure_fdevice->miscdev);
	/*
	 * fdev_error is only reachable after of_platform_populate()
	 * succeeded; undo it so child platform devices are not leaked
	 * (this depopulate was missing).
	 */
	of_platform_depopulate(rdev);
populate_error:
	kfree(data->gidlist.gids);
	data->gidlist.gids = NULL;
	kfree(data);
	return err;
}
/*
 * fastrpc_rpmsg_remove() - Tear down a fastrpc channel when the rpmsg
 * channel goes down (e.g. on subsystem restart).
 *
 * Marks the channel for teardown so no new invocations start, notifies
 * and cancels all waiting users, removes the char devices and PDR
 * handles, waits for in-flight invocations to drain, frees per-user
 * SMMU mappings, and releases channel resources.
 *
 * NOTE(review): dev_info() uses cctx->dev, whose assignment is not
 * visible in this file — confirm it is set before remove can run.
 */
static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
struct fastrpc_user *user;
unsigned long flags;
int i = 0;
dev_info(cctx->dev, "%s started", __func__);
/* No invocations past this point */
spin_lock_irqsave(&cctx->lock, flags);
atomic_set(&cctx->teardown, 1);
cctx->staticpd_status = false;
list_for_each_entry(user, &cctx->users, user) {
fastrpc_queue_pd_status(user, cctx->domain_id, FASTRPC_DSP_SSR, user->sessionid);
fastrpc_notify_users(user);
/* cancel all waiting dspsignals */
fastrpc_ssr_dspsignal_cancel_wait(user);
}
spin_unlock_irqrestore(&cctx->lock, flags);
if (cctx->fdevice)
misc_deregister(&cctx->fdevice->miscdev);
if (cctx->secure_fdevice)
misc_deregister(&cctx->secure_fdevice->miscdev);
/* Release protection-domain-restart handles for all static PDs */
for (i = 0; i < FASTRPC_MAX_SPD; i++) {
if (cctx->spd[i].pdrhandle)
pdr_handle_release(cctx->spd[i].pdrhandle);
}
spin_lock_irqsave(&cctx->lock, flags);
/*
* If there are other ongoing remote invocations, wait for them to
* complete before cleaning up the channel resources, to avoid UAF.
*/
while (cctx->invoke_cnt > 0) {
spin_unlock_irqrestore(&cctx->lock, flags);
wait_event_interruptible(cctx->ssr_wait_queue,
cctx->invoke_cnt == 0);
spin_lock_irqsave(&cctx->lock, flags);
}
spin_unlock_irqrestore(&cctx->lock, flags);
/*
* As remote channel is down, corresponding SMMU devices will also
* be removed. So free all SMMU mappings of every process using this
* channel to avoid any UAF later.
*/
/*
* NOTE(review): this iteration runs without cctx->lock; if
* fastrpc_free_user() can unlink the entry from cctx->users this
* needs list_for_each_entry_safe() under the lock — confirm.
*/
list_for_each_entry(user, &cctx->users, user) {
fastrpc_free_user(user);
}
mutex_lock(&cctx->wake_mutex);
if (cctx->wake_source) {
wakeup_source_unregister(cctx->wake_source);
cctx->wake_source = NULL;
}
if (cctx->wake_source_secure) {
wakeup_source_unregister(cctx->wake_source_secure);
cctx->wake_source_secure = NULL;
}
mutex_unlock(&cctx->wake_mutex);
dev_info(cctx->dev, "Closing rpmsg channel for %s", domains[cctx->domain_id]);
kfree(cctx->gidlist.gids);
of_platform_depopulate(&rpdev->dev);
fastrpc_mmap_remove_ssr(cctx);
cctx->dev = NULL;
cctx->rpdev = NULL;
// Wake up all process releases, if waiting for SSR to complete
complete_all(&cctx->ssr_complete);
fastrpc_update_gctx(cctx, 0);
/* drop the reference taken by kref_init() in probe */
fastrpc_channel_ctx_put(cctx);
}
/*
 * rpmsg receive callback: forward an incoming message from the DSP to
 * the fastrpc response handler. priv and addr are unused; the handler's
 * return value is propagated to the rpmsg core.
 */
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
int len, void *priv, u32 addr)
{
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
return fastrpc_handle_rpc_response(cctx, data, len);
}
static const struct of_device_id fastrpc_rpmsg_of_match[] = {
{ .compatible = "qcom,fastrpc" },
{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);
static struct rpmsg_driver fastrpc_driver = {
.probe = fastrpc_rpmsg_probe,
.remove = fastrpc_rpmsg_remove,
.callback = fastrpc_rpmsg_callback,
.drv = {
.name = "qcom,fastrpc",
.of_match_table = fastrpc_rpmsg_of_match,
},
};
/*
 * Send an RPC message to the DSP over the channel's rpmsg endpoint.
 * Refuses with -EPIPE once channel teardown has begun; an -EIO from
 * rpmsg (channel gone due to SSR) is also reported as -EPIPE.
 */
int fastrpc_transport_send(struct fastrpc_channel_ctx *cctx, void *rpc_msg, uint32_t rpc_msg_size)
{
	int ret;

	if (atomic_read(&cctx->teardown))
		return -EPIPE;

	ret = rpmsg_send(cctx->rpdev->ept, rpc_msg, rpc_msg_size);
	if (ret == -EIO) {
		pr_err("fastrpc: failed to send message due to SSR\n");
		ret = -EPIPE;
	}

	return ret;
}
/*
 * Register the fastrpc rpmsg driver.
 * Return: 0 on success, negative errno on failure.
 */
int fastrpc_transport_init(void)
{
	int err;

	err = register_rpmsg_driver(&fastrpc_driver);
	if (err < 0) {
		pr_err("fastrpc: failed to register rpmsg driver\n");
		return err;
	}

	return 0;
}
/* Unregister the fastrpc rpmsg driver. */
void fastrpc_transport_deinit(void)
{
	unregister_rpmsg_driver(&fastrpc_driver);
}

View File

@@ -0,0 +1,955 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2018, Linaro Limited
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __FASTRPC_SHARED_H__
#define __FASTRPC_SHARED_H__
#include <linux/rpmsg.h>
#include <linux/uaccess.h>
#include <linux/qrtr.h>
#include <net/sock.h>
#include <linux/workqueue.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/version.h>
#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
#include <linux/cpu.h>
#include <linux/firmware/qcom/qcom_scm.h>
#else
#include <linux/qcom_scm.h>
#endif
#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/
#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_SESSIONS_PER_PROCESS 4
/* Max number of SMMU context banks in a pool */
#define FASTRPC_MAX_CB_POOL 7
#define FASTRPC_MAX_SPD 4
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
#define FASTRPC_MAX_CRCLIST 64
#define FASTRPC_KERNEL_PERF_LIST (PERF_KEY_MAX)
#define FASTRPC_DSP_PERF_LIST 12
#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_DSP_UTILITIES_HANDLE 2
#define FASTRPC_MAX_STATIC_HANDLE (20)
#define INIT_FILELEN_MAX (5 * 1024 * 1024)
#define INIT_FILE_NAMELEN_MAX (128)
#define FASTRPC_DEVICE_NAME "fastrpc"
#define SESSION_ID_INDEX (30)
#define SESSION_ID_MASK (1 << SESSION_ID_INDEX)
#define MAX_FRPC_TGID 64
#define COPY_BUF_WARN_LIMIT (512*1024)
#define SMMU_4GB_ADDRESS_SPACE 0xFFFFFFFF
#define SMMU_4K 0x1000
#define SMMU_2M 0x200000
#define SMMU_1G 0x40000000
/*
 * Round the size up to a multiple of an IOMMU page size: the granule is
 * 1G for sizes above 1G, 2M for sizes above 2M, else 4K (the IOMMU has
 * only the 4K, 2M and 1G page sizes). E.g. 3MB is aligned up to 4MB,
 * while 1MB is already 4K-aligned and is returned unchanged.
 */
#define SMMU_ALIGN(size) ({ \
u64 align_size = 0; \
if (size > SMMU_1G) \
align_size = SMMU_1G; \
else if (size > SMMU_2M) \
align_size = SMMU_2M; \
else \
align_size = SMMU_4K; \
ALIGN(size, align_size); \
})
/*
* Default SMMU CB device index
* Used to log messages on this SMMU device
*/
#define DEFAULT_SMMU_IDX 0
/*
* Fastrpc context ID bit-map:
*
* bits 0-3 : type of remote PD
* bit 4 : type of job (sync/async)
* bit 5 : reserved
* bits 6-15 : IDR id
* bits 16-63 : job id counter
*/
/* Starting position of idr in context id */
#define FASTRPC_CTXID_IDR_POS (6)
/* Number of idr bits in context id */
#define FASTRPC_CTXID_IDR_BITS (10)
/* Max idr value */
#define FASTRPC_CTX_MAX (1 << FASTRPC_CTXID_IDR_BITS)
/* Bit-mask for idr */
#define FASTRPC_CTXID_IDR_MASK (FASTRPC_CTX_MAX - 1)
/* Macro to pack idr into context id */
#define FASTRPC_PACK_IDR_IN_CTXID(ctxid, idr) (ctxid | ((idr & \
FASTRPC_CTXID_IDR_MASK) << FASTRPC_CTXID_IDR_POS))
/* Macro to extract idr from context id */
#define FASTRPC_GET_IDR_FROM_CTXID(ctxid) ((ctxid >> FASTRPC_CTXID_IDR_POS) & \
FASTRPC_CTXID_IDR_MASK)
/* Number of pd bits in context id (starting pos 0) */
#define FASTRPC_CTXID_PD_BITS (4)
/* Bit-mask for pd type */
#define FASTRPC_CTXID_PD_MASK ((1 << FASTRPC_CTXID_PD_BITS) - 1)
/* Macro to pack pd type into context id */
#define FASTRPC_PACK_PD_IN_CTXID(ctxid, pd) (ctxid | (pd & \
FASTRPC_CTXID_PD_MASK))
/* Starting position of job id counter in context id */
#define FASTRPC_CTXID_JOBID_POS (16)
/* Macro to pack job id counter into context id */
#define FASTRPC_PACK_JOBID_IN_CTXID(ctxid, jobid) (ctxid | \
(jobid << FASTRPC_CTXID_JOBID_POS))
/* Macro to extract ctxid (mask pd type) from response context */
#define FASTRPC_GET_CTXID_FROM_RSP_CTX(rsp_ctx) (rsp_ctx & \
~FASTRPC_CTXID_PD_MASK)
/* Maximum buffers cached in cached buffer list */
#define FASTRPC_MAX_CACHED_BUFS (32)
#define FASTRPC_MAX_CACHE_BUF_SIZE (8*1024*1024)
/* Max no. of persistent headers pre-allocated per user process */
#define FASTRPC_MAX_PERSISTENT_HEADERS (8)
/* Process status notifications from DSP will be sent with this unique context */
#define FASTRPC_NOTIF_CTX_RESERVED 0xABCDABCD
#define FASTRPC_UNIQUE_ID_CONST 1000
/* Add memory to static PD pool, protection thru XPU */
#define ADSP_MMAP_HEAP_ADDR 4
/* MAP static DMA buffer on DSP User PD */
#define ADSP_MMAP_DMA_BUFFER 6
/* Add memory to static PD pool protection thru hypervisor */
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
/* Add memory to userPD pool, for user heap */
#define ADSP_MMAP_ADD_PAGES 0x1000
/* Add memory to userPD pool, for LLC heap */
#define ADSP_MMAP_ADD_PAGES_LLC 0x3000
/* Map persistent header buffer on DSP */
#define ADSP_MMAP_PERSIST_HDR 0x4000
/* Fastrpc attribute for no mapping of fd */
#define FASTRPC_ATTR_NOMAP (16)
/* This flag is used to skip CPU mapping */
#define FASTRPC_MAP_FD_NOMAP (16)
/* Map the DMA handle in the invoke call for backward compatibility */
#define FASTRPC_MAP_LEGACY_DMA_HANDLE 0x20000
#define DSP_UNSUPPORTED_API (0x80000414)
/* MAX NUMBER of DSP ATTRIBUTES SUPPORTED */
#define FASTRPC_MAX_DSP_ATTRIBUTES (256)
#define FASTRPC_MAX_DSP_ATTRIBUTES_LEN (sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)
/* Retrives number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
/* Retrives number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)
/* Retrives number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)
/* Retrives number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)
#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \
REMOTE_SCALARS_OUTBUFS(sc) + \
REMOTE_SCALARS_INHANDLES(sc)+ \
REMOTE_SCALARS_OUTHANDLES(sc))
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
(((attr & 0x07) << 29) | \
((method & 0x1f) << 24) | \
((in & 0xff) << 16) | \
((out & 0xff) << 8) | \
((oin & 0x0f) << 4) | \
(oout & 0x0f))
#define FASTRPC_SCALARS(method, in, out) \
FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
#define FASTRPC_CREATE_PROCESS_NARGS 6
#define FASTRPC_CREATE_STATIC_PROCESS_NARGS 3
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH 0
#define FASTRPC_RMID_INIT_RELEASE 1
#define FASTRPC_RMID_INIT_MMAP 4
#define FASTRPC_RMID_INIT_MUNMAP 5
#define FASTRPC_RMID_INIT_CREATE 6
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
#define FASTRPC_RMID_INIT_MEM_MAP 10
#define FASTRPC_RMID_INIT_MEM_UNMAP 11
#define FASTRPC_RMID_INIT_MAX (20)
/*
* Num of pages shared with process spawn call.
* Page 1 : init-mem buf
* Page 2 : proc attrs debug buf
* Page 3 : rootheap buf
* Page 4 : proc_init shared buf
*/
#define NUM_PAGES_WITH_SHARED_BUF 2
#define NUM_PAGES_WITH_ROOTHEAP_BUF 3
#define NUM_PAGES_WITH_PROC_INIT_SHAREDBUF 4
#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device_node, miscdev)
/* Length of glink transaction history to store */
#define GLINK_MSG_HISTORY_LEN (128)
#define FASTRPC_RSP_VERSION2 2
/* Early wake up poll completion number received from remoteproc */
#define FASTRPC_EARLY_WAKEUP_POLL (0xabbccdde)
/* Poll response number from remote processor for call completion */
#define FASTRPC_POLL_RESPONSE (0xdecaf)
/* timeout in us for polling until memory barrier */
#define FASTRPC_POLL_TIME_MEM_UPDATE (500)
/* timeout in us for busy polling after early response from remoteproc */
#define FASTRPC_POLL_TIME (4000)
/* timeout in us for polling completion signal after user early hint */
#define FASTRPC_USER_EARLY_HINT_TIMEOUT (500)
/* CPU feature information to DSP */
#define FASTRPC_CPUINFO_DEFAULT (0)
#define FASTRPC_CPUINFO_EARLY_WAKEUP (1)
/* Maximum PM timeout that can be voted through fastrpc */
#define FASTRPC_MAX_PM_TIMEOUT_MS 50
#define FASTRPC_NON_SECURE_WAKE_SOURCE_CLIENT_NAME "fastrpc-non_secure"
#define FASTRPC_SECURE_WAKE_SOURCE_CLIENT_NAME "fastrpc-secure"
#ifndef topology_cluster_id
#define topology_cluster_id(cpu) topology_physical_package_id(cpu)
#endif
#define FASTRPC_DSPSIGNAL_TIMEOUT_NONE 0xffffffff
#define FASTRPC_DSPSIGNAL_NUM_SIGNALS 1024
#define FASTRPC_DSPSIGNAL_GROUP_SIZE 256
/* Macro to return PDR status */
#define IS_PDR(fl) (fl->spd && fl->spd->pdrcount != fl->spd->prevpdrcount)
/* Macro to return SSR status */
#define IS_SSR(fl) (fl && fl->cctx && atomic_read(&fl->cctx->teardown))
#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsp"
#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"
#define ADSP_AUDIOPD_NAME "msm/adsp/audio_pd"
#define SENSORS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_adsp"
#define SENSORS_PDR_ADSP_SERVICE_NAME "tms/servreg"
#define ADSP_SENSORPD_NAME "msm/adsp/sensor_pd"
#define SENSORS_PDR_SLPI_SERVICE_LOCATION_CLIENT_NAME "sensors_pdr_slpi"
#define SENSORS_PDR_SLPI_SERVICE_NAME SENSORS_PDR_ADSP_SERVICE_NAME
#define SLPI_SENSORPD_NAME "msm/slpi/sensor_pd"
#define OIS_PDR_ADSP_SERVICE_LOCATION_CLIENT_NAME "ois_pdr_adsprpc"
#define OIS_PDR_ADSP_SERVICE_NAME "tms/servreg"
#define ADSP_OISPD_NAME "msm/adsp/ois_pd"
#define PERF_END ((void)0)
#define PERF(enb, cnt, ff) \
{\
struct timespec64 startT = {0};\
uint64_t *counter = cnt;\
if (enb && counter) {\
ktime_get_real_ts64(&startT);\
} \
ff ;\
if (enb && counter) {\
*counter += getnstimediff(&startT);\
} \
}
#define GET_COUNTER(perf_ptr, offset) \
(perf_ptr != NULL ?\
(((offset >= 0) && (offset < PERF_KEY_MAX)) ?\
(uint64_t *)(perf_ptr + offset)\
: (uint64_t *)NULL) : (uint64_t *)NULL)
/* Registered QRTR service ID */
#define FASTRPC_REMOTE_SERVER_SERVICE_ID 5012
/*
* Fastrpc remote server instance ID bit-map:
*
* bits 0-1 : channel ID
* bits 2-7 : reserved
* bits 8-9 : remote domains (SECURE_PD, GUEST_OS)
* bits 10-31 : reserved
*/
#define REMOTE_DOMAIN_INSTANCE_INDEX (8)
#define GET_SERVER_INSTANCE(remote_domain, cid) \
((remote_domain << REMOTE_DOMAIN_INSTANCE_INDEX) | cid)
#define GET_CID_FROM_SERVER_INSTANCE(remote_server_instance) \
(remote_server_instance & 0x3)
/* Maximun received fastprc packet size */
#define FASTRPC_SOCKET_RECV_SIZE sizeof(union rsp)
#define FIND_DIGITS(number) ({ \
unsigned int count = 0, i= number; \
while(i != 0) { \
i /= 10; \
count++; \
} \
count; \
})
#define COUNT_OF(number) (number == 0 ? 1 : FIND_DIGITS(number))
/*
* Process types on remote subsystem
* Always add new PD types at the end, before MAX_PD_TYPE
*/
#define DEFAULT_UNUSED 0 /* pd type not configured for context banks */
#define ROOT_PD 1 /* Root PD */
#define AUDIO_STATICPD 2 /* ADSP Audio Static PD */
#define SENSORS_STATICPD 3 /* ADSP Sensors Static PD */
#define SECURE_STATICPD 4 /* CDSP Secure Static PD */
#define OIS_STATICPD 5 /* ADSP OIS Static PD */
#define CPZ_USERPD 6 /* CDSP CPZ USER PD */
#define USERPD 7 /* DSP User Dynamic PD */
#define GUEST_OS_SHARED 8 /* Legacy Guest OS Shared */
#define USER_UNSIGNEDPD_POOL 9 /* DSP User Dynamic Unsigned PD pool */
#define MAX_PD_TYPE 10 /* Max PD type */
/* Attributes for internal purposes. Clients cannot query these */
enum fastrpc_internal_attributes {
/* DMA handle reverse RPC support */
DMA_HANDLE_REVERSE_RPC_CAP = 129,
ROOTPD_RPC_HEAP_SUPPORT = 132,
};
/* Remote-domain identifiers packed into the server instance ID
 * (see REMOTE_DOMAIN_INSTANCE_INDEX / GET_SERVER_INSTANCE).
 */
enum fastrpc_remote_domains_id {
SECURE_PD = 0,
GUEST_OS = 1,
/* NOTE(review): evaluates to 1, which excludes GUEST_OS from the
 * range [0, MAX_REMOTE_ID) — confirm whether GUEST_OS + 1 was intended.
 */
MAX_REMOTE_ID = SECURE_PD + 1,
};
/* Types of fastrpc DMA bufs sent to DSP */
enum fastrpc_buf_type {
METADATA_BUF,
COPYDATA_BUF,
INITMEM_BUF,
USER_BUF,
REMOTEHEAP_BUF,
ROOTHEAP_BUF,
};
/* Types of RPC calls to DSP */
enum fastrpc_msg_type {
USER_MSG = 0,
KERNEL_MSG_WITH_ZERO_PID,
KERNEL_MSG_WITH_NONZERO_PID,
};
enum fastrpc_response_flags {
NORMAL_RESPONSE = 0,
EARLY_RESPONSE = 1,
USER_EARLY_SIGNAL = 2,
COMPLETE_SIGNAL = 3,
STATUS_RESPONSE = 4,
POLL_MODE = 5,
};
/* To maintain the dsp map current state */
enum fastrpc_map_state {
/* Default smmu/global mapping */
FD_MAP_DEFAULT = 0,
/* Initiated DSP mapping */
FD_DSP_MAP_IN_PROGRESS,
/* Completed DSP mapping */
FD_DSP_MAP_COMPLETE,
/* Initiated DSP unmapping */
FD_DSP_UNMAP_IN_PROGRESS,
};
struct fastrpc_socket {
struct socket *sock; // Socket used to communicate with remote domain
struct sockaddr_qrtr local_sock_addr; // Local socket address on kernel side
struct sockaddr_qrtr remote_sock_addr; // Remote socket address on remote domain side
struct mutex socket_mutex; // Mutex for socket synchronization
void *recv_buf; // Received packet buffer
};
struct frpc_transport_session_control {
struct fastrpc_socket frpc_socket; // Fastrpc socket data structure
u32 remote_server_instance; // Unique remote server instance ID
bool remote_server_online; // Flag to indicate remote server status
struct work_struct work; // work for handling incoming messages
struct workqueue_struct *wq; // workqueue to post @work on
};
static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
"sdsp", "cdsp"};
struct fastrpc_phy_page {
u64 addr; /* physical address */
u64 size; /* size of contiguous region */
};
struct fastrpc_invoke_buf {
u32 num; /* number of contiguous regions */
u32 pgidx; /* index to start of contiguous region */
};
struct fastrpc_remote_dmahandle {
s32 fd; /* dma handle fd */
u32 offset; /* dma handle offset */
u32 len; /* dma handle length */
};
struct fastrpc_remote_buf {
u64 pv; /* buffer pointer */
u64 len; /* length of buffer */
};
union fastrpc_remote_arg {
struct fastrpc_remote_buf buf;
struct fastrpc_remote_dmahandle dma;
};
struct fastrpc_mmap_rsp_msg {
u64 vaddr;
};
struct fastrpc_mmap_req_msg {
s32 pgid;
u32 flags;
u64 vaddr;
s32 num;
};
struct fastrpc_mem_map_req_msg {
s32 pgid;
s32 fd;
s32 offset;
u32 flags;
u64 vaddrin;
s32 num;
s32 data_len;
};
struct fastrpc_munmap_req_msg {
s32 pgid;
u64 vaddr;
u64 size;
};
struct fastrpc_mem_unmap_req_msg {
s32 pgid;
s32 fd;
u64 vaddrin;
u64 len;
};
/* Privileged group IDs read from the "qcom,fastrpc-gids" DT property
 * (see fastrpc_init_privileged_gids()).
 */
struct gid_list {
u32 *gids; /* array of GID values */
u32 gidcount; /* number of entries in @gids */
};
struct fastrpc_msg {
int pid; /* process group id */
int tid; /* thread id */
u64 ctx; /* invoke caller context */
u32 handle; /* handle to invoke */
u32 sc; /* scalars structure describing the data */
u64 addr; /* physical address */
u64 size; /* size of contiguous region */
};
struct fastrpc_invoke_rsp {
u64 ctx; /* invoke caller context */
int retval; /* invoke return value */
};
struct fastrpc_invoke_rspv2 {
u64 ctx; /* invoke caller context */
int retval; /* invoke return value */
u32 flags; /* early response flags */
u32 early_wake_time; /* user hint in us */
u32 version; /* version number */
};
struct fastrpc_tx_msg {
struct fastrpc_msg msg; /* Msg sent to remote subsystem */
int rpmsg_send_err; /* rpmsg error */
s64 ns; /* Timestamp (in ns) of msg */
};
struct fastrpc_rx_msg {
struct fastrpc_invoke_rspv2 rsp; /* Response from remote subsystem */
s64 ns; /* Timestamp (in ns) of response */
};
struct fastrpc_rpmsg_log {
u32 tx_index; /* Current index of 'tx_msgs' array */
u32 rx_index; /* Current index of 'rx_msgs' array */
/* Rolling history of messages sent to remote subsystem */
struct fastrpc_tx_msg tx_msgs[GLINK_MSG_HISTORY_LEN];
/* Rolling history of responses from remote subsystem */
struct fastrpc_rx_msg rx_msgs[GLINK_MSG_HISTORY_LEN];
spinlock_t tx_lock;
spinlock_t rx_lock;
};
struct dsp_notif_rsp {
u64 ctx; /* response context */
u32 type; /* Notification type */
int pid; /* user process pid */
u32 status; /* userpd status notification */
};
/* Union of all response-message layouts that can arrive from the remote
 * side; its size bounds a received packet (FASTRPC_SOCKET_RECV_SIZE).
 */
union rsp {
struct fastrpc_invoke_rsp rsp; /* basic invoke response */
struct fastrpc_invoke_rspv2 rsp2; /* v2 response with early-wake info */
struct dsp_notif_rsp rsp3; /* userpd status notification */
};
/* Overlap bookkeeping for remote-arg buffers during argument packing.
 * NOTE(review): field meanings inferred from names — confirm against
 * the packing code that uses this struct.
 */
struct fastrpc_buf_overlap {
u64 start; /* buffer start (presumably virtual address) */
u64 end; /* buffer end */
int raix; /* index of the remote arg this entry refers to */
u64 mstart; /* presumably merged/non-overlapping region start */
u64 mend; /* presumably merged/non-overlapping region end */
u64 offset;
};
struct fastrpc_buf {
/* Node for adding to buffer lists */
struct list_head node;
struct fastrpc_user *fl;
struct dma_buf *dmabuf;
struct device *dev;
/* Context bank with which DMA buffer was allocated */
struct fastrpc_smmu *smmucb;
void *virt;
u32 type;
u64 phys;
u64 size;
/* Lock for dma buf attachments */
struct mutex lock;
struct list_head attachments;
uintptr_t raddr;
bool in_use;
u32 domain_id;
/* time counter to trace buffer allocation latency */
struct timespec64 alloc_time;
/* time counter to trace scm assign latency */
struct timespec64 scm_assign_time;
};
struct fastrpc_dma_buf_attachment {
struct device *dev;
struct sg_table sgt;
struct list_head node;
};
/* A user dma-buf (identified by 'fd') imported and mapped for DSP access */
struct fastrpc_map {
	struct list_head node;
	struct fastrpc_user *fl;
	int fd;
	struct dma_buf *buf;
	struct sg_table *table;
	struct dma_buf_attachment *attach;
	/* Context bank with which buffer was mapped on SMMU */
	struct fastrpc_smmu *smmucb;
	u64 phys;
	u64 size;
	void *va;
	u64 len;
	u64 raddr;
	u32 attr;
	u32 flags;
	struct kref refcount;
	int secure;
	atomic_t state;
};
/*
 * Per-invocation profiling counters; values are emitted by the
 * fastrpc_perf_counters trace event (in ns, per its TP_printk).
 */
struct fastrpc_perf {
	u64 count;
	u64 flush;
	u64 map;
	u64 copy;
	u64 link;
	u64 getargs;
	u64 putargs;
	u64 invargs;
	u64 invoke;
	u64 tid;
};
/* One SMMU context bank (CB) used to map buffers for the DSP */
struct fastrpc_smmu {
	struct device *dev;
	int sid;
	bool valid;
	struct mutex map_mutex;
	/* gen pool for QRTR */
	struct gen_pool *frpc_genpool;
	/* fastrpc gen pool buffer */
	struct fastrpc_buf *frpc_genpool_buf;
	/* fastrpc gen pool buffer fixed IOVA */
	unsigned long genpool_iova;
	/* fastrpc gen pool buffer size */
	size_t genpool_size;
	/* Total bytes allocated using this CB */
	u64 allocatedbytes;
	/* Total size of the context bank */
	u64 totalbytes;
	/* Min alloc size for which CB can be used */
	u64 minallocsize;
	/* Max alloc size for which CB can be used */
	u64 maxallocsize;
	/* To identify the parent session this SMMU CB belongs to */
	struct fastrpc_pool_ctx *sess;
};
/* A session: a pool of context banks shared by one or more applications */
struct fastrpc_pool_ctx {
	/* Context bank pool */
	struct fastrpc_smmu smmucb[FASTRPC_MAX_CB_POOL];
	u32 pd_type;
	bool secure;
	bool sharedcb;
	/* Number of context banks in the pool */
	u32 smmucount;
	/* Number of applications using the pool */
	int usecount;
};
/* State for a static (audio/sensors-style) protection domain on the DSP */
struct fastrpc_static_pd {
	char *servloc_name;	/* service locator name */
	char *spdname;		/* static PD name */
	void *pdrhandle;	/* PDR (protection-domain restart) handle */
	u64 pdrcount;		/* number of PDR events seen */
	u64 prevpdrcount;	/* pdrcount at last processed event */
	atomic_t ispdup;	/* nonzero when the static PD is up */
	atomic_t is_attached;
	struct fastrpc_channel_ctx *cctx;
};
/* Counted list of fastrpc_buf entries */
struct heap_bufs {
	/* List of bufs */
	struct list_head list;
	/* Number of bufs */
	unsigned int num;
};
/*
 * Per-channel (per remote subsystem) driver state, shared by all user
 * processes talking to that subsystem.
 */
struct fastrpc_channel_ctx {
	int domain_id;
	int sesscount;
	int vmcount;
	u64 perms;
	struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
#if !IS_ENABLED(CONFIG_QCOM_FASTRPC_TRUSTED)
	struct rpmsg_device *rpdev;
#else
	/* Socket-based transport session (trusted build) */
	struct frpc_transport_session_control session_control;
#endif
	struct device *dev;
	struct fastrpc_pool_ctx session[FASTRPC_MAX_SESSIONS];
	struct fastrpc_static_pd spd[FASTRPC_MAX_SPD];
	/* Protects the 'users' list (see fastrpc_recv_del_server()) */
	spinlock_t lock;
	struct idr ctx_idr;
	struct ida tgid_frpc_ida;
	/* List of fastrpc_user processes on this channel */
	struct list_head users;
	struct kref refcount;
	/* Flag if dsp attributes are cached */
	bool valid_attributes;
	bool cpuinfo_status;
	bool staticpd_status;
	u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
	u32 lowest_capacity_core_count;
	u32 qos_latency;
	struct fastrpc_device_node *secure_fdevice;
	struct fastrpc_device_node *fdevice;
	struct gid_list gidlist;
	struct list_head gmaps;
	struct fastrpc_rpmsg_log gmsg_log;
	/* Secure subsystems like ADSP/SLPI will use secure client */
	struct wakeup_source *wake_source_secure;
	/* Non-secure subsystem like CDSP will use regular client */
	struct wakeup_source *wake_source;
	struct mutex wake_mutex;
	bool secure;
	bool unsigned_support;
	u64 dma_mask;
	u64 cpuinfo_todsp;
	int max_sess_per_proc;
	bool pd_type;
	/* Set teardown flag when remoteproc is shutting down */
	atomic_t teardown;
	/* Buffers donated to grow rootheap on DSP */
	struct heap_bufs rootheap_bufs;
	/* jobid counter to prepend into ctxid */
	u64 jobid;
	/* Flag to indicate CB pooling is enabled for channel */
	bool smmucb_pool;
	/* Number of active ongoing invocations (device ioctl / release) */
	u32 invoke_cnt;
	/* Completion object for threads to wait for SSR handling to finish */
	struct completion ssr_complete;
	/* Wait queue to block/resume SSR until all invocations are complete */
	wait_queue_head_t ssr_wait_queue;
};
/* State of one in-flight remote invocation */
struct fastrpc_invoke_ctx {
	/* Node for adding to context list */
	struct list_head node;
	int nscalars;
	int nbufs;
	int retval;
	int pid;
	int tgid;
	u32 sc;
	u32 handle;
	u32 *crc;
	/* user hint of completion time in us */
	u32 early_wake_time;
	u64 *perf_kernel;
	u64 *perf_dsp;
	u64 ctxid;
	u64 msg_sz;
	/* work done status flag */
	bool is_work_done;
	/* response flags from remote processor */
	enum fastrpc_response_flags rsp_flags;
	struct kref refcount;
	struct completion work;
	// struct work_struct put_work;
	struct fastrpc_msg msg;
	struct fastrpc_user *fl;
	union fastrpc_remote_arg *rpra;
	union fastrpc_remote_arg *outbufs;
	struct fastrpc_map **maps;
	struct fastrpc_buf *buf;
	struct fastrpc_invoke_args *args;
	struct fastrpc_buf_overlap *olaps;
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_perf *perf;
};
/* One character device node (/dev entry) exposed for a channel */
struct fastrpc_device_node {
	struct fastrpc_channel_ctx *cctx;
	struct miscdevice miscdev;
	bool secure;
};
/* User-supplied configuration blob plus root-heap region */
struct fastrpc_internal_config {
	int user_fd;
	int user_size;
	u64 root_addr;
	u32 root_size;
};
/* FastRPC ioctl structure to set session related info */
struct fastrpc_internal_sessinfo {
	uint32_t domain_id; /* Set the remote subsystem, Domain ID of the session */
	uint32_t session_id; /* Unused, Set the Session ID on remote subsystem */
	uint32_t pd; /* Set the process type on remote subsystem */
	uint32_t sharedcb; /* Unused, Session can share context bank with other sessions */
};
/* Queue of pending DSP process-status notifications for one user process */
struct fastrpc_notif_queue {
	/* Number of pending status notifications in queue */
	atomic_t notif_queue_count;
	/* Wait queue to synchronize notifier thread and response */
	wait_queue_head_t notif_wait_queue;
	/* IRQ safe spin lock for protecting notif queue */
	spinlock_t nqlock;
};
/* Notification payload returned to userspace */
struct fastrpc_internal_notif_rsp {
	u32 domain; /* Domain of User PD */
	u32 session; /* Session ID of User PD */
	u32 status; /* Status of the process */
};
/* Kernel-side list entry for a queued notification */
struct fastrpc_notif_rsp {
	struct list_head notifn;
	u32 domain;
	u32 session;
	enum fastrpc_status_flags status;
};
/* Life-cycle state of the user process on the DSP */
enum fastrpc_process_state {
	/* Default state */
	DEFAULT_PROC_STATE = 0,
	/* Process create on DSP initiated */
	DSP_CREATE_START,
	/* Process create on DSP complete */
	DSP_CREATE_COMPLETE,
	/* Process exit on DSP initiated */
	DSP_EXIT_START,
	/* Process exit on DSP complete */
	DSP_EXIT_COMPLETE,
};
/*
 * Per-process (per open of the device node) driver state.
 * Fix: removed the stray double semicolon after 'servloc_name' (an empty
 * declaration inside a struct is invalid ISO C). Layout is unchanged.
 */
struct fastrpc_user {
	struct list_head user;
	struct list_head maps;
	struct list_head pending;
	struct list_head interrupted;
	struct list_head mmaps;
	struct list_head cached_bufs;
	/* list of client drivers registered to fastrpc driver*/
	struct list_head fastrpc_drivers;
	struct fastrpc_channel_ctx *cctx;
	struct fastrpc_pool_ctx *sctx;
	struct fastrpc_pool_ctx *secsctx;
	struct fastrpc_buf *init_mem;
	/* Pre-allocated header buffer */
	struct fastrpc_buf *pers_hdr_buf;
	/* proc_init shared buffer */
	struct fastrpc_buf *proc_init_sharedbuf;
	struct fastrpc_static_pd *spd;
	/* Pre-allocated buffer divided into N chunks */
	struct fastrpc_buf *hdr_bufs;
	/*
	 * Unique device struct for each process, shared with
	 * client drivers when attached to fastrpc driver.
	 */
	struct fastrpc_device *device;
#ifdef CONFIG_DEBUG_FS
	atomic_t debugfs_file_create;
	struct dentry *debugfs_file;
	char *debugfs_buf;
#endif
	int tgid;
	/* Unique pid send to dsp*/
	int tgid_frpc;
	/* PD type of remote subsystem process */
	u32 pd_type;
	/* total cached buffers */
	u32 num_cached_buf;
	/* total persistent headers */
	u32 num_pers_hdrs;
	u32 profile;
	int sessionid;
	/* Threads poll for specified timeout and fall back to glink wait */
	u32 poll_timeout;
	u32 ws_timeout;
	u32 qos_request;
	/* Flag to enable PM wake/relax voting for invoke */
	u32 wake_enable;
	bool is_secure_dev;
	/* If set, threads will poll for DSP response instead of glink wait */
	bool poll_mode;
	bool is_unsigned_pd;
	/* Variable to identify if client driver dma operation are pending*/
	bool is_dma_invoke_pend;
	bool sharedcb;
	char *servloc_name;
	/* Lock for lists */
	spinlock_t lock;
	/* lock for dsp signals */
	spinlock_t dspsignals_lock;
	/* mutex for remote mapping synchronization*/
	struct mutex remote_map_mutex;
	/*mutex for process maps synchronization*/
	struct mutex map_mutex;
	struct mutex signal_create_mutex;
	/* mutex for qos request synchronization */
	struct mutex pm_qos_mutex;
	/* Completion object for dma invocations by client driver*/
	struct completion dma_invoke;
	/* Completion objects and state for dspsignals */
	struct fastrpc_dspsignal *signal_groups[FASTRPC_DSPSIGNAL_NUM_SIGNALS /FASTRPC_DSPSIGNAL_GROUP_SIZE];
	struct dev_pm_qos_request *dev_pm_qos_req;
	/* Process status notification queue */
	struct fastrpc_notif_queue proc_state_notif;
	struct list_head notif_queue;
	struct fastrpc_internal_config config;
	bool multi_session_support;
	bool untrusted_process;
	bool set_session_info;
	/* Various states throughout process life cycle */
	atomic_t state;
	/* Flag to indicate notif thread exit requested */
	bool exit_notif;
};
/* Payloads for the internal-control request ('req' selects the member) */
struct fastrpc_ctrl_latency {
	u32 enable; /* latency control enable */
	u32 latency; /* latency request in us */
};
struct fastrpc_ctrl_smmu {
	u32 sharedcb; /* Set to SMMU share context bank */
};
struct fastrpc_ctrl_wakelock {
	u32 enable; /* wakelock control enable */
};
struct fastrpc_ctrl_pm {
	u32 timeout; /* timeout(in ms) for PM to keep system awake */
};
/* Tagged union of control requests; 'req' identifies the active member */
struct fastrpc_internal_control {
	u32 req;
	union {
		struct fastrpc_ctrl_latency lp;
		struct fastrpc_ctrl_smmu smmu;
		struct fastrpc_ctrl_wakelock wp;
		struct fastrpc_ctrl_pm pm;
	};
};
/* State machine for one DSP signal (see fastrpc_dspsignal below) */
enum fastrpc_dspsignal_state {
	DSPSIGNAL_STATE_UNUSED = 0,
	DSPSIGNAL_STATE_PENDING,
	DSPSIGNAL_STATE_SIGNALED,
	DSPSIGNAL_STATE_CANCELED,
};
/* Request to create/wait-on/signal a DSP signal ('req' selects the op) */
struct fastrpc_internal_dspsignal {
	u32 req;
	u32 signal_id;
	union {
		u32 flags;
		u32 timeout_usec;
	};
};
/* Completion plus state for a single DSP signal */
struct fastrpc_dspsignal {
	struct completion comp;
	int state;
};
/* Send an RPC message to the remote subsystem over the active transport */
int fastrpc_transport_send(struct fastrpc_channel_ctx *cctx, void *rpc_msg, uint32_t rpc_msg_size);
/* Create/register the transport (socket) for the driver */
int fastrpc_transport_init(void);
/* Tear down the transport created by fastrpc_transport_init() */
void fastrpc_transport_deinit(void);
/* Dispatch a raw response/notification packet received from the DSP */
int fastrpc_handle_rpc_response(struct fastrpc_channel_ctx *cctx, void *data, int len);
/* Register a (secure or non-secure) misc device node for a channel */
int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
				bool is_secured, const char *domain);
/* Return (allocating on first use) the global channel context */
struct fastrpc_channel_ctx* get_current_channel_ctx(struct device *dev);
/* Wake every pending invocation of one user process (e.g. on SSR) */
void fastrpc_notify_users(struct fastrpc_user *user);
/* Cancel all in-progress dspsignal waits for a process during SSR */
void fastrpc_ssr_dspsignal_cancel_wait(struct fastrpc_user *fl);
/* Function to clean all SMMU mappings associated with a fastrpc user obj */
void fastrpc_free_user(struct fastrpc_user *fl);
#endif /* __FASTRPC_SHARED_H__ */

View File

@@ -0,0 +1,458 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include "../include/uapi/misc/fastrpc.h"
#include <linux/of_reserved_mem.h>
#include "fastrpc_shared.h"
/* Global channel context, lazily allocated on first use */
struct fastrpc_channel_ctx *scctx;

/**
 * get_current_channel_ctx() - Return the global channel context, allocating
 * and initializing it on the first call.
 * @dev: device used for device-node registration and error logging.
 *
 * Return: the channel context, or ERR_PTR(-ENOMEM) on allocation failure.
 *
 * NOTE(review): not safe against concurrent first calls (no lock around the
 * check-then-allocate); confirm callers are serialized (e.g. probe path only).
 */
struct fastrpc_channel_ctx *get_current_channel_ctx(struct device *dev)
{
	if (scctx)
		return scctx;

	scctx = kzalloc(sizeof(*scctx), GFP_KERNEL);
	if (!scctx) {	/* kzalloc() returns NULL on failure, never ERR_PTR */
		dev_err(dev, "failed to get channel ctx\n");
		return ERR_PTR(-ENOMEM);
	}
	scctx->domain_id = CDSP_DOMAIN_ID;
	atomic_set(&scctx->teardown, 0);
	scctx->secure = 0;
	scctx->unsigned_support = false;
	kref_init(&scctx->refcount);
	INIT_LIST_HEAD(&scctx->users);
	INIT_LIST_HEAD(&scctx->gmaps);
	mutex_init(&scctx->wake_mutex);
	spin_lock_init(&scctx->lock);
	spin_lock_init(&(scctx->gmsg_log.tx_lock));
	spin_lock_init(&(scctx->gmsg_log.rx_lock));
	idr_init(&scctx->ctx_idr);
	ida_init(&scctx->tgid_frpc_ida);
	init_waitqueue_head(&scctx->ssr_wait_queue);
	scctx->max_sess_per_proc = FASTRPC_MAX_SESSIONS_PER_PROCESS;

	/* Register both the secure and non-secure device nodes for the domain */
	fastrpc_device_register(dev, scctx, true, domains[scctx->domain_id]);
	fastrpc_device_register(dev, scctx, false, domains[scctx->domain_id]);

	return scctx;
}
static void fastrpc_recv_new_server(struct frpc_transport_session_control *session_control,
unsigned int service, unsigned int instance,
unsigned int node, unsigned int port)
{
u32 remote_server_instance = session_control->remote_server_instance;
int err = 0;
/* Ignore EOF marker */
if (!node && !port) {
err = -EINVAL;
dev_err(scctx->dev, "Ignoring ctrl packet: node %u, port %u, err %d",
node, port, err);
return;
}
if (service != FASTRPC_REMOTE_SERVER_SERVICE_ID ||
instance != remote_server_instance) {
err = -ENOMSG;
dev_err(scctx->dev, "Ignoring ctrl packet: service id %u, instance id %u, err %d",
service, instance, err);
return;
}
mutex_lock(&session_control->frpc_socket.socket_mutex);
session_control->frpc_socket.remote_sock_addr.sq_family = AF_QIPCRTR;
session_control->frpc_socket.remote_sock_addr.sq_node = node;
session_control->frpc_socket.remote_sock_addr.sq_port = port;
session_control->remote_server_online = true;
mutex_unlock(&session_control->frpc_socket.socket_mutex);
dev_info(scctx->dev, "Remote server is up: remote ID (0x%x), node %u, port %u",
remote_server_instance, node, port);
}
static void fastrpc_recv_del_server(struct frpc_transport_session_control *session_control,
unsigned int node, unsigned int port)
{
u32 remote_server_instance = session_control->remote_server_instance;
int err = 0;
struct fastrpc_user *user;
unsigned long flags;
/* Ignore EOF marker */
if (!node && !port) {
err = -EINVAL;
dev_err(scctx->dev, "Ignoring ctrl packet: node %u, port %u, err %d",
node, port, err);
return;
}
if (node != session_control->frpc_socket.remote_sock_addr.sq_node ||
port != session_control->frpc_socket.remote_sock_addr.sq_port) {
dev_err(scctx->dev, "Ignoring ctrl packet: node %u, port %u, err %d", node, port, err);
return;
}
mutex_lock(&session_control->frpc_socket.socket_mutex);
session_control->frpc_socket.remote_sock_addr.sq_node = 0;
session_control->frpc_socket.remote_sock_addr.sq_port = 0;
session_control->remote_server_online = false;
mutex_unlock(&session_control->frpc_socket.socket_mutex);
spin_lock_irqsave(&scctx->lock, flags);
list_for_each_entry(user, &scctx->users, user)
fastrpc_notify_users(user);
spin_unlock_irqrestore(&scctx->lock, flags);
dev_info(scctx->dev, "Remote server is down: remote ID (0x%x)", remote_server_instance);
}
/**
 * fastrpc_recv_ctrl_pkt()
 * @session_control: Data structure that contains information related to socket and
 *                   remote server availability.
 * @buf: Control packet.
 * @len: Control packet length.
 *
 * Validate and dispatch a QRTR control packet (server up/down notifications)
 * received from the local control port.
 */
static void fastrpc_recv_ctrl_pkt(struct frpc_transport_session_control *session_control,
				  const void *buf, size_t len)
{
	const struct qrtr_ctrl_pkt *pkt = buf;
	u32 cmd;

	if (len < sizeof(*pkt)) {
		dev_err(scctx->dev, "Ignoring short control packet (%zu bytes)", len);
		return;
	}

	cmd = le32_to_cpu(pkt->cmd);
	if (cmd == QRTR_TYPE_NEW_SERVER) {
		fastrpc_recv_new_server(session_control,
					le32_to_cpu(pkt->server.service),
					le32_to_cpu(pkt->server.instance),
					le32_to_cpu(pkt->server.node),
					le32_to_cpu(pkt->server.port));
	} else if (cmd == QRTR_TYPE_DEL_SERVER) {
		fastrpc_recv_del_server(session_control,
					le32_to_cpu(pkt->server.node),
					le32_to_cpu(pkt->server.port));
	} else {
		dev_err(scctx->dev, "Ignoring unknown ctrl packet with size %zu", len);
	}
}
/**
* fastrpc_socket_callback_wq()
* @work: workqueue structure for incoming socket packets
*
* Callback function to receive responses that were posted on workqueue.
* We expect to receive control packets with remote domain status notifications or
* RPC data packets from remote domain.
*/
static void fastrpc_socket_callback_wq(struct work_struct *work)
{
int err = 0, cid = -1, bytes_rx = 0;
u32 remote_server_instance = (u32)-1;
bool ignore_err = false;
struct kvec msg = {0};
struct sockaddr_qrtr remote_sock_addr = {0};
struct msghdr remote_server = {0};
struct frpc_transport_session_control *session_control = NULL;
__u32 sq_node = 0, sq_port = 0;
session_control = container_of(work, struct frpc_transport_session_control, work);
if (session_control == NULL) {
err = -EFAULT;
goto bail;
}
remote_server.msg_name = &remote_sock_addr;
remote_server.msg_namelen = sizeof(remote_sock_addr);
msg.iov_base = session_control->frpc_socket.recv_buf;
msg.iov_len = FASTRPC_SOCKET_RECV_SIZE;
remote_server_instance = session_control->remote_server_instance;
for (;;) {
err = kernel_recvmsg(session_control->frpc_socket.sock, &remote_server, &msg, 1,
msg.iov_len, MSG_DONTWAIT);
if (err == -EAGAIN) {
ignore_err = true;
goto bail;
}
if (err < 0)
goto bail;
bytes_rx = err;
err = 0;
sq_node = remote_sock_addr.sq_node;
sq_port = remote_sock_addr.sq_port;
if (sq_node == session_control->frpc_socket.local_sock_addr.sq_node &&
sq_port == QRTR_PORT_CTRL) {
fastrpc_recv_ctrl_pkt(session_control,
session_control->frpc_socket.recv_buf,
bytes_rx);
} else {
cid = GET_CID_FROM_SERVER_INSTANCE(remote_server_instance);
fastrpc_handle_rpc_response(scctx, msg.iov_base, msg.iov_len);
}
}
bail:
if (!ignore_err && err < 0) {
dev_err(scctx->dev,
"invalid response data %pK (rx %d bytes), buffer len %zu from remote ID (0x%x) err %d\n",
msg.iov_base, bytes_rx, msg.iov_len, remote_server_instance, err);
}
}
/**
 * fastrpc_socket_callback()
 * @sk: Sock data structure with information related to the callback response.
 *
 * Socket-layer data-ready/error callback. Does no processing itself; it
 * merely queues the session work item so packets are handled in process
 * context by fastrpc_socket_callback_wq().
 */
static void fastrpc_socket_callback(struct sock *sk)
{
	struct frpc_transport_session_control *session_control;

	if (!sk) {
		dev_err(scctx->dev, "invalid sock received, err %d", 0);
		return;
	}

	rcu_read_lock();
	session_control = rcu_dereference_sk_user_data(sk);
	if (session_control)
		queue_work(session_control->wq, &session_control->work);
	rcu_read_unlock();
}
/**
 * fastrpc_transport_send()
 * @cctx: Channel context holding the transport session.
 * @rpc_msg: RPC message to send to remote domain.
 * @rpc_msg_size: RPC message size.
 *
 * Send an RPC message to the remote domain over the session's QRTR socket.
 * Fails with -EPIPE if the socket is gone or the remote server is offline.
 *
 * Return: 0 on success or negative errno value on failure.
 */
int fastrpc_transport_send(struct fastrpc_channel_ctx *cctx, void *rpc_msg, uint32_t rpc_msg_size)
{
	struct frpc_transport_session_control *session_control = &cctx->session_control;
	struct fastrpc_socket *frpc_socket = &session_control->frpc_socket;
	struct msghdr remote_server = {0};
	struct kvec msg = { .iov_base = rpc_msg, .iov_len = rpc_msg_size };
	int ret;

	remote_server.msg_name = &frpc_socket->remote_sock_addr;
	remote_server.msg_namelen = sizeof(frpc_socket->remote_sock_addr);

	mutex_lock(&frpc_socket->socket_mutex);
	if (!frpc_socket->sock || !session_control->remote_server_online) {
		mutex_unlock(&frpc_socket->socket_mutex);
		return -EPIPE;
	}
	ret = kernel_sendmsg(frpc_socket->sock, &remote_server, &msg, 1, msg.iov_len);
	mutex_unlock(&frpc_socket->socket_mutex);

	/* kernel_sendmsg() returns bytes sent on success; callers expect 0 */
	return ret > 0 ? 0 : ret;
}
/**
 * create_socket()
 * @session_control: Data structure that contains information related to socket and
 *                   remote server availability.
 *
 * Create a kernel QIPCRTR datagram socket, record its local address, and wire
 * its data-ready/error callbacks to fastrpc_socket_callback().
 *
 * Return: pointer to a socket on success or ERR_PTR() on failure
 * (never NULL).
 */
static struct socket *create_socket(struct frpc_transport_session_control *session_control)
{
	struct fastrpc_socket *frpc_socket = &session_control->frpc_socket;
	struct socket *sock = NULL;
	int err;

	err = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
			       PF_QIPCRTR, &sock);
	if (err < 0) {
		dev_err(scctx->dev, "sock_create_kern failed with err %d\n", err);
		return ERR_PTR(err);
	}

	err = kernel_getsockname(sock, (struct sockaddr *)&frpc_socket->local_sock_addr);
	if (err < 0) {
		sock_release(sock);
		dev_err(scctx->dev, "kernel_getsockname failed with err %d\n", err);
		return ERR_PTR(err);
	}

	/* Route both data-ready and error notifications to the same handler */
	rcu_assign_sk_user_data(sock->sk, session_control);
	sock->sk->sk_data_ready = fastrpc_socket_callback;
	sock->sk->sk_error_report = fastrpc_socket_callback;
	return sock;
}
/**
 * register_remote_server_notifications()
 * @frpc_socket: Socket to send message to register for remote service notifications.
 * @remote_server_instance: ID to uniquely identify remote server
 *
 * Register socket to receive status notifications from remote service
 * using remote service ID FASTRPC_REMOTE_SERVER_SERVICE_ID and instance ID.
 *
 * Fixes vs. original: removed the memset() that was redundant with the
 * '= {0}' initializer, and normalized the positive byte count returned by
 * kernel_sendmsg() to 0 so callers get a plain 0/-errno contract.
 *
 * Return: 0 on success or negative errno value on failure.
 */
static int register_remote_server_notifications(struct fastrpc_socket *frpc_socket,
						uint32_t remote_server_instance)
{
	struct qrtr_ctrl_pkt pkt = {0};
	struct sockaddr_qrtr sq = {0};
	struct msghdr remote_server = {0};
	struct kvec msg = { &pkt, sizeof(pkt) };
	int ret;

	pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_LOOKUP);
	pkt.server.service = cpu_to_le32(FASTRPC_REMOTE_SERVER_SERVICE_ID);
	pkt.server.instance = cpu_to_le32(remote_server_instance);

	/* Lookup registrations go to the local QRTR control port */
	sq.sq_family = frpc_socket->local_sock_addr.sq_family;
	sq.sq_node = frpc_socket->local_sock_addr.sq_node;
	sq.sq_port = QRTR_PORT_CTRL;

	remote_server.msg_name = &sq;
	remote_server.msg_namelen = sizeof(sq);

	ret = kernel_sendmsg(frpc_socket->sock, &remote_server, &msg, 1, sizeof(pkt));
	if (ret < 0) {
		dev_err(scctx->dev, "failed to send lookup registration: %d\n", ret);
		return ret;
	}
	return 0;
}
/**
 * fastrpc_transport_init() - Initialize the socket transport for the driver.
 *
 * Create the QRTR socket for the global channel, allocate the receive buffer
 * and message-handling workqueue, and register for remote-server
 * availability notifications.
 *
 * Fixes vs. original:
 * - create_socket() returns ERR_PTR() and never NULL, so failure is now
 *   detected with IS_ERR() (the old '!sock' test could never fire);
 * - partial-initialization failures now release the socket, receive buffer
 *   and workqueue instead of leaking them.
 *
 * Return: 0 on success or negative errno value on failure.
 */
int fastrpc_transport_init(void)
{
	int err = 0;
	struct socket *sock = NULL;
	struct fastrpc_socket *frpc_socket = NULL;
	struct frpc_transport_session_control *session_control = NULL;
	struct workqueue_struct *wq = NULL;

	if (!scctx) {
		err = -ENOMEM;
		goto bail;
	}
	session_control = &scctx->session_control;
	session_control->remote_server_online = false;
	frpc_socket = &session_control->frpc_socket;
	mutex_init(&frpc_socket->socket_mutex);

	sock = create_socket(session_control);
	if (IS_ERR(sock)) {
		err = PTR_ERR(sock);
		goto bail;
	}
	frpc_socket->sock = sock;

	frpc_socket->recv_buf = kzalloc(FASTRPC_SOCKET_RECV_SIZE, GFP_KERNEL);
	if (!frpc_socket->recv_buf) {
		err = -ENOMEM;
		goto release_sock;
	}

	INIT_WORK(&session_control->work, fastrpc_socket_callback_wq);
	wq = alloc_workqueue("fastrpc_msg_handler", WQ_UNBOUND|WQ_HIGHPRI, 0);
	if (!wq) {
		err = -ENOMEM;
		goto free_recv_buf;
	}
	session_control->wq = wq;

	session_control->remote_server_instance = GET_SERVER_INSTANCE(SECURE_PD, scctx->domain_id);
	err = register_remote_server_notifications(frpc_socket,
						   session_control->remote_server_instance);
	if (err < 0)
		goto destroy_wq;

	dev_info(scctx->dev, "Created and registered socket for remote server (service ID %u, instance ID 0x%x)\n",
		 FASTRPC_REMOTE_SERVER_SERVICE_ID, session_control->remote_server_instance);
	return 0;

destroy_wq:
	destroy_workqueue(wq);
	session_control->wq = NULL;
free_recv_buf:
	kfree(frpc_socket->recv_buf);
	frpc_socket->recv_buf = NULL;
release_sock:
	sock_release(sock);
	frpc_socket->sock = NULL;
bail:
	kfree(scctx);
	scctx = NULL;
	pr_err("fastrpc_transport_init failed with err %d\n", err);
	return err;
}
/**
 * fastrpc_transport_deinit() - Deinitialize the socket transport.
 *
 * Release the socket first (stopping the source of new callbacks), then the
 * workqueue and the receive buffer.
 *
 * Fix vs. original: removed the dead NULL check on 'session_control' — it is
 * the address of a member embedded in 'scctx', which is already verified
 * non-NULL, so it can never be NULL.
 */
void fastrpc_transport_deinit(void)
{
	struct fastrpc_socket *frpc_socket = NULL;
	struct frpc_transport_session_control *session_control = NULL;

	if (!scctx) {
		pr_err("fastrpc_transport_deinit failed as scctx is NULL\n");
		return;
	}
	session_control = &scctx->session_control;
	frpc_socket = &session_control->frpc_socket;

	/* Release the socket before the workqueue so no new work gets queued */
	if (frpc_socket->sock)
		sock_release(frpc_socket->sock);
	if (session_control->wq)
		destroy_workqueue(session_control->wq);
	kfree(frpc_socket->recv_buf);
	frpc_socket->recv_buf = NULL;
	frpc_socket->sock = NULL;
	mutex_destroy(&frpc_socket->socket_mutex);
}

View File

@@ -0,0 +1,428 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#if !defined(TRACE_FASTRPC_H) || defined(TRACE_HEADER_MULTI_READ)
#define TRACE_FASTRPC_H
#undef TRACE_SYSTEM
#define TRACE_SYSTEM fastrpc
/*
* Path must be relative to location of 'define_trace.h' header in kernel
* Define path if not defined in bazel file
*/
#ifndef DSP_TRACE_INCLUDE_PATH
#define DSP_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/dsp-kernel/dsp
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH DSP_TRACE_INCLUDE_PATH
/* Name of trace header file */
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE fastrpc_trace
#include <linux/tracepoint.h>
/* Logged when an RPC invoke message is handed to the transport */
TRACE_EVENT(fastrpc_transport_send,
	TP_PROTO(int cid, uint64_t smq_ctx,
		uint64_t ctx, uint32_t handle,
		uint32_t sc, uint64_t addr, uint64_t size),
	TP_ARGS(cid, smq_ctx, ctx, handle, sc, addr, size),
	TP_STRUCT__entry(
		__field(int, cid)
		__field(u64, smq_ctx)
		__field(u64, ctx)
		__field(u32, handle)
		__field(u32, sc)
		__field(u64, addr)
		__field(u64, size)
	),
	TP_fast_assign(
		__entry->cid = cid;
		__entry->smq_ctx = smq_ctx;
		__entry->ctx = ctx;
		__entry->handle = handle;
		__entry->sc = sc;
		__entry->addr = addr;
		__entry->size = size;
	),
	TP_printk("to cid %d: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x, addr 0x%llx, size %llu",
		__entry->cid, __entry->smq_ctx, __entry->ctx, __entry->handle,
		__entry->sc, __entry->addr, __entry->size)
);
/* Logged when a response arrives from the remote subsystem */
TRACE_EVENT(fastrpc_transport_response,
	TP_PROTO(int cid, uint64_t ctx, int retval,
		uint32_t rsp_flags, uint32_t early_wake_time),
	TP_ARGS(cid, ctx, retval, rsp_flags, early_wake_time),
	TP_STRUCT__entry(
		__field(int, cid)
		__field(u64, ctx)
		__field(int, retval)
		__field(u32, rsp_flags)
		__field(u32, early_wake_time)
	),
	TP_fast_assign(
		__entry->cid = cid;
		__entry->ctx = ctx;
		__entry->retval = retval;
		__entry->rsp_flags = rsp_flags;
		__entry->early_wake_time = early_wake_time;
	),
	TP_printk("from cid %d: ctx 0x%llx, retval 0x%x, rsp_flags %u, early_wake_time %u",
		__entry->cid, __entry->ctx, __entry->retval,
		__entry->rsp_flags, __entry->early_wake_time)
);
/* Logged when an in-flight invocation context is interrupted */
TRACE_EVENT(fastrpc_context_interrupt,
	TP_PROTO(int cid, uint64_t smq_ctx, uint64_t ctx,
		uint32_t handle, uint32_t sc),
	TP_ARGS(cid, smq_ctx, ctx, handle, sc),
	TP_STRUCT__entry(
		__field(int, cid)
		__field(u64, smq_ctx)
		__field(u64, ctx)
		__field(u32, handle)
		__field(u32, sc)
	),
	TP_fast_assign(
		__entry->cid = cid;
		__entry->smq_ctx = smq_ctx;
		__entry->ctx = ctx;
		__entry->handle = handle;
		__entry->sc = sc;
	),
	TP_printk("to cid %d: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x",
		__entry->cid, __entry->smq_ctx,
		__entry->ctx, __entry->handle, __entry->sc)
);
/* Logged when an interrupted invocation context is restored */
TRACE_EVENT(fastrpc_context_restore,
	TP_PROTO(int cid, uint64_t smq_ctx, uint64_t ctx,
		uint32_t handle, uint32_t sc),
	TP_ARGS(cid, smq_ctx, ctx, handle, sc),
	TP_STRUCT__entry(
		__field(int, cid)
		__field(u64, smq_ctx)
		__field(u64, ctx)
		__field(u32, handle)
		__field(u32, sc)
	),
	TP_fast_assign(
		__entry->cid = cid;
		__entry->smq_ctx = smq_ctx;
		__entry->ctx = ctx;
		__entry->handle = handle;
		__entry->sc = sc;
	),
	TP_printk("for cid %d: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x",
		__entry->cid, __entry->smq_ctx,
		__entry->ctx, __entry->handle, __entry->sc)
);
/* Logged when a user dma-buf fd is mapped for DSP access */
TRACE_EVENT(fastrpc_dma_map,
	TP_PROTO(int cid, int fd, uint64_t phys, size_t size,
		size_t len, unsigned int attr, int mflags),
	TP_ARGS(cid, fd, phys, size, len, attr, mflags),
	TP_STRUCT__entry(
		__field(int, cid)
		__field(int, fd)
		__field(u64, phys)
		__field(size_t, size)
		__field(size_t, len)
		__field(unsigned int, attr)
		__field(int, mflags)
	),
	TP_fast_assign(
		__entry->cid = cid;
		__entry->fd = fd;
		__entry->phys = phys;
		__entry->size = size;
		__entry->len = len;
		__entry->attr = attr;
		__entry->mflags = mflags;
	),
	TP_printk("cid %d, fd %d, phys 0x%llx, size %zu (len %zu), attr 0x%x, flags 0x%x",
		__entry->cid, __entry->fd, __entry->phys, __entry->size,
		__entry->len, __entry->attr, __entry->mflags)
);
/* Logged when a previously mapped dma-buf is unmapped */
TRACE_EVENT(fastrpc_dma_unmap,
	TP_PROTO(int cid, uint64_t phys, size_t size, int fd),
	TP_ARGS(cid, phys, size, fd),
	TP_STRUCT__entry(
		__field(int, cid)
		__field(u64, phys)
		__field(size_t, size)
		__field(int, fd)
	),
	TP_fast_assign(
		__entry->cid = cid;
		__entry->phys = phys;
		__entry->size = size;
		__entry->fd = fd;
	),
	TP_printk("cid %d, phys 0x%llx, size %zu, fd %d",
		__entry->cid, __entry->phys, __entry->size, __entry->fd)
);
/* Logged when the driver allocates a DMA buffer */
TRACE_EVENT(fastrpc_dma_alloc,
	TP_PROTO(int cid, uint64_t phys, size_t size,
		unsigned long attr, int mflags),
	TP_ARGS(cid, phys, size, attr, mflags),
	TP_STRUCT__entry(
		__field(int, cid)
		__field(u64, phys)
		__field(size_t, size)
		__field(unsigned long, attr)
		__field(int, mflags)
	),
	TP_fast_assign(
		__entry->cid = cid;
		__entry->phys = phys;
		__entry->size = size;
		__entry->attr = attr;
		__entry->mflags = mflags;
	),
	TP_printk("cid %d, phys 0x%llx, size %zu, attr 0x%lx, flags 0x%x",
		__entry->cid, __entry->phys, __entry->size,
		__entry->attr, __entry->mflags)
);
/* Logged when a driver-allocated DMA buffer is freed */
TRACE_EVENT(fastrpc_dma_free,
	TP_PROTO(int cid, uint64_t phys, size_t size),
	TP_ARGS(cid, phys, size),
	TP_STRUCT__entry(
		__field(int, cid)
		__field(u64, phys)
		__field(size_t, size)
	),
	TP_fast_assign(
		__entry->cid = cid;
		__entry->phys = phys;
		__entry->size = size;
	),
	TP_printk("cid %d, phys 0x%llx, size %zu",
		__entry->cid, __entry->phys, __entry->size)
);
/* Logged when an invocation completes (response processed) */
TRACE_EVENT(fastrpc_context_complete,
	TP_PROTO(int cid, uint64_t smq_ctx, int retval,
		uint64_t ctx, uint32_t handle, uint32_t sc),
	TP_ARGS(cid, smq_ctx, retval, ctx, handle, sc),
	TP_STRUCT__entry(
		__field(int, cid)
		__field(u64, smq_ctx)
		__field(int, retval)
		__field(u64, ctx)
		__field(u32, handle)
		__field(u32, sc)
	),
	TP_fast_assign(
		__entry->cid = cid;
		__entry->smq_ctx = smq_ctx;
		__entry->retval = retval;
		__entry->ctx = ctx;
		__entry->handle = handle;
		__entry->sc = sc;
	),
	TP_printk("from cid %d: smq_ctx 0x%llx, retval 0x%x, ctx 0x%llx, handle 0x%x, sc 0x%x",
		__entry->cid, __entry->smq_ctx, __entry->retval,
		__entry->ctx, __entry->handle, __entry->sc)
);
/* Logged when an invocation context is allocated */
TRACE_EVENT(fastrpc_context_alloc,
	TP_PROTO(uint64_t smq_ctx, uint64_t ctx,
		uint32_t handle, uint32_t sc),
	TP_ARGS(smq_ctx, ctx, handle, sc),
	TP_STRUCT__entry(
		__field(u64, smq_ctx)
		__field(u64, ctx)
		__field(u32, handle)
		__field(u32, sc)
	),
	TP_fast_assign(
		__entry->smq_ctx = smq_ctx;
		__entry->ctx = ctx;
		__entry->handle = handle;
		__entry->sc = sc;
	),
	TP_printk("for: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x",
		__entry->smq_ctx, __entry->ctx, __entry->handle, __entry->sc)
);
/* Logged when an invocation context is freed */
TRACE_EVENT(fastrpc_context_free,
	TP_PROTO(uint64_t smq_ctx, uint64_t ctx,
		uint32_t handle, uint32_t sc),
	TP_ARGS(smq_ctx, ctx, handle, sc),
	TP_STRUCT__entry(
		__field(u64, smq_ctx)
		__field(u64, ctx)
		__field(u32, handle)
		__field(u32, sc)
	),
	TP_fast_assign(
		__entry->smq_ctx = smq_ctx;
		__entry->ctx = ctx;
		__entry->handle = handle;
		__entry->sc = sc;
	),
	TP_printk("for: smq_ctx 0x%llx, ctx 0x%llx, handle 0x%x, sc 0x%x",
		__entry->smq_ctx, __entry->ctx, __entry->handle, __entry->sc)
);
/* Dumps the per-invocation fastrpc_perf counters (times in ns) */
TRACE_EVENT(fastrpc_perf_counters,
	TP_PROTO(uint32_t handle, uint32_t sc,
		uint64_t count, uint64_t flush, uint64_t map,
		uint64_t copy, uint64_t link, uint64_t getargs,
		uint64_t putargs, uint64_t invargs, uint64_t invoke,
		uint64_t tid),
	TP_ARGS(handle, sc, count, flush, map, copy, link, getargs,
		putargs, invargs, invoke, tid),
	TP_STRUCT__entry(
		__field(u32, handle)
		__field(u32, sc)
		__field(u64, count)
		__field(u64, flush)
		__field(u64, map)
		__field(u64, copy)
		__field(u64, link)
		__field(u64, getargs)
		__field(u64, putargs)
		__field(u64, invargs)
		__field(u64, invoke)
		__field(u64, tid)
	),
	TP_fast_assign(
		__entry->handle = handle;
		__entry->sc = sc;
		__entry->count = count;
		__entry->flush = flush;
		__entry->map = map;
		__entry->copy = copy;
		__entry->link = link;
		__entry->getargs = getargs;
		__entry->putargs = putargs;
		__entry->invargs = invargs;
		__entry->invoke = invoke;
		__entry->tid = tid;
	),
	TP_printk("for: handle 0x%x, sc 0x%x, count %lld, flush %lld ns, map %lld ns, copy %lld ns, link %lld ns, getargs %lld ns, putargs %lld ns, invargs %lld ns, invoke %lld ns, tid %lld",
		__entry->handle, __entry->sc, __entry->count,
		__entry->flush, __entry->map, __entry->copy, __entry->link,
		__entry->getargs, __entry->putargs, __entry->invargs,
		__entry->invoke, __entry->tid)
);
/*
 * Trace an arbitrary driver log message.
 *
 * Fix: the trusted-build branch used sizeof(message), which is the size of
 * the 'const char *' pointer — it copied a fixed 7 bytes plus NUL regardless
 * of the string's length, truncating longer messages and writing past the
 * __string() slot (which reserves only strlen(message) + 1 bytes) for
 * shorter ones. Copy exactly strlen(message) + 1 bytes instead.
 */
TRACE_EVENT(fastrpc_msg,
	TP_PROTO(const char *message),
	TP_ARGS(message),
	TP_STRUCT__entry(__string(buf, message)),
	TP_fast_assign(
#if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED)
		memcpy(__get_str(buf), (message), strlen(message) + 1);
#else
		__assign_str(buf, message);
#endif
	),
	TP_printk(" %s", __get_str(buf))
);
/*
 * Trace a dspsignal event (wait/signal/cancel) with its id, state, timeout.
 *
 * Fix: same sizeof-of-pointer bug as in the fastrpc_msg event — the
 * trusted-build branch copied sizeof(event) (pointer size) bytes instead of
 * the string's length. Copy strlen(event) + 1 bytes, which matches exactly
 * what __string() reserved.
 */
TRACE_EVENT(fastrpc_dspsignal,
	TP_PROTO(const char *event, uint32_t signal_id,
		int state, uint32_t timeout),
	TP_ARGS(event, signal_id, state, timeout),
	TP_STRUCT__entry(
		__string(buf, event)
		__field(u32, signal_id)
		__field(int, state)
		__field(u32, timeout)
	),
	TP_fast_assign(
#if IS_ENABLED(CONFIG_MSM_ADSPRPC_TRUSTED)
		memcpy(__get_str(buf), (event), strlen(event) + 1);
#else
		__assign_str(buf, event);
#endif
		__entry->signal_id = signal_id;
		__entry->state = state;
		__entry->timeout = timeout;
	),
	TP_printk("%s for sig id %u, state %d, timeout %u",
		__get_str(buf), __entry->signal_id, __entry->state, __entry->timeout)
);
#endif
/* This part must be outside protection */
#include <trace/define_trace.h>