Add samsung specific changes
@@ -226,6 +226,17 @@ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT

	  Say Y here to enable "download mode" by default.

config QTEE_SHM_BRIDGE
	bool "QTI TEE shared memory bridge"
	depends on QCOM_SCM
	default y if QCOM_SCM
	help
	  The QTEE shared memory bridge driver provides kernel APIs to share
	  memory between TrustZone and other VMs through a shared memory bridge.
	  It allows kernel clients to create and delete bridges, and to do
	  memory sub-allocation and free from the default kernel bridge
	  created by the bridge driver.

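For orientation, a kernel client of this bridge typically sub-allocates a chunk from the default kernel bridge and then references the physical address in a subsequent SCM call to TrustZone. The sketch below is illustrative only and is not part of this commit; the helper names (struct qtee_shm, qtee_shmbridge_allocate_shm(), qtee_shmbridge_free_shm()) are assumed from the downstream qtee_shmbridge API and may differ in this tree.

#include <linux/qtee_shmbridge.h>

/* Illustrative sketch: sub-allocate from the default kernel bridge (assumed API). */
static int example_share_with_qtee(size_t size)
{
	struct qtee_shm shm;	/* assumed type from linux/qtee_shmbridge.h */
	int ret;

	ret = qtee_shmbridge_allocate_shm(size, &shm);	/* assumed helper */
	if (ret)
		return ret;

	/* shm.vaddr / shm.paddr can now be referenced in an SCM call to QTEE. */

	qtee_shmbridge_free_shm(&shm);	/* assumed helper */
	return 0;
}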
config SYSFB
	bool
	select BOOT_VESA_SUPPORT

@@ -314,5 +325,6 @@ source "drivers/firmware/psci/Kconfig"
source "drivers/firmware/smccc/Kconfig"
source "drivers/firmware/tegra/Kconfig"
source "drivers/firmware/xilinx/Kconfig"
source "drivers/firmware/qcom/Kconfig"

endmenu
@@ -19,7 +19,8 @@ obj-$(CONFIG_MTK_ADSP_IPC) += mtk-adsp-ipc.o
obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
obj-$(CONFIG_QCOM_SCM) += qcom-scm.o
qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
qcom-scm-objs-$(CONFIG_QTEE_SHM_BRIDGE) += qtee_shmbridge.o
qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o $(qcom-scm-objs-y)
obj-$(CONFIG_SYSFB) += sysfb.o
obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
@@ -38,3 +39,4 @@ obj-y += psci/
obj-y += smccc/
obj-y += tegra/
obj-y += xilinx/
obj-y += qcom/
@@ -166,6 +166,52 @@ config ARM_SCMI_TRANSPORT_VIRTIO_ATOMIC_ENABLE
	  in atomic context too, at the price of using a number of busy-waiting
	  primitives all over instead. If unsure say N.

config QTI_SCMI_PMU_PROTOCOL
	tristate "Qualcomm Technologies, Inc. SCMI PMU vendor Protocol"
	depends on ARM || ARM64 || COMPILE_TEST
	depends on ARM_SCMI_PROTOCOL && QCOM_CPUCP
	help
	  System Control and Management Interface (SCMI) PMU vendor protocol.
	  This protocol provides an interface to communicate with the micro
	  controller which maintains the PMU configuration for multiple clients.

	  This driver defines the commands or message IDs used for this
	  communication and also exposes the ops used by clients.

config QTI_SCMI_VENDOR_PROTOCOL
	tristate "Qualcomm Technologies, Inc. Qcom SCMI vendor Protocol"
	depends on ARM || ARM64 || COMPILE_TEST
	depends on ARM_SCMI_PROTOCOL && QCOM_CPUCP
	help
	  System Control and Management Interface (SCMI) Qcom vendor protocol.
	  This protocol provides an interface to communicate with the micro
	  controller.

	  This driver defines the commands or message IDs used for this
	  communication and also exposes the ops used by clients.

config QTI_SCMI_C1DCVS_PROTOCOL
	tristate "Qualcomm Technologies, Inc. SCMI C1DCVS vendor Protocol"
	depends on ARM || ARM64 || COMPILE_TEST
	depends on ARM_SCMI_PROTOCOL && QCOM_CPUCP
	help
	  System Control and Management Interface (SCMI) c1dcvs vendor protocol.
	  This protocol provides an interface to communicate with the micro
	  controller which maintains the c1dcvs algorithm.

	  This driver defines the commands or message IDs used for this
	  communication and also exposes the ops used by clients.

config QTI_SCMI_PLH_PROTOCOL
	tristate "Qualcomm Technologies, Inc. SCMI PLH vendor Protocol"
	depends on ARM_SCMI_PROTOCOL && QCOM_CPUCP
	help
	  System Control and Management Interface (SCMI) plh vendor protocol.
	  This protocol provides an interface to communicate with the micro
	  controller which is executing the plh algorithm.

	  This driver defines the commands or message IDs used for this
	  communication and also exposes the ops used by clients.

endif #ARM_SCMI_PROTOCOL

config ARM_SCMI_POWER_DOMAIN
@@ -19,6 +19,10 @@ obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o

obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o
obj-$(CONFIG_QTI_SCMI_PMU_PROTOCOL) += pmu_vendor.o
obj-$(CONFIG_QTI_SCMI_C1DCVS_PROTOCOL) += c1dcvs_vendor.o
obj-$(CONFIG_QTI_SCMI_VENDOR_PROTOCOL) += qcom_scmi_vendor.o
obj-$(CONFIG_QTI_SCMI_PLH_PROTOCOL) += plh_vendor.o

ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy)
# The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame
164  drivers/firmware/arm_scmi/c1dcvs_vendor.c  (new file)
@@ -0,0 +1,164 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/scmi_c1dcvs.h>
|
||||
#include "common.h"
|
||||
|
||||
#define SCMI_MAX_RX_SIZE 128
|
||||
|
||||
enum scmi_c1dcvs_protocol_cmd {
|
||||
SET_ENABLE_C1DCVS = 11,
|
||||
GET_ENABLE_C1DCVS,
|
||||
SET_ENABLE_TRACE,
|
||||
GET_ENABLE_TRACE,
|
||||
SET_IPC_THRESH,
|
||||
GET_IPC_THRESH,
|
||||
SET_EFREQ_THRESH,
|
||||
GET_EFREQ_THRESH,
|
||||
SET_HYSTERESIS,
|
||||
GET_HYSTERESIS,
|
||||
};
|
||||
|
||||
struct c1dcvs_thresh {
|
||||
unsigned int cluster;
|
||||
unsigned int thresh;
|
||||
};
|
||||
|
||||
static int scmi_send_tunable_c1dcvs(const struct scmi_protocol_handle *ph,
|
||||
void *buf, u32 msg_id)
|
||||
{
|
||||
int ret;
|
||||
struct scmi_xfer *t;
|
||||
unsigned int *msg;
|
||||
unsigned int *src = buf;
|
||||
|
||||
ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(*msg), sizeof(*msg),
|
||||
&t);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
msg = t->tx.buf;
|
||||
*msg = cpu_to_le32(*src);
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scmi_get_tunable_c1dcvs(const struct scmi_protocol_handle *ph,
|
||||
void *buf, u32 msg_id)
|
||||
{
|
||||
int ret;
|
||||
struct scmi_xfer *t;
|
||||
struct c1dcvs_thresh *msg;
|
||||
|
||||
ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(*msg), SCMI_MAX_RX_SIZE,
|
||||
&t);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
memcpy(buf, t->rx.buf, t->rx.len);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scmi_send_thresh_c1dcvs(const struct scmi_protocol_handle *ph,
|
||||
void *buf, u32 msg_id)
|
||||
{
|
||||
int ret;
|
||||
struct scmi_xfer *t;
|
||||
struct c1dcvs_thresh *msg;
|
||||
unsigned int *src = buf;
|
||||
|
||||
ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(*msg), sizeof(*msg),
|
||||
&t);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
msg = t->tx.buf;
|
||||
msg->cluster = cpu_to_le32(src[0]);
|
||||
msg->thresh = cpu_to_le32(src[1]);
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scmi_set_enable_c1dcvs(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_send_tunable_c1dcvs(ph, buf, SET_ENABLE_C1DCVS);
|
||||
}
|
||||
static int scmi_get_enable_c1dcvs(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_get_tunable_c1dcvs(ph, buf, GET_ENABLE_C1DCVS);
|
||||
}
|
||||
static int scmi_set_enable_trace(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_send_tunable_c1dcvs(ph, buf, SET_ENABLE_TRACE);
|
||||
}
|
||||
static int scmi_get_enable_trace(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_get_tunable_c1dcvs(ph, buf, GET_ENABLE_TRACE);
|
||||
}
|
||||
static int scmi_set_ipc_thresh(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_send_thresh_c1dcvs(ph, buf, SET_IPC_THRESH);
|
||||
}
|
||||
static int scmi_get_ipc_thresh(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_get_tunable_c1dcvs(ph, buf, GET_IPC_THRESH);
|
||||
}
|
||||
static int scmi_set_efreq_thresh(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_send_thresh_c1dcvs(ph, buf, SET_EFREQ_THRESH);
|
||||
}
|
||||
static int scmi_get_efreq_thresh(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_get_tunable_c1dcvs(ph, buf, GET_EFREQ_THRESH);
|
||||
}
|
||||
static int scmi_set_hysteresis(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_send_tunable_c1dcvs(ph, buf, SET_HYSTERESIS);
|
||||
}
|
||||
static int scmi_get_hysteresis(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_get_tunable_c1dcvs(ph, buf, GET_HYSTERESIS);
|
||||
}
|
||||
|
||||
static struct scmi_c1dcvs_vendor_ops c1dcvs_config_ops = {
|
||||
.set_enable_c1dcvs = scmi_set_enable_c1dcvs,
|
||||
.get_enable_c1dcvs = scmi_get_enable_c1dcvs,
|
||||
.set_enable_trace = scmi_set_enable_trace,
|
||||
.get_enable_trace = scmi_get_enable_trace,
|
||||
.set_ipc_thresh = scmi_set_ipc_thresh,
|
||||
.get_ipc_thresh = scmi_get_ipc_thresh,
|
||||
.set_efreq_thresh = scmi_set_efreq_thresh,
|
||||
.get_efreq_thresh = scmi_get_efreq_thresh,
|
||||
.set_hysteresis = scmi_set_hysteresis,
|
||||
.get_hysteresis = scmi_get_hysteresis,
|
||||
};
|
||||
|
||||
static int scmi_c1dcvs_protocol_init(const struct scmi_protocol_handle *ph)
|
||||
{
|
||||
u32 version;
|
||||
|
||||
ph->xops->version_get(ph, &version);
|
||||
|
||||
dev_dbg(ph->dev, "version %d.%d\n",
|
||||
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct scmi_protocol scmi_c1dcvs = {
|
||||
.id = SCMI_C1DCVS_PROTOCOL,
|
||||
.owner = THIS_MODULE,
|
||||
.instance_init = &scmi_c1dcvs_protocol_init,
|
||||
.ops = &c1dcvs_config_ops,
|
||||
};
|
||||
module_scmi_protocol(scmi_c1dcvs);
|
||||
|
||||
MODULE_DESCRIPTION("SCMI C1DCVS vendor Protocol");
|
||||
MODULE_LICENSE("GPL");
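The threshold setters above expect the caller's buffer to be the two-word { cluster, thresh } pair that scmi_send_thresh_c1dcvs() marshals, while the enable/trace/hysteresis setters take a single word. A hypothetical client (not part of this commit) would obtain the ops through the usual SCMI protocol-get path; the device pointer and the values below are assumptions.

static int example_c1dcvs_client(struct scmi_device *sdev)
{
	const struct scmi_c1dcvs_vendor_ops *ops;
	struct scmi_protocol_handle *ph;
	unsigned int enable = 1;
	unsigned int thresh[2] = { 0, 75 };	/* { cluster, threshold }, illustrative values */
	int ret;

	ops = sdev->handle->devm_protocol_get(sdev, SCMI_C1DCVS_PROTOCOL, &ph);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	ret = ops->set_enable_c1dcvs(ph, &enable);
	if (ret)
		return ret;

	return ops->set_ipc_thresh(ph, thresh);
}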
|
175  drivers/firmware/arm_scmi/plh_vendor.c  (new file)
@@ -0,0 +1,175 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/scmi_plh.h>
|
||||
#include "common.h"
|
||||
|
||||
#define SCMI_VENDOR_MSG_MAX_TX_SIZE (100) /* in bytes */
|
||||
#define SCMI_VENDOR_MSG_START (3) /* MSG 3-15 can be used for spl purpose */
|
||||
#define SCMI_VENDOR_MSG_SPLH_START (16) /* Each PLH module to use MAX 16 MSG */
|
||||
#define SCMI_VENDOR_MSG_SPLH_END (31)
|
||||
#define SCMI_VENDOR_MSG_LPLH_START (32) /* Each PLH module to use MAX 16 MSG */
|
||||
#define SCMI_VENDOR_MSG_LPLH_END (47)
|
||||
|
||||
enum scmi_plh_protocol_cmd {
|
||||
PERF_LOCK_SCROLL_INIT_IPC_FREQ_TBL_MSG_ID = SCMI_VENDOR_MSG_SPLH_START,
|
||||
PERF_LOCK_SCROLL_START_MSG_ID,
|
||||
PERF_LOCK_SCROLL_STOP_MSG_ID,
|
||||
PERF_LOCK_SCROLL_SET_SAMPLE_MS,
|
||||
PERF_LOCK_SCROLL_SET_LOG_LEVEL,
|
||||
PERF_LOCK_SCROLL_MAX_MSG_ID = SCMI_VENDOR_MSG_SPLH_END,
|
||||
PERF_LOCK_LAUNCH_INIT_IPC_FREQ_TBL_MSG_ID = SCMI_VENDOR_MSG_LPLH_START,
|
||||
PERF_LOCK_LAUNCH_START_MSG_ID,
|
||||
PERF_LOCK_LAUNCH_STOP_MSG_ID,
|
||||
PERF_LOCK_LAUNCH_SET_SAMPLE_MS,
|
||||
PERF_LOCK_LAUNCH_SET_LOG_LEVEL,
|
||||
PERF_LOCK_LAUNCH_MAX_MSG_ID = SCMI_VENDOR_MSG_LPLH_END,
|
||||
};
|
||||
|
||||
|
||||
static int scmi_plh_init_ipc_freq_tbl(const struct scmi_protocol_handle *ph,
|
||||
u16 *p_init_args, u16 init_len, enum plh_features feature)
|
||||
{
|
||||
uint32_t *msg, msg_size, msg_val, align_init_len = init_len;
|
||||
struct scmi_xfer *t;
|
||||
int ret, i = 0;
|
||||
|
||||
if (init_len % 2)
|
||||
align_init_len += 1; /* align in multiple of u32 */
|
||||
|
||||
msg_size = align_init_len * sizeof(*p_init_args);
|
||||
|
||||
if (msg_size > SCMI_VENDOR_MSG_MAX_TX_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
if (feature == PERF_LOCK_SCROLL)
|
||||
ret = ph->xops->xfer_get_init(ph, PERF_LOCK_SCROLL_INIT_IPC_FREQ_TBL_MSG_ID,
|
||||
(msg_size), sizeof(uint32_t), &t);
|
||||
else if (feature == PERF_LOCK_LAUNCH)
|
||||
ret = ph->xops->xfer_get_init(ph, PERF_LOCK_LAUNCH_INIT_IPC_FREQ_TBL_MSG_ID,
|
||||
(msg_size), sizeof(uint32_t), &t);
|
||||
else
|
||||
return -EINVAL;
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
msg = t->tx.buf;
|
||||
|
||||
for (i = 0; i < init_len/2 ; i++) {
|
||||
msg_val = *p_init_args++;
|
||||
msg_val |= ((*p_init_args++) << 16);
|
||||
*msg++ = cpu_to_le32(msg_val);
|
||||
}
|
||||
|
||||
if (init_len % 2)
|
||||
*msg = cpu_to_le32(*p_init_args);
|
||||
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scmi_plh_set_u16_val(const struct scmi_protocol_handle *ph,
|
||||
u16 val, u32 msg_id)
|
||||
{
|
||||
int ret = 0;
|
||||
struct scmi_xfer *t;
|
||||
uint32_t *msg;
|
||||
|
||||
ret = ph->xops->xfer_get_init(ph, msg_id,
|
||||
sizeof(*msg), sizeof(uint32_t), &t);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
msg = t->tx.buf;
|
||||
*msg = cpu_to_le32(val);
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scmi_plh_start_cmd(const struct scmi_protocol_handle *ph,
|
||||
u16 value, enum plh_features feature)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (feature == PERF_LOCK_SCROLL)
|
||||
ret = scmi_plh_set_u16_val(ph, value, PERF_LOCK_SCROLL_START_MSG_ID);
|
||||
else if (feature == PERF_LOCK_LAUNCH)
|
||||
ret = scmi_plh_set_u16_val(ph, value, PERF_LOCK_LAUNCH_START_MSG_ID);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scmi_plh_stop_cmd(const struct scmi_protocol_handle *ph, enum plh_features feature)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (feature == PERF_LOCK_SCROLL)
|
||||
ret = scmi_plh_set_u16_val(ph, 0, PERF_LOCK_SCROLL_STOP_MSG_ID);
|
||||
else if (feature == PERF_LOCK_LAUNCH)
|
||||
ret = scmi_plh_set_u16_val(ph, 0, PERF_LOCK_LAUNCH_STOP_MSG_ID);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scmi_plh_set_sample_ms(const struct scmi_protocol_handle *ph,
|
||||
u16 sample_ms, enum plh_features feature)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (feature == PERF_LOCK_SCROLL)
|
||||
ret = scmi_plh_set_u16_val(ph, sample_ms, PERF_LOCK_SCROLL_SET_SAMPLE_MS);
|
||||
else if (feature == PERF_LOCK_LAUNCH)
|
||||
ret = scmi_plh_set_u16_val(ph, sample_ms, PERF_LOCK_LAUNCH_SET_SAMPLE_MS);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scmi_plh_set_log_level(const struct scmi_protocol_handle *ph,
|
||||
u16 log_level, enum plh_features feature)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (feature == PERF_LOCK_SCROLL)
|
||||
ret = scmi_plh_set_u16_val(ph, log_level, PERF_LOCK_SCROLL_SET_LOG_LEVEL);
|
||||
else if (feature == PERF_LOCK_LAUNCH)
|
||||
ret = scmi_plh_set_u16_val(ph, log_level, PERF_LOCK_LAUNCH_SET_LOG_LEVEL);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct scmi_plh_vendor_ops plh_proto_ops = {
|
||||
.init_plh_ipc_freq_tbl = scmi_plh_init_ipc_freq_tbl,
|
||||
.start_plh = scmi_plh_start_cmd,
|
||||
.stop_plh = scmi_plh_stop_cmd,
|
||||
.set_plh_sample_ms = scmi_plh_set_sample_ms,
|
||||
.set_plh_log_level = scmi_plh_set_log_level,
|
||||
};
|
||||
|
||||
static int scmi_plh_vendor_protocol_init(const struct scmi_protocol_handle *ph)
|
||||
{
|
||||
u32 version;
|
||||
|
||||
ph->xops->version_get(ph, &version);
|
||||
|
||||
dev_dbg(ph->dev, "PLH version %d.%d\n",
|
||||
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct scmi_protocol scmi_plh_vendor = {
|
||||
.id = SCMI_PROTOCOL_PLH,
|
||||
.owner = THIS_MODULE,
|
||||
.instance_init = &scmi_plh_vendor_protocol_init,
|
||||
.ops = &plh_proto_ops,
|
||||
};
|
||||
module_scmi_protocol(scmi_plh_vendor);
|
||||
|
||||
MODULE_DESCRIPTION("SCMI plh vendor Protocol");
|
||||
MODULE_LICENSE("GPL");
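scmi_plh_init_ipc_freq_tbl() above packs two u16 table entries into each u32 message word, low halfword first, pads the final word when the entry count is odd, and rejects tables larger than SCMI_VENDOR_MSG_MAX_TX_SIZE. A hypothetical caller (not part of this commit) passes the table flat; the values and the source of the ops pointer are assumptions.

static int example_plh_client(const struct scmi_plh_vendor_ops *plh_ops,
			      struct scmi_protocol_handle *ph)
{
	static u16 splh_freq_tbl[] = { 150, 1200, 200, 1800, 250, 2400 };	/* illustrative pairs */
	int ret;

	ret = plh_ops->init_plh_ipc_freq_tbl(ph, splh_freq_tbl,
					     ARRAY_SIZE(splh_freq_tbl), PERF_LOCK_SCROLL);
	if (ret)
		return ret;

	return plh_ops->start_plh(ph, 1, PERF_LOCK_SCROLL);	/* start value 1 is illustrative */
}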
|
107  drivers/firmware/arm_scmi/pmu_vendor.c  (new file)
@@ -0,0 +1,107 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/scmi_pmu.h>
|
||||
#include <soc/qcom/pmu_lib.h>
|
||||
#include "common.h"
|
||||
|
||||
enum scmi_c1dcvs_protocol_cmd {
|
||||
SET_PMU_MAP = 11,
|
||||
SET_ENABLE_TRACE,
|
||||
SET_ENABLE_CACHING,
|
||||
};
|
||||
|
||||
struct pmu_map_msg {
|
||||
uint8_t hw_cntrs[MAX_NUM_CPUS][MAX_CPUCP_EVT];
|
||||
};
|
||||
|
||||
static int scmi_send_pmu_map(const struct scmi_protocol_handle *ph,
|
||||
void *buf, u32 msg_id)
|
||||
{
|
||||
int ret, i, j;
|
||||
struct scmi_xfer *t;
|
||||
struct pmu_map_msg *msg;
|
||||
uint8_t *src = buf;
|
||||
|
||||
ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(*msg), sizeof(*msg),
|
||||
&t);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
msg = t->tx.buf;
|
||||
|
||||
for (i = 0; i < MAX_NUM_CPUS; i++)
|
||||
for (j = 0; j < MAX_CPUCP_EVT; j++)
|
||||
msg->hw_cntrs[i][j] = *((src + i * MAX_CPUCP_EVT) + j);
|
||||
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scmi_send_tunable_pmu(const struct scmi_protocol_handle *ph,
|
||||
void *buf, u32 msg_id)
|
||||
{
|
||||
int ret;
|
||||
struct scmi_xfer *t;
|
||||
unsigned int *msg;
|
||||
unsigned int *src = buf;
|
||||
|
||||
ret = ph->xops->xfer_get_init(ph, msg_id, sizeof(*msg), sizeof(*msg),
|
||||
&t);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
msg = t->tx.buf;
|
||||
*msg = cpu_to_le32(*src);
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int scmi_pmu_map(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_send_pmu_map(ph, buf, SET_PMU_MAP);
|
||||
}
|
||||
|
||||
static int scmi_set_enable_trace(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_send_tunable_pmu(ph, buf, SET_ENABLE_TRACE);
|
||||
}
|
||||
|
||||
static int scmi_set_caching_enable(const struct scmi_protocol_handle *ph, void *buf)
|
||||
{
|
||||
return scmi_send_tunable_pmu(ph, buf, SET_ENABLE_CACHING);
|
||||
}
|
||||
|
||||
static struct scmi_pmu_vendor_ops pmu_config_ops = {
|
||||
.set_pmu_map = scmi_pmu_map,
|
||||
.set_enable_trace = scmi_set_enable_trace,
|
||||
.set_cache_enable = scmi_set_caching_enable,
|
||||
};
|
||||
|
||||
static int scmi_pmu_protocol_init(const struct scmi_protocol_handle *ph)
|
||||
{
|
||||
u32 version;
|
||||
|
||||
ph->xops->version_get(ph, &version);
|
||||
|
||||
dev_dbg(ph->dev, "version %d.%d\n",
|
||||
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct scmi_protocol scmi_pmu = {
|
||||
.id = SCMI_PMU_PROTOCOL,
|
||||
.owner = THIS_MODULE,
|
||||
.instance_init = &scmi_pmu_protocol_init,
|
||||
.ops = &pmu_config_ops,
|
||||
};
|
||||
module_scmi_protocol(scmi_pmu);
|
||||
|
||||
MODULE_DESCRIPTION("SCMI PMU vendor Protocol");
|
||||
MODULE_LICENSE("GPL");
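scmi_send_pmu_map() above copies the caller's buffer element by element into a MAX_NUM_CPUS x MAX_CPUCP_EVT array of u8 counter indices, so row i of the flat source buffer describes CPU i. A hypothetical caller (not part of this commit) could look like the following; the counter index values are placeholders.

static int example_pmu_client(const struct scmi_pmu_vendor_ops *pmu_ops,
			      struct scmi_protocol_handle *ph)
{
	static uint8_t hw_cntrs[MAX_NUM_CPUS][MAX_CPUCP_EVT];

	hw_cntrs[0][0] = 0x11;	/* CPU0, event slot 0: placeholder counter index */
	hw_cntrs[0][1] = 0x08;	/* CPU0, event slot 1: placeholder counter index */

	return pmu_ops->set_pmu_map(ph, hw_cntrs);
}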
|
156  drivers/firmware/arm_scmi/qcom_scmi_vendor.c  (new file)
@@ -0,0 +1,156 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include "common.h"
|
||||
#include <linux/qcom_scmi_vendor.h>
|
||||
|
||||
#define EXTENDED_MSG_ID 0
|
||||
#define SCMI_MAX_TX_RX_SIZE 128
|
||||
#define PROTOCOL_PAYLOAD_SIZE 16
|
||||
#define SET_PARAM 0x10
|
||||
#define GET_PARAM 0x11
|
||||
#define START_ACTIVITY 0x12
|
||||
#define STOP_ACTIVITY 0x13
|
||||
|
||||
static int qcom_scmi_set_param(const struct scmi_protocol_handle *ph, void *buf, u64 algo_str,
|
||||
u32 param_id, size_t size)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
struct scmi_xfer *t;
|
||||
uint32_t *msg;
|
||||
|
||||
if (!ph || !ph->xops)
|
||||
return ret;
|
||||
ret = ph->xops->xfer_get_init(ph, SET_PARAM, size + PROTOCOL_PAYLOAD_SIZE,
|
||||
SCMI_MAX_TX_RX_SIZE, &t);
|
||||
if (ret)
|
||||
return ret;
|
||||
msg = t->tx.buf;
|
||||
*msg++ = cpu_to_le32(EXTENDED_MSG_ID);
|
||||
*msg++ = cpu_to_le32(algo_str & GENMASK(31, 0));
|
||||
*msg++ = cpu_to_le32((algo_str & GENMASK(63, 32)) >> 32);
|
||||
*msg++ = cpu_to_le32(param_id);
|
||||
memcpy(msg, buf, size);
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qcom_scmi_get_param(const struct scmi_protocol_handle *ph, void *buf, u64 algo_str,
|
||||
u32 param_id, size_t tx_size, size_t rx_size)
|
||||
{
|
||||
|
||||
int ret = -EINVAL;
|
||||
struct scmi_xfer *t;
|
||||
uint32_t *msg;
|
||||
|
||||
if (!ph || !ph->xops || !buf)
|
||||
return ret;
|
||||
ret = ph->xops->xfer_get_init(ph, GET_PARAM, tx_size + PROTOCOL_PAYLOAD_SIZE,
|
||||
SCMI_MAX_TX_RX_SIZE, &t);
|
||||
if (ret)
|
||||
return ret;
|
||||
msg = t->tx.buf;
|
||||
*msg++ = cpu_to_le32(EXTENDED_MSG_ID);
|
||||
*msg++ = cpu_to_le32(algo_str & GENMASK(31, 0));
|
||||
*msg++ = cpu_to_le32((algo_str & GENMASK(63, 32)) >> 32);
|
||||
*msg++ = cpu_to_le32(param_id);
|
||||
memcpy(msg, buf, tx_size);
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
if (t->rx.len > rx_size) {
|
||||
pr_err("SCMI received buffer size %lu is more than expected size %lu\n",
|
||||
t->rx.len, rx_size);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
memcpy(buf, t->rx.buf, t->rx.len);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qcom_scmi_start_activity(const struct scmi_protocol_handle *ph,
|
||||
void *buf, u64 algo_str, u32 param_id, size_t size)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
struct scmi_xfer *t;
|
||||
uint32_t *msg;
|
||||
|
||||
if (!ph || !ph->xops)
|
||||
return ret;
|
||||
ret = ph->xops->xfer_get_init(ph, START_ACTIVITY, size + PROTOCOL_PAYLOAD_SIZE,
|
||||
SCMI_MAX_TX_RX_SIZE, &t);
|
||||
if (ret)
|
||||
return ret;
|
||||
msg = t->tx.buf;
|
||||
*msg++ = cpu_to_le32(EXTENDED_MSG_ID);
|
||||
*msg++ = cpu_to_le32(algo_str & GENMASK(31, 0));
|
||||
*msg++ = cpu_to_le32((algo_str & GENMASK(63, 32)) >> 32);
|
||||
*msg++ = cpu_to_le32(param_id);
|
||||
memcpy(msg, buf, size);
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int qcom_scmi_stop_activity(const struct scmi_protocol_handle *ph, void *buf, u64 algo_str,
|
||||
u32 param_id, size_t size)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
struct scmi_xfer *t;
|
||||
uint32_t *msg;
|
||||
|
||||
if (!ph || !ph->xops)
|
||||
return ret;
|
||||
ret = ph->xops->xfer_get_init(ph, STOP_ACTIVITY, size + PROTOCOL_PAYLOAD_SIZE,
|
||||
SCMI_MAX_TX_RX_SIZE, &t);
|
||||
if (ret)
|
||||
return ret;
|
||||
msg = t->tx.buf;
|
||||
*msg++ = cpu_to_le32(EXTENDED_MSG_ID);
|
||||
*msg++ = cpu_to_le32(algo_str & GENMASK(31, 0));
|
||||
*msg++ = cpu_to_le32((algo_str & GENMASK(63, 32)) >> 32);
|
||||
*msg++ = cpu_to_le32(param_id);
|
||||
memcpy(msg, buf, size);
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
ph->xops->xfer_put(ph, t);
|
||||
return ret;
|
||||
}
|
||||
static struct qcom_scmi_vendor_ops qcom_proto_ops = {
|
||||
.set_param = qcom_scmi_set_param,
|
||||
.get_param = qcom_scmi_get_param,
|
||||
.start_activity = qcom_scmi_start_activity,
|
||||
.stop_activity = qcom_scmi_stop_activity,
|
||||
};
|
||||
|
||||
static int qcom_scmi_vendor_protocol_init(const struct scmi_protocol_handle *ph)
|
||||
{
|
||||
u32 version;
|
||||
int ret;
|
||||
|
||||
ret = ph->xops->version_get(ph, &version);
|
||||
if (ret == -ETIMEDOUT)
|
||||
ret = -EPROBE_DEFER;
|
||||
if (ret) {
|
||||
dev_err(ph->dev, "Unable to get version\n");
|
||||
return dev_err_probe(ph->dev, ret, "Unable to get version\n");
|
||||
}
|
||||
|
||||
dev_dbg(ph->dev, "qcom scmi version %d.%d\n",
|
||||
PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct scmi_protocol qcom_scmi_vendor = {
|
||||
.id = QCOM_SCMI_VENDOR_PROTOCOL,
|
||||
.owner = THIS_MODULE,
|
||||
.instance_init = &qcom_scmi_vendor_protocol_init,
|
||||
.ops = &qcom_proto_ops,
|
||||
};
|
||||
module_scmi_protocol(qcom_scmi_vendor);
|
||||
|
||||
MODULE_SOFTDEP("pre: qcom_cpucp");
|
||||
MODULE_DESCRIPTION("qcom scmi vendor Protocol");
|
||||
MODULE_LICENSE("GPL");
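Every message built above starts with the same 16-byte header: EXTENDED_MSG_ID, the 64-bit algo_str split into two little-endian words, and the param_id, followed by the caller's payload. A hypothetical client (not part of this commit) is sketched below; the algorithm tag and parameter values are made up for illustration.

#define EXAMPLE_ALGO_STR 0x4f4d4544ULL	/* ASCII "DEMO" packed LSB first, illustrative only */

static int example_vendor_client(const struct qcom_scmi_vendor_ops *ops,
				 struct scmi_protocol_handle *ph)
{
	u32 val = 1;	/* payload, copied verbatim after the header */

	return ops->set_param(ph, &val, EXAMPLE_ALGO_STR,
			      0x20 /* param_id, illustrative */, sizeof(val));
}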
|
@@ -3,3 +3,12 @@
config FW_CS_DSP
	tristate
	default n

# SPDX-License-Identifier: GPL-2.0-only
comment "Cirrus firmware configs"

config CIRRUS_FIRMWARE_CL_DSP
	tristate "Cirrus Logic Haptics DSP driver"
	help
	  This driver is used to handle firmware loading
	  and configuration for Cirrus Logic Haptic devices.
@@ -1,3 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
obj-$(CONFIG_FW_CS_DSP) += cs_dsp.o

# SPDX-License-Identifier: GPL-2.0

obj-$(CONFIG_CIRRUS_FIRMWARE_CL_DSP) += cl_dsp.o cl_dsp-debugfs.o
521  drivers/firmware/cirrus/cl_dsp-debugfs.c  (new file)
@@ -0,0 +1,521 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
//
|
||||
// cl_dsp-debugfs.c -- DSP Control for non-ALSA Cirrus Logic Devices
|
||||
//
|
||||
// Copyright 2021 Cirrus Logic, Inc.
|
||||
//
|
||||
// Author: Fred Treven <fred.treven@cirrus.com>
|
||||
|
||||
#include <linux/firmware/cirrus/cl_dsp.h>
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
|
||||
static inline u32 host_buffer_field_reg(struct cl_dsp_logger *dl,
|
||||
unsigned long offset)
|
||||
{
|
||||
return (u32)(CL_DSP_HALO_XMEM_UNPACKED24_BASE +
|
||||
((dl->host_buf_ptr + offset) * CL_DSP_BYTES_PER_WORD));
|
||||
}
|
||||
|
||||
static inline u32 host_buffer_data_reg(struct cl_dsp_logger *dl, int offset)
|
||||
{
|
||||
return (u32)(CL_DSP_HALO_XMEM_UNPACKED24_BASE +
|
||||
((dl->host_buf_base + offset) * CL_DSP_BYTES_PER_WORD));
|
||||
}
|
||||
|
||||
static int cl_dsp_host_buffer_field_read(struct cl_dsp_debugfs *db,
|
||||
unsigned long field_offset, u32 *data)
|
||||
{
|
||||
struct regmap *regmap = db->core->regmap;
|
||||
__be32 raw;
|
||||
u32 reg;
|
||||
int ret;
|
||||
|
||||
reg = host_buffer_field_reg(&db->dl, field_offset);
|
||||
|
||||
ret = regmap_raw_read(regmap, reg, &raw, sizeof(raw));
|
||||
if (ret) {
|
||||
dev_err(db->core->dev, "Failed to get raw host buffer data\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
*data = CL_DSP_HOST_BUFFER_DATA_MASK & be32_to_cpu(raw);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cl_dsp_host_buffer_field_write(struct cl_dsp_debugfs *db,
|
||||
unsigned long field_offset, u32 data)
|
||||
{
|
||||
struct regmap *regmap = db->core->regmap;
|
||||
struct device *dev = db->core->dev;
|
||||
int ret;
|
||||
u32 reg;
|
||||
|
||||
reg = host_buffer_field_reg(&db->dl, field_offset);
|
||||
|
||||
ret = regmap_write(regmap, reg, data);
|
||||
if (ret)
|
||||
dev_err(dev, "Failed to set host buffer data: %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t cl_dsp_debugfs_logger_en_read(struct file *file,
|
||||
char __user *user_buf, size_t count, loff_t *ppos)
|
||||
{
|
||||
struct cl_dsp_debugfs *db = file->private_data;
|
||||
struct regmap *regmap = db->core->regmap;
|
||||
char str[CL_DSP_DEBUGFS_TRACE_LOG_STRING_SIZE];
|
||||
u32 reg, val;
|
||||
ssize_t ret;
|
||||
|
||||
ret = cl_dsp_get_reg(db->core, "ENABLED", CL_DSP_XM_UNPACKED_TYPE,
|
||||
db->dl.algo_id, ®);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = pm_runtime_get_sync(db->core->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(db->core->dev, "PM Runtime Resume Failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = regmap_read(regmap, reg, &val);
|
||||
if (ret) {
|
||||
dev_err(db->core->dev, "Failed to get host buffer status\n");
|
||||
goto pm_exit;
|
||||
}
|
||||
|
||||
ret = snprintf(str, CL_DSP_DEBUGFS_TRACE_LOG_STRING_SIZE, "%d\n", val);
|
||||
if (ret <= 0) {
|
||||
dev_err(db->core->dev, "Failed to parse host buffer status\n");
|
||||
goto pm_exit;
|
||||
}
|
||||
|
||||
ret = simple_read_from_buffer(user_buf, count, ppos, str, strlen(str));
|
||||
|
||||
pm_exit:
|
||||
pm_runtime_mark_last_busy(db->core->dev);
|
||||
pm_runtime_put_autosuspend(db->core->dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t cl_dsp_debugfs_logger_en_write(struct file *file,
|
||||
const char __user *user_buf, size_t count, loff_t *ppos)
|
||||
{
|
||||
struct cl_dsp_debugfs *db = file->private_data;
|
||||
struct regmap *regmap = db->core->regmap;
|
||||
struct device *dev = db->core->dev;
|
||||
u32 reg, val;
|
||||
ssize_t ret;
|
||||
char *str;
|
||||
|
||||
str = kzalloc(count, GFP_KERNEL);
|
||||
if (!str)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = simple_write_to_buffer(str, count, ppos, user_buf, count);
|
||||
if (ret <= 0) {
|
||||
dev_err(dev, "Failed to write debugfs data\n");
|
||||
goto exit_free;
|
||||
}
|
||||
|
||||
ret = kstrtou32(str, 10, &val);
|
||||
if (ret)
|
||||
goto exit_free;
|
||||
|
||||
if (val != CL_DSP_DEBUGFS_TRACE_LOG_DISABLE &&
|
||||
val != CL_DSP_DEBUGFS_TRACE_LOG_ENABLE) {
|
||||
dev_err(dev, "Invalid trace log write: %u\n", val);
|
||||
ret = -EINVAL;
|
||||
goto exit_free;
|
||||
}
|
||||
|
||||
ret = cl_dsp_get_reg(db->core, "ENABLED", CL_DSP_XM_UNPACKED_TYPE,
|
||||
db->dl.algo_id, ®);
|
||||
if (ret)
|
||||
goto exit_free;
|
||||
|
||||
ret = pm_runtime_get_sync(dev);
|
||||
if (ret < 0) {
|
||||
dev_err(db->core->dev, "PM Runtime Resume Failed\n");
|
||||
goto exit_free;
|
||||
}
|
||||
|
||||
ret = regmap_write(regmap, reg, val);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to set trace log status\n");
|
||||
goto exit_pm;
|
||||
}
|
||||
|
||||
if (val == CL_DSP_DEBUGFS_TRACE_LOG_DISABLE) {
|
||||
/* Set next_read_index to -1 to reset logger */
|
||||
ret = cl_dsp_host_buffer_field_write(db,
|
||||
HOST_BUFFER_FIELD(next_read_index),
|
||||
CL_DSP_HOST_BUFFER_READ_INDEX_RESET);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to reset event logger\n");
|
||||
goto exit_pm;
|
||||
}
|
||||
|
||||
db->dl.buf_data_size = 0;
|
||||
kfree(db->dl.buf_data);
|
||||
}
|
||||
|
||||
exit_pm:
|
||||
pm_runtime_mark_last_busy(dev);
|
||||
pm_runtime_put_autosuspend(dev);
|
||||
|
||||
exit_free:
|
||||
kfree(str);
|
||||
|
||||
return ret ? ret : count;
|
||||
}
|
||||
|
||||
static ssize_t cl_dsp_debugfs_timestamp_shift_read(struct file *file,
|
||||
char __user *user_buf, size_t count, loff_t *ppos)
|
||||
{
|
||||
struct cl_dsp_debugfs *db = file->private_data;
|
||||
struct regmap *regmap = db->core->regmap;
|
||||
char str[CL_DSP_DEBUGFS_TRACE_LOG_STRING_SIZE];
|
||||
u32 reg, val;
|
||||
ssize_t ret;
|
||||
|
||||
ret = cl_dsp_get_reg(db->core, "TIMESTAMP_SHIFT",
|
||||
CL_DSP_XM_UNPACKED_TYPE, db->dl.algo_id, ®);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = pm_runtime_get_sync(db->core->dev);
|
||||
if (ret < 0) {
|
||||
dev_err(db->core->dev, "PM Runtime Resume Failed\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = regmap_read(regmap, reg, &val);
|
||||
if (ret) {
|
||||
dev_err(db->core->dev, "Failed to get timestamp shift\n");
|
||||
goto pm_exit;
|
||||
}
|
||||
|
||||
ret = snprintf(str, CL_DSP_DEBUGFS_TRACE_LOG_STRING_SIZE, "%d\n", val);
|
||||
if (ret <= 0) {
|
||||
dev_err(db->core->dev, "Failed to parse host buffer status\n");
|
||||
goto pm_exit;
|
||||
}
|
||||
|
||||
ret = simple_read_from_buffer(user_buf, count, ppos, str, strlen(str));
|
||||
|
||||
pm_exit:
|
||||
pm_runtime_mark_last_busy(db->core->dev);
|
||||
pm_runtime_put_autosuspend(db->core->dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int cl_dsp_host_buffer_data_read(struct cl_dsp_debugfs *db,
|
||||
u32 read_index, u32 num_words)
|
||||
{
|
||||
u32 start_reg, offset = db->dl.buf_data_size;
|
||||
struct regmap *regmap = db->core->regmap;
|
||||
struct device *dev = db->core->dev;
|
||||
size_t new_data_size;
|
||||
u32 *new_data;
|
||||
int ret;
|
||||
|
||||
start_reg = host_buffer_data_reg(&db->dl, (unsigned long)read_index);
|
||||
|
||||
new_data_size = db->dl.buf_data_size + num_words;
|
||||
new_data = krealloc(db->dl.buf_data, new_data_size * sizeof(u32), GFP_KERNEL);
|
||||
if (IS_ERR_OR_NULL(new_data)) {
|
||||
dev_err(dev, "Failed to re-allocate buffer data space\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
db->dl.buf_data_size = new_data_size;
|
||||
|
||||
db->dl.buf_data = new_data;
|
||||
|
||||
ret = regmap_bulk_read(regmap, start_reg, db->dl.buf_data + offset,
|
||||
num_words);
|
||||
if (ret)
|
||||
dev_err(dev, "Failed to get host buffer data\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int cl_dsp_logger_update(struct cl_dsp_debugfs *db)
|
||||
{
|
||||
struct cl_dsp_logger *dl = &db->dl;
|
||||
struct device *dev = db->core->dev;
|
||||
u32 n_read_index, n_write_index, num_words;
|
||||
u32 nirq, irq, error_code;
|
||||
int ret;
|
||||
|
||||
/* Check if interrupt was asserted due to an error */
|
||||
ret = cl_dsp_host_buffer_field_read(db, HOST_BUFFER_FIELD(error),
|
||||
&error_code);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (error_code) {
|
||||
if (error_code != CL_DSP_HOST_BUFFER_ERROR_OVERFLOW) {
|
||||
dev_err(dev, "Fatal Host Buffer Error with code 0x%X\n",
|
||||
error_code);
|
||||
return -ENOTRECOVERABLE;
|
||||
}
|
||||
dev_warn(dev, "Data lost from Host Buffer Overflow\n");
|
||||
}
|
||||
|
||||
/* Check if next read index is != -1 in order to continue */
|
||||
ret = cl_dsp_host_buffer_field_read(db,
|
||||
HOST_BUFFER_FIELD(next_read_index), &n_read_index);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (n_read_index == CL_DSP_HOST_BUFFER_READ_INDEX_RESET) {
|
||||
dev_err(dev, "Host Buffer Not Initialized\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
ret = cl_dsp_host_buffer_field_read(db, HOST_BUFFER_FIELD(irq_count),
|
||||
&nirq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = cl_dsp_host_buffer_field_read(db, HOST_BUFFER_FIELD(irq_ack),
|
||||
&irq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = cl_dsp_host_buffer_field_read(
|
||||
db, HOST_BUFFER_FIELD(next_write_index), &n_write_index);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (n_write_index < n_read_index)
|
||||
num_words = (n_write_index + dl->host_buf_size_words) -
|
||||
n_read_index;
|
||||
else
|
||||
num_words = n_write_index - n_read_index;
|
||||
|
||||
/* Get all messages in buffer */
|
||||
ret = cl_dsp_host_buffer_data_read(db, n_read_index, num_words);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Set next_read_index to next_write_index */
|
||||
ret = cl_dsp_host_buffer_field_write(db,
|
||||
HOST_BUFFER_FIELD(next_read_index), n_write_index - 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Reset irq_ack by writing irq_count | 0x1 */
|
||||
ret = cl_dsp_host_buffer_field_write(db, HOST_BUFFER_FIELD(irq_ack),
|
||||
nirq | CL_DSP_HOST_BUFFER_IRQ_MASK);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = cl_dsp_host_buffer_field_read(db,
|
||||
HOST_BUFFER_FIELD(irq_ack), &irq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cl_dsp_logger_update);
|
||||
|
||||
static int cl_dsp_debugfs_logger_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
struct cl_dsp_debugfs *db;
|
||||
int ret;
|
||||
|
||||
ret = simple_open(inode, file);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
db = file->private_data;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t cl_dsp_debugfs_logger_read(struct file *file,
|
||||
char __user *user_buf, size_t count,
|
||||
loff_t *ppos)
|
||||
{
|
||||
struct cl_dsp_debugfs *db = file->private_data;
|
||||
struct cl_dsp_logger *dl = &db->dl;
|
||||
struct device *dev = db->core->dev;
|
||||
ssize_t ret, buf_str_size;
|
||||
char *str, *buf_str;
|
||||
int i;
|
||||
|
||||
if (dl->buf_data_size == 0)
|
||||
return -ENODATA;
|
||||
|
||||
buf_str_size =
|
||||
CL_DSP_HOST_BUFFER_DATA_SLOT_SIZE * dl->buf_data_size;
|
||||
buf_str = kzalloc(buf_str_size, GFP_KERNEL);
|
||||
if (!buf_str)
|
||||
return -ENOMEM;
|
||||
|
||||
str = kzalloc(CL_DSP_HOST_BUFFER_DATA_SLOT_SIZE, GFP_KERNEL);
|
||||
if (!str) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free2;
|
||||
}
|
||||
|
||||
for (i = 0; i < dl->buf_data_size; i++) {
|
||||
ret = snprintf(str, CL_DSP_HOST_BUFFER_DATA_SLOT_SIZE, "%08X ",
|
||||
dl->buf_data[i]);
|
||||
if (ret <= 0) {
|
||||
dev_err(dev, "Failed to get host buffer data string\n");
|
||||
goto err_free1;
|
||||
}
|
||||
|
||||
strncat(buf_str, str, CL_DSP_HOST_BUFFER_DATA_SLOT_SIZE);
|
||||
}
|
||||
|
||||
ret = simple_read_from_buffer(user_buf, count, ppos, buf_str,
|
||||
strlen(buf_str));
|
||||
|
||||
err_free1:
|
||||
kfree(str);
|
||||
err_free2:
|
||||
kfree(buf_str);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct {
|
||||
const char *name;
|
||||
const struct file_operations fops;
|
||||
} cl_dsp_debugfs_fops[] = {
|
||||
{
|
||||
.name = "log_data",
|
||||
.fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = cl_dsp_debugfs_logger_open,
|
||||
.read = cl_dsp_debugfs_logger_read,
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "logger_en",
|
||||
.fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = simple_open,
|
||||
.read = cl_dsp_debugfs_logger_en_read,
|
||||
.write = cl_dsp_debugfs_logger_en_write,
|
||||
},
|
||||
},
|
||||
{
|
||||
.name = "timestamp_shift",
|
||||
.fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = simple_open,
|
||||
.read = cl_dsp_debugfs_timestamp_shift_read,
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
static int cl_dsp_logger_init(struct cl_dsp_debugfs *db)
|
||||
{
|
||||
struct regmap *regmap = db->core->regmap;
|
||||
struct cl_dsp *dsp = db->core;
|
||||
u32 reg;
|
||||
int ret;
|
||||
|
||||
ret = cl_dsp_get_reg(dsp, "EVENT_LOG_HEADER", CL_DSP_XM_UNPACKED_TYPE,
|
||||
db->dl.algo_id, ®);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = regmap_read(regmap, reg, &db->dl.host_buf_ptr);
|
||||
if (ret) {
|
||||
dev_err(db->core->dev, "Failed to get host buffer address\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = cl_dsp_host_buffer_field_read(db, HOST_BUFFER_FIELD(buf1_base),
|
||||
&db->dl.host_buf_base);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = cl_dsp_host_buffer_field_read(db,
|
||||
HOST_BUFFER_FIELD(buf_total_size),
|
||||
&db->dl.host_buf_size_words);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = cl_dsp_host_buffer_field_read(db,
|
||||
HOST_BUFFER_FIELD(high_water_mark),
|
||||
&db->dl.high_watermark);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Set next_read_index to -1 to reset logger */
|
||||
ret = cl_dsp_host_buffer_field_write(db,
|
||||
HOST_BUFFER_FIELD(next_read_index),
|
||||
CL_DSP_HOST_BUFFER_READ_INDEX_RESET);
|
||||
if (ret)
|
||||
dev_err(db->core->dev, "Failed to reset event logger\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
struct cl_dsp_debugfs *cl_dsp_debugfs_create(struct cl_dsp *dsp,
|
||||
struct dentry *parent_node,
|
||||
u32 event_log_algo_id)
|
||||
{
|
||||
struct cl_dsp_debugfs *db;
|
||||
int ret, i;
|
||||
|
||||
if (!dsp || !parent_node)
|
||||
return NULL;
|
||||
|
||||
db = kzalloc(sizeof(*db), GFP_KERNEL);
|
||||
if (!db)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
db->core = dsp;
|
||||
db->debugfs_root = parent_node ? parent_node : NULL;
|
||||
|
||||
db->debugfs_node = debugfs_create_dir("cl_dsp", db->debugfs_root);
|
||||
if (IS_ERR(db->debugfs_node)) {
|
||||
ret = PTR_ERR(db->debugfs_node);
|
||||
kfree(db);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
for (i = 0; i < CL_DSP_DEBUGFS_NUM_CONTROLS; i++)
|
||||
debugfs_create_file(cl_dsp_debugfs_fops[i].name,
|
||||
CL_DSP_DEBUGFS_RW_FILE_MODE, db->debugfs_node,
|
||||
db, &cl_dsp_debugfs_fops[i].fops);
|
||||
|
||||
db->dl.algo_id = event_log_algo_id;
|
||||
|
||||
ret = cl_dsp_logger_init(db);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
debugfs_create_u32("high_watermark", CL_DSP_DEBUGFS_RO_FILE_MODE,
|
||||
db->debugfs_node, &db->dl.high_watermark);
|
||||
|
||||
return db;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cl_dsp_debugfs_create);
|
||||
|
||||
void cl_dsp_debugfs_destroy(struct cl_dsp_debugfs *db)
|
||||
{
|
||||
if (IS_ERR_OR_NULL(db))
|
||||
return;
|
||||
|
||||
debugfs_remove_recursive(db->debugfs_node);
|
||||
kfree(db);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cl_dsp_debugfs_destroy);
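The exported pair above covers the whole lifecycle: the parent driver creates the debugfs node set once it knows the firmware's event-log algorithm ID, calls cl_dsp_logger_update() whenever the DSP raises its log interrupt, and tears everything down on remove. A hypothetical caller (not part of this commit) is sketched below; the priv/dev fields and the algorithm ID are assumptions.

/* In probe, after cl_dsp core setup: */
priv->db = cl_dsp_debugfs_create(priv->dsp, priv->debugfs_root, event_log_algo_id);
if (IS_ERR_OR_NULL(priv->db))
	dev_warn(dev, "DSP debugfs unavailable\n");

/* In the firmware event-log interrupt handler: */
cl_dsp_logger_update(priv->db);

/* On driver remove: */
cl_dsp_debugfs_destroy(priv->db);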
|
||||
|
||||
#endif /* CONFIG_DEBUG_FS */
|
||||
|
||||
MODULE_DESCRIPTION("CL DSP Debugfs Driver");
|
||||
MODULE_AUTHOR("Fred Treven, Cirrus Logic Inc. <fred.treven@cirrus.com>");
|
||||
MODULE_LICENSE("GPL");
|
1236  drivers/firmware/cirrus/cl_dsp.c  (new file; diff suppressed because it is too large)
7  drivers/firmware/qcom/Kconfig  (new file)
@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.rst.
#

source "drivers/firmware/qcom/si_core/Kconfig"
6  drivers/firmware/qcom/Makefile  (new file)
@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# Makefile for the linux kernel.
#

obj-y += si_core/
33  drivers/firmware/qcom/si_core/Kconfig  (new file)
@@ -0,0 +1,33 @@
# SPDX-License-Identifier: GPL-2.0-only

config QCOM_SI_CORE
	tristate "Secure QTEE Communication Support"
	depends on QCOM_SCM
	help
	  Enable the SI-CORE driver, which supports capability-based secure
	  communication between QTEE and the VM. Using SI-CORE, the kernel can
	  issue calls to QTEE or TAs to request a service, or expose services
	  to QTEE and TAs. SI-CORE implements the necessary marshaling of
	  messages with QTEE.

config QCOM_SI_CORE_WQ
	bool "Use direct invocation for RELEASE requests"
	depends on QCOM_SI_CORE
	help
	  Use a kernel thread to issue RELEASE requests to QTEE and TAs
	  instead of using asynchronous messages. This may slow down the
	  system and is prone to failure, as it uses direct invocation,
	  which requires extra resources from QTEE.

	  Enable this only if QTEE does not support asynchronous messages.

config QCOM_SI_CORE_MEM_OBJECT
	tristate "Add support for memory object"
	depends on QCOM_SI_CORE
	depends on QTEE_SHM_BRIDGE
	help
	  Enable support for memory objects. This provides an interface
	  to export or share memory with QTEE. It allows kernel clients
	  to create memory objects and do the necessary mapping and unmapping
	  using the QTEE shared memory bridge driver.
10  drivers/firmware/qcom/si_core/Makefile  (new file)
@@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only

si-core-wq-$(CONFIG_QCOM_SI_CORE_WQ) += si_core_wq.o
si_core_module-objs := qcom_scm_invoke.o si_core_async.o si_core.o si_core_adci.o $(si-core-wq-y)
obj-$(CONFIG_QCOM_SI_CORE) += si_core_module.o

# Add si_core extensions here!

obj-$(CONFIG_QCOM_SI_CORE_MEM_OBJECT) += mem_object.o
mem_object-objs := xts/mem-object.o
52  drivers/firmware/qcom/si_core/qcom_scm_invoke.c  (new file)
@@ -0,0 +1,52 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/firmware/qcom/qcom_scm.h>

#include "si_core.h"

/* 6 Sec. retry seems reasonable!? */
#define SCM_EBUSY_WAIT_MS 30
#define SCM_EBUSY_MAX_RETRY 200

int si_object_invoke_ctx_invoke(struct si_object_invoke_ctx *oic,
	int *result, u64 *response_type, unsigned int *data)
{
	int ret, i = 0;

	/* TODO. buffers always coherent!? */

	do {
		/* Direct invocation of callback!? */
		if (!(oic->flags & OIC_FLAG_BUSY)) {
			ret = qcom_scm_invoke_smc(oic->in.paddr,
						  oic->in.msg.size,
						  oic->out.paddr,
						  oic->out.msg.size,
						  result,
						  response_type,
						  data);

		} else {
			ret = qcom_scm_invoke_callback_response(oic->out.paddr,
								oic->out.msg.size,
								result,
								response_type,
								data);
		}

		if (ret != -EBUSY)
			break;

		msleep(SCM_EBUSY_WAIT_MS);

	} while (++i < SCM_EBUSY_MAX_RETRY);

	if (ret)
		pr_err("QTEE returned with %d!\n", ret);

	return ret;
}
1243  drivers/firmware/qcom/si_core/si_core.c  (new file; diff suppressed because it is too large)
194  drivers/firmware/qcom/si_core/si_core.h  (new file)
@@ -0,0 +1,194 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef __SI_CORE_H__
|
||||
#define __SI_CORE_H__
|
||||
|
||||
#include <linux/qtee_shmbridge.h>
|
||||
#include <linux/firmware/qcom/si_object.h>
|
||||
|
||||
#undef pr_fmt
|
||||
#define pr_fmt(fmt) "si-core: %s: " fmt, __func__
|
||||
|
||||
/* QTEE object ID API. */
|
||||
|
||||
enum si_object_type si_object_type(unsigned int object_id);
|
||||
|
||||
/* 'get_object_id' allocates a QTEE handler for a si_object. */
|
||||
/* '__put_object_id' erases the QTEE handler. */
|
||||
/* 'qtee_get_si_object' returns si_object for a QTEE handler and increase the refcount. */
|
||||
|
||||
int get_object_id(struct si_object *object, unsigned int *object_id);
|
||||
void __put_object_id(unsigned int object_id);
|
||||
struct si_object *qtee_get_si_object(unsigned int object_id);
|
||||
|
||||
int si_object_invoke_ctx_invoke(struct si_object_invoke_ctx *oic, int *result,
|
||||
u64 *response_type, unsigned int *data);
|
||||
|
||||
#ifdef CONFIG_QCOM_SI_CORE_WQ
|
||||
int init_si_core_wq(void);
|
||||
void destroy_si_core_wq(void);
|
||||
#else
|
||||
static inline int init_si_core_wq(void) { return 0; }
|
||||
static inline void destroy_si_core_wq(void) { }
|
||||
#endif /* CONFIG_QCOM_SI_CORE_WQ */
|
||||
|
||||
void release_user_object(struct si_object *object);
|
||||
|
||||
/* ASYNC message management API. */
|
||||
|
||||
void __append__async_reqs(struct si_object_invoke_ctx *oic);
|
||||
void __revive__async_queued_reqs(struct si_object_invoke_ctx *oic);
|
||||
void __release__async_queued_reqs(struct si_object_invoke_ctx *oic);
|
||||
void __fetch__async_reqs(struct si_object_invoke_ctx *oic);
|
||||
|
||||
/* ''QTEE'' related definitions. */
|
||||
|
||||
#define QTEE_RESULT_INBOUND_REQ_NEEDED 3
|
||||
|
||||
#define INVOKE_MESSAGE_ALIGN_BYTES 8U
|
||||
|
||||
#define QTEE_OBJ_NULL (0U)
|
||||
#define QTEE_OBJ_ROOT (1U)
|
||||
|
||||
#define QTEE_OBJ_NS_BIT (1U << 31)
|
||||
|
||||
#define align_offset(o) PTR_ALIGN((o), INVOKE_MESSAGE_ALIGN_BYTES)
|
||||
|
||||
/* Definitions from QTEE as part of the transport protocol. */
|
||||
/* 'qtee_smcinvoke_msg_arg', 'struct qtee_object_invoke', and 'struct qtee_callback'. */
|
||||
|
||||
union qtee_smcinvoke_msg_arg {
|
||||
struct {
|
||||
u32 offset;
|
||||
u32 size;
|
||||
} b;
|
||||
u32 o;
|
||||
};
|
||||
|
||||
/* Check if a buffer argument 'arg' can fit in a message of size 'sz'. */
|
||||
#define arg_in_bounds(arg, sz) \
|
||||
(((arg)->b.offset < (sz)) && ((arg)->b.size < ((sz) - (arg)->b.offset)))
|
||||
|
||||
struct qtee_object_invoke {
|
||||
u32 cxt;
|
||||
u32 op;
|
||||
u32 counts;
|
||||
union qtee_smcinvoke_msg_arg args[];
|
||||
};
|
||||
|
||||
struct qtee_callback {
|
||||
u32 result;
|
||||
u32 cxt;
|
||||
u32 op;
|
||||
u32 counts;
|
||||
union qtee_smcinvoke_msg_arg args[];
|
||||
};
|
||||
|
||||
#define OFFSET_TO_PTR(m, off) ((void *)&((char *)(m))[(off)])
|
||||
|
||||
/* Offset in the message for the beginning of buffer argument's contents. */
|
||||
#define OFFSET_TO_BUFFER_ARGS(m, n) \
|
||||
align_offset(offsetof(typeof(*m), args) + (n * sizeof((m)->args[0])))
|
||||
|
||||
#define counts_num__bi_(x) (((x) >> 0) & 0xFU)
|
||||
#define counts_num__bo_(x) (((x) >> 4) & 0xFU)
|
||||
#define counts_num__oi_(x) (((x) >> 8) & 0xFU)
|
||||
#define counts_num__oo_(x) (((x) >> 12) & 0xFU)
|
||||
|
||||
#define counts_idx__bi_(x) 0U
|
||||
#define counts_idx__bo_(x) (counts_idx__bi_(x) + counts_num__bi_(x))
|
||||
#define counts_idx__oi_(x) (counts_idx__bo_(x) + counts_num__bo_(x))
|
||||
#define counts_idx__oo_(x) (counts_idx__oi_(x) + counts_num__oi_(x))
|
||||
#define counts_total(x) (counts_idx__oo_(x) + counts_num__oo_(x))
|
||||
|
||||
#define FOR_ARGS(i, c, type) \
|
||||
for (i = counts_idx##type(c); i < (counts_idx##type(c) + counts_num##type(c)); i++)
|
||||
|
||||
#define for_each_input_buffer(i, c) FOR_ARGS(i, c, __bi_)
|
||||
#define for_each_output_buffer(i, c) FOR_ARGS(i, c, __bo_)
|
||||
#define for_each_input_object(i, c) FOR_ARGS(i, c, __oi_)
|
||||
#define for_each_output_object(i, c) FOR_ARGS(i, c, __oo_)
|
||||
|
||||
#define bi_shift 0
|
||||
#define ob_shift 4
|
||||
#define io_shift 8
|
||||
#define oo_shift 12
|
||||
|
||||
static inline void init_oi_msg(struct qtee_object_invoke *msg,
|
||||
u32 cxt, u32 op, int ib, int ob, int io, int oo)
|
||||
{
|
||||
u32 counts = 0;
|
||||
|
||||
counts |= ((oo - io) & 0xFU) << oo_shift; /* No. Output Objects. */
|
||||
counts |= ((io - ob) & 0xFU) << io_shift; /* No. Input Objects. */
|
||||
counts |= ((ob - ib) & 0xFU) << ob_shift; /* No. Output Buffer. */
|
||||
counts |= (ib & 0xFU) << bi_shift; /* No. Input Buffer. */
|
||||
|
||||
msg->cxt = cxt;
|
||||
msg->op = op;
|
||||
msg->counts = counts;
|
||||
}
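/*
 * Illustrative note (not part of this commit): the ib/ob/io/oo parameters of
 * init_oi_msg() are cumulative argument indices, not per-kind counts. For a
 * message with 2 input buffers, 1 output buffer, 1 input object and no output
 * objects, the call is init_oi_msg(msg, cxt, op, 2, 3, 4, 4), which packs
 * counts = (0 << 12) | (1 << 8) | (1 << 4) | 2, so for_each_input_buffer()
 * walks args[0..1], for_each_output_buffer() walks args[2], and
 * for_each_input_object() walks args[3].
 */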
|
||||
|
||||
static inline void err_to_qtee_err(struct qtee_callback *cb_msg, int err)
|
||||
{
|
||||
|
||||
/* Generic error codes */
|
||||
|
||||
#define OBJECT_OK 0 /* non-specific success code */
|
||||
#define OBJECT_ERROR 1 /* non-specific error */
|
||||
#define OBJECT_ERROR_INVALID 2 /* unsupported/unrecognized request */
|
||||
#define OBJECT_ERROR_SIZE_IN 3 /* supplied buffer/string too large */
|
||||
#define OBJECT_ERROR_SIZE_OUT 4 /* supplied output buffer too small */
|
||||
|
||||
#define OBJECT_ERROR_USERBASE 10 /* start of user-defined error range */
|
||||
|
||||
/* Transport layer error codes */
|
||||
|
||||
#define OBJECT_ERROR_DEFUNCT -90 /* object no longer exists */
|
||||
#define OBJECT_ERROR_ABORT -91 /* calling thread must exit */
|
||||
#define OBJECT_ERROR_BADOBJ -92 /* invalid object context */
|
||||
#define OBJECT_ERROR_NOSLOTS -93 /* caller's object table full */
|
||||
#define OBJECT_ERROR_MAXARGS -94 /* too many args */
|
||||
#define OBJECT_ERROR_MAXDATA -95 /* buffers too large */
|
||||
#define OBJECT_ERROR_UNAVAIL -96 /* the request could not be processed */
|
||||
#define OBJECT_ERROR_KMEM -97 /* kernel out of memory */
|
||||
#define OBJECT_ERROR_REMOTE -98 /* local method sent to remote object */
|
||||
#define OBJECT_ERROR_BUSY -99 /* Object is busy */
|
||||
#define OBJECT_ERROR_TIMEOUT -103 /* Call Back Object invocation timed out. */
|
||||
|
||||
switch (err) {
|
||||
case 0:
|
||||
cb_msg->result = OBJECT_OK;
|
||||
|
||||
break;
|
||||
case -ENOMEM:
|
||||
cb_msg->result = OBJECT_ERROR_KMEM;
|
||||
|
||||
break;
|
||||
case -ENODEV:
|
||||
cb_msg->result = OBJECT_ERROR_DEFUNCT;
|
||||
|
||||
break;
|
||||
case -ENOSPC:
|
||||
case -EBUSY:
|
||||
cb_msg->result = OBJECT_ERROR_BUSY;
|
||||
|
||||
break;
|
||||
case -EBADF:
|
||||
cb_msg->result = OBJECT_ERROR_UNAVAIL;
|
||||
|
||||
break;
|
||||
case -EINVAL:
|
||||
cb_msg->result = OBJECT_ERROR_INVALID;
|
||||
|
||||
break;
|
||||
default:
|
||||
/* Positive err. are sent back as-is, negatives are transport related. */
|
||||
cb_msg->result = (err >= OBJECT_OK) ? err : OBJECT_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* __SI_CORE_H__ */
|
72  drivers/firmware/qcom/si_core/si_core_adci.c  (new file)
@@ -0,0 +1,72 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/kthread.h>
|
||||
|
||||
#include "si_core.h"
|
||||
|
||||
static struct task_struct *adci_task;
|
||||
static struct si_object_invoke_ctx oic;
|
||||
|
||||
static void wait_to_die(void)
|
||||
{
|
||||
for (;;) {
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
if (kthread_should_stop())
|
||||
break;
|
||||
|
||||
schedule();
|
||||
}
|
||||
|
||||
__set_current_state(TASK_RUNNING);
|
||||
}
|
||||
|
||||
static int adci_fn(void *unused)
|
||||
{
|
||||
int ret, result = 0;
|
||||
struct si_arg args[1] = { 0 };
|
||||
|
||||
/* IClientEnv_OP_adciAccept is 8. */
|
||||
ret = si_object_do_invoke(&oic, ROOT_SI_OBJECT, 8, args, &result);
|
||||
|
||||
if (ret)
|
||||
pr_err("unable to register ADCI thread (%d).\n", ret);
|
||||
else if (result == OBJECT_ERROR_INVALID)
|
||||
pr_err("ADCI feature is not supported on this chipsets.\n");
|
||||
|
||||
pr_debug("exited.\n");
|
||||
|
||||
/* Let's wait for someone to collect our result. */
|
||||
if (!kthread_should_stop())
|
||||
wait_to_die();
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
void adci_start(void)
|
||||
{
|
||||
adci_task = kthread_run(adci_fn, NULL, "adci_thread");
|
||||
|
||||
/* Who cares if it fails?! */
|
||||
if (IS_ERR(adci_task))
|
||||
pr_err("failed (%ld).\n", PTR_ERR(adci_task));
|
||||
}
|
||||
|
||||
int adci_shutdown(void)
|
||||
{
|
||||
int ret, result = 0;
|
||||
struct si_arg args[1] = { 0 };
|
||||
|
||||
/* IClientEnv_OP_adciShutdown is 9. */
|
||||
ret = si_object_do_invoke(&oic, ROOT_SI_OBJECT, 9, args, &result);
|
||||
|
||||
if (ret || result)
|
||||
pr_err("failed (ret = %d, %d).\n", ret, result);
|
||||
|
||||
/* If IClientEnv_OP_adciShutdown fails, we may get stuck here. */
|
||||
kthread_stop(adci_task);
|
||||
|
||||
return ret;
|
||||
}
|
12  drivers/firmware/qcom/si_core/si_core_adci.h  (new file)
@@ -0,0 +1,12 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef __SI_CORE_ADCI_H__
|
||||
#define __SI_CORE_ADCI_H__
|
||||
|
||||
void adci_start(void);
|
||||
int adci_shutdown(void);
|
||||
|
||||
#endif /* __SI_CORE_ADCI_H__ */
|
481  drivers/firmware/qcom/si_core/si_core_async.c  (new file)
@@ -0,0 +1,481 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
#include "si_core.h"
|
||||
|
||||
/* Processing of the async messages happens without any ordering. */
|
||||
|
||||
struct si_object *erase_si_object(u32 idx);
|
||||
u32 global_version;
|
||||
|
||||
struct async_msg_info {
|
||||
u32 owner;
|
||||
u32 out;
|
||||
u64 user_data[];
|
||||
};
|
||||
|
||||
/* Async handlers and providers. */
|
||||
struct async_msg {
|
||||
struct {
|
||||
u32 version; /* Protocol version: top 16b major, lower 16b minor. */
|
||||
u32 op; /* Async operation. */
|
||||
} header;
|
||||
|
||||
/* Format of the Async data field is defined by the specified operation. */
|
||||
|
||||
union {
|
||||
struct {
|
||||
u32 count; /* Number of objects that should be released. */
|
||||
u32 obj[];
|
||||
} op_release;
|
||||
|
||||
/* This is a generic structure. */
|
||||
struct {
|
||||
u32 count; /* SET to one; We create per-object 'owner' messages. */
|
||||
struct async_msg_info async_info;
|
||||
} info;
|
||||
};
|
||||
};
|
||||
|
||||
/* Async Operations and header information. */
|
||||
|
||||
#define ASYNC_HEADER_SIZE sizeof(((struct async_msg *)(0))->header)
|
||||
|
||||
/* ASYNC_OP_x: operation.
|
||||
* ASYNC_OP_x_HDR_SIZE: header size for the operation.
|
||||
* ASYNC_OP_x_SIZE: size of each entry in a message for the operation.
|
||||
* ASYNC_OP_x_MSG_SIZE: size of a message with n entries.
|
||||
*/
|
||||
|
||||
#define ASYNC_OP_USER_HDR_SIZE offsetof(struct async_msg, info.async_info.user_data)
|
||||
|
||||
#define ASYNC_OP_RELEASE SI_OBJECT_OP_RELEASE /* Added in minor version 0x0000. */
|
||||
#define ASYNC_OP_RELEASE_HDR_SIZE offsetof(struct async_msg, op_release.obj)
|
||||
#define ASYNC_OP_RELEASE_SIZE sizeof(((struct async_msg *)(0))->op_release.obj[0])
|
||||
#define ASYNC_OP_RELEASE_MSG_SIZE(n) \
|
||||
(ASYNC_OP_RELEASE_HDR_SIZE + ((n) * ASYNC_OP_RELEASE_SIZE))
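As a hedged illustration of the sizing macros above, the helper below computes how many release entries fit in a given async buffer; the helper name is hypothetical and does not appear in this file.

/* Sketch only: maximum number of object handles that fit in 'size' bytes
 * of async buffer for an ASYNC_OP_RELEASE message, per the macros above.
 */
static inline size_t async_release_max_entries(size_t size)
{
	if (size < ASYNC_OP_RELEASE_MSG_SIZE(1))
		return 0;

	return (size - ASYNC_OP_RELEASE_HDR_SIZE) / ASYNC_OP_RELEASE_SIZE;
}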
|
||||
|
||||
int get_async_proto_version(void)
|
||||
{
|
||||
return global_version;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(get_async_proto_version);
|
||||
|
||||
/* 'async_si_buffer' returns the available async buffer in the output buffer. */
|
||||
|
||||
static struct si_buffer async_si_buffer(struct si_object_invoke_ctx *oic)
|
||||
{
|
||||
int i;
|
||||
size_t offset;
|
||||
|
||||
struct qtee_callback *msg = (struct qtee_callback *)oic->out.msg.addr;
|
||||
|
||||
if (!(oic->flags & OIC_FLAG_BUSY))
|
||||
return oic->out.msg;
|
||||
|
||||
/* Async requests are appended to the output buffer after the CB message. */
|
||||
|
||||
offset = OFFSET_TO_BUFFER_ARGS(msg, counts_total(msg->counts));
|
||||
|
||||
for_each_input_buffer(i, msg->counts)
|
||||
offset += align_offset(msg->args[i].b.size);
|
||||
|
||||
for_each_output_buffer(i, msg->counts)
|
||||
offset += align_offset(msg->args[i].b.size);
|
||||
|
||||
if (oic->out.msg.size > offset) {
|
||||
return (struct si_buffer)
|
||||
{
|
||||
{ oic->out.msg.addr + offset },
|
||||
oic->out.msg.size - offset
|
||||
};
|
||||
}
|
||||
|
||||
pr_err("no space left for async messages! or malformed message.\n");
|
||||
|
||||
return (struct si_buffer) { { 0 }, 0 };
|
||||
}
|
||||
|
||||
#ifndef CONFIG_QCOM_SI_CORE_WQ
|
||||
|
||||
/* 'async_ops_mutex' serialize the construction of async message. */
|
||||
static DEFINE_MUTEX(async_ops_mutex);
|
||||
|
||||
/* List of objects that should be released. */
|
||||
/* All objects in 'release_ops_list' should have refcount set to ''zero''. */
|
||||
static LIST_HEAD(release_ops_list);
|
||||
|
||||
/* 'release_user_object' puts the object on the release pending list.
|
||||
 * 'async_release_provider' removes objects from the release pending list and constructs
|
||||
* the async message.
|
||||
 * 'revive_async_queued_reqs' moves objects back to the release pending list.
|
||||
 * 'destroy_user_object' is called to finish the job after QTEE has acknowledged the release.
|
||||
*/
|
||||
|
||||
void release_user_object(struct si_object *object)
|
||||
{
|
||||
/* Use async message for RELEASE request.
|
||||
* We free the object in '__release__async_queued_reqs' when appropriate.
|
||||
*/
|
||||
|
||||
pr_debug("%s queued for async release.\n", si_object_name(object));
|
||||
|
||||
mutex_lock(&async_ops_mutex);
|
||||
list_add_tail(&object->node, &release_ops_list);
|
||||
mutex_unlock(&async_ops_mutex);
|
||||
}
|
||||
|
||||
static size_t async_release_provider(struct si_object_invoke_ctx *oic,
|
||||
struct async_msg *async_msg, size_t size)
|
||||
{
|
||||
int i = 0;
|
||||
struct si_object *object, *t;
|
||||
|
||||
/* We need space for at least a single entry. */
|
||||
if (size < ASYNC_OP_RELEASE_MSG_SIZE(1))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&async_ops_mutex);
|
||||
list_for_each_entry_safe(object, t, &release_ops_list, node) {
|
||||
async_msg->op_release.obj[i] = object->info.object_ptr;
|
||||
|
||||
/* Move object to the oic's object_head.
|
||||
* We only free objects in '__release__async_queued_reqs' if QTEE
|
||||
 * acknowledges the release; otherwise, objects are moved back to 'release_ops_list'
|
||||
* in '__revive__async_queued_reqs'.
|
||||
*/
|
||||
|
||||
list_move(&object->node, &oic->objects_head);
|
||||
|
||||
if (size - ASYNC_OP_RELEASE_SIZE < ASYNC_OP_RELEASE_MSG_SIZE(++i))
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&async_ops_mutex);
|
||||
|
||||
/* INITIALIZE the header and message. */
|
||||
|
||||
if (i) {
|
||||
async_msg->header.version = 0x00010002U;
|
||||
async_msg->header.op = ASYNC_OP_RELEASE;
|
||||
async_msg->op_release.count = i;
|
||||
}
|
||||
|
||||
return (i) ? ASYNC_OP_RELEASE_MSG_SIZE(i) : 0;
|
||||
}
|
||||
|
||||
static void revive_async_queued_reqs(struct si_object *object)
|
||||
{
|
||||
if (!kref_read(&object->refcount)) {
|
||||
|
||||
/* Move it back to 'release_ops_list' and retry again. */
|
||||
|
||||
mutex_lock(&async_ops_mutex);
|
||||
list_add(&object->node, &release_ops_list);
|
||||
mutex_unlock(&async_ops_mutex);
|
||||
|
||||
} else {
|
||||
/* Two Puts, one for a get in 'call_prepare' and */
|
||||
put_si_object(object);
|
||||
|
||||
/* ... try to RELEASE the object. */
|
||||
put_si_object(object);
|
||||
}
|
||||
}
|
||||
|
||||
static void destroy_user_object(struct si_object *object)
|
||||
{
|
||||
if (!kref_read(&object->refcount)) {
|
||||
kfree(object->name);
|
||||
|
||||
/* QTEE release should be done! free the object. */
|
||||
free_si_object(object);
|
||||
} else {
|
||||
|
||||
/* Put object we got in 'call_prepare'. */
|
||||
put_si_object(object);
|
||||
}
|
||||
}
|
||||
|
||||
ssize_t release_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
struct si_object *object;
|
||||
size_t len = 0;
|
||||
|
||||
mutex_lock(&async_ops_mutex);
|
||||
list_for_each_entry(object, &release_ops_list, node)
|
||||
len += scnprintf(buf + len, PAGE_SIZE - len, "%s\n", si_object_name(object));
|
||||
mutex_unlock(&async_ops_mutex);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
#else /* CONFIG_QCOM_SI_CORE_WQ */
|
||||
static size_t async_release_provider(struct si_object_invoke_ctx *oic,
|
||||
struct async_msg *async_msg, size_t size)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void revive_async_queued_reqs(struct si_object *object)
|
||||
{
|
||||
/* Two Puts, one for a get in 'call_prepare' and */
|
||||
put_si_object(object);
|
||||
|
||||
/* ... try to RELEASE the object. */
|
||||
put_si_object(object);
|
||||
}
|
||||
|
||||
static void destroy_user_object(struct si_object *object)
|
||||
{
|
||||
/* Put object we got in 'call_prepare'. */
|
||||
put_si_object(object);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_QCOM_SI_CORE_WQ */
|
||||
|
||||
/* Object prepare. */
|
||||
|
||||
static void call_prepare(struct si_object_invoke_ctx *oic,
|
||||
unsigned int object_id, struct si_buffer *async_buffer)
|
||||
{
|
||||
struct async_msg *async_msg = async_buffer->addr;
|
||||
|
||||
switch (si_object_type(object_id)) {
|
||||
case SI_OT_CB_OBJECT: {
|
||||
struct si_object *object, *t_object;
|
||||
struct si_arg args[3] = { 0 };
|
||||
|
||||
/* We are sure 'async_buffer' has enough space for a header. */
|
||||
|
||||
args[0].b.addr = async_buffer->addr + ASYNC_OP_USER_HDR_SIZE;
|
||||
args[0].b.size = async_buffer->size - ASYNC_OP_USER_HDR_SIZE;
|
||||
args[0].type = SI_AT_OB;
|
||||
args[1].type = SI_AT_OO;
|
||||
|
||||
object = qtee_get_si_object(object_id);
|
||||
|
||||
if (!object)
|
||||
break;
|
||||
|
||||
if (object->ops->prepare) {
|
||||
unsigned long op = object->ops->prepare(object, args);
|
||||
|
||||
if (op != SI_OBJECT_OP_NO_OP) {
|
||||
t_object = args[1].o;
|
||||
|
||||
/* Object's provider has done some preparation on the object. */
|
||||
|
||||
async_msg->info.async_info.owner = object_id;
|
||||
|
||||
if (typeof_si_object(t_object) != SI_OT_NULL) {
|
||||
if (get_object_id(t_object,
|
||||
&async_msg->info.async_info.out)) {
|
||||
put_si_object(t_object);
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
/* We temporarily keep si_object for ourselves; get one. */
|
||||
get_si_object(t_object);
|
||||
|
||||
list_add_tail(&t_object->node, &oic->objects_head);
|
||||
}
|
||||
|
||||
async_msg->info.count = 1;
|
||||
async_msg->header.version = 0x00010002U;
|
||||
async_msg->header.op = op;
|
||||
|
||||
/* Move forward. */
|
||||
|
||||
async_buffer->addr += align_offset(args[0].b.size +
|
||||
ASYNC_OP_USER_HDR_SIZE);
|
||||
async_buffer->size -= align_offset(args[0].b.size +
|
||||
ASYNC_OP_USER_HDR_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
put_si_object(object);
|
||||
}
|
||||
|
||||
break;
|
||||
case SI_OT_USER:
|
||||
case SI_OT_NULL:
|
||||
default:
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void prepare_objects(struct si_object_invoke_ctx *oic, struct si_buffer async_buffer)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!(oic->flags & OIC_FLAG_BUSY)) {
|
||||
/* Use input message buffer in 'oic'; Process input objects. */
|
||||
|
||||
struct qtee_object_invoke *msg = (struct qtee_object_invoke *)oic->in.msg.addr;
|
||||
|
||||
for_each_input_object(i, msg->counts) {
|
||||
if (async_buffer.size < ASYNC_OP_USER_HDR_SIZE)
|
||||
break;
|
||||
|
||||
call_prepare(oic, msg->args[i].o, &async_buffer);
|
||||
}
|
||||
|
||||
} else {
|
||||
/* Use output message buffer in 'oic'; Process output objects. */
|
||||
|
||||
struct qtee_callback *msg = (struct qtee_callback *)oic->out.msg.addr;
|
||||
|
||||
for_each_output_object(i, msg->counts) {
|
||||
if (async_buffer.size < ASYNC_OP_USER_HDR_SIZE)
|
||||
break;
|
||||
|
||||
call_prepare(oic, msg->args[i].o, &async_buffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* '__append__async_reqs',
|
||||
* '__revive__async_queued_reqs', and
|
||||
* '__release__async_queued_reqs'.
|
||||
*/
|
||||
|
||||
/* '__append__async_reqs' is a provider dispatcher (from si_core to QTEE). */
|
||||
|
||||
void __append__async_reqs(struct si_object_invoke_ctx *oic)
|
||||
{
|
||||
size_t size;
|
||||
|
||||
struct si_buffer async_buffer = async_si_buffer(oic);
|
||||
|
||||
pr_debug("%u.\n", oic->context_id);
|
||||
|
||||
/* Process RELEASE requests first. */
|
||||
size = async_release_provider(oic, async_buffer.addr, async_buffer.size);
|
||||
|
||||
/* Use whatever buffer is left for the remaining async messages. */
|
||||
|
||||
async_buffer.addr += align_offset(size);
|
||||
async_buffer.size -= align_offset(size);
|
||||
|
||||
prepare_objects(oic, async_buffer);
|
||||
}
|
||||
|
||||
void __revive__async_queued_reqs(struct si_object_invoke_ctx *oic)
|
||||
{
|
||||
struct si_object *object, *t;
|
||||
|
||||
pr_debug("%u.\n", oic->context_id);
|
||||
|
||||
/* Something went wrong; QTEE did not receive the async messages. */
|
||||
|
||||
list_for_each_entry_safe(object, t, &oic->objects_head, node) {
|
||||
list_del(&object->node);
|
||||
|
||||
revive_async_queued_reqs(object);
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&oic->objects_head);
|
||||
}
|
||||
|
||||
void __release__async_queued_reqs(struct si_object_invoke_ctx *oic)
|
||||
{
|
||||
struct si_object *object, *t;
|
||||
|
||||
pr_debug("%u.\n", oic->context_id);
|
||||
|
||||
list_for_each_entry_safe(object, t, &oic->objects_head, node) {
|
||||
list_del_init(&object->node);
|
||||
|
||||
destroy_user_object(object);
|
||||
}
|
||||
}
|
||||
|
||||
static size_t async_release_handler(struct si_object_invoke_ctx *oic,
|
||||
struct async_msg *async_msg, size_t size)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* We need space for at least a single entry. */
|
||||
if (size < ASYNC_OP_RELEASE_MSG_SIZE(1))
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < async_msg->op_release.count; i++) {
|
||||
struct si_object *object;
|
||||
|
||||
/* Remove the 'object' from 'xa_si_objects' so that the 'object_id'
|
||||
* becomes invalid for further use. However, call 'put_si_object'
|
||||
* to schedule the actual release if there is no user.
|
||||
*/
|
||||
|
||||
object = erase_si_object(async_msg->op_release.obj[i]);
|
||||
|
||||
pr_debug("%s (refs: %u).\n", si_object_name(object), kref_read(&object->refcount));
|
||||
|
||||
put_si_object(object);
|
||||
}
|
||||
|
||||
return ASYNC_OP_RELEASE_MSG_SIZE(i);
|
||||
}
|
||||
|
||||
/* '__fetch__async_reqs' is a handler dispatcher (from QTEE to si_core). */
|
||||
|
||||
void __fetch__async_reqs(struct si_object_invoke_ctx *oic)
|
||||
{
|
||||
size_t consumed, used = 0;
|
||||
|
||||
struct si_buffer async_buffer = async_si_buffer(oic);
|
||||
|
||||
pr_debug("%u.\n", oic->context_id);
|
||||
|
||||
while (async_buffer.size - used > ASYNC_HEADER_SIZE) {
|
||||
struct async_msg *async_msg = (struct async_msg *)(async_buffer.addr + used);
|
||||
|
||||
/* QTEE assumes unused buffer is set to zero. */
|
||||
if (!async_msg->header.version)
|
||||
goto out;
|
||||
|
||||
switch (async_msg->header.op) {
|
||||
case ASYNC_OP_RELEASE:
|
||||
global_version = async_msg->header.version;
|
||||
consumed = async_release_handler(oic,
|
||||
async_msg, async_buffer.size - used);
|
||||
|
||||
break;
|
||||
default: /* Unsupported operations. */
|
||||
consumed = 0;
|
||||
}
|
||||
|
||||
used += align_offset(consumed);
|
||||
|
||||
if (!consumed) {
|
||||
pr_err("Drop async buffer (context_id %d): buffer %p, (%p, %zx), processed %zx\n",
|
||||
oic->context_id,
|
||||
oic->out.msg.addr, /* Address of Output buffer. */
|
||||
async_buffer.addr, /* Address of beginning of async buffer. */
|
||||
async_buffer.size, /* Available size of async buffer. */
|
||||
used); /* Processed async buffer. */
|
||||
|
||||
/* Dump a couple of lines from the buffer. */
|
||||
print_hex_dump(KERN_INFO, "si-core: ", DUMP_PREFIX_OFFSET, 16, 4,
|
||||
async_buffer.addr,
|
||||
min(ASYNC_HEADER_SIZE + 88, async_buffer.size),
|
||||
true);
|
||||
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
|
||||
/* Reset the async buffer for next use in '__append__async_reqs'. */
|
||||
memset(async_buffer.addr, 0, async_buffer.size);
|
||||
}
|
113
drivers/firmware/qcom/si_core/si_core_wq.c
Normal file
@@ -0,0 +1,113 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include "si_core.h"
|
||||
|
||||
static struct workqueue_struct *si_core_wq;
|
||||
|
||||
/* Number of all release requests submitted. */
|
||||
static atomic_t pending_releases = ATOMIC_INIT(0);
|
||||
|
||||
/* Number of release requests dropped. */
|
||||
static int release_failed;
|
||||
|
||||
/* 'release_user_object' puts the object on the release workqueue.
|
||||
 * 'si_object_do_release' makes a direct invocation to release an object.
|
||||
 * 'destroy_user_object' is called to finish the job after QTEE has acknowledged the release.
|
||||
*/
|
||||
|
||||
static void destroy_user_object(struct work_struct *work);
|
||||
void release_user_object(struct si_object *object)
|
||||
{
|
||||
INIT_WORK(&object->work, destroy_user_object);
|
||||
|
||||
pr_debug("%s queued for release.\n", si_object_name(object));
|
||||
|
||||
atomic_inc(&pending_releases);
|
||||
|
||||
/* QUEUE a release work. */
|
||||
queue_work(si_core_wq, &object->work);
|
||||
}
|
||||
|
||||
static void si_object_do_release(struct si_object *object)
|
||||
{
|
||||
int ret, result;
|
||||
|
||||
/* We are on an ordered workqueue, so static locals are safe here. */
|
||||
static struct si_object_invoke_ctx oic;
|
||||
static struct si_arg args[1] = { 0 };
|
||||
|
||||
ret = si_object_do_invoke(&oic, object, SI_OBJECT_OP_RELEASE, args, &result);
|
||||
|
||||
if (ret == -EAGAIN) {
|
||||
|
||||
/* On failure, no callback response is in progress. */
|
||||
|
||||
pr_debug("%s rescheduled for release.\n", si_object_name(object));
|
||||
|
||||
queue_work(si_core_wq, &object->work);
|
||||
} else {
|
||||
|
||||
/* On failure, there are two scenarios:
|
||||
 * - ret != 0 while returning a callback response.
|
||||
* - ret == 0 and result != 0.
|
||||
 * In either case, there is nothing we can do to clean up.
|
||||
*/
|
||||
|
||||
if (ret || result) {
|
||||
release_failed++;
|
||||
|
||||
pr_err("release failed for %s (%d result = %x); %d objects remain zombie.\n",
|
||||
si_object_name(object), ret, result, release_failed);
|
||||
}
|
||||
|
||||
atomic_dec(&pending_releases);
|
||||
|
||||
kfree(object->name);
|
||||
|
||||
free_si_object(object);
|
||||
}
|
||||
}
|
||||
|
||||
static void destroy_user_object(struct work_struct *work)
|
||||
{
|
||||
struct si_object *object = container_of(work, struct si_object, work);
|
||||
|
||||
pr_debug("%s releasing.\n", si_object_name(object));
|
||||
|
||||
si_object_do_release(object);
|
||||
}
|
||||
|
||||
ssize_t release_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
|
||||
{
|
||||
return scnprintf(buf, PAGE_SIZE, "%d %d\n",
|
||||
atomic_read(&pending_releases), release_failed);
|
||||
}
|
||||
|
||||
/* 'init_si_core_wq' and 'destroy_si_core_wq'. */
|
||||
|
||||
int init_si_core_wq(void)
|
||||
{
|
||||
|
||||
/* We use an ordered workqueue. If you decide to switch to a workqueue with more
|
||||
 * concurrency, make sure to update 'si_object_do_release'.
|
||||
*/
|
||||
|
||||
si_core_wq = alloc_ordered_workqueue("si_core_wq", 0);
|
||||
if (!si_core_wq) {
|
||||
pr_err("failed to create si_core_wq.\n");
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void destroy_si_core_wq(void)
|
||||
{
|
||||
destroy_workqueue(si_core_wq);
|
||||
}
|
538
drivers/firmware/qcom/si_core/xts/mem-object.c
Normal file
@@ -0,0 +1,538 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "si-mo: %s: " fmt, __func__
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/dma-buf.h>
|
||||
#include <linux/mem-buf.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/qtee_shmbridge.h>
|
||||
#include <linux/firmware/qcom/si_core_xts.h>
|
||||
|
||||
/* Memory object operations. */
|
||||
/* ... */
|
||||
|
||||
/* 'Primordial Object' operations related to memory object. */
|
||||
#define OBJECT_OP_MAP_REGION 0
|
||||
|
||||
/* Auto mapping operation. */
|
||||
#define OBJECT_OP_AUTO_MAP 0x00000003UL
|
||||
|
||||
#define SMCINVOKE_ASYNC_VERSION 0x00010002U
|
||||
|
||||
static struct platform_device *mem_object_pdev;
|
||||
|
||||
static struct si_object primordial_object;
|
||||
|
||||
/* **/
|
||||
/* Memory object reference counting details:
|
||||
* There is one reference counter in memory object, i.e. 'object'.
|
||||
 * 'object' counts the number of times this object has been exported to TZ plus
|
||||
* total number of mappings plus one (for ownership reference).
|
||||
*
|
||||
* HOW IT WORKS
|
||||
*
|
||||
* Client obtains an instance of 'si_object' by calling 'init_si_mem_object_user'
|
||||
* with an instance of 'struct dma_buf' to initialize a memory object. It can
|
||||
* immediately use this instance of 'si_object' to share memory with TZ.
|
||||
 * However, by transferring this object to TZ, the client loses its ownership.
|
||||
 * To retain ownership, it should call 'get_si_object' and send a second
|
||||
* instance of this object to TZ while keeping the initial 'si_object' instance
|
||||
* (hence plus one for ownership).
|
||||
*
|
||||
 * Every time TZ requests mapping of the memory object, the driver issues
|
||||
* 'get_si_object' on 'object'.
|
||||
*
|
||||
**/
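A minimal usage sketch of the ownership rules described above, assuming the caller already holds a 'struct dma_buf'; the wrapper and callback names are hypothetical, and failure handling simply follows the NULL_SI_OBJECT return of 'init_si_mem_object_user'.

/* Sketch only: create a memory object from a dma-buf and keep ownership
 * while a second reference is handed to QTEE.
 */
static void example_release(void *private)
{
	/* Called once QTEE and all mappings have dropped their references. */
}

static struct si_object *example_share(struct dma_buf *dmabuf, void *private)
{
	struct si_object *object;

	object = init_si_mem_object_user(dmabuf, example_release, private);
	if (object == NULL_SI_OBJECT)
		return NULL_SI_OBJECT;

	/* Take an extra reference so the caller retains ownership after
	 * the object is transferred to QTEE (see the note above).
	 */
	get_si_object(object);

	return object;
}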
|
||||
|
||||
struct mem_object {
|
||||
struct si_object object;
|
||||
|
||||
struct dma_buf *dma_buf;
|
||||
|
||||
union {
|
||||
struct {
|
||||
|
||||
/* SHMBridge information. */
|
||||
/* Select with 'qcom,shmbridge'. */
|
||||
|
||||
struct map {
|
||||
struct dma_buf_attachment *buf_attach;
|
||||
struct sg_table *sgt;
|
||||
|
||||
/* 'lock' to protect concurrent request from QTEE. */
|
||||
struct mutex lock;
|
||||
int early_mapped;
|
||||
} map;
|
||||
|
||||
/* Use SHMBridge, hence the handle. */
|
||||
u64 shm_bridge_handle;
|
||||
|
||||
struct mapping_info {
|
||||
phys_addr_t p_addr;
|
||||
size_t p_addr_len;
|
||||
} mapping_info;
|
||||
};
|
||||
|
||||
/* XXX information. */
|
||||
/* struct { ... } */
|
||||
};
|
||||
|
||||
struct list_head node;
|
||||
|
||||
/* Private pointer passed for callbacks. */
|
||||
|
||||
void *private;
|
||||
|
||||
void (*release)(void *private);
|
||||
};
|
||||
|
||||
#define to_mem_object(o) container_of((o), struct mem_object, object)
|
||||
|
||||
/* List of memory objects. Only used for sysfs. */
|
||||
|
||||
static LIST_HEAD(mo_list);
|
||||
static DEFINE_MUTEX(mo_list_mutex);
|
||||
|
||||
/* 'mo_notify' and 'mo_dispatch' are shared by all types of memory objects. */
|
||||
|
||||
static void mo_notify(unsigned int context_id, struct si_object *object, int status)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
static int mo_dispatch(unsigned int context_id,
|
||||
struct si_object *object, unsigned long op, struct si_arg args[])
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct si_object_operations mem_ops = {
|
||||
.notify = mo_notify,
|
||||
.dispatch = mo_dispatch
|
||||
};
|
||||
|
||||
int op_supported(unsigned long op)
|
||||
{
|
||||
switch (op) {
|
||||
case OBJECT_OP_MAP_REGION:
|
||||
return 1;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/** Support for 'SHMBridge'. **/
|
||||
|
||||
/* 'make_shm_bridge_single' only supports a single contiguous memory region. */
|
||||
|
||||
static int make_shm_bridge_single(struct mem_object *mo)
|
||||
{
|
||||
int ret;
|
||||
|
||||
u32 *vmid_list, *perms_list, nelems;
|
||||
|
||||
/* 'sgt' should have one mapped entry. */
|
||||
|
||||
if (mo->map.sgt->nents != 1)
|
||||
return -EINVAL;
|
||||
|
||||
ret = mem_buf_dma_buf_copy_vmperm(mo->dma_buf,
|
||||
(int **)(&vmid_list),
|
||||
(int **)(&perms_list),
|
||||
(int *)(&nelems));
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (mem_buf_dma_buf_exclusive_owner(mo->dma_buf))
|
||||
perms_list[0] = PERM_READ | PERM_WRITE;
|
||||
|
||||
mo->mapping_info.p_addr = sg_dma_address(mo->map.sgt->sgl);
|
||||
mo->mapping_info.p_addr_len = sg_dma_len(mo->map.sgt->sgl);
|
||||
|
||||
ret = qtee_shmbridge_register(mo->mapping_info.p_addr, mo->mapping_info.p_addr_len,
|
||||
vmid_list, perms_list, nelems, PERM_READ | PERM_WRITE,
|
||||
&mo->shm_bridge_handle);
|
||||
|
||||
kfree(perms_list);
|
||||
kfree(vmid_list);
|
||||
|
||||
if (ret) {
|
||||
|
||||
/* A non-zero 'p_addr' marks the object as mapped; clear it on failure. */
|
||||
|
||||
mo->mapping_info.p_addr = 0;
|
||||
mo->mapping_info.p_addr_len = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void rm_shm_bridge(struct mem_object *mo)
|
||||
{
|
||||
if (mo->shm_bridge_handle)
|
||||
qtee_shmbridge_deregister(mo->shm_bridge_handle);
|
||||
}
|
||||
|
||||
static void detach_dma_buf(struct mem_object *mo)
|
||||
{
|
||||
if (mo->map.sgt) {
|
||||
dma_buf_unmap_attachment_unlocked(mo->map.buf_attach,
|
||||
mo->map.sgt, DMA_BIDIRECTIONAL);
|
||||
}
|
||||
|
||||
if (mo->map.buf_attach)
|
||||
dma_buf_detach(mo->dma_buf, mo->map.buf_attach);
|
||||
}
|
||||
|
||||
/* 'init_tz_shared_memory' is called while holding the 'map.lock' mutex. */
|
||||
|
||||
static int init_tz_shared_memory(struct mem_object *mo)
|
||||
{
|
||||
int ret;
|
||||
struct dma_buf_attachment *buf_attach;
|
||||
struct sg_table *sgt;
|
||||
|
||||
mo->map.buf_attach = NULL;
|
||||
mo->map.sgt = NULL;
|
||||
|
||||
buf_attach = dma_buf_attach(mo->dma_buf, &mem_object_pdev->dev);
|
||||
if (IS_ERR(buf_attach))
|
||||
return PTR_ERR(buf_attach);
|
||||
|
||||
mo->map.buf_attach = buf_attach;
|
||||
|
||||
sgt = dma_buf_map_attachment_unlocked(buf_attach, DMA_BIDIRECTIONAL);
|
||||
if (IS_ERR(sgt)) {
|
||||
ret = PTR_ERR(sgt);
|
||||
|
||||
goto out_failed;
|
||||
}
|
||||
|
||||
mo->map.sgt = sgt;
|
||||
|
||||
ret = make_shm_bridge_single(mo);
|
||||
if (ret)
|
||||
goto out_failed;
|
||||
|
||||
return 0;
|
||||
|
||||
out_failed:
|
||||
detach_dma_buf(mo);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int map_memory_obj(struct mem_object *mo, int advisory)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (mo->map.early_mapped)
|
||||
pr_debug("%s auto-mapped. Memory optimization unavailable.\n",
|
||||
si_object_name(&mo->object));
|
||||
|
||||
mutex_lock(&mo->map.lock);
|
||||
if (mo->mapping_info.p_addr == 0) {
|
||||
|
||||
/* 'mo' has not been mapped before. Do it now. */
|
||||
|
||||
ret = init_tz_shared_memory(mo);
|
||||
|
||||
} else {
|
||||
|
||||
/* 'mo' is already mapped. Just return. */
|
||||
|
||||
ret = advisory;
|
||||
}
|
||||
|
||||
mutex_unlock(&mo->map.lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void release_memory_obj(struct mem_object *mo)
|
||||
{
|
||||
rm_shm_bridge(mo);
|
||||
|
||||
detach_dma_buf(mo);
|
||||
}
|
||||
|
||||
static unsigned long mo_shm_bridge_prepare(struct si_object *object, struct si_arg args[])
|
||||
{
|
||||
struct mem_object *mo = to_mem_object(object);
|
||||
|
||||
struct {
|
||||
u64 p_addr;
|
||||
u64 len;
|
||||
u32 perms;
|
||||
} *mi;
|
||||
|
||||
if (get_async_proto_version() != SMCINVOKE_ASYNC_VERSION)
|
||||
return SI_OBJECT_OP_NO_OP;
|
||||
|
||||
if (args[0].b.size < sizeof(*mi))
|
||||
return SI_OBJECT_OP_NO_OP;
|
||||
|
||||
if (!map_memory_obj(mo, 1)) {
|
||||
mo->map.early_mapped = 1;
|
||||
|
||||
/* 'object' has been mapped. Share it. */
|
||||
|
||||
get_si_object(object);
|
||||
|
||||
mi = (typeof(mi)) (args[0].b.addr);
|
||||
mi->p_addr = mo->mapping_info.p_addr;
|
||||
mi->len = mo->mapping_info.p_addr_len;
|
||||
mi->perms = 6; /* RW Permission. */
|
||||
args[0].b.size = sizeof(*mi);
|
||||
|
||||
args[1].o = object;
|
||||
|
||||
return OBJECT_OP_AUTO_MAP;
|
||||
}
|
||||
|
||||
return SI_OBJECT_OP_NO_OP;
|
||||
}
|
||||
|
||||
static void mo_shm_bridge_release(struct si_object *object)
|
||||
{
|
||||
struct mem_object *mo = to_mem_object(object);
|
||||
|
||||
release_memory_obj(mo);
|
||||
|
||||
if (mo->release)
|
||||
mo->release(mo->private);
|
||||
|
||||
/* Put the dma-buf reference obtained in 'init_si_mem_object_user'. */
|
||||
|
||||
dma_buf_put(mo->dma_buf);
|
||||
|
||||
mutex_lock(&mo_list_mutex);
|
||||
list_del(&mo->node);
|
||||
mutex_unlock(&mo_list_mutex);
|
||||
|
||||
pr_info("%s unmapped.\n", si_object_name(object));
|
||||
|
||||
kfree(mo);
|
||||
}
|
||||
|
||||
/* Primordial object for 'SHMBridge'. */
|
||||
|
||||
static int shm_bridge__po_dispatch(unsigned int context_id,
|
||||
struct si_object *unused, unsigned long op, struct si_arg args[])
|
||||
{
|
||||
int ret;
|
||||
|
||||
struct si_object *object;
|
||||
struct mem_object *mo;
|
||||
|
||||
switch (op) {
|
||||
case OBJECT_OP_MAP_REGION: {
|
||||
|
||||
/* Format of response as expected by TZ. */
|
||||
|
||||
struct {
|
||||
u64 p_addr;
|
||||
u64 len;
|
||||
u32 perms;
|
||||
} *mi;
|
||||
|
||||
if (size_of_arg(args) != 3 ||
|
||||
args[0].type != SI_AT_OB ||
|
||||
args[1].type != SI_AT_IO ||
|
||||
args[2].type != SI_AT_OO) {
|
||||
|
||||
pr_err("mapping of a memory object with invalid message format.\n");
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
object = args[1].o;
|
||||
|
||||
if (!is_mem_object(object)) {
|
||||
pr_err("mapping of a non-memory object.\n");
|
||||
|
||||
put_si_object(object);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mo = to_mem_object(object);
|
||||
|
||||
ret = map_memory_obj(mo, 0);
|
||||
if (!ret) {
|
||||
|
||||
/* 'object' has been mapped. Share it. */
|
||||
|
||||
args[2].o = object;
|
||||
|
||||
mi = (typeof(mi)) (args[0].b.addr);
|
||||
mi->p_addr = mo->mapping_info.p_addr;
|
||||
mi->len = mo->mapping_info.p_addr_len;
|
||||
mi->perms = 6; /* RW Permission. */
|
||||
|
||||
pr_info("%s mapped %llx %llx\n",
|
||||
si_object_name(object), mi->p_addr, mi->len);
|
||||
|
||||
} else {
|
||||
pr_err("mapping memory object %s failed.\n",
|
||||
si_object_name(object));
|
||||
|
||||
put_si_object(object);
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
default:
|
||||
|
||||
/* The operation is not supported! */
|
||||
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct si_object_operations shm_bridge__po_ops = {
|
||||
.op_supported = op_supported,
|
||||
.dispatch = shm_bridge__po_dispatch
|
||||
};
|
||||
|
||||
/* Memory Object Extension. */
|
||||
|
||||
struct si_object *init_si_mem_object_user(struct dma_buf *dma_buf,
|
||||
void (*release)(void *), void *private)
|
||||
{
|
||||
struct mem_object *mo;
|
||||
|
||||
if (!mem_ops.release) {
|
||||
pr_err("memory object type is unknown.\n");
|
||||
|
||||
return NULL_SI_OBJECT;
|
||||
}
|
||||
|
||||
mo = kzalloc(sizeof(*mo), GFP_KERNEL);
|
||||
if (!mo)
|
||||
return NULL_SI_OBJECT;
|
||||
|
||||
mutex_init(&mo->map.lock);
|
||||
|
||||
/* Get a copy of dma-buf. */
|
||||
get_dma_buf(dma_buf);
|
||||
|
||||
mo->dma_buf = dma_buf;
|
||||
mo->private = private;
|
||||
mo->release = release;
|
||||
|
||||
init_si_object_user(&mo->object, SI_OT_CB_OBJECT, &mem_ops, "mem-object-%p", dma_buf);
|
||||
|
||||
mutex_lock(&mo_list_mutex);
|
||||
list_add_tail(&mo->node, &mo_list);
|
||||
mutex_unlock(&mo_list_mutex);
|
||||
|
||||
return &mo->object;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(init_si_mem_object_user);
|
||||
|
||||
struct dma_buf *mem_object_to_dma_buf(struct si_object *object)
|
||||
{
|
||||
if (is_mem_object(object))
|
||||
return to_mem_object(object)->dma_buf;
|
||||
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mem_object_to_dma_buf);
|
||||
|
||||
int is_mem_object(struct si_object *object)
|
||||
{
|
||||
/* Check 'typeof_si_object' to make sure 'object''s 'ops' has been
|
||||
* initialized before checking it.
|
||||
*/
|
||||
|
||||
return (typeof_si_object(object) == SI_OT_CB_OBJECT) &&
|
||||
(object->ops == &mem_ops);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(is_mem_object);
|
||||
|
||||
static ssize_t mem_objects_show(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
size_t len = 0;
|
||||
struct mem_object *mo;
|
||||
|
||||
mutex_lock(&mo_list_mutex);
|
||||
list_for_each_entry(mo, &mo_list, node) {
|
||||
len += scnprintf(buf + len, PAGE_SIZE - len, "%s %u (%llx %zx) %d\n",
|
||||
si_object_name(&mo->object), kref_read(&mo->object.refcount),
|
||||
mo->mapping_info.p_addr, mo->mapping_info.p_addr_len, mo->map.early_mapped);
|
||||
}
|
||||
|
||||
mutex_unlock(&mo_list_mutex);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
/* 'struct device_attribute dev_attr_mem_objects'. */
|
||||
/* Use device attribute rather than driver attribute in case we want to support
|
||||
* multiple types of memory objects as different devices.
|
||||
*/
|
||||
|
||||
static DEVICE_ATTR_RO(mem_objects);
|
||||
|
||||
static struct attribute *attrs[] = {
|
||||
&dev_attr_mem_objects.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group attr_group = {
|
||||
.attrs = attrs,
|
||||
};
|
||||
|
||||
static const struct attribute_group *attr_groups[] = {
|
||||
&attr_group,
|
||||
NULL
|
||||
};
|
||||
|
||||
static int mem_object_probe(struct platform_device *pdev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Select memory object type: default to SHMBridge. */
|
||||
mem_ops.release = mo_shm_bridge_release;
|
||||
mem_ops.prepare = mo_shm_bridge_prepare;
|
||||
|
||||
init_si_object_user(&primordial_object,
|
||||
SI_OT_ROOT, &shm_bridge__po_ops, "po_in_mem_object");
|
||||
|
||||
mem_object_pdev = pdev;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id mem_object_match[] = {
|
||||
{ .compatible = "qcom,mem-object", }, {}
|
||||
};
|
||||
|
||||
static struct platform_driver mem_object_plat_driver = {
|
||||
.probe = mem_object_probe,
|
||||
.driver = {
|
||||
.name = "mem-object",
|
||||
.dev_groups = attr_groups,
|
||||
.of_match_table = mem_object_match,
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(mem_object_plat_driver);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("Memory object driver");
|
||||
MODULE_IMPORT_NS(DMA_BUF);
|
@@ -1,6 +1,7 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (C) 2015 Linaro Ltd.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
@@ -15,9 +16,6 @@
|
||||
|
||||
#include "qcom_scm.h"
|
||||
|
||||
static DEFINE_MUTEX(qcom_scm_lock);
|
||||
|
||||
|
||||
/**
|
||||
* struct arm_smccc_args
|
||||
* @args: The array of values used in registers in smc instruction
|
||||
@@ -26,7 +24,6 @@ struct arm_smccc_args {
|
||||
unsigned long args[8];
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* struct scm_legacy_command - one SCM command buffer
|
||||
* @len: total available memory for command and response
|
||||
@@ -148,6 +145,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
__le32 *arg_buf;
|
||||
const __le32 *res_buf;
|
||||
|
||||
if (!dev)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
|
||||
if (!cmd)
|
||||
return -ENOMEM;
|
||||
@@ -173,11 +173,11 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
smc.args[1] = (unsigned long)&context_id;
|
||||
smc.args[2] = cmd_phys;
|
||||
|
||||
mutex_lock(&qcom_scm_lock);
|
||||
down(&qcom_scm_sem_lock);
|
||||
__scm_legacy_do(&smc, &smc_res);
|
||||
if (smc_res.a0)
|
||||
ret = qcom_scm_remap_error(smc_res.a0);
|
||||
mutex_unlock(&qcom_scm_lock);
|
||||
up(&qcom_scm_sem_lock);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
|
@@ -1,5 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/io.h>
|
||||
@@ -11,6 +12,7 @@
|
||||
#include <linux/firmware/qcom/qcom_scm.h>
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/qtee_shmbridge.h>
|
||||
|
||||
#include "qcom_scm.h"
|
||||
|
||||
@@ -52,6 +54,9 @@ static void __scm_smc_do_quirk(const struct arm_smccc_args *smc,
|
||||
} while (res->a0 == QCOM_SCM_INTERRUPTED);
|
||||
}
|
||||
|
||||
#define IS_WAITQ_SLEEP_OR_WAKE(res) \
|
||||
(res->a0 == QCOM_SCM_WAITQ_SLEEP || res->a0 == QCOM_SCM_WAITQ_WAKE)
|
||||
|
||||
static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
|
||||
{
|
||||
memset(resume->args, 0, sizeof(resume->args[0]) * ARRAY_SIZE(resume->args));
|
||||
@@ -65,15 +70,35 @@ static void fill_wq_resume_args(struct arm_smccc_args *resume, u32 smc_call_ctx)
|
||||
resume->args[2] = smc_call_ctx;
|
||||
}
|
||||
|
||||
static void fill_wq_wake_ack_args(struct arm_smccc_args *wake_ack, u32 smc_call_ctx)
|
||||
{
|
||||
memset(wake_ack->args, 0, ARRAY_SIZE(wake_ack->args));
|
||||
|
||||
wake_ack->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
|
||||
ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
|
||||
SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_ACK));
|
||||
|
||||
wake_ack->args[1] = QCOM_SCM_ARGS(1);
|
||||
|
||||
wake_ack->args[2] = smc_call_ctx;
|
||||
}
|
||||
|
||||
static void fill_get_wq_ctx_args(struct arm_smccc_args *get_wq_ctx)
|
||||
{
|
||||
memset(get_wq_ctx->args, 0, ARRAY_SIZE(get_wq_ctx->args));
|
||||
|
||||
get_wq_ctx->args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,
|
||||
ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
|
||||
SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
|
||||
}
|
||||
|
||||
int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
|
||||
{
|
||||
int ret;
|
||||
struct arm_smccc_res get_wq_res;
|
||||
struct arm_smccc_args get_wq_ctx = {0};
|
||||
|
||||
get_wq_ctx.args[0] = ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL,
|
||||
ARM_SMCCC_SMC_64, ARM_SMCCC_OWNER_SIP,
|
||||
SCM_SMC_FNID(QCOM_SCM_SVC_WAITQ, QCOM_SCM_WAITQ_GET_WQ_CTX));
|
||||
fill_get_wq_ctx_args(&get_wq_ctx);
|
||||
|
||||
/* Guaranteed to return only success or error, no WAITQ_* */
|
||||
__scm_smc_do_quirk(&get_wq_ctx, &get_wq_res);
|
||||
@@ -88,55 +113,80 @@ int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __scm_smc_do_quirk_handle_waitq(struct device *dev, struct arm_smccc_args *waitq,
|
||||
struct arm_smccc_res *res)
|
||||
static int scm_smc_do_quirk(struct device *dev, struct arm_smccc_args *smc,
|
||||
struct arm_smccc_res *res)
|
||||
{
|
||||
int ret;
|
||||
u32 wq_ctx, smc_call_ctx;
|
||||
struct arm_smccc_args resume;
|
||||
struct arm_smccc_args *smc = waitq;
|
||||
struct completion *wq = NULL;
|
||||
struct qcom_scm *qscm;
|
||||
struct arm_smccc_args original = *smc;
|
||||
u32 wq_ctx, smc_call_ctx, flags;
|
||||
|
||||
do {
|
||||
__scm_smc_do_quirk(smc, res);
|
||||
|
||||
if (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
|
||||
if (IS_WAITQ_SLEEP_OR_WAKE(res)) {
|
||||
wq_ctx = res->a1;
|
||||
smc_call_ctx = res->a2;
|
||||
flags = res->a3;
|
||||
|
||||
ret = qcom_scm_wait_for_wq_completion(wq_ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (!dev)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
fill_wq_resume_args(&resume, smc_call_ctx);
|
||||
smc = &resume;
|
||||
}
|
||||
} while (res->a0 == QCOM_SCM_WAITQ_SLEEP);
|
||||
qscm = dev_get_drvdata(dev);
|
||||
wq = qcom_scm_lookup_wq(qscm, wq_ctx);
|
||||
if (IS_ERR_OR_NULL(wq)) {
|
||||
pr_err("Did not find waitqueue for wq_ctx %d: %ld\n",
|
||||
wq_ctx, PTR_ERR(wq));
|
||||
return PTR_ERR(wq);
|
||||
}
|
||||
|
||||
if (res->a0 == QCOM_SCM_WAITQ_SLEEP) {
|
||||
wait_for_completion(wq);
|
||||
fill_wq_resume_args(smc, smc_call_ctx);
|
||||
continue;
|
||||
} else {
|
||||
fill_wq_wake_ack_args(smc, smc_call_ctx);
|
||||
scm_waitq_flag_handler(wq, flags);
|
||||
continue;
|
||||
}
|
||||
} else if ((long)res->a0 < 0) {
|
||||
/* Error, return to caller with original SMC call */
|
||||
*smc = original;
|
||||
break;
|
||||
} else
|
||||
return 0;
|
||||
} while (IS_WAITQ_SLEEP_OR_WAKE(res));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc,
|
||||
struct arm_smccc_res *res, bool atomic)
|
||||
struct arm_smccc_res *res,
|
||||
enum qcom_scm_call_type call_type,
|
||||
bool multicall_allowed)
|
||||
{
|
||||
int ret, retry_count = 0;
|
||||
bool multi_smc_call = qcom_scm_multi_call_allow(dev, multicall_allowed);
|
||||
|
||||
if (atomic) {
|
||||
if (call_type == QCOM_SCM_CALL_ATOMIC) {
|
||||
__scm_smc_do_quirk(smc, res);
|
||||
return 0;
|
||||
}
|
||||
|
||||
do {
|
||||
mutex_lock(&qcom_scm_lock);
|
||||
|
||||
ret = __scm_smc_do_quirk_handle_waitq(dev, smc, res);
|
||||
|
||||
mutex_unlock(&qcom_scm_lock);
|
||||
|
||||
if (!multi_smc_call)
|
||||
mutex_lock(&qcom_scm_lock);
|
||||
down(&qcom_scm_sem_lock);
|
||||
ret = scm_smc_do_quirk(dev, smc, res);
|
||||
up(&qcom_scm_sem_lock);
|
||||
if (!multi_smc_call)
|
||||
mutex_unlock(&qcom_scm_lock);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (res->a0 == QCOM_SCM_V2_EBUSY) {
|
||||
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY)
|
||||
if (retry_count++ > QCOM_SCM_EBUSY_MAX_RETRY ||
|
||||
(call_type == QCOM_SCM_CALL_NORETRY))
|
||||
break;
|
||||
msleep(QCOM_SCM_EBUSY_WAIT_MS);
|
||||
}
|
||||
@@ -148,14 +198,15 @@ static int __scm_smc_do(struct device *dev, struct arm_smccc_args *smc,
|
||||
|
||||
int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
enum qcom_scm_convention qcom_convention,
|
||||
struct qcom_scm_res *res, bool atomic)
|
||||
struct qcom_scm_res *res, enum qcom_scm_call_type call_type)
|
||||
{
|
||||
int arglen = desc->arginfo & 0xf;
|
||||
int i, ret;
|
||||
dma_addr_t args_phys = 0;
|
||||
void *args_virt = NULL;
|
||||
struct qtee_shm shm = {0};
|
||||
bool use_qtee_shmbridge;
|
||||
size_t alloc_len;
|
||||
gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
|
||||
const bool atomic = (call_type == QCOM_SCM_CALL_ATOMIC);
|
||||
gfp_t flag = atomic ? GFP_ATOMIC : GFP_NOIO;
|
||||
u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
|
||||
u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
|
||||
ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
|
||||
@@ -172,43 +223,59 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
|
||||
|
||||
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
|
||||
alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
|
||||
args_virt = kzalloc(PAGE_ALIGN(alloc_len), flag);
|
||||
if (!dev)
|
||||
return -EPROBE_DEFER;
|
||||
|
||||
alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
|
||||
use_qtee_shmbridge = qtee_shmbridge_is_enabled();
|
||||
if (use_qtee_shmbridge) {
|
||||
ret = qtee_shmbridge_allocate_shm(alloc_len, &shm);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
shm.vaddr = kzalloc(PAGE_ALIGN(alloc_len), flag);
|
||||
if (!shm.vaddr)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (!args_virt)
|
||||
return -ENOMEM;
|
||||
|
||||
if (qcom_smccc_convention == ARM_SMCCC_SMC_32) {
|
||||
__le32 *args = args_virt;
|
||||
__le32 *args = shm.vaddr;
|
||||
|
||||
for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
|
||||
args[i] = cpu_to_le32(desc->args[i +
|
||||
SCM_SMC_FIRST_EXT_IDX]);
|
||||
} else {
|
||||
__le64 *args = args_virt;
|
||||
__le64 *args = shm.vaddr;
|
||||
|
||||
for (i = 0; i < SCM_SMC_N_EXT_ARGS; i++)
|
||||
args[i] = cpu_to_le64(desc->args[i +
|
||||
SCM_SMC_FIRST_EXT_IDX]);
|
||||
}
|
||||
|
||||
args_phys = dma_map_single(dev, args_virt, alloc_len,
|
||||
shm.paddr = dma_map_single(dev, shm.vaddr, alloc_len,
|
||||
DMA_TO_DEVICE);
|
||||
|
||||
if (dma_mapping_error(dev, args_phys)) {
|
||||
kfree(args_virt);
|
||||
if (dma_mapping_error(dev, shm.paddr)) {
|
||||
if (use_qtee_shmbridge)
|
||||
qtee_shmbridge_free_shm(&shm);
|
||||
else
|
||||
kfree(shm.vaddr);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
smc.args[SCM_SMC_LAST_REG_IDX] = args_phys;
|
||||
smc.args[SCM_SMC_LAST_REG_IDX] = shm.paddr;
|
||||
}
|
||||
|
||||
/* ret error check follows after args_virt cleanup*/
|
||||
ret = __scm_smc_do(dev, &smc, &smc_res, atomic);
|
||||
/* ret error check follows after shm cleanup */
|
||||
ret = __scm_smc_do(dev, &smc, &smc_res, call_type, desc->multicall_allowed);
|
||||
|
||||
if (args_virt) {
|
||||
dma_unmap_single(dev, args_phys, alloc_len, DMA_TO_DEVICE);
|
||||
kfree(args_virt);
|
||||
if (shm.vaddr) {
|
||||
dma_unmap_single(dev, shm.paddr, alloc_len, DMA_TO_DEVICE);
|
||||
if (use_qtee_shmbridge)
|
||||
qtee_shmbridge_free_shm(&shm);
|
||||
else
|
||||
kfree(shm.vaddr);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
@@ -220,6 +287,8 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
res->result[2] = smc_res.a3;
|
||||
}
|
||||
|
||||
return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
|
||||
ret = (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
|
||||
|
||||
return ret;
|
||||
|
||||
}
|
||||
|
File diff suppressed because it is too large
@@ -1,9 +1,12 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/* Copyright (c) 2010-2015,2019 The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
#ifndef __QCOM_SCM_INT_H
|
||||
#define __QCOM_SCM_INT_H
|
||||
|
||||
#include <linux/semaphore.h>
|
||||
|
||||
enum qcom_scm_convention {
|
||||
SMC_CONVENTION_UNKNOWN,
|
||||
SMC_CONVENTION_LEGACY,
|
||||
@@ -12,6 +15,7 @@ enum qcom_scm_convention {
|
||||
};
|
||||
|
||||
extern enum qcom_scm_convention qcom_scm_convention;
|
||||
extern struct semaphore qcom_scm_sem_lock;
|
||||
|
||||
#define MAX_QCOM_SCM_ARGS 10
|
||||
#define MAX_QCOM_SCM_RETS 3
|
||||
@@ -50,6 +54,7 @@ struct qcom_scm_desc {
|
||||
u32 arginfo;
|
||||
u64 args[MAX_QCOM_SCM_ARGS];
|
||||
u32 owner;
|
||||
bool multicall_allowed;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -60,15 +65,29 @@ struct qcom_scm_res {
|
||||
u64 result[MAX_QCOM_SCM_RETS];
|
||||
};
|
||||
|
||||
int qcom_scm_wait_for_wq_completion(u32 wq_ctx);
|
||||
int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending);
|
||||
enum qcom_scm_call_type {
|
||||
QCOM_SCM_CALL_NORMAL,
|
||||
QCOM_SCM_CALL_ATOMIC,
|
||||
QCOM_SCM_CALL_NORETRY,
|
||||
};
|
||||
|
||||
enum qcom_scm_wq_feature {
|
||||
QCOM_SCM_SINGLE_SMC_ALLOW,
|
||||
QCOM_SCM_MULTI_SMC_WHITE_LIST_ALLOW, /* Release global lock for certain allowed SMC calls */
|
||||
};
|
||||
|
||||
struct qcom_scm;
|
||||
extern struct completion *qcom_scm_lookup_wq(struct qcom_scm *scm, u32 wq_ctx);
|
||||
extern void scm_waitq_flag_handler(struct completion *wq, u32 flags);
|
||||
extern int scm_get_wq_ctx(u32 *wq_ctx, u32 *flags, u32 *more_pending);
|
||||
extern bool qcom_scm_multi_call_allow(struct device *dev, bool multicall_allowed);
|
||||
|
||||
#define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
|
||||
extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
enum qcom_scm_convention qcom_convention,
|
||||
struct qcom_scm_res *res, bool atomic);
|
||||
#define scm_smc_call(dev, desc, res, atomic) \
|
||||
__scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
|
||||
struct qcom_scm_res *res, enum qcom_scm_call_type call_type);
|
||||
#define scm_smc_call(dev, desc, res, call_type) \
|
||||
__scm_smc_call((dev), (desc), qcom_scm_convention, (res), (call_type))
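A hedged sketch of how a caller inside the SCM driver might use the new call_type parameter together with the per-descriptor multicall_allowed flag. The service/command IDs come from this header, but the call site itself is illustrative and the .svc/.cmd field names and QCOM_SCM_ARGS macro are assumed to follow the existing qcom_scm_desc layout.

/* Sketch only: a standard, retryable SCM call that is also allowed to
 * bypass the global SCM lock when the multi-SMC feature is enabled.
 */
static int example_scm_query(struct device *dev)
{
	struct qcom_scm_res res;
	struct qcom_scm_desc desc = {
		.svc = QCOM_SCM_SVC_INFO,
		.cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
		.arginfo = QCOM_SCM_ARGS(1),
		.owner = ARM_SMCCC_OWNER_SIP,
		.multicall_allowed = true,
	};

	return scm_smc_call(dev, &desc, &res, QCOM_SCM_CALL_NORMAL);
}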
|
||||
|
||||
#define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff))
|
||||
extern int scm_legacy_call_atomic(struct device *dev,
|
||||
@@ -81,7 +100,14 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
#define QCOM_SCM_BOOT_SET_ADDR 0x01
|
||||
#define QCOM_SCM_BOOT_TERMINATE_PC 0x02
|
||||
#define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10
|
||||
#define QCOM_SCM_BOOT_SET_ADDR_MC 0x11
|
||||
#define QCOM_SCM_BOOT_SEC_WDOG_DIS 0x07
|
||||
#define QCOM_SCM_BOOT_SEC_WDOG_TRIGGER 0x08
|
||||
#define QCOM_SCM_BOOT_WDOG_DEBUG_PART 0x09
|
||||
#define QCOM_SCM_BOOT_SET_ADDR_MC 0x11
|
||||
#define QCOM_SCM_BOOT_SPIN_CPU 0x0d
|
||||
#define QCOM_SCM_BOOT_SWITCH_MODE 0x0f
|
||||
#define QCOM_SCM_BOOT_SET_DLOAD_MODE 0x10
|
||||
#define QCOM_SCM_BOOT_CONFIG_CPU_ERRATA 0x12
|
||||
#define QCOM_SCM_BOOT_SET_REMOTE_STATE 0x0a
|
||||
#define QCOM_SCM_FLUSH_FLAG_MASK 0x3
|
||||
#define QCOM_SCM_BOOT_MAX_CPUS 4
|
||||
@@ -96,21 +122,46 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
#define QCOM_SCM_PIL_PAS_SHUTDOWN 0x06
|
||||
#define QCOM_SCM_PIL_PAS_IS_SUPPORTED 0x07
|
||||
#define QCOM_SCM_PIL_PAS_MSS_RESET 0x0a
|
||||
#define QCOM_SCM_SVC_UTIL 0x03
|
||||
#define QCOM_SCM_UTIL_GET_SEC_DUMP_STATE 0x10
|
||||
#define QCOM_SCM_UTIL_DUMP_TABLE_ASSIGN 0x13
|
||||
|
||||
#define QCOM_SCM_SVC_IO 0x05
|
||||
#define QCOM_SCM_IO_READ 0x01
|
||||
#define QCOM_SCM_IO_WRITE 0x02
|
||||
#define QCOM_SCM_IO_RESET 0x03
|
||||
|
||||
#define QCOM_SCM_SVC_INFO 0x06
|
||||
#define QCOM_SCM_INFO_IS_CALL_AVAIL 0x01
|
||||
#define QCOM_SCM_INFO_GET_FEAT_VERSION_CMD 0x03
|
||||
|
||||
#define QCOM_SCM_SVC_PWR 0x09
|
||||
#define QCOM_SCM_PWR_IO_DISABLE_PMIC_ARBITER 0x01
|
||||
#define QCOM_SCM_SVC_MP 0x0c
|
||||
#define QCOM_SCM_MP_RESTORE_SEC_CFG 0x02
|
||||
#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE 0x03
|
||||
#define QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT 0x04
|
||||
#define QCOM_SCM_MP_IOMMU_SET_CP_POOL_SIZE 0x05
|
||||
#define QCOM_SCM_MP_VIDEO_VAR 0x08
|
||||
#define QCOM_SCM_MP_MEM_PROTECT_REGION_ID 0x10
|
||||
#define QCOM_SCM_MP_ASSIGN 0x16
|
||||
#define QCOM_SCM_MP_CMD_SD_CTRL 0x18
|
||||
#define QCOM_SCM_MP_CP_SMMU_APERTURE_ID 0x1b
|
||||
#define QCOM_SCM_MEMP_SHM_BRIDGE_ENABLE 0x1c
|
||||
#define QCOM_SCM_MEMP_SHM_BRIDGE_DELETE 0x1d
|
||||
#define QCOM_SCM_MEMP_SHM_BRDIGE_CREATE 0x1e
|
||||
#define QCOM_SCM_CP_APERTURE_REG 0x0
|
||||
#define QCOM_SCM_CP_LPAC_APERTURE_REG 0x1
|
||||
|
||||
#define QCOM_SCM_SVC_DCVS 0x0D
|
||||
#define QCOM_SCM_DCVS_RESET 0x07
|
||||
#define QCOM_SCM_DCVS_UPDATE 0x08
|
||||
#define QCOM_SCM_DCVS_INIT 0x09
|
||||
#define QCOM_SCM_DCVS_UPDATE_V2 0x0a
|
||||
#define QCOM_SCM_DCVS_INIT_V2 0x0b
|
||||
#define QCOM_SCM_DCVS_INIT_CA_V2 0x0c
|
||||
#define QCOM_SCM_DCVS_UPDATE_CA_V2 0x0d
|
||||
#define QCOM_SCM_DCVS_TUNING 0x0e
|
||||
|
||||
#define QCOM_SCM_SVC_OCMEM 0x0f
|
||||
#define QCOM_SCM_OCMEM_LOCK_CMD 0x01
|
||||
@@ -119,6 +170,9 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
#define QCOM_SCM_SVC_ES 0x10 /* Enterprise Security */
|
||||
#define QCOM_SCM_ES_INVALIDATE_ICE_KEY 0x03
|
||||
#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY 0x04
|
||||
#define QCOM_SCM_ES_CONFIG_SET_ICE_KEY_V2 0x05
|
||||
#define QCOM_SCM_ES_CLEAR_ICE_KEY 0x06
|
||||
#define QCOM_SCM_ES_DERIVE_RAW_SECRET 0x07
|
||||
|
||||
#define QCOM_SCM_SVC_HDCP 0x11
|
||||
#define QCOM_SCM_HDCP_INVOKE 0x01
|
||||
@@ -126,12 +180,49 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
#define QCOM_SCM_SVC_LMH 0x13
|
||||
#define QCOM_SCM_LMH_LIMIT_PROFILE_CHANGE 0x01
|
||||
#define QCOM_SCM_LMH_LIMIT_DCVSH 0x10
|
||||
#define QCOM_SCM_LMH_DEBUG_FETCH_DATA 0x0D
|
||||
|
||||
#define QCOM_SCM_SVC_SMMU_PROGRAM 0x15
|
||||
#define QCOM_SCM_SMMU_PT_FORMAT 0x01
|
||||
#define QCOM_SCM_SMMU_SECURE_LUT 0x03
|
||||
#define QCOM_SCM_SMMU_CONFIG_ERRATA1 0x03
|
||||
#define QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL 0x02
|
||||
|
||||
#define QCOM_SCM_SVC_CAMERA 0x18
|
||||
#define QCOM_SCM_CAMERA_PROTECT_ALL 0x06
|
||||
#define QCOM_SCM_CAMERA_PROTECT_PHY_LANES 0x07
|
||||
#define QCOM_SCM_CAMERA_UPDATE_CAMNOC_QOS 0x0A
|
||||
|
||||
#define QCOM_SCM_SVC_WAITQ 0x24
|
||||
#define QCOM_SCM_WAITQ_ACK 0x01
|
||||
#define QCOM_SCM_WAITQ_RESUME 0x02
|
||||
#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
|
||||
#define QCOM_SCM_GET_WQ_QUEUE_INFO 0x04
|
||||
#define QCOM_SCM_SVC_TSENS 0x1E
|
||||
#define QCOM_SCM_TSENS_INIT_ID 0x5
|
||||
|
||||
/* OEM Services and Function IDs */
|
||||
#define QCOM_SCM_SVC_OEM_POWER 0x09
|
||||
#define QCOM_SCM_OEM_POWER_REBOOT 0x22
|
||||
|
||||
/* GPU Service IDs */
|
||||
#define QCOM_SCM_SVC_GPU 0x28
|
||||
#define QCOM_SCM_SVC_GPU_INIT_REGS 0x1
|
||||
|
||||
/* TOS Services and Function IDs */
|
||||
#define QCOM_SCM_SVC_QSEELOG 0x01
|
||||
#define QCOM_SCM_QSEELOG_REGISTER 0x06
|
||||
#define QCOM_SCM_QUERY_ENCR_LOG_FEAT_ID 0x0b
|
||||
#define QCOM_SCM_REQUEST_ENCR_LOG_ID 0x0c
|
||||
|
||||
#define QCOM_SCM_SVC_SMCINVOKE 0x06
|
||||
#define QCOM_SCM_SMCINVOKE_INVOKE_LEGACY 0x00
|
||||
#define QCOM_SCM_SMCINVOKE_INVOKE 0x02
|
||||
#define QCOM_SCM_SMCINVOKE_CB_RSP 0x01
|
||||
|
||||
/* Feature IDs for QCOM_SCM_INFO_GET_FEAT_VERSION */
|
||||
#define QCOM_SCM_FEAT_LOG_ID 0x0a
|
||||
|
||||
#define QCOM_SCM_SVC_WAITQ 0x24
|
||||
#define QCOM_SCM_WAITQ_RESUME 0x02
|
||||
#define QCOM_SCM_WAITQ_GET_WQ_CTX 0x03
|
||||
@@ -145,6 +236,7 @@ extern int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
|
||||
#define QCOM_SCM_ERROR -1
|
||||
#define QCOM_SCM_INTERRUPTED 1
|
||||
#define QCOM_SCM_WAITQ_SLEEP 2
|
||||
#define QCOM_SCM_WAITQ_WAKE 3
|
||||
|
||||
static inline int qcom_scm_remap_error(int err)
|
||||
{
|
||||
|
624
drivers/firmware/qtee_shmbridge.c
Normal file
@@ -0,0 +1,624 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* QTI TEE shared memory bridge driver
|
||||
*
|
||||
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/of_reserved_mem.h>
|
||||
#include <linux/firmware/qcom/qcom_scm.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <soc/qcom/qseecomi.h>
|
||||
#include <linux/qtee_shmbridge.h>
|
||||
#include <linux/of_platform.h>
|
||||
|
||||
#include <linux/gunyah/gh_rm_drv.h>
|
||||
|
||||
#include "qtee_shmbridge_internal.h"
|
||||
|
||||
#define DEFAULT_BRIDGE_SIZE SZ_4M /*4M*/
|
||||
#define MIN_BRIDGE_SIZE SZ_4K /*4K*/
|
||||
|
||||
#define MAXSHMVMS 4
|
||||
#define PERM_BITS 3
|
||||
#define VM_BITS 16
|
||||
#define SELF_OWNER_BIT 1
|
||||
#define SHM_NUM_VM_SHIFT 9
|
||||
#define SHM_VM_MASK 0xFFFF
|
||||
#define SHM_PERM_MASK 0x7
|
||||
|
||||
#define VM_PERM_R PERM_READ
|
||||
#define VM_PERM_W PERM_WRITE
|
||||
|
||||
#define SHMBRIDGE_E_NOT_SUPPORTED 4 /* SHMbridge is not implemented */
|
||||
|
||||
#define AC_ERR_SHARED_MEMORY_SINGLE_SOURCE 15
|
||||
|
||||
/* ns_vmids */
|
||||
#define UPDATE_NS_VMIDS(ns_vmids, id) \
|
||||
(((uint64_t)(ns_vmids) << VM_BITS) \
|
||||
| ((uint64_t)(id) & SHM_VM_MASK))
|
||||
|
||||
/* ns_perms */
|
||||
#define UPDATE_NS_PERMS(ns_perms, perm) \
|
||||
(((uint64_t)(ns_perms) << PERM_BITS) \
|
||||
| ((uint64_t)(perm) & SHM_PERM_MASK))
|
||||
|
||||
/* pfn_and_ns_perm_flags = paddr | ns_perms */
|
||||
#define UPDATE_PFN_AND_NS_PERM_FLAGS(paddr, ns_perms) \
|
||||
((uint64_t)(paddr) | (ns_perms))
|
||||
|
||||
|
||||
/* ipfn_and_s_perm_flags = ipaddr | tz_perm */
|
||||
#define UPDATE_IPFN_AND_S_PERM_FLAGS(ipaddr, tz_perm) \
|
||||
((uint64_t)(ipaddr) | (uint64_t)(tz_perm))
|
||||
|
||||
/* size_and_flags when dest_vm is not HYP */
|
||||
#define UPDATE_SIZE_AND_FLAGS(size, destnum) \
|
||||
((size) | (destnum) << SHM_NUM_VM_SHIFT)
|
||||
|
||||
struct bridge_info {
|
||||
phys_addr_t paddr;
|
||||
void *vaddr;
|
||||
size_t size;
|
||||
uint64_t handle;
|
||||
int min_alloc_order;
|
||||
struct gen_pool *genpool;
|
||||
struct device *dev;
|
||||
};
|
||||
|
||||
struct bridge_list {
|
||||
struct list_head head;
|
||||
struct mutex lock;
|
||||
};
|
||||
|
||||
struct bridge_list_entry {
|
||||
struct list_head list;
|
||||
phys_addr_t paddr;
|
||||
uint64_t handle;
|
||||
int32_t ref_count;
|
||||
};
|
||||
|
||||
struct cma_heap_bridge_info {
|
||||
uint32_t heapid;
|
||||
uint64_t handle;
|
||||
};
|
||||
|
||||
enum CMA_HEAP_TYPE {
|
||||
QSEECOM_HEAP = 0,
|
||||
QSEECOM_TA_HEAP,
|
||||
USER_CONTI_HEAP,
|
||||
HEAP_TYPE_MAX
|
||||
};
|
||||
|
||||
static struct bridge_info default_bridge;
|
||||
static struct bridge_list bridge_list_head;
|
||||
static bool qtee_shmbridge_enabled;
|
||||
static bool support_hyp;
|
||||
|
||||
/* enable shared memory bridge mechanism in HYP */
|
||||
static int32_t qtee_shmbridge_enable(bool enable)
|
||||
{
|
||||
int32_t ret = 0;
|
||||
|
||||
qtee_shmbridge_enabled = false;
|
||||
if (!enable) {
|
||||
pr_warn("shmbridge isn't enabled\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = qcom_scm_enable_shm_bridge();
|
||||
|
||||
if (ret) {
|
||||
pr_err("Failed to enable shmbridge, ret = %d\n", ret);
|
||||
|
||||
if (ret == -EIO || ret == SHMBRIDGE_E_NOT_SUPPORTED)
|
||||
pr_warn("shmbridge is not supported by this target\n");
|
||||
return ret;
|
||||
}
|
||||
qtee_shmbridge_enabled = true;
|
||||
pr_warn("shmbridge is enabled\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Check whether shmbridge mechanism is enabled in HYP or not */
|
||||
bool qtee_shmbridge_is_enabled(void)
|
||||
{
|
||||
return qtee_shmbridge_enabled;
|
||||
}
|
||||
EXPORT_SYMBOL(qtee_shmbridge_is_enabled);
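A minimal client-side sketch of sub-allocating from the default kernel bridge, mirroring the qtee_shmbridge_allocate_shm()/qtee_shmbridge_free_shm() pattern used by __scm_smc_call earlier in this patch; the wrapper function is hypothetical.

/* Sketch only: allocate a page from the default bridge, use it as shared
 * memory, then return it to the pool.
 */
static int example_use_default_bridge(void)
{
	struct qtee_shm shm = {0};
	int ret;

	if (!qtee_shmbridge_is_enabled())
		return -EOPNOTSUPP;

	ret = qtee_shmbridge_allocate_shm(PAGE_SIZE, &shm);
	if (ret)
		return ret;

	/* ... fill shm.vaddr and hand shm.paddr to the secure world ... */

	qtee_shmbridge_free_shm(&shm);
	return 0;
}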
|
||||
|
||||
static int32_t qtee_shmbridge_list_add_locked(phys_addr_t paddr,
|
||||
uint64_t handle)
|
||||
{
|
||||
struct bridge_list_entry *entry;
|
||||
|
||||
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
|
||||
if (!entry)
|
||||
return -ENOMEM;
|
||||
entry->handle = handle;
|
||||
entry->paddr = paddr;
|
||||
entry->ref_count = 0;
|
||||
|
||||
list_add_tail(&entry->list, &bridge_list_head.head);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void qtee_shmbridge_list_del_locked(uint64_t handle)
|
||||
{
|
||||
struct bridge_list_entry *entry;
|
||||
|
||||
list_for_each_entry(entry, &bridge_list_head.head, list) {
|
||||
if (entry->handle == handle) {
|
||||
list_del(&entry->list);
|
||||
kfree(entry);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/********************************************************************
|
||||
*Function: Decrement the reference count of a registered shmbridge
|
||||
*and if the refcount reaches zero, delete the shmbridge (i.e. send an
|
||||
*SCM call to TZ and remove it from our local list too).
|
||||
*Conditions: API is supposed to be called in a locked environment
|
||||
*Return: return 0 in case of success
|
||||
* return error code in case of failure
|
||||
********************************************************************/
|
||||
static int32_t qtee_shmbridge_list_dec_refcount_locked(uint64_t handle)
|
||||
{
|
||||
struct bridge_list_entry *entry;
|
||||
int32_t ret = -EINVAL;
|
||||
|
||||
list_for_each_entry(entry, &bridge_list_head.head, list)
|
||||
if (entry->handle == handle) {
|
||||
|
||||
if (entry->ref_count > 0) {
|
||||
//decrement reference count
|
||||
entry->ref_count--;
|
||||
pr_debug("%s: bridge on %lld exists decrease refcount :%d\n",
|
||||
__func__, handle, entry->ref_count);
|
||||
|
||||
if (entry->ref_count == 0) {
|
||||
ret = qcom_scm_delete_shm_bridge(handle);
|
||||
if (ret) {
|
||||
pr_err(" %s: Failed to del bridge %lld, ret = %d\n"
|
||||
, __func__, handle, ret);
|
||||
//restore reference count in case of failure
|
||||
entry->ref_count++;
|
||||
goto exit;
|
||||
}
|
||||
qtee_shmbridge_list_del_locked(handle);
|
||||
}
|
||||
ret = 0;
|
||||
} else {
|
||||
pr_err("%s: weird, ref_count should not be negative handle %lld , refcount: %d\n",
|
||||
__func__, handle, entry->ref_count);
|
||||
}
|
||||
break;
|
||||
}
|
||||
exit:
|
||||
if (ret == -EINVAL)
|
||||
pr_err("Not able to find bridge handle %lld in map\n", handle);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/********************************************************************
 * Function: Increment the ref count when a caller tries to register
 * an already-registered paddr with shmbridge, and return the existing
 * handle to the caller through the handle pointer.
 * Conditions: must be called with bridge_list_head.lock held.
 * Return: 0 on success, error code on failure.
 ********************************************************************/
static int32_t qtee_shmbridge_list_inc_refcount_locked(phys_addr_t paddr, uint64_t *handle)
{
	struct bridge_list_entry *entry;
	int32_t ret = -EINVAL;

	list_for_each_entry(entry, &bridge_list_head.head, list)
		if (entry->paddr == paddr) {

			entry->ref_count++;
			pr_debug("%s: bridge on %llx exists, increase refcount: %d\n",
				 __func__, (uint64_t)paddr, entry->ref_count);

			/* update handle in case the paddr already exists */
			*handle = entry->handle;
			ret = 0;
			break;
		}
	if (ret)
		pr_err("%s: Not able to find bridge paddr %llx in map\n",
		       __func__, (uint64_t)paddr);
	return ret;
}

static int32_t qtee_shmbridge_query_locked(phys_addr_t paddr)
{
	struct bridge_list_entry *entry;

	list_for_each_entry(entry, &bridge_list_head.head, list)
		if (entry->paddr == paddr) {
			pr_debug("A bridge on %llx exists\n", (uint64_t)paddr);
			return -EEXIST;
		}
	return 0;
}

/* Check whether a bridge starting from paddr exists */
int32_t qtee_shmbridge_query(phys_addr_t paddr)
{
	int32_t ret = 0;

	mutex_lock(&bridge_list_head.lock);
	ret = qtee_shmbridge_query_locked(paddr);
	mutex_unlock(&bridge_list_head.lock);
	return ret;
}
EXPORT_SYMBOL(qtee_shmbridge_query);

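/*
 * Illustrative sketch (hypothetical caller, not part of this driver): a
 * client can probe for an existing bridge before creating one; the query
 * returns -EEXIST when a bridge already starts at the given paddr.
 *
 *	if (qtee_shmbridge_query(paddr) == -EEXIST)
 *		pr_debug("paddr already covered by a bridge\n");
 *	else
 *		ret = qtee_shmbridge_register(paddr, size, vmids, perms,
 *					      nr_vmids, tz_perm, &handle);
 */
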
/* Register paddr & size as a bridge, return bridge handle */
int32_t qtee_shmbridge_register(
		phys_addr_t paddr,
		size_t size,
		uint32_t *ns_vmid_list,
		uint32_t *ns_vm_perm_list,
		uint32_t ns_vmid_num,
		uint32_t tz_perm,
		uint64_t *handle)
{
	int32_t ret = 0;
	uint64_t pfn_and_ns_perm_flags = 0;
	uint64_t ipfn_and_s_perm_flags = 0;
	uint64_t size_and_flags = 0;
	uint64_t ns_perms = 0;
	uint64_t ns_vmids = 0;
	int i = 0;

	gh_vmid_t temp_vmid;

	if (!qtee_shmbridge_enabled)
		return 0;

	if (!handle || !ns_vmid_list || !ns_vm_perm_list ||
	    ns_vmid_num > MAXSHMVMS) {
		pr_err("invalid input parameters\n");
		return -EINVAL;
	}

	mutex_lock(&bridge_list_head.lock);
	ret = qtee_shmbridge_query_locked(paddr);
	if (ret) {
		pr_debug("%s: bridge for paddr %llx already exists\n",
			 __func__, (uint64_t)paddr);
		goto bridge_exist;
	}

	if (support_hyp) {

		/* Calls to create a SHMBridge from the HLOS-VM are handled by the QHEEBSP
		 * AC Layer, while calls from secondary CPU-VMs, such as OEM-VM and QTVM,
		 * are handled by the Hypervisor RM. RM always expects the destination VM
		 * fields to be 0 and only expects the self owner bit to be set.
		 */

		if (ns_vmid_num == 1) {
			if (!gh_rm_get_this_vmid(&temp_vmid) &&
			    (temp_vmid == ns_vmid_list[0])) {

				ns_vmid_num = 0;
			}
		}
	}

	for (i = 0; i < ns_vmid_num; i++) {
		ns_perms = UPDATE_NS_PERMS(ns_perms, ns_vm_perm_list[i]);
		ns_vmids = UPDATE_NS_VMIDS(ns_vmids, ns_vmid_list[i]);
	}

	pfn_and_ns_perm_flags = UPDATE_PFN_AND_NS_PERM_FLAGS(paddr, ns_perms);
	ipfn_and_s_perm_flags = UPDATE_IPFN_AND_S_PERM_FLAGS(paddr, tz_perm);
	size_and_flags = UPDATE_SIZE_AND_FLAGS(size, ns_vmid_num);

	if (support_hyp) {
		size_and_flags |= SELF_OWNER_BIT << 1;
		size_and_flags |= (VM_PERM_R | VM_PERM_W) << 2;
	}

	pr_debug("%s: desc.args[0] %llx, args[1] %llx, args[2] %llx, args[3] %llx\n",
		 __func__, pfn_and_ns_perm_flags, ipfn_and_s_perm_flags,
		 size_and_flags, ns_vmids);

	ret = qcom_scm_create_shm_bridge(pfn_and_ns_perm_flags,
			ipfn_and_s_perm_flags, size_and_flags, ns_vmids,
			handle);

	if (ret) {
		pr_err("%s: create shmbridge failed, ret = %d\n", __func__, ret);

		/* If the bridge already exists but its paddr is not in our map (i.e. we are
		 * not the original owner), add an entry to our map anyway and let the caller
		 * deregister it later; QTEE maintains its own ref_count, so a deregister is
		 * still needed to decrease the ref_count in QTEE.
		 */
		if (ret == AC_ERR_SHARED_MEMORY_SINGLE_SOURCE) {
			pr_err("%s: bridge %llx exists but is not registered in our map\n",
			       __func__, (uint64_t)paddr);
		} else {
			ret = -EINVAL;
			goto exit;
		}
	}

	ret = qtee_shmbridge_list_add_locked(paddr, *handle);
bridge_exist:
	ret = qtee_shmbridge_list_inc_refcount_locked(paddr, handle);
exit:
	mutex_unlock(&bridge_list_head.lock);
	return ret;
}
EXPORT_SYMBOL(qtee_shmbridge_register);

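/*
 * Illustrative sketch (hypothetical caller, not part of this driver): register
 * a buffer described by paddr/size for sharing with QTEE and release it again.
 * Registration is ref-counted, so registering an already-registered paddr
 * returns the existing handle, and each register must be paired with a
 * deregister.
 *
 *	uint32_t vmids[] = {VMID_HLOS};
 *	uint32_t perms[] = {VM_PERM_R | VM_PERM_W};
 *	uint64_t handle;
 *	int ret;
 *
 *	ret = qtee_shmbridge_register(paddr, size, vmids, perms,
 *				      ARRAY_SIZE(vmids), VM_PERM_R | VM_PERM_W,
 *				      &handle);
 *	if (!ret) {
 *		... use the buffer with QTEE ...
 *		qtee_shmbridge_deregister(handle);
 *	}
 */
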
/* Deregister bridge */
int32_t qtee_shmbridge_deregister(uint64_t handle)
{
	int32_t ret = 0;

	if (!qtee_shmbridge_enabled)
		return 0;

	mutex_lock(&bridge_list_head.lock);
	ret = qtee_shmbridge_list_dec_refcount_locked(handle);
	mutex_unlock(&bridge_list_head.lock);

	return ret;
}
EXPORT_SYMBOL(qtee_shmbridge_deregister);


/* Sub-allocate from the default kernel bridge created by the shmbridge driver */
int32_t qtee_shmbridge_allocate_shm(size_t size, struct qtee_shm *shm)
{
	int32_t ret = 0;
	unsigned long va;

	if (IS_ERR_OR_NULL(shm)) {
		pr_err("qtee_shm is NULL\n");
		ret = -EINVAL;
		goto exit;
	}

	if (size > default_bridge.size) {
		pr_err("requested size %zu is larger than bridge size %zu\n",
		       size, default_bridge.size);
		ret = -EINVAL;
		goto exit;
	}

	size = roundup(size, 1 << default_bridge.min_alloc_order);

	va = gen_pool_alloc(default_bridge.genpool, size);
	if (!va) {
		pr_err("failed to sub-allocate %zu bytes from bridge\n", size);
		ret = -ENOMEM;
		goto exit;
	}

	memset((void *)va, 0, size);
	shm->vaddr = (void *)va;
	shm->paddr = gen_pool_virt_to_phys(default_bridge.genpool, va);
	shm->size = size;

	pr_debug("%s: shm->paddr %llx, size %zu\n",
		 __func__, (uint64_t)shm->paddr, shm->size);

exit:
	return ret;
}
EXPORT_SYMBOL(qtee_shmbridge_allocate_shm);


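/*
 * Illustrative sketch (hypothetical caller, not part of this driver): grab a
 * small buffer from the default bridge, hand its physical address to QTEE,
 * then return it to the pool. Requests are rounded up to the pool's minimum
 * allocation order (one page).
 *
 *	struct qtee_shm shm;
 *
 *	if (!qtee_shmbridge_allocate_shm(PAGE_SIZE, &shm)) {
 *		... fill shm.vaddr and pass shm.paddr to QTEE ...
 *		qtee_shmbridge_free_shm(&shm);
 *	}
 */
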
/* Free buffer that is sub-allocated from default kernel bridge */
void qtee_shmbridge_free_shm(struct qtee_shm *shm)
{
	if (IS_ERR_OR_NULL(shm) || !shm->vaddr)
		return;
	gen_pool_free(default_bridge.genpool, (unsigned long)shm->vaddr,
		      shm->size);
}
EXPORT_SYMBOL(qtee_shmbridge_free_shm);

/* cache clean operation for buffer sub-allocated from default bridge */
void qtee_shmbridge_flush_shm_buf(struct qtee_shm *shm)
{
	if (shm)
		dma_sync_single_for_device(default_bridge.dev,
				shm->paddr, shm->size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(qtee_shmbridge_flush_shm_buf);

/* cache invalidation operation for buffer sub-allocated from default bridge */
void qtee_shmbridge_inv_shm_buf(struct qtee_shm *shm)
{
	if (shm)
		dma_sync_single_for_cpu(default_bridge.dev,
				shm->paddr, shm->size, DMA_FROM_DEVICE);
}
EXPORT_SYMBOL(qtee_shmbridge_inv_shm_buf);

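/*
 * Illustrative sketch (hypothetical caller, not part of this driver): the
 * flush/invalidate helpers wrap dma_sync_single_*() for a struct qtee_shm
 * obtained from qtee_shmbridge_allocate_shm(). Clean the cache before QTEE
 * reads the buffer and invalidate it before the CPU reads data written by
 * QTEE.
 *
 *	qtee_shmbridge_flush_shm_buf(&shm);	// CPU wrote, QTEE will read
 *	... invoke QTEE ...
 *	qtee_shmbridge_inv_shm_buf(&shm);	// QTEE wrote, CPU will read
 */
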
/*
 * shared memory bridge initialization
 *
 */
static int qtee_shmbridge_init(struct platform_device *pdev)
{
	int ret = 0;
	uint32_t custom_bridge_size;
	uint32_t *ns_vm_ids;
	uint32_t ns_vm_ids_hlos[] = {VMID_HLOS};
	uint32_t ns_vm_ids_hyp[] = {};
	uint32_t ns_vm_perms[] = {VM_PERM_R|VM_PERM_W};
	int mem_protection_enabled = 0;

	support_hyp = of_property_read_bool((&pdev->dev)->of_node,
			"qcom,support-hypervisor");
	if (support_hyp)
		ns_vm_ids = ns_vm_ids_hyp;
	else
		ns_vm_ids = ns_vm_ids_hlos;

	if (default_bridge.vaddr) {
		pr_err("qtee shmbridge is already initialized\n");
		return 0;
	}

	ret = of_property_read_u32((&pdev->dev)->of_node,
			"qcom,custom-bridge-size", &custom_bridge_size);
	if (ret)
		default_bridge.size = DEFAULT_BRIDGE_SIZE;
	else
		default_bridge.size = custom_bridge_size * MIN_BRIDGE_SIZE;

	pr_err("qtee shmbridge default bridge size is %zu bytes\n",
	       default_bridge.size);

	default_bridge.vaddr = (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP,
				get_order(default_bridge.size));
	if (!default_bridge.vaddr)
		return -ENOMEM;

	default_bridge.paddr = dma_map_single(&pdev->dev,
			default_bridge.vaddr, default_bridge.size,
			DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, default_bridge.paddr)) {
		pr_err("dma_map_single() failed\n");
		ret = -ENOMEM;
		goto exit_freebuf;
	}
	default_bridge.dev = &pdev->dev;

	/* create a general mem pool */
	default_bridge.min_alloc_order = PAGE_SHIFT; /* page-size aligned */
	default_bridge.genpool = gen_pool_create(
			default_bridge.min_alloc_order, -1);
	if (!default_bridge.genpool) {
		pr_err("gen_pool_create() failed\n");
		ret = -ENOMEM;
		goto exit_unmap;
	}

	gen_pool_set_algo(default_bridge.genpool, gen_pool_best_fit, NULL);
	ret = gen_pool_add_virt(default_bridge.genpool,
			(uintptr_t)default_bridge.vaddr,
			default_bridge.paddr, default_bridge.size, -1);
	if (ret) {
		pr_err("gen_pool_add_virt() failed, ret = %d\n", ret);
		goto exit_destroy_pool;
	}

	mutex_init(&bridge_list_head.lock);
	INIT_LIST_HEAD(&bridge_list_head.head);

	/* enable the shm bridge mechanism in HYP */
	ret = qtee_shmbridge_enable(true);
	if (ret) {
		/* keep the mem pool and return if failed to enable bridge */
		ret = 0;
		goto exit;
	}

	/* register default bridge */
	if (support_hyp)
		ret = qtee_shmbridge_register(default_bridge.paddr,
				default_bridge.size, ns_vm_ids,
				ns_vm_perms, 0, VM_PERM_R|VM_PERM_W,
				&default_bridge.handle);
	else
		ret = qtee_shmbridge_register(default_bridge.paddr,
				default_bridge.size, ns_vm_ids,
				ns_vm_perms, 1, VM_PERM_R|VM_PERM_W,
				&default_bridge.handle);

	if (ret) {
		pr_err("Failed to register default bridge, size %zu\n",
		       default_bridge.size);
		goto exit_deregister_default_bridge;
	}

	pr_debug("qtee shmbridge registered default bridge with size %zu bytes\n",
		 default_bridge.size);

	mem_protection_enabled = scm_mem_protection_init_do();
	pr_err("MEM protection %s, %d\n",
	       (!mem_protection_enabled ? "Enabled" : "Not enabled"),
	       mem_protection_enabled);
	return 0;

exit_deregister_default_bridge:
	qtee_shmbridge_deregister(default_bridge.handle);
	qtee_shmbridge_enable(false);
exit_destroy_pool:
	gen_pool_destroy(default_bridge.genpool);
exit_unmap:
	dma_unmap_single(&pdev->dev, default_bridge.paddr, default_bridge.size,
			 DMA_TO_DEVICE);
exit_freebuf:
	free_pages((long)default_bridge.vaddr, get_order(default_bridge.size));
	default_bridge.vaddr = NULL;
exit:
	return ret;
}

static int qtee_shmbridge_probe(struct platform_device *pdev)
{
#ifdef CONFIG_ARM64
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
#endif
	return qtee_shmbridge_init(pdev);
}

static int qtee_shmbridge_remove(struct platform_device *pdev)
{
	qtee_shmbridge_deregister(default_bridge.handle);
	gen_pool_destroy(default_bridge.genpool);
	dma_unmap_single(&pdev->dev, default_bridge.paddr, default_bridge.size,
			 DMA_TO_DEVICE);
	free_pages((long)default_bridge.vaddr, get_order(default_bridge.size));
	return 0;
}

static const struct of_device_id qtee_shmbridge_of_match[] = {
	{ .compatible = "qcom,tee-shared-memory-bridge"},
	{}
};
MODULE_DEVICE_TABLE(of, qtee_shmbridge_of_match);

static struct platform_driver qtee_shmbridge_driver = {
	.probe = qtee_shmbridge_probe,
	.remove = qtee_shmbridge_remove,
	.driver = {
		.name = "shared_memory_bridge",
		.of_match_table = qtee_shmbridge_of_match,
	},
};

int qtee_shmbridge_driver_init(void)
{
	return platform_driver_register(&qtee_shmbridge_driver);
}

void qtee_shmbridge_driver_exit(void)
{
	platform_driver_unregister(&qtee_shmbridge_driver);
}

drivers/firmware/qtee_shmbridge_internal.h (new file)
@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef __QCOM_QTEE_SHM_BRIDGE_INT_H_
#define __QCOM_QTEE_SHM_BRIDGE_INT_H_

int qtee_shmbridge_driver_init(void);
void qtee_shmbridge_driver_exit(void);

#define SCM_SVC_RTIC 0x19
#define TZ_HLOS_NOTIFY_CORE_KERNEL_BOOTUP 0x7
int scm_mem_protection_init_do(void);
#endif