replace common qcom sources with samsung ones

This commit is contained in:
SaschaNes
2025-08-12 22:13:00 +02:00
parent ba24dcded9
commit 6f7753de11
5682 changed files with 2450203 additions and 103634 deletions

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QTI_SMMU_PROXY_H_
#define __QTI_SMMU_PROXY_H_
@@ -12,16 +12,7 @@
#include <smmu-proxy/include/uapi/linux/qti-smmu-proxy.h>
#define SMMU_PROXY_MEM_ALIGNMENT (1 << 21)
#define SMMU_PROXY_SWITCH_OP_ACQUIRE_SID 0
#define SMMU_PROXY_SWITCH_OP_RELEASE_SID 1
int smmu_proxy_get_csf_version(struct csf_version *csf_version);
/*
* Decouple the unmap call from the SID switch, to allow the SID switch
* to happen more deterministically compared to the lazy unmap call which
* delays the SID switch.
*/
int smmu_proxy_switch_sid(struct device *client_dev, u32 op);
#endif /* __QTI_SMMU_PROXY_H_ */

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022,2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef SMMU_PROXY_MSGQ_H
#define SMMU_PROXY_MSGQ_H
@@ -28,8 +28,6 @@ enum smmu_proxy_msg_type {
SMMU_PROXY_UNMAP,
SMMU_PROXY_UNMAP_RESP,
SMMU_PROXY_ERR_RESP,
SMMU_PROXY_SWITCH_SID,
SMMU_PROXY_SWITCH_SID_RESP,
SMMU_PROXY_MSG_MAX,
};
@@ -106,26 +104,4 @@ struct smmu_proxy_unmap_resp {
struct smmu_proxy_resp_hdr hdr;
} __packed;
/**
 * struct smmu_proxy_switch_sid_req - The message format for an SID switch request from
 * another VM.
 * @hdr: Message header
 * @cb_id: The context bank id to-be switched.
 * @switch_dir: One of the values in enum sid_switch_direction.
 * SID_ACQUIRE or SID_RELEASE.
 */
struct smmu_proxy_switch_sid_req {
struct smmu_proxy_msg_hdr hdr;
u32 cb_id;
u32 switch_dir;
} __packed;
/**
 * struct smmu_proxy_switch_sid_resp - The message format for an SID switch
 * request response.
 * @hdr: Response header (carries the remote VM's return code in hdr.ret)
 */
struct smmu_proxy_switch_sid_resp {
struct smmu_proxy_resp_hdr hdr;
} __packed;
#endif /* SMMU_PROXY_MSGQ_H */

View File

@@ -5,13 +5,11 @@
#include "qti-smmu-proxy-common.h"
#include <linux/qcom-iommu-util.h>
#include <linux/qti-smmu-proxy-callbacks.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/of.h>
#include <linux/delay.h>
#define DELAY_MS 30
#define GH_MSGQ_RECV_RETRY_CNT 10
static void *msgq_hdl;
@@ -60,7 +58,7 @@ int smmu_proxy_unmap(void *data)
* No need to validate size - gh_msgq_recv() ensures that sizeof(*resp) <
* GH_MSGQ_MAX_MSG_SIZE_BYTES
*/
retry_cnt = GH_MSGQ_RECV_RETRY_CNT;
retry_cnt = 10;
do {
ret = gh_msgq_recv(msgq_hdl, buf, sizeof(*resp), &size, 0);
if (ret >= 0)
@@ -96,96 +94,6 @@ out:
return ret;
}
/**
 * smmu_proxy_switch_sid() - Request the remote VM to switch SID ownership
 * for a client's context bank.
 * @client_dev: Client device; its DT node must carry the
 *              'qti,smmu-proxy-cb-id' property identifying the context bank.
 * @op: SMMU_PROXY_SWITCH_OP_ACQUIRE_SID or SMMU_PROXY_SWITCH_OP_RELEASE_SID.
 *
 * Builds a SMMU_PROXY_SWITCH_SID message, sends it over the Gunyah message
 * queue and polls for the response with bounded retries. The whole
 * send/receive exchange is serialized under sender_mutex.
 *
 * Return: 0 on success; a negative errno on local failure; otherwise the
 * remote VM's return code taken from the response header.
 */
int smmu_proxy_switch_sid(struct device *client_dev, u32 op)
{
void *buf;
size_t size;
int ret;
struct smmu_proxy_switch_sid_req *req;
struct smmu_proxy_switch_sid_resp *resp;
int retry_cnt;
/* Serialize against other senders sharing msgq_hdl and buf. */
mutex_lock(&sender_mutex);
/* One max-sized buffer is reused for both the request and the response. */
buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
pr_err("%s: Failed to allocate memory!\n", __func__);
goto out;
}
req = buf;
req->hdr.msg_type = SMMU_PROXY_SWITCH_SID;
req->hdr.msg_size = sizeof(*req);
/* The context bank to switch is taken from the client's DT node. */
ret = of_property_read_u32(client_dev->of_node,
"qti,smmu-proxy-cb-id",
&req->cb_id);
if (ret) {
dev_err(client_dev, "%s: Err reading 'qti,smmu-proxy-cb-id' rc: %d\n",
__func__, ret);
goto free_buf;
}
/* Translate the public op code into the wire-level switch direction. */
switch (op) {
case SMMU_PROXY_SWITCH_OP_RELEASE_SID:
req->switch_dir = SID_RELEASE;
break;
case SMMU_PROXY_SWITCH_OP_ACQUIRE_SID:
req->switch_dir = SID_ACQUIRE;
break;
default:
ret = -EINVAL;
goto free_buf;
}
ret = gh_msgq_send(msgq_hdl, (void *) req, req->hdr.msg_size, 0);
if (ret < 0) {
pr_err("%s: failed to send switch message rc: %d cb_id: %d\n",
__func__, ret, req->cb_id);
goto free_buf;
}
/*
* No need to validate size - gh_msgq_recv() ensures that sizeof(*resp) <
* GH_MSGQ_MAX_MSG_SIZE_BYTES
*/
/* Poll for the response, retrying up to GH_MSGQ_RECV_RETRY_CNT times. */
retry_cnt = GH_MSGQ_RECV_RETRY_CNT;
do {
ret = gh_msgq_recv(msgq_hdl, buf, sizeof(*resp), &size, 0);
if (ret >= 0)
break;
if (retry_cnt == 1) {
/* Retries exhausted; give up with the last recv error. */
pr_err_ratelimited("%s: failed to receive message rc: %d cb_id: %d\n",
__func__, ret, req->cb_id);
goto free_buf;
}
pr_err_ratelimited("%s: failed to receive message rc: %d, retry cb_id: %d\n",
__func__, ret, req->cb_id);
/* NOTE(review): busy-wait delay between retries (DELAY_MS ms). */
mdelay(DELAY_MS);
} while (--retry_cnt);
resp = buf;
/* Propagate the remote VM's error code, if any, as our return value. */
if (resp->hdr.ret) {
ret = resp->hdr.ret;
pr_err("%s: Switch call failed on remote VM, rc: %d\n", __func__,
resp->hdr.ret);
}
/*
* NOTE(review): a wrong response type is only logged, not turned into an
* error; ret keeps whatever value it had — confirm this is intentional.
*/
if (resp->hdr.msg_type != SMMU_PROXY_SWITCH_SID_RESP) {
pr_err("%s: received incorrect msg (type: %d) for cb_id: %d\n", __func__,
resp->hdr.msg_type, req->cb_id);
}
free_buf:
kfree(buf);
out:
mutex_unlock(&sender_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(smmu_proxy_switch_sid);
int smmu_proxy_map(struct device *client_dev, struct sg_table *proxy_iova,
struct dma_buf *dmabuf)
{
@@ -272,7 +180,7 @@ int smmu_proxy_map(struct device *client_dev, struct sg_table *proxy_iova,
* No need to validate size - gh_msgq_recv() ensures that sizeof(*resp) <
* GH_MSGQ_MAX_MSG_SIZE_BYTES
*/
retry_cnt = GH_MSGQ_RECV_RETRY_CNT;
retry_cnt = 10;
resp = buf;
flags = 0;
do {

View File

@@ -30,12 +30,7 @@ static DEFINE_MUTEX(buffer_state_lock);
static DEFINE_XARRAY(buffer_state_arr);
static unsigned int cb_map_counts[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
/*
 * Per-context-bank state: the CB's device handle plus an 'acquired' flag
 * tracking whether its SID is currently programmed (set on a successful
 * SID_ACQUIRE switch, cleared on SID_RELEASE).
 */
struct cb_dev {
struct device *dev;
struct {
uint32_t acquired : 1;
};
} cb_devices[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
struct device *cb_devices[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
struct task_struct *receiver_msgq_handler_thread;
@@ -100,14 +95,13 @@ static int iommu_unmap_and_relinquish(u32 hdl)
/* If nothing left is mapped for this CB, unprogram its SMR */
cb_map_counts[cb_id]--;
if (!cb_map_counts[cb_id] && cb_devices[cb_id].acquired) {
ret = qcom_iommu_sid_switch(cb_devices[cb_id].dev, SID_RELEASE);
if (!cb_map_counts[cb_id]) {
ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
if (ret) {
pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
__func__, cb_id, ret);
break;
}
cb_devices[cb_id].acquired = false;
}
}
}
@@ -154,59 +148,6 @@ static int process_unmap_request(struct smmu_proxy_unmap_req *req, size_t size)
return ret;
}
/**
 * process_switch_sid_request() - Handle an incoming SID switch request
 * from another VM and send back a response.
 * @req: The received switch request (cb_id and switch direction).
 * @size: Size of the received message.
 *        NOTE(review): @size is never validated here — confirm the caller
 *        guarantees the message is at least sizeof(*req).
 *
 * Performs the qcom_iommu_sid_switch() for the requested context bank
 * unless it is already in the requested state, then replies with a
 * SMMU_PROXY_SWITCH_SID_RESP carrying the outcome.
 *
 * Return: 0 on success, or a negative errno from allocation, validation,
 * the SID switch, or sending the response.
 */
static int process_switch_sid_request(struct smmu_proxy_switch_sid_req *req, size_t size)
{
struct smmu_proxy_switch_sid_resp *resp;
int ret = 0;
resp = kzalloc(sizeof(*resp), GFP_KERNEL);
if (!resp)
return -ENOMEM;
/* Reject context bank ids outside the cb_devices[] table. */
if (req->cb_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
ret = -ERANGE;
goto exit_resp;
}
/*
 * For now, we only expect sid switch for the Display CB,
 * but we are not disabling other CBs in case it is needed.
 */
/* 'acquired' state is protected by buffer_state_lock. */
mutex_lock(&buffer_state_lock);
/* Idempotent: a switch to the current state is ignored, not an error. */
if (req->switch_dir == SID_ACQUIRE && cb_devices[req->cb_id].acquired)
pr_info("cb_id: %d has already been acquired. Ignoring request.\n", req->cb_id);
else if (req->switch_dir == SID_RELEASE && !cb_devices[req->cb_id].acquired)
pr_info("cb_id: %d has already been released. Ignoring request.\n", req->cb_id);
else
ret = qcom_iommu_sid_switch(cb_devices[req->cb_id].dev, req->switch_dir);
if (ret)
pr_err("%s: Failed to switch sid request: %d for cb_id %d %s\n", __func__,
ret, req->cb_id, (req->switch_dir == SID_ACQUIRE) ? "ACQUIRE" : "RELEASE");
else
/* Record the new state only after a successful (or no-op) switch. */
cb_devices[req->cb_id].acquired = (req->switch_dir == SID_ACQUIRE);
mutex_unlock(&buffer_state_lock);
exit_resp:
/* Always reply, carrying the local outcome in hdr.ret. */
resp->hdr.msg_type = SMMU_PROXY_SWITCH_SID_RESP;
resp->hdr.msg_size = sizeof(*resp);
resp->hdr.ret = ret;
ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
if (ret < 0)
pr_err("%s: failed to send response to switch request rc: %d\n", __func__, ret);
else
pr_info("%s: response to switch sid request: %d for cb_id %d %s\n", __func__,
resp->hdr.ret, req->cb_id,
(req->switch_dir == SID_ACQUIRE) ? "ACQUIRE" : "RELEASE");
kfree(resp);
return ret;
}
static
inline
struct sg_table *retrieve_and_iommu_map(struct mem_buf_retrieve_kernel_arg *retrieve_arg,
@@ -224,7 +165,7 @@ struct sg_table *retrieve_and_iommu_map(struct mem_buf_retrieve_kernel_arg *retr
return ERR_PTR(-EINVAL);
}
if (!cb_devices[cb_id].dev) {
if (!cb_devices[cb_id]) {
pr_err("%s: CB of ID %d not defined\n", __func__, cb_id);
return ERR_PTR(-EINVAL);
}
@@ -270,7 +211,7 @@ struct sg_table *retrieve_and_iommu_map(struct mem_buf_retrieve_kernel_arg *retr
buf_state->dmabuf = dmabuf;
}
attachment = dma_buf_attach(dmabuf, cb_devices[cb_id].dev);
attachment = dma_buf_attach(dmabuf, cb_devices[cb_id]);
if (IS_ERR(attachment)) {
ret = PTR_ERR(attachment);
pr_err("%s: Failed to attach rc: %d\n", __func__, ret);
@@ -294,14 +235,13 @@ struct sg_table *retrieve_and_iommu_map(struct mem_buf_retrieve_kernel_arg *retr
buf_state->cb_info[cb_id].attachment = attachment;
buf_state->cb_info[cb_id].sg_table = table;
if (!cb_devices[cb_id].acquired) {
ret = qcom_iommu_sid_switch(cb_devices[cb_id].dev, SID_ACQUIRE);
if (!cb_map_counts[cb_id]) {
ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_ACQUIRE);
if (ret) {
pr_err("%s: Failed to program SMRs for cb_id %d rc: %d\n", __func__,
cb_id, ret);
goto unmap;
}
cb_devices[cb_id].acquired = true;
}
cb_map_counts[cb_id]++;
@@ -320,13 +260,11 @@ unlock:
dec_cb_map_count:
cb_map_counts[cb_id]--;
if (!cb_map_counts[cb_id] && cb_devices[cb_id].acquired) {
ret = qcom_iommu_sid_switch(cb_devices[cb_id].dev, SID_RELEASE);
if (!cb_map_counts[cb_id]) {
ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
if (ret)
pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
__func__, cb_id, ret);
else
cb_devices[cb_id].acquired = false;
}
unmap:
dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
@@ -443,9 +381,6 @@ static void smmu_proxy_process_msg(void *buf, size_t size)
case SMMU_PROXY_UNMAP:
ret = process_unmap_request(buf, size);
break;
case SMMU_PROXY_SWITCH_SID:
ret = process_switch_sid_request(buf, size);
break;
default:
pr_err("%s: received message of unknown type: %d\n", __func__,
msg_hdr->msg_type);
@@ -761,7 +696,7 @@ static int cb_probe_handler(struct device *dev)
return -EINVAL;
}
if (cb_devices[context_bank_id].dev) {
if (cb_devices[context_bank_id]) {
dev_err(dev, "Context bank %u is already populated\n", context_bank_id);
return -EINVAL;
}
@@ -785,7 +720,7 @@ static int cb_probe_handler(struct device *dev)
}
iommu_set_fault_handler(domain, proxy_fault_handler, NULL);
cb_devices[context_bank_id].dev = dev;
cb_devices[context_bank_id] = dev;
return 0;
}