sm8750: init kernel modules repo

2025-08-11 12:21:01 +02:00
parent 2681143b87
commit facad83b01
8851 changed files with 6894561 additions and 0 deletions


@@ -0,0 +1,336 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
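/*
 * QCE front-end (FE) driver: exposes a "qce" char device and forwards
 * dma-buf mapping requests to the QCE back-end over HAB sockets.
 */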
#include <linux/version.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/qcedev.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include "qcedev_smmu.h"
#include "qcedev_fe.h"
#define QCE_FE_FIRST_MINOR 0
#define QCE_FE_MINOR_CNT 1
#define QCEDEV_MAX_BUFFERS 16
#define QCE_MM_HAB_ID MM_GPCE_1
#define HAB_OPEN_WAIT_TIMEOUT_MS (300)
static struct cdev cdev_qce_fe;
static dev_t dev_qce_fe;
static struct class *class_qce_fe;
static struct completion create_hab_channel_done;
struct qce_fe_drv_hab_handles *drv_handles;
static int qce_fe_hab_open(uint32_t *handle);
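/* open(): allocate a per-fd handle with an empty list of registered buffers */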
static int qce_fe_open(struct inode *i, struct file *f)
{
struct qcedev_fe_handle *handle;
handle = kzalloc(sizeof(struct qcedev_fe_handle), GFP_KERNEL);
if (handle == NULL)
return -ENOMEM;
f->private_data = handle;
mutex_init(&handle->registeredbufs.lock);
INIT_LIST_HEAD(&handle->registeredbufs.list);
pr_info("%s : Done succesffuly\n", __func__);
return 0;
}
static int qce_fe_close(struct inode *i, struct file *f)
{
struct qcedev_fe_handle *handle;
pr_info("%s: Driver close\n", __func__);
handle = f->private_data;
if (qcedev_unmap_all_buffers(handle, drv_handles))
pr_err("%s: failed to unmap all ion buffers\n", __func__);
kfree(handle);
f->private_data = NULL;
return 0;
}
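/*
 * ioctl(): QCEDEV_IOCTL_MAP_BUF_REQ maps up to num_fds dma-buf fds into the
 * back-end SMMU and returns the device addresses in buf_vaddr[];
 * QCEDEV_IOCTL_UNMAP_BUF_REQ drops those mappings again.
 */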
#if (KERNEL_VERSION(2, 6, 35) > LINUX_VERSION_CODE)
static int qce_fe_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
#else
static long qce_fe_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
#endif
{
int err = 0;
struct qcedev_fe_handle *handle;
handle = f->private_data;
/* Verify user arguments. */
if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) {
err = -ENOTTY;
goto exit_qcedev;
}
switch (cmd) {
case QCEDEV_IOCTL_MAP_BUF_REQ:
{
unsigned long long vaddr = 0;
struct qcedev_map_buf_req map_buf = { {0} };
int i = 0;
if (copy_from_user(&map_buf,
(void __user *)arg, sizeof(map_buf))) {
err = -EFAULT;
goto exit_qcedev;
}
if (map_buf.num_fds > ARRAY_SIZE(map_buf.fd)) {
pr_err("%s: err: num_fds = %d exceeds max value\n",
__func__, map_buf.num_fds);
err = -EINVAL;
goto exit_qcedev;
}
for (i = 0; i < map_buf.num_fds; i++) {
err = qcedev_check_and_map_buffer(handle,
map_buf.fd[i],
map_buf.fd_offset[i],
map_buf.fd_size[i],
&vaddr,
drv_handles);
if (err) {
pr_err(
"%s: err: failed to map fd(%d) - %d\n",
__func__, map_buf.fd[i], err);
goto exit_qcedev;
}
map_buf.buf_vaddr[i] = vaddr;
pr_info("%s: info: vaddr = %llx\n",
__func__, vaddr);
}
if (copy_to_user((void __user *)arg, &map_buf,
sizeof(map_buf))) {
err = -EFAULT;
goto exit_qcedev;
}
break;
}
case QCEDEV_IOCTL_UNMAP_BUF_REQ:
{
struct qcedev_unmap_buf_req unmap_buf = { { 0 } };
int i = 0;
if (copy_from_user(&unmap_buf,
(void __user *)arg, sizeof(unmap_buf))) {
err = -EFAULT;
goto exit_qcedev;
}
if (unmap_buf.num_fds > ARRAY_SIZE(unmap_buf.fd)) {
pr_err("%s: err: num_fds = %d exceeds max value\n",
__func__, unmap_buf.num_fds);
err = -EINVAL;
goto exit_qcedev;
}
for (i = 0; i < unmap_buf.num_fds; i++) {
err = qcedev_check_and_unmap_buffer(handle,
unmap_buf.fd[i],
drv_handles);
if (err) {
pr_err(
"%s: err: failed to unmap fd(%d) - %d\n",
__func__,
unmap_buf.fd[i], err);
goto exit_qcedev;
}
}
break;
}
default:
pr_err("QCE_FE: Failed. Invalid IOCTL cmd 0x%x\n", cmd);
err = -EINVAL;
}
exit_qcedev:
return err;
}
static const struct file_operations qce_fe_fops = {
.owner = THIS_MODULE,
.open = qce_fe_open,
.release = qce_fe_close,
#if (KERNEL_VERSION(2, 6, 35) > LINUX_VERSION_CODE)
.ioctl = qce_fe_ioctl
#else
.unlocked_ioctl = qce_fe_ioctl
#endif
};
static int qce_fe_hab_open(uint32_t *handle)
{
int ret;
if (handle == NULL || *handle != 0) {
pr_err("Invalid parameters\n");
return -EINVAL;
}
ret = habmm_socket_open(handle, QCE_MM_HAB_ID, 0, 0);
if (ret) {
pr_err("habmm_socket_open failed with ret = %d\n", ret);
return ret;
}
return 0;
}
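/*
 * Kthread body: allocate the driver-wide handle table and open
 * HAB_HANDLE_NUM sockets to the back-end. The loop body runs at most once;
 * kthread_should_stop() only lets init abort the thread early. The
 * completion is signalled once all handles are open.
 */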
static int qce_fe_create_hab_channel(void *pv)
{
int ret = 0;
int i = 0;
while (!kthread_should_stop()) {
drv_handles = kzalloc(sizeof(struct qce_fe_drv_hab_handles), GFP_KERNEL);
if (drv_handles == NULL)
return -ENOMEM;
spin_lock_init(&(drv_handles->handle_lock));
/* open hab handles which will be used for communication with QCE backend */
for (i = 0 ; i < HAB_HANDLE_NUM; i++) {
ret = qce_fe_hab_open(&(drv_handles->qce_fe_hab_handles[i].handle));
if (ret != 0)
return ret;
drv_handles->qce_fe_hab_handles[i].in_use = false;
if (i == 0)
drv_handles->initialized = true;
}
complete(&create_hab_channel_done);
pr_info("%s: Create hab channel succeeded\n", __func__);
break;
}
return ret;
}
static int qce_fe_hab_close(uint32_t handle)
{
int ret;
if (handle == 0)
return 0;
ret = habmm_socket_close(handle);
if (ret) {
pr_err("habmm_socket_close failed with ret = %d\n", ret);
return ret;
}
return 0;
}
static void qce_fe_destroy_hab_channel(void)
{
int i = 0;
if (drv_handles == NULL)
return;
spin_lock(&(drv_handles->handle_lock));
/* close the hab handles that were used for communication with the QCE backend */
for (i = 0 ; i < HAB_HANDLE_NUM; i++)
qce_fe_hab_close(drv_handles->qce_fe_hab_handles[i].handle);
spin_unlock(&(drv_handles->handle_lock));
pr_info("%s: Close hab channel succeeded\n", __func__);
}
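/*
 * Module init: register the char device, create the "qce" class and device
 * node, then open the HAB channels from a kthread so a slow or absent
 * back-end cannot block module load for more than HAB_OPEN_WAIT_TIMEOUT_MS.
 */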
static int __init qce_fe_init(void)
{
int ret;
struct device *dev_ret;
struct task_struct *create_channel_kthread_task;
pr_info("%s:QCE FE driver init\n", __func__);
/* Dynamically allocate device numbers */
ret = alloc_chrdev_region(&dev_qce_fe, QCE_FE_FIRST_MINOR, QCE_FE_MINOR_CNT, "qce");
if (ret < 0) {
pr_err("%s: failed with error: %d\n", __func__, ret);
return -EFAULT;
}
cdev_init(&cdev_qce_fe, &qce_fe_fops);
ret = cdev_add(&cdev_qce_fe, dev_qce_fe, QCE_FE_MINOR_CNT);
if (ret < 0) {
pr_err("%s: cdev_add() failed with error: %d\n", __func__, ret);
ret = -EFAULT;
goto unregister_chrdev_region_error;
}
class_qce_fe = class_create(THIS_MODULE, "qce");
if (IS_ERR_OR_NULL(class_qce_fe)) {
pr_err("%s: class_create() failed\n", __func__);
ret = -EFAULT;
goto cdev_del_error;
}
/* create a device and registers it with sysfs */
dev_ret = device_create(class_qce_fe, NULL, dev_qce_fe, NULL, "qce");
if (IS_ERR_OR_NULL(dev_ret)) {
pr_err("%s: device_create() failed\n", __func__);
ret = -EFAULT;
goto class_destroy_error;
}
init_completion(&create_hab_channel_done);
create_channel_kthread_task = kthread_run(qce_fe_create_hab_channel, NULL,
"thread_create_channel");
if (IS_ERR(create_channel_kthread_task)) {
pr_err("fail to create kthread to create hab channels\n");
ret = -EFAULT;
goto device_destroy_error;
}
if (wait_for_completion_interruptible_timeout(
&create_hab_channel_done, msecs_to_jiffies(HAB_OPEN_WAIT_TIMEOUT_MS)) <= 0) {
pr_err("%s: timeout hit\n", __func__);
if ((drv_handles != NULL) && (drv_handles->initialized)) {
pr_info("%s:create hab channels partially succeeded\t"
"performance might be affected\n", __func__);
kthread_stop(create_channel_kthread_task);
return 0;
}
pr_err("%s:create hab channels failed, unloading qce_fe\n", __func__);
kthread_stop(create_channel_kthread_task);
/* temporarily don't set ret value */
//ret = -ETIME;
ret = 0;
goto device_destroy_error;
}
return 0;
device_destroy_error:
device_destroy(class_qce_fe, dev_qce_fe);
class_destroy_error:
class_destroy(class_qce_fe);
cdev_del_error:
cdev_del(&cdev_qce_fe);
unregister_chrdev_region_error:
unregister_chrdev_region(dev_qce_fe, QCE_FE_MINOR_CNT);
return ret;
}
static void __exit qce_fe_exit(void)
{
pr_info("%s: Unloading qce fe driver.\n", __func__);
qce_fe_destroy_hab_channel();
device_destroy(class_qce_fe, dev_qce_fe);
class_destroy(class_qce_fe);
cdev_del(&cdev_qce_fe);
unregister_chrdev_region(dev_qce_fe, QCE_FE_MINOR_CNT);
}
module_init(qce_fe_init);
module_exit(qce_fe_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("QCE FE");
MODULE_IMPORT_NS(DMA_BUF);


@@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_FE_PARSE_H_
#define _DRIVERS_CRYPTO_FE_PARSE_H_
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/types.h>
#include <linux/habmm.h>
#define HAB_HANDLE_NUM 6
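/* Per-fd mapping state for one dma-buf exported to the back-end */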
struct qcedev_fe_ion_buf_info {
u64 smmu_device_address;
unsigned long mapped_buf_size;
int buf_ion_fd;
u32 export_id;
u32 is_secure;
struct dma_buf *dma_buf;
};
struct qcedev_fe_reg_buf_info {
struct list_head list;
union {
struct qcedev_fe_ion_buf_info ion_buf;
};
atomic_t ref_count;
};
struct qcedev_fe_buffer_list {
struct list_head list;
struct mutex lock;
};
struct qcedev_fe_handle {
/* qcedev mapped buffer list */
struct qcedev_fe_buffer_list registeredbufs;
};
struct qce_fe_hab_handle {
uint32_t handle;
bool in_use;
};
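/* Driver-wide pool of HAB sockets, claimed one at a time under handle_lock */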
struct qce_fe_drv_hab_handles {
struct qce_fe_hab_handle qce_fe_hab_handles[HAB_HANDLE_NUM];
spinlock_t handle_lock;
bool initialized;
};
#endif //_DRIVERS_CRYPTO_FE_PARSE_H_


@@ -0,0 +1,363 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/list.h>
#include <linux/mem-buf.h>
#include <linux/slab.h>
#include "qcedev_smmu.h"
#include "soc/qcom/secure_buffer.h"
static int find_free_hab_handle(struct qce_fe_drv_hab_handles *drv_handles)
{
int handle_id = -1;
int i = 0;
if (drv_handles == NULL) {
pr_err("%s : invalid handle\n", __func__);
return handle_id;
}
spin_lock(&(drv_handles->handle_lock));
for (i = 0; i < HAB_HANDLE_NUM; i++) {
if (!drv_handles->qce_fe_hab_handles[i].in_use) {
drv_handles->qce_fe_hab_handles[i].in_use = true;
handle_id = i;
spin_unlock(&(drv_handles->handle_lock));
return handle_id;
}
}
spin_unlock(&(drv_handles->handle_lock));
pr_err("%s : there is no available hab handle\n", __func__);
return handle_id;
}
static void free_hab_handle(struct qce_fe_drv_hab_handles *drv_handles, int handle_id)
{
spin_lock(&(drv_handles->handle_lock));
drv_handles->qce_fe_hab_handles[handle_id].in_use = false;
spin_unlock(&(drv_handles->handle_lock));
}
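/*
 * Map one dma-buf into the back-end SMMU: export the buffer over HAB,
 * send an MSM_GPCE_SMMU_VM_CMD_MAP command, then poll (for up to ~500 ms)
 * for the response carrying the device address.
 */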
static int msm_gpce_ion_smmu_map(struct dma_buf *dma_buf,
struct qcedev_fe_reg_buf_info *binfo, struct qce_fe_drv_hab_handles *drv_handles)
{
int rc = -2;
u32 export_id;
u32 cmd_rsp_size;
u32 is_secure;
bool exported = false;
struct msm_gpce_smmu_vm_map_cmd smmu_map_cmd;
struct msm_gpce_smmu_vm_map_cmd_rsp cmd_rsp;
unsigned long delay = jiffies + (HZ / 2);
int handle_id = find_free_hab_handle(drv_handles);
u32 msm_gpce_ion_hab_handle = 0;
size_t len = 0;
if (handle_id < 0) {
pr_err("%s : there is no available hab handle\n", __func__);
return -ENODEV;
}
if (IS_ERR_OR_NULL(dma_buf)) {
pr_err("%s : Failed to get dma_buf\n", __func__);
dma_buf = NULL;
goto err;
}
msm_gpce_ion_hab_handle = (drv_handles->qce_fe_hab_handles[handle_id]).handle;
len = dma_buf->size;
//Check if Buffer is secure or not
is_secure = !(mem_buf_dma_buf_exclusive_owner(dma_buf));
/* Export the buffer to physical VM */
rc = habmm_export(msm_gpce_ion_hab_handle, dma_buf, len,
&export_id, HABMM_EXPIMP_FLAGS_DMABUF);
if (rc) {
pr_err("%s: habmm_export failed dma_buf = %pK, len = %zd, rc = %d\n",
__func__, dma_buf, len, rc);
goto err;
}
exported = true;
smmu_map_cmd.cmd_id = MSM_GPCE_SMMU_VM_CMD_MAP;
smmu_map_cmd.export_id = export_id;
smmu_map_cmd.buf_size = len;
smmu_map_cmd.mem_handle = NULL;
smmu_map_cmd.is_secure = is_secure;
rc = habmm_socket_send(msm_gpce_ion_hab_handle,
(void *)&smmu_map_cmd, sizeof(smmu_map_cmd), 0);
if (rc) {
pr_err("%s: habmm_socket_send failed %d\n",
__func__, rc);
goto err;
}
do {
cmd_rsp_size = sizeof(cmd_rsp);
rc = habmm_socket_recv(msm_gpce_ion_hab_handle,
(void *)&cmd_rsp,
&cmd_rsp_size,
0xFFFFFFFF,
0);
} while (time_before(jiffies, delay) && (rc == -EINTR) &&
(cmd_rsp_size == 0));
if (rc) {
pr_err("%s: habmm_socket_recv failed %d\n",
__func__, rc);
goto err;
}
if (cmd_rsp_size != sizeof(cmd_rsp)) {
pr_err("%s: invalid size for cmd rsp %u, expected %zu\n",
__func__, cmd_rsp_size, sizeof(cmd_rsp));
rc = -EIO;
goto err;
}
if (cmd_rsp.status) {
pr_err("%s: SMMU map command failed %d\n",
__func__, cmd_rsp.status);
rc = cmd_rsp.status;
goto err;
}
binfo->ion_buf.export_id = export_id;
binfo->ion_buf.smmu_device_address = cmd_rsp.addr;
binfo->ion_buf.mapped_buf_size = len;
binfo->ion_buf.is_secure = is_secure;
binfo->ion_buf.dma_buf = dma_buf;
free_hab_handle(drv_handles, handle_id);
return 0;
err:
if (exported)
(void)habmm_unexport(msm_gpce_ion_hab_handle, export_id, 0);
free_hab_handle(drv_handles, handle_id);
return rc;
}
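/*
 * Reverse of msm_gpce_ion_smmu_map(): send MSM_GPCE_SMMU_VM_CMD_UNMAP,
 * wait for the status response, then unexport the buffer from HAB.
 */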
static int msm_gpce_ion_smmu_unmap(struct qcedev_fe_ion_buf_info *dma_buffer_info,
struct qce_fe_drv_hab_handles *drv_handles)
{
int rc;
u32 cmd_rsp_size;
struct msm_gpce_smmu_vm_unmap_cmd smmu_unmap_cmd;
struct msm_gpce_smmu_vm_unmap_cmd_rsp cmd_rsp;
unsigned long delay = jiffies + (HZ / 2);
int handle_id = find_free_hab_handle(drv_handles);
u32 msm_gpce_ion_hab_handle = 0;
if (handle_id < 0) {
pr_err("%s : there is no available hab handle\n", __func__);
return -ENODEV;
}
msm_gpce_ion_hab_handle = drv_handles->qce_fe_hab_handles[handle_id].handle;
smmu_unmap_cmd.cmd_id = MSM_GPCE_SMMU_VM_CMD_UNMAP;
smmu_unmap_cmd.export_id = dma_buffer_info->export_id;
smmu_unmap_cmd.is_secure = dma_buffer_info->is_secure;
pr_debug("%s: export_id %u\n", __func__, smmu_unmap_cmd.export_id);
rc = habmm_socket_send(msm_gpce_ion_hab_handle,
(void *)&smmu_unmap_cmd,
sizeof(smmu_unmap_cmd), 0);
if (rc) {
pr_err("%s: habmm_socket_send failed %d\n",
__func__, rc);
goto err;
}
do {
cmd_rsp_size = sizeof(cmd_rsp);
rc = habmm_socket_recv(msm_gpce_ion_hab_handle,
(void *)&cmd_rsp,
&cmd_rsp_size,
0xFFFFFFFF,
0);
} while (time_before(jiffies, delay) &&
(rc == -EINTR) && (cmd_rsp_size == 0));
if (rc) {
pr_err("%s: habmm_socket_recv failed %d\n",
__func__, rc);
goto err;
}
if (cmd_rsp_size != sizeof(cmd_rsp)) {
pr_err("%s: invalid size for cmd rsp %u\n",
__func__, cmd_rsp_size);
rc = -EIO;
goto err;
}
if (cmd_rsp.status) {
pr_err("%s: SMMU unmap command failed %d\n", __func__, cmd_rsp.status);
rc = cmd_rsp.status;
goto err;
}
rc = habmm_unexport(msm_gpce_ion_hab_handle,
dma_buffer_info->export_id, 0xFFFFFFFF);
if (rc) {
pr_err("%s: habmm_unexport failed export_id = %d, rc = %d\n",
__func__, dma_buffer_info->export_id, rc);
}
free_hab_handle(drv_handles, handle_id);
pr_err("%s: SMMU unmap command status %d\n", __func__, rc);
return rc;
err:
free_hab_handle(drv_handles, handle_id);
return rc;
}
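/*
 * Look up fd in the handle's registered-buffer list; map it (taking a
 * dma_buf reference) on first use, bump the refcount otherwise, and return
 * the back-end device address adjusted by offset.
 */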
int qcedev_check_and_map_buffer(void *handle,
int fd, unsigned int offset, unsigned int fd_size,
unsigned long long *vaddr, struct qce_fe_drv_hab_handles *drv_handles)
{
struct qcedev_fe_reg_buf_info *binfo = NULL, *temp = NULL;
bool found = false;
struct qcedev_fe_handle *qce_hndl = (struct qcedev_fe_handle *)handle;
int rc = 0;
struct dma_buf *buf = NULL;
unsigned long mapped_size = 0;
if (!handle || !vaddr || fd < 0 || offset >= fd_size) {
pr_err("%s: err: invalid input arguments\n", __func__);
return -EINVAL;
}
/* Check if the buffer fd is already mapped */
mutex_lock(&qce_hndl->registeredbufs.lock);
list_for_each_entry(temp, &qce_hndl->registeredbufs.list, list) {
if (temp->ion_buf.buf_ion_fd == fd) {
found = true;
*vaddr = temp->ion_buf.smmu_device_address;
mapped_size = temp->ion_buf.mapped_buf_size;
atomic_inc(&temp->ref_count);
break;
}
}
mutex_unlock(&qce_hndl->registeredbufs.lock);
/* If buffer fd is not mapped then create a fresh mapping */
if (!found) {
pr_debug("%s: info: ion fd not registered with driver\n",
__func__);
binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
if (!binfo) {
pr_err("%s: err: failed to allocate binfo\n",
__func__);
rc = -ENOMEM;
goto error;
}
binfo->ion_buf.buf_ion_fd = fd;
buf = dma_buf_get(fd);
if (IS_ERR_OR_NULL(buf)) {
rc = -EINVAL;
goto error;
}
rc = msm_gpce_ion_smmu_map(buf, binfo, drv_handles);
if (rc) {
pr_err("%s: err: failed to map fd (%d) error = %d\n",
__func__, fd, rc);
goto error;
}
*vaddr = binfo->ion_buf.smmu_device_address;
mapped_size = binfo->ion_buf.mapped_buf_size;
atomic_inc(&binfo->ref_count);
/* Add buffer mapping information to regd buffer list */
mutex_lock(&qce_hndl->registeredbufs.lock);
list_add_tail(&binfo->list, &qce_hndl->registeredbufs.list);
mutex_unlock(&qce_hndl->registeredbufs.lock);
}
/* Make sure the offset is within the mapped range */
if (offset >= mapped_size) {
pr_err(
"%s: err: Offset (%u) exceeds mapped size(%lu) for fd: %d\n",
__func__, offset, mapped_size, fd);
rc = -ERANGE;
goto unmap;
}
/* return the mapped virtual address adjusted by offset */
*vaddr += offset;
return 0;
unmap:
if (!found) {
msm_gpce_ion_smmu_unmap(&(binfo->ion_buf), drv_handles);
mutex_lock(&qce_hndl->registeredbufs.lock);
list_del(&binfo->list);
mutex_unlock(&qce_hndl->registeredbufs.lock);
}
error:
kfree(binfo);
return rc;
}
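/* Drop one reference on fd's mapping; unmap and free it on the last put */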
int qcedev_check_and_unmap_buffer(void *handle, int fd, struct qce_fe_drv_hab_handles *drv_handles)
{
struct qcedev_fe_reg_buf_info *binfo = NULL, *dummy = NULL;
struct qcedev_fe_handle *qce_hndl = handle;
bool found = false;
if (!handle || fd < 0) {
pr_err("%s: err: invalid input arguments\n", __func__);
return -EINVAL;
}
/* Check if the buffer fd is mapped and present in the regd list. */
mutex_lock(&qce_hndl->registeredbufs.lock);
list_for_each_entry_safe(binfo, dummy,
&qce_hndl->registeredbufs.list, list) {
if (binfo->ion_buf.buf_ion_fd == fd) {
found = true;
atomic_dec(&binfo->ref_count);
/* Unmap only if there are no more references */
if (atomic_read(&binfo->ref_count) == 0) {
msm_gpce_ion_smmu_unmap(&(binfo->ion_buf), drv_handles);
dma_buf_put(binfo->ion_buf.dma_buf);
list_del(&binfo->list);
kfree(binfo);
}
break;
}
}
mutex_unlock(&qce_hndl->registeredbufs.lock);
if (!found) {
pr_err("%s: err: calling unmap on unknown fd %d\n",
__func__, fd);
return -EINVAL;
}
return 0;
}
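/* Called on release(): tear down every mapping still registered on handle */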
int qcedev_unmap_all_buffers(void *handle, struct qce_fe_drv_hab_handles *drv_handles)
{
struct qcedev_fe_reg_buf_info *binfo = NULL;
struct qcedev_fe_handle *qce_hndl = handle;
struct list_head *pos;
if (!handle) {
pr_err("%s: err: invalid input arguments\n", __func__);
return -EINVAL;
}
mutex_lock(&qce_hndl->registeredbufs.lock);
while (!list_empty(&qce_hndl->registeredbufs.list)) {
pos = qce_hndl->registeredbufs.list.next;
binfo = list_entry(pos, struct qcedev_fe_reg_buf_info, list);
if (binfo)
msm_gpce_ion_smmu_unmap(&(binfo->ion_buf), drv_handles);
list_del(pos);
kfree(binfo);
}
mutex_unlock(&qce_hndl->registeredbufs.lock);
pr_info("%s: Done successfully\n", __func__);
return 0;
}


@@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DRIVERS_QCEDEV_SMMU_H_
#define _DRIVERS_QCEDEV_SMMU_H_
#include <linux/dma-buf.h>
#include <linux/types.h>
#include "qcedev_fe.h"
struct msm_gpce_smmu_vm_map_cmd {
int cmd_id;
u32 export_id;
u32 buf_size;
void *mem_handle;
u32 is_secure;
} __packed;
struct msm_gpce_smmu_vm_map_cmd_rsp {
int status;
u64 addr;
} __packed;
struct msm_gpce_smmu_vm_unmap_cmd {
int cmd_id;
u32 export_id;
void *unused; // for alignment with the host
u32 is_secure;
} __packed;
struct msm_gpce_smmu_vm_unmap_cmd_rsp {
int status;
} __packed;
enum gpce_cmd_id {
MSM_GPCE_SMMU_VM_CMD_MAP,
MSM_GPCE_SMMU_VM_CMD_UNMAP,
};
int qcedev_check_and_map_buffer(void *qce_hndl,
int fd, unsigned int offset, unsigned int fd_size,
unsigned long long *vaddr, struct qce_fe_drv_hab_handles *drv_handles);
int qcedev_check_and_unmap_buffer(void *handle, int fd, struct qce_fe_drv_hab_handles *drv_handles);
int qcedev_unmap_all_buffers(void *handle, struct qce_fe_drv_hab_handles *drv_handles);
#endif //_DRIVERS_QCEDEV_SMMU_H_