replace common qcom sources with samsung ones

Author: SaschaNes
Date: 2025-08-12 22:13:00 +02:00
Parent: ba24dcded9
Commit: 6f7753de11
5682 changed files with 2,450,203 additions and 103,634 deletions

View File

@@ -38,7 +38,7 @@ static struct attribute *attrs[] = {
NULL,
};
static u32 cdsp_state = CDSP_SUBSYS_DOWN;
static u32 cdsp_state = CDSP_SUBSYS_LOADED;
static struct platform_device *cdsp_private;
static void cdsp_loader_unload(struct platform_device *pdev);
@@ -155,7 +155,6 @@ static void cdsp_loader_unload(struct platform_device *pdev)
if (priv->pil_h && cdsp_state == CDSP_SUBSYS_LOADED) {
dev_dbg(&pdev->dev, "%s: calling subsystem_put\n", __func__);
rproc_shutdown(priv->pil_h);
priv->pil_h = NULL;
cdsp_state = CDSP_SUBSYS_DOWN;
}
}

View File

@@ -226,12 +226,7 @@ static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
spin_lock(&fl->lock);
list_for_each_entry(map, &fl->maps, node) {
/*
* Retrieve the map if the DMA buffer and fd match. For
* duplicated fds with the same DMA buffer, create separate
* maps for each duplicated fd.
*/
if (map->buf == buf && map->fd == fd)
if (map->buf == buf)
goto map_found;
}
goto error;
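
Note: this hunk drops the fd comparison from the lookup, so duplicated fds that wrap the same DMA buffer now resolve to a single shared map instead of one map per fd (the deleted comment described the old per-fd behavior). A minimal sketch of the resulting lookup idiom, using simplified stand-in types rather than the driver's real structures:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/dma-buf.h>

/* Simplified stand-in for the driver's map structure. */
struct user_map {
	struct list_head node;
	struct dma_buf *buf;
	int fd;
};

/* Find an existing map for a dma_buf under the per-user lock.
 * Matching on the buffer alone means two fds dup'ed from the same
 * buffer share one map entry. */
static struct user_map *map_lookup(spinlock_t *lock,
				   struct list_head *maps,
				   struct dma_buf *buf)
{
	struct user_map *map, *found = NULL;

	spin_lock(lock);
	list_for_each_entry(map, maps, node) {
		if (map->buf == buf) {
			found = map;
			break;
		}
	}
	spin_unlock(lock);
	return found;
}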
@@ -449,8 +444,7 @@ static int __fastrpc_buf_alloc(struct fastrpc_user *fl,
struct fastrpc_buf *buf;
struct timespec64 start_ts, end_ts;
/* Check if the size is valid (non-zero and within integer range) */
if (!size || size > INT_MAX)
if (!size)
return -EFAULT;
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
@@ -526,7 +520,7 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl,
return ret;
}
return ret;
return 0;
}
/**
@@ -663,7 +657,6 @@ static void fastrpc_context_free(struct kref *ref)
kfree(ctx->maps);
kfree(ctx->olaps);
kfree(ctx->args);
kfree(ctx->outbufs);
kfree(ctx);
fastrpc_channel_ctx_put(cctx);
@@ -1210,7 +1203,6 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
int err = 0, sgl_index = 0;
struct device *dev = NULL;
struct fastrpc_smmu *smmucb = NULL;
struct fastrpc_pool_ctx *secsctx = NULL;
u32 smmuidx = DEFAULT_SMMU_IDX;
if (!fastrpc_map_lookup(fl, fd, va, len, buf, mflags, ppmap, take_ref))
@@ -1250,13 +1242,12 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
if (map->secure && (!(attr & FASTRPC_ATTR_NOMAP || mflags == FASTRPC_MAP_FD_NOMAP))) {
if (!fl->secsctx) {
secsctx = fastrpc_session_alloc(fl, true);
if (!secsctx) {
fl->secsctx = fastrpc_session_alloc(fl, true);
if (!fl->secsctx) {
dev_err(fl->cctx->dev, "No secure session available\n");
err = -EBUSY;
goto attach_err;
}
fl->secsctx = secsctx;
}
sess = fl->secsctx;
} else {
@@ -1309,7 +1300,6 @@ map_retry:
smmuidx++;
goto map_retry;
} else if (err) {
mutex_unlock(&smmucb->map_mutex);
goto map_err;
}
@@ -1488,69 +1478,14 @@ static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf
return (struct fastrpc_phy_page *)(&buf[len]);
}
/*
* Validate the user provided buffer against the map buffer and
* retrieve the offset if the buffer is valid.
* @arg1: invoke context
* @arg2: current index passed during ctx map iteration
* @arg3: output argument pointer to get the offset
*
* Return: returns 0 on success, error code on failure.
*/
static int fastrpc_get_buffer_offset(struct fastrpc_invoke_ctx *ctx, int index,
u64 *offset)
{
u64 addr = (u64)ctx->args[index].ptr & PAGE_MASK, vm_start = 0, vm_end = 0;
struct vm_area_struct *vma;
struct file *vma_file = NULL;
int err = 0;
if (!(ctx->maps[index]->attr & FASTRPC_ATTR_NOVA)) {
mmap_read_lock(current->mm);
vma = find_vma(current->mm, ctx->args[index].ptr);
if (vma) {
vm_start = vma->vm_start;
vm_end = vma->vm_end;
vma_file = vma->vm_file;
}
mmap_read_unlock(current->mm);
/*
* Error out if:
* 1. The DMA buffer file does not match the VMA file
* retrieved from the user provided buffer va
* 2. The user provided buffers address does not fall
* within the VMA address range
* 3. The length of the user provided buffer does not
* fall within the VMA address range
*/
if ((ctx->maps[index]->buf->file != vma_file) ||
(addr < vm_start || addr + ctx->args[index].length > vm_end ||
(addr - vm_start) + ctx->args[index].length >
ctx->maps[index]->size)) {
err = -EFAULT;
goto bail;
}
*offset = addr - vm_start;
}
return 0;
bail:
dev_err(ctx->fl->cctx->dev,
"Invalid buffer fd %d addr 0x%llx len 0x%llx vm start 0x%llx vm end 0x%llx IPA 0x%llx size 0x%llx, err %d\n",
ctx->maps[index]->fd, ctx->args[index].ptr,
ctx->args[index].length, vm_start, vm_end,
ctx->maps[index]->phys, ctx->maps[index]->size, err);
return err;
}
static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
struct device *dev = ctx->fl->sctx->smmucb[DEFAULT_SMMU_IDX].dev;
union fastrpc_remote_arg *rpra;
struct fastrpc_invoke_buf *list;
struct fastrpc_phy_page *pages;
int inbufs, outbufs, i, oix, err = 0;
u64 len, rlen, pkt_size, outbufslen;
int inbufs, i, oix, err = 0;
u64 len, rlen, pkt_size;
u64 pg_start, pg_end;
u64 *perf_counter = NULL;
uintptr_t args;
@@ -1560,7 +1495,6 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
perf_counter = (u64 *)ctx->perf + PERF_COUNT;
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
metalen = fastrpc_get_meta_size(ctx);
pkt_size = fastrpc_get_payload_size(ctx, metalen);
if (!pkt_size) {
@@ -1605,12 +1539,15 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
continue;
if (ctx->maps[i]) {
struct vm_area_struct *vma = NULL;
u64 addr = (u64)ctx->args[i].ptr & PAGE_MASK, vm_start = 0,
vm_end = 0;
PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
rpra[i].buf.pv = (u64) ctx->args[i].ptr;
pages[i].addr = ctx->maps[i]->phys;
/* validate user passed buffer length with map buffer size */
if (len > ctx->maps[i]->size) {
err = -EFAULT;
dev_err(dev,
@@ -1619,12 +1556,29 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
ctx->maps[i]->size, ctx->maps[i]->fd);
goto bail;
}
err = fastrpc_get_buffer_offset(ctx, i, &offset);
if (err)
goto bail;
if (!(ctx->maps[i]->attr & FASTRPC_ATTR_NOVA)) {
mmap_read_lock(current->mm);
vma = find_vma(current->mm, ctx->args[i].ptr);
if (vma) {
vm_start = vma->vm_start;
vm_end = vma->vm_end;
}
mmap_read_unlock(current->mm);
if (addr < vm_start || addr + len > vm_end ||
(addr - vm_start) + len > ctx->maps[i]->size) {
err = -EFAULT;
dev_err(dev,
"Invalid buffer addr 0x%llx len 0x%llx vm start 0x%llx vm end 0x%llx IPA 0x%llx size 0x%llx\n",
ctx->args[i].ptr, len, vm_start, vm_end,
ctx->maps[i]->phys, ctx->maps[i]->size);
goto bail;
}
else
offset = addr - vm_start;
pages[i].addr += offset;
}
pages[i].addr += offset;
pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
pg_start = addr >> PAGE_SHIFT;
pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
PAGE_SHIFT;
pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
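
Note: the replacement code inlines the VMA validation that the deleted fastrpc_get_buffer_offset() helper performed: look up the VMA backing the user pointer, confirm the buffer fits inside both the VMA and the DMA map, and derive the page offset. A condensed sketch of that check, with illustrative names rather than the driver's API:

#include <linux/mm.h>
#include <linux/sched.h>

/* Validate that [uaddr, uaddr + len) lies inside one VMA of the
 * current process and inside the mapped region of size map_size,
 * then return the page-aligned offset of uaddr within that VMA. */
static int validate_user_range(u64 uaddr, u64 len, u64 map_size,
			       u64 *offset)
{
	u64 addr = uaddr & PAGE_MASK, vm_start = 0, vm_end = 0;
	struct vm_area_struct *vma;

	mmap_read_lock(current->mm);
	vma = find_vma(current->mm, uaddr);
	if (vma) {
		vm_start = vma->vm_start;
		vm_end = vma->vm_end;
	}
	mmap_read_unlock(current->mm);

	/* Reject buffers that leak past the VMA or past the DMA map. */
	if (!vma || addr < vm_start || addr + len > vm_end ||
	    (addr - vm_start) + len > map_size)
		return -EFAULT;

	*offset = addr - vm_start;
	return 0;
}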
@@ -1688,13 +1642,6 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
rpra[i].dma.len = ctx->args[i].length;
rpra[i].dma.offset = (u64) ctx->args[i].ptr;
}
outbufslen = sizeof(struct fastrpc_remote_buf) * outbufs;
ctx->outbufs = kzalloc(outbufslen, GFP_KERNEL);
if (!ctx->outbufs) {
err = -ENOMEM;
goto bail;
}
memcpy(ctx->outbufs, rpra + inbufs, outbufslen);
bail:
if (err)
@@ -1727,10 +1674,9 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
for (i = inbufs; i < ctx->nbufs; ++i) {
if (!ctx->maps[i]) {
int j = i - inbufs;
void *src = (void *)(uintptr_t)ctx->outbufs[j].buf.pv;
void *src = (void *)(uintptr_t)rpra[i].buf.pv;
void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
u64 len = ctx->outbufs[j].buf.len;
u64 len = rpra[i].buf.len;
if (!kernel) {
if (copy_to_user((void __user *)dst, src, len))
@@ -2328,30 +2274,17 @@ static int fastrpc_get_process_gids(struct gid_list *gidlist)
static void fastrpc_check_privileged_process(struct fastrpc_user *fl,
struct fastrpc_init_create *init)
{
struct gid_list gidlist = {0};
u32 gid;
u32 gid = sorted_lists_intersection(fl->gidlist.gids,
fl->gidlist.gidcount, fl->cctx->gidlist.gids,
fl->cctx->gidlist.gidcount);
/* disregard any privilege bits from userspace */
init->attrs &= (~FASTRPC_MODE_PRIVILEGED);
if (fastrpc_get_process_gids(&gidlist)) {
dev_info(fl->cctx->dev, "%s failed to get gidlist\n",
__func__);
return;
}
gid = sorted_lists_intersection(gidlist.gids,
gidlist.gidcount, fl->cctx->gidlist.gids,
fl->cctx->gidlist.gidcount);
if (gid) {
dev_info(fl->cctx->dev, "%s: %s (PID %d, GID %u) is a privileged process\n",
__func__, current->comm, fl->tgid, gid);
init->attrs |= FASTRPC_MODE_PRIVILEGED;
}
/* Free memory for gid allocated in fastrpc_get_process_gids */
kfree(gidlist.gids);
}
int fastrpc_mmap_remove_ssr(struct fastrpc_channel_ctx *cctx)
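
Note: the reworked check intersects the caller's GID list, now cached in fl->gidlist at process creation instead of being fetched and freed on every call, with the channel's privileged GID list. Assuming both arrays are sorted ascending, sorted_lists_intersection() would be the classic two-pointer merge walk; a sketch of that assumed behavior:

/* First element common to two ascending u32 arrays, 0 if none.
 * Two-pointer walk, O(na + nb), no allocation. */
static u32 sorted_lists_intersection(u32 *a, u32 na, u32 *b, u32 nb)
{
	u32 i = 0, j = 0;

	while (i < na && j < nb) {
		if (a[i] == b[j])
			return a[i];
		if (a[i] < b[j])
			i++;
		else
			j++;
	}
	return 0;
}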
@@ -2602,12 +2535,11 @@ static int fastrpc_debugfs_show(struct seq_file *s_file, void *data)
seq_printf(s_file,"%s %9s %d\n", "pd_type", ":", fl->pd_type);
seq_printf(s_file,"%s %9s %d\n", "profile", ":", fl->profile);
if(!fl->cctx)
return 0;
seq_printf(s_file,"\n=============== Channel Context ===============\n");
ctx = fl->cctx;
print_ctx_info(s_file, ctx);
if(fl->cctx) {
seq_printf(s_file,"\n=============== Channel Context ===============\n");
ctx = fl->cctx;
print_ctx_info(s_file, ctx);
}
if(fl->sctx) {
seq_printf(s_file,"\n=============== Session Context ===============\n");
sctx = fl->sctx;
@@ -2657,9 +2589,9 @@ static int fastrpc_debugfs_show(struct seq_file *s_file, void *data)
print_map_info(s_file, map);
}
seq_printf(s_file,"\n=============== Kernel maps ===============\n");
list_for_each_entry(buf, &fl->mmaps, node) {
if (buf)
print_buf_info(s_file, buf);
list_for_each_entry(map, &fl->mmaps, node) {
if (map)
print_map_info(s_file, map);
}
seq_printf(s_file,"\n=============== Cached Bufs ===============\n");
list_for_each_entry_safe(buf, n, &fl->cached_bufs, node) {
@@ -2689,37 +2621,37 @@ static int fastrpc_create_session_debugfs(struct fastrpc_user *fl)
int domain_id = -1, size = 0;
struct dentry *debugfs_root = g_frpc.debugfs_root;
if (atomic_cmpxchg(&fl->debugfs_file_create, 0, 1))
return 0;
memcpy(cur_comm, current->comm, TASK_COMM_LEN);
cur_comm[TASK_COMM_LEN-1] = '\0';
if (debugfs_root != NULL && fl != NULL) {
if (debugfs_root != NULL) {
domain_id = fl->cctx->domain_id;
size = strlen(cur_comm) + strlen("_")
+ COUNT_OF(current->pid) + strlen("_")
+ COUNT_OF(fl->tgid_frpc) + strlen("_")
+ COUNT_OF(FASTRPC_DEV_MAX)
+ 1;
if (!(fl->debugfs_file_create)) {
size = strlen(cur_comm) + strlen("_")
+ COUNT_OF(current->pid) + strlen("_")
+ COUNT_OF(FASTRPC_DEV_MAX)
+ 1;
fl->debugfs_buf = kzalloc(size, GFP_KERNEL);
if (fl->debugfs_buf == NULL) {
return -ENOMEM;
fl->debugfs_buf = kzalloc(size, GFP_KERNEL);
if (fl->debugfs_buf == NULL) {
return -ENOMEM;
}
/*
* Use HLOS process name, HLOS PID, unique fastrpc PID
* domain_id in debugfs filename to create unique file name
*/
snprintf(fl->debugfs_buf, size, "%.10s%s%d%s%d%s%d",
cur_comm, "_", current->pid, "_",
fl->tgid_frpc, "_", domain_id);
fl->debugfs_file = debugfs_create_file(fl->debugfs_buf, 0644,
debugfs_root, fl, &fastrpc_debugfs_fops);
if (IS_ERR_OR_NULL(fl->debugfs_file)) {
pr_warn("Error: %s: %s: failed to create debugfs file %s\n",
cur_comm, __func__, fl->debugfs_buf);
fl->debugfs_file = NULL;
}
kfree(fl->debugfs_buf);
fl->debugfs_file_create = true;
}
/*
* Use HLOS process name, HLOS PID, unique fastrpc PID
* domain_id in debugfs filename to create unique file name
*/
snprintf(fl->debugfs_buf, size, "%.10s%s%d%s%d%s%d",
cur_comm, "_", current->pid, "_",
fl->tgid_frpc, "_", domain_id);
fl->debugfs_file = debugfs_create_file(fl->debugfs_buf, 0644,
debugfs_root, fl, &fastrpc_debugfs_fops);
if (IS_ERR_OR_NULL(fl->debugfs_file)) {
pr_warn("Error: %s: %s: failed to create debugfs file %s\n",
cur_comm, __func__, fl->debugfs_buf);
fl->debugfs_file = NULL;
}
kfree(fl->debugfs_buf);
}
return 0;
}
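
Note: the removed variant gates the one-time file creation with atomic_cmpxchg(), so only the first of several racing callers performs the setup; the bool flag it reverts to is safe only if callers are serialized elsewhere. A minimal sketch of the cmpxchg guard, with hypothetical names:

#include <linux/atomic.h>
#include <linux/debugfs.h>

static atomic_t setup_done = ATOMIC_INIT(0);

/* Run one-time setup exactly once across concurrent callers:
 * atomic_cmpxchg() returns the previous value, so only the caller
 * that swaps 0 -> 1 proceeds; every later caller sees 1 and bails. */
static int create_debugfs_once(struct dentry *root, void *priv,
			       const struct file_operations *fops)
{
	struct dentry *file;

	if (atomic_cmpxchg(&setup_done, 0, 1))
		return 0;

	file = debugfs_create_file("state", 0644, root, priv, fops);
	return IS_ERR_OR_NULL(file) ? -ENOMEM : 0;
}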
@@ -2734,7 +2666,6 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
struct fastrpc_phy_page pages[1];
struct fastrpc_buf *buf = NULL;
struct fastrpc_smmu *smmucb = NULL;
struct fastrpc_pool_ctx *sctx = NULL;
u64 phys = 0, size = 0;
char *name;
int err = 0;
@@ -2763,13 +2694,12 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
if (IS_ERR(name))
return PTR_ERR(name);
sctx = fastrpc_session_alloc(fl, false);
if (!sctx) {
fl->sctx = fastrpc_session_alloc(fl, false);
if (!fl->sctx) {
dev_err(fl->cctx->dev, "No session available\n");
err = -EBUSY;
goto err_name;
}
fl->sctx = sctx;
smmucb = &fl->sctx->smmucb[DEFAULT_SMMU_IDX];
is_oispd = !strcmp(name, "oispd");
@@ -3058,11 +2988,9 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
struct fastrpc_phy_page pages[NUM_PAGES_WITH_PROC_INIT_SHAREDBUF] = {0};
struct fastrpc_map *configmap = NULL;
struct fastrpc_buf *imem = NULL;
struct fastrpc_pool_ctx *sctx = NULL;
int memlen;
int err = 0;
int user_fd = fl->config.user_fd, user_size = fl->config.user_size;
void *file = NULL;
struct {
int pgid;
u32 namelen;
@@ -3083,27 +3011,6 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
DSP_CREATE_START) != DEFAULT_PROC_STATE)
return -EALREADY;
/* Verify shell file passed by user */
if (init.filefd <= 0) {
if (!init.filelen || !init.file) {
/*In this case shell will be loaded by DSP using daemon */
init.file = 0;
init.filelen = 0;
} else {
file = kzalloc(init.filelen, GFP_KERNEL);
if (!file) {
err = -ENOMEM;
goto err_out;
}
if (copy_from_user(file,
(void *)(uintptr_t)init.file,
init.filelen)) {
err = -EFAULT;
dev_err(fl->cctx->dev, "copy_from_user failed for shell file\n");
goto err_out;
}
}
}
/*
* Third-party apps don't have permission to open the fastrpc device, so
* it is opened on their behalf by DSP HAL. This is detected by
@@ -3134,13 +3041,14 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
if (fl->is_unsigned_pd && fl->cctx->smmucb_pool)
fl->pd_type = USER_UNSIGNEDPD_POOL;
sctx = fastrpc_session_alloc(fl, false);
if (!sctx) {
fl->sctx = fastrpc_session_alloc(fl, false);
if (!fl->sctx) {
dev_err(fl->cctx->dev, "No session available\n");
err = -EBUSY;
goto err_out;
}
fl->sctx = sctx;
fastrpc_get_process_gids(&fl->gidlist);
/* In case of privileged process update attributes */
fastrpc_check_privileged_process(fl, &init);
@@ -3196,7 +3104,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
args[1].length = inbuf.namelen;
args[1].fd = -1;
args[2].ptr = file ? (u64)(uintptr_t)file : init.file;
args[2].ptr = (u64) init.file;
args[2].length = inbuf.filelen;
args[2].fd = init.filefd;
@@ -3230,6 +3138,7 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
}
#ifdef CONFIG_DEBUG_FS
if (fl != NULL)
fastrpc_create_session_debugfs(fl);
#endif
/* remove buffer on success as no longer required */
@@ -3237,7 +3146,6 @@ static int fastrpc_init_create_process(struct fastrpc_user *fl,
fastrpc_buf_free(fl->proc_init_sharedbuf, false);
fl->proc_init_sharedbuf = NULL;
}
kfree(file);
return 0;
@@ -3257,7 +3165,6 @@ err_alloc:
mutex_unlock(&fl->map_mutex);
}
err_out:
kfree(file);
/* Reset the process state to its default in case of an error. */
atomic_set(&fl->state, DEFAULT_PROC_STATE);
return err;
@@ -3300,20 +3207,21 @@ static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
return fastrpc_internal_invoke(fl, KERNEL_MSG_WITH_NONZERO_PID, &ioctl);
}
/*
* Helper function to increment / decrement invoke count of channel
* Caller of this function MUST spin-lock 'cctx->lock' first.
*/
/* Helper function to increment / decrement invoke count of channel */
static inline void fastrpc_channel_update_invoke_cnt(
struct fastrpc_channel_ctx *cctx, bool incr)
{
unsigned long flags = 0;
if (incr) {
cctx->invoke_cnt++;
atomic_inc(&cctx->invoke_cnt);
} else {
cctx->invoke_cnt--;
spin_lock_irqsave(&cctx->lock, flags);
atomic_dec(&cctx->invoke_cnt);
/* Wake up any waiting SSR handling thread */
if (cctx->invoke_cnt == 0)
if (atomic_read(&cctx->invoke_cnt) == 0)
wake_up_interruptible(&cctx->ssr_wait_queue);
spin_unlock_irqrestore(&cctx->lock, flags);
}
}
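
Note: the reworked helper drops the caller-must-hold-cctx->lock contract: the counter becomes an atomic_t, the increment is lock-free, and the decrement path takes the lock itself only to order the zero check against the SSR thread sleeping on the wait queue, which fastrpc_rpmsg_remove() drains with wait_event_interruptible() in a later hunk. A condensed sketch of that pairing, with assumed names:

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static atomic_t invoke_cnt = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(chan_lock);
static DECLARE_WAIT_QUEUE_HEAD(ssr_wq);

/* Invokers bump the in-flight count lock-free. */
static void invoke_get(void)
{
	atomic_inc(&invoke_cnt);
}

/* The decrement-and-wake is done under the lock so the sleeper's
 * condition re-check cannot miss the final wakeup. */
static void invoke_put(void)
{
	unsigned long flags;

	spin_lock_irqsave(&chan_lock, flags);
	atomic_dec(&invoke_cnt);
	if (atomic_read(&invoke_cnt) == 0)
		wake_up_interruptible(&ssr_wq);
	spin_unlock_irqrestore(&chan_lock, flags);
}

/* Teardown sleeps until all in-flight invocations drain. */
static void teardown_drain(void)
{
	wait_event_interruptible(ssr_wq, atomic_read(&invoke_cnt) == 0);
}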
@@ -3440,6 +3348,7 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
spin_lock_irqsave(&cctx->lock, flags);
list_del(&fl->user);
spin_unlock_irqrestore(&cctx->lock, flags);
kfree(fl->gidlist.gids);
spin_lock_irqsave(&fl->proc_state_notif.nqlock, flags);
atomic_add(1, &fl->proc_state_notif.notif_queue_count);
@@ -3495,13 +3404,10 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
mutex_destroy(&fl->signal_create_mutex);
mutex_destroy(&fl->remote_map_mutex);
mutex_destroy(&fl->map_mutex);
mutex_destroy(&fl->pm_qos_mutex);
spin_lock_irqsave(glock, irq_flags);
kfree(fl);
spin_lock_irqsave(&cctx->lock, flags);
fastrpc_channel_update_invoke_cnt(cctx, false);
spin_unlock_irqrestore(&cctx->lock, flags);
fastrpc_channel_ctx_put(cctx);
file->private_data = NULL;
spin_unlock_irqrestore(glock, irq_flags);
@@ -3535,7 +3441,6 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
mutex_init(&fl->map_mutex);
spin_lock_init(&fl->dspsignals_lock);
mutex_init(&fl->signal_create_mutex);
mutex_init(&fl->pm_qos_mutex);
INIT_LIST_HEAD(&fl->pending);
INIT_LIST_HEAD(&fl->interrupted);
INIT_LIST_HEAD(&fl->maps);
@@ -3674,19 +3579,17 @@ static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
struct fastrpc_invoke_args args[1];
struct fastrpc_enhanced_invoke ioctl;
struct fastrpc_pool_ctx *sctx = NULL;
int err, tgid = fl->tgid_frpc;
if (!fl->is_secure_dev) {
dev_err(fl->cctx->dev, "untrusted app trying to attach to privileged DSP PD\n");
return -EACCES;
}
sctx = fastrpc_session_alloc(fl, false);
if (!sctx) {
fl->sctx = fastrpc_session_alloc(fl, false);
if (!fl->sctx) {
dev_err(fl->cctx->dev, "No session available\n");
return -EBUSY;
}
fl->sctx = sctx;
/*
* Default value at fastrpc_device_open is set as DEFAULT_UNUSED.
@@ -3722,6 +3625,7 @@ static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
return err;
#ifdef CONFIG_DEBUG_FS
if (fl != NULL)
fastrpc_create_session_debugfs(fl);
#endif
return 0;
@@ -3869,7 +3773,7 @@ static int fastrpc_manage_poll_mode(struct fastrpc_user *fl, u32 enable, u32 tim
static int fastrpc_internal_control(struct fastrpc_user *fl,
struct fastrpc_internal_control *cp)
{
int err = 0;
int err = 0, ret = 0;
struct fastrpc_channel_ctx *cctx = fl->cctx;
u32 latency = 0, cpu = 0;
unsigned long flags = 0;
@@ -3899,30 +3803,28 @@ static int fastrpc_internal_control(struct fastrpc_user *fl,
* id 0. If DT property 'qcom,single-core-latency-vote' is enabled
* then add voting request for only one core of cluster id 0.
*/
mutex_lock(&fl->pm_qos_mutex);
for (cpu = 0; cpu < cctx->lowest_capacity_core_count; cpu++) {
if (!fl->qos_request) {
err = dev_pm_qos_add_request(
ret = dev_pm_qos_add_request(
get_cpu_device(cpu),
&fl->dev_pm_qos_req[cpu],
DEV_PM_QOS_RESUME_LATENCY,
latency);
} else {
err = dev_pm_qos_update_request(
ret = dev_pm_qos_update_request(
&fl->dev_pm_qos_req[cpu],
latency);
}
if (err < 0) {
if (ret < 0) {
dev_err(fl->cctx->dev, "QoS with lat %u failed for CPU %d, err %d, req %d\n",
latency, cpu, err, fl->qos_request);
break;
}
}
if (err >= 0) {
if (ret >= 0) {
fl->qos_request = 1;
err = 0;
}
mutex_unlock(&fl->pm_qos_mutex);
break;
case FASTRPC_CONTROL_SMMU:
fl->sharedcb = cp->smmu.sharedcb;
@@ -4832,7 +4734,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
return 0;
err_assign:
err = fastrpc_req_munmap_dsp(fl, buf->raddr, buf->size);
err = fastrpc_req_munmap_impl(fl, buf);
if (err) {
if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR) {
spin_lock_irqsave(&fl->cctx->lock, flags);
@@ -5092,9 +4994,7 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
atomic_set(&fl->state, DSP_CREATE_COMPLETE);
}
spin_lock_irqsave(&cctx->lock, flags);
fastrpc_channel_update_invoke_cnt(cctx, false);
spin_unlock_irqrestore(&cctx->lock, flags);
fastrpc_channel_ctx_put(fl->cctx);
return err;
}
@@ -5156,7 +5056,6 @@ long fastrpc_dev_map_dma(struct fastrpc_device *dev,
unsigned long invoke_param)
{
int err = 0;
bool is_cnt_updated = false;
union fastrpc_dev_param p;
struct fastrpc_user *fl = NULL;
struct fastrpc_map *map = NULL;
@@ -5167,6 +5066,7 @@ long fastrpc_dev_map_dma(struct fastrpc_device *dev,
p.map = (struct fastrpc_dev_map_dma *)invoke_param;
spin_lock_irqsave(glock, irq_flags);
if (!dev || dev->dev_close) {
err = -ESRCH;
@@ -5187,22 +5087,6 @@ long fastrpc_dev_map_dma(struct fastrpc_device *dev,
fl->is_dma_invoke_pend = true;
spin_unlock_irqrestore(glock, irq_flags);
spin_lock_irqsave(&cctx->lock, irq_flags);
if (atomic_read(&cctx->teardown)) {
spin_unlock_irqrestore(&cctx->lock, irq_flags);
err = -EPIPE;
goto error;
} else {
/*
* Update invoke count to block SSR handling thread
* from cleaning up the channel resources, while it
* is still being used by this thread.
*/
fastrpc_channel_update_invoke_cnt(cctx, true);
is_cnt_updated = true;
}
spin_unlock_irqrestore(&cctx->lock, irq_flags);
/* Map DMA buffer on SMMU device*/
mutex_lock(&fl->remote_map_mutex);
mutex_lock(&fl->map_mutex);
@@ -5252,11 +5136,9 @@ error:
}
fl->is_dma_invoke_pend = false;
}
mutex_unlock(&fl->remote_map_mutex);
if (is_cnt_updated)
fastrpc_channel_update_invoke_cnt(cctx, false);
spin_unlock_irqrestore(&cctx->lock, irq_flags);
fastrpc_channel_ctx_put(cctx);
mutex_unlock(&fl->remote_map_mutex);
return err;
}
/*
@@ -5274,7 +5156,6 @@ long fastrpc_dev_unmap_dma(struct fastrpc_device *dev,
unsigned long invoke_param)
{
int err = 0;
bool is_cnt_updated = false;
union fastrpc_dev_param p;
struct fastrpc_user *fl = NULL;
struct fastrpc_map *map = NULL;
@@ -5303,22 +5184,6 @@ long fastrpc_dev_unmap_dma(struct fastrpc_device *dev,
fl->is_dma_invoke_pend = true;
spin_unlock_irqrestore(glock, irq_flags);
spin_lock_irqsave(&cctx->lock, irq_flags);
if (atomic_read(&cctx->teardown)) {
spin_unlock_irqrestore(&cctx->lock, irq_flags);
err = -EPIPE;
goto error;
} else {
/*
* Update invoke count to block SSR handling thread
* from cleaning up the channel resources, while it
* is still being used by this thread.
*/
fastrpc_channel_update_invoke_cnt(cctx, true);
is_cnt_updated = true;
}
spin_unlock_irqrestore(&cctx->lock, irq_flags);
mutex_lock(&fl->remote_map_mutex);
mutex_lock(&fl->map_mutex);
err = fastrpc_map_lookup(fl, -1, 0, 0, p.unmap->buf,
@@ -5360,11 +5225,9 @@ error:
}
fl->is_dma_invoke_pend = false;
}
mutex_unlock(&fl->remote_map_mutex);
if (is_cnt_updated)
fastrpc_channel_update_invoke_cnt(cctx, false);
spin_unlock_irqrestore(&cctx->lock, irq_flags);
fastrpc_channel_ctx_put(cctx);
mutex_unlock(&fl->remote_map_mutex);
return err;
}
/*
@@ -5592,7 +5455,7 @@ int fastrpc_driver_register(struct fastrpc_driver *frpc_driver)
return -ESRCH;
process_found:
if(atomic_read(&user->state) >= DSP_EXIT_START) {
if(user->device->dev_close) {
spin_unlock_irqrestore(&cctx->lock, irq_flags);
pr_err("%s : process already exited", __func__);
return -ESRCH;
@@ -5619,11 +5482,9 @@ void fastrpc_notify_users(struct fastrpc_user *user)
/*
* After audio or ois PDR, skip notifying the pending kill call,
* as the DSP guestOS may still be processing and might result in
* improper access issues. But in case of SSR cleanup pending
* kill calls as well.
* improper access issues.
*/
if (atomic_read(&fl->state) >= DSP_EXIT_START &&
!IS_SSR(fl) && IS_PDR(fl) &&
if (atomic_read(&fl->state) >= DSP_EXIT_START && IS_PDR(fl) &&
fl->pd_type != SENSORS_STATICPD &&
ctx->msg.handle == FASTRPC_INIT_HANDLE)
continue;
@@ -6095,9 +5956,7 @@ void fastrpc_register_wakeup_source(struct device *dev,
static void fastrpc_notify_user_ctx(struct fastrpc_invoke_ctx *ctx, int retval,
u32 rsp_flags, u32 early_wake_time)
{
if(!ctx->cctx)
return;
if (!atomic_read(&ctx->cctx->teardown))
if (ctx->cctx && !atomic_read(&ctx->cctx->teardown))
fastrpc_pm_awake(ctx->fl, ctx->cctx->secure);
ctx->retval = retval;
ctx->rsp_flags = (enum fastrpc_response_flags)rsp_flags;

View File

@@ -190,14 +190,14 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return 0;
fdev_error:
kfree(data);
populate_error:
if (data->fdevice)
misc_deregister(&data->fdevice->miscdev);
if (data->secure_fdevice)
misc_deregister(&data->secure_fdevice->miscdev);
populate_error:
kfree(data->gidlist.gids);
data->gidlist.gids = NULL;
kfree(data);
return err;
}
@@ -238,10 +238,10 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
* If there are other ongoing remote invocations, wait for them to
* complete before cleaning up the channel resources, to avoid UAF.
*/
while (cctx->invoke_cnt > 0) {
while (atomic_read(&cctx->invoke_cnt) > 0) {
spin_unlock_irqrestore(&cctx->lock, flags);
wait_event_interruptible(cctx->ssr_wait_queue,
cctx->invoke_cnt == 0);
atomic_read(&cctx->invoke_cnt) == 0);
spin_lock_irqsave(&cctx->lock, flags);
}
spin_unlock_irqrestore(&cctx->lock, flags);
@@ -309,10 +309,6 @@ int fastrpc_transport_send(struct fastrpc_channel_ctx *cctx, void *rpc_msg, uint
return -EPIPE;
err = rpmsg_send(cctx->rpdev->ept, rpc_msg, rpc_msg_size);
if (err == -EIO) {
pr_err("fastrpc: failed to send message due to SSR\n");
err = -EPIPE;
}
return err;
}

View File

@@ -253,8 +253,6 @@
#define FASTRPC_DSPSIGNAL_GROUP_SIZE 256
/* Macro to return PDR status */
#define IS_PDR(fl) (fl->spd && fl->spd->pdrcount != fl->spd->prevpdrcount)
/* Macro to return SSR status */
#define IS_SSR(fl) (fl && fl->cctx && atomic_read(&fl->cctx->teardown))
#define AUDIO_PDR_SERVICE_LOCATION_CLIENT_NAME "audio_pdr_adsp"
#define AUDIO_PDR_ADSP_SERVICE_NAME "avs/audio"
@@ -708,7 +706,7 @@ struct fastrpc_channel_ctx {
/* Flag to indicate CB pooling is enabled for channel */
bool smmucb_pool;
/* Number of active ongoing invocations (device ioctl / release) */
u32 invoke_cnt;
atomic_t invoke_cnt;
/* Completion object for threads to wait for SSR handling to finish */
struct completion ssr_complete;
/* Wait queue to block/resume SSR until all invocations are complete */
@@ -742,7 +740,6 @@ struct fastrpc_invoke_ctx {
struct fastrpc_msg msg;
struct fastrpc_user *fl;
union fastrpc_remote_arg *rpra;
union fastrpc_remote_arg *outbufs;
struct fastrpc_map **maps;
struct fastrpc_buf *buf;
struct fastrpc_invoke_args *args;
@@ -834,7 +831,7 @@ struct fastrpc_user {
*/
struct fastrpc_device *device;
#ifdef CONFIG_DEBUG_FS
atomic_t debugfs_file_create;
bool debugfs_file_create;
struct dentry *debugfs_file;
char *debugfs_buf;
#endif
@@ -872,8 +869,7 @@ struct fastrpc_user {
/*mutex for process maps synchronization*/
struct mutex map_mutex;
struct mutex signal_create_mutex;
/* mutex for qos request synchronization */
struct mutex pm_qos_mutex;
struct gid_list gidlist;
/* Completion object for DMA invocations by client driver */
struct completion dma_invoke;
/* Completion objects and state for dspsignals */
@@ -888,7 +884,7 @@ struct fastrpc_user {
bool set_session_info;
/* Various states throughout process life cycle */
atomic_t state;
/* Flag to indicate notif thread exit requested */
/* Flag to indicate notif thread exit requested*/
bool exit_notif;
};

View File

@@ -13,4 +13,4 @@ ifeq ($(TARGET_BOARD_PLATFORM), niobe)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/cdsp-loader.ko
endif
endif
endif
endif

View File

@@ -11,4 +11,4 @@ PRODUCT_PACKAGES += frpc-adsprpc.ko
ifeq ($(TARGET_BOARD_PLATFORM), niobe)
PRODUCT_PACKAGES += cdsp-loader.ko
endif
endif
endif