replace common qcom sources with samsung ones
@@ -596,14 +596,8 @@ static void __exit msm_cvp_exit(void)
module_init(msm_cvp_init);
module_exit(msm_cvp_exit);

#ifdef CVP_MMRM_ENABLED
MODULE_SOFTDEP("pre: msm-mmrm");
#endif
#ifdef CVP_SYNX_ENABLED
MODULE_SOFTDEP("pre: synx-driver");
#endif
#ifdef CVP_FASTRPC_ENABLED
MODULE_SOFTDEP("pre: frpc-adsprpc");
#endif
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#ifndef _MSM_COMM_DEF_H_
@@ -29,9 +29,7 @@ enum queue_state {

#else /* LA target starts here */

//#define USE_PRESIL 1

#if defined(CONFIG_EVA_KALAMA) && !defined(USE_PRESIL)
#ifdef CONFIG_EVA_KALAMA
#define CVP_SYNX_ENABLED 1
#define CVP_MMRM_ENABLED 1
#define CVP_FASTRPC_ENABLED 1
@@ -39,26 +37,19 @@ enum queue_state {

/*SYNX MMRM and FASTRPC are removed temporarily*/
/*Need to put them back when dependencies are available*/
#if defined(CONFIG_EVA_PINEAPPLE) && !defined(USE_PRESIL)
#ifdef CONFIG_EVA_PINEAPPLE
#define CVP_SYNX_ENABLED 1
#endif /* End of CONFIG_EVA_PINEAPPLE */

#if defined(CONFIG_EVA_SUN) && !defined(USE_PRESIL)
#ifdef CONFIG_EVA_SUN
#define CVP_SYNX_ENABLED 1
#define CVP_FASTRPC_ENABLED 1
#define CVP_DSP_ENABLED 1
#define CVP_MMRM_ENABLED 1
#endif /* End of CONFIG_EVA_SUN*/

#if defined(CONFIG_EVA_WAIPIO) && !defined(USE_PRESIL)
#define CVP_MINIDUMP_ENABLED 1
#endif

#ifdef USE_PRESIL
//#define CVP_SYNX_ENABLED 1
//#define CVP_FASTRPC_ENABLED 1
//#define CVP_DSP_ENABLED 1
//#define CVP_MMRM_ENABLED 1
#ifdef CONFIG_EVA_WAIPIO
#define CVP_MINIDUMP_ENABLED 1
#endif

#endif /* End CONFIG_EVA_TVM */
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#ifndef __H_CVP_CORE_HFI_H__
@@ -289,6 +289,7 @@ struct iris_hfi_device {
unsigned int skip_pc_count;
struct msm_cvp_capability *sys_init_capabilities;
struct cvp_hal_ops *hal_ops;
bool msm_cvp_hw_wd;
};

irqreturn_t cvp_hfi_isr(int irq, void *dev);
@@ -324,10 +325,6 @@ struct msm_cvp_inst *cvp_get_inst_from_id(struct msm_cvp_core *core,
ktid = (cmd_hdr->client_data.kdata & (FENCE_BIT - 1)); \
trace_tracing_eva_frame_from_sw(aon_cycles, tag, sess_id, \
stream_id, pkt_id, t_id, ktid); \
dprintk(CVP_ERR, \
"tracing_eva_frame_from_sw: AON_TIMESTAMP: %llu %s session_id = 0x%x "\
"stream_id = 0x%x packet_id = 0x%x transaction_id = 0x%x ktid = 0x%x\n",\
aon_cycles, tag, sess_id, stream_id, pkt_id, t_id, ktid); \
} \
})

@@ -349,10 +346,6 @@ struct msm_cvp_inst *cvp_get_inst_from_id(struct msm_cvp_core *core,
ktid = (msg_hdr->client_data.kdata & (FENCE_BIT - 1)); \
trace_tracing_eva_frame_from_sw(aon_cycles, tag, session_id, \
stream_id, pkt_id, t_id, ktid); \
dprintk(CVP_ERR,\
"tracing_eva_frame_from_sw: AON_TIMESTAMP: %llu %s session_id = 0x%x "\
"stream_id = 0x%x packet_id = 0x%x transaction_id = 0x%x ktid = 0x%x\n",\
aon_cycles, tag, session_id, stream_id, pkt_id, t_id, ktid); \
} \
})
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2025, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/

#include <asm/memory.h>
@@ -110,7 +110,6 @@ static int __reset_control_assert_name(struct iris_hfi_device *device, const cha
static int __reset_control_deassert_name(struct iris_hfi_device *device, const char *name);
static int __reset_control_acquire(struct iris_hfi_device *device, const char *name);
static int __reset_control_release(struct iris_hfi_device *device, const char *name);
static void __deinit_resources(struct iris_hfi_device *device);

static int cvp_iommu_map(struct iommu_domain* domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot)
{
@@ -130,12 +129,6 @@ static int cvp_iommu_map(struct iommu_domain* domain, unsigned long iova, phys_a
return rc;
}

enum enter_noc_lpi_caller {
IRIS_POWER_ON = 1,
POWER_OFF_CORE,
POWER_OFF_CNTRL,
};


#ifdef CONFIG_EVA_PINEAPPLE

@@ -606,11 +599,11 @@ static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
{
struct cvp_hfi_queue_header *queue;
struct cvp_hfi_msg_session_hdr *msg_pkt;
u32 packet_size_in_words, new_read_idx, packet_size_in_bytes;
u32 packet_size_in_words, new_read_idx;
u32 *read_ptr;
u32 receive_request = 0;
u32 read_idx, write_idx;
int rc = 0;
int rc = 0;

if (!qinfo || !packet || !pb_tx_req_is_set) {
dprintk(CVP_ERR, "Invalid Params\n");
@@ -679,7 +672,6 @@ static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
}

packet_size_in_words = (*read_ptr) >> 2;
packet_size_in_bytes = *read_ptr;
if (!packet_size_in_words) {
spin_unlock(&qinfo->hfi_lock);
dprintk(CVP_ERR, "Zero packet size\n");
@@ -701,12 +693,6 @@ static int __read_queue(struct cvp_iface_q_info *qinfo, u8 *packet,
(u8 *)qinfo->q_array.align_virtual_addr,
new_read_idx << 2);
}
/*
* Copy back the validated size to avoid security issue. As we are reading
* the packet from a shared queue, there is a possibility to get the
* packet->size data corrupted of shared queue by mallicious FW.
*/
*((u32 *) packet) = packet_size_in_bytes;
} else {
dprintk(CVP_WARN,
"BAD packet received, read_idx: %#x, pkt_size: %d\n",
@@ -923,8 +909,7 @@ static void __set_threshold_registers(struct iris_hfi_device *device)
}
#ifdef CONFIG_EVA_SUN
static void __enter_cpu_noc_lpi(struct iris_hfi_device *device,
enum enter_noc_lpi_caller caller)
static void __enter_cpu_noc_lpi(struct iris_hfi_device *device)
{
u32 lpi_status, count = 0, max_count = 2000;

@@ -938,7 +923,7 @@ static void __enter_cpu_noc_lpi(struct iris_hfi_device *device,
__write_register(device, CVP_WRAPPER_CPU_NOC_LPI_CONTROL, 0x1);
while (count < max_count) {
lpi_status = __read_register(device, CVP_WRAPPER_CPU_NOC_LPI_STATUS);
if (((lpi_status & BIT(1)) || (lpi_status & BIT(2))) && (!(lpi_status & BIT(0)))) {
if ((lpi_status & BIT(1)) || ((lpi_status & BIT(2)) && (!(lpi_status & BIT(0))))) {
/*
* If QDENY == true, or
* If QACTIVE == true && QACCEPT == false
@@ -955,8 +940,10 @@ static void __enter_cpu_noc_lpi(struct iris_hfi_device *device,
}
}

dprintk(CVP_PWR,
"%s, CPU Noc: lpi_status %x (count %d)\n", __func__, lpi_status, count);
/* HPG Step-7 of section 3.7 */
__write_register(device, CVP_WRAPPER_CPU_NOC_LPI_CONTROL, 0x0);
// __write_register(device, CVP_WRAPPER_CPU_NOC_LPI_CONTROL, 0x0);
if (count == max_count) {
u32 pc_ready, wfi_status;

@@ -964,21 +951,17 @@ static void __enter_cpu_noc_lpi(struct iris_hfi_device *device,
pc_ready = __read_register(device, CVP_CTRL_STATUS);

dprintk(CVP_WARN,
"%s - %d, CPU Noc is not in LPI: %x %x %x\n",
__func__, caller, lpi_status, wfi_status, pc_ready);
"%s, CPU NOC not in qaccept status %x %x %x\n",
__func__, lpi_status, wfi_status, pc_ready);

/* Added for debug info purpose, not part of HPG */
call_iris_op(device, print_sbm_regs, device);
} else
dprintk(CVP_WARN,
"%s - %d, CPU Noc is in LPI: lpi_status %x (count %d)\n",
__func__, caller, lpi_status, count);
}
}

static void __enter_core_noc_lpi(struct iris_hfi_device *device,
enum enter_noc_lpi_caller caller)
static void __enter_core_noc_lpi(struct iris_hfi_device *device)
{
u32 lpi_status, count = 0, max_count = 2000, val = 0;
u32 lpi_status, count = 0, max_count = 2000;

/* New addition to put CORE NOC to low power Section 6.14 (Steps 4-6)*/

@@ -991,7 +974,7 @@ static void __enter_core_noc_lpi(struct iris_hfi_device *device,
while (count < max_count) {
/* Reading the LPI status */
lpi_status = __read_register(device, CVP_AON_WRAPPER_CVP_NOC_LPI_STATUS);
if (((lpi_status & BIT(1)) || (lpi_status & BIT(2))) && (!(lpi_status & BIT(0)))) {
if ((lpi_status & BIT(1)) || ((lpi_status & BIT(2)) && (!(lpi_status & BIT(0))))) {
/*
* If QDENY == true, or
* If QACTIVE == true && QACCEPT == false
@@ -1000,9 +983,6 @@ static void __enter_core_noc_lpi(struct iris_hfi_device *device,
__write_register(device, CVP_AON_WRAPPER_CVP_NOC_LPI_CONTROL, 0x0);
usleep_range(10, 20);
__write_register(device, CVP_NOC_CORE_ERR_ERRCLR_LOW_OFFS, 0x1);
val = __read_register(device, CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS);
__write_register(device, CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS,
val & ~(BIT(0)|BIT(1)));
__write_register(device, CVP_AON_WRAPPER_CVP_NOC_LPI_CONTROL, 0x1);
usleep_range(1000, 1200);
count++;
@@ -1011,23 +991,21 @@ static void __enter_core_noc_lpi(struct iris_hfi_device *device,
}
}

dprintk(CVP_PWR,
"%s, CORE Noc: lpi_status %x (count %d)\n", __func__, lpi_status, count);
/* HPG Step-4 of section 3.4.4 */
__write_register(device, CVP_AON_WRAPPER_CVP_NOC_LPI_CONTROL, 0x0);
// __write_register(device, CVP_AON_WRAPPER_CVP_NOC_LPI_CONTROL, 0x0);
if (count == max_count) {
dprintk(CVP_WARN,
"%s - %d, CORE Noc is not in LPI: lpi_status %x\n",
__func__, caller, lpi_status);
"%s, CORE NOC not in qaccept status %x\n",
__func__, lpi_status);

/* Added for debug info purpose, not part of HPG */
call_iris_op(device, print_sbm_regs, device);
} else
dprintk(CVP_WARN,
"%s - %d, CORE Noc is in LPI: lpi_status %x (count %d)\n",
__func__, caller, lpi_status, count);
}
}

static void __enter_video_ctl_noc_lpi(struct iris_hfi_device *device,
enum enter_noc_lpi_caller caller)
static void __enter_video_ctl_noc_lpi(struct iris_hfi_device *device)
{
u32 lpi_status, count = 0, max_count = 2000;

@@ -1037,7 +1015,7 @@ static void __enter_video_ctl_noc_lpi(struct iris_hfi_device *device,
while (count < max_count) {
/* Reading the LPI status */
lpi_status = __read_register(device, CVP_AON_WRAPPER_CVP_VIDEO_CTL_NOC_LPI_STATUS);
if (((lpi_status & BIT(1)) || (lpi_status & BIT(2))) && (!(lpi_status & BIT(0)))) {
if ((lpi_status & BIT(1)) || ((lpi_status & BIT(2)) && (!(lpi_status & BIT(0))))) {
/*
* If QDENY == true, or
* If QACTIVE == true && QACCEPT == false
@@ -1055,19 +1033,18 @@ static void __enter_video_ctl_noc_lpi(struct iris_hfi_device *device,
}
}

dprintk(CVP_PWR,
"%s, CVP_VIDEO_CTL Noc: lpi_status %x (count %d)\n", __func__, lpi_status, count);
/* HPG Step-22 of section 6.14 */
__write_register(device, CVP_AON_WRAPPER_CVP_VIDEO_CTL_NOC_LPI_CONTROL, 0x0);
if (count == max_count) {
dprintk(CVP_WARN,
"%s - %d, CVP_VIDEO_CTL Noc is not in LPI: lpi_status %x\n",
__func__, caller, lpi_status);
"%s, CVP_VIDEO_CTL NOC not in qaccept status %x %x %x\n",
__func__, lpi_status);

/* Added for debug info purpose, not part of HPG */
call_iris_op(device, print_sbm_regs, device);
} else
dprintk(CVP_WARN,
"%s - %d, CVP_VIDEO_CTL Noc is in LPI: lpi_status %x (count %d)\n",
__func__, caller, lpi_status, count);
}
}
#endif
@@ -1326,7 +1303,7 @@ static const char boot_states[0x40][32] = {
static inline int __boot_firmware(struct iris_hfi_device *device)
{
int rc = 0;
u32 ctrl_init_val = 0, ctrl_status = 0, count = 0, max_tries = 5000;
u32 ctrl_init_val = 0, ctrl_status = 0, count = 0, max_tries = 5000, SCIBARG3_status = 0;
CVPKERNEL_ATRACE_BEGIN("__boot_firmware");

/*
@@ -1339,16 +1316,11 @@ static inline int __boot_firmware(struct iris_hfi_device *device)
dprintk(CVP_ERR, "Failed to enabled inter-frame PC\n");

ctrl_init_val = BIT(0);
/*
* Add BIT(1) to disable DSP and BIT(3) to disable SYNX
*/
#ifndef CVP_DSP_ENABLED
ctrl_init_val |= BIT(1);
#endif
#ifndef CVP_SYNX_ENABLED
ctrl_init_val |= BIT(3);
#endif

SCIBARG3_status = __read_register(device, CVP_CPU_CS_SCIBARG3);
ctrl_status = __read_register(device, CVP_CTRL_STATUS);
dprintk(CVP_ERR, "%s: SS_DEBUG: Before writing CNTRL_INIT CNTRL_STATUS: %x, SCIBARG3_status %x\n",
__func__, ctrl_status, SCIBARG3_status);
/* RUMI: CVP_CTRL_INIT in MPTest has bit 0 and 3 set */
__write_register(device, CVP_CTRL_INIT, ctrl_init_val);
while (!(ctrl_status & CVP_CTRL_INIT_STATUS__M) && count < max_tries) {
@@ -1380,12 +1352,20 @@ static inline int __boot_firmware(struct iris_hfi_device *device)
rc = -ENODEV;
}

SCIBARG3_status = __read_register(device, CVP_CPU_CS_SCIBARG3);
ctrl_status = __read_register(device, CVP_CTRL_STATUS);
dprintk(CVP_ERR, "%s: SS_DEBUG: After writing CNTRL_INIT CNTRL_STATUS: %x, SCIBARG3_status %x\n",
__func__, ctrl_status, SCIBARG3_status);
/* Enable interrupt before sending commands to tensilica */
__write_register(device, CVP_CPU_CS_H2XSOFTINTEN, 0x1);
__write_register(device, CVP_CPU_CS_X2RPMh, 0x0);

CVPKERNEL_ATRACE_END("__boot_firmware");

#ifdef USE_PRESIL
/*Disable HW Synx if RUMI Support for Synx unavailable*/
__write_register(device, CVP_CPU_CS_SCIACMD, 0x8);
#endif
return rc;
}

@@ -1449,6 +1429,8 @@ static void cvp_dump_csr(struct iris_hfi_device *dev)
dprintk(CVP_ERR, "CVP_WRAPPER_CPU_STATUS: %x\n", reg);
reg = __read_register(dev, CVP_CPU_CS_SCIACMDARG0);
dprintk(CVP_ERR, "CVP_CPU_CS_SCIACMDARG0: %x\n", reg);
reg = __read_register(dev, CVP_CPU_CS_SCIBARG3);
dprintk(CVP_ERR, "CVP_CPU_CS_SCIBARG3: %x\n", reg);
reg = __read_register(dev, CVP_WRAPPER_INTR_STATUS);
dprintk(CVP_ERR, "CVP_WRAPPER_INTR_STATUS: %x\n", reg);
reg = __read_register(dev, CVP_CPU_CS_H2ASOFTINT);
@@ -1742,7 +1724,6 @@ static void __interface_dsp_queues_release(struct iris_hfi_device *device)

static int __interface_dsp_queues_init(struct iris_hfi_device *dev)
{
#ifdef CVP_DSP_ENABLED
int rc = 0;
u32 i;
struct cvp_iface_q_info *iface_q;
@@ -1754,6 +1735,8 @@ static int __interface_dsp_queues_init(struct iris_hfi_device *dev)
dma_addr_t dma_handle;
dma_addr_t iova;
struct context_bank_info *cb;
int count = 0;
const int max_retries = 10;

q_size = ALIGN(QUEUE_SIZE, SZ_1M);
mem_data = &dev->dsp_iface_q_table.mem_data;
@@ -1763,13 +1746,24 @@ static int __interface_dsp_queues_init(struct iris_hfi_device *dev)
cvp_dsp_init_hfi_queue_hdr(dev);
return 0;
}
/* Allocate dsp queues from CDSP device memory */
kvaddr = dma_alloc_coherent(dev->res->mem_cdsp.dev, q_size,
while (count < max_retries) {
/* Allocate dsp queues from CDSP device memory */
kvaddr = dma_alloc_coherent(dev->res->mem_cdsp.dev, q_size,
&dma_handle, GFP_KERNEL);
if (IS_ERR_OR_NULL(kvaddr)) {
dprintk(CVP_ERR, "%s: SS_DEBUG: failed dma allocation, retry %d\n", __func__, count);
usleep_range(100000, 105000);
count++;
} else {
dprintk(CVP_ERR, "%s: SS_DEBUG: DMA Allocation success\n", __func__);
break;
}
}
if (IS_ERR_OR_NULL(kvaddr)) {
dprintk(CVP_ERR, "%s: failed dma allocation\n", __func__);
dprintk(CVP_ERR, "%s: SS_DEBUG: failed dma allocation\n", __func__);
goto fail_dma_alloc;
}

cb = msm_cvp_smem_get_context_bank(dev->res, SMEM_CDSP);
if (!cb) {
dprintk(CVP_ERR,
@@ -1818,9 +1812,6 @@ fail_dma_map:
dma_free_coherent(dev->res->mem_cdsp.dev, q_size, kvaddr, dma_handle);
fail_dma_alloc:
return -ENOMEM;
#else
return 0; // DSP is not enabled
#endif
}
static void __interface_queues_release(struct iris_hfi_device *device)
@@ -2169,10 +2160,6 @@ static int __sys_set_debug(struct iris_hfi_device *device, u32 debug)

pkt = kzalloc(sizeof(struct cvp_hfi_cmd_sys_set_property_packet) + sizeof(u32) +
sizeof(struct cvp_hfi_debug_config), GFP_KERNEL);
if (!pkt) {
dprintk(CVP_ERR, "Failed to allocate memory for sys set property packet\n");
return -ENOMEM;
}

rc = call_hfi_pkt_op(device, sys_debug_config, pkt, debug);
if (rc) {
@@ -2485,7 +2472,7 @@ static int iris_hfi_core_init(void *device)
if (rc) {
dprintk(CVP_ERR, "failed to init queues\n");
rc = -ENOMEM;
goto err_init_queues;
goto err_core_init;
}
cvp_register_va_md_region();

@@ -2583,10 +2570,7 @@ pm_qos_bail:
dprintk(CVP_CORE, "Core inited successfully\n");

return 0;
err_init_queues:
__interface_queues_release(dev);
power_off_iris2(dev);
__deinit_resources(dev);

err_core_init:
__set_state(dev, IRIS_STATE_DEINIT);
__unload_fw(dev);
@@ -2798,8 +2782,7 @@ static int iris_debug_hook(void *device)
dprintk(CVP_ERR, "%s Invalid device\n", __func__);
return -ENODEV;
}

dprintk(CVP_WARN, "Stop NOC transactions from EVA Core\n");
dprintk(CVP_WARN, "Stop transactions from EVA Core\n");
val = __read_register(dev, CVP_VIDEO_B_NOC_A_QOSGEN_MAINCTL_LOW);
__write_register(dev, CVP_VIDEO_B_NOC_A_QOSGEN_MAINCTL_LOW, val | BIT(2));
val = __read_register(dev, CVP_VIDEO_B_NOC_B_QOSGEN_MAINCTL_LOW);
@@ -2807,15 +2790,17 @@ static int iris_debug_hook(void *device)
val = __read_register(dev, CVP_VIDEO_B_NOC_C_QOSGEN_MAINCTL_LOW);
__write_register(dev, CVP_VIDEO_B_NOC_C_QOSGEN_MAINCTL_LOW, val | BIT(2));

val = __read_register(dev, CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW);
__write_register(dev, CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW, val | BIT(0));

/* Masking Core and CPU NOC interrupts */
mask_val = __read_register(dev, CVP_WRAPPER_INTR_MASK);
/* Masking Xtensa NOC and Core NOC interrupts */
mask_val = __read_register(device, CVP_WRAPPER_INTR_MASK);
/* Write 0 to unmask CPU and WD interrupts */
mask_val |= (CVP_FATAL_INTR_BMSK);
dprintk(CVP_WARN, "Masking Core and CPU NOC interrupts\n");
__write_register(dev, CVP_WRAPPER_INTR_MASK, mask_val);

__write_register(device, CVP_WRAPPER_INTR_MASK, mask_val);
dprintk(CVP_REG, "%s: reg: %x, mask value %x\n",
__func__, CVP_WRAPPER_INTR_MASK, mask_val);
if (dev->msm_cvp_hw_wd) {
dprintk(CVP_WARN, "Halt Tensilica\n");
__write_register(dev, CVP_WRAPPER_TZ_CPU_CLOCK_CONFIG, 0x1);
}
return 0;
}

@@ -3382,19 +3367,17 @@ skip_power_off:
static void __process_sys_error(struct iris_hfi_device *device)
{
struct cvp_hfi_sfr_struct *vsfr = NULL;
u32 sfr_buf_size = 0;

vsfr = (struct cvp_hfi_sfr_struct *)device->sfr.align_virtual_addr;
sfr_buf_size = vsfr->bufSize;
if (vsfr && sfr_buf_size < ALIGNED_SFR_SIZE) {
void *p = memchr(vsfr->rg_data, '\0', sfr_buf_size);
if (vsfr) {
void *p = memchr(vsfr->rg_data, '\0', vsfr->bufSize);
/*
* SFR isn't guaranteed to be NULL terminated
* since SYS_ERROR indicates that Iris is in the
* process of crashing.
*/
if (p == NULL)
vsfr->rg_data[sfr_buf_size - 1] = '\0';
vsfr->rg_data[vsfr->bufSize - 1] = '\0';

dprintk(CVP_ERR, "SFR Message from FW: %s\n",
vsfr->rg_data);
@@ -3815,14 +3798,15 @@ static void iris_hfi_wd_work_handler(struct work_struct *work)
device = core->dev_ops->hfi_device_data;
else
return;

if (msm_cvp_smmu_fault_recovery) {
device->msm_cvp_hw_wd = true;
dprintk(CVP_WARN, "%s: msm_cvp_hw_wd %d\n", __func__, device->msm_cvp_hw_wd);
}
if (msm_cvp_hw_wd_recovery) {
dprintk(CVP_ERR, "Cleaning up as HW WD recovery is enable %d\n",
msm_cvp_hw_wd_recovery);
call_iris_op(device, print_sbm_regs, device);
response.device_id = 0;
dprintk(CVP_WARN, "Halt Tensilica\n");
__write_register(device, CVP_WRAPPER_TZ_CPU_CLOCK_CONFIG, 0x1);
handle_sys_error(cmd, (void *) &response);
enable_irq(device->cvp_hal_data->irq_wd);
}
@@ -4105,10 +4089,19 @@ static int __init_regulators(struct iris_hfi_device *device)
{
int rc = 0;
struct regulator_info *rinfo = NULL;
struct msm_cvp_core *core = NULL;
core = cvp_driver->cvp_core;

iris_hfi_for_each_regulator(device, rinfo) {
rinfo->regulator = regulator_get(&device->res->pdev->dev,
rinfo->name);
if (core) {
if (!strcmp(rinfo->name, "cvp-core"))
dprintk(CVP_WARN, "%s: SS_DEBUG regulator %s: cnt: %d\n", __func__, rinfo->name, core->core_reg_cnt);
else if (!strcmp(rinfo->name, "cvp"))
dprintk(CVP_WARN, "%s: SS_DEBUG regulator %s: cnt: %d\n", __func__, rinfo->name, core->ctrl_reg_cnt);
}

if (IS_ERR_OR_NULL(rinfo->regulator)) {
rc = PTR_ERR(rinfo->regulator) ?: -EBADHANDLE;
dprintk(CVP_ERR, "Failed to get regulator: %s\n",
@@ -4261,8 +4254,10 @@ static int __disable_regulator_impl(struct regulator_info *rinfo,
struct iris_hfi_device *device)
{
int rc = 0;
struct msm_cvp_core *core = NULL;
core = cvp_driver->cvp_core;

dprintk(CVP_PWR, "Disabling regulator %s\n", rinfo->name);
dprintk(CVP_WARN, "%s: SS_DEBUG Disabling regulator %s\n", __func__, rinfo->name);

/*
* This call is needed. Driver needs to acquire the control back
@@ -4291,6 +4286,21 @@ static int __disable_regulator_impl(struct regulator_info *rinfo,
rinfo->name, rc);
goto disable_regulator_failed;
}
if (core) {
if (!strcmp(rinfo->name, "cvp-core")) {
core->core_reg_cnt--;
dprintk(CVP_WARN,
"%s: SS_DEBUG decremented counter for core regulator with value %d\n",
__func__, core->core_reg_cnt);
} else if (!strcmp(rinfo->name, "cvp")) {
core->ctrl_reg_cnt--;
dprintk(CVP_WARN,
"%s: SS_DEBUG decremented counter for ctrl regulator with value %d\n",
__func__, core->ctrl_reg_cnt);
}
} else {
dprintk(CVP_WARN, "%s: SS_DEBUG invalid core pointer: %s\n", __func__, rinfo->name);
}

return 0;
disable_regulator_failed:
@@ -4322,6 +4332,12 @@ static int __enable_regulator(struct iris_hfi_device *device,
{
int rc = 0;
struct regulator_info *rinfo;
struct msm_cvp_core *core = NULL;

core = cvp_driver->cvp_core;

dprintk(CVP_WARN, "%s: SS_DEBUG enable regulator called for %s\n",
__func__, name);

iris_hfi_for_each_regulator(device, rinfo) {
if (strcmp(rinfo->name, name))
@@ -4339,6 +4355,21 @@ static int __enable_regulator(struct iris_hfi_device *device,
regulator_disable(rinfo->regulator);
return -EINVAL;
}
if (core) {
if (!strcmp(rinfo->name, "cvp-core")) {
core->core_reg_cnt++;
dprintk(CVP_WARN,
"%s: SS_DEBUG incremented counter for core regulator with value %d\n",
__func__, core->core_reg_cnt);
} else if (!strcmp(rinfo->name, "cvp")) {
core->ctrl_reg_cnt++;
dprintk(CVP_WARN,
"%s: SS_DEBUG incremented counter for ctrl regulator with value %d\n",
__func__, core->ctrl_reg_cnt);
}
} else {
dprintk(CVP_WARN, "%s: SS_DEBUG invalid core pointer: %s\n", __func__, rinfo->name);
}

dprintk(CVP_PWR, "Enabled regulator %s\n", rinfo->name);
return 0;
@@ -4561,7 +4592,6 @@ static void interrupt_init_iris2(struct iris_hfi_device *device)
static void setup_dsp_uc_memmap_vpu5(struct iris_hfi_device *device)
{
/* initialize DSP QTBL & UCREGION with CPU queues */
#ifdef CVP_DSP_ENABLED
#ifdef USE_PRESIL42
presil42_setup_dsp_uc_memmap_vpu5(device);
return;
@@ -4572,7 +4602,6 @@ static void setup_dsp_uc_memmap_vpu5(struct iris_hfi_device *device)
(u32)device->dsp_iface_q_table.align_device_addr);
__write_register(device, HFI_DSP_UC_REGION_SIZE,
device->dsp_iface_q_table.mem_data.size);
#endif
}
static int __set_ubwc_config(struct iris_hfi_device *device)
@@ -4656,14 +4685,8 @@ static int __iris_power_on(struct iris_hfi_device *device)
__write_register(device, CVP_CC_SPARE1, 1);
}

/* New addition to put CPU/Tensilica NOC to low power Section 6.14 (Steps 15-17)*/
__enter_cpu_noc_lpi(device, IRIS_POWER_ON);

/* New addition to put CVP_VIDEO_CTL NOC to low power Section 6.14 (Steps 19-21)*/
__enter_video_ctl_noc_lpi(device, IRIS_POWER_ON);

/* New addition to put CORE NOC to low power Section 6.14 (Steps 4-6)*/
__enter_core_noc_lpi(device, IRIS_POWER_ON);
dump_clock(device);
dump_regulator(device);

/*
* Re-program all of the registers that get reset as a result of
@@ -4768,6 +4791,7 @@ int __resume(struct iris_hfi_device *device)
{
int rc = 0;
struct msm_cvp_core *core;
u32 ctrl_status = 0, SCIBARG3_status = 0;

if (!device) {
dprintk(CVP_ERR, "Invalid params: %pK\n", device);
@@ -4791,7 +4815,10 @@ int __resume(struct iris_hfi_device *device)
__setup_ucregion_memory_map(device);

/* RUMI: set CVP_CTRL_INIT register to disable synx in FW */

SCIBARG3_status = __read_register(device, CVP_CPU_CS_SCIBARG3);
ctrl_status = __read_register(device, CVP_CTRL_STATUS);
dprintk(CVP_ERR, "%s: SS_DEBUG: Before __tzbsp_set_cvp_state CNTRL_STATUS: %x, SCIBARG3_status %x\n",
__func__, ctrl_status, SCIBARG3_status);
/* Reboot the firmware */
rc = __tzbsp_set_cvp_state(TZ_SUBSYS_STATE_RESUME);
if (rc) {
@@ -4897,6 +4924,11 @@ static void __unload_fw(struct iris_hfi_device *device)
if (device->state != IRIS_STATE_DEINIT)
flush_workqueue(device->iris_pm_workq);

if (msm_cvp_smmu_fault_recovery) {
if (device)
call_hfi_op(core->dev_ops, debug_hook, device);
}

unload_cvp_fw_impl(device);
__interface_queues_release(device);
power_off_iris2(device);
@@ -5425,7 +5457,7 @@ static int __power_off_controller(struct iris_hfi_device *device)
pc_ready = __read_register(device, CVP_CTRL_STATUS);

dprintk(CVP_WARN,
"CPU Noc is not in LPI: %x %x %x %x\n",
"CPU NOC not in qaccept status %x %x %x %x\n",
reg_status, lpi_status, wfi_status, pc_ready);

call_iris_op(device, print_sbm_regs, device);
@@ -5629,7 +5661,7 @@ static int __power_off_core(struct iris_hfi_device *device)
pc_ready = __read_register(device, CVP_CTRL_STATUS);

dprintk(CVP_WARN,
"CORE Noc is not in LPI: %x %x %x %x\n",
"Core NOC not in qaccept status %x %x %x %x\n",
reg_status, lpi_status, wfi_status, pc_ready);

warn_flag = 1;
@@ -6133,11 +6165,13 @@ static int __power_off_core_v1(struct iris_hfi_device *device)
}

/* New addition to put CORE NOC to low power Section 6.14 (Steps 4-6)*/
__enter_core_noc_lpi(device, POWER_OFF_CORE);
__enter_core_noc_lpi(device);

/* HPG 3.4.4 step 5 */
/* Reset both sides of 2 ahb2ahb_bridges (TZ and non-TZ) */
__write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x3);
__write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x2);
__write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x0);

__write_register(device, CVP_WRAPPER_CORE_CLOCK_CONFIG, config);

@@ -6159,10 +6193,10 @@ static int __power_off_controller_v1(struct iris_hfi_device *device)
__write_register(device, CVP_CPU_CS_X2RPMh, 0x3);

/* New addition to put CPU/Tensilica NOC to low power Section 6.14 (Steps 15-17)*/
__enter_cpu_noc_lpi(device, POWER_OFF_CNTRL);
__enter_cpu_noc_lpi(device);

/* New addition to put CVP_VIDEO_CTL NOC to low power Section 6.14 (Steps 19-21)*/
__enter_video_ctl_noc_lpi(device, POWER_OFF_CNTRL);
__enter_video_ctl_noc_lpi(device);

/* HPG 3.7 step 11 */
__write_register(device, CVP_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x0);
@@ -6207,9 +6241,6 @@ static int __power_off_controller_v1(struct iris_hfi_device *device)
/* HPG 3.7 Step 13 and 14 */
__disable_regulator(device, "cvp");

/* Step #28: Override ARCG control to allow AXI0 clock pass through */
__write_register(device, CVP_AON_WRAPPER_CVP_NOC_ARCG_CONTROL, 0x1);

/* Below sequence are missing from HPG Section 3.7.
* It disables GCC clks in power on sequence
*/
@@ -6390,24 +6421,14 @@ static int __set_registers_v1(struct iris_hfi_device *device)
reg_set->reg_tbl[i].value);
}

/* Reset both sides of 2 ahb2ahb_bridges (TZ and non-TZ)
* As suggested by DV team
*/
__write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x2);
__write_register(device, CVP_AHB_BRIDGE_SYNC_RESET, 0x0);

val = __read_register(device, CVP_VIDEO_B_NOC_A_QOSGEN_MAINCTL_LOW);
__write_register(device, CVP_VIDEO_B_NOC_A_QOSGEN_MAINCTL_LOW, val & ~BIT(2));
val = __read_register(device, CVP_VIDEO_B_NOC_B_QOSGEN_MAINCTL_LOW);
__write_register(device, CVP_VIDEO_B_NOC_B_QOSGEN_MAINCTL_LOW, val & ~BIT(2));
val = __read_register(device, CVP_VIDEO_B_NOC_C_QOSGEN_MAINCTL_LOW);
__write_register(device, CVP_VIDEO_B_NOC_C_QOSGEN_MAINCTL_LOW, val & ~BIT(2));

__write_register(device, CVP_NOC_RCGCONTROLLER_HYSTERESIS_LOW, 0xff);
__write_register(device, CVP_NOC_RCGCONTROLLER_WAKEUP_LOW, 0x7);
__write_register(device, CVP_NOC_RCG_VNOC_NOC_CLK_FORCECLOCKON_LOW, 0x1);
__write_register(device,
CVP_NOC_RCG_VNOC_NOC_CLK_ENABLE_LOW + device->res->rcg_vnoc_clk_en_low, 0x1);

dump_clock(device);
dump_regulator(device);

__write_register(device, CVP_NOC_RCG_VNOC_NOC_CLK_ENABLE_LOW, 0x1);
usleep_range(5, 10);
__write_register(device, CVP_NOC_RCG_VNOC_NOC_CLK_FORCECLOCKON_LOW, 0x0);
__write_register(device, CVP_AON_WRAPPER_CVP_NOC_ARCG_CONTROL, 0x0);
@@ -6453,6 +6474,12 @@ static int __set_registers_v1(struct iris_hfi_device *device)
__write_register(device, CVP_NOC_CORE_ERR_MAINCTL_LOW_OFFS, 0x3);
__write_register(device, CVP_NOC_MAIN_SIDEBANDMANAGER_FAULTINEN0_LOW, 0x1);

if (msm_cvp_smmu_fault_recovery) {
device->msm_cvp_hw_wd = false;
dprintk(CVP_WARN, "%s: msm_cvp_hw_wd %d\n", __func__, device->msm_cvp_hw_wd);
val = __read_register(device, CVP_WRAPPER_TZ_CPU_CLOCK_CONFIG);
dprintk(CVP_WARN, "%s:CVP_WRAPPER_TZ_CPU_CLOCK_CONFIG %d\n", __func__, val);
}
return 0;
}

@@ -6600,7 +6627,7 @@ void cvp_clock_reg_print(struct iris_hfi_device *dev)
{
u32 reg;

dprintk(CVP_ERR, "%s Clock Controller Debug Prints:\n", __func__);
dprintk(CVP_ERR, "%s Clock Controller Debug Prints:\n");

reg = __read_register(dev, CVP_CC_MVS0C_GDSCR);
dprintk(CVP_ERR, "CVP_CC_MVS0C_GDSCR: %x\n", reg);
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2025, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/

#include <linux/bitops.h>
@@ -619,7 +619,6 @@ static int hfi_process_session_cvp_msg(u32 device_id,
if (get_msg_errorcode(pkt) == HFI_ERR_SESSION_HW_HANG_DETECTED) {
dprintk(CVP_ERR, "%s: Hardware Hang Observed:\n", __func__);
cvp_clock_reg_print(dev);
BUG_ON(!msm_cvp_session_error_recovery);
}

wake_up_all(&sq->wq);
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2025, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/

#include "msm_cvp.h"
@@ -708,12 +708,8 @@ static int cvp_enqueue_pkt(struct msm_cvp_inst* inst,
{
struct cvp_hfi_ops *ops_tbl;
struct cvp_hfi_cmd_session_hdr *cmd_hdr;
int pkt_type, rc = 0, i = 0;
int pkt_type, rc = 0;
enum buf_map_type map_type;
uint32_t *fd_arr = NULL;
unsigned int offset = 0;
struct cvp_buf_type *buf;

CVPKERNEL_ATRACE_BEGIN("cvp_enqueue_pkt");

ops_tbl = inst->core->dev_ops;
@@ -731,24 +727,15 @@ static int cvp_enqueue_pkt(struct msm_cvp_inst* inst,
cmd_hdr->client_data.transaction_id,
cmd_hdr->client_data.kdata & (FENCE_BIT - 1));

if (map_type == MAP_PERSIST) {
fd_arr = vmalloc(sizeof(uint32_t) * in_buf_num);
if (!fd_arr) {
dprintk(CVP_ERR, "%s: fd array allocation failed\n", __func__);
rc = -ENOMEM;
goto exit;
} else {
memset((void *)fd_arr, -1, sizeof(uint32_t) * in_buf_num);
}
rc = msm_cvp_map_user_persist(inst, in_pkt, in_offset, in_buf_num, fd_arr);
} else if (map_type == UNMAP_PERSIST) {
if (map_type == MAP_PERSIST)
rc = msm_cvp_map_user_persist(inst, in_pkt, in_offset, in_buf_num);
else if (map_type == UNMAP_PERSIST)
rc = msm_cvp_unmap_user_persist(inst, in_pkt, in_offset, in_buf_num);
} else {
else
rc = msm_cvp_map_frame(inst, in_pkt, in_offset, in_buf_num);
}

if (rc)
goto exit;
return rc;

rc = cvp_populate_fences(in_pkt, in_offset, in_buf_num, inst);
if (rc == 0) {
@@ -760,21 +747,9 @@ static int cvp_enqueue_pkt(struct msm_cvp_inst* inst,
dprintk(CVP_ERR,"%s: Failed in call_hfi_op %d, %x\n",
__func__, in_pkt->pkt_data[0],
in_pkt->pkt_data[1]);
if (map_type == MAP_FRAME) {
msm_cvp_unmap_frame(inst, cmd_hdr->client_data.kdata);
} else if (map_type == MAP_PERSIST) {
offset = in_offset;
for (i = 0; i < in_buf_num; i++) {
// Update the in_pkt s.t iova is replaced back with fd
buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
offset += sizeof(*buf) >> 2;
if (!buf->size || fd_arr[i] < 0)
continue;
buf->fd = fd_arr[i];
}
rc = msm_cvp_unmap_user_persist(inst,
in_pkt, in_offset, in_buf_num);
}
if (map_type == MAP_FRAME)
msm_cvp_unmap_frame(inst,
cmd_hdr->client_data.kdata);
}
} else if (rc > 0) {
dprintk(CVP_SYNX, "Going fenced path\n");
@@ -782,27 +757,10 @@ static int cvp_enqueue_pkt(struct msm_cvp_inst* inst,
} else {
dprintk(CVP_ERR,"%s: Failed to populate fences\n",
__func__);
if (map_type == MAP_FRAME) {
if (map_type == MAP_FRAME)
msm_cvp_unmap_frame(inst, cmd_hdr->client_data.kdata);
} else if (map_type == MAP_PERSIST) {
offset = in_offset;
for (i = 0; i < in_buf_num; i++) {
// Update the in_pkt s.t iova is replaced back with fd
buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
offset += sizeof(*buf) >> 2;
if (!buf->size || fd_arr[i] < 0)
continue;
buf->fd = fd_arr[i];
}
rc = msm_cvp_unmap_user_persist(inst,
in_pkt, in_offset, in_buf_num);
}
}

exit:
CVPKERNEL_ATRACE_END("cvp_enqueue_pkt");
if (map_type == MAP_PERSIST)
vfree(fd_arr);
return rc;
}
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#include <linux/pid.h>
@@ -360,14 +360,15 @@ int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
return rc;

exit:
fput(file);
if (smem) {
if (smem->device_addr)
msm_cvp_unmap_smem(inst, smem, "unmap dsp");
msm_cvp_smem_put_dma_buf(smem->dma_buf);
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
}
if (cbuf)
cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
fput(file);
return rc;
}

@@ -1133,7 +1134,6 @@ static int msm_cvp_proc_oob_wncc(struct msm_cvp_inst* inst,
struct wncc_oob_buf wob;
struct eva_kmd_wncc_metadata* wncc_metadata[EVA_KMD_WNCC_MAX_LAYERS];
struct cvp_buf_type *wncc_metadata_bufs;
struct dma_buf *dmabuf;
unsigned int i, j;
bool empty = false;
u32 buf_id, buf_idx, buf_offset, iova;
@@ -1220,25 +1220,6 @@ static int msm_cvp_proc_oob_wncc(struct msm_cvp_inst* inst,

wncc_metadata_bufs = (struct cvp_buf_type *)
&in_pkt->pkt_data[wncc_oob->metadata_bufs_offset];

dmabuf = dma_buf_get(wncc_metadata_bufs[i].fd);
if (IS_ERR(dmabuf)) {
rc = PTR_ERR(dmabuf);
dprintk(CVP_ERR,
"%s: dma_buf_get() failed for wncc_metadata_bufs[%d], rc %d",
__func__, i, rc);
break;
}
if (dmabuf->size < wncc_metadata_bufs[i].size) {
dprintk(CVP_ERR,
"%s: wncc_metadata_bufs[%d] size %d is more than dma buf size %d",
__func__, i, wncc_metadata_bufs[i].size, dmabuf->size);
dma_buf_put(dmabuf);
rc = -EINVAL;
break;
}
dma_buf_put(dmabuf);

if ((wncc_metadata_bufs[i].debug_flags & 0x00000001) != 0) {
wncc_metadata_bufs[i].crc =
eva_calculate_crc((unsigned int *)wncc_metadata[i],
@@ -1355,7 +1336,6 @@ static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
smem = buf->smem;
if (smem && smem->dma_buf == dma_buf) {
atomic_inc(&smem->refcount);
msm_cvp_smem_put_dma_buf(smem->dma_buf);
mutex_unlock(&inst->persistbufs.lock);
print_smem(CVP_MEM, "found in persist", inst, smem);
return smem;
@@ -1370,7 +1350,6 @@ static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
smem = frame->bufs[i].smem;
if (smem && smem->dma_buf == dma_buf) {
atomic_inc(&smem->refcount);
msm_cvp_smem_put_dma_buf(smem->dma_buf);
mutex_unlock(&inst->frames.lock);
print_smem(CVP_MEM, "found in frame",
inst, smem);
@@ -1409,13 +1388,10 @@ static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
smem->bitmap_index = i;
SET_USE_BITMAP(i, inst);
} else {
dprintk(CVP_WARN,
"%s: reached limit, fallback to buf mapping list\n"
, __func__);
atomic_inc(&smem->refcount);
dprintk(CVP_WARN,
"%s: reached limit, fallback to buf mapping list\n"
, __func__);
dprintk(CVP_WARN,
"%s: fd %d, dma_buf %#llx, smem->refcount %d\n"
, __func__, smem->fd, smem->dma_buf, atomic_read(&smem->refcount));
mutex_unlock(&inst->dma_cache.lock);
return -ENOMEM;
}
@@ -1423,9 +1399,7 @@ static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,

atomic_inc(&smem->refcount);
mutex_unlock(&inst->dma_cache.lock);
dprintk(CVP_MEM, "%s: Added entry %d into cache\n", __func__, i);
dprintk(CVP_MEM, "%s: fd %d, dma_buf %#llx, smem->refcount %d\n",
__func__, smem->fd, smem->dma_buf, atomic_read(&smem->refcount));
dprintk(CVP_MEM, "Add entry %d into cache\n", i);

return 0;
}
@@ -1507,7 +1481,7 @@ static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
__func__, buf->offset, buf->size);
if (found) {
mutex_lock(&inst->dma_cache.lock);
atomic_dec(&smem->refcount); // Should we put dma_buf as well?
atomic_dec(&smem->refcount);
mutex_unlock(&inst->dma_cache.lock);
return NULL;
}
@@ -1532,7 +1506,6 @@ static int msm_cvp_unmap_user_persist_buf(struct msm_cvp_inst *inst,
struct list_head *ptr;
struct list_head *next;
struct cvp_internal_buf *pbuf;
struct msm_cvp_smem *smem = NULL;
struct dma_buf *dma_buf;

if (!inst) {
@@ -1545,49 +1518,31 @@ static int msm_cvp_unmap_user_persist_buf(struct msm_cvp_inst *inst,
return -EINVAL;

mutex_lock(&inst->persistbufs.lock);
mutex_lock(&inst->dma_cache.lock);
list_for_each_safe(ptr, next, &inst->persistbufs.list) {
if (!ptr) {
mutex_unlock(&inst->persistbufs.lock);
return -EINVAL;
}
pbuf = list_entry(ptr, struct cvp_internal_buf, list);
smem = pbuf->smem;
if (dma_buf == smem->dma_buf && (smem->flags & SMEM_PERSIST)) {
if (atomic_dec_and_test(&smem->refcount)) {
*iova = smem->device_addr;
dprintk(CVP_MEM,
"Unmap persist fd %d, dma_buf %#llx iova %#x\n",
pbuf->fd, smem->dma_buf, *iova);
list_del(&pbuf->list);

/*
* Remove from 64 bit cache entry for DMM & WARP_DS PARAMS.
* Only clear bit in bitmap and leave the unmap and put
* dma to add_smem
*/
if (is_params_pkt(pkt_type) &&
(smem->bitmap_index < MAX_DMABUF_NUMS)) {
CLEAR_USE_BITMAP(smem->bitmap_index, inst);
print_smem(CVP_MEM, "Map dereference", inst, smem);
smem->buf_idx |= 0x10000000;
} else {
msm_cvp_unmap_smem(inst, smem, "unmap user persist");
msm_cvp_smem_put_dma_buf(smem->dma_buf);
cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
smem = NULL;
}
cvp_kmem_cache_free(&cvp_driver->buf_cache, pbuf);
mutex_unlock(&inst->dma_cache.lock);
mutex_unlock(&inst->persistbufs.lock);
dma_buf_put(dma_buf);
return 0;
if (dma_buf == pbuf->smem->dma_buf && (pbuf->smem->flags & SMEM_PERSIST)) {
*iova = pbuf->smem->device_addr;
dprintk(CVP_MEM,
"Unmap persist fd %d, dma_buf %#llx iova %#x\n",
pbuf->fd, pbuf->smem->dma_buf, *iova);
list_del(&pbuf->list);
if (*iova) {
msm_cvp_unmap_smem(inst, pbuf->smem, "unmap user persist");
msm_cvp_smem_put_dma_buf(pbuf->smem->dma_buf);
pbuf->smem->device_addr = 0;
}
dprintk(CVP_INFO, "%s - pbuf in use, smem refcount: %d",
__func__, pbuf->smem->refcount);
mutex_unlock(&inst->dma_cache.lock);
cvp_kmem_cache_free(&cvp_driver->smem_cache, pbuf->smem);
pbuf->smem = NULL;
cvp_kmem_cache_free(&cvp_driver->buf_cache, pbuf);
mutex_unlock(&inst->persistbufs.lock);
dma_buf_put(dma_buf);
return -EAGAIN;
return 0;
}
}
mutex_unlock(&inst->dma_cache.lock);
mutex_unlock(&inst->persistbufs.lock);
dma_buf_put(dma_buf);

@@ -1913,14 +1868,14 @@ int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,

return ret;
}
buf->fd = iova; // Why do we need to do this? This is not needed
buf->fd = iova;
}
return 0;
}

int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
struct eva_kmd_hfi_packet *in_pkt,
unsigned int offset, unsigned int buf_num, uint32_t *fd_arr)
unsigned int offset, unsigned int buf_num)
{
struct cvp_buf_type *buf;
struct cvp_hfi_cmd_session_hdr *cmd_hdr;
@@ -1944,11 +1899,8 @@ int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
offset += sizeof(*buf) >> 2;

if (buf->fd < 0 || !buf->size) {
dprintk(CVP_ERR, "%s: fd = %d, Size = %d, in_buf_num = %d\n",
__func__, buf->fd, buf->size, buf_num);
if (buf->fd < 0 || !buf->size)
continue;
}

ret = msm_cvp_map_user_persist_buf(inst, buf,
cmd_hdr->packet_type, i, &iova);
@@ -1959,7 +1911,7 @@ int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,

return ret;
}
fd_arr[i] = buf->fd;

#ifdef USE_PRESIL42
presil42_set_buf_fd(buf, iova, "cvp_map_user_persist");
return 0;
@@ -2084,7 +2036,7 @@ int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
if (!smem) {
dprintk(CVP_ERR, "%s invalid persist smem\n", __func__);
mutex_unlock(&inst->persistbufs.lock);
return -EINVAL; // Why to return? It should be continue.
return -EINVAL;
}
if (cbuf->ownership != DRIVER) {
dprintk(CVP_MEM,
@@ -2200,8 +2152,7 @@ void msm_cvp_populate_dsp_buf_info(struct cvp_internal_buf *buf,

if (buf_cnt < EVA_TRACE_MAX_BUF_NUM) {
for (int buf_idx = 0; buf_idx < buf_cnt; buf_idx++) {
trace_buf =
&dsp_debug_trace->sessions[idx].buf[buf_idx];
trace_buf = &dsp_debug_trace->sessions[idx].buf[buf_idx];
if (buf->smem->device_addr == trace_buf->iova) {
buf->smem->buf_idx = trace_buf->buf_idx;
buf->smem->pkt_type = trace_buf->pkt_type;
@@ -2,7 +2,6 @@
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#ifndef _MSM_CVP_BUF_H_
@@ -234,7 +233,7 @@ int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
unsigned int offset, unsigned int buf_num);
int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
struct eva_kmd_hfi_packet *in_pkt,
unsigned int offset, unsigned int buf_num, uint32_t *fd_arr);
unsigned int offset, unsigned int buf_num);
int msm_cvp_map_frame(struct msm_cvp_inst *inst,
struct eva_kmd_hfi_packet *in_pkt,
unsigned int offset, unsigned int buf_num);

@@ -8,6 +8,7 @@
#include "cvp_hfi_api.h"
#include "msm_cvp_debug.h"
#include "msm_cvp_clocks.h"
#include <linux/clk/qcom.h>

static bool __mmrm_client_check_scaling_supported(
struct mmrm_client_desc *client)
@@ -492,3 +493,22 @@ int cvp_set_bw(struct bus_info *bus, unsigned long bw)
return rc;
}

void dump_clock(struct iris_hfi_device *device)
{
struct clock_info *cl;

iris_hfi_for_each_clock(device, cl) {
dprintk(CVP_ERR, "Dumping Clk: %s\n", cl->name);
qcom_clk_dump(cl->clk, NULL, false);
}
}

void dump_regulator(struct iris_hfi_device *device)
{
struct regulator_info *rinfo;
iris_hfi_for_each_regulator(device, rinfo) {
dprintk(CVP_ERR, "Dumping Regulator: %s\n", rinfo->name);
qcom_clk_dump(NULL, rinfo->regulator, false);
}
}

@@ -25,4 +25,6 @@ int msm_cvp_init_clocks(struct iris_hfi_device *device);
void msm_cvp_deinit_clocks(struct iris_hfi_device *device);
int msm_cvp_set_bw(struct msm_cvp_core *core, struct bus_info *bus, unsigned long bw);
int cvp_set_bw(struct bus_info *bus, unsigned long bw);
void dump_regulator(struct iris_hfi_device *device);
void dump_clock(struct iris_hfi_device *device);
#endif
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2025, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#include <linux/jiffies.h>
@@ -198,7 +198,7 @@ struct msm_cvp_inst *cvp_get_inst_validate(struct msm_cvp_core *core,

s = cvp_get_inst(core, session_id);
if (!s) {
dprintk(CVP_WARN, "%s session doesn't exit\n", __func__);
WARN(true, "%s session doesn't exit\n", __func__);
return NULL;
}

@@ -365,8 +365,8 @@ int wait_for_sess_signal_receipt(struct msm_cvp_inst *inst,
msecs_to_jiffies(
inst->core->resources.msm_cvp_hw_rsp_timeout));
if (!rc) {
dprintk(CVP_WARN, "Wait interrupted or timed out: %d session_id = %#x\n",
SESSION_MSG_INDEX(cmd), hash32_ptr(inst->session));
dprintk(CVP_WARN, "Wait interrupted or timed out: %d\n",
SESSION_MSG_INDEX(cmd));
if (inst->state != MSM_CVP_CORE_INVALID)
print_hfi_queue_info(ops_tbl);
if (cmd != HAL_SESSION_STOP_DONE &&
@@ -580,7 +580,6 @@ void handle_session_error(enum hal_command_response cmd, void *data)
wake_up_all(&inst->event_handler.wq);
}

BUG_ON(!msm_cvp_session_error_recovery);
cvp_put_inst(inst);
}

@@ -613,7 +612,6 @@ void handle_session_timeout(struct msm_cvp_inst *inst, bool stop_required)
&inst->event_handler.lock, flags);
wake_up_all(&inst->event_handler.wq);

BUG_ON(!msm_cvp_session_error_recovery);
if (stop_required)
msm_cvp_session_flush_stop(inst);
}
@@ -729,7 +727,7 @@ void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst)
if (!inst->session) {
dprintk(CVP_SESS, "%s: inst %pK session already cleaned\n",
__func__, inst);
mutex_unlock(&inst->lock);
mutex_unlock(&inst->lock);
return;
}

@@ -748,7 +746,7 @@ void msm_cvp_comm_session_clean(struct msm_cvp_inst *inst)
static void handle_session_close(enum hal_command_response cmd, void *data)
{
struct msm_cvp_cb_cmd_done *response = data;
struct msm_cvp_inst *inst, *temp;
struct msm_cvp_inst *inst;
struct msm_cvp_core *core;

if (!response) {
@@ -763,7 +761,7 @@ static void handle_session_close(enum hal_command_response cmd, void *data)
dprintk(CVP_WARN, "%s: response for an inactive session %#x\n",
__func__, response->session_id);

list_for_each_entry_safe(inst, temp, &core->instances, list)
list_for_each_entry(inst, &core->instances, list)
cvp_print_inst(CVP_WARN, inst);

return;
@@ -1325,47 +1323,46 @@ void msm_cvp_ssr_handler(struct work_struct *work)
ops_tbl = core->dev_ops;

if (core->ssr_type == SSR_SESSION_ABORT) {
struct msm_cvp_inst *inst = NULL, *s, *inst_temp;
struct msm_cvp_inst *inst = NULL, *s;

dprintk(CVP_ERR, "Session abort triggered\n");
mutex_lock(&core->lock);
list_for_each_entry_safe(inst, inst_temp, &core->instances, list) {
list_for_each_entry(inst, &core->instances, list) {
dprintk(CVP_WARN,
"Session to abort: inst %#x ref %x\n",
inst, kref_read(&inst->kref));
if (inst != NULL) {
s = cvp_get_inst_validate(inst->core, inst);
if (!s) {
dprintk(CVP_WARN, "%s: Session is not a valid session\n",
__func__);
continue;
}
print_hfi_queue_info(ops_tbl);
cvp_put_inst(s);
} else {
dprintk(CVP_WARN, "No active CVP session to abort\n");
}
break;
}
mutex_unlock(&core->lock);

if (inst != NULL) {
s = cvp_get_inst_validate(inst->core, inst);
if (!s)
return;
print_hfi_queue_info(ops_tbl);
cvp_put_inst(s);
} else {
dprintk(CVP_WARN, "No active CVP session to abort\n");
}

return;
}
if (core->ssr_type == SSR_SESSION_ERROR) {
struct msm_cvp_cb_cmd_done response = { 1 };
struct msm_cvp_inst *inst = NULL, *inst_t;
struct msm_cvp_inst *inst = NULL, *inst_temp = NULL;

dprintk(CVP_ERR, "Session error triggered\n");
mutex_lock(&core->lock);
list_for_each_entry_safe(inst, inst_t, &core->instances, list) {
list_for_each_entry(inst, &core->instances, list) {
if (inst != NULL) {
inst_temp = cvp_get_inst_validate(inst->core, inst);
if (!inst_temp) {
dprintk(CVP_WARN, "%s: Session is not a valid session\n",
__func__);
continue;
}
dprintk(CVP_INFO, "Session to be taken for session error 0x%x\n",
inst);
response.session_id = inst;
}
break;
}
}
mutex_unlock(&core->lock);

if (!response.session_id) {
dprintk(CVP_ERR, "No active session\n");
return;
@@ -1373,23 +1370,28 @@ void msm_cvp_ssr_handler(struct work_struct *work)
response.device_id = 0x00FF;
response.status = CVP_ERR_HW_FATAL;
response.size = sizeof(struct msm_cvp_cb_cmd_done);
dprintk(CVP_ERR, "Session error triggered\n");
handle_session_error(HAL_SESSION_ERROR, (void *)(&response));
return;
}
if (core->ssr_type == SSR_SESSION_TIMEOUT) {
struct msm_cvp_inst *inst = NULL, *inst_t;
struct msm_cvp_inst *inst = NULL, *inst_temp = NULL;

dprintk(CVP_ERR, "Session timeout triggered\n");
mutex_lock(&core->lock);
list_for_each_entry_safe(inst, inst_t, &core->instances, list) {
list_for_each_entry(inst, &core->instances, list) {
if (inst != NULL) {
inst_temp = cvp_get_inst_validate(inst->core, inst);
if (!inst_temp) {
dprintk(CVP_WARN, "%s: Session is not a valid session\n",
__func__);
continue;
}
dprintk(CVP_INFO, "Session to be taken for session timeout 0x%x\n",
inst);
handle_session_timeout(inst, false);
break;
}
break;
}
mutex_unlock(&core->lock);
dprintk(CVP_ERR, "Session timeout triggered\n");
handle_session_timeout(inst, true);
return;
}
if (core->ssr_type == SSR_CORE_SMMU_FAULT) {
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#include <linux/debugfs.h>
@@ -52,8 +52,7 @@ bool msm_cvp_dcvs_disable = !true;
int msm_cvp_minidump_enable = !1;
int cvp_kernel_fence_enabled = 2;
int msm_cvp_hw_wd_recovery = 1;
int msm_cvp_smmu_fault_recovery = 1;
int msm_cvp_session_error_recovery = 1;
int msm_cvp_smmu_fault_recovery = !1;

#define MAX_DBG_BUF_SIZE 4096

@@ -474,8 +473,6 @@ struct dentry *msm_cvp_debugfs_init_core(struct msm_cvp_core *core,
&msm_cvp_hw_wd_recovery);
debugfs_create_u32("smmu_fault_recovery", 0644, dir,
&msm_cvp_smmu_fault_recovery);
debugfs_create_u32("session_error_recovery", 0644, dir,
&msm_cvp_session_error_recovery);
failed_create_dir:
return dir;
}
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024-2025, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/

#ifndef __MSM_CVP_DEBUG__
@@ -76,7 +76,6 @@ extern int msm_cvp_minidump_enable;
extern int cvp_kernel_fence_enabled;
extern int msm_cvp_hw_wd_recovery;
extern int msm_cvp_smmu_fault_recovery;
extern int msm_cvp_session_error_recovery;

#define dprintk(__level, __fmt, arg...) \
do { \
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/rpmsg.h>
@@ -53,7 +53,6 @@ static int __fastrpc_driver_invoke(struct fastrpc_device *dev,

static int cvp_dsp_send_cmd(struct cvp_dsp_cmd_msg *cmd, uint32_t len)
{
#ifdef CVP_DSP_ENABLED
int rc = 0;
struct cvp_dsp_apps *me = &gfa_cv;

@@ -76,9 +75,6 @@ static int cvp_dsp_send_cmd(struct cvp_dsp_cmd_msg *cmd, uint32_t len)

exit:
return rc;
#else
return -ENODEV;
#endif /* End of CVP_DSP_ENABLED */
}

static int cvp_dsp_send_cmd_sync(struct cvp_dsp_cmd_msg *cmd,
@@ -704,22 +700,6 @@ static struct rpmsg_driver cvp_dsp_rpmsg_client = {
},
};

static int cvp_register_rpmsg_driver(struct rpmsg_driver *rpmsg_driver)
{
#ifdef CVP_DSP_ENABLED
return register_rpmsg_driver(rpmsg_driver);
#else
return -ENODEV;
#endif /* End of CVP_DSP_ENABLED */
}

static void cvp_unregister_rpmsg_driver(struct rpmsg_driver *rpmsg_driver)
{
#ifdef CVP_DSP_ENABLED
return unregister_rpmsg_driver(rpmsg_driver);
#endif /* End of CVP_DSP_ENABLED */
}

static void cvp_dsp_set_queue_hdr_defaults(struct cvp_hfi_queue_header *q_hdr)
{
q_hdr->qhdr_status = 0x1;
@@ -1568,15 +1548,13 @@ void __dsp_cvp_sess_delete(struct cvp_dsp_cmd_msg *cmd)
frpc_node = cvp_get_fastrpc_node_with_handle(dsp2cpu_cmd->pid);
if (!frpc_node) {
dprintk(CVP_ERR,
"%s pid 0x%x not registered with fastrpc, but allow delete session\n",
dprintk(CVP_ERR, "%s pid 0x%x not registered with fastrpc\n",
__func__, dsp2cpu_cmd->pid);
// cmd->ret = -1;
// return;
cmd->ret = -1;
return;
}

if (frpc_node)
cvp_put_fastrpc_node(frpc_node);
cvp_put_fastrpc_node(frpc_node);

task = inst->task;

@@ -1600,8 +1578,7 @@ void __dsp_cvp_sess_delete(struct cvp_dsp_cmd_msg *cmd)
}

/* unregister fastrpc driver */
if (frpc_node)
eva_fastrpc_driver_unregister(dsp2cpu_cmd->pid, false);
eva_fastrpc_driver_unregister(dsp2cpu_cmd->pid, false);

if (task)
put_task_struct(task);
@@ -1630,7 +1607,8 @@ void __dsp_cvp_power_req(struct cvp_dsp_cmd_msg *cmd)
dsp2cpu_cmd->session_cpu_high,
dsp2cpu_cmd->session_cpu_low);

if (!inst) {
if (!inst || !is_cvp_inst_valid(inst)) {
dprintk(CVP_ERR, "%s incorrect session ID %llx\n", __func__, inst);
cmd->ret = -1;
goto dsp_fail_power_req;
}
@@ -2279,7 +2257,7 @@ int cvp_dsp_device_init(void)
name[11]++;
}

rc = cvp_register_rpmsg_driver(&cvp_dsp_rpmsg_client);
rc = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
if (rc) {
dprintk(CVP_ERR,
"%s : register_rpmsg_driver failed rc = %d\n",
@@ -2323,5 +2301,5 @@ void cvp_dsp_device_exit(void)
mutex_destroy(&me->tx_lock);
mutex_destroy(&me->rx_lock);
mutex_destroy(&me->driver_name_lock);
cvp_unregister_rpmsg_driver(&cvp_dsp_rpmsg_client);
unregister_rpmsg_driver(&cvp_dsp_rpmsg_client);
}
@@ -27,7 +27,7 @@
#include "cvp_hfi_helper.h"

#define MAX_SUPPORTED_INSTANCES 16
#define MAX_CV_INSTANCES 12
#define MAX_CV_INSTANCES 8
#define MAX_DMM_INSTANCES 8
#define MAX_DEBUGFS_NAME 50
#define MAX_DSP_INIT_ATTEMPTS 16
@@ -367,6 +367,8 @@ struct msm_cvp_core {
unsigned long bw_sum;
atomic64_t kernel_trans_id;
struct cvp_debug_log log;
int core_reg_cnt;
int ctrl_reg_cnt;
};

struct msm_cvp_inst {
@@ -308,16 +308,8 @@ static struct msm_cvp_common_data sm8750_common_data[] = {
},
{
.key = "qcom,dsp-enabled",
#ifdef CVP_DSP_ENABLED
.value = 1,
#else
.value = 0,
#endif
},
{
.key = "qcom,rcg_vnoc_clk_en_low",
.value = 0x0,
},
}
};

static struct msm_cvp_common_data sm8735_common_data[] = {
@@ -367,16 +359,8 @@ static struct msm_cvp_common_data sm8735_common_data[] = {
},
{
.key = "qcom,dsp-enabled",
#ifdef CVP_DSP_ENABLED
.value = 1,
#else
.value = 0,
#endif
},
{
.key = "qcom,rcg_vnoc_clk_en_low",
.value = 0x8,
},
}
};

/* Default UBWC config for LPDDR5 */
@@ -853,9 +853,6 @@ int cvp_read_platform_resources_from_drv_data(
res->non_fatal_pagefaults = find_key_value(platform_data,
"qcom,domain-attr-non-fatal-faults");

res->rcg_vnoc_clk_en_low = find_key_value(platform_data,
"qcom,rcg_vnoc_clk_en_low");

res->vpu_ver = platform_data->vpu_ver;
res->ubwc_config = platform_data->ubwc_config;
res->fatal_ssr = false;
@@ -215,7 +215,6 @@ struct msm_cvp_platform_resources {
uint32_t vpu_ver;
uint32_t fw_cycles;
struct msm_cvp_ubwc_config_data *ubwc_config;
uint32_t rcg_vnoc_clk_en_low;
};

static inline bool is_iommu_present(struct msm_cvp_platform_resources *res)
@@ -79,10 +79,10 @@ static int cvp_msgq_receiver(void *data)
size = (4 + msgq_drv->pending_local_cmd.len)<<2;

/* sanity check on size information */
if (size > sizeof(*msg_ptr)) {
if (size > GH_MSGQ_MAX_MSG_SIZE_BYTES) {
dprintk(CVP_ERR,
"%s: msg size %d exceed max size supported %d \n",
__func__, size, sizeof(*msg_ptr));
__func__, size, GH_MSGQ_MAX_MSG_SIZE_BYTES);
rc = -E2BIG;
msgq_drv->pending_local_cmd.type = 0;
continue;
@@ -112,9 +112,9 @@ static int cvp_msgq_receiver(void *data)
"%s: gh_msgq_recv respond type from remote VM\n",
__func__);

if (((msg_ptr->type & CVP_IPC_MSG_TYPE_ACT_CHECK) !=
msgq_drv->pending_remote_rsp.type) ||
(msgq_drv->pending_remote_rsp.type > CVP_MAX_IPC_CMD + 1)) {
if ((msg_ptr->type & CVP_IPC_MSG_TYPE_ACT_CHECK) !=
msgq_drv->pending_remote_rsp.type) {

dprintk(CVP_ERR,
"%s: Msg disgard,recv type %d, pend local %d\n",
__func__, msg_ptr->type,