replace common qcom sources with samsung ones
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#ifndef __H_HFI_PROPERTY_H__
@@ -108,7 +108,6 @@ enum hfi_hevc_profile_type {
HFI_H265_PROFILE_MAIN_10 = 2,
HFI_H265_PROFILE_MAIN_10_STILL_PICTURE = 3,
HFI_H265_PROFILE_MULTIVIEW_MAIN = 4,
HFI_H265_PROFILE_MULTIVIEW_MAIN_10 = 5,
};

enum hfi_vp9_profile_type {
@@ -624,8 +623,6 @@ enum hfi_view_id {
HFI_VIEW_ID_INVALID = 0xFFFFFFFF,
};

#define HFI_PROP_PAIRED_YUV 0x030001AA

#define HFI_PROP_VIEW_ID 0x030001A5

#define HFI_PROP_OPEN_GOP 0x030001A6

@@ -82,6 +82,7 @@ struct msm_vidc_core {
enum msm_vidc_core_sub_state sub_state;
char sub_state_name[MAX_NAME_LENGTH];
struct mutex lock;
struct msm_vidc_resource *resource;
struct msm_vidc_platform *platform;
u32 intr_status;
u32 spur_count;
@@ -128,28 +129,6 @@ struct msm_vidc_core {
u32 packet_id;
u32 sys_init_id;
struct msm_vidc_synx_fence_data synx_fence_data;
u32 irq;
u8 __iomem *register_base_addr;
int fw_cookie;
bool is_subcache_set_to_fw;
struct bus_info *bus_tbl;
u32 bus_tbl_count;
struct clock_info *clock_tbl;
u32 clock_tbl_count;
struct reset_info *reset_tbl;
u32 reset_tbl_count;
struct subcache_info *subcache_tbl;
u32 subcache_tbl_count;
struct regulator_info *regulator_tbl;
u32 regulator_tbl_count;
struct power_domain_info *power_domain_tbl;
u32 power_domain_tbl_count;
struct context_bank_info *context_bank_tbl;
u32 context_bank_tbl_count;
struct device_region_info *device_region_tbl;
u32 device_region_tbl_count;
struct frequency_table *freq_tbl;
u32 freq_tbl_count;
};

#endif // _MSM_VIDC_CORE_H_

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#ifndef _MSM_VIDC_DRIVER_H_
@@ -534,8 +534,6 @@ struct msm_vidc_buffer *get_meta_buffer(struct msm_vidc_inst *inst,
struct msm_vidc_inst *get_inst_ref_locked(struct msm_vidc_inst *inst);
struct msm_vidc_inst *get_inst_ref(struct msm_vidc_core *core,
struct msm_vidc_inst *instance);
struct msm_vidc_inst *get_inst(struct msm_vidc_core *core,
u32 session_id);
void put_inst(struct msm_vidc_inst *inst);
bool msm_vidc_allow_metadata_delivery(struct msm_vidc_inst *inst,
u32 cap_id, u32 port);

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#ifndef _MSM_VIDC_INTERNAL_H_
@@ -63,9 +63,6 @@ struct msm_vidc_inst;
#ifndef V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_MULTIVIEW
#define V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_MULTIVIEW (4)
#endif
#ifndef V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10_MULTIVIEW
#define V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10_MULTIVIEW (5)
#endif

enum msm_vidc_blur_types {
MSM_VIDC_BLUR_NONE = 0x0,
@@ -276,7 +273,6 @@ enum msm_vidc_metadata_bits {
CAP(META_DOLBY_RPU) \
CAP(META_HDR10_MAX_RGB_INFO) \
CAP(META_VIEW_ID) \
CAP(META_VIEW_PAIR) \
CAP(META_THREE_DIMENSIONAL_REF_DISP_INFO) \
CAP(DRV_VERSION) \
CAP(MIN_FRAME_QP) \

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2022, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#ifndef _MSM_VIDC_RESOURCES_H_
@@ -16,6 +16,91 @@ struct iommu_domain;
struct device;
struct msm_vidc_core;

/*
* These are helper macros to iterate over various lists within
* msm_vidc_core->resource. The intention is to cut down on a lot
* of boiler-plate code
*/

/* Read as "for each 'thing' in a set of 'thingies'" */
#define venus_hfi_for_each_thing(__device, __thing, __thingy) \
venus_hfi_for_each_thing_continue(__device, __thing, __thingy, 0)

#define venus_hfi_for_each_thing_reverse(__device, __thing, __thingy) \
venus_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
(__device)->resource->__thingy##_set.count - 1)

/* TODO: the __from parameter technically not required since we can figure it
* out with some pointer magic (i.e. __thing - __thing##_tbl[0]). If this macro
* sees extensive use, probably worth cleaning it up but for now omitting it
* since it introduces unnecessary complexity.
*/
#define venus_hfi_for_each_thing_continue(__device, __thing, __thingy, __from) \
for (__thing = &(__device)->resource->\
__thingy##_set.__thingy##_tbl[__from]; \
__thing < &(__device)->resource->__thingy##_set.__thingy##_tbl[0] + \
((__device)->resource->__thingy##_set.count - __from); \
++__thing)

#define venus_hfi_for_each_thing_reverse_continue(__device, __thing, __thingy, \
__from) \
for (__thing = &(__device)->resource->\
__thingy##_set.__thingy##_tbl[__from]; \
__thing >= &(__device)->resource->__thingy##_set.__thingy##_tbl[0]; \
--__thing)

/* Bus set helpers */
#define venus_hfi_for_each_bus(__device, __binfo) \
venus_hfi_for_each_thing(__device, __binfo, bus)
#define venus_hfi_for_each_bus_reverse(__device, __binfo) \
venus_hfi_for_each_thing_reverse(__device, __binfo, bus)

/* Regular set helpers */
#define venus_hfi_for_each_regulator(__device, __rinfo) \
venus_hfi_for_each_thing(__device, __rinfo, regulator)
#define venus_hfi_for_each_regulator_reverse(__device, __rinfo) \
venus_hfi_for_each_thing_reverse(__device, __rinfo, regulator)
#define venus_hfi_for_each_regulator_reverse_continue(__device, __rinfo, \
__from) \
venus_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
regulator, __from)

/* Power domain set helpers */
#define venus_hfi_for_each_power_domain(__device, __pdinfo) \
venus_hfi_for_each_thing(__device, __pdinfo, power_domain)

/* Clock set helpers */
#define venus_hfi_for_each_clock(__device, __cinfo) \
venus_hfi_for_each_thing(__device, __cinfo, clock)
#define venus_hfi_for_each_clock_reverse(__device, __cinfo) \
venus_hfi_for_each_thing_reverse(__device, __cinfo, clock)

/* Reset clock set helpers */
#define venus_hfi_for_each_reset_clock(__device, __rcinfo) \
venus_hfi_for_each_thing(__device, __rcinfo, reset)
#define venus_hfi_for_each_reset_clock_reverse(__device, __rcinfo) \
venus_hfi_for_each_thing_reverse(__device, __rcinfo, reset)
#define venus_hfi_for_each_reset_clock_reverse_continue(__device, __rinfo, \
__from) \
venus_hfi_for_each_thing_reverse_continue(__device, __rinfo, \
reset, __from)

/* Subcache set helpers */
#define venus_hfi_for_each_subcache(__device, __sinfo) \
venus_hfi_for_each_thing(__device, __sinfo, subcache)
#define venus_hfi_for_each_subcache_reverse(__device, __sinfo) \
venus_hfi_for_each_thing_reverse(__device, __sinfo, subcache)

/* Contextbank set helpers */
#define venus_hfi_for_each_context_bank(__device, __sinfo) \
venus_hfi_for_each_thing(__device, __sinfo, context_bank)
#define venus_hfi_for_each_context_bank_reverse(__device, __sinfo) \
venus_hfi_for_each_thing_reverse(__device, __sinfo, context_bank)

/* Device region set helper */
#define venus_hfi_for_each_device_region(__device, __sinfo) \
venus_hfi_for_each_thing(__device, __sinfo, device_region)

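For reference, a minimal sketch of what the forward iterator expands to, derived only from the definitions above — e.g. venus_hfi_for_each_bus(core, binfo) becomes roughly:

/* illustrative expansion; assumes core->resource->bus_set has been populated */
struct bus_info *binfo;

for (binfo = &core->resource->bus_set.bus_tbl[0];
     binfo < &core->resource->bus_set.bus_tbl[0] + core->resource->bus_set.count;
     ++binfo) {
        d_vpr_h("%s: bus %s\n", __func__, binfo->name); /* body runs once per bus_info entry */
}

The reverse variants start at count - 1 and walk back to index 0 of the same table.
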
enum msm_vidc_branch_mem_flags {
MSM_VIDC_CLKFLAG_RETAIN_PERIPH,
MSM_VIDC_CLKFLAG_NORETAIN_PERIPH,
@@ -32,17 +117,32 @@ struct bus_info {
u32 max_kbps;
};

struct bus_set {
struct bus_info *bus_tbl;
u32 count;
};

struct regulator_info {
struct regulator *regulator;
const char *name;
bool hw_power_collapse;
};

struct regulator_set {
struct regulator_info *regulator_tbl;
u32 count;
};

struct power_domain_info {
struct device *genpd_dev;
const char *name;
};

struct power_domain_set {
struct power_domain_info *power_domain_tbl;
u32 count;
};

struct clock_residency {
struct list_head list;
u64 rate;
@@ -62,12 +162,22 @@ struct clock_info {
struct list_head residency_list; /* list of struct clock_residency */
};

struct clock_set {
struct clock_info *clock_tbl;
u32 count;
};

struct reset_info {
struct reset_control *rst;
const char *name;
bool exclusive_release;
};

struct reset_set {
struct reset_info *reset_tbl;
u32 count;
};

struct subcache_info {
struct llcc_slice_desc *subcache;
const char *name;
@@ -75,6 +185,12 @@ struct subcache_info {
bool isactive;
};

struct subcache_set {
struct subcache_info *subcache_tbl;
u32 count;
bool set_to_fw;
};

struct addr_range {
u32 start;
u32 size;
@@ -91,10 +207,20 @@ struct context_bank_info {
u64 dma_mask;
};

struct context_bank_set {
struct context_bank_info *context_bank_tbl;
u32 count;
};

struct frequency_table {
unsigned long freq;
};

struct freq_set {
struct frequency_table *freq_tbl;
u32 count;
};

struct device_region_info {
const char *name;
phys_addr_t phy_addr;
@@ -103,6 +229,26 @@ struct device_region_info {
u32 region;
};

struct device_region_set {
struct device_region_info *device_region_tbl;
u32 count;
};

struct msm_vidc_resource {
u8 __iomem *register_base_addr;
u32 irq;
struct bus_set bus_set;
struct regulator_set regulator_set;
struct power_domain_set power_domain_set;
struct clock_set clock_set;
struct reset_set reset_set;
struct subcache_set subcache_set;
struct context_bank_set context_bank_set;
struct freq_set freq_set;
struct device_region_set device_region_set;
int fw_cookie;
};

#define call_res_op(c, op, ...) \
(((c) && (c)->res_ops && (c)->res_ops->op) ? \
((c)->res_ops->op(__VA_ARGS__)) : 0)

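A minimal usage sketch of call_res_op(), consistent with how msm_vidc_init_resources() below invokes it: the macro evaluates to 0 when res_ops or the named op is missing, so callers only check the returned rc.

/* illustrative call; core->res_ops->init is assumed to be populated during probe */
rc = call_res_op(core, init, core);
if (rc)
    d_vpr_e("%s: resource init failed: %d\n", __func__, rc);
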
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/list.h>
@@ -35,17 +35,15 @@ enum tzbsp_video_state {
static int protect_cp_mem(struct msm_vidc_core *core)
{
struct tzbsp_memprot memprot;
struct context_bank_info *cb;
u32 cnt = 0;
int rc = 0;
struct context_bank_info *cb;

memprot.cp_start = 0x0;
memprot.cp_size = 0x0;
memprot.cp_nonpixel_start = 0x0;
memprot.cp_nonpixel_size = 0x0;

for (cnt = 0; cnt < core->context_bank_tbl_count; ++cnt) {
cb = &core->context_bank_tbl[cnt];
venus_hfi_for_each_context_bank(core, cb) {
if (cb->region == MSM_VIDC_NON_SECURE) {
memprot.cp_size = cb->addr_range.start;

@@ -183,12 +181,13 @@ int fw_load(struct msm_vidc_core *core)
{
int rc;

if (!core->fw_cookie) {
core->fw_cookie = __load_fw_to_memory(core->pdev,
if (!core->resource->fw_cookie) {
core->resource->fw_cookie = __load_fw_to_memory(core->pdev,
core->platform->data.fwname);
if (core->fw_cookie <= 0) {
d_vpr_e("%s: firmware download failed %d\n", __func__, core->fw_cookie);
core->fw_cookie = 0;
if (core->resource->fw_cookie <= 0) {
d_vpr_e("%s: firmware download failed %d\n",
__func__, core->resource->fw_cookie);
core->resource->fw_cookie = 0;
return -ENOMEM;
}
}
@@ -202,9 +201,9 @@ int fw_load(struct msm_vidc_core *core)
return rc;

fail_protect_mem:
if (core->fw_cookie)
qcom_scm_pas_shutdown(core->fw_cookie);
core->fw_cookie = 0;
if (core->resource->fw_cookie)
qcom_scm_pas_shutdown(core->resource->fw_cookie);
core->resource->fw_cookie = 0;
return rc;
}

@@ -212,14 +211,14 @@ int fw_unload(struct msm_vidc_core *core)
{
int ret;

if (!core->fw_cookie)
if (!core->resource->fw_cookie)
return -EINVAL;

ret = qcom_scm_pas_shutdown(core->fw_cookie);
ret = qcom_scm_pas_shutdown(core->resource->fw_cookie);
if (ret)
d_vpr_e("Firmware unload failed rc=%d\n", ret);

core->fw_cookie = 0;
core->resource->fw_cookie = 0;

return ret;
}

@@ -1518,7 +1518,7 @@ int msm_venc_s_param(struct msm_vidc_inst *inst,
{
int rc = 0;
struct v4l2_fract *timeperframe = NULL;
u32 input_rate_q16, max_rate_q16, min_rate_q16;
u32 input_rate_q16, max_rate_q16;
u32 input_rate, default_rate;
bool is_frame_rate = false;

@@ -1526,7 +1526,6 @@ int msm_venc_s_param(struct msm_vidc_inst *inst,
/* operating rate */
timeperframe = &s_parm->parm.output.timeperframe;
max_rate_q16 = inst->capabilities[OPERATING_RATE].max;
min_rate_q16 = inst->capabilities[OPERATING_RATE].min;
default_rate = inst->capabilities[OPERATING_RATE].value >> 16;
s_parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
} else {
@@ -1534,7 +1533,6 @@ int msm_venc_s_param(struct msm_vidc_inst *inst,
timeperframe = &s_parm->parm.capture.timeperframe;
is_frame_rate = true;
max_rate_q16 = inst->capabilities[FRAME_RATE].max;
min_rate_q16 = inst->capabilities[FRAME_RATE].min;
default_rate = inst->capabilities[FRAME_RATE].value >> 16;
s_parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
}
@@ -1554,13 +1552,7 @@ int msm_venc_s_param(struct msm_vidc_inst *inst,
i_vpr_h(inst, "%s: type %s, %s value %u limited to %u\n",
__func__, v4l2_type_name(s_parm->type),
is_frame_rate ? "frame rate" : "operating rate",
input_rate << 16, max_rate_q16);
} else if (input_rate < (min_rate_q16 >> 16)) {
input_rate_q16 = min_rate_q16;
i_vpr_h(inst, "%s: type %s, %s value %u limited to %u\n",
__func__, v4l2_type_name(s_parm->type),
is_frame_rate ? "frame rate" : "operating rate",
input_rate << 16, min_rate_q16);
input_rate_q16, max_rate_q16);
} else {
input_rate_q16 = input_rate << 16;
input_rate_q16 |=

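As the *_q16 names and the shifts above indicate, these rates are handled in Q16 fixed point: the integer rate sits in the upper 16 bits, so a capability value is decoded with value >> 16 and a user-supplied rate is encoded with input_rate << 16. For example, 30 fps is stored as 30 << 16 = 0x001E0000, and a stored maximum of 0x00F00000 decodes back to 240.
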
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#include "msm_vidc_internal.h"
@@ -26,9 +26,8 @@ static bool is_priv_ctrl(u32 id)
switch (id) {
/*
* TODO: V4L2_CID_MPEG_VIDEO_HEVC_PROFILE is std ctrl. But
* V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10_STILL_PICTURE,
* V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_MULTIVIEW and
* V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10_MULTIVIEW support is not
* V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10_STILL_PICTURE and
* V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_MULTIVIEW support is not
* available yet. Hence, make this as private ctrl for time being
*/
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
@@ -55,7 +54,6 @@ static const char *const mpeg_video_hevc_profile[] = {
"Main 10",
"Main 10 Still Picture",
"Main Multi View",
"Main 10 Multi View",
NULL,
};


@@ -312,8 +312,8 @@ static ssize_t core_info_read(struct file *file, char __user *buf,
cur += write_str(cur, end - cur,
"FW version : %s\n", core->fw_version);
cur += write_str(cur, end - cur,
"register_base: 0x%x\n", core->register_base_addr);
cur += write_str(cur, end - cur, "irq: %u\n", core->irq);
"register_base: 0x%x\n", core->resource->register_base_addr);
cur += write_str(cur, end - cur, "irq: %u\n", core->resource->irq);

len = simple_read_from_buffer(buf, count, ppos,
dbuf, cur - dbuf);

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2022, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#include <linux/iommu.h>
@@ -25,6 +25,10 @@
#include "hfi_packet.h"
#include "msm_vidc_events.h"

#if IS_ENABLED(CONFIG_SEC_ABC)
#include <linux/sti/abc_common.h>
#endif

extern struct msm_vidc_core *g_core;

#define is_odd(val) ((val) % 2 == 1)
@@ -3938,6 +3942,9 @@ int msm_vidc_core_init(struct msm_vidc_core *core)
if (rc) {
msm_vidc_change_core_state(core, MSM_VIDC_CORE_ERROR, __func__);
d_vpr_e("%s: core init failed\n", __func__);
#if IS_ENABLED(CONFIG_SEC_ABC)
sec_abc_send_event("MODULE=mm@WARN=venus_fw_load_fail");
#endif
/* do core deinit to handle error */
msm_vidc_core_deinit_locked(core, true);
goto unlock;
@@ -4721,24 +4728,6 @@ struct msm_vidc_inst *get_inst_ref(struct msm_vidc_core *core,
return inst;
}

struct msm_vidc_inst *get_inst(struct msm_vidc_core *core,
u32 session_id)
{
struct msm_vidc_inst *inst = NULL;
bool matches = false;

mutex_lock(&core->lock);
list_for_each_entry(inst, &core->instances, list) {
if (inst->session_id == session_id) {
matches = true;
break;
}
}
inst = matches ? get_inst_ref_locked(inst) : NULL;
mutex_unlock(&core->lock);
return inst;
}

void put_inst(struct msm_vidc_inst *inst)
{
kref_put(&inst->kref, msm_vidc_close_helper);
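A minimal usage sketch of the lookup API above (illustrative only; the error value is an assumption): get_inst() takes a reference on the matching instance under core->lock, and every successful lookup must be balanced with put_inst(), which drops the kref.

struct msm_vidc_inst *inst;

inst = get_inst(core, session_id);
if (!inst)
    return -EINVAL; /* no instance with this session_id */
/* ... use inst ... */
put_inst(inst);
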
@@ -5608,15 +5597,13 @@ struct context_bank_info *msm_vidc_get_context_bank_for_region(
struct msm_vidc_core *core, enum msm_vidc_buffer_region region)
{
struct context_bank_info *cb = NULL, *match = NULL;
u32 cnt = 0;

if (!region || region >= MSM_VIDC_REGION_MAX) {
d_vpr_e("Invalid region %#x\n", region);
return NULL;
}

for (cnt = 0; cnt < core->context_bank_tbl_count; ++cnt) {
cb = &core->context_bank_tbl[cnt];
venus_hfi_for_each_context_bank(core, cb) {
if (cb->region == region) {
match = cb;
break;
@@ -5632,10 +5619,8 @@ struct context_bank_info *msm_vidc_get_context_bank_for_device(
struct msm_vidc_core *core, struct device *dev)
{
struct context_bank_info *cb = NULL, *match = NULL;
u32 cnt = 0;

for (cnt = 0; cnt < core->context_bank_tbl_count; ++cnt) {
cb = &core->context_bank_tbl[cnt];
venus_hfi_for_each_context_bank(core, cb) {
if (of_device_is_compatible(dev->of_node, cb->name)) {
match = cb;
break;

@@ -97,11 +97,12 @@ u64 msm_vidc_max_freq(struct msm_vidc_inst *inst)

core = inst->core;

if (!core->freq_tbl || !core->freq_tbl_count) {
if (!core->resource || !core->resource->freq_set.freq_tbl ||
!core->resource->freq_set.count) {
i_vpr_e(inst, "%s: invalid frequency table\n", __func__);
return freq;
}
freq_tbl = core->freq_tbl;
freq_tbl = core->resource->freq_set.freq_tbl;
freq = freq_tbl[0].freq;

i_vpr_l(inst, "%s: rate = %llu\n", __func__, freq);
@@ -114,7 +115,7 @@ static int fill_dynamic_stats(struct msm_vidc_inst *inst,
struct msm_vidc_input_cr_data *temp, *next;
u32 cf = MSM_VIDC_MAX_UBWC_COMPLEXITY_FACTOR;
u32 cr = MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO;
u32 input_cr = MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO;
u32 input_cr = MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO;
u32 frame_size;

if (inst->power.fw_cr)
@@ -142,16 +143,6 @@ static int fill_dynamic_stats(struct msm_vidc_inst *inst,
input_cr = clamp_t(u32, input_cr, MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO,
MSM_VIDC_MAX_UBWC_COMPRESSION_RATIO);

/*
* CR = MIN means UBWC didn't compress at all,
* which is impossible unless the input yuv is pure while noise,
* so MIN means there is no valid CR info,
* set zero will let the bw calculation function use a predefined CR value instead,
* to avoid overvoting.
*/
if (input_cr == MSM_VIDC_MIN_UBWC_COMPRESSION_RATIO)
input_cr = 0;

vote_data->compression_ratio = cr;
vote_data->complexity_factor = cf;
vote_data->input_cr = input_cr;
@@ -224,6 +215,10 @@ int msm_vidc_scale_buses(struct msm_vidc_inst *inst)
u32 operating_rate, frame_rate;

core = inst->core;
if (!core->resource) {
i_vpr_e(inst, "%s: invalid resource params\n", __func__);
return -EINVAL;
}
vote_data = &inst->bus_data;

vote_data->power_mode = VIDC_POWER_NORMAL;
@@ -300,7 +295,7 @@ int msm_vidc_scale_buses(struct msm_vidc_inst *inst)
}
}
vote_data->work_mode = inst->capabilities[STAGE].value;
if (core->is_subcache_set_to_fw)
if (core->resource->subcache_set.set_to_fw)
vote_data->use_sys_cache = true;
vote_data->num_vpp_pipes = core->capabilities[NUM_VPP_PIPE].value;
fill_dynamic_stats(inst, vote_data);
@@ -343,7 +338,8 @@ int msm_vidc_set_clocks(struct msm_vidc_inst *inst)

core = inst->core;

if (!core->freq_tbl || !core->freq_tbl_count) {
if (!core->resource || !core->resource->freq_set.freq_tbl ||
!core->resource->freq_set.count) {
d_vpr_e("%s: invalid frequency table\n", __func__);
return -EINVAL;
}
@@ -381,8 +377,8 @@ int msm_vidc_set_clocks(struct msm_vidc_inst *inst)
* keep checking from lowest to highest rate until
* table rate >= requested rate
*/
for (i = core->freq_tbl_count - 1; i >= 0; i--) {
rate = core->freq_tbl[i].freq;
for (i = core->resource->freq_set.count - 1; i >= 0; i--) {
rate = core->resource->freq_set.freq_tbl[i].freq;
if (rate >= freq)
break;
}
@@ -390,10 +386,10 @@ int msm_vidc_set_clocks(struct msm_vidc_inst *inst)
i = 0;
if (increment) {
if (i > 0)
rate = core->freq_tbl[i - 1].freq;
rate = core->resource->freq_set.freq_tbl[i - 1].freq;
} else if (decrement) {
if (i < (int)(core->platform->data.freq_tbl_size - 1))
rate = core->freq_tbl[i + 1].freq;
rate = core->resource->freq_set.freq_tbl[i + 1].freq;
}
core->power.clk_freq = (u32)rate;

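A worked example of the selection loop above, assuming the table is sorted highest-first (index 0 holds the fastest rate, as the loop direction and the "lowest to highest" comment imply): with freq_tbl = {533 MHz, 444 MHz, 366 MHz} and a requested freq of 400 MHz, the scan starts at i = 2 (366 < 400, keep going), hits i = 1 (444 >= 400) and breaks, so 444 MHz is programmed; increment would then move the choice up to freq_tbl[0] (533 MHz), and decrement would drop it to freq_tbl[2] (366 MHz).
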
@@ -48,9 +48,7 @@ static inline bool is_video_device(struct device *dev)
of_device_is_compatible(dev->of_node, "qcom,cliffs-vidc") ||
of_device_is_compatible(dev->of_node, "qcom,volcano-vidc") ||
of_device_is_compatible(dev->of_node, "qcom,sm8750-vidc") ||
of_device_is_compatible(dev->of_node, "qcom,sm8750-vidc-v2") ||
of_device_is_compatible(dev->of_node, "qcom,tuna-vidc") ||
of_device_is_compatible(dev->of_node, "qcom,kera-vidc"));
of_device_is_compatible(dev->of_node, "qcom,sm8750-vidc-v2"));
}

static inline bool is_video_context_bank_device_node(struct device_node *of_node)
@@ -69,8 +67,16 @@ static inline bool is_video_context_bank_device(struct device *dev)

static int msm_vidc_init_resources(struct msm_vidc_core *core)
{
struct msm_vidc_resource *res = NULL;
int rc = 0;

res = devm_kzalloc(&core->pdev->dev, sizeof(*res), GFP_KERNEL);
if (!res) {
d_vpr_e("%s: failed to alloc memory for resource\n", __func__);
return -ENOMEM;
}
core->resource = res;

rc = call_res_op(core, init, core);
if (rc) {
d_vpr_e("%s: Failed to init resources: %d\n", __func__, rc);
@@ -123,8 +129,6 @@ static const struct of_device_id msm_vidc_dt_match[] = {
{.compatible = "qcom,sm8750-vidc-v2"},
{.compatible = "qcom,cliffs-vidc"},
{.compatible = "qcom,volcano-vidc"},
{.compatible = "qcom,tuna-vidc"},
{.compatible = "qcom,kera-vidc"},
{.compatible = "qcom,vidc,cb-ns-pxl"},
{.compatible = "qcom,vidc,cb-ns"},
{.compatible = "qcom,vidc,cb-sec-non-pxl"},

@@ -226,53 +226,63 @@ static int __opp_set_rate(struct msm_vidc_core *core, u64 freq)

static int __init_register_base(struct msm_vidc_core *core)
{
core->register_base_addr = devm_platform_ioremap_resource(core->pdev, 0);
if (IS_ERR(core->register_base_addr)) {
struct msm_vidc_resource *res;

res = core->resource;

res->register_base_addr = devm_platform_ioremap_resource(core->pdev, 0);
if (IS_ERR(res->register_base_addr)) {
d_vpr_e("%s: map reg addr failed %ld\n",
__func__, PTR_ERR(core->register_base_addr));
__func__, PTR_ERR(res->register_base_addr));
return -EINVAL;
}
d_vpr_h("%s: reg_base %pK\n", __func__, core->register_base_addr);
d_vpr_h("%s: reg_base %pK\n", __func__, res->register_base_addr);

return 0;
}

static int __init_irq(struct msm_vidc_core *core)
{
struct msm_vidc_resource *res;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
struct resource *kres;
#endif
int rc = 0;

res = core->resource;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 16, 0))
core->irq = platform_get_irq(core->pdev, 0);
res->irq = platform_get_irq(core->pdev, 0);
#else
kres = platform_get_resource(core->pdev, IORESOURCE_IRQ, 0);
core->irq = kres ? kres->start : -1;
res->irq = kres ? kres->start : -1;
#endif
if (core->irq < 0)
d_vpr_e("%s: get irq failed, %d\n", __func__, core->irq);
if (res->irq < 0)
d_vpr_e("%s: get irq failed, %d\n", __func__, res->irq);

d_vpr_h("%s: irq %d\n", __func__, core->irq);
d_vpr_h("%s: irq %d\n", __func__, res->irq);

rc = devm_request_threaded_irq(&core->pdev->dev, core->irq, venus_hfi_isr,
rc = devm_request_threaded_irq(&core->pdev->dev, res->irq, venus_hfi_isr,
venus_hfi_isr_handler, IRQF_TRIGGER_HIGH, "msm-vidc", core);
if (rc) {
d_vpr_e("%s: Failed to allocate venus IRQ\n", __func__);
return rc;
}
disable_irq_nosync(core->irq);
disable_irq_nosync(res->irq);

return rc;
}

static int __init_bus(struct msm_vidc_core *core)
{
const struct bw_table *bus_tbl = NULL;
const struct bw_table *bus_tbl;
struct bus_set *interconnects;
struct bus_info *binfo = NULL;
u32 bus_count = 0, cnt = 0;
int rc = 0;

interconnects = &core->resource->bus_set;

bus_tbl = core->platform->data.bw_tbl;
bus_count = core->platform->data.bw_tbl_size;

@@ -283,31 +293,29 @@ static int __init_bus(struct msm_vidc_core *core)
}

/* allocate bus_set */
core->bus_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*core->bus_tbl) * bus_count, GFP_KERNEL);
if (!core->bus_tbl) {
interconnects->bus_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*interconnects->bus_tbl) * bus_count, GFP_KERNEL);
if (!interconnects->bus_tbl) {
d_vpr_e("%s: failed to alloc memory for bus table\n", __func__);
return -ENOMEM;
}
core->bus_tbl_count = bus_count;
interconnects->count = bus_count;

/* populate bus field from platform data */
for (cnt = 0; cnt < core->bus_tbl_count; cnt++) {
core->bus_tbl[cnt].name = bus_tbl[cnt].name;
core->bus_tbl[cnt].min_kbps = bus_tbl[cnt].min_kbps;
core->bus_tbl[cnt].max_kbps = bus_tbl[cnt].max_kbps;
for (cnt = 0; cnt < interconnects->count; cnt++) {
interconnects->bus_tbl[cnt].name = bus_tbl[cnt].name;
interconnects->bus_tbl[cnt].min_kbps = bus_tbl[cnt].min_kbps;
interconnects->bus_tbl[cnt].max_kbps = bus_tbl[cnt].max_kbps;
}

/* print bus fields */
for (cnt = 0; cnt < core->bus_tbl_count; ++cnt) {
binfo = &core->bus_tbl[cnt];
venus_hfi_for_each_bus(core, binfo) {
d_vpr_h("%s: name %s min_kbps %u max_kbps %u\n",
__func__, binfo->name, binfo->min_kbps, binfo->max_kbps);
}

/* get interconnect handle */
for (cnt = 0; cnt < core->bus_tbl_count; ++cnt) {
binfo = &core->bus_tbl[cnt];
venus_hfi_for_each_bus(core, binfo) {
if (!strcmp(binfo->name, "venus-llcc")) {
if (msm_vidc_syscache_disable) {
d_vpr_h("%s: skipping LLC bus init: %s\n", __func__,
@@ -332,11 +340,14 @@ static int __init_power_domains(struct msm_vidc_core *core)
{
struct power_domain_info *pdinfo = NULL;
const struct pd_table *pd_tbl;
struct power_domain_set *pds;
struct device **opp_vdevs = NULL;
const char * const *opp_tbl;
u32 pd_count = 0, opp_count = 0, cnt = 0;
int rc = 0;

pds = &core->resource->power_domain_set;

pd_tbl = core->platform->data.pd_tbl;
pd_count = core->platform->data.pd_tbl_size;

@@ -353,27 +364,24 @@ static int __init_power_domains(struct msm_vidc_core *core)
}

/* allocate power_domain_set */
core->power_domain_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*core->power_domain_tbl) * pd_count, GFP_KERNEL);
if (!core->power_domain_tbl) {
pds->power_domain_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*pds->power_domain_tbl) * pd_count, GFP_KERNEL);
if (!pds->power_domain_tbl) {
d_vpr_e("%s: failed to alloc memory for pd table\n", __func__);
return -ENOMEM;
}
core->power_domain_tbl_count = pd_count;
pds->count = pd_count;

/* populate power domain fields */
for (cnt = 0; cnt < core->power_domain_tbl_count; cnt++)
core->power_domain_tbl[cnt].name = pd_tbl[cnt].name;
for (cnt = 0; cnt < pds->count; cnt++)
pds->power_domain_tbl[cnt].name = pd_tbl[cnt].name;

/* print power domain fields */
for (cnt = 0; cnt < core->power_domain_tbl_count; ++cnt) {
pdinfo = &core->power_domain_tbl[cnt];
venus_hfi_for_each_power_domain(core, pdinfo)
d_vpr_h("%s: pd name %s\n", __func__, pdinfo->name);
}

/* get power domain handle */
for (cnt = 0; cnt < core->power_domain_tbl_count; ++cnt) {
pdinfo = &core->power_domain_tbl[cnt];
venus_hfi_for_each_power_domain(core, pdinfo) {
pdinfo->genpd_dev = devm_pd_get(&core->pdev->dev, pdinfo->name);
if (IS_ERR_OR_NULL(pdinfo->genpd_dev)) {
rc = PTR_ERR(pdinfo->genpd_dev) ?
@@ -461,10 +469,13 @@ static int __init_clocks(struct msm_vidc_core *core)
struct clock_residency *residency = NULL;
const struct clk_table *clk_tbl;
struct freq_table *freq_tbl;
struct clock_set *clocks;
struct clock_info *cinfo = NULL;
u32 clk_count = 0, freq_count = 0;
int fcnt = 0, cnt = 0, rc = 0;

clocks = &core->resource->clock_set;

clk_tbl = core->platform->data.clk_tbl;
clk_count = core->platform->data.clk_tbl_size;

@@ -475,31 +486,31 @@ static int __init_clocks(struct msm_vidc_core *core)
}

/* allocate clock_set */
core->clock_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*core->clock_tbl) * clk_count, GFP_KERNEL);
if (!core->clock_tbl) {
clocks->clock_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*clocks->clock_tbl) * clk_count, GFP_KERNEL);
if (!clocks->clock_tbl) {
d_vpr_e("%s: failed to alloc memory for clock table\n", __func__);
return -ENOMEM;
}
core->clock_tbl_count = clk_count;
clocks->count = clk_count;

/* populate clock field from platform data */
for (cnt = 0; cnt < core->clock_tbl_count; cnt++) {
core->clock_tbl[cnt].name = clk_tbl[cnt].name;
core->clock_tbl[cnt].clk_id = clk_tbl[cnt].clk_id;
core->clock_tbl[cnt].has_scaling = clk_tbl[cnt].scaling;
for (cnt = 0; cnt < clocks->count; cnt++) {
clocks->clock_tbl[cnt].name = clk_tbl[cnt].name;
clocks->clock_tbl[cnt].clk_id = clk_tbl[cnt].clk_id;
clocks->clock_tbl[cnt].has_scaling = clk_tbl[cnt].scaling;
}

freq_tbl = core->platform->data.freq_tbl;
freq_count = core->platform->data.freq_tbl_size;

/* populate clk residency stats table */
for (cnt = 0; cnt < core->clock_tbl_count; cnt++) {
for (cnt = 0; cnt < clocks->count; cnt++) {
/* initialize residency_list */
INIT_LIST_HEAD(&core->clock_tbl[cnt].residency_list);
INIT_LIST_HEAD(&clocks->clock_tbl[cnt].residency_list);

/* skip if scaling not supported */
if (!core->clock_tbl[cnt].has_scaling)
if (!clocks->clock_tbl[cnt].has_scaling)
continue;

for (fcnt = 0; fcnt < freq_count; fcnt++) {
@@ -522,20 +533,18 @@ static int __init_clocks(struct msm_vidc_core *core)
INIT_LIST_HEAD(&residency->list);

/* add entry into residency_list */
list_add_tail(&residency->list, &core->clock_tbl[cnt].residency_list);
list_add_tail(&residency->list, &clocks->clock_tbl[cnt].residency_list);
}
}

/* print clock fields */
for (cnt = 0; cnt < core->clock_tbl_count; ++cnt) {
cinfo = &core->clock_tbl[cnt];
venus_hfi_for_each_clock(core, cinfo) {
d_vpr_h("%s: clock name %s clock id %#x scaling %d\n",
__func__, cinfo->name, cinfo->clk_id, cinfo->has_scaling);
}

/* get clock handle */
for (cnt = 0; cnt < core->clock_tbl_count; ++cnt) {
cinfo = &core->clock_tbl[cnt];
venus_hfi_for_each_clock(core, cinfo) {
cinfo->clk = devm_clk_get(&core->pdev->dev, cinfo->name);
if (IS_ERR_OR_NULL(cinfo->clk)) {
d_vpr_e("%s: failed to get clock: %s\n", __func__, cinfo->name);
@@ -552,10 +561,13 @@ static int __init_clocks(struct msm_vidc_core *core)
static int __init_reset_clocks(struct msm_vidc_core *core)
{
const struct clk_rst_table *rst_tbl;
struct reset_set *rsts;
struct reset_info *rinfo = NULL;
u32 rst_count = 0, cnt = 0;
int rc = 0;

rsts = &core->resource->reset_set;

rst_tbl = core->platform->data.clk_rst_tbl;
rst_count = core->platform->data.clk_rst_tbl_size;

@@ -566,30 +578,28 @@ static int __init_reset_clocks(struct msm_vidc_core *core)
}

/* allocate reset_set */
core->reset_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*core->reset_tbl) * rst_count, GFP_KERNEL);
if (!core->reset_tbl) {
rsts->reset_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*rsts->reset_tbl) * rst_count, GFP_KERNEL);
if (!rsts->reset_tbl) {
d_vpr_e("%s: failed to alloc memory for reset table\n", __func__);
return -ENOMEM;
}
core->reset_tbl_count = rst_count;
rsts->count = rst_count;

/* populate clock field from platform data */
for (cnt = 0; cnt < core->reset_tbl_count; cnt++) {
core->reset_tbl[cnt].name = rst_tbl[cnt].name;
core->reset_tbl[cnt].exclusive_release = rst_tbl[cnt].exclusive_release;
for (cnt = 0; cnt < rsts->count; cnt++) {
rsts->reset_tbl[cnt].name = rst_tbl[cnt].name;
rsts->reset_tbl[cnt].exclusive_release = rst_tbl[cnt].exclusive_release;
}

/* print reset clock fields */
for (cnt = 0; cnt < core->reset_tbl_count; ++cnt) {
rinfo = &core->reset_tbl[cnt];
venus_hfi_for_each_reset_clock(core, rinfo) {
d_vpr_h("%s: reset clk %s, exclusive %d\n",
__func__, rinfo->name, rinfo->exclusive_release);
}

/* get reset clock handle */
for (cnt = 0; cnt < core->reset_tbl_count; ++cnt) {
rinfo = &core->reset_tbl[cnt];
venus_hfi_for_each_reset_clock(core, rinfo) {
if (rinfo->exclusive_release)
rinfo->rst = devm_reset_control_get_exclusive_released(
&core->pdev->dev, rinfo->name);
@@ -610,10 +620,13 @@ static int __init_reset_clocks(struct msm_vidc_core *core)
static int __init_subcaches(struct msm_vidc_core *core)
{
const struct subcache_table *llcc_tbl;
struct subcache_set *caches;
struct subcache_info *sinfo = NULL;
u32 llcc_count = 0, cnt = 0;
int rc = 0;

caches = &core->resource->subcache_set;

/* skip init if subcache not available */
if (!is_sys_cache_present(core))
return 0;
@@ -628,30 +641,28 @@ static int __init_subcaches(struct msm_vidc_core *core)
}

/* allocate clock_set */
core->subcache_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*core->subcache_tbl) * llcc_count, GFP_KERNEL);
if (!core->subcache_tbl) {
caches->subcache_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*caches->subcache_tbl) * llcc_count, GFP_KERNEL);
if (!caches->subcache_tbl) {
d_vpr_e("%s: failed to alloc memory for subcache table\n", __func__);
return -ENOMEM;
}
core->subcache_tbl_count = llcc_count;
caches->count = llcc_count;

/* populate subcache fields from platform data */
for (cnt = 0; cnt < core->subcache_tbl_count; cnt++) {
core->subcache_tbl[cnt].name = llcc_tbl[cnt].name;
core->subcache_tbl[cnt].llcc_id = llcc_tbl[cnt].llcc_id;
for (cnt = 0; cnt < caches->count; cnt++) {
caches->subcache_tbl[cnt].name = llcc_tbl[cnt].name;
caches->subcache_tbl[cnt].llcc_id = llcc_tbl[cnt].llcc_id;
}

/* print subcache fields */
for (cnt = 0; cnt < core->subcache_tbl_count; ++cnt) {
sinfo = &core->subcache_tbl[cnt];
venus_hfi_for_each_subcache(core, sinfo) {
d_vpr_h("%s: name %s subcache id %d\n",
__func__, sinfo->name, sinfo->llcc_id);
}

/* get subcache/llcc handle */
for (cnt = 0; cnt < core->subcache_tbl_count; ++cnt) {
sinfo = &core->subcache_tbl[cnt];
venus_hfi_for_each_subcache(core, sinfo) {
sinfo->subcache = devm_llcc_get(&core->pdev->dev, sinfo->llcc_id);
if (IS_ERR_OR_NULL(sinfo->subcache)) {
d_vpr_e("%s: failed to get subcache: %d\n", __func__, sinfo->llcc_id);
@@ -668,9 +679,12 @@ static int __init_subcaches(struct msm_vidc_core *core)
static int __init_freq_table(struct msm_vidc_core *core)
{
struct freq_table *freq_tbl;
struct freq_set *clks;
u32 freq_count = 0, cnt = 0;
int rc = 0;

clks = &core->resource->freq_set;

freq_tbl = core->platform->data.freq_tbl;
freq_count = core->platform->data.freq_tbl_size;

@@ -681,25 +695,25 @@ static int __init_freq_table(struct msm_vidc_core *core)
}

/* allocate freq_set */
core->freq_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*core->freq_tbl) * freq_count, GFP_KERNEL);
if (!core->freq_tbl) {
clks->freq_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*clks->freq_tbl) * freq_count, GFP_KERNEL);
if (!clks->freq_tbl) {
d_vpr_e("%s: failed to alloc memory for freq table\n", __func__);
return -ENOMEM;
}
core->freq_tbl_count = freq_count;
clks->count = freq_count;

/* populate freq field from platform data */
for (cnt = 0; cnt < core->freq_tbl_count; cnt++)
core->freq_tbl[cnt].freq = freq_tbl[cnt].freq;
for (cnt = 0; cnt < clks->count; cnt++)
clks->freq_tbl[cnt].freq = freq_tbl[cnt].freq;

/* sort freq table */
sort(core->freq_tbl, core->freq_tbl_count, sizeof(*core->freq_tbl), cmp, NULL);
sort(clks->freq_tbl, clks->count, sizeof(*clks->freq_tbl), cmp, NULL);

/* print freq field freq_set */
d_vpr_h("%s: updated freq table\n", __func__);
for (cnt = 0; cnt < core->freq_tbl_count; cnt++)
d_vpr_h("%s:\t %lu\n", __func__, core->freq_tbl[cnt].freq);
for (cnt = 0; cnt < clks->count; cnt++)
d_vpr_h("%s:\t %lu\n", __func__, clks->freq_tbl[cnt].freq);

return rc;
}
@@ -707,10 +721,13 @@ static int __init_freq_table(struct msm_vidc_core *core)
static int __init_context_banks(struct msm_vidc_core *core)
{
const struct context_bank_table *cb_tbl;
struct context_bank_set *cbs;
struct context_bank_info *cbinfo = NULL;
u32 cb_count = 0, cnt = 0;
int rc = 0;

cbs = &core->resource->context_bank_set;

cb_tbl = core->platform->data.context_bank_tbl;
cb_count = core->platform->data.context_bank_tbl_size;

@@ -721,32 +738,31 @@ static int __init_context_banks(struct msm_vidc_core *core)
}

/* allocate context_bank table */
core->context_bank_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*core->context_bank_tbl) * cb_count, GFP_KERNEL);
if (!core->context_bank_tbl) {
cbs->context_bank_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*cbs->context_bank_tbl) * cb_count, GFP_KERNEL);
if (!cbs->context_bank_tbl) {
d_vpr_e("%s: failed to alloc memory for context_bank table\n", __func__);
return -ENOMEM;
}
core->context_bank_tbl_count = cb_count;
cbs->count = cb_count;

/**
* populate context bank field from platform data except
* dev & domain which are assigned as part of context bank
* probe sequence
*/
for (cnt = 0; cnt < core->context_bank_tbl_count; cnt++) {
core->context_bank_tbl[cnt].name = cb_tbl[cnt].name;
core->context_bank_tbl[cnt].addr_range.start = cb_tbl[cnt].start;
core->context_bank_tbl[cnt].addr_range.size = cb_tbl[cnt].size;
core->context_bank_tbl[cnt].secure = cb_tbl[cnt].secure;
core->context_bank_tbl[cnt].dma_coherant = cb_tbl[cnt].dma_coherant;
core->context_bank_tbl[cnt].region = cb_tbl[cnt].region;
core->context_bank_tbl[cnt].dma_mask = cb_tbl[cnt].dma_mask;
for (cnt = 0; cnt < cbs->count; cnt++) {
cbs->context_bank_tbl[cnt].name = cb_tbl[cnt].name;
cbs->context_bank_tbl[cnt].addr_range.start = cb_tbl[cnt].start;
cbs->context_bank_tbl[cnt].addr_range.size = cb_tbl[cnt].size;
cbs->context_bank_tbl[cnt].secure = cb_tbl[cnt].secure;
cbs->context_bank_tbl[cnt].dma_coherant = cb_tbl[cnt].dma_coherant;
cbs->context_bank_tbl[cnt].region = cb_tbl[cnt].region;
cbs->context_bank_tbl[cnt].dma_mask = cb_tbl[cnt].dma_mask;
}

/* print context_bank fiels */
for (cnt = 0; cnt < core->context_bank_tbl_count; ++cnt) {
cbinfo = &core->context_bank_tbl[cnt];
venus_hfi_for_each_context_bank(core, cbinfo) {
d_vpr_h("%s: name %s addr start %#x size %#x secure %d "
"coherant %d region %d dma_mask %llu\n",
__func__, cbinfo->name, cbinfo->addr_range.start,
@@ -760,10 +776,13 @@ static int __init_context_banks(struct msm_vidc_core *core)
static int __init_device_region(struct msm_vidc_core *core)
{
const struct device_region_table *dev_reg_tbl;
struct device_region_set *dev_set;
struct device_region_info *dev_reg_info;
u32 dev_reg_count = 0, cnt = 0;
int rc = 0;

dev_set = &core->resource->device_region_set;

dev_reg_tbl = core->platform->data.dev_reg_tbl;
dev_reg_count = core->platform->data.dev_reg_tbl_size;

@@ -773,26 +792,25 @@ static int __init_device_region(struct msm_vidc_core *core)
}

/* allocate device region table */
core->device_region_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*core->device_region_tbl) * dev_reg_count, GFP_KERNEL);
if (!core->device_region_tbl) {
dev_set->device_region_tbl = devm_kzalloc(&core->pdev->dev,
sizeof(*dev_set->device_region_tbl) * dev_reg_count, GFP_KERNEL);
if (!dev_set->device_region_tbl) {
d_vpr_e("%s: failed to alloc memory for device region table\n", __func__);
return -ENOMEM;
}
core->device_region_tbl_count = dev_reg_count;
dev_set->count = dev_reg_count;

/* populate device region fields from platform data */
for (cnt = 0; cnt < core->device_region_tbl_count; cnt++) {
core->device_region_tbl[cnt].name = dev_reg_tbl[cnt].name;
core->device_region_tbl[cnt].phy_addr = dev_reg_tbl[cnt].phy_addr;
core->device_region_tbl[cnt].size = dev_reg_tbl[cnt].size;
core->device_region_tbl[cnt].dev_addr = dev_reg_tbl[cnt].dev_addr;
core->device_region_tbl[cnt].region = dev_reg_tbl[cnt].region;
for (cnt = 0; cnt < dev_set->count; cnt++) {
dev_set->device_region_tbl[cnt].name = dev_reg_tbl[cnt].name;
dev_set->device_region_tbl[cnt].phy_addr = dev_reg_tbl[cnt].phy_addr;
dev_set->device_region_tbl[cnt].size = dev_reg_tbl[cnt].size;
dev_set->device_region_tbl[cnt].dev_addr = dev_reg_tbl[cnt].dev_addr;
dev_set->device_region_tbl[cnt].region = dev_reg_tbl[cnt].region;
}

/* print device region fields */
for (cnt = 0; cnt < core->device_region_tbl_count; ++cnt) {
dev_reg_info = &core->device_region_tbl[cnt];
venus_hfi_for_each_device_region(core, dev_reg_info) {
d_vpr_h("%s: name %s phy_addr %#llx size %#x dev_addr %#x dev_region %d\n",
__func__, dev_reg_info->name, dev_reg_info->phy_addr, dev_reg_info->size,
dev_reg_info->dev_addr, dev_reg_info->region);
@@ -804,9 +822,8 @@ static int __init_device_region(struct msm_vidc_core *core)
#ifdef CONFIG_MSM_MMRM
static int __register_mmrm(struct msm_vidc_core *core)
{
struct clock_info *cl;
u32 cnt = 0;
int rc = 0;
struct clock_info *cl;

/* skip if platform does not support mmrm */
if (!is_mmrm_supported(core)) {
@@ -815,8 +832,7 @@ static int __register_mmrm(struct msm_vidc_core *core)
}

/* get mmrm handle for each clock sources */
for (cnt = 0; cnt < core->clock_tbl_count; ++cnt) {
cl = &core->clock_tbl[cnt];
venus_hfi_for_each_clock(core, cl) {
struct mmrm_client_desc desc;
char *name = (char *)desc.client_info.desc.name;

@@ -879,7 +895,6 @@ static int __register_mmrm(struct msm_vidc_core *core)
static int __enable_power_domains(struct msm_vidc_core *core, const char *name)
{
struct power_domain_info *pdinfo = NULL;
u32 cnt = 0;
int rc = 0;

/* power up rails(mxc & mmcx) to enable RCG(video_cc_mvs0_clk_src) */
@@ -892,8 +907,7 @@ static int __enable_power_domains(struct msm_vidc_core *core, const char *name)
}

/* power up (gdsc0/gdsc0c) to enable (mvs0/mvs0c) branch clock */
for (cnt = 0; cnt < core->power_domain_tbl_count; ++cnt) {
pdinfo = &core->power_domain_tbl[cnt];
venus_hfi_for_each_power_domain(core, pdinfo) {
if (strcmp(pdinfo->name, name))
continue;

@@ -914,12 +928,10 @@ static int __enable_power_domains(struct msm_vidc_core *core, const char *name)
static int __disable_power_domains(struct msm_vidc_core *core, const char *name)
{
struct power_domain_info *pdinfo = NULL;
u32 cnt = 0;
int rc = 0;

/* power down (gdsc0/gdsc0c) to disable (mvs0/mvs0c) branch clock */
for (cnt = 0; cnt < core->power_domain_tbl_count; ++cnt) {
pdinfo = &core->power_domain_tbl[cnt];
venus_hfi_for_each_power_domain(core, pdinfo) {
if (strcmp(pdinfo->name, name))
continue;

@@ -991,15 +1003,13 @@ static int __acquire_power_domains(struct msm_vidc_core *core)
static int __disable_subcaches(struct msm_vidc_core *core)
{
struct subcache_info *sinfo;
u32 cnt = 0;
int rc = 0;

if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
return 0;

/* De-activate subcaches */
for (cnt = core->subcache_tbl_count; cnt > 0; --cnt) {
sinfo = &core->subcache_tbl[cnt-1];
venus_hfi_for_each_subcache_reverse(core, sinfo) {
if (!sinfo->isactive)
continue;

@@ -1017,16 +1027,15 @@ static int __disable_subcaches(struct msm_vidc_core *core)

static int __enable_subcaches(struct msm_vidc_core *core)
{
struct subcache_info *sinfo;
u32 cnt = 0;
int rc = 0;
u32 c = 0;
struct subcache_info *sinfo;

if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
return 0;

/* Activate subcaches */
for (cnt = 0; cnt < core->subcache_tbl_count; ++cnt) {
sinfo = &core->subcache_tbl[cnt];
venus_hfi_for_each_subcache(core, sinfo) {
rc = llcc_slice_activate(sinfo->subcache);
if (rc) {
d_vpr_e("Failed to activate %s: %d\n", sinfo->name, rc);
@@ -1035,9 +1044,10 @@ static int __enable_subcaches(struct msm_vidc_core *core)
}
sinfo->isactive = true;
d_vpr_h("Activated subcache %s\n", sinfo->name);
c++;
}

d_vpr_h("Activated %d Subcaches to Venus\n", cnt);
d_vpr_h("Activated %d Subcaches to Venus\n", c);

return 0;

@@ -1079,15 +1089,13 @@ static int __vote_bandwidth(struct bus_info *bus, unsigned long bw_kbps)

static int __unvote_buses(struct msm_vidc_core *core)
{
struct bus_info *bus = NULL;
u32 cnt = 0;
int rc = 0;
struct bus_info *bus = NULL;

core->power.bw_ddr = 0;
core->power.bw_llcc = 0;

for (cnt = 0; cnt < core->bus_tbl_count; ++cnt) {
bus = &core->bus_tbl[cnt];
venus_hfi_for_each_bus(core, bus) {
rc = __vote_bandwidth(bus, 0);
if (rc)
goto err_unknown_device;
@@ -1100,14 +1108,12 @@ err_unknown_device:
static int __vote_buses(struct msm_vidc_core *core,
unsigned long bw_ddr, unsigned long bw_llcc)
{
unsigned long bw_kbps = 0, bw_prev = 0;
struct bus_info *bus = NULL;
enum vidc_bus_type type;
u32 cnt = 0;
int rc = 0;
struct bus_info *bus = NULL;
unsigned long bw_kbps = 0, bw_prev = 0;
enum vidc_bus_type type;

for (cnt = 0; cnt < core->bus_tbl_count; ++cnt) {
bus = &core->bus_tbl[cnt];
venus_hfi_for_each_bus(core, bus) {
if (bus && bus->icc) {
type = get_type_frm_name(bus->name);

@@ -1310,7 +1316,6 @@ static int __set_clk_rate(struct msm_vidc_core *core, struct clock_info *cl,
static int __set_clocks(struct msm_vidc_core *core, u64 freq)
{
struct clock_info *cl;
u32 cnt = 0;
int rc = 0;

/* scale mxc & mmcx rails */
@@ -1320,8 +1325,7 @@ static int __set_clocks(struct msm_vidc_core *core, u64 freq)
return rc;
}

for (cnt = 0; cnt < core->clock_tbl_count; ++cnt) {
cl = &core->clock_tbl[cnt];
venus_hfi_for_each_clock(core, cl) {
if (cl->has_scaling) {
rc = __set_clk_rate(core, cl, freq);
if (rc)
@@ -1335,14 +1339,12 @@ static int __set_clocks(struct msm_vidc_core *core, u64 freq)
static int __disable_unprepare_clock(struct msm_vidc_core *core,
const char *clk_name)
{
int rc = 0;
struct clock_info *cl;
bool found;
u32 cnt = 0;
int rc = 0;

found = false;
for (cnt = 0; cnt < core->clock_tbl_count; ++cnt) {
cl = &core->clock_tbl[cnt];
venus_hfi_for_each_clock(core, cl) {
if (!cl->clk) {
d_vpr_e("%s: invalid clock %s\n", __func__, cl->name);
return -EINVAL;
@@ -1368,15 +1370,13 @@ static int __disable_unprepare_clock(struct msm_vidc_core *core,
static int __prepare_enable_clock(struct msm_vidc_core *core,
const char *clk_name)
{
struct clock_info *cl;
u64 rate = 0;
u32 cnt = 0;
int rc = 0;
struct clock_info *cl;
bool found;
u64 rate = 0;

found = false;
for (cnt = 0; cnt < core->clock_tbl_count; ++cnt) {
cl = &core->clock_tbl[cnt];
venus_hfi_for_each_clock(core, cl) {
if (!cl->clk) {
d_vpr_e("%s: invalid clock\n", __func__);
return -EINVAL;
@@ -1485,10 +1485,8 @@ static int __reset_control_acquire_name(struct msm_vidc_core *core,
struct reset_info *rcinfo = NULL;
int rc = 0, count = 0;
bool found = false;
u32 cnt = 0;

for (cnt = 0; cnt < core->reset_tbl_count; ++cnt) {
rcinfo = &core->reset_tbl[cnt];
venus_hfi_for_each_reset_clock(core, rcinfo) {
if (strcmp(rcinfo->name, name))
continue;

@@ -1545,12 +1543,10 @@ static int __reset_control_release_name(struct msm_vidc_core *core,
const char *name)
{
struct reset_info *rcinfo = NULL;
bool found = false;
u32 cnt = 0;
int rc = 0;
bool found = false;

for (cnt = 0; cnt < core->reset_tbl_count; ++cnt) {
rcinfo = &core->reset_tbl[cnt];
venus_hfi_for_each_reset_clock(core, rcinfo) {
if (strcmp(rcinfo->name, name))
continue;

@@ -1591,13 +1587,11 @@ static int __reset_control_release_name(struct msm_vidc_core *core,
static int __reset_control_assert_name(struct msm_vidc_core *core,
const char *name)
{
bool found = false;
struct reset_info *rcinfo = NULL;
u32 cnt = 0;
int rc = 0;
bool found = false;

for (cnt = 0; cnt < core->reset_tbl_count; ++cnt) {
rcinfo = &core->reset_tbl[cnt];
venus_hfi_for_each_reset_clock(core, rcinfo) {
if (strcmp(rcinfo->name, name))
continue;

@@ -1623,12 +1617,10 @@ static int __reset_control_deassert_name(struct msm_vidc_core *core,
|
||||
const char *name)
|
||||
{
|
||||
struct reset_info *rcinfo = NULL;
|
||||
bool found = false;
|
||||
u32 cnt = 0;
|
||||
int rc = 0;
|
||||
bool found = false;
|
||||
|
||||
for (cnt = 0; cnt < core->reset_tbl_count; ++cnt) {
|
||||
rcinfo = &core->reset_tbl[cnt];
|
||||
venus_hfi_for_each_reset_clock(core, rcinfo) {
|
||||
if (strcmp(rcinfo->name, name))
|
||||
continue;
|
||||
found = true;
|
||||
@@ -1652,11 +1644,9 @@ static int __reset_control_deassert_name(struct msm_vidc_core *core,
|
||||
static int __reset_control_deassert(struct msm_vidc_core *core)
|
||||
{
|
||||
struct reset_info *rcinfo = NULL;
|
||||
u32 cnt = 0;
|
||||
int rc = 0;
|
||||
|
||||
for (cnt = 0; cnt < core->reset_tbl_count; ++cnt) {
|
||||
rcinfo = &core->reset_tbl[cnt];
|
||||
venus_hfi_for_each_reset_clock(core, rcinfo) {
|
||||
rc = reset_control_deassert(rcinfo->rst);
|
||||
if (rc) {
|
||||
d_vpr_e("%s: deassert reset control failed. rc = %d\n", __func__, rc);
|
||||
@@ -1671,10 +1661,9 @@ static int __reset_control_deassert(struct msm_vidc_core *core)
|
||||
static int __reset_control_assert(struct msm_vidc_core *core)
|
||||
{
|
||||
struct reset_info *rcinfo = NULL;
|
||||
int rc = 0, cnt = 0, rcnt = 0;
|
||||
int rc = 0, cnt = 0;
|
||||
|
||||
for (cnt = 0; cnt < core->reset_tbl_count; ++cnt) {
|
||||
rcinfo = &core->reset_tbl[cnt];
|
||||
venus_hfi_for_each_reset_clock(core, rcinfo) {
|
||||
if (!rcinfo->rst) {
|
||||
d_vpr_e("%s: invalid reset clock %s\n",
|
||||
__func__, rcinfo->name);
|
||||
@@ -1694,8 +1683,7 @@ static int __reset_control_assert(struct msm_vidc_core *core)
|
||||
|
||||
return rc;
|
||||
deassert_reset_control:
|
||||
for (rcnt = cnt; rcnt > 0; --rcnt) {
|
||||
rcinfo = &core->reset_tbl[rcnt-1];
|
||||
venus_hfi_for_each_reset_clock_reverse_continue(core, rcinfo, cnt) {
|
||||
d_vpr_e("%s: deassert reset control %s\n", __func__, rcinfo->name);
|
||||
reset_control_deassert(rcinfo->rst);
|
||||
}
|
||||
@@ -1721,11 +1709,9 @@ static int __reset_ahb2axi_bridge(struct msm_vidc_core *core)
|
||||
static int __print_clock_residency_stats(struct msm_vidc_core *core)
|
||||
{
|
||||
struct clock_info *cl;
|
||||
u32 cnt = 0;
|
||||
int rc = 0;
|
||||
|
||||
for (cnt = 0; cnt < core->clock_tbl_count; ++cnt) {
|
||||
cl = &core->clock_tbl[cnt];
|
||||
venus_hfi_for_each_clock(core, cl) {
|
||||
/* skip if scaling not supported */
|
||||
if (!cl->has_scaling)
|
||||
continue;
|
||||
@@ -1746,11 +1732,9 @@ static int __print_clock_residency_stats(struct msm_vidc_core *core)
|
||||
static int __reset_clock_residency_stats(struct msm_vidc_core *core)
|
||||
{
|
||||
struct clock_info *cl;
|
||||
u32 cnt = 0;
|
||||
int rc = 0;
|
||||
|
||||
for (cnt = 0; cnt < core->clock_tbl_count; ++cnt) {
|
||||
cl = &core->clock_tbl[cnt];
|
||||
venus_hfi_for_each_clock(core, cl) {
|
||||
/* skip if scaling not supported */
|
||||
if (!cl->has_scaling)
|
||||
continue;
|
||||
|
||||
@@ -26,10 +26,13 @@ static void __fatal_error(bool fatal)
static int __init_regulators(struct msm_vidc_core *core)
{
	const struct regulator_table *regulator_tbl;
	struct regulator_set *regulators;
	struct regulator_info *rinfo = NULL;
	u32 regulator_count = 0, cnt = 0;
	int rc = 0;

	regulators = &core->resource->regulator_set;

	regulator_tbl = core->platform->data.regulator_tbl;
	regulator_count = core->platform->data.regulator_tbl_size;

@@ -46,30 +49,28 @@ static int __init_regulators(struct msm_vidc_core *core)
	}

	/* allocate regulator_set */
	core->regulator_tbl = devm_kzalloc(&core->pdev->dev,
		sizeof(*core->regulator_tbl) * regulator_count, GFP_KERNEL);
	if (!core->regulator_tbl) {
	regulators->regulator_tbl = devm_kzalloc(&core->pdev->dev,
		sizeof(*regulators->regulator_tbl) * regulator_count, GFP_KERNEL);
	if (!regulators->regulator_tbl) {
		d_vpr_e("%s: failed to alloc memory for regulator table\n", __func__);
		return -ENOMEM;
	}
	core->regulator_tbl_count = regulator_count;
	regulators->count = regulator_count;

	/* populate regulator fields */
	for (cnt = 0; cnt < core->regulator_tbl_count; cnt++) {
		core->regulator_tbl[cnt].name = regulator_tbl[cnt].name;
		core->regulator_tbl[cnt].hw_power_collapse = regulator_tbl[cnt].hw_trigger;
	for (cnt = 0; cnt < regulators->count; cnt++) {
		regulators->regulator_tbl[cnt].name = regulator_tbl[cnt].name;
		regulators->regulator_tbl[cnt].hw_power_collapse = regulator_tbl[cnt].hw_trigger;
	}

	/* print regulator fields */
	for (cnt = 0; cnt < core->regulator_tbl_count; ++cnt) {
		rinfo = &core->regulator_tbl[cnt];
	venus_hfi_for_each_regulator(core, rinfo) {
		d_vpr_h("%s: name %s hw_power_collapse %d\n",
			__func__, rinfo->name, rinfo->hw_power_collapse);
	}

	/* get regulator handle */
	for (cnt = 0; cnt < core->regulator_tbl_count; ++cnt) {
		rinfo = &core->regulator_tbl[cnt];
	venus_hfi_for_each_regulator(core, rinfo) {
		rinfo->regulator = devm_regulator_get(&core->pdev->dev, rinfo->name);
		if (IS_ERR_OR_NULL(rinfo->regulator)) {
			rc = PTR_ERR(rinfo->regulator) ?
@@ -205,14 +206,12 @@ fail_assert_xo_reset:

static int __enable_regulator(struct msm_vidc_core *core, const char *reg_name)
{
	int rc = 0;
	struct regulator_info *rinfo;
	bool found;
	u32 cnt = 0;
	int rc = 0;

	found = false;
	for (cnt = 0; cnt < core->regulator_tbl_count; ++cnt) {
		rinfo = &core->regulator_tbl[cnt];
	venus_hfi_for_each_regulator(core, rinfo) {
		if (!rinfo->regulator) {
			d_vpr_e("%s: invalid regulator %s\n",
				__func__, rinfo->name);
@@ -264,14 +263,12 @@ fail_assert_xo_reset:

static int __disable_regulator(struct msm_vidc_core *core, const char *reg_name)
{
	struct regulator_info *rinfo;
	u32 cnt = 0;
	int rc = 0;
	struct regulator_info *rinfo;
	bool found;

	found = false;
	for (cnt = 0; cnt < core->regulator_tbl_count; ++cnt) {
		rinfo = &core->regulator_tbl[cnt];
	venus_hfi_for_each_regulator(core, rinfo) {
		if (!rinfo->regulator) {
			d_vpr_e("%s: invalid regulator %s\n",
				__func__, rinfo->name);
@@ -328,11 +325,9 @@ fail_assert_xo_reset:
static int __hand_off_regulators(struct msm_vidc_core *core)
{
	struct regulator_info *rinfo;
	u32 cnt = 0, rcnt = 0;
	int rc = 0;
	int rc = 0, c = 0;

	for (cnt = 0; cnt < core->regulator_tbl_count; ++cnt) {
		rinfo = &core->regulator_tbl[cnt];
	venus_hfi_for_each_regulator(core, rinfo) {
		rc = __hand_off_regulator(core, rinfo);
		/*
		 * If one regulator hand off failed, driver should take
@@ -340,30 +335,26 @@ static int __hand_off_regulators(struct msm_vidc_core *core)
		 */
		if (rc)
			goto err_reg_handoff_failed;
		c++;
	}

	return rc;

err_reg_handoff_failed:
	for (rcnt = cnt; rcnt > 0; --rcnt) {
		rinfo = &core->regulator_tbl[rcnt-1];
	venus_hfi_for_each_regulator_reverse_continue(core, rinfo, c)
		__acquire_regulator(core, rinfo);
	}

	return rc;
}

static int __acquire_regulators(struct msm_vidc_core *core)
{
	int rc = 0;
	struct regulator_info *rinfo;
	u32 cnt = 0;

	for (cnt = 0; cnt < core->regulator_tbl_count; ++cnt) {
		rinfo = &core->regulator_tbl[cnt];
	venus_hfi_for_each_regulator(core, rinfo)
		__acquire_regulator(core, rinfo);
	}

	return 0;
	return rc;
}

#ifdef CONFIG_MSM_MMRM
@@ -461,12 +452,10 @@ static int __set_clk_rate(struct msm_vidc_core *core, struct clock_info *cl,

static int __set_clocks_ext(struct msm_vidc_core *core, u64 freq)
{
	struct clock_info *cl;
	u32 cnt = 0;
	int rc = 0;
	struct clock_info *cl;

	for (cnt = 0; cnt < core->clock_tbl_count; ++cnt) {
		cl = &core->clock_tbl[cnt];
	venus_hfi_for_each_clock(core, cl) {
		if (cl->has_scaling) {
			rc = __set_clk_rate(core, cl, freq);
			if (rc)
@@ -509,15 +498,13 @@ static int qcom_clk_get_branch_flag(enum msm_vidc_branch_mem_flags vidc_flag,
static int __clock_set_flag_ext(struct msm_vidc_core *core,
		const char *name, enum msm_vidc_branch_mem_flags flag)
{
	struct clock_info *cinfo = NULL;
	enum branch_mem_flags mem_flag;
	bool found = false;
	u32 cnt = 0;
	int rc = 0;
	struct clock_info *cinfo = NULL;
	bool found = false;
	enum branch_mem_flags mem_flag;

	/* get clock handle */
	for (cnt = 0; cnt < core->clock_tbl_count; ++cnt) {
		cinfo = &core->clock_tbl[cnt];
	venus_hfi_for_each_clock(core, cinfo) {
		if (strcmp(cinfo->name, name))
			continue;
		found = true;

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/iommu.h>
@@ -64,29 +64,6 @@ int __strict_check(struct msm_vidc_core *core, const char *function)
	return fatal ? -EINVAL : 0;
}

static bool __is_valid_instance(struct msm_vidc_core *core,
		struct msm_vidc_inst *inst, const char *func)
{
	bool valid = false;
	struct msm_vidc_inst *temp;
	int rc = 0;

	rc = __strict_check(core, func);
	if (rc)
		return false;

	list_for_each_entry(temp, &core->instances, list) {
		if (temp == inst) {
			valid = true;
			break;
		}
	}
	if (!valid)
		i_vpr_e(inst, "%s: invalid inst\n", func);

	return valid;
}

static void __schedule_power_collapse_work(struct msm_vidc_core *core)
{
	if (!core->capabilities[SW_PC].value) {
@@ -306,15 +283,14 @@ skip_power_off:

static int __release_subcaches(struct msm_vidc_core *core)
{
	int rc = 0;
	struct subcache_info *sinfo;
	struct hfi_buffer buf;
	u32 cnt = 0;
	int rc = 0;

	if (msm_vidc_syscache_disable || !is_sys_cache_present(core))
		return 0;

	if (!core->is_subcache_set_to_fw) {
	if (!core->resource->subcache_set.set_to_fw) {
		d_vpr_h("Subcaches not set to Venus\n");
		return 0;
	}
@@ -328,8 +304,7 @@ static int __release_subcaches(struct msm_vidc_core *core)
	buf.type = HFI_BUFFER_SUBCACHE;
	buf.flags = HFI_BUF_HOST_FLAG_RELEASE;

	for (cnt = core->subcache_tbl_count; cnt > 0; --cnt) {
		sinfo = &core->subcache_tbl[cnt-1];
	venus_hfi_for_each_subcache_reverse(core, sinfo) {
		if (!sinfo->isactive)
			continue;

@@ -354,8 +329,7 @@ static int __release_subcaches(struct msm_vidc_core *core)
	if (rc)
		return rc;

	for (cnt = core->subcache_tbl_count; cnt > 0; --cnt) {
		sinfo = &core->subcache_tbl[cnt-1];
	venus_hfi_for_each_subcache_reverse(core, sinfo) {
		if (!sinfo->isactive)
			continue;

@@ -363,24 +337,23 @@ static int __release_subcaches(struct msm_vidc_core *core)
			__func__, sinfo->subcache->slice_id,
			sinfo->subcache->slice_size);
	}
	core->is_subcache_set_to_fw = false;
	core->resource->subcache_set.set_to_fw = false;

	return 0;
}

static int __set_subcaches(struct msm_vidc_core *core)
{
	int rc = 0;
	struct subcache_info *sinfo;
	struct hfi_buffer buf;
	u32 cnt = 0;
	int rc = 0;

	if (msm_vidc_syscache_disable ||
		!is_sys_cache_present(core)) {
		return 0;
	}

	if (core->is_subcache_set_to_fw) {
	if (core->resource->subcache_set.set_to_fw) {
		d_vpr_h("Subcaches already set to Venus\n");
		return 0;
	}
@@ -394,8 +367,7 @@ static int __set_subcaches(struct msm_vidc_core *core)
	buf.type = HFI_BUFFER_SUBCACHE;
	buf.flags = HFI_BUF_HOST_FLAG_NONE;

	for (cnt = 0; cnt < core->subcache_tbl_count; ++cnt) {
		sinfo = &core->subcache_tbl[cnt];
	venus_hfi_for_each_subcache(core, sinfo) {
		if (!sinfo->isactive)
			continue;
		buf.index = sinfo->subcache->slice_id;
@@ -419,15 +391,14 @@ static int __set_subcaches(struct msm_vidc_core *core)
	if (rc)
		goto err_fail_set_subacaches;

	for (cnt = 0; cnt < core->subcache_tbl_count; ++cnt) {
		sinfo = &core->subcache_tbl[cnt];
	venus_hfi_for_each_subcache(core, sinfo) {
		if (!sinfo->isactive)
			continue;
		d_vpr_h("%s: set Subcache id %d size %lu done\n",
			__func__, sinfo->subcache->slice_id,
			sinfo->subcache->slice_size);
	}
	core->is_subcache_set_to_fw = true;
	core->resource->subcache_set.set_to_fw = true;

	return 0;

@@ -627,7 +598,7 @@ fail_power:

void __unload_fw(struct msm_vidc_core *core)
{
	if (!core->fw_cookie)
	if (!core->resource->fw_cookie)
		return;

	cancel_delayed_work(&core->pm_work);
@@ -640,7 +611,7 @@ void __unload_fw(struct msm_vidc_core *core)
	d_vpr_h("%s unloaded video firmware\n", __func__);
}

static inline struct msm_vidc_inst *find_instance(
static inline struct msm_vidc_inst *get_inst(
	struct msm_vidc_inst *const *const instances, const s32 count, u32 session_id)
{
	struct msm_vidc_inst *inst = NULL;
@@ -680,27 +651,15 @@ static int __process_msg_q(struct msm_vidc_core *core,
	if (!hdr->session_id) {
		rc = handle_system_response(core, hdr);
	} else {
		bool local_inst = false;

		inst = find_instance(instances, num_instances, hdr->session_id);
		inst = get_inst(instances, num_instances, hdr->session_id);
		if (!inst) {
			d_vpr_l("%s: inst not found in cache - %#x\n",
				__func__, hdr->session_id);
			inst = get_inst(core, hdr->session_id);
			if (!inst) {
				d_vpr_e("%s: Invalid inst - %#x\n",
					__func__, hdr->session_id);
				rc = -EINVAL;
				goto error;
			}
			local_inst = true;
			d_vpr_e("%s: Invalid inst - %#x\n", __func__, hdr->session_id);
			rc = -EINVAL;
			goto error;
		}
		inst_lock(inst, __func__);
		rc = handle_session_response(inst, hdr);
		inst_unlock(inst, __func__);

		if (local_inst)
			put_inst(inst);
	}
error:
	if (rc)
@@ -1047,9 +1006,7 @@ static int venus_hfi_session_command_locked(struct msm_vidc_inst *inst,
	 * make sure to always allow sync cmd(even if session is in error state),
	 * that will help to do a proper cleanup at FW side.
	 */
	if (!(is_sync_session_cmd(pkt_type) &&
	      __is_valid_instance(core, inst, __func__)) &&
	      is_session_error(inst)) {
	if (!is_sync_session_cmd(pkt_type) && is_session_error(inst)) {
		i_vpr_e(inst, "%s: failled. Session error. cmd %#x\n", func, pkt_type);
		return -EINVAL;
	}
@@ -1950,15 +1907,13 @@ struct device_region_info *venus_hfi_get_device_region_info(
	struct msm_vidc_core *core, enum msm_vidc_device_region region)
{
	struct device_region_info *dev_reg = NULL, *match = NULL;
	u32 cnt;

	if (!region || region >= MSM_VIDC_DEVICE_REGION_MAX) {
		d_vpr_e("%s: invalid region %#x\n", __func__, region);
		return NULL;
	}

	for (cnt = 0; cnt < core->device_region_tbl_count; ++cnt) {
		dev_reg = &core->device_region_tbl[cnt];
	venus_hfi_for_each_device_region(core, dev_reg) {
		if (dev_reg->region == region) {
			match = dev_reg;
			break;

@@ -16,6 +16,10 @@
#include "msm_vidc_fence.h"
#include "msm_vidc_platform.h"

#if IS_ENABLED(CONFIG_SEC_ABC)
#include <linux/sti/abc_common.h>
#endif

#define is_in_range(range, val) (((range.begin) < (val)) && ((range.end) > (val)))

extern struct msm_vidc_core *g_core;
@@ -71,7 +75,7 @@ static void print_sfr_message(struct msm_vidc_core *core)
			vsfr->buf_size, core->sfr.mem_size);
		return;
	}
	vsfr_size = core->sfr.mem_size - sizeof(u32);
	vsfr_size = vsfr->buf_size - sizeof(u32);
	p = memchr(vsfr->rg_data, '\0', vsfr_size);
	/* SFR isn't guaranteed to be NULL terminated */
	if (p == NULL)
@@ -402,10 +406,7 @@ int handle_system_error(struct msm_vidc_core *core,
		}
	}

	core_lock(core, __func__);
	msm_vidc_change_core_state(core, MSM_VIDC_CORE_ERROR, __func__);
	msm_vidc_core_deinit_locked(core, true);
	core_unlock(core, __func__);
	msm_vidc_core_deinit(core, true);

	return 0;
}
@@ -1894,6 +1895,16 @@ int handle_system_response(struct msm_vidc_core *core,
	if (packet->flags & HFI_FW_FLAGS_SYSTEM_ERROR) {
		d_vpr_e("%s: received system error %#x\n",
			__func__, packet->type);
#if IS_ENABLED(CONFIG_SEC_ABC)
		if (packet->type == 0x5000003) {
			sec_abc_send_event("MODULE=mm@WARN=vidc_sys_err_type3");
			d_vpr_e("%s: ABC report vidc_sys_err_type3(0x5000003)\n", __func__);
		}
		else if (packet->type == 0x5000002) {
			sec_abc_send_event("MODULE=mm@WARN=vidc_sys_err_type2");
			d_vpr_e("%s: ABC report vidc_sys_err_type2(0x5000002)\n", __func__);
		}
#endif
		rc = handle_system_error(core, packet);
		if (rc)
			goto exit;