sm8750: init kernel modules repo

2025-08-11 12:21:01 +02:00
parent 2681143b87
commit facad83b01
8851 changed files with 6894561 additions and 0 deletions


@@ -0,0 +1,36 @@
headers_src = [
"sync_fence/include/uapi/*/**/*.h",
]
mm_drivers_headers_out = [
"sync_fence/qcom_sync_file.h",
]
mm_drivers_kernel_headers_verbose = "--verbose "
genrule {
name: "qti_generate_mm_drivers_kernel_headers",
tools: [
"headers_install.sh",
"unifdef"
],
tool_files: [
"mm_drivers_kernel_headers.py",
],
srcs: headers_src,
cmd: "python3 $(location mm_drivers_kernel_headers.py) " +
mm_drivers_kernel_headers_verbose +
"--header_arch arm64 " +
"--gen_dir $(genDir) " +
"--mm_drivers_include_uapi $(locations sync_fence/include/uapi/*/**/*.h) " +
"--unifdef $(location unifdef) " +
"--headers_install $(location headers_install.sh)",
out: mm_drivers_headers_out,
}
cc_library_headers {
name: "qti_mm_drivers_kernel_headers",
generated_headers: ["qti_generate_mm_drivers_kernel_headers"],
export_generated_headers: ["qti_generate_mm_drivers_kernel_headers"],
vendor: true,
recovery_available: true
}


@@ -0,0 +1,16 @@
MM_DRIVER_PATH := $(call my-dir)
MM_DRV_DLKM_ENABLE := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
MM_DRV_DLKM_ENABLE := false
endif
endif
ifeq ($(MM_DRV_DLKM_ENABLE), true)
include $(MM_DRIVER_PATH)/msm_ext_display/Android.mk
ifneq ($(TARGET_BOARD_PLATFORM), taro)
include $(MM_DRIVER_PATH)/hw_fence/Android.mk
include $(MM_DRIVER_PATH)/sync_fence/Android.mk
endif
endif


@@ -0,0 +1,24 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
package(
default_visibility = [
"//visibility:public",
],
)
ddk_headers(
name = "mm_drivers_configs",
hdrs = glob(["config/*.h"]),
includes = ["config"],
)
ddk_headers(
name = "mm_drivers_headers",
hdrs = [
":mm_drivers_configs",
"//vendor/qcom/opensource/mm-drivers/hw_fence:hw_fence_headers",
"//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_uapi_headers",
"//vendor/qcom/opensource/mm-drivers/msm_ext_display:msm_ext_display_headers",
"//vendor/qcom/opensource/mm-drivers/sync_fence:sync_fence_headers",
],
)


@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
# Copyright (c) 2020, The Linux Foundation. All rights reserved.
export CONFIG_MSM_EXT_DISPLAY=y
export CONFIG_QCOM_SPEC_SYNC=y
export CONFIG_QTI_HW_FENCE=y


@@ -0,0 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
*/
#define CONFIG_MSM_EXT_DISPLAY 1
#define CONFIG_QCOM_SPEC_SYNC 1
#define CONFIG_QTI_HW_FENCE 1


@@ -0,0 +1,42 @@
LOCAL_PATH := $(call my-dir)
LOCAL_MODULE_DDK_BUILD := true
include $(CLEAR_VARS)
# This makefile is only for DLKM
ifneq ($(findstring vendor,$(LOCAL_PATH)),)
ifneq ($(findstring opensource,$(LOCAL_PATH)),)
MSM_HW_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/hw_fence
endif # opensource
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := MSM_HW_FENCE_ROOT=$(MSM_HW_FENCE_BLD_DIR)
KBUILD_OPTIONS += MODNAME=msm_hw_fence
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := hw-fence-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := msm_hw_fence.ko
LOCAL_MODULE_KBUILD_NAME := msm_hw_fence.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
endif # DLKM check


@@ -0,0 +1,16 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
load(":define_hw_fence.bzl", "define_hw_fence")
package(
default_visibility = [
"//visibility:public"
],
)
ddk_headers(
name = "hw_fence_headers",
hdrs = glob(["include/*.h"]),
includes = ["include"]
)
define_hw_fence()


@@ -0,0 +1,22 @@
# SPDX-License-Identifier: GPL-2.0-only
KDIR := $(TOP)/kernel_platform/msm-kernel
include $(MSM_HW_FENCE_ROOT)/config/kalamammdrivers.conf
LINUXINCLUDE += -include $(MSM_HW_FENCE_ROOT)/config/kalamammdriversconf.h \
-I$(MSM_HW_FENCE_ROOT)hw_fence/include/
ifdef CONFIG_QTI_HW_FENCE
obj-m += msm_hw_fence.o
msm_hw_fence-y := src/msm_hw_fence.o \
src/hw_fence_drv_priv.o \
src/hw_fence_drv_utils.o \
src/hw_fence_drv_debug.o \
src/hw_fence_drv_ipc.o
msm_hw_fence-$(CONFIG_DEBUG_FS) += src/hw_fence_ioctl.o
CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
endif
EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull


@@ -0,0 +1,22 @@
config QTI_HW_FENCE
bool "HW Fence"
help
Enable the hw_fence module
config QTI_HW_FENCE_USE_SYNX
bool "HW Fence uses synx"
help
Enable the hw_fence module through the synx API.
This enables the hw-fence module to register
hw-fence ops with the synx module, supporting
hw-fencing through the synx API and inter-op
functionality between synx and hw-fence.
config QTI_ENABLE_HW_FENCE_DEFAULT
bool "HW Fence is enabled by default"
help
Enable the hw_fence module by default.
This config allows hw-fence client registrations
by default without any fastboot commands.
HW-fencing can still be disabled and re-enabled
at runtime through fastboot commands.


@@ -0,0 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
KBUILD_OPTIONS += MSM_HW_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../
all: modules
modules_install:
$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
clean:
rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
rm -rf .tmp_versions


@@ -0,0 +1,2 @@
CONFIG_QTI_HW_FENCE=y
CONFIG_QTI_HW_FENCE_USE_SYNX=y


@@ -0,0 +1,56 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule")
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//msm-kernel:target_variants.bzl", "get_all_variants")
def _define_module(target, variant):
tv = "{}_{}".format(target, variant)
if target in [ "pineapple" ]:
target_config = "defconfig"
else:
target_config = "{}_defconfig".format(target)
ddk_module(
name = "{}_msm_hw_fence".format(tv),
srcs = [
"src/hw_fence_drv_debug.c",
"src/hw_fence_drv_ipc.c",
"src/hw_fence_drv_priv.c",
"src/hw_fence_drv_utils.c",
"src/msm_hw_fence.c",
],
out = "msm_hw_fence.ko",
defconfig = target_config,
kconfig = "Kconfig",
conditional_srcs = {
"CONFIG_DEBUG_FS": {
True: ["src/hw_fence_ioctl.c"],
},
"CONFIG_QTI_HW_FENCE_USE_SYNX" : {
True: [
"src/msm_hw_fence_synx_translation.c",
"src/hw_fence_drv_interop.c",
]
},
},
deps = [
"//msm-kernel:all_headers",
"//vendor/qcom/opensource/synx-kernel:synx_headers",
"//vendor/qcom/opensource/mm-drivers:mm_drivers_headers",
],
kernel_build = "//msm-kernel:{}".format(tv),
)
copy_to_dist_dir(
name = "{}_msm_hw_fence_dist".format(tv),
data = [":{}_msm_hw_fence".format(tv)],
dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
flat = True,
wipe_dist_dir = False,
allow_duplicate_filenames = False,
mode_overrides = {"**/*": "644"},
log = "info",
)
def define_hw_fence():
for (t, v) in get_all_variants():
_define_module(t, v)


@@ -0,0 +1,126 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_DEBUG
#define __HW_FENCE_DRV_DEBUG
#include "hw_fence_drv_ipc.h"
enum hw_fence_drv_prio {
HW_FENCE_HIGH = 0x000001, /* High density debug messages (noisy) */
HW_FENCE_LOW = 0x000002, /* Low density debug messages */
HW_FENCE_INFO = 0x000004, /* Informational prints */
HW_FENCE_INIT = 0x000008, /* Initialization logs */
HW_FENCE_QUEUE = 0x000010, /* Queue logs */
HW_FENCE_LUT = 0x000020, /* Look-up and algorithm logs */
HW_FENCE_IRQ = 0x000040, /* Interrupt-related messages */
HW_FENCE_LOCK = 0x000080, /* Lock-related messages */
HW_FENCE_SSR = 0x000100, /* SSR-related messages */
HW_FENCE_PRINTK = 0x010000,
};
extern u32 msm_hw_fence_debug_level;
#define dprintk(__level, __fmt, ...) \
do { \
if (msm_hw_fence_debug_level & __level) \
if (msm_hw_fence_debug_level & HW_FENCE_PRINTK) \
pr_err(__fmt, ##__VA_ARGS__); \
} while (0)
#define HWFNC_ERR(fmt, ...) \
pr_err("[hwfence_error:%s:%d][%pS] "fmt, __func__, __LINE__, \
__builtin_return_address(0), ##__VA_ARGS__)
#define HWFNC_ERR_ONCE(fmt, ...) \
pr_err_once("[hwfence_error:%s:%d][%pS] "fmt, __func__, __LINE__, \
__builtin_return_address(0), ##__VA_ARGS__)
#define HWFNC_DBG_H(fmt, ...) \
dprintk(HW_FENCE_HIGH, "[hwfence_dbgh:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_L(fmt, ...) \
dprintk(HW_FENCE_LOW, "[hwfence_dbgl:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_INFO(fmt, ...) \
dprintk(HW_FENCE_INFO, "[hwfence_dbgi:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_INIT(fmt, ...) \
dprintk(HW_FENCE_INIT, "[hwfence_dbg:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_Q(fmt, ...) \
dprintk(HW_FENCE_QUEUE, "[hwfence_dbgq:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_LUT(fmt, ...) \
dprintk(HW_FENCE_LUT, "[hwfence_dbglut:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_IRQ(fmt, ...) \
dprintk(HW_FENCE_IRQ, "[hwfence_dbgirq:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_LOCK(fmt, ...) \
dprintk(HW_FENCE_LOCK, "[hwfence_dbglock:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_SSR(fmt, ...) \
dprintk(HW_FENCE_SSR, "[hwfence_dbgssr:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_DBG_DUMP(prio, fmt, ...) \
dprintk(prio, "[hwfence_dbgd:%s:%d]"fmt, __func__, __LINE__, ##__VA_ARGS__)
#define HWFNC_WARN(fmt, ...) \
pr_warn("[hwfence_warn:%s:%d][%pS] "fmt, __func__, __LINE__, \
__builtin_return_address(0), ##__VA_ARGS__)
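/*
 * Illustrative sketch, not part of the original sources: dprintk() emits a
 * message only when the category bit is set in msm_hw_fence_debug_level and
 * routes it through pr_err() only when HW_FENCE_PRINTK is also set, so both
 * bits must be enabled. The helper and client_id below are hypothetical.
 */
static inline void example_enable_irq_logging(int client_id)
{
	msm_hw_fence_debug_level = HW_FENCE_IRQ | HW_FENCE_PRINTK;
	HWFNC_DBG_IRQ("rx signal from client %d\n", client_id);
}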
int hw_fence_debug_debugfs_register(struct hw_fence_driver_data *drv_data);
void hw_fence_debug_dump_fence(enum hw_fence_drv_prio prio, struct msm_hw_fence *hw_fence, u64 hash,
u32 count);
#if IS_ENABLED(CONFIG_DEBUG_FS)
int process_validation_client_loopback(struct hw_fence_driver_data *drv_data, int client_id);
int hw_fence_debug_wait_val(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 hash, u64 mask,
u64 timeout_ms, u32 *error);
void hw_fence_debug_dump_queues(struct hw_fence_driver_data *drv_data, enum hw_fence_drv_prio prio,
struct msm_hw_fence_client *hw_fence_client);
void hw_fence_debug_dump_table(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data);
void hw_fence_debug_dump_events(enum hw_fence_drv_prio prio, struct hw_fence_driver_data *drv_data);
extern const struct file_operations hw_sync_debugfs_fops;
struct hw_fence_out_clients_map {
int ipc_client_id_vid; /* ipc client virtual id for the hw fence client */
int ipc_client_id_pid; /* ipc client physical id for the hw fence client */
int ipc_signal_id; /* ipc signal id for the hw fence client */
};
/* These signals are the ones that the actual clients should be triggering; the hw-fence driver
 * does not need to have knowledge of these signals. They are added here for debugging purposes.
 * Only the fence controller and the clients know these IDs, since they
 * are used to trigger the ipcc from the 'client hw-core' to the 'hw-fence controller'.
* The index of this struct must match the enum hw_fence_client_id
*/
static const struct hw_fence_out_clients_map
dbg_out_clients_signal_map_no_dpu[HW_FENCE_CLIENT_ID_VAL6 + 1] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 0}, /* CTRL_LOOPBACK */
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0}, /* CTX0 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 2}, /* CTL0 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 4}, /* CTL1 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 6}, /* CTL2 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 8}, /* CTL3 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 10}, /* CTL4 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 12}, /* CTL5 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21}, /* VAL0 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22}, /* VAL1 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23}, /* VAL2 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24}, /* VAL3 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25}, /* VAL4 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26}, /* VAL5 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27}, /* VAL6 */
};
#endif /* CONFIG_DEBUG_FS */
#endif /* __HW_FENCE_DRV_DEBUG */


@@ -0,0 +1,110 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_HW_DMA_FENCE
#define __HW_FENCE_DRV_HW_DMA_FENCE
#define HW_FENCE_NAME_SIZE 64
/**
* struct hw_dma_fence - fences internally created by hw-fence driver.
* @base: base dma-fence structure, this must remain at beginning of the struct.
* @name: name of each fence.
* @client_handle: handle for the client owner of this fence, this is returned by the hw-fence
* driver after a successful registration of the client and used by this fence
* during release.
* @dma_fence_key: key for the dma-fence hash table.
* @is_internal: true if this fence is initialized internally by hw-fence driver, false otherwise
* @signal_cb: hw-fence signal callback data, holding the drv_data and hash of the hw_fence
* @node: node for fences held in the dma-fences hash table linked lists
*/
struct hw_dma_fence {
struct dma_fence base;
char name[HW_FENCE_NAME_SIZE];
void *client_handle;
u32 dma_fence_key;
bool is_internal;
struct hw_fence_signal_cb signal_cb;
struct hlist_node node;
};
static inline struct hw_dma_fence *to_hw_dma_fence(struct dma_fence *fence)
{
return container_of(fence, struct hw_dma_fence, base);
}
static const char *hw_fence_dbg_get_driver_name(struct dma_fence *fence)
{
struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence);
return hw_dma_fence->name;
}
static const char *hw_fence_dbg_get_timeline_name(struct dma_fence *fence)
{
struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence);
return hw_dma_fence->name;
}
static bool hw_fence_dbg_enable_signaling(struct dma_fence *fence)
{
return true;
}
static void _hw_fence_release(struct hw_dma_fence *hw_dma_fence)
{
int ret = 0;
if (IS_ERR_OR_NULL(hw_dma_fence->client_handle) || (hw_dma_fence->is_internal &&
IS_ERR_OR_NULL(hw_dma_fence->signal_cb.drv_data))) {
HWFNC_ERR("invalid hwfence data %pK %pK, won't release hw_fence!\n",
hw_dma_fence->client_handle, hw_dma_fence->signal_cb.drv_data);
return;
}
/* release hw-fence */
if (hw_dma_fence->is_internal) /* internally owned hw_dma_fence has its own refcount */
ret = hw_fence_destroy_refcount(hw_dma_fence->signal_cb.drv_data,
hw_dma_fence->signal_cb.hash, HW_FENCE_DMA_FENCE_REFCOUNT);
else /* externally owned hw_dma_fence uses standard hlos refcount */
ret = msm_hw_fence_destroy(hw_dma_fence->client_handle, &hw_dma_fence->base);
if (ret)
HWFNC_ERR("failed to release hw_fence!\n");
}
static void hw_fence_dbg_release(struct dma_fence *fence)
{
struct hw_dma_fence *hw_dma_fence;
if (!fence)
return;
HWFNC_DBG_H("release backing fence %pK\n", fence);
hw_dma_fence = to_hw_dma_fence(fence);
if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags))
_hw_fence_release(hw_dma_fence);
kfree(fence->lock);
kfree(hw_dma_fence);
}
static struct dma_fence_ops hw_fence_dbg_ops = {
.get_driver_name = hw_fence_dbg_get_driver_name,
.get_timeline_name = hw_fence_dbg_get_timeline_name,
.enable_signaling = hw_fence_dbg_enable_signaling,
.wait = dma_fence_default_wait,
.release = hw_fence_dbg_release,
};
static inline bool dma_fence_is_hw_dma(struct dma_fence *fence)
{
return fence->ops == &hw_fence_dbg_ops;
}
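/*
 * Illustrative sketch, not part of the original sources: release and debug
 * paths may receive arbitrary dma-fences, so the ops comparison in
 * dma_fence_is_hw_dma() guards the downcast via to_hw_dma_fence(). The
 * function below is hypothetical.
 */
static inline void example_inspect_fence(struct dma_fence *fence)
{
	if (dma_fence_is_hw_dma(fence)) {
		struct hw_dma_fence *hw_dma_fence = to_hw_dma_fence(fence);

		HWFNC_DBG_H("fence %s is_internal:%d\n", hw_dma_fence->name,
			hw_dma_fence->is_internal);
	}
}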
#endif /* __HW_FENCE_DRV_HW_DMA_FENCE */


@@ -0,0 +1,78 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_INTEROP_H
#define __HW_FENCE_INTEROP_H
#include <synx_api.h>
extern struct hw_fence_driver_data *hw_fence_drv_data;
extern struct synx_hwfence_interops synx_interops;
/**
* HW_FENCE_HANDLE_INDEX_MASK: Mask to extract table index from hw-fence handle
*/
#define HW_FENCE_HANDLE_INDEX_MASK GENMASK(16, 0)
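/*
 * Illustrative sketch, not part of the original sources and based only on the
 * mask definition above: extracting the table index from a handle is a single
 * mask operation. The helper name is hypothetical.
 */
static inline u32 example_handle_to_index(u32 h_synx)
{
	return h_synx & HW_FENCE_HANDLE_INDEX_MASK; /* bits 16:0 */
}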
/**
* hw_fence_interop_to_synx_status() - Converts hw-fence status code to synx status code
*
* @param hw_fence_status_code : hw-fence status code
* @return synx status code corresponding to hw-fence status code
*/
int hw_fence_interop_to_synx_status(int hw_fence_status_code);
/**
* hw_fence_interop_to_synx_signal_status() - Converts hw-fence flags and error to
* synx signaling status
*
* @param flags : hw-fence flags
* @param error : hw-fence error
*
* @return synx signaling status
*/
u32 hw_fence_interop_to_synx_signal_status(u32 flags, u32 error);
/**
* hw_fence_interop_to_hw_fence_error() - Convert synx signaling status to hw-fence error
*
* @param status : synx signaling status
* @return hw-fence error
*/
u32 hw_fence_interop_to_hw_fence_error(u32 status);
/**
* hw_fence_interop_create_fence_from_import() - Creates hw-fence if necessary during synx_import,
* e.g. if there is no backing hw-fence for a synx fence.
*
* @param params : pointer to import params
* @return SYNX_SUCCESS upon success, -SYNX_INVALID if failed
*/
int hw_fence_interop_create_fence_from_import(struct synx_import_indv_params *params);
/**
* hw_fence_interop_share_handle_status() - updates the HW fence table with the synx handle
* (if not already signaled) and returns the hw-fence handle by populating params.new_h_synx,
* along with the signal status
*
* @param params : pointer to import params
* @param h_synx : synx handle
* @param signal_status: signaling status of the fence
*
* @return SYNX_SUCCESS upon success, -SYNX_INVALID if failed
*/
int hw_fence_interop_share_handle_status(struct synx_import_indv_params *params, u32 h_synx,
u32 *signal_status);
/**
* hw_fence_interop_get_fence() - returns the dma-fence associated with the given handle
*
* @param h_synx : hw-fence handle
*
* @return dma-fence associated with the hw-fence handle; NULL or an error pointer in case of error.
*/
void *hw_fence_interop_get_fence(u32 h_synx);
#endif /* __HW_FENCE_INTEROP_H */


@@ -0,0 +1,223 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_IPC_H
#define __HW_FENCE_DRV_IPC_H
/* ipc clients virtual client-id */
#define HW_FENCE_IPC_CLIENT_ID_APPS_VID 8
#define HW_FENCE_IPC_CLIENT_ID_GPU_VID 9
#define HW_FENCE_IPC_CLIENT_ID_IPE_VID 11
#define HW_FENCE_IPC_CLIENT_ID_VPU_VID 12
#define HW_FENCE_IPC_CLIENT_ID_DPU_VID 25
#define HW_FENCE_IPC_CLIENT_ID_IPA_VID 26
#define HW_FENCE_IPC_CLIENT_ID_SOCCP_VID 46
#define HW_FENCE_IPC_CLIENT_ID_IFE0_VID 128
#define HW_FENCE_IPC_CLIENT_ID_IFE1_VID 129
#define HW_FENCE_IPC_CLIENT_ID_IFE2_VID 130
#define HW_FENCE_IPC_CLIENT_ID_IFE3_VID 131
#define HW_FENCE_IPC_CLIENT_ID_IFE4_VID 132
#define HW_FENCE_IPC_CLIENT_ID_IFE5_VID 133
#define HW_FENCE_IPC_CLIENT_ID_IFE6_VID 134
#define HW_FENCE_IPC_CLIENT_ID_IFE7_VID 135
#define HW_FENCE_IPC_CLIENT_ID_IFE8_VID 136
#define HW_FENCE_IPC_CLIENT_ID_IFE9_VID 137
#define HW_FENCE_IPC_CLIENT_ID_IFE10_VID 138
#define HW_FENCE_IPC_CLIENT_ID_IFE11_VID 139
/* ipc clients physical client-id */
#define HW_FENCE_IPC_CLIENT_ID_APPS_PID 3
#define HW_FENCE_IPC_CLIENT_ID_GPU_PID 4
#define HW_FENCE_IPC_CLIENT_ID_IPE_PID 5
#define HW_FENCE_IPC_CLIENT_ID_VPU_PID 8
#define HW_FENCE_IPC_CLIENT_ID_DPU_PID 9
#define HW_FENCE_IPC_CLIENT_ID_IFE0_PID 11
#define HW_FENCE_IPC_CLIENT_ID_IFE1_PID 12
#define HW_FENCE_IPC_CLIENT_ID_IFE2_PID 13
#define HW_FENCE_IPC_CLIENT_ID_IFE3_PID 14
#define HW_FENCE_IPC_CLIENT_ID_IFE4_PID 15
#define HW_FENCE_IPC_CLIENT_ID_IFE5_PID 16
#define HW_FENCE_IPC_CLIENT_ID_IFE6_PID 17
#define HW_FENCE_IPC_CLIENT_ID_IFE7_PID 18
#define HW_FENCE_IPC_CLIENT_ID_SOCCP_PID 22
/* ipc clients physical client-id on other targets */
#define HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN 9
#define HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN 20
#define HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE 2
#define HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE 3
#define HW_FENCE_IPC_CLIENT_ID_IPA_PID_NIOBE 4
#define HW_FENCE_IPC_CLIENT_ID_GPU_PID_NIOBE 8
#define HW_FENCE_IPC_CLIENT_ID_IPE_PID_NIOBE 10
#define HW_FENCE_IPC_CLIENT_ID_VPU_PID_NIOBE 11
#define HW_FENCE_IPC_CLIENT_ID_SOCCP_PID_NIOBE 13
#define HW_FENCE_IPC_CLIENT_ID_IFE0_PID_NIOBE 15
#define HW_FENCE_IPC_CLIENT_ID_IFE1_PID_NIOBE 16
#define HW_FENCE_IPC_CLIENT_ID_IFE2_PID_NIOBE 17
#define HW_FENCE_IPC_CLIENT_ID_IFE3_PID_NIOBE 18
#define HW_FENCE_IPC_CLIENT_ID_IFE4_PID_NIOBE 19
#define HW_FENCE_IPC_CLIENT_ID_IFE5_PID_NIOBE 20
#define HW_FENCE_IPC_CLIENT_ID_IFE6_PID_NIOBE 21
#define HW_FENCE_IPC_CLIENT_ID_IFE7_PID_NIOBE 22
#define HW_FENCE_IPC_CLIENT_ID_IFE8_PID_NIOBE 23
#define HW_FENCE_IPC_CLIENT_ID_IFE9_PID_NIOBE 24
#define HW_FENCE_IPC_CLIENT_ID_IFE10_PID_NIOBE 25
#define HW_FENCE_IPC_CLIENT_ID_IFE11_PID_NIOBE 26
#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA 2
#define HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_PINEAPPLE 2
#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE 4
#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_SUN 4
#define HW_FENCE_IPC_FENCE_PROTOCOL_ID_NIOBE 4
#define HW_FENCE_IPCC_HW_REV_170 0x00010700 /* Kalama */
#define HW_FENCE_IPCC_HW_REV_203 0x00020003 /* Pineapple */
#define HW_FENCE_IPCC_HW_REV_2A2 0x00020A02 /* Sun */
#define HW_FENCE_IPCC_HW_REV_2B4 0x00020B04 /* Niobe */
#define IPC_PROTOCOLp_CLIENTc_VERSION(base, p, c) (base + (0x40000*p) + (0x1000*c))
#define IPC_PROTOCOLp_CLIENTc_CONFIG(base, p, c) (base + 0x8 + (0x40000*p) + (0x1000*c))
#define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(base, p, c) \
(base + 0x14 + (0x40000*p) + (0x1000*c))
#define IPC_PROTOCOLp_CLIENTc_SEND(base, p, c) ((base + 0xc) + (0x40000*p) + (0x1000*c))
#define IPC_PROTOCOLp_CLIENTc_RECV_ID(base, p, c) (base + 0x10 + (0x40000*p) + (0x1000*c))
#define IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_CLEAR(base, p, c) (base + 0x1C + (0x40000*p) + (0x1000*c))
#define HW_FENCE_IPC_RECV_ID_NONE 0xFFFFFFFF
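/*
 * Worked example, not part of the original sources: the macros above encode a
 * 0x40000-byte stride per protocol 'p' and a 0x1000-byte stride per client 'c'.
 * For a hypothetical protocol 2 and client 3, relative to a zero base:
 * IPC_PROTOCOLp_CLIENTc_SEND(0, 2, 3)
 *	= (0 + 0xc) + (0x40000 * 2) + (0x1000 * 3)
 *	= 0x8300c
 */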
/**
* hw_fence_ipcc_trigger_signal() - Trigger ipc signal for the requested client/signal pair.
* @drv_data: driver data.
* @tx_client_id: ipc client id that sends the ipc signal.
* @rx_client_id: ipc client id that receives the ipc signal.
* @signal_id: signal id to send.
*
* This API triggers the ipc 'signal_id' from the 'tx_client_id' to the 'rx_client_id'
*/
void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
u32 tx_client_id, u32 rx_client_id, u32 signal_id);
/**
* hw_fence_ipcc_enable_signaling() - Enable ipcc signaling for hw-fence driver.
* @drv_data: driver data.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_ipcc_enable_protocol() - Enable ipcc protocol used for hw-fencing
* (either compute l1 or fence depending on target) for given client.
* @drv_data: driver data
* @client_id: hw fence driver client id
*
* This should only be called once for each IPCC client, e.g. if protocol is enabled
* for one dpu client, it should not be called again for another dpu client.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_enable_protocol(struct hw_fence_driver_data *drv_data, u32 client_id);
/**
* hw_fence_ipcc_enable_client_signal_pairs() - Enable ipcc signaling for all client-signal
* pairs required for hw-fencing for given client.
* @drv_data: driver data.
* @start_client: first hw fence driver client id for given ipcc client
*
* This API enables input signal from driver and fctl (if fctl is separate from driver) for
* given client. IPCC protocol must be enabled via hw_fence_ipcc_enable_protocol() prior
* to this call. This API iterates through driver's ipc client table to ensure all client-
* signal pairs for given client are enabled.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_enable_client_signal_pairs(struct hw_fence_driver_data *drv_data,
u32 start_client);
/**
* hw_fence_ipcc_get_client_virt_id() - Returns the ipc client virtual id that corresponds to the
* hw fence driver client.
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* The ipc client id returned by this API is used by the hw fence driver when signaling the fence.
*
* Return: client_id on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id);
/**
* hw_fence_ipcc_get_client_phys_id() - Returns the ipc client physical id that corresponds to the
* hw fence driver client.
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* The ipc client id returned by this API is used by the hw fence driver when signaling the fence.
*
* Return: client_id on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id);
/**
* hw_fence_ipcc_get_signal_id() - Returns the ipc signal id that corresponds to the hw fence
* driver client.
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* The ipc signal id returned by this API is used by the hw fence driver when signaling the fence.
*
* Return: client_id on success or negative errno (-EINVAL)
*/
int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id);
/**
* hw_fence_ipcc_needs_rxq_update() - Returns bool to indicate if client uses rx-queue.
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* Return: true if client needs to update rxq, false otherwise
*/
bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id);
/**
* hw_fence_ipcc_signaled_needs_rxq_update() - Returns bool to indicate if client requires
* rx-queue update when registering to wait on an already signaled fence.
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* Return: true if client needs to update rxq when dma-fence is signaled, false otherwise
*/
bool hw_fence_ipcc_signaled_needs_rxq_update(struct hw_fence_driver_data *drv_data,
int client_id);
/**
* hw_fence_ipcc_signaled_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt
* for already signaled fences
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* Return: true if client needs ipc interrupt for signaled fences, false otherwise
*/
bool hw_fence_ipcc_signaled_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id);
/**
* hw_fence_ipcc_txq_update_needs_ipc_irq() - Returns bool to indicate if client needs ipc interrupt
* when updating client tx queue in hlos
* @drv_data: driver data.
* @client_id: hw fence driver client id.
*
* Return: true if client needs ipc interrupt when updating client tx queue, false otherwise
*/
bool hw_fence_ipcc_txq_update_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id);
/**
* hw_fence_ipcc_get_signaled_clients_mask() - Returns a mask indicating which clients' signals
* were received by the HW Fence Driver
* @drv_data: driver_data
*
* Return: mask on success or zero upon error
*/
u64 hw_fence_ipcc_get_signaled_clients_mask(struct hw_fence_driver_data *drv_data);
#endif /* __HW_FENCE_DRV_IPC_H */


@@ -0,0 +1,701 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_INTERNAL_H
#define __HW_FENCE_DRV_INTERNAL_H
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/hashtable.h>
#include <linux/remoteproc.h>
#include "msm_hw_fence.h"
/* max u64 to indicate invalid fence */
#define HW_FENCE_INVALID_PARENT_FENCE (~0ULL)
/* hash algorithm constants */
#define HW_FENCE_HASH_A_MULT 4969 /* a multiplier for Hash algorithm */
#define HW_FENCE_HASH_C_MULT 907 /* c multiplier for Hash algorithm */
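/*
 * Hypothetical sketch only, NOT the driver's actual algorithm (the hash
 * routine lives in the driver sources, not in this header): it illustrates
 * how an a/c multiplier pair like the one above is commonly combined in a
 * multiplicative hash folding a dma-fence (context, seqno) pair into an index.
 */
static inline u64 example_hash(u64 context, u64 seqno, u64 table_entries)
{
	return (context * HW_FENCE_HASH_A_MULT +
		seqno * HW_FENCE_HASH_C_MULT) % table_entries;
}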
/* number of queues per type (i.e. ctrl or client queues) */
#define HW_FENCE_CTRL_QUEUES 2 /* Rx and Tx Queues */
#define HW_FENCE_CLIENT_QUEUES 2 /* Rx and Tx Queues */
/* hfi headers calculation */
#define HW_FENCE_HFI_TABLE_HEADER_SIZE(has_soccp) \
((has_soccp) ? (sizeof(struct msm_hw_fence_hfi_queue_table_header_v2)) : \
(sizeof(struct msm_hw_fence_hfi_queue_table_header)))
#define HW_FENCE_HFI_QUEUE_HEADER_SIZE(has_soccp) \
((has_soccp) ? (sizeof(struct msm_hw_fence_hfi_queue_header_v2)) : \
(sizeof(struct msm_hw_fence_hfi_queue_header)))
#define HW_FENCE_HFI_CTRL_HEADERS_SIZE(has_soccp) (HW_FENCE_HFI_TABLE_HEADER_SIZE(has_soccp) + \
(HW_FENCE_HFI_QUEUE_HEADER_SIZE(has_soccp) * HW_FENCE_CTRL_QUEUES))
#define HW_FENCE_HFI_CLIENT_HEADERS_SIZE(queues_num, has_soccp) \
(HW_FENCE_HFI_TABLE_HEADER_SIZE(has_soccp) + \
(HW_FENCE_HFI_QUEUE_HEADER_SIZE(has_soccp) * queues_num))
/*
* CTRL queue uses same 64-byte aligned payload size as client queue.
*/
#define HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE (sizeof(struct msm_hw_fence_queue_payload))
#define HW_FENCE_CTRL_QUEUE_PAYLOAD HW_FENCE_CTRL_QUEUE_MAX_PAYLOAD_SIZE
#define HW_FENCE_CLIENT_QUEUE_PAYLOAD (sizeof(struct msm_hw_fence_queue_payload))
#define HW_FENCE_CTRL_QUEUE_ENTRIES 64
/*
* On targets with SOCCP, client RxQ lock is 64-bit in size but each lock is at a separate 64-byte
* chunk of memory
*/
#define HW_FENCE_LOCK_IDX_OFFSET 8
/* Locks area for all clients with RxQ */
#define HW_FENCE_MEM_LOCKS_SIZE(rxq_clients_num) (HW_FENCE_LOCK_IDX_OFFSET * sizeof(u64) * \
rxq_clients_num)
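/*
 * Worked example, not part of the original sources: each lock is a u64, but
 * HW_FENCE_LOCK_IDX_OFFSET spaces consecutive locks eight u64s apart, giving
 * each client a 64-byte stride. For a hypothetical rxq_clients_num of 4:
 * HW_FENCE_MEM_LOCKS_SIZE(4) = 8 * sizeof(u64) * 4 = 256 bytes
 */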
#define HW_FENCE_TX_QUEUE 1
#define HW_FENCE_RX_QUEUE 2
/* Client ID for the internal join fence; used by the framework when creating a join-fence */
#define HW_FENCE_JOIN_FENCE_CLIENT_ID (~(u32)0)
/**
* msm hw fence flags:
* MSM_HW_FENCE_FLAG_SIGNAL - Flag set when the hw-fence is signaled
* MSM_HW_FENCE_FLAG_CREATE_SIGNALED - Flag set when the hw-fence is created to back a signaled
* dma-fence whose hw-fence has been destroyed
* MSM_HW_FENCE_FLAG_INTERNAL_OWNED - Flag set when HLOS Native fence is internally owned and
* present in dma-fence table
*/
#define MSM_HW_FENCE_FLAG_SIGNAL BIT(0)
#define MSM_HW_FENCE_FLAG_CREATE_SIGNALED BIT(1)
#define MSM_HW_FENCE_FLAG_INTERNAL_OWNED BIT(2)
/**
* MSM_HW_FENCE_MAX_JOIN_PARENTS:
* Maximum number of parents that a fence can have for a join-fence
*/
#define MSM_HW_FENCE_MAX_JOIN_PARENTS 3
/**
* HW_FENCE_PAYLOAD_REV:
* Payload version with major and minor version information
*/
#define HW_FENCE_PAYLOAD_REV(major, minor) (major << 8 | (minor & 0xFF))
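/*
 * Worked example, not part of the original sources: packing major version 1
 * with minor version 0 gives (1 << 8) | (0 & 0xFF) = 0x100, matching the
 * version field split described for struct msm_hw_fence_queue_payload below.
 */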
/**
* HW_FENCE_EVENT_MAX_DATA:
* Maximum data that can be added to the debug event
*/
#define HW_FENCE_EVENT_MAX_DATA 12
/**
* HW_FENCE_FCTL_REFCOUNT:
* Refcount held by Fence Controller for signaling.
* This bit in hw_fence->refcount is set during creation of a hw-fence and released when the
* hw-fence is signaled by Fence Controller.
*/
#define HW_FENCE_FCTL_REFCOUNT BIT(31)
/**
* HW_FENCE_DMA_FENCE_REFCOUNT:
* Refcount held by HW Fence Driver for dma-fence release or signal.
* For dma-fences internally owned by the HW Fence Driver, this is set during hw-fence creation and
* cleared during dma_fence_release.
* For external dma-fences initialized by the client, this is set when the hw-fence signal callback
* is added to the dma-fence and cleared during dma_fence_signal.
*/
#define HW_FENCE_DMA_FENCE_REFCOUNT BIT(30)
/**
* HW_FENCE_HLOS_REFCOUNT_MASK:
* Mask for refcounts acquired and released from HLOS.
* The field "hw_fence->refcount & HW_FENCE_HLOS_REFCOUNT_MASK" stores the number of refcounts held
* by HW Fence clients or HW Fence Driver.
*/
#define HW_FENCE_HLOS_REFCOUNT_MASK GENMASK(29, 0)
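/*
 * Illustrative sketch, not part of the original sources: decoding the three
 * refcount fields described above from a snapshot of hw_fence->refcount. The
 * helper name is hypothetical.
 */
static inline u32 example_hlos_refcount(u32 refcount)
{
	bool fctl_ref = refcount & HW_FENCE_FCTL_REFCOUNT;		/* bit 31 */
	bool dma_fence_ref = refcount & HW_FENCE_DMA_FENCE_REFCOUNT;	/* bit 30 */

	(void)fctl_ref;
	(void)dma_fence_ref;
	return refcount & HW_FENCE_HLOS_REFCOUNT_MASK;			/* bits 29:0 */
}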
/*
* DMA_FENCE_HASH_TABLE_BIT: Number of bits that defines the size of the dma-fences hash table
* DMA_FENCE_HASH_TABLE_SIZE: Size of dma-fences hash table
*/
#define DMA_FENCE_HASH_TABLE_BIT (12) /* size of table = (1 << 12) = 4096 */
#define DMA_FENCE_HASH_TABLE_SIZE (1 << DMA_FENCE_HASH_TABLE_BIT)
/**
* enum hw_fence_client_data_id - Enum with the clients having client_data, an optional
* parameter passed from the waiting client and returned
* to it upon fence signaling.
* @HW_FENCE_CLIENT_DATA_ID_CTX0: GFX Client 0.
* @HW_FENCE_MAX_CLIENTS_WITH_DATA: Max number of clients with data, also indicates an
* invalid hw_fence_client_data_id
*/
enum hw_fence_client_data_id {
HW_FENCE_CLIENT_DATA_ID_CTX0,
HW_FENCE_MAX_CLIENTS_WITH_DATA,
};
/**
* struct msm_hw_fence_queue - Structure holding the data of the hw fence queues.
* @va_queue: pointer to the virtual address of the queue elements
* @q_size_bytes: size of the queue
* @va_header: pointer to the hfi header virtual address
* @pa_queue: physical address of the queue
* @rd_wr_idx_start: start read and write indexes for client queue (zero by default)
* @rd_wr_idx_factor: factor to multiply custom index to get index in dwords (one by default)
* @skip_wr_idx: bool to indicate if update to write_index is skipped within hw fence driver and
* hfi_header->tx_wm is updated instead
*/
struct msm_hw_fence_queue {
void *va_queue;
u32 q_size_bytes;
void *va_header;
phys_addr_t pa_queue;
u32 rd_wr_idx_start;
u32 rd_wr_idx_factor;
bool skip_wr_idx;
};
/**
* enum payload_type - Enum with the queue payload types.
* HW_FENCE_PAYLOAD_TYPE_1: client queue payload
* HW_FENCE_PAYLOAD_TYPE_2: ctrl queue payload for fence error; client_data stores client_id
* HW_FENCE_PAYLOAD_TYPE_3: ctrl queue payload for memory sharing
* HW_FENCE_PAYLOAD_TYPE_4: ctrl queue payload for soccp ssr
*/
enum payload_type {
HW_FENCE_PAYLOAD_TYPE_1 = 1,
HW_FENCE_PAYLOAD_TYPE_2,
HW_FENCE_PAYLOAD_TYPE_3,
HW_FENCE_PAYLOAD_TYPE_4
};
/**
* struct msm_hw_fence_client - Structure holding the per-Client allocated resources.
* @client_id: internal client_id used within HW fence driver; index into the clients struct
* @client_id_ext: external client_id, equal to client_id except for clients with configurable
* number of sub-clients (e.g. ife clients)
* @mem_descriptor: hfi header memory descriptor
* @queues: queues descriptor
* @queues_num: number of client queues
* @fence_error_cb: function called for waiting clients that need HLOS notification of fence error
* @fence_error_cb_userdata: opaque pointer registered with fence error callback and passed to
* client during invocation of callback function
* @error_cb_lock: lock to synchronize access to fence error cb and fence error cb data
* @ipc_signal_id: id of the signal to be triggered for this client
* @ipc_client_vid: virtual id of the ipc client for this hw fence driver client
* @ipc_client_pid: physical id of the ipc client for this hw fence driver client
* @update_rxq: bool to indicate if client requires rx queue update in general signal case
* (e.g. if dma-fence is signaled)
* @signaled_update_rxq: bool to indicate if client requires rx queue update when registering to
* wait on an already signaled fence
* @signaled_send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences
* @txq_update_send_ipc: bool to indicate if client requires ipc interrupt for txq updates
* @skip_fctl_ref: bool to indicate if client-created fences should not have fctl refcount during
* initial creation; this refcount is instead set during synx_import call
* @context_id: context id for fences created internally
* @seqno: sequence no for fences created internally
* @wait_queue: wait queue for the validation clients
* @val_signal: doorbell flag to signal the validation clients in the wait queue
* @kref: number of active references to this client
*/
struct msm_hw_fence_client {
enum hw_fence_client_id client_id;
enum hw_fence_client_id client_id_ext;
struct msm_hw_fence_mem_addr mem_descriptor;
struct msm_hw_fence_queue queues[HW_FENCE_CLIENT_QUEUES];
int queues_num;
msm_hw_fence_error_cb_t fence_error_cb;
void *fence_error_cb_userdata;
struct mutex error_cb_lock;
int ipc_signal_id;
int ipc_client_vid;
int ipc_client_pid;
bool update_rxq;
bool signaled_update_rxq;
bool signaled_send_ipc;
bool txq_update_send_ipc;
bool skip_fctl_ref;
u64 context_id;
atomic_t seqno;
struct kref kref;
#if IS_ENABLED(CONFIG_DEBUG_FS)
wait_queue_head_t wait_queue;
atomic_t val_signal;
#endif /* CONFIG_DEBUG_FS */
};
/**
* struct msm_hw_fence_mem_data - Structure holding internal memory attributes
*
* @attrs: attributes for the memory allocation
*/
struct msm_hw_fence_mem_data {
unsigned long attrs;
};
/**
* struct msm_hw_fence_dbg_data - Structure holding debugfs data
*
* @root: debugfs root
* @entry_rd: flag to indicate if debugfs dumps a single line or table
* @context_rd: debugfs setting to indicate which context id to dump
* @seqno_rd: debugfs setting to indicate which seqno to dump
* @client_id_rd: debugfs setting to indicate which client queue(s) to dump
* @hw_fence_sim_release_delay: delay in microseconds for the debugfs node that simulates the
* hw-fences behavior, to release the hw-fences
* @create_hw_fences: boolean to continuously create hw-fences within debugfs
* @clients_list: list of debug clients registered
* @clients_list_lock: lock to synchronize access to the clients list
* @lock_wake_cnt: number of times that driver triggers wake-up ipcc to unlock inter-vm try-lock
*/
struct msm_hw_fence_dbg_data {
struct dentry *root;
bool entry_rd;
u64 context_rd;
u64 seqno_rd;
u32 client_id_rd;
u32 hw_fence_sim_release_delay;
bool create_hw_fences;
struct list_head clients_list;
struct mutex clients_list_lock;
u64 lock_wake_cnt;
};
/**
* struct hw_fence_client_type_desc - Structure holding client type properties, including static
* properties and client queue properties read from device-tree.
*
* @name: name of client type, used to parse properties from device-tree
* @init_id: initial client_id for given client type within the 'hw_fence_client_id' enum, e.g.
* HW_FENCE_CLIENT_ID_CTL0 for DPU clients
* @max_clients_num: maximum number of clients of given client type
* @clients_num: number of clients of given client type
* @queues_num: number of queues per client of given client type; either one (for only Tx Queue) or
* two (for both Tx and Rx Queues)
* @queue_entries: number of entries per client queue of given client type
* @start_padding: size of padding between queue table header and first queue header in bytes
* @end_padding: size of padding between queue header(s) and first queue payload in bytes
* @mem_size: size of memory allocated for client queue(s) per client in bytes
* @txq_idx_start: start read and write indexes for client tx queue (zero by default)
* @txq_idx_factor: factor to multiply custom TxQ idx to get index in dwords (one by default)
* @skip_txq_wr_idx: bool to indicate if update to tx queue write_index is skipped within hw fence
* driver and hfi_header->tx_wm is updated instead
* @skip_fctl_ref: bool to indicate if client-created fences should not have fctl refcount during
* initial creation; this refcount is instead set during synx_import call
*/
struct hw_fence_client_type_desc {
char *name;
enum hw_fence_client_id init_id;
u32 max_clients_num;
u32 clients_num;
u32 queues_num;
u32 queue_entries;
u32 start_padding;
u32 end_padding;
u32 mem_size;
u32 txq_idx_start;
u32 txq_idx_factor;
bool skip_txq_wr_idx;
bool skip_fctl_ref;
};
/**
* struct hw_fence_client_queue_desc - Structure holding client queue properties for a client.
*
* @type: pointer to client queue properties of client type
* @start_offset: start offset of client queue memory region, from beginning of carved-out memory
* allocation for hw fence driver
*/
struct hw_fence_client_queue_desc {
struct hw_fence_client_type_desc *type;
u32 start_offset;
};
/**
* struct hw_fence_signal_cb - Structure holding hw-fence callback data for dma-fence callback
*
* @fence_cb: fence callback data structure used to add dma_fence_callback
* @drv_data: structure holding internal hw-fence driver data
* @hash: hash of hw-fence to decrement refcount in dma-fence callback
*/
struct hw_fence_signal_cb {
struct dma_fence_cb fence_cb;
struct hw_fence_driver_data *drv_data;
u64 hash;
};
/**
* struct hw_fence_soccp - Structure holding hw-fence data specific to soccp
* @rproc_ph: phandle for soccp rproc object used to set power vote
* @rproc: soccp rproc object used to set power vote
* @rproc_lock: lock to synchronization modifications to soccp rproc data structure and state
* @is_awake: true if HW Fence Driver has successfully set a power vote on soccp that has not been
* removed by SSR; false if the driver has not set a power vote, has successfully removed its
* power vote, or soccp has crashed
* @usage_cnt: independent counter of the number of users of SOCCP; 1 if there are no users
* @ssr_nb: notifier block used for soccp ssr
* @ssr_notifier: soccp ssr notifier
* @ssr_wait_queue: wait queue to notify ssr callback that a payload has been received from soccp
* @ssr_cnt: counts number of times soccp has restarted, zero if initial boot-up
*/
struct hw_fence_soccp {
phandle rproc_ph;
struct rproc *rproc;
struct mutex rproc_lock;
bool is_awake;
refcount_t usage_cnt;
struct notifier_block ssr_nb;
void *ssr_notifier;
wait_queue_head_t ssr_wait_queue;
u32 ssr_cnt;
};
/**
* struct hw_fence_driver_data - Structure holding internal hw-fence driver data
*
* @dev: device driver pointer
* @resources_ready: value set by driver at end of probe, once all resources are ready
* @hw_fence_table_entries: total number of hw-fences in the global table
* @hw_fence_mem_fences_table_size: hw-fences global table total size
* @hw_fence_queue_entries: total number of entries that can be available in the queue
* @hw_fence_ctrl_queue_size: size of the ctrl queue for the payload
* @hw_fence_mem_ctrl_queues_size: total size of ctrl queues, including: header + rxq + txq
* @hw_fence_client_queue_size: descriptors of client queue properties for each hw fence client
* @hw_fence_client_types: descriptors of properties for each hw fence client type
* @rxq_clients_num: number of supported hw fence clients with rxq (configured based on device-tree)
* @clients_num: number of supported hw fence clients (configured based on device-tree)
* @hw_fences_tbl: pointer to the hw-fences table
* @hw_fences_tbl_cnt: number of elements in the hw-fence table
* @hlos_key_tbl: pointer to table of keys tracked by hlos only, same size as the hw-fences table
* @events: start address of hw fence debug events
* @total_events: total number of hw fence debug events supported
* @client_lock_tbl: pointer to the per-client locks table
* @client_lock_tbl_cnt: number of elements in the locks table
* @hw_fences_mem_desc: memory descriptor for the hw-fence table
* @clients_locks_mem_desc: memory descriptor for the locks table
* @ctrl_queue_mem_desc: memory descriptor for the ctrl queues
* @ctrl_queues: pointer to the ctrl queues
* @io_mem_base: pointer to the carved-out io memory
* @res: resources for the carved out memory
* @size: size of the carved-out memory
* @label: label for the carved-out memory (this is used by SVM to find the memory)
* @peer_name: peer name for this carved-out memory
* @rm_nb: hyp resource manager notifier
* @memparcel: memparcel for the allocated memory
* @used_mem_size: total memory size of global table, lock region, and ctrl and client queues
* @cpu_addr_cookie: bogus cpu address returned by dma_alloc_attrs which is used for freeing memory
* @db_label: doorbell label
* @rx_dbl: handle to the Rx doorbell
* @debugfs_data: debugfs info
* @ipcc_reg_base: base for ipcc regs mapping
* @ipcc_io_mem: base for the ipcc io mem map
* @ipcc_size: size of the ipcc io mem mapping
* @protocol_id: ipcc protocol id used by this driver
* @ipcc_client_vid: ipcc client virtual-id for this driver
* @ipcc_client_pid: ipcc client physical-id for this driver
* @ipcc_fctl_vid: ipcc client virtual-id for fctl
* @ipcc_fctl_pid: ipcc client physical-id for fctl
* @ipc_clients_table: table with the ipcc mapping for each client of this driver
* @qtime_reg_base: qtimer register base address
* @qtime_io_mem: qtimer io mem map
* @qtime_size: qtimer io mem map size
* @client_id_mask: bitmask for tracking registered client_ids
* @clients_register_lock: lock to synchronize clients registration and deregistration
* @clients: table with the handles of the registered clients; size is equal to clients_num
* @fctl_ready: flag to indicate if fence controller has been initialized
* @ipcc_dpu_initialized: flag to indicate if dpu hw is initialized
* @ipcc_val_initialized: flag to indicate if val is initialized
* @dma_fence_table_lock: lock to synchronize access to dma-fence table
* @dma_fence_table: table with internal dma-fences for hw-fences
* @has_soccp: flag to indicate if soccp is present (otherwise vm is used)
* @soccp_listener_thread: thread that processes interrupts received from soccp
* @soccp_wait_queue: wait queue to notify soccp_listener_thread of new interrupts
* @signaled_clients_mask: mask to track signals received from soccp by hw-fence driver
* @soccp_props: soccp-specific properties for ssr and power votes
*/
struct hw_fence_driver_data {
struct device *dev;
bool resources_ready;
/* Table & Queues info */
u32 hw_fence_table_entries;
u32 hw_fence_mem_fences_table_size;
u32 hw_fence_queue_entries;
/* ctrl queues */
u32 hw_fence_ctrl_queue_size;
u32 hw_fence_mem_ctrl_queues_size;
/* client queues */
struct hw_fence_client_queue_desc *hw_fence_client_queue_size;
struct hw_fence_client_type_desc *hw_fence_client_types;
u32 rxq_clients_num;
u32 clients_num;
/* HW Fences Table VA */
struct msm_hw_fence *hw_fences_tbl;
u64 *hlos_key_tbl;
u32 hw_fences_tbl_cnt;
/* events */
struct msm_hw_fence_event *events;
u32 total_events;
/* Table with a Per-Client Lock */
u64 *client_lock_tbl;
u32 client_lock_tbl_cnt;
/* Memory Descriptors */
struct msm_hw_fence_mem_addr hw_fences_mem_desc;
struct msm_hw_fence_mem_addr clients_locks_mem_desc;
struct msm_hw_fence_mem_addr ctrl_queue_mem_desc;
struct msm_hw_fence_queue ctrl_queues[HW_FENCE_CTRL_QUEUES];
/* carved out memory */
void __iomem *io_mem_base;
struct resource res;
size_t size;
u32 label;
u32 peer_name;
struct notifier_block rm_nb;
u32 memparcel;
u32 used_mem_size;
void *cpu_addr_cookie;
/* doorbell */
u32 db_label;
/* VM virq */
void *rx_dbl;
/* debugfs */
struct msm_hw_fence_dbg_data debugfs_data;
/* ipcc regs */
phys_addr_t ipcc_reg_base;
void __iomem *ipcc_io_mem;
uint32_t ipcc_size;
u32 protocol_id;
u32 ipcc_client_vid;
u32 ipcc_client_pid;
u32 ipcc_fctl_vid;
u32 ipcc_fctl_pid;
/* table with mapping of ipc client for each hw-fence client */
struct hw_fence_client_ipc_map *ipc_clients_table;
/* qtime reg */
phys_addr_t qtime_reg_base;
void __iomem *qtime_io_mem;
uint32_t qtime_size;
/* synchronize client_ids registration and deregistration */
struct mutex clients_register_lock;
/* table with registered client handles */
struct msm_hw_fence_client **clients;
bool fctl_ready;
/* state variables */
bool ipcc_dpu_initialized;
#if IS_ENABLED(CONFIG_DEBUG_FS)
bool ipcc_val_initialized;
#endif /* CONFIG_DEBUG_FS */
spinlock_t dma_fence_table_lock;
/* table with internal dma-fences created by this driver on clients' behalf */
DECLARE_HASHTABLE(dma_fence_table, DMA_FENCE_HASH_TABLE_BIT);
/* soccp is present */
bool has_soccp;
struct task_struct *soccp_listener_thread;
wait_queue_head_t soccp_wait_queue;
atomic_t signaled_clients_mask;
struct hw_fence_soccp soccp_props;
};
/**
* struct msm_hw_fence_queue_payload - hardware fence clients queues payload.
* @size: size of queue payload
* @type: type of queue payload
* @version: version of queue payload. High eight bits are for major and lower eight
* bits are for minor version
* @ctxt_id: context id of the dma fence
* @seqno: sequence number of the dma fence
* @hash: fence hash
* @flags: see MSM_HW_FENCE_FLAG_* flags descriptions
* @client_data: data passed from and returned to waiting client upon fence signaling
* @error: error code for this fence, fence controller receives this
* error from the signaling client through the tx queue and
* propagates the error to the waiting client through rx queue
* @timestamp_lo: low 32-bits of qtime of when the payload is written into the queue
* @timestamp_hi: high 32-bits of qtime of when the payload is written into the queue
* @reserve: reserved
*/
struct msm_hw_fence_queue_payload {
u32 size;
u16 type;
u16 version;
u64 ctxt_id;
u64 seqno;
u64 hash;
u64 flags;
u64 client_data;
u32 error;
u32 timestamp_lo;
u32 timestamp_hi;
u32 reserve;
};
/**
* struct msm_hw_fence_event - hardware fence ctl debug event
* @time: qtime when the event is logged
* @cpu: cpu id where the event is logged
* @data_cnt: count of valid data available in the data field
* @data: debug data logged by the event
*/
struct msm_hw_fence_event {
u64 time;
u32 cpu;
u32 data_cnt;
u32 data[HW_FENCE_EVENT_MAX_DATA];
};
/**
* struct msm_hw_fence - structure holding each hw fence data.
* @valid: field updated when a hw-fence is reserved. True if hw-fence is in use
* @error: field to hold a hw-fence error
* @ctx_id: context id
* @seq_id: sequence id
* @wait_client_mask: bitmask holding the waiting-clients of the fence
* @fence_allocator: field to indicate the client_id that reserved the fence
* @fence_signal_client: client that signaled the fence
* @lock: this field is required to share information between Driver & Driver or between
* Driver & FenceCTL. It must be a 64-bit atomic inter-processor lock.
* @flags: field to indicate the state of the fence
* @parent_list: list of indexes with the parents for a child-fence in a join-fence
* @parents_cnt: total number of parents for a child-fence in a join-fence
* @pending_child_cnt: children refcount for a parent-fence in a join-fence. Access must be atomic
* or locked
* @fence_create_time: debug info with the create time timestamp
* @fence_trigger_time: debug info with the trigger time timestamp
* @fence_wait_time: debug info with the register-for-wait timestamp
* @refcount: refcount on the hw-fence. This is split into multiple fields, see
* HW_FENCE_HLOS_REFCOUNT_MASK and HW_FENCE_FCTL_REFCOUNT and HW_FENCE_DMA_FENCE_REFCOUNT
* for more detail
* @h_synx: synx handle, nonzero if hw-fence is also backed by synx fence
* @client_data: array of data optionally passed from and returned to clients waiting on the fence
* during fence signaling
*/
struct msm_hw_fence {
u32 valid;
u32 error;
u64 ctx_id;
u64 seq_id;
u64 wait_client_mask;
u32 fence_allocator;
u32 fence_signal_client;
u64 lock; /* Datatype must be 64-bit. */
u64 flags;
u64 parent_list[MSM_HW_FENCE_MAX_JOIN_PARENTS];
u32 parents_cnt;
u32 pending_child_cnt;
u64 fence_create_time;
u64 fence_trigger_time;
u64 fence_wait_time;
u32 refcount;
u32 h_synx;
u64 client_data[HW_FENCE_MAX_CLIENTS_WITH_DATA];
};
int hw_fence_init(struct hw_fence_driver_data *drv_data);
int hw_fence_alloc_client_resources(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client,
struct msm_hw_fence_mem_addr *mem_descriptor);
int hw_fence_init_controller_signal(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client);
int hw_fence_init_controller_resources(struct msm_hw_fence_client *hw_fence_client);
void hw_fence_cleanup_client(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client);
void hw_fence_utils_reset_queues(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client);
int hw_fence_create(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, u64 hlos_key,
u64 context, u64 seqno, u64 *hash);
int hw_fence_add_callback(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash);
int hw_fence_destroy(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, u64 hlos_key,
u64 context, u64 seqno);
int hw_fence_destroy_with_hash(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, u64 hash);
int hw_fence_destroy_refcount(struct hw_fence_driver_data *drv_data, u64 hash, u32 ref);
int hw_fence_process_fence_array(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client,
struct dma_fence_array *array, u64 *hash_join_fence, u64 client_data);
int hw_fence_process_fence(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash,
u64 client_data);
int hw_fence_update_queue(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id, u64 seqno, u64 hash,
u64 flags, u64 client_data, u32 error, int queue_type);
int hw_fence_update_queue_helper(struct hw_fence_driver_data *drv_data, u32 client_id,
struct msm_hw_fence_queue *queue, u16 type, u64 ctxt_id, u64 seqno, u64 hash, u64 flags,
u64 client_data, u32 error, int queue_type);
int hw_fence_update_existing_txq_payload(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, u64 hash, u32 error);
inline u64 hw_fence_get_qtime(struct hw_fence_driver_data *drv_data);
char *_get_queue_type(int queue_type);
int hw_fence_read_queue(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence_queue_payload *payload,
int queue_type);
int hw_fence_read_queue_helper(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_queue *queue, struct msm_hw_fence_queue_payload *payload);
void hw_fence_get_queue_idx_ptrs(struct hw_fence_driver_data *drv_data, void *va_header,
u32 **rd_idx_ptr, u32 **wr_idx_ptr, u32 **tx_wm_ptr);
int hw_fence_register_wait_client(struct hw_fence_driver_data *drv_data,
struct dma_fence *fence, struct msm_hw_fence_client *hw_fence_client, u64 context,
u64 seqno, u64 *hash, u64 client_data);
struct msm_hw_fence *msm_hw_fence_find(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, u64 hlos_key,
u64 context, u64 seqno, u64 *hash);
struct msm_hw_fence *hw_fence_find_with_dma_fence(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, struct dma_fence *fence, u64 *hash,
bool *is_signaled, bool create);
enum hw_fence_client_data_id hw_fence_get_client_data_id(enum hw_fence_client_id client_id);
int hw_fence_signal_fence(struct hw_fence_driver_data *drv_data, struct dma_fence *fence, u64 hash,
u32 error, bool release_ref);
int hw_fence_get_flags_error(struct hw_fence_driver_data *drv_data, u64 hash, u64 *flags,
u32 *error);
int hw_fence_update_hsynx(struct hw_fence_driver_data *drv_data, u64 hash, u32 h_synx,
bool wait_for);
int hw_fence_ssr_cleanup_table(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence *hw_fences_tbl, u32 table_total_entries, u64 in_flight_lock);
/* apis for internally managed dma-fence */
struct dma_fence *hw_dma_fence_init(struct msm_hw_fence_client *hw_fence_client, u64 context,
u64 seqno);
struct dma_fence *hw_fence_internal_dma_fence_create(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, u64 *hash);
struct dma_fence *hw_fence_dma_fence_find(struct hw_fence_driver_data *drv_data,
u64 hash, bool incr_refcount);
/* internal checks used by msm_hw_fence and synx_hwfence functions */
int hw_fence_check_hw_fence_driver(struct hw_fence_driver_data *drv_data);
int hw_fence_check_valid_client(struct hw_fence_driver_data *drv_data, void *client_handle);
int hw_fence_check_valid_fctl(struct hw_fence_driver_data *drv_data, void *client_handle);
#endif /* __HW_FENCE_DRV_INTERNAL_H */


@@ -0,0 +1,206 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HW_FENCE_DRV_UTILS_H
#define __HW_FENCE_DRV_UTILS_H
/**
* HW_FENCE_MAX_CLIENT_TYPE_STATIC:
* Total number of client types without configurable number of sub-clients (GFX, DPU, VAL)
*/
#define HW_FENCE_MAX_CLIENT_TYPE_STATIC 3
/**
* HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE:
* Maximum number of client types with configurable number of sub-clients (e.g. IPE, VPU, IFE, IPA)
*/
#define HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE 15
/**
* HW_FENCE_MAX_STATIC_CLIENTS_INDEX:
* Maximum number of static clients, i.e. clients without configurable numbers of sub-clients
*/
#define HW_FENCE_MAX_STATIC_CLIENTS_INDEX HW_FENCE_CLIENT_ID_IPE
/**
* enum hw_fence_mem_reserve - Types of reservations for the carved-out memory.
* HW_FENCE_MEM_RESERVE_CTRL_QUEUE: Reserve memory for the ctrl rx/tx queues.
* HW_FENCE_MEM_RESERVE_LOCKS_REGION: Reserve memory for the per-client locks memory region.
* HW_FENCE_MEM_RESERVE_TABLE: Reserve memory for the hw-fences global table.
* HW_FENCE_MEM_RESERVE_CLIENT_QUEUE: Reserve memory per-client for the rx/tx queues.
* HW_FENCE_MEM_RESERVE_EVENTS_BUFF: Reserve memory for the debug events
*/
enum hw_fence_mem_reserve {
HW_FENCE_MEM_RESERVE_CTRL_QUEUE,
HW_FENCE_MEM_RESERVE_LOCKS_REGION,
HW_FENCE_MEM_RESERVE_TABLE,
HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
HW_FENCE_MEM_RESERVE_EVENTS_BUFF
};
/**
* global_atomic_store() - Inter-processor lock
* @drv_data: hw fence driver data
* @lock: memory to lock
* @val: if true, the API locks the memory; if false, it unlocks it
*/
void global_atomic_store(struct hw_fence_driver_data *drv_data, uint64_t *lock, bool val);
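/*
 * Usage sketch (illustrative, not part of this header; assumes 'lock_ptr'
 * points into the locks region reserved via HW_FENCE_MEM_RESERVE_LOCKS_REGION):
 *
 *	global_atomic_store(drv_data, lock_ptr, true);	// lock
 *	 ...access memory shared across processors...
 *	global_atomic_store(drv_data, lock_ptr, false);	// unlock
 */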
/**
* hw_fence_utils_init_virq() - Initialize doorbell (i.e. vIRQ) for SVM to HLOS signaling
* @drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_init_virq(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_init_soccp_irq() - Initialize interrupt handler for SOCCP to HLOS signaling
* @drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_init_soccp_irq(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_register_soccp_ssr_notifier() - registers rproc ssr notifier for soccp
* @drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_register_soccp_ssr_notifier(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_process_signaled_clients_mask() - Process the mask containing HW Fence client IDs
* that HW Fence Driver is responsible for, i.e.
* ctrl queue and validation clients.
* @drv_data: hw fence driver data.
* @mask: mask with signaled clients
*/
void hw_fence_utils_process_signaled_clients_mask(struct hw_fence_driver_data *drv_data,
u64 mask);
/**
* hw_fence_utils_alloc_mem() - Allocates the carved-out memory pool that will be used for the HW
* Fence global table, locks and queues.
* @hw_fence_drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_alloc_mem(struct hw_fence_driver_data *hw_fence_drv_data);
/**
* hw_fence_utils_reserve_mem() - Reserves memory from the carved-out memory pool.
* @drv_data: hw fence driver data.
* @type: memory reservation type.
* @phys: pointer to fill with the physical address of the reserved memory
* @pa: pointer to fill with the address of the reserved memory
* @size: pointer to fill with the size of the reserved memory
* @client_id: client id, used for per-client reservation types
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_reserve_mem(struct hw_fence_driver_data *drv_data,
enum hw_fence_mem_reserve type, phys_addr_t *phys, void **pa, u32 *size, int client_id);
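/*
 * Usage sketch (illustrative; 'phys', 'va' and 'size' are outputs filled
 * by the call, 'client_id' is only meaningful for per-client reservations):
 *
 *	phys_addr_t phys;
 *	void *va;
 *	u32 size;
 *	int ret;
 *
 *	ret = hw_fence_utils_reserve_mem(drv_data, HW_FENCE_MEM_RESERVE_CLIENT_QUEUE,
 *		&phys, &va, &size, client_id);
 */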
/**
* hw_fence_utils_parse_dt_props() - Init dt properties
* @drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_parse_dt_props(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_map_ipcc() - Maps IPCC registers and enables signaling
* @drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_map_ipcc(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_map_qtime() - Maps qtime register
* @drv_data: hw fence driver data
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_map_qtime(struct hw_fence_driver_data *drv_data);
/**
* hw_fence_utils_cleanup_fence() - Cleanup the hw-fence from a specified client
* @drv_data: hw fence driver data
* @hw_fence_client: client, for which the fence must be cleared
* @hw_fence: hw-fence to cleanup
* @hash: hash of the hw-fence to cleanup
* @reset_flags: flags to determine how to handle the reset
*
* Returns zero if success, otherwise returns negative error code.
*/
int hw_fence_utils_cleanup_fence(struct hw_fence_driver_data *drv_data,
struct msm_hw_fence_client *hw_fence_client, struct msm_hw_fence *hw_fence, u64 hash,
u32 reset_flags);
/**
* hw_fence_utils_fence_error_cb() - Invokes fence error callback registered by specified client
*
* @hw_fence_client: client, for which fence error callback must be invoked
* @ctxt_id: context id of the hw-fence
* @seqno: sequence number of the hw-fence
* @hash: hash of the hw-fence
* @flags: flags of the hw-fence
* @error: error of the hw-fence
*
* Returns zero if success, otherwise returns negative error code
*/
int hw_fence_utils_fence_error_cb(struct msm_hw_fence_client *hw_fence_client, u64 ctxt_id,
u64 seqno, u64 hash, u64 flags, u32 error);
/**
* hw_fence_utils_get_client_id_priv() - Gets the index into clients struct within hw fence driver
* from the client_id used externally
*
* Performs a 1-to-1 mapping for all client IDs less than HW_FENCE_MAX_STATIC_CLIENTS_INDEX,
* otherwise consolidates client IDs of clients with configurable number of sub-clients. Fails if
* provided with client IDs for such clients when support for those clients is not configured in
* device-tree.
*
* @drv_data: hw fence driver data
* @client_id: external client_id to get internal client_id for
*
* Returns client_id < drv_data->clients_num if success, otherwise returns HW_FENCE_CLIENT_MAX
*/
enum hw_fence_client_id hw_fence_utils_get_client_id_priv(struct hw_fence_driver_data *drv_data,
enum hw_fence_client_id client_id);
/**
* hw_fence_utils_get_queues_num() - Returns number of client queues for the client_id.
*
* @drv_data: driver data
* @client_id: hw fence driver client id
*
* Returns: number of client queues
*/
int hw_fence_utils_get_queues_num(struct hw_fence_driver_data *drv_data, int client_id);
/**
* hw_fence_utils_get_skip_fctl_ref() - Returns if client avoids creating fences with fctl
* refcount initialized.
*
* @drv_data: driver data
* @client_id: hw fence driver client id
*
* Returns: whether the client skips initializing the fctl refcount for its fences
*/
int hw_fence_utils_get_skip_fctl_ref(struct hw_fence_driver_data *drv_data, int client_id);
/**
* hw_fence_utils_set_power_vote() - Sets the power vote for soccp.
*
* @drv_data: driver data
* @state: power state to set
*
* Returns: 0 if successful, error if not
*/
int hw_fence_utils_set_power_vote(struct hw_fence_driver_data *drv_data, bool state);
#endif /* __HW_FENCE_DRV_UTILS_H */

View File

@@ -0,0 +1,713 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __MSM_HW_FENCE_H
#define __MSM_HW_FENCE_H
#include <linux/types.h>
#include <linux/dma-fence.h>
extern struct hw_fence_driver_data *hw_fence_drv_data;
extern bool hw_fence_driver_enable;
/**
* MSM_HW_FENCE_FLAG_ENABLED_BIT - Hw-fence is enabled for the dma_fence.
*
* Drivers set this flag in the dma_fence 'flags' to fences that
* are backed up by a hw-fence.
*/
#define MSM_HW_FENCE_FLAG_ENABLED_BIT 31
/**
* MSM_HW_FENCE_FLAG_SIGNALED_BIT - Hw-fence is signaled for the dma_fence.
*
* This flag is set by hw-fence driver when a client wants to add itself as
* a waiter for this hw-fence. The client uses this flag to avoid adding itself
* as a waiter for a fence that is already retired.
*/
#define MSM_HW_FENCE_FLAG_SIGNALED_BIT 30
/**
* MSM_HW_FENCE_ERROR_RESET - Hw-fence flagged as error due to forced reset from producer.
*/
#define MSM_HW_FENCE_ERROR_RESET BIT(0)
/**
* MSM_HW_FENCE_RESET_WITHOUT_ERROR: Resets client and its hw-fences, signaling them without error.
* MSM_HW_FENCE_RESET_WITHOUT_DESTROY: Resets client and its hw-fences, signaling without
* destroying the fences.
*/
#define MSM_HW_FENCE_RESET_WITHOUT_ERROR BIT(0)
#define MSM_HW_FENCE_RESET_WITHOUT_DESTROY BIT(1)
/**
* MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE: Updates client tx queue error by moving fence with error to
* beginning of queue.
*/
#define MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE BIT(0)
/**
* MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT - Maximum number of signals per client
*/
#define MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT 64
/**
* MSM_HW_FENCE_DBG_DUMP_QUEUES: Dumps queues information
* MSM_HW_FENCE_DBG_DUMP_TABLE: Dumps hwfence table
* MSM_HW_FENCE_DBG_DUMP_EVENTS: Dumps hwfence ctl events
*/
#define MSM_HW_FENCE_DBG_DUMP_QUEUES BIT(0)
#define MSM_HW_FENCE_DBG_DUMP_TABLE BIT(1)
#define MSM_HW_FENCE_DBG_DUMP_EVENTS BIT(2)
/**
* struct msm_hw_fence_create_params - Creation parameters.
*
* @name : Optional parameter associating a name with the object for debug purposes.
* Only the first 64 bytes are used; the rest are ignored.
* @handle : Pointer to fence handle (filled by function).
* @fence : Pointer to fence.
* @flags : flags for customization.
*/
struct msm_hw_fence_create_params {
const char *name;
u64 *handle;
void *fence;
u32 flags;
};
/**
* struct msm_hw_fence_hfi_queue_table_header - HFI queue table structure.
* @version: HFI protocol version.
* @size: Queue table size in dwords.
* @qhdr0_offset: First queue header offset (dwords) in this table.
* @qhdr_size: Queue header size.
* @num_q: Number of queues defined in this table.
* @num_active_q: Number of active queues.
*/
struct msm_hw_fence_hfi_queue_table_header {
u32 version;
u32 size;
u32 qhdr0_offset;
u32 qhdr_size;
u32 num_q;
u32 num_active_q;
};
/**
* struct msm_hw_fence_hfi_queue_table_header_v2 - Version 2 of HFI queue table structure.
* @version: HFI protocol version.
* @size: Queue table size in dwords.
* @qhdr0_offset: First queue header offset (dwords) in this table.
* @qhdr_size: Queue header size.
* @num_q: Number of queues defined in this table.
* @num_active_q: Number of active queues.
* @reserved: reserved memory used for 64-byte alignment
*/
struct msm_hw_fence_hfi_queue_table_header_v2 {
u32 version;
u32 size;
u32 qhdr0_offset;
u32 qhdr_size;
u32 num_q;
u32 num_active_q;
u32 reserved[10];
};
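/*
 * Layout arithmetic (derived from the fields above): the six u32 fields
 * occupy 24 bytes and reserved[10] adds 40 bytes, so
 * sizeof(struct msm_hw_fence_hfi_queue_table_header_v2) == 64, keeping
 * the table header on a 64-byte boundary.
 */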
/**
* struct msm_hw_fence_hfi_queue_header - HFI queue header structure.
* @status: Active = 1, Inactive = 0.
* @start_addr: Starting address of the queue.
* @type: Queue type (rx/tx).
* @queue_size: Size of the queue.
* @pkt_size: Size of the queue packet entries,
* 0 - means variable size of message in the queue,
* non-zero - size of the packet, fixed.
* @pkt_drop_cnt: Number of packets dropped by the sender.
* @rx_wm: Receiver watermark, applicable in event driven mode.
* @tx_wm: Sender watermark, applicable in event driven mode.
* @rx_req: Receiver sets this bit if queue is empty.
* @tx_req: Sender sets this bit if queue is full.
* @rx_irq_status: Receiver sets this bit and triggers an interrupt to the
* sender after packets are dequeued. Sender clears this bit.
* @tx_irq_status: Sender sets this bit and triggers an interrupt to the
* receiver after packets are queued. Receiver clears this bit.
* @read_index: read index of the queue.
* @write_index: write index of the queue.
*/
struct msm_hw_fence_hfi_queue_header {
u32 status;
u32 start_addr;
u32 type;
u32 queue_size;
u32 pkt_size;
u32 pkt_drop_cnt;
u32 rx_wm;
u32 tx_wm;
u32 rx_req;
u32 tx_req;
u32 rx_irq_status;
u32 tx_irq_status;
u32 read_index;
u32 write_index;
};
/**
* struct msm_hw_fence_hfi_queue_header_v2 - Version 2 of HFI queue header structure.
* @status: Active = 1, Inactive = 0.
* @start_addr: Starting address of the queue.
* @type: Queue type (rx/tx).
* @queue_size: Size of the queue.
* @pkt_size: Size of the queue packet entries,
* 0 - means variable size of message in the queue,
* non-zero - size of the packet, fixed.
* @pkt_drop_cnt: Number of packets dropped by the sender.
* @rx_wm: Receiver watermark, applicable in event driven mode.
* @tx_wm: Sender watermark, applicable in event driven mode.
* @rx_req: Receiver sets this bit if queue is empty.
* @tx_req: Sender sets this bit if queue is full.
* @rx_irq_status: Receiver sets this bit and triggers an interrupt to the
* sender after packets are dequeued. Sender clears this bit.
* @tx_irq_status: Sender sets this bit and triggers an interrupt to the
* receiver after packets are queued. Receiver clears this bit.
* @init_reserved: reservation for 64-byte alignment of read and write indexes
* @read_index: read index of the queue.
* @read_index_reserved: reservation for 64-byte alignment of read and write indexes
* @write_index: write index of the queue.
* @write_index_reserved: reservation for 64-byte alignment of read and write indexes
*/
struct msm_hw_fence_hfi_queue_header_v2 {
u32 status;
u32 start_addr;
u32 type;
u32 queue_size;
u32 pkt_size;
u32 pkt_drop_cnt;
u32 rx_wm;
u32 tx_wm;
u32 rx_req;
u32 tx_req;
u32 rx_irq_status;
u32 tx_irq_status;
u32 init_reserved[4];
u32 read_index;
u32 read_index_reserved[15];
u32 write_index;
u32 write_index_reserved[15];
};
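/*
 * Layout arithmetic (derived from the fields above): the twelve u32
 * fields before init_reserved occupy 48 bytes and init_reserved[4] adds
 * 16, placing read_index at offset 64; read_index plus
 * read_index_reserved[15] span another 64 bytes, placing write_index at
 * offset 128. Reader and writer indexes thus sit on separate 64-byte
 * boundaries.
 */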
/**
* struct msm_hw_fence_mem_addr - Memory descriptor of the queue allocated by
* the fence driver for each client during
* registration.
* @virtual_addr: Kernel virtual address of the queue.
* @device_addr: Physical address of the memory object.
* @size: Size of the memory.
* @mem_data: Internal pointer with the attributes of the allocation.
*/
struct msm_hw_fence_mem_addr {
void *virtual_addr;
phys_addr_t device_addr;
u64 size;
void *mem_data;
};
/**
* struct msm_hw_fence_cb_data - Data passed back in fence error callback.
* @data: data registered with callback
* @fence: fence signaled with error
*/
struct msm_hw_fence_cb_data {
void *data;
struct dma_fence *fence;
};
/**
* msm_hw_fence_error_cb: Callback function registered by waiting clients.
* Dispatched when client is waiting on a fence
* signaled with error.
*
* @handle: handle of fence signaled with error
* @error: error signaled for the fence
* @cb_data: pointer to struct containing opaque pointer registered with callback
* and fence information
*/
typedef void (*msm_hw_fence_error_cb_t)(u32 handle, int error, void *cb_data);
/**
* enum hw_fence_client_id - Unique identifier of the supported clients.
* @HW_FENCE_CLIENT_ID_CTX0: GFX Client.
* @HW_FENCE_CLIENT_ID_CTL0: DPU Client 0.
* @HW_FENCE_CLIENT_ID_CTL1: DPU Client 1.
* @HW_FENCE_CLIENT_ID_CTL2: DPU Client 2.
* @HW_FENCE_CLIENT_ID_CTL3: DPU Client 3.
* @HW_FENCE_CLIENT_ID_CTL4: DPU Client 4.
* @HW_FENCE_CLIENT_ID_CTL5: DPU Client 5.
* @HW_FENCE_CLIENT_ID_VAL0: debug Validation client 0.
* @HW_FENCE_CLIENT_ID_VAL1: debug Validation client 1.
* @HW_FENCE_CLIENT_ID_VAL2: debug Validation client 2.
* @HW_FENCE_CLIENT_ID_VAL3: debug Validation client 3.
* @HW_FENCE_CLIENT_ID_VAL4: debug Validation client 4.
* @HW_FENCE_CLIENT_ID_VAL5: debug Validation client 5.
* @HW_FENCE_CLIENT_ID_VAL6: debug Validation client 6.
* @HW_FENCE_CLIENT_ID_IPE: IPE Client.
* @HW_FENCE_CLIENT_ID_VPU: VPU Client.
* @HW_FENCE_CLIENT_ID_IPA: IPA Client.
* @HW_FENCE_CLIENT_ID_IFE0: IFE0 Client 0.
* @HW_FENCE_CLIENT_ID_IFE1: IFE1 Client 0.
* @HW_FENCE_CLIENT_ID_IFE2: IFE2 Client 0.
* @HW_FENCE_CLIENT_ID_IFE3: IFE3 Client 0.
* @HW_FENCE_CLIENT_ID_IFE4: IFE4 Client 0.
* @HW_FENCE_CLIENT_ID_IFE5: IFE5 Client 0.
* @HW_FENCE_CLIENT_ID_IFE6: IFE6 Client 0.
* @HW_FENCE_CLIENT_ID_IFE7: IFE7 Client 0.
* @HW_FENCE_CLIENT_ID_IFE8: IFE8 Client 0.
* @HW_FENCE_CLIENT_ID_IFE9: IFE9 Client 0.
* @HW_FENCE_CLIENT_ID_IFE10: IFE10 Client 0.
* @HW_FENCE_CLIENT_ID_IFE11: IFE11 Client 0.
* @HW_FENCE_CLIENT_MAX: Max number of clients, any client must be added
* before this enum.
*/
enum hw_fence_client_id {
HW_FENCE_CLIENT_ID_CTX0 = 0x1,
HW_FENCE_CLIENT_ID_CTL0,
HW_FENCE_CLIENT_ID_CTL1,
HW_FENCE_CLIENT_ID_CTL2,
HW_FENCE_CLIENT_ID_CTL3,
HW_FENCE_CLIENT_ID_CTL4,
HW_FENCE_CLIENT_ID_CTL5,
HW_FENCE_CLIENT_ID_VAL0,
HW_FENCE_CLIENT_ID_VAL1,
HW_FENCE_CLIENT_ID_VAL2,
HW_FENCE_CLIENT_ID_VAL3,
HW_FENCE_CLIENT_ID_VAL4,
HW_FENCE_CLIENT_ID_VAL5,
HW_FENCE_CLIENT_ID_VAL6,
HW_FENCE_CLIENT_ID_IPE,
HW_FENCE_CLIENT_ID_VPU = HW_FENCE_CLIENT_ID_IPE + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IPA = HW_FENCE_CLIENT_ID_VPU + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE0 = HW_FENCE_CLIENT_ID_IPA + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE1 = HW_FENCE_CLIENT_ID_IFE0 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE2 = HW_FENCE_CLIENT_ID_IFE1 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE3 = HW_FENCE_CLIENT_ID_IFE2 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE4 = HW_FENCE_CLIENT_ID_IFE3 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE5 = HW_FENCE_CLIENT_ID_IFE4 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE6 = HW_FENCE_CLIENT_ID_IFE5 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE7 = HW_FENCE_CLIENT_ID_IFE6 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE8 = HW_FENCE_CLIENT_ID_IFE7 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE9 = HW_FENCE_CLIENT_ID_IFE8 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE10 = HW_FENCE_CLIENT_ID_IFE9 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_ID_IFE11 = HW_FENCE_CLIENT_ID_IFE10 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT,
HW_FENCE_CLIENT_MAX = HW_FENCE_CLIENT_ID_IFE11 + MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT
};
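/*
 * Illustrative ID arithmetic (derived from the enum above): with
 * MSM_HW_FENCE_MAX_SIGNAL_PER_CLIENT = 64, HW_FENCE_CLIENT_ID_IPE is the
 * 15th value (0xF), so HW_FENCE_CLIENT_ID_VPU = 0xF + 64 = 0x4F,
 * HW_FENCE_CLIENT_ID_IPA = 0x4F + 64 = 0x8F, and each further
 * configurable client type reserves another 64 signal slots.
 */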
#if IS_ENABLED(CONFIG_QTI_HW_FENCE)
/**
* msm_hw_fence_register() - Registers a client with the HW Fence Driver.
* @client_id: ID of the client that is being registered.
* @mem_descriptor: Pointer to fill the memory descriptor. Fence
* controller driver fills this pointer with the
* memory descriptor for the rx/tx queues.
*
* This call initializes any shared memory region for the tables/queues
* required for the HW Fence Driver to communicate with Fence Controller
* for this client_id and fills the memory descriptor for the queues
* that the client hw cores need to manage.
*
* Return: Handle to the client object that must be used for further calls
* to the fence controller driver or NULL in case of error.
*
* The returned handle is used internally by the fence controller driver
* in further calls to identify the client and access any resources
* allocated for this client.
*/
void *msm_hw_fence_register(
enum hw_fence_client_id client_id,
struct msm_hw_fence_mem_addr *mem_descriptor);
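/*
 * Example (illustrative sketch, not a prescribed sequence; the client id
 * and error handling are assumptions):
 *
 *	struct msm_hw_fence_mem_addr mem_desc;
 *	void *client;
 *
 *	client = msm_hw_fence_register(HW_FENCE_CLIENT_ID_CTL0, &mem_desc);
 *	if (!client)
 *		return -EINVAL;
 */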
/**
* msm_hw_fence_deregister() - Deregisters a client that was previously
* registered with the HW Fence Driver.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_deregister(void *client_handle);
/**
* msm_hw_fence_create() - Creates a new hw fence.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
* @params: Hw fence creation parameters containing dma fence
* to create its associated hw-fence.
*
* This call creates the hw fence and registers it with the fence
* controller. After creation, it is the Client Driver's responsibility
* to 'destroy' this fence to prevent any leakage of hw-fence resources:
* once the fence is no longer required, i.e. when all references to the
* dma-fence have been released, 'msm_hw_fence_destroy' must be called.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_create(void *client_handle,
struct msm_hw_fence_create_params *params);
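/*
 * Example (illustrative sketch; 'client' and 'fence' are assumed to come
 * from msm_hw_fence_register() and the client's dma-fence producer):
 *
 *	struct msm_hw_fence_create_params params = {0};
 *	u64 handle;
 *	int ret;
 *
 *	params.name = "my-fence";	// hypothetical debug name
 *	params.handle = &handle;
 *	params.fence = fence;
 *	ret = msm_hw_fence_create(client, &params);
 */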
/**
* msm_hw_fence_destroy() - Destroys a hw fence.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
* @fence: Sw dma-fence to destroy its associated hw-fence.
*
* The fence destroyed by this function, is a fence that must have been
* created by the hw fence driver through 'msm_hw_fence_create' call.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_destroy(void *client_handle,
struct dma_fence *fence);
/**
* msm_hw_fence_destroy_with_handle() - Destroys a hw fence through its handle.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
* @handle: handle for hw-fence to destroy
*
* The fence destroyed by this function, is a fence that must have been
* created by the hw fence driver through 'msm_hw_fence_create' call.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle);
/**
* msm_hw_fence_wait_update_v2() - Register or unregister the Client with the
* Fence Controller as a waiting-client of the
* list of fences received as parameter.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
* @fences: Pointer to an array of pointers containing the fences to
* 'wait-on' for this client. If a 'fence-array' fence is passed,
* driver will iterate through the individual 'fences' which are
* part of the 'fence-array' and will register to wait-for-all the
* individual fences of the fence-array.
* A 'fence-array' passed as parameter can only have 'individual'
* fences and cannot have another nested 'fence-array',
* otherwise this API will return failure.
* Also, all the 'fences' in this list must have a corresponding
* hw-fence that was registered by the producer of the fence,
* otherwise, this API will return failure.
* @handles: Optional pointer to an array of handles of 'fences'.
* If non-null, these handles are filled by the function.
* This list must have the same size as 'fences' if present.
* @client_data_list: Optional pointer to an array of u64 client_data
* values for each fence in 'fences'.
* If non-null, this list must have the same size as
* the 'fences' list. This client registers each fence
* with the client_data value at the same index so that
* this value is returned to the client upon signaling
* of the fence.
* If a null pointer is provided, a default value of
* zero is registered as the client_data of each fence.
* @num_fences: Number of elements in the 'fences' list (and 'handles' and
* 'client_data_list' if either or both are present).
* @reg: Boolean to indicate if register or unregister for waiting on
* the hw-fence.
*
* If the 'register' boolean is set as true, this API will register with
* the Fence Controller the Client as a consumer (i.e. 'wait-client') of
* the fences received as parameter.
* The function returns immediately after the client is registered
* (i.e. this function does not wait for the fences to be signaled).
* When any of the fences received as parameter is signaled (or all the
* fences in case of a fence-array), the Fence Controller will trigger the
* hw signal to notify the Client hw-core about the signaled fence (or
* fences in case of a fence-array). That is, signaling of the hw fence is
* a hw-to-hw communication between the Fence Controller and the Client
* hw-core, and this API is only the interface that allows the Client
* Driver to register its Client hw-core for the hw-to-hw notification.
* If the 'register' boolean is set as false, this API will unregister the
* Client as a consumer with the Fence Controller; this is used when a
* timeout occurs while waiting for a fence and the client driver wants to
* unregister for the signal.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_wait_update_v2(void *client_handle,
struct dma_fence **fences, u64 *handles, u64 *client_data_list, u32 num_fences, bool reg);
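/*
 * Example (illustrative sketch; 'fence_a' and 'fence_b' are assumed
 * producer-backed dma-fences, and 'data' values are arbitrary):
 *
 *	struct dma_fence *fences[2] = { fence_a, fence_b };
 *	u64 handles[2];
 *	u64 data[2] = { 0xA, 0xB };	// returned to the client on signaling
 *	int ret;
 *
 *	ret = msm_hw_fence_wait_update_v2(client, fences, handles, data, 2, true);
 */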
/**
* msm_hw_fence_wait_update() - Register or unregister the Client with the
* Fence Controller as a waiting-client of the
* list of fences received as parameter.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
* @fences: Pointer to an array of pointers containing the fences to
* 'wait-on' for this client. If a 'fence-array' fence is passed,
* driver will iterate through the individual 'fences' which are
* part of the 'fence-array' and will register to wait-for-all the
* individual fences of the fence-array.
* A 'fence-array' passed as parameter can only have 'individual'
* fences and cannot have another nested 'fence-array',
* otherwise this API will return failure.
* Also, all the 'fences' in this list must have a corresponding
* hw-fence that was registered by the producer of the fence,
* otherwise, this API will return failure.
* @num_fences: Number of elements in the 'fences' list.
* @reg: Boolean to indicate if register or unregister for waiting on
* the hw-fence.
*
* If the 'register' boolean is set as true, this API will register with
* the Fence Controller the Client as a consumer (i.e. 'wait-client') of
* the fences received as parameter.
* The function returns immediately after the client is registered
* (i.e. this function does not wait for the fences to be signaled).
* When any of the fences received as parameter is signaled (or all the
* fences in case of a fence-array), the Fence Controller will trigger the
* hw signal to notify the Client hw-core about the signaled fence (or
* fences in case of a fence-array). That is, signaling of the hw fence is
* a hw-to-hw communication between the Fence Controller and the Client
* hw-core, and this API is only the interface that allows the Client
* Driver to register its Client hw-core for the hw-to-hw notification.
* If the 'register' boolean is set as false, this API will unregister the
* Client as a consumer with the Fence Controller; this is used when a
* timeout occurs while waiting for a fence and the client driver wants to
* unregister for the signal.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_wait_update(void *client_handle,
struct dma_fence **fences, u32 num_fences, bool reg);
/**
* msm_hw_fence_reset_client() - Resets the HW Fence Client.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
* @reset_flags: Flags to choose the reset type. See MSM_HW_FENCE_RESET_*
* definitions.
*
* This function iterates through the HW Fences, removes the client from
* the waiting-client mask of any HW Fence, and signals the fences owned
* by that client.
* It should only be called by clients upon error, after a HW reset, to
* make sure the client is removed from any HW Fence it was registered to
* wait on, and any fence owned by the client is signaled.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags);
/**
* msm_hw_fence_reset_client_by_id() - Resets the HW Fence Client through
* its id.
* @client_id: id of client to reset
* @reset_flags: Flags to choose the reset type. See MSM_HW_FENCE_RESET_*
* definitions.
*
* This function iterates through the HW Fences, removes the client from
* the waiting-client mask of any HW Fence, and signals the fences owned
* by that client.
* It should only be called by clients upon error, after a HW reset, to
* make sure the client is removed from any HW Fence it was registered to
* wait on, and any fence owned by the client is signaled.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id, u32 reset_flags);
/**
* msm_hw_fence_update_txq() - Updates Client Tx Queue with the Fence info.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
* @handle: handle for fence to update in the Tx Queue.
* @flags: flags to set in the queue for the fence.
* @error: error to set in the queue for the fence.
*
* This function should only be used by clients that cannot have the Tx Queue
* updated by the Firmware or the HW Core.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error);
/**
* msm_hw_fence_update_txq_error() - Updates error field for fence already in Tx Queue.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
* @handle: handle for existing fence in Tx Queue to update.
* @error: error to set in the queue for the fence.
* @update_flags: flags to choose the update type. See MSM_HW_FENCE_UPDATE_ERROR_*
* definitions.
*
* This function should only be used by clients that cannot have the Tx Queue
* updated by the Firmware or the HW Core.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags);
/**
* msm_hw_fence_trigger_signal() - Triggers signal for the tx/rx signal pair
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
* @tx_client_id: id of the client triggering the signal.
* @rx_client_id: id of the client receiving the signal.
* @signal_id: id of the signal to trigger.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_trigger_signal(void *client_handle, u32 tx_client_id, u32 rx_client_id,
u32 signal_id);
/**
* msm_hw_fence_register_error_cb() - Register callback to be dispatched when
* HW Fence Client is waiting for a fence
* that is signaled with error.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
* @cb: pointer to callback function to be invoked
* @data: opaque pointer passed back with callback
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data);
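/*
 * Example (illustrative sketch; 'my_error_cb' and 'my_ctx' are
 * hypothetical client code):
 *
 *	static void my_error_cb(u32 handle, int error, void *cb_data)
 *	{
 *		struct msm_hw_fence_cb_data *data = cb_data;
 *
 *		pr_err("fence %u error %d fence:%pK\n", handle, error, data->fence);
 *	}
 *	...
 *	ret = msm_hw_fence_register_error_cb(client, my_error_cb, &my_ctx);
 */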
/**
* msm_hw_fence_deregister_error_cb() - Deregister callback to be dispatched when
* HW Fence Client is waiting for a fence
* that is signaled with error.
* @client_handle: Hw fence driver client handle, this handle was returned
* during the call 'msm_hw_fence_register' to register the
* client.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_deregister_error_cb(void *client_handle);
#else
static inline void *msm_hw_fence_register(enum hw_fence_client_id client_id,
struct msm_hw_fence_mem_addr *mem_descriptor)
{
return NULL;
}
static inline int msm_hw_fence_deregister(void *client_handle)
{
return -EINVAL;
}
static inline int msm_hw_fence_create(void *client_handle,
struct msm_hw_fence_create_params *params)
{
return -EINVAL;
}
static inline int msm_hw_fence_destroy(void *client_handle, struct dma_fence *fence)
{
return -EINVAL;
}
static inline int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle)
{
return -EINVAL;
}
static inline int msm_hw_fence_wait_update_v2(void *client_handle,
struct dma_fence **fences, u64 *handles, u64 *client_data_list, u32 num_fences, bool reg)
{
return -EINVAL;
}
static inline int msm_hw_fence_wait_update(void *client_handle,
struct dma_fence **fences, u32 num_fences, bool reg)
{
return -EINVAL;
}
static inline int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags)
{
return -EINVAL;
}
static inline int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id,
u32 reset_flags)
{
return -EINVAL;
}
static inline int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error)
{
return -EINVAL;
}
static inline int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error,
u32 update_flags)
{
return -EINVAL;
}
static inline int msm_hw_fence_trigger_signal(void *client_handle, u32 tx_client_id,
u32 rx_client_id, u32 signal_id)
{
return -EINVAL;
}
static inline int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb,
void *data)
{
return -EINVAL;
}
static inline int msm_hw_fence_deregister_error_cb(void *client_handle)
{
return -EINVAL;
}
#endif
#if IS_ENABLED(CONFIG_DEBUG_FS) && IS_ENABLED(CONFIG_QTI_HW_FENCE)
/**
* msm_hw_fence_dump_debug_data() - Dumps debug data information
* @client_handle: Hw fence driver client handle returned during 'msm_hw_fence_register'.
* @dump_flags: Flags to indicate which info to dump, see MSM_HW_FENCE_DBG_DUMP_** flags.
* @dump_clients_mask: Optional bitmask indicating, in addition to the caller of the API, which
* other clients to dump data from. E.g. a display client might want to
* dump info from all other clients it can receive fences from, like gfx.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask);
/**
* msm_hw_fence_dump_fence() - Dumps hw-fence information for a dma-fence
* @client_handle: Hw fence driver client handle returned during 'msm_hw_fence_register'.
* @fence: dma_fence to dump hw-fence information
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence);
#else
static inline int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags,
u32 dump_clients_mask)
{
return -EINVAL;
}
static inline int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence)
{
return -EINVAL;
}
#endif
#endif

View File

@@ -0,0 +1,3 @@
CONFIG_QTI_HW_FENCE=y
CONFIG_QTI_HW_FENCE_USE_SYNX=y
CONFIG_QTI_ENABLE_HW_FENCE_DEFAULT=y

File diff suppressed because it is too large

View File

@@ -0,0 +1,355 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <synx_interop.h>
#include "msm_hw_fence.h"
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_debug.h"
#include "hw_fence_drv_interop.h"
/**
* HW_FENCE_SYNX_FENCE_CLIENT_ID:
* ClientID for fences created to back synx fences
*/
#define HW_FENCE_SYNX_FENCE_CLIENT_ID (~(u32)1)
/**
* HW_FENCE_NATIVE_FENCE_CLIENT_ID:
* ClientID for fences created to back fences with native dma-fence producers
*/
#define HW_FENCE_NATIVE_FENCE_CLIENT_ID (~(u32)2)
struct synx_hwfence_interops synx_interops = {
.share_handle_status = NULL,
.get_fence = NULL,
.notify_recover = NULL,
};
int hw_fence_interop_to_synx_status(int hw_fence_status_code)
{
int synx_status_code;
switch (hw_fence_status_code) {
case 0:
synx_status_code = SYNX_SUCCESS;
break;
case -ENOMEM:
synx_status_code = -SYNX_NOMEM;
break;
case -EPERM:
synx_status_code = -SYNX_NOPERM;
break;
case -ETIMEDOUT:
synx_status_code = -SYNX_TIMEOUT;
break;
case -EALREADY:
synx_status_code = -SYNX_ALREADY;
break;
case -ENOENT:
synx_status_code = -SYNX_NOENT;
break;
case -EINVAL:
synx_status_code = -SYNX_INVALID;
break;
case -EBUSY:
synx_status_code = -SYNX_BUSY;
break;
case -EAGAIN:
synx_status_code = -SYNX_EAGAIN;
break;
default:
synx_status_code = hw_fence_status_code;
break;
}
return synx_status_code;
}
u32 hw_fence_interop_to_synx_signal_status(u32 flags, u32 error)
{
u32 status;
if (!(flags & MSM_HW_FENCE_FLAG_SIGNAL)) {
status = SYNX_STATE_ACTIVE;
goto end;
}
switch (error) {
case 0:
status = SYNX_STATE_SIGNALED_SUCCESS;
break;
case MSM_HW_FENCE_ERROR_RESET:
status = SYNX_STATE_SIGNALED_SSR;
break;
default:
status = error;
break;
}
end:
HWFNC_DBG_L("fence flags:%u err:%u status:%u\n", flags, error, status);
return status;
}
u32 hw_fence_interop_to_hw_fence_error(u32 status)
{
u32 error;
switch (status) {
case SYNX_STATE_INVALID:
HWFNC_ERR("converting error status for invalid fence\n");
error = SYNX_INVALID;
break;
case SYNX_STATE_ACTIVE:
HWFNC_ERR("converting error status for unsignaled fence\n");
error = 0;
break;
case SYNX_STATE_SIGNALED_SUCCESS:
error = 0;
break;
case SYNX_STATE_SIGNALED_SSR:
error = MSM_HW_FENCE_ERROR_RESET;
break;
default:
error = status;
break;
}
HWFNC_DBG_L("fence status:%u err:%u\n", status, error);
return error;
}
static int _update_interop_fence(struct synx_import_indv_params *params, u64 handle)
{
u32 signal_status;
int ret, error;
if (!params->new_h_synx || !synx_interops.share_handle_status) {
HWFNC_ERR("invalid new_h_synx:0x%pK share_handle_status:0x%pK\n",
params->new_h_synx, synx_interops.share_handle_status);
return -EINVAL;
}
ret = synx_interops.share_handle_status(params, handle, &signal_status);
if (ret || signal_status == SYNX_STATE_INVALID) {
HWFNC_ERR("failed to share handle and signal status handle:%llu ret:%d\n",
handle, ret);
/* destroy reference held by signal */
hw_fence_destroy_refcount(hw_fence_drv_data, handle, HW_FENCE_FCTL_REFCOUNT);
return ret;
}
if (signal_status != SYNX_STATE_ACTIVE) {
error = hw_fence_interop_to_hw_fence_error(signal_status);
ret = hw_fence_signal_fence(hw_fence_drv_data, NULL, handle, error, true);
if (ret) {
HWFNC_ERR("Failed to signal hwfence handle:%llu error:%u\n", handle, error);
return ret;
}
}
/* store h_synx for debugging purposes */
ret = hw_fence_update_hsynx(hw_fence_drv_data, handle, *params->new_h_synx, false);
if (ret)
HWFNC_ERR("Failed to update hwfence handle:%llu h_synx:%u\n", handle,
*params->new_h_synx);
return ret;
}
int hw_fence_interop_create_fence_from_import(struct synx_import_indv_params *params)
{
struct msm_hw_fence_client dummy_client;
struct dma_fence *fence;
int destroy_ret, ret;
unsigned long flags;
bool is_synx;
u64 handle;
if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->fence)) {
HWFNC_ERR("invalid params:0x%pK fence:0x%pK\n",
params, IS_ERR_OR_NULL(params) ? NULL : params->fence);
return -SYNX_INVALID;
}
fence = (struct dma_fence *)params->fence;
spin_lock_irqsave(fence->lock, flags);
/* hw-fence already present, so no need to create new hw-fence */
if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
spin_unlock_irqrestore(fence->lock, flags);
return SYNX_SUCCESS;
}
is_synx = test_bit(SYNX_NATIVE_FENCE_FLAG_ENABLED_BIT, &fence->flags);
/* only synx clients can signal synx fences; no one can signal sw dma-fence from fw */
dummy_client.client_id = is_synx ? HW_FENCE_SYNX_FENCE_CLIENT_ID :
HW_FENCE_NATIVE_FENCE_CLIENT_ID;
ret = hw_fence_create(hw_fence_drv_data, &dummy_client, (u64)fence, fence->context,
fence->seqno, &handle);
if (ret) {
HWFNC_ERR("failed create fence client:%d ctx:%llu seq:%llu is_synx:%s ret:%d\n",
dummy_client.client_id, fence->context, fence->seqno,
is_synx ? "true" : "false", ret);
spin_unlock_irqrestore(fence->lock, flags);
return hw_fence_interop_to_synx_status(ret);
}
set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags);
spin_unlock_irqrestore(fence->lock, flags);
if (is_synx)
/* exchange handles and register fence controller for wait on synx fence */
ret = _update_interop_fence(params, handle);
else
/* native dma-fences do not have a signaling client, remove ref for fctl signal */
ret = hw_fence_destroy_refcount(hw_fence_drv_data, handle, HW_FENCE_FCTL_REFCOUNT);
if (ret) {
HWFNC_ERR("failed to update for signaling client handle:%llu is_synx:%s ret:%d\n",
handle, is_synx ? "true" : "false", ret);
goto error;
}
ret = hw_fence_add_callback(hw_fence_drv_data, fence, handle);
if (ret)
HWFNC_ERR("failed to add signal callback for fence handle:%llu is_synx:%s ret:%d\n",
handle, is_synx ? "true" : "false", ret);
error:
/* destroy reference held by creator of fence */
destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, &dummy_client,
handle);
if (destroy_ret) {
HWFNC_ERR("failed destroy fence client:%d handle:%llu is_synx:%s ret:%d\n",
dummy_client.client_id, handle, is_synx ? "true" : "false", destroy_ret);
ret = destroy_ret;
}
return hw_fence_interop_to_synx_status(ret);
}
int hw_fence_interop_share_handle_status(struct synx_import_indv_params *params, u32 h_synx,
u32 *signal_status)
{
struct msm_hw_fence *hw_fence;
int destroy_ret, ret = 0;
struct dma_fence *fence;
u64 flags, handle;
bool is_signaled;
u32 error;
ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data);
if (ret)
return hw_fence_interop_to_synx_status(ret);
if (!hw_fence_drv_data->fctl_ready) {
HWFNC_ERR("fctl in invalid state, cannot perform operation\n");
return -SYNX_EAGAIN;
}
if (IS_ERR_OR_NULL(params) || IS_ERR_OR_NULL(params->new_h_synx) ||
!(params->flags & SYNX_IMPORT_DMA_FENCE) ||
(params->flags & SYNX_IMPORT_SYNX_FENCE) || IS_ERR_OR_NULL(params->fence)) {
HWFNC_ERR("invalid params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n",
params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx,
IS_ERR_OR_NULL(params) ? 0 : params->flags,
IS_ERR_OR_NULL(params) ? NULL : params->fence);
return -SYNX_INVALID;
}
fence = params->fence;
if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
HWFNC_ERR("invalid hwfence ctx:%llu seqno:%llu flags:%lx\n", fence->context,
fence->seqno, fence->flags);
return -SYNX_INVALID;
}
hw_fence = hw_fence_find_with_dma_fence(hw_fence_drv_data, NULL, fence, &handle,
&is_signaled, false);
if (is_signaled) {
*signal_status = dma_fence_get_status(fence);
return SYNX_SUCCESS;
}
if (!hw_fence) {
HWFNC_ERR("failed to find hw-fence for ctx:%llu seq:%llu\n", fence->context,
fence->seqno);
return -SYNX_INVALID;
}
ret = hw_fence_get_flags_error(hw_fence_drv_data, handle, &flags, &error);
if (ret) {
HWFNC_ERR("Failed to get flags and error hwfence handle:%llu\n", handle);
goto end;
}
*signal_status = hw_fence_interop_to_synx_signal_status(flags, error);
if (*signal_status >= SYNX_STATE_SIGNALED_SUCCESS)
goto end;
/* update h_synx to register the synx framework as a waiter on the hw-fence */
ret = hw_fence_update_hsynx(hw_fence_drv_data, handle, h_synx, true);
if (ret) {
HWFNC_ERR("failed to set h_synx for hw-fence handle:%llu\n", handle);
goto end;
}
*params->new_h_synx = (u32)handle;
end:
/* release reference held to find hw-fence */
destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, NULL, handle);
if (destroy_ret) {
HWFNC_ERR("Failed to decrement refcount on hw-fence handle:%llu\n", handle);
ret = destroy_ret;
}
return hw_fence_interop_to_synx_status(ret);
}
void *hw_fence_interop_get_fence(u32 h_synx)
{
struct dma_fence *fence;
int ret;
ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data);
if (ret)
return ERR_PTR(hw_fence_interop_to_synx_status(ret));
if (!(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) {
HWFNC_ERR("invalid h_synx:%u does not have hw-fence handle bit set:%lu\n",
h_synx, SYNX_HW_FENCE_HANDLE_FLAG);
return ERR_PTR(-SYNX_INVALID);
}
h_synx &= HW_FENCE_HANDLE_INDEX_MASK;
fence = hw_fence_dma_fence_find(hw_fence_drv_data, h_synx, true);
if (!fence) {
HWFNC_ERR("failed to find dma-fence for hw-fence idx:%u\n", h_synx);
return ERR_PTR(-SYNX_INVALID);
}
return (void *)fence;
}
int synx_hwfence_init_interops(struct synx_hwfence_interops *synx_ops,
struct synx_hwfence_interops *hwfence_ops)
{
if (IS_ERR_OR_NULL(synx_ops) || IS_ERR_OR_NULL(hwfence_ops)) {
HWFNC_ERR("invalid params synx_ops:0x%pK hwfence_ops:0x%pK\n", synx_ops,
hwfence_ops);
return -EINVAL;
}
synx_interops.share_handle_status = synx_ops->share_handle_status;
synx_interops.get_fence = synx_ops->get_fence;
synx_interops.notify_recover = synx_ops->notify_recover;
hwfence_ops->share_handle_status = hw_fence_interop_share_handle_status;
hwfence_ops->get_fence = hw_fence_interop_get_fence;
return 0;
}
EXPORT_SYMBOL_GPL(synx_hwfence_init_interops);
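/*
 * Usage sketch (assumption: invoked once by the synx driver during init
 * to exchange operation tables; the synx-side function names below are
 * hypothetical):
 *
 *	struct synx_hwfence_interops synx_side = {
 *		.share_handle_status = synx_share_handle_status,
 *		.get_fence = synx_get_fence,
 *		.notify_recover = synx_notify_recover,
 *	};
 *	struct synx_hwfence_interops hwfence_side = {0};
 *
 *	ret = synx_hwfence_init_interops(&synx_side, &hwfence_side);
 *	// on success, hwfence_side.share_handle_status and
 *	// hwfence_side.get_fence are callable by the synx driver
 */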

View File

@@ -0,0 +1,784 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/of_platform.h>
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"
/*
 * Max size of the base table of ipc mappings: one entry per static client plus one entry per
 * client type with a configurable number of sub-clients
*/
#define HW_FENCE_IPC_MAP_MAX (HW_FENCE_MAX_STATIC_CLIENTS_INDEX + \
HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE)
/**
* HW_FENCE_IPCC_MAX_LOOPS:
* Max number of times HW Fence Driver can read interrupt information
*/
#define HW_FENCE_IPCC_MAX_LOOPS 100
/**
* struct hw_fence_client_ipc_map - map client id with ipc signal for trigger.
* @ipc_client_id_virt: virtual ipc client id for the hw-fence client.
* @ipc_client_id_phys: physical ipc client id for the hw-fence client.
* @ipc_signal_id: ipc signal id for the hw-fence client.
* @update_rxq: bool to indicate if client requires rx queue update in general signal case
* (e.g. if dma-fence is signaled)
* @signaled_update_rxq: bool to indicate if client requires rx queue update when registering to
* wait on an already signaled fence
* @signaled_send_ipc: bool to indicate if client requires ipc interrupt for already signaled fences
* @txq_update_send_ipc: bool to indicate if client requires ipc interrupt for signaled fences
*/
struct hw_fence_client_ipc_map {
int ipc_client_id_virt;
int ipc_client_id_phys;
int ipc_signal_id;
bool update_rxq;
bool signaled_update_rxq;
bool signaled_send_ipc;
bool txq_update_send_ipc;
};
/**
* struct hw_fence_clients_ipc_map - Table providing the 'client to signal' mapping, used by
* the hw fence driver to trigger an ipc signal when a hw fence is already
* signaled.
* This version is for targets that support dpu client id.
*
* Note that the index of this struct must match the enum hw_fence_client_id
*/
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map[HW_FENCE_IPC_MAP_MAX] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 1, true, true, true,
false},
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_VID, 0, true, false, false,
true},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 1, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 2, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 3, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 4, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_VID, 5, false, false, true,
false},
#if IS_ENABLED(CONFIG_DEBUG_FS)
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 21, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 22, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 23, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 24, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 25, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 26, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_VID, 27, true, true, false,
true},
#else
{0, 0, 0, false, false, false, false}, /* val0 */
{0, 0, 0, false, false, false, false}, /* val1 */
{0, 0, 0, false, false, false, false}, /* val2 */
{0, 0, 0, false, false, false, false}, /* val3 */
{0, 0, 0, false, false, false, false}, /* val4 */
{0, 0, 0, false, false, false, false}, /* val5 */
{0, 0, 0, false, false, false, false}, /* val6 */
#endif /* CONFIG_DEBUG_FS */
{HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_VID, 0, true, true, true,
false},
{HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_VID, 0, true, true, true,
false},
};
/**
* struct hw_fence_clients_ipc_map_v2 - Table providing the 'client to signal' mapping, used by
* the hw fence driver to trigger an ipc signal when a hw fence is already
* signaled.
* This version is for targets that support dpu client id and IPC v2.
*
* Note that the index of this struct must match the enum hw_fence_client_id for clients ids less
* than HW_FENCE_MAX_STATIC_CLIENTS_INDEX.
* For clients with configurable sub-clients, the index of this struct matches
* HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC).
*/
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_v2[HW_FENCE_IPC_MAP_MAX] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 1, true, true, true,
false},
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, true, false, false,
true},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 1, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 2, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 3, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 4, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID, 5, false, false, true,
false},
#if IS_ENABLED(CONFIG_DEBUG_FS)
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true, false,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true, false,
true},
#else
{0, 0, 0, false, false, false, false}, /* val0 */
{0, 0, 0, false, false, false, false}, /* val1 */
{0, 0, 0, false, false, false, false}, /* val2 */
{0, 0, 0, false, false, false, false}, /* val3 */
{0, 0, 0, false, false, false, false}, /* val4 */
{0, 0, 0, false, false, false, false}, /* val5 */
{0, 0, 0, false, false, false, false}, /* val6 */
#endif /* CONFIG_DEBUG_FS */
{HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID, 0, true, true, true,
false},
{HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, true,
false},
{0, 0, 0, false, false, false, false}, /* ipa */
{HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, false, true,
false},
};
/**
* struct hw_fence_clients_ipc_map_sun - Table providing the 'client to signal' mapping, used by
* the hw fence driver to trigger an ipc signal when a hw fence is already
* signaled.
* This version is for sun target.
*
* Note that the index of this struct must match the enum hw_fence_client_id for clients ids less
* than HW_FENCE_MAX_STATIC_CLIENTS_INDEX.
* For clients with configurable sub-clients, the index of this struct matches
* HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC).
*/
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_sun[HW_FENCE_IPC_MAP_MAX] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 0, true, true, true,
false},
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID, 0, true, false, false,
true},
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 0, false, false, true,
false}, /* ctl0 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 1, false, false, true,
false}, /* ctl1 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 2, false, false, true,
false}, /* ctl2 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 3, false, false, true,
false}, /* ctl3 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 4, false, false, true,
false}, /* ctl4 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_SUN, 5, false, false, true,
false}, /* ctl5 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 21, true, true, true,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 22, true, true, true,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 23, true, true, true,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 24, true, true, true,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 25, true, true, true,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 26, true, true, true,
true},
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID, 27, true, true, true,
true},
#else
{0, 0, 0, false, false, false, false}, /* val0 */
{0, 0, 0, false, false, false, false}, /* val1 */
{0, 0, 0, false, false, false, false}, /* val2 */
{0, 0, 0, false, false, false, false}, /* val3 */
{0, 0, 0, false, false, false, false}, /* val4 */
{0, 0, 0, false, false, false, false}, /* val5 */
{0, 0, 0, false, false, false, false}, /* val6 */
#endif /* CONFIG_DEBUG_FS */
{HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID_SUN, 0, true, true, true,
false},
{HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID, 0, true, true, true,
false},
{0, 0, 0, false, false, false, false}, /* ipa */
{HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID, 0, false, false, true,
false},
{HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID, 0, false, false, true,
false},
};
/**
* struct hw_fence_clients_ipc_map_niobe - Table providing the 'client to signal' mapping, used by
* the hw fence driver to trigger an ipc signal when a hw fence is already
* signaled.
* This version is for niobe target.
*
* Note that the index of this struct must match the enum hw_fence_client_id for clients ids less
* than HW_FENCE_MAX_STATIC_CLIENTS_INDEX.
* For clients with configurable sub-clients, the index of this struct matches
* HW_FENCE_MAX_STATIC_CLIENTS_INDEX + (client type index - HW_FENCE_MAX_CLIENT_TYPE_STATIC).
*/
struct hw_fence_client_ipc_map hw_fence_clients_ipc_map_niobe[HW_FENCE_IPC_MAP_MAX] = {
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 0, true, true,
true, false}, /* ctrlq */
{HW_FENCE_IPC_CLIENT_ID_GPU_VID, HW_FENCE_IPC_CLIENT_ID_GPU_PID_NIOBE, 0, true, false,
false, true}, /* gfx */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 0, false, false,
true, false}, /* ctl0 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 1, false, false,
true, false}, /* ctl1 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 2, false, false,
true, false}, /* ctl2 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 3, false, false,
true, false}, /* ctl3 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 4, false, false,
true, false}, /* ctl4 */
{HW_FENCE_IPC_CLIENT_ID_DPU_VID, HW_FENCE_IPC_CLIENT_ID_DPU_PID_NIOBE, 5, false, false,
true, false}, /* ctl5 */
#if IS_ENABLED(CONFIG_DEBUG_FS)
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 21, true, true,
true, true}, /* val0 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 22, true, true,
true, true}, /* val1 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 23, true, true,
true, true}, /* val2 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 24, true, true,
true, true}, /* val3 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 25, true, true,
true, true}, /* val4 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 26, true, true,
true, true}, /* val5 */
{HW_FENCE_IPC_CLIENT_ID_APPS_VID, HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE, 27, true, true,
true, true}, /* val6 */
#else
{0, 0, 0, false, false, false, false}, /* val0 */
{0, 0, 0, false, false, false, false}, /* val1 */
{0, 0, 0, false, false, false, false}, /* val2 */
{0, 0, 0, false, false, false, false}, /* val3 */
{0, 0, 0, false, false, false, false}, /* val4 */
{0, 0, 0, false, false, false, false}, /* val5 */
{0, 0, 0, false, false, false, false}, /* val6 */
#endif /* CONFIG_DEBUG_FS */
{HW_FENCE_IPC_CLIENT_ID_IPE_VID, HW_FENCE_IPC_CLIENT_ID_IPE_PID_NIOBE, 0, true, true, true,
false}, /* ipe */
{HW_FENCE_IPC_CLIENT_ID_VPU_VID, HW_FENCE_IPC_CLIENT_ID_VPU_PID_NIOBE, 0, true, true, true,
false}, /* vpu */
{HW_FENCE_IPC_CLIENT_ID_IPA_VID, HW_FENCE_IPC_CLIENT_ID_IPA_PID_NIOBE, 0, true, true, true,
false}, /* ipa */
{HW_FENCE_IPC_CLIENT_ID_IFE0_VID, HW_FENCE_IPC_CLIENT_ID_IFE0_PID_NIOBE, 0, false, false,
true, false}, /* ife0 */
{HW_FENCE_IPC_CLIENT_ID_IFE1_VID, HW_FENCE_IPC_CLIENT_ID_IFE1_PID_NIOBE, 0, false, false,
true, false}, /* ife1 */
{HW_FENCE_IPC_CLIENT_ID_IFE2_VID, HW_FENCE_IPC_CLIENT_ID_IFE2_PID_NIOBE, 0, false, false,
true, false}, /* ife2 */
{HW_FENCE_IPC_CLIENT_ID_IFE3_VID, HW_FENCE_IPC_CLIENT_ID_IFE3_PID_NIOBE, 0, false, false,
true, false}, /* ife3 */
{HW_FENCE_IPC_CLIENT_ID_IFE4_VID, HW_FENCE_IPC_CLIENT_ID_IFE4_PID_NIOBE, 0, false, false,
true, false}, /* ife4 */
{HW_FENCE_IPC_CLIENT_ID_IFE5_VID, HW_FENCE_IPC_CLIENT_ID_IFE5_PID_NIOBE, 0, false, false,
true, false}, /* ife5 */
{HW_FENCE_IPC_CLIENT_ID_IFE6_VID, HW_FENCE_IPC_CLIENT_ID_IFE6_PID_NIOBE, 0, false, false,
true, false}, /* ife6 */
{HW_FENCE_IPC_CLIENT_ID_IFE7_VID, HW_FENCE_IPC_CLIENT_ID_IFE7_PID_NIOBE, 0, false, false,
true, false}, /* ife7 */
{HW_FENCE_IPC_CLIENT_ID_IFE8_VID, HW_FENCE_IPC_CLIENT_ID_IFE8_PID_NIOBE, 0, false, false,
true, false}, /* ife8 */
{HW_FENCE_IPC_CLIENT_ID_IFE9_VID, HW_FENCE_IPC_CLIENT_ID_IFE9_PID_NIOBE, 0, false, false,
true, false}, /* ife9 */
{HW_FENCE_IPC_CLIENT_ID_IFE10_VID, HW_FENCE_IPC_CLIENT_ID_IFE10_PID_NIOBE, 0, false, false,
true, false}, /* ife10 */
{HW_FENCE_IPC_CLIENT_ID_IFE11_VID, HW_FENCE_IPC_CLIENT_ID_IFE11_PID_NIOBE, 0, false, false,
true, false}, /* ife11 */
};
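/*
 * Note on the positional initializers in the ipc map tables above: in order, they correspond to
 * the struct hw_fence_client_ipc_map fields read by the accessors below, i.e.
 * {ipc_client_id_virt, ipc_client_id_phys, ipc_signal_id, update_rxq,
 *  signaled_update_rxq, signaled_send_ipc, txq_update_send_ipc}.
 */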
int hw_fence_ipcc_get_client_virt_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return -EINVAL;
return drv_data->ipc_clients_table[client_id].ipc_client_id_virt;
}
int hw_fence_ipcc_get_client_phys_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return -EINVAL;
return drv_data->ipc_clients_table[client_id].ipc_client_id_phys;
}
int hw_fence_ipcc_get_signal_id(struct hw_fence_driver_data *drv_data, u32 client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return -EINVAL;
return drv_data->ipc_clients_table[client_id].ipc_signal_id;
}
bool hw_fence_ipcc_needs_rxq_update(struct hw_fence_driver_data *drv_data, int client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return false;
return drv_data->ipc_clients_table[client_id].update_rxq;
}
bool hw_fence_ipcc_signaled_needs_rxq_update(struct hw_fence_driver_data *drv_data,
int client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return false;
return drv_data->ipc_clients_table[client_id].signaled_update_rxq;
}
bool hw_fence_ipcc_signaled_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return false;
return drv_data->ipc_clients_table[client_id].signaled_send_ipc;
}
bool hw_fence_ipcc_txq_update_needs_ipc_irq(struct hw_fence_driver_data *drv_data, int client_id)
{
if (!drv_data || client_id >= drv_data->clients_num)
return false;
return drv_data->ipc_clients_table[client_id].txq_update_send_ipc;
}
/**
* _get_ipc_phys_client_name() - Returns ipc client name from its physical id, used for debugging.
*/
static inline char *_get_ipc_phys_client_name(u32 client_id)
{
switch (client_id) {
case HW_FENCE_IPC_CLIENT_ID_APPS_PID:
return "APPS_PID";
case HW_FENCE_IPC_CLIENT_ID_GPU_PID:
return "GPU_PID";
case HW_FENCE_IPC_CLIENT_ID_DPU_PID:
return "DPU_PID";
case HW_FENCE_IPC_CLIENT_ID_IPE_PID:
return "IPE_PID";
case HW_FENCE_IPC_CLIENT_ID_VPU_PID:
return "VPU_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE0_PID:
return "IFE0_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE1_PID:
return "IFE1_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE2_PID:
return "IFE2_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE3_PID:
return "IFE3_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE4_PID:
return "IFE4_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE5_PID:
return "IFE5_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE6_PID:
return "IFE6_PID";
case HW_FENCE_IPC_CLIENT_ID_IFE7_PID:
return "IFE7_PID";
}
return "UNKNOWN_PID";
}
/**
* _get_ipc_virt_client_name() - Returns ipc client name from its virtual id, used for debugging.
*/
static inline char *_get_ipc_virt_client_name(u32 client_id)
{
switch (client_id) {
case HW_FENCE_IPC_CLIENT_ID_APPS_VID:
return "APPS_VID";
case HW_FENCE_IPC_CLIENT_ID_GPU_VID:
return "GPU_VID";
case HW_FENCE_IPC_CLIENT_ID_DPU_VID:
return "DPU_VID";
case HW_FENCE_IPC_CLIENT_ID_IPE_VID:
return "IPE_VID";
case HW_FENCE_IPC_CLIENT_ID_VPU_VID:
return "VPU_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE0_VID:
return "IFE0_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE1_VID:
return "IFE1_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE2_VID:
return "IFE2_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE3_VID:
return "IFE3_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE4_VID:
return "IFE4_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE5_VID:
return "IFE5_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE6_VID:
return "IFE6_VID";
case HW_FENCE_IPC_CLIENT_ID_IFE7_VID:
return "IFE7_VID";
}
return "UNKNOWN_VID";
}
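/**
 * hw_fence_ipcc_trigger_signal() - Triggers an ipc signal by writing the receiver's virtual
 * client id and signal id, encoded as (rx_client_vid << 16) | signal_id, to the sender's
 * IPCC SEND register for the current protocol.
 * @drv_data: driver data.
 * @tx_client_pid: physical id of the sending client.
 * @rx_client_vid: virtual id of the receiving client.
 * @signal_id: id of the signal to trigger.
 */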
void hw_fence_ipcc_trigger_signal(struct hw_fence_driver_data *drv_data,
u32 tx_client_pid, u32 rx_client_vid, u32 signal_id)
{
void __iomem *ptr;
u32 val;
/* Send signal */
ptr = IPC_PROTOCOLp_CLIENTc_SEND(drv_data->ipcc_io_mem, drv_data->protocol_id,
tx_client_pid);
val = (rx_client_vid << 16) | signal_id;
HWFNC_DBG_IRQ("Sending ipcc from %s (%d) to %s (%d) signal_id:%d [wr:0x%x to off:0x%pK]\n",
_get_ipc_phys_client_name(tx_client_pid), tx_client_pid,
_get_ipc_virt_client_name(rx_client_vid), rx_client_vid,
signal_id, val, ptr);
HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
writel_relaxed(val, ptr);
/* Make sure value is written */
wmb();
}
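/**
 * _hw_fence_ipcc_init_map_with_configurable_clients() - Allocates the runtime ipc clients
 * table: entries for static hw fence clients are copied verbatim from the base table, then
 * each configurable client type is expanded into one entry per sub-client, with the
 * sub-client index used as the ipc signal id.
 */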
static int _hw_fence_ipcc_init_map_with_configurable_clients(struct hw_fence_driver_data *drv_data,
struct hw_fence_client_ipc_map *base_table)
{
int i, j, map_idx;
size_t size;
size = drv_data->clients_num * sizeof(struct hw_fence_client_ipc_map);
drv_data->ipc_clients_table = kzalloc(size, GFP_KERNEL);
if (!drv_data->ipc_clients_table)
return -ENOMEM;
/* copy mappings for static hw fence clients */
size = HW_FENCE_MAX_STATIC_CLIENTS_INDEX * sizeof(struct hw_fence_client_ipc_map);
memcpy(drv_data->ipc_clients_table, base_table, size);
/* initialize mappings for ipc clients with configurable number of hw fence clients */
map_idx = HW_FENCE_MAX_STATIC_CLIENTS_INDEX;
for (i = 0; i < HW_FENCE_MAX_CLIENT_TYPE_CONFIGURABLE; i++) {
int client_type = HW_FENCE_MAX_CLIENT_TYPE_STATIC + i;
int clients_num = drv_data->hw_fence_client_types[client_type].clients_num;
for (j = 0; j < clients_num; j++) {
/* this should never happen if drv_data->clients_num is correct */
if (map_idx >= drv_data->clients_num) {
HWFNC_ERR("%s clients_num:%d exceeds drv_data->clients_num:%u\n",
drv_data->hw_fence_client_types[client_type].name,
clients_num, drv_data->clients_num);
return -EINVAL;
}
drv_data->ipc_clients_table[map_idx] =
base_table[HW_FENCE_MAX_STATIC_CLIENTS_INDEX + i];
drv_data->ipc_clients_table[map_idx].ipc_signal_id = j;
map_idx++;
}
}
return 0;
}
/**
* _hw_fence_ipcc_hwrev_init() - Initializes internal driver struct with corresponding ipcc data,
* according to the ipcc hw revision.
* @drv_data: driver data.
* @hwrev: ipcc hw revision.
*/
static int _hw_fence_ipcc_hwrev_init(struct hw_fence_driver_data *drv_data, u32 hwrev)
{
int ret = 0;
switch (hwrev) {
case HW_FENCE_IPCC_HW_REV_170:
drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_fctl_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_fctl_pid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->protocol_id = HW_FENCE_IPC_COMPUTE_L1_PROTOCOL_ID_KALAMA;
drv_data->ipc_clients_table = hw_fence_clients_ipc_map;
HWFNC_DBG_INIT("ipcc protocol_id: Kalama\n");
break;
case HW_FENCE_IPCC_HW_REV_203:
drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID;
drv_data->ipcc_fctl_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_fctl_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID;
drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_PINEAPPLE; /* Fence */
ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data,
hw_fence_clients_ipc_map_v2);
HWFNC_DBG_INIT("ipcc protocol_id: Pineapple\n");
break;
case HW_FENCE_IPCC_HW_REV_2A2:
drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID;
drv_data->ipcc_fctl_vid = drv_data->has_soccp ? HW_FENCE_IPC_CLIENT_ID_SOCCP_VID :
HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_fctl_pid = drv_data->has_soccp ? HW_FENCE_IPC_CLIENT_ID_SOCCP_PID :
HW_FENCE_IPC_CLIENT_ID_APPS_PID;
drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_SUN; /* Fence */
ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data,
hw_fence_clients_ipc_map_sun);
HWFNC_DBG_INIT("ipcc protocol_id: Sun\n");
break;
case HW_FENCE_IPCC_HW_REV_2B4:
drv_data->ipcc_client_vid = HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_client_pid = HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE;
drv_data->ipcc_fctl_vid = drv_data->has_soccp ? HW_FENCE_IPC_CLIENT_ID_SOCCP_VID :
HW_FENCE_IPC_CLIENT_ID_APPS_VID;
drv_data->ipcc_fctl_pid = drv_data->has_soccp ?
HW_FENCE_IPC_CLIENT_ID_SOCCP_PID_NIOBE :
HW_FENCE_IPC_CLIENT_ID_APPS_PID_NIOBE;
drv_data->protocol_id = HW_FENCE_IPC_FENCE_PROTOCOL_ID_NIOBE; /* Fence */
ret = _hw_fence_ipcc_init_map_with_configurable_clients(drv_data,
hw_fence_clients_ipc_map_niobe);
HWFNC_DBG_INIT("ipcc protocol_id: Niobe\n");
break;
default:
HWFNC_ERR("unrecognized ipcc hw-rev:0x%x\n", hwrev);
		return -EINVAL;
}
return ret;
}
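/**
 * _enable_client_signal_pair() - Enables reception of one client-signal pair by writing
 * (tx_client_id_vid << 16) | signal_id to the receiving client's RECV_SIGNAL_ENABLE
 * register for the current protocol.
 */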
static int _enable_client_signal_pair(struct hw_fence_driver_data *drv_data,
u32 rx_client_id_phys, u32 tx_client_id_vid, u32 signal_id)
{
void __iomem *ptr;
u32 val;
if (!drv_data || !drv_data->ipcc_io_mem || !drv_data->protocol_id) {
HWFNC_ERR("invalid drv_data:0x%pK ipcc_io_mem:0x%pK protocol:%d\n",
drv_data, drv_data ? drv_data->ipcc_io_mem : NULL,
drv_data ? drv_data->protocol_id : -1);
return -EINVAL;
}
val = ((tx_client_id_vid) << 16) | ((signal_id) & 0xFFFF);
ptr = IPC_PROTOCOLp_CLIENTc_RECV_SIGNAL_ENABLE(drv_data->ipcc_io_mem, drv_data->protocol_id,
rx_client_id_phys);
HWFNC_DBG_H("Write:0x%x to RegOffset:0x%pK\n", val, ptr);
writel_relaxed(val, ptr);
return 0;
}
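/**
 * hw_fence_ipcc_enable_signaling() - Reads the ipcc hw revision from the device-tree
 * property "qcom,hw-fence-ipc-ver", initializes the revision-specific ipc clients table
 * and protocol id, enables the protocol for the ctrl queue, and enables the fctl-to-apps
 * client-signal pair used by the ctrl queue.
 */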
int hw_fence_ipcc_enable_signaling(struct hw_fence_driver_data *drv_data)
{
u32 val;
int ret;
HWFNC_DBG_H("enable ipc +\n");
ret = of_property_read_u32(drv_data->dev->of_node, "qcom,hw-fence-ipc-ver", &val);
if (ret || !val) {
HWFNC_ERR("missing hw fences ipc-ver entry or invalid ret:%d val:%d\n", ret, val);
return -EINVAL;
}
if (_hw_fence_ipcc_hwrev_init(drv_data, val)) {
HWFNC_ERR("ipcc protocol id not supported\n");
return -EINVAL;
}
/* Enable protocol for ctrl queue */
hw_fence_ipcc_enable_protocol(drv_data, 0);
/* Enable Client-Signal pairs from FCTL (SOCCP or APSS(NS)) to APPS(NS) (0x8) */
	ret = _enable_client_signal_pair(drv_data, drv_data->ipcc_client_pid,
		drv_data->ipcc_fctl_vid, 0);
	HWFNC_DBG_H("enable ipc -\n");
	return ret;
}
int hw_fence_ipcc_enable_protocol(struct hw_fence_driver_data *drv_data, u32 client_id)
{
void __iomem *ptr;
u32 val;
if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table ||
client_id >= drv_data->clients_num) {
HWFNC_ERR("drv_data:0x%pK protocol:%d ipc_table:0x%pK client_id:%u max:%u\n",
drv_data, drv_data ? drv_data->protocol_id : -1,
drv_data ? drv_data->ipc_clients_table : NULL, client_id,
drv_data ? drv_data->clients_num : -1);
return -EINVAL;
}
	/* Set bit(0) so the signal is cleared when RECV_ID is read */
val = 0x00000001;
ptr = IPC_PROTOCOLp_CLIENTc_CONFIG(drv_data->ipcc_io_mem, drv_data->protocol_id,
drv_data->ipc_clients_table[client_id].ipc_client_id_phys);
HWFNC_DBG_H("Write:0x%x to RegOffset:0x%llx\n", val, (u64)ptr);
writel_relaxed(val, ptr);
return 0;
}
int hw_fence_ipcc_enable_client_signal_pairs(struct hw_fence_driver_data *drv_data,
u32 start_client)
{
struct hw_fence_client_ipc_map *hw_fence_client;
int i, ipc_client_vid;
HWFNC_DBG_H("enable ipc for client signal pairs +\n");
if (!drv_data || !drv_data->protocol_id || !drv_data->ipc_clients_table ||
start_client >= drv_data->clients_num) {
HWFNC_ERR("drv_data:0x%pK protocol:%d ipc_table:0x%pK start_client:%u max:%u\n",
drv_data, drv_data ? drv_data->protocol_id : -1,
drv_data ? drv_data->ipc_clients_table : NULL, start_client,
drv_data ? drv_data->clients_num : -1);
return -EINVAL;
}
ipc_client_vid = drv_data->ipc_clients_table[start_client].ipc_client_id_virt;
HWFNC_DBG_H("ipcc_io_mem:0x%llx\n", (u64)drv_data->ipcc_io_mem);
HWFNC_DBG_H("Initialize %s ipc signals\n", _get_ipc_virt_client_name(ipc_client_vid));
/* Enable Client-Signal pairs from Client to APPS(NS) (8) */
for (i = start_client; i < drv_data->clients_num; i++) {
hw_fence_client = &drv_data->ipc_clients_table[i];
/*
* Stop after enabling signals for all clients with the same ipcc client id as the
* given client.
*/
if (hw_fence_client->ipc_client_id_virt != ipc_client_vid)
break;
/* Enable signals for given client */
HWFNC_DBG_H("%s client:%d vid:%d pid:%d signal:%d has_soccp:%d\n",
_get_ipc_virt_client_name(ipc_client_vid), i,
hw_fence_client->ipc_client_id_virt, hw_fence_client->ipc_client_id_phys,
hw_fence_client->ipc_signal_id, drv_data->has_soccp);
/* Enable input signal from driver to client */
if (drv_data->has_soccp || ipc_client_vid != drv_data->ipcc_client_vid)
_enable_client_signal_pair(drv_data, hw_fence_client->ipc_client_id_phys,
drv_data->ipcc_client_vid, hw_fence_client->ipc_signal_id);
/* If fctl separate from driver, enable separate input fctl-signal for client */
if (drv_data->ipcc_client_vid != drv_data->ipcc_fctl_vid)
_enable_client_signal_pair(drv_data, hw_fence_client->ipc_client_id_phys,
drv_data->ipcc_fctl_vid, hw_fence_client->ipc_signal_id);
}
HWFNC_DBG_H("enable %s ipc for start:%d end:%d -\n",
_get_ipc_virt_client_name(ipc_client_vid), start_client, i);
return 0;
}
static bool _is_invalid_signaling_client(struct hw_fence_driver_data *drv_data, u32 client_id)
{
#if IS_ENABLED(CONFIG_DEBUG_FS)
return client_id != drv_data->ipcc_fctl_vid && client_id != drv_data->ipcc_client_vid;
#else
return client_id != drv_data->ipcc_fctl_vid;
#endif
}
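/**
 * hw_fence_ipcc_get_signaled_clients_mask() - Drains the RECV_ID register for the driver's
 * protocol until HW_FENCE_IPC_RECV_ID_NONE is read, validating that each entry comes from
 * an expected signaling client, and accumulates the received signal ids into a bitmask.
 * Under CONFIG_DEBUG_FS, signal ids in the validation-client range are remapped to their
 * corresponding validation client ids before being set in the mask.
 */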
u64 hw_fence_ipcc_get_signaled_clients_mask(struct hw_fence_driver_data *drv_data)
{
u32 client_id, signal_id, reg_val;
u64 mask = 0;
int i;
if (!drv_data || !drv_data->protocol_id || !drv_data->ipcc_client_pid ||
!drv_data->ipcc_fctl_vid || !drv_data->has_soccp) {
HWFNC_ERR("invalid drv_data:0x%pK protocol:%d drv_pid:%d fctl_vid:%d\n",
drv_data, drv_data ? drv_data->protocol_id : -1,
drv_data ? drv_data->ipcc_client_pid : -1,
drv_data ? drv_data->ipcc_fctl_vid : -1);
return -1;
}
/* read recv_id until done processing all clients signals */
for (i = 0; i < HW_FENCE_IPCC_MAX_LOOPS; i++) {
mb(); /* make sure memory is updated */
reg_val = readl_relaxed(IPC_PROTOCOLp_CLIENTc_RECV_ID(drv_data->ipcc_io_mem,
drv_data->protocol_id, drv_data->ipcc_client_pid));
/* finished reading clients */
if (reg_val == HW_FENCE_IPC_RECV_ID_NONE)
return mask;
client_id = (reg_val >> 16) & 0xFFFF;
signal_id = reg_val & 0xFFFF;
HWFNC_DBG_IRQ("read recv_id value:0x%x client:%u signal:%u\n", reg_val, client_id,
signal_id);
if (_is_invalid_signaling_client(drv_data, client_id)) {
HWFNC_ERR("Received client:%u signal:%u expected client:%u\n",
client_id, signal_id, drv_data->ipcc_fctl_vid);
continue;
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
/* received signals from SOCCP for validation clients */
if (signal_id >= hw_fence_ipcc_get_signal_id(drv_data, HW_FENCE_CLIENT_ID_VAL0)
&& signal_id <= hw_fence_ipcc_get_signal_id(drv_data,
HW_FENCE_CLIENT_ID_VAL6))
signal_id = signal_id - hw_fence_ipcc_get_signal_id(drv_data,
HW_FENCE_CLIENT_ID_VAL0) + HW_FENCE_CLIENT_ID_VAL0;
#endif /* CONFIG_DEBUG_FS*/
mask |= BIT(signal_id);
}
HWFNC_ERR("irq_handler has too many loops i=%d max:%d\n", i, HW_FENCE_IPCC_MAX_LOOPS);
return mask;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,580 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <linux/sync_file.h>
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_debug.h"
#include "hw_fence_drv_fence.h"
#define HW_SYNC_IOCTL_COUNT ARRAY_SIZE(hw_sync_debugfs_ioctls)
#define HW_FENCE_ARRAY_SIZE 10
#define HW_SYNC_IOC_MAGIC 'W'
#define HW_SYNC_IOC_REG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 10, unsigned long)
#define HW_SYNC_IOC_UNREG_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 11, unsigned long)
#define HW_SYNC_IOC_CREATE_FENCE _IOWR(HW_SYNC_IOC_MAGIC, 12,\
struct hw_fence_sync_create_data)
#define HW_SYNC_IOC_CREATE_FENCE_ARRAY _IOWR(HW_SYNC_IOC_MAGIC, 14,\
struct hw_fence_array_sync_create_data)
#define HW_SYNC_IOC_REG_FOR_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 16, int)
#define HW_SYNC_IOC_FENCE_SIGNAL _IOWR(HW_SYNC_IOC_MAGIC, 17, unsigned long)
#define HW_SYNC_IOC_FENCE_WAIT _IOWR(HW_SYNC_IOC_MAGIC, 18, int)
#define HW_SYNC_IOC_RESET_CLIENT _IOWR(HW_SYNC_IOC_MAGIC, 19, unsigned long)
#define HW_FENCE_IOCTL_NR(n) (_IOC_NR(n) - 2)
#define HW_IOCTL_DEF(ioctl, _func) \
[HW_FENCE_IOCTL_NR(ioctl)] = { \
.cmd = ioctl, \
.func = _func, \
.name = #ioctl \
}
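/*
 * The ioctl table built with HW_IOCTL_DEF uses designated initializers keyed by
 * HW_FENCE_IOCTL_NR(), so the table is sparse: skipped command numbers (e.g. 13 and 15)
 * leave NULL entries, which the dispatcher rejects with -ENOTTY via the !func check.
 */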
/**
* struct hw_sync_obj - per client hw sync object.
* @context: context id used to create fences.
* @client_id: to uniquely represent client.
* @client_handle: Pointer to the structure holding the resources
* allocated to the client.
* @mem_descriptor: Memory descriptor of the queue allocated by the
* hardware fence driver for each client during register.
*/
struct hw_sync_obj {
u64 context;
int client_id;
void *client_handle;
struct msm_hw_fence_mem_addr mem_descriptor;
};
/**
* struct hw_fence_sync_create_data - data used in creating fences.
* @seqno: sequence number.
* @incr_context: if set, then the context would be incremented.
* @fence: returns the fd of the new sync_file with the created fence.
* @hash: fence hash
*/
struct hw_fence_sync_create_data {
u64 seqno;
bool incr_context;
__s32 fence;
u64 hash;
};
/**
* struct hw_fence_array_sync_create_data - data used in creating multiple fences.
* @seqno: sequence number used to create fence array.
* @num_fences: number of fence fds received.
* @fences: array of fence fds.
* @fence_array_fd: fd of fence array.
*/
struct hw_fence_array_sync_create_data {
u64 seqno;
int num_fences;
u64 fences[HW_FENCE_ARRAY_SIZE];
__s32 fence_array_fd;
};
/**
* struct hw_fence_sync_signal_data - data used to signal fences.
* @hash: hash of the fence.
* @error_flag: error flag
*/
struct hw_fence_sync_signal_data {
u64 hash;
u32 error_flag;
};
/**
* struct hw_fence_sync_wait_data - data used to wait on fences.
* @fence: fence fd.
* @timeout_ms: fence wait time out.
*/
struct hw_fence_sync_wait_data {
__s32 fence;
u64 timeout_ms;
};
/**
* struct hw_fence_sync_reset_data - data used to reset client.
* @client_id: client id.
* @reset_flag: reset flag
*/
struct hw_fence_sync_reset_data {
int client_id;
u32 reset_flag;
};
typedef long hw_fence_ioctl_t(struct hw_sync_obj *obj, unsigned long arg);
/**
* struct hw_sync_ioctl_def - hw_sync driver ioctl entry
* @cmd: ioctl command number, without flags
* @func: handler for this ioctl
* @name: user-readable name for debug output
*/
struct hw_sync_ioctl_def {
unsigned int cmd;
hw_fence_ioctl_t *func;
const char *name;
};
static bool _is_valid_client(struct hw_sync_obj *obj)
{
if (!obj)
return false;
if (obj->client_id < HW_FENCE_CLIENT_ID_VAL0 || obj->client_id > HW_FENCE_CLIENT_ID_VAL6) {
HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", obj->client_id,
HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6);
return false;
}
return true;
}
static int _get_client_id(struct hw_sync_obj *obj, unsigned long arg)
{
int client_id;
if (copy_from_user(&client_id, (void __user *)arg, sizeof(client_id)))
return -EFAULT;
if (!obj)
return -EINVAL;
if (client_id < HW_FENCE_CLIENT_ID_VAL0 || client_id > HW_FENCE_CLIENT_ID_VAL6) {
HWFNC_ERR("invalid client_id:%d min:%d max:%d\n", client_id,
HW_FENCE_CLIENT_ID_VAL0, HW_FENCE_CLIENT_ID_VAL6);
return -EINVAL;
}
return client_id;
}
static void *_hw_sync_get_fence(int fd)
{
return fd >= 0 ? sync_file_get_fence(fd) : NULL;
}
static int hw_sync_debugfs_open(struct inode *inode, struct file *file)
{
struct hw_sync_obj *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return -ENOMEM;
obj->context = dma_fence_context_alloc(1);
file->private_data = obj;
return 0;
}
static int hw_sync_debugfs_release(struct inode *inode, struct file *file)
{
struct hw_sync_obj *obj = file->private_data;
if (!obj)
return -EINVAL;
kfree(obj);
return 0;
}
static long hw_sync_ioctl_reg_client(struct hw_sync_obj *obj, unsigned long arg)
{
int client_id = _get_client_id(obj, arg);
if (IS_ERR(&client_id)) {
return client_id;
} else if (obj->client_handle) {
HWFNC_ERR("client:%d already registered as validation client\n", client_id);
return -EINVAL;
}
obj->client_id = client_id;
obj->client_handle = msm_hw_fence_register(obj->client_id, &obj->mem_descriptor);
if (IS_ERR_OR_NULL(obj->client_handle))
return -EINVAL;
return 0;
}
static long hw_sync_ioctl_unreg_client(struct hw_sync_obj *obj, unsigned long arg)
{
int client_id = _get_client_id(obj, arg);
if (IS_ERR(&client_id)) {
return client_id;
} else if (client_id != obj->client_id) {
HWFNC_ERR("deregistering hw-fence client %d with invalid client_id arg:%d\n",
obj->client_id, client_id);
return -EINVAL;
}
return msm_hw_fence_deregister(obj->client_handle);
}
static long hw_sync_ioctl_create_fence(struct hw_sync_obj *obj, unsigned long arg)
{
struct msm_hw_fence_create_params params;
struct hw_fence_sync_create_data data;
struct hw_dma_fence *fence;
struct dma_fence *dma_fence;
u64 hash;
struct sync_file *sync_file;
int fd, ret;
if (!_is_valid_client(obj)) {
return -EINVAL;
} else if (IS_ERR_OR_NULL(obj->client_handle)) {
HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id);
return -EINVAL;
}
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
dma_fence = hw_dma_fence_init(obj->client_handle, obj->context, data.seqno);
if (IS_ERR_OR_NULL(dma_fence))
return -EINVAL;
fence = (struct hw_dma_fence *)dma_fence;
params.fence = dma_fence;
params.handle = &hash;
/* create hw fence */
ret = msm_hw_fence_create(obj->client_handle, &params);
if (ret) {
HWFNC_ERR("failed to create hw_fence for client:%d ctx:%llu seqno:%llu\n",
obj->client_id, obj->context, data.seqno);
dma_fence_put(&fence->base);
return -EINVAL;
}
/* keep handle in dma_fence, to destroy hw-fence during release */
fence->client_handle = obj->client_handle;
if (data.incr_context)
obj->context = dma_fence_context_alloc(1);
/* create fd */
fd = get_unused_fd_flags(0);
if (fd < 0) {
HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id);
dma_fence_put(&fence->base);
return fd;
}
sync_file = sync_file_create(&fence->base);
if (sync_file == NULL) {
HWFNC_ERR("couldn't create fence fd, %d\n", fd);
dma_fence_put(&fence->base);
ret = -EINVAL;
goto exit;
}
/* Decrement the refcount that sync_file_create increments */
dma_fence_put(&fence->base);
data.fence = fd;
data.hash = hash;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		/* fput releases the sync_file along with its fence reference */
		fput(sync_file->file);
		ret = -EFAULT;
		goto exit;
	}
fd_install(fd, sync_file->file);
return 0;
exit:
put_unused_fd(fd);
return ret;
}
static void _put_child_fences(int i, struct dma_fence **fences)
{
int fence_idx;
for (fence_idx = i; fence_idx >= 0 ; fence_idx--)
		dma_fence_put(fences[fence_idx]);
}
static long hw_sync_ioctl_create_fence_array(struct hw_sync_obj *obj, unsigned long arg)
{
struct dma_fence_array *fence_array;
struct hw_fence_array_sync_create_data data;
struct dma_fence **fences = NULL;
struct sync_file *sync_file;
int num_fences, i, fd, ret;
struct hw_dma_fence *fence;
if (!_is_valid_client(obj)) {
return -EINVAL;
} else if (IS_ERR_OR_NULL(obj->client_handle)) {
HWFNC_ERR("client:%d is not register as validation client\n", obj->client_id);
return -EINVAL;
}
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
num_fences = data.num_fences;
if (num_fences > HW_FENCE_ARRAY_SIZE) {
HWFNC_ERR("Number of fences: %d is greater than allowed size: %d\n",
num_fences, HW_FENCE_ARRAY_SIZE);
return -EINVAL;
}
fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
if (!fences) {
return -ENOMEM;
}
for (i = 0; i < num_fences; i++) {
fd = data.fences[i];
if (fd <= 0) {
kfree(fences);
return -EINVAL;
}
fence = (struct hw_dma_fence *)_hw_sync_get_fence(fd);
if (!fence) {
_put_child_fences(i-1, fences);
kfree(fences);
return -EINVAL;
}
fences[i] = &fence->base;
}
/* create the fence array from array of dma fences */
fence_array = dma_fence_array_create(num_fences, fences, obj->context, data.seqno, 0);
if (!fence_array) {
HWFNC_ERR("Error creating fence_array\n");
/* decrease the refcount incremented for each child fences */
for (i = 0; i < num_fences; i++)
dma_fence_put(fences[i]);
kfree(fences);
return -EINVAL;
}
/* create fd */
fd = get_unused_fd_flags(0);
if (fd <= 0) {
HWFNC_ERR("failed to get fd for client:%d\n", obj->client_id);
dma_fence_put(&fence_array->base);
return fd;
}
sync_file = sync_file_create(&fence_array->base);
	sync_file = sync_file_create(&fence_array->base);
	if (sync_file == NULL) {
		HWFNC_ERR("couldn't create fence fd, %d\n", fd);
		/* dropping the last reference also releases the fence array */
		dma_fence_put(&fence_array->base);
		ret = -EINVAL;
		goto exit;
}
/* Decrement the refcount that sync_file_create increments */
dma_fence_put(&fence_array->base);
data.fence_array_fd = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		/* fput releases the sync_file along with its fence_array reference */
		fput(sync_file->file);
		ret = -EFAULT;
		goto exit;
	}
fd_install(fd, sync_file->file);
return 0;
exit:
put_unused_fd(fd);
return ret;
}
/*
 * This IOCTL only supports receiving one fence as an input parameter, which can
 * be either a "dma_fence" or a "dma_fence_array"; eventually this API may be
 * expanded to receive more fences.
 */
static long hw_sync_ioctl_reg_for_wait(struct hw_sync_obj *obj, unsigned long arg)
{
struct dma_fence *fence;
int ret, fd, num_fences = 1;
if (!_is_valid_client(obj))
return -EINVAL;
if (copy_from_user(&fd, (void __user *)arg, sizeof(fd)))
return -EFAULT;
fence = (struct dma_fence *)_hw_sync_get_fence(fd);
if (!fence) {
HWFNC_ERR("Invalid fence fd: %d\n", fd);
return -EINVAL;
}
ret = msm_hw_fence_wait_update(obj->client_handle, &fence, num_fences, 1);
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
return ret;
}
static long hw_sync_ioctl_fence_signal(struct hw_sync_obj *obj, unsigned long arg)
{
struct msm_hw_fence_client *hw_fence_client;
struct hw_fence_sync_signal_data data;
int ret, tx_client, rx_client, signal_id;
if (!_is_valid_client(obj)) {
return -EINVAL;
} else if (IS_ERR_OR_NULL(obj->client_handle)) {
HWFNC_ERR("invalid client handle for the client_id: %d\n", obj->client_id);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle;
if (!hw_fence_client) {
HWFNC_ERR("invalid client handle\n");
return -EINVAL;
}
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
ret = msm_hw_fence_update_txq(obj->client_handle, data.hash, 0, data.error_flag);
if (ret) {
HWFNC_ERR("hw fence update txq has failed client_id: %d\n", obj->client_id);
return ret;
}
signal_id = dbg_out_clients_signal_map_no_dpu[obj->client_id].ipc_signal_id;
if (signal_id < 0)
return -EINVAL;
tx_client = hw_fence_client->ipc_client_pid;
rx_client = hw_fence_client->ipc_client_vid;
ret = msm_hw_fence_trigger_signal(obj->client_handle, tx_client, rx_client, signal_id);
if (ret) {
HWFNC_ERR("hw fence trigger signal has failed\n");
return ret;
}
return 0;
}
static long hw_sync_ioctl_fence_wait(struct hw_sync_obj *obj, unsigned long arg)
{
struct msm_hw_fence_client *hw_fence_client;
struct hw_fence_sync_wait_data data;
struct dma_fence *fence;
int fd, ret;
u32 error;
if (!_is_valid_client(obj))
return -EINVAL;
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
fd = data.fence;
fence = (struct dma_fence *)_hw_sync_get_fence(fd);
if (!fence) {
HWFNC_ERR("Invalid fence fd: %d\n", fd);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)obj->client_handle;
if (!hw_fence_client) {
HWFNC_ERR("invalid client handle for fd:%d\n", fd);
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
return -EINVAL;
}
ret = hw_fence_debug_wait_val(hw_fence_drv_data, hw_fence_client, fence, 0, 0,
data.timeout_ms, &error);
if (ret)
HWFNC_ERR("failed to wait for hw-fence client:%d ctx:%llu seq:%llu\n",
hw_fence_client->client_id, fence->context, fence->seqno);
/* Decrement the refcount that hw_sync_get_fence increments */
dma_fence_put(fence);
return ret;
}
static long hw_sync_ioctl_reset_client(struct hw_sync_obj *obj, unsigned long arg)
{
int ret;
struct hw_fence_sync_reset_data data;
if (!_is_valid_client(obj)) {
return -EINVAL;
} else if (IS_ERR_OR_NULL(obj->client_handle)) {
HWFNC_ERR("client:%d handle doesn't exists\n", obj->client_id);
return -EINVAL;
}
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
ret = msm_hw_fence_reset_client(obj->client_handle, data.reset_flag);
if (ret) {
HWFNC_ERR("hw fence reset client has failed\n");
return ret;
}
return 0;
}
static const struct hw_sync_ioctl_def hw_sync_debugfs_ioctls[] = {
HW_IOCTL_DEF(HW_SYNC_IOC_REG_CLIENT, hw_sync_ioctl_reg_client),
HW_IOCTL_DEF(HW_SYNC_IOC_UNREG_CLIENT, hw_sync_ioctl_unreg_client),
HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE, hw_sync_ioctl_create_fence),
HW_IOCTL_DEF(HW_SYNC_IOC_CREATE_FENCE_ARRAY, hw_sync_ioctl_create_fence_array),
HW_IOCTL_DEF(HW_SYNC_IOC_REG_FOR_WAIT, hw_sync_ioctl_reg_for_wait),
HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_SIGNAL, hw_sync_ioctl_fence_signal),
HW_IOCTL_DEF(HW_SYNC_IOC_FENCE_WAIT, hw_sync_ioctl_fence_wait),
HW_IOCTL_DEF(HW_SYNC_IOC_RESET_CLIENT, hw_sync_ioctl_reset_client)
};
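/*
 * Illustrative userspace flow for this validation interface; a sketch only, and the
 * debugfs node path below is an assumption (the node is created elsewhere). The
 * HW_FENCE_CLIENT_ID_VAL0 enum value is shown for illustration; userspace would pass
 * the matching integer.
 *
 *	int fd = open("/sys/kernel/debug/hw_sync", O_RDWR);	// hypothetical path
 *	int client_id = HW_FENCE_CLIENT_ID_VAL0;
 *	ioctl(fd, HW_SYNC_IOC_REG_CLIENT, &client_id);
 *	struct hw_fence_sync_create_data data = { .seqno = 1 };
 *	ioctl(fd, HW_SYNC_IOC_CREATE_FENCE, &data);	// data.fence holds a sync_file fd
 *	struct hw_fence_sync_signal_data sig = { .hash = data.hash };
 *	ioctl(fd, HW_SYNC_IOC_FENCE_SIGNAL, &sig);
 *	ioctl(fd, HW_SYNC_IOC_UNREG_CLIENT, &client_id);
 */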
static long hw_sync_debugfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct hw_sync_obj *obj = file->private_data;
int num = HW_FENCE_IOCTL_NR(cmd);
hw_fence_ioctl_t *func;
if (num >= HW_SYNC_IOCTL_COUNT) {
HWFNC_ERR("invalid ioctl num = %d\n", num);
return -EINVAL;
}
func = (&hw_sync_debugfs_ioctls[num])->func;
if (unlikely(!func)) {
HWFNC_ERR("no function num = %d\n", num);
return -ENOTTY;
}
return func(obj, arg);
}
const struct file_operations hw_sync_debugfs_fops = {
.open = hw_sync_debugfs_open,
.release = hw_sync_debugfs_release,
.unlocked_ioctl = hw_sync_debugfs_ioctl,
};

View File

@@ -0,0 +1,972 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/version.h>
#if (KERNEL_VERSION(6, 1, 25) <= LINUX_VERSION_CODE)
#include <linux/remoteproc/qcom_rproc.h>
#endif
#include <linux/kthread.h>
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_debug.h"
#include "hw_fence_drv_ipc.h"
#include "hw_fence_drv_fence.h"
struct hw_fence_driver_data *hw_fence_drv_data;
#if IS_ENABLED(CONFIG_QTI_ENABLE_HW_FENCE_DEFAULT)
bool hw_fence_driver_enable = true;
#else
bool hw_fence_driver_enable;
#endif
static int _set_power_vote_if_needed(struct hw_fence_driver_data *drv_data,
u32 client_id, bool state)
{
int ret = 0;
#if IS_ENABLED(CONFIG_DEBUG_FS)
if (drv_data->has_soccp && client_id >= HW_FENCE_CLIENT_ID_VAL0 &&
client_id <= HW_FENCE_CLIENT_ID_VAL6) {
ret = hw_fence_utils_set_power_vote(drv_data, state);
}
#endif /* CONFIG_DEBUG_FS */
return ret;
}
static void msm_hw_fence_client_destroy(struct kref *kref)
{
struct msm_hw_fence_client *hw_fence_client = container_of(kref,
struct msm_hw_fence_client, kref);
hw_fence_cleanup_client(hw_fence_drv_data, hw_fence_client);
}
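/**
 * msm_hw_fence_register() - Registers a client with the hw fence driver. If the client id
 * is already registered, the existing handle's refcount is incremented and the handle is
 * returned, populating @mem_descriptor from the stored copy; otherwise a new client is
 * allocated, its ipc ids, signal, and queues are resolved, and the Fence Controller is
 * updated with the client's resources.
 */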
void *msm_hw_fence_register(enum hw_fence_client_id client_id_ext,
struct msm_hw_fence_mem_addr *mem_descriptor)
{
struct msm_hw_fence_client *hw_fence_client;
enum hw_fence_client_id client_id;
int ret;
if (!hw_fence_driver_enable)
return ERR_PTR(-ENODEV);
HWFNC_DBG_H("++ client_id_ext:%d\n", client_id_ext);
ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data);
if (ret)
return ERR_PTR(ret);
if (client_id_ext >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext);
return ERR_PTR(-EINVAL);
}
client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext);
if (client_id >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid params: client_id:%d client_id_ext:%d\n",
client_id, client_id_ext);
return ERR_PTR(-EINVAL);
}
/* Alloc client handle */
hw_fence_client = kzalloc(sizeof(*hw_fence_client), GFP_KERNEL);
if (!hw_fence_client)
return ERR_PTR(-ENOMEM);
kref_init(&hw_fence_client->kref);
/* Avoid race condition if multiple-threads request same client at same time */
mutex_lock(&hw_fence_drv_data->clients_register_lock);
if (hw_fence_drv_data->clients[client_id] &&
kref_get_unless_zero(&hw_fence_drv_data->clients[client_id]->kref)) {
mutex_unlock(&hw_fence_drv_data->clients_register_lock);
HWFNC_DBG_INIT("client with id %d already registered\n", client_id);
kfree(hw_fence_client);
/* Client already exists, return the pointer to the client and populate mem desc */
hw_fence_client = hw_fence_drv_data->clients[client_id];
/* Init client memory descriptor */
if (!IS_ERR_OR_NULL(mem_descriptor))
memcpy(mem_descriptor, &hw_fence_client->mem_descriptor,
sizeof(struct msm_hw_fence_mem_addr));
else
HWFNC_DBG_L("null mem descriptor, skipping copy\n");
return hw_fence_client;
}
/* Mark client as registered */
hw_fence_drv_data->clients[client_id] = hw_fence_client;
mutex_unlock(&hw_fence_drv_data->clients_register_lock);
hw_fence_client->client_id = client_id;
hw_fence_client->client_id_ext = client_id_ext;
hw_fence_client->ipc_client_vid =
hw_fence_ipcc_get_client_virt_id(hw_fence_drv_data, client_id);
hw_fence_client->ipc_client_pid =
hw_fence_ipcc_get_client_phys_id(hw_fence_drv_data, client_id);
if (hw_fence_client->ipc_client_vid <= 0 || hw_fence_client->ipc_client_pid <= 0) {
HWFNC_ERR("Failed to find client:%d ipc vid:%d pid:%d\n", client_id,
hw_fence_client->ipc_client_vid, hw_fence_client->ipc_client_pid);
ret = -EINVAL;
goto error;
}
hw_fence_client->ipc_signal_id = hw_fence_ipcc_get_signal_id(hw_fence_drv_data, client_id);
if (hw_fence_client->ipc_signal_id < 0) {
HWFNC_ERR("Failed to find client:%d signal\n", client_id);
ret = -EINVAL;
goto error;
}
hw_fence_client->update_rxq = hw_fence_ipcc_needs_rxq_update(hw_fence_drv_data, client_id);
hw_fence_client->signaled_update_rxq =
hw_fence_ipcc_signaled_needs_rxq_update(hw_fence_drv_data, client_id);
hw_fence_client->signaled_send_ipc = hw_fence_ipcc_signaled_needs_ipc_irq(hw_fence_drv_data,
client_id);
hw_fence_client->txq_update_send_ipc =
hw_fence_ipcc_txq_update_needs_ipc_irq(hw_fence_drv_data, client_id);
hw_fence_client->queues_num = hw_fence_utils_get_queues_num(hw_fence_drv_data, client_id);
if (!hw_fence_client->queues_num) {
HWFNC_ERR("client:%d invalid q_num:%d\n", client_id, hw_fence_client->queues_num);
ret = -EINVAL;
goto error;
}
if (hw_fence_client->queues_num < HW_FENCE_CLIENT_QUEUES) {
hw_fence_client->update_rxq = false;
hw_fence_client->signaled_update_rxq = false;
}
hw_fence_client->skip_fctl_ref = hw_fence_utils_get_skip_fctl_ref(hw_fence_drv_data,
client_id);
/* Alloc Client HFI Headers and Queues */
ret = hw_fence_alloc_client_resources(hw_fence_drv_data,
hw_fence_client, mem_descriptor);
if (ret)
goto error;
/* Initialize signal for communication with FenceCTL */
ret = hw_fence_init_controller_signal(hw_fence_drv_data, hw_fence_client);
if (ret)
goto error;
/*
* Update Fence Controller with the address of the Queues and
* the Fences Tables for this client
*/
ret = hw_fence_init_controller_resources(hw_fence_client);
if (ret)
goto error;
hw_fence_client->context_id = dma_fence_context_alloc(1);
mutex_init(&hw_fence_client->error_cb_lock);
HWFNC_DBG_INIT("Initialized ptr:0x%p client_id:%d q_num:%d ipc signal:%d vid:%d pid:%d\n",
hw_fence_client, hw_fence_client->client_id, hw_fence_client->queues_num,
hw_fence_client->ipc_signal_id, hw_fence_client->ipc_client_vid,
hw_fence_client->ipc_client_pid);
HWFNC_DBG_INIT("update_rxq:%s signaled update_rxq:%s send_ipc:%s txq_update_send_ipc:%s\n",
hw_fence_client->update_rxq ? "true" : "false",
hw_fence_client->signaled_update_rxq ? "true" : "false",
hw_fence_client->signaled_send_ipc ? "true" : "false",
hw_fence_client->txq_update_send_ipc ? "true" : "false");
#if IS_ENABLED(CONFIG_DEBUG_FS)
init_waitqueue_head(&hw_fence_client->wait_queue);
#endif /* CONFIG_DEBUG_FS */
ret = _set_power_vote_if_needed(hw_fence_drv_data, hw_fence_client->client_id_ext, true);
if (ret) {
HWFNC_ERR("set soccp power vote failed, fail client:%u registration ret:%d\n",
hw_fence_client->client_id_ext, ret);
goto error;
}
return (void *)hw_fence_client;
error:
/* Free all the allocated resources */
kref_put(&hw_fence_client->kref, msm_hw_fence_client_destroy);
HWFNC_ERR("failed with error:%d\n", ret);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(msm_hw_fence_register);
int msm_hw_fence_deregister(void *client_handle)
{
struct msm_hw_fence_client *hw_fence_client;
bool destroyed_client;
u32 client_id;
int ret = 0;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
client_id = hw_fence_client->client_id_ext;
if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) {
HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id);
return -EINVAL;
}
HWFNC_DBG_H("+\n");
/* Free all the allocated resources */
	destroyed_client = kref_put(&hw_fence_client->kref, msm_hw_fence_client_destroy);
	if (destroyed_client)
		ret = _set_power_vote_if_needed(hw_fence_drv_data, client_id, false);
	if (ret)
		HWFNC_ERR("remove soccp power vote failed, fail client:%u deregistration ret:%d\n",
			client_id, ret);
HWFNC_DBG_H("-\n");
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_deregister);
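/**
 * msm_hw_fence_create() - Creates a hw-fence backing for an individual dma-fence: adds an
 * entry to the Global Table, registers a signal callback on the dma-fence, and sets
 * MSM_HW_FENCE_FLAG_ENABLED_BIT in its flags. Fence arrays and fences that are already
 * hw-fences are rejected; if no dma-fence is provided in @params, an internal one is
 * created instead.
 */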
int msm_hw_fence_create(void *client_handle,
struct msm_hw_fence_create_params *params)
{
struct msm_hw_fence_client *hw_fence_client;
struct dma_fence_array *array;
struct dma_fence *fence;
int ret;
ret = hw_fence_check_valid_fctl(hw_fence_drv_data, client_handle);
if (ret)
return ret;
if (!params || !params->handle) {
HWFNC_ERR("Invalid input\n");
return -EINVAL;
}
HWFNC_DBG_H("+\n");
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
fence = (struct dma_fence *)params->fence;
/* if not provided, create a dma-fence */
if (!fence) {
fence = hw_fence_internal_dma_fence_create(hw_fence_drv_data, hw_fence_client,
params->handle);
if (IS_ERR_OR_NULL(fence)) {
HWFNC_ERR("failed to create internal dma-fence for client:%d err:%ld\n",
hw_fence_client->client_id, PTR_ERR(fence));
return PTR_ERR(fence);
}
return 0;
}
/* Block any Fence-Array, we should only get individual fences */
array = to_dma_fence_array(fence);
if (array) {
HWFNC_ERR("HW Fence must be created for individual fences\n");
return -EINVAL;
}
/* This Fence is already a HW-Fence */
if (test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
HWFNC_ERR("DMA Fence already has HW Fence Flag set\n");
return -EINVAL;
}
/* Create the HW Fence, i.e. add entry in the Global Table for this Fence */
ret = hw_fence_create(hw_fence_drv_data, hw_fence_client, (u64)fence, fence->context,
fence->seqno, params->handle);
if (ret) {
HWFNC_ERR("Error creating HW fence\n");
return ret;
}
ret = hw_fence_add_callback(hw_fence_drv_data, fence, *params->handle);
if (ret) {
HWFNC_ERR("Fail to add dma-fence signal cb client:%d ctx:%llu seq:%llu ret:%d\n",
hw_fence_client->client_id, fence->context, fence->seqno, ret);
/* release both refs, one held by fctl and one held by creating client */
hw_fence_destroy_refcount(hw_fence_drv_data, *params->handle,
HW_FENCE_FCTL_REFCOUNT);
hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, *params->handle);
return ret;
}
/* If no error, set the HW Fence Flag in the dma-fence */
set_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags);
HWFNC_DBG_H("-\n");
return ret;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_create);
int msm_hw_fence_destroy(void *client_handle,
struct dma_fence *fence)
{
struct msm_hw_fence_client *hw_fence_client;
struct dma_fence_array *array;
int ret;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
if (!fence) {
HWFNC_ERR("Invalid data\n");
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
HWFNC_DBG_H("+\n");
/* Block any Fence-Array, we should only get individual fences */
array = to_dma_fence_array(fence);
if (array) {
HWFNC_ERR("HW Fence must be destroy for individual fences\n");
return -EINVAL;
}
	/* This Fence is not a HW-Fence */
if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
HWFNC_ERR("DMA Fence is not a HW Fence flags:0x%lx\n", fence->flags);
return -EINVAL;
}
if (dma_fence_is_hw_dma(fence)) {
HWFNC_ERR("deprecated api cannot destroy hw_dma_fence ctx:%llu seq:%llu\n",
fence->context, fence->seqno);
return -EINVAL;
}
/* Destroy the HW Fence, i.e. remove entry in the Global Table for the Fence */
ret = hw_fence_destroy(hw_fence_drv_data, hw_fence_client, (u64)fence,
fence->context, fence->seqno);
if (ret) {
HWFNC_ERR("Error destroying the HW fence\n");
return ret;
}
/* Clear the HW Fence Flag in the dma-fence */
clear_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags);
HWFNC_DBG_H("-\n");
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_destroy);
int msm_hw_fence_destroy_with_handle(void *client_handle, u64 handle)
{
struct msm_hw_fence_client *hw_fence_client;
int ret;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (hw_fence_client->client_id >= hw_fence_drv_data->clients_num) {
HWFNC_ERR("Invalid client_id:%d\n", hw_fence_client->client_id);
return -EINVAL;
}
HWFNC_DBG_H("+\n");
/* Destroy the HW Fence, i.e. remove entry in the Global Table for the Fence */
ret = hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client, handle);
if (ret) {
HWFNC_ERR("Error destroying the HW fence handle:%llu client_id:%d\n", handle,
hw_fence_client->client_id);
return ret;
}
HWFNC_DBG_H("-\n");
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_destroy_with_handle);
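/**
 * msm_hw_fence_wait_update_v2() - Registers the client to wait on a list of fences,
 * processing each entry as either a fence-array or an individual fence and optionally
 * returning the resulting hw-fence handles and storing per-fence client data. On failure,
 * the handles created for already-processed fences are destroyed before returning the
 * error.
 */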
int msm_hw_fence_wait_update_v2(void *client_handle,
struct dma_fence **fence_list, u64 *handles, u64 *client_data_list, u32 num_fences,
bool create)
{
struct msm_hw_fence_client *hw_fence_client;
struct dma_fence_array *array;
int i, j, destroy_ret, ret = 0;
enum hw_fence_client_data_id data_id;
ret = hw_fence_check_valid_fctl(hw_fence_drv_data, client_handle);
if (ret)
return ret;
if (!fence_list || !*fence_list) {
HWFNC_ERR("Invalid data\n");
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
data_id = hw_fence_get_client_data_id(hw_fence_client->client_id_ext);
if (client_data_list && data_id >= HW_FENCE_MAX_CLIENTS_WITH_DATA) {
HWFNC_ERR("Populating non-NULL client_data_list with invalid client_id_ext:%d\n",
hw_fence_client->client_id_ext);
return -EINVAL;
}
HWFNC_DBG_H("+\n");
/* Process all the list of fences */
for (i = 0; i < num_fences; i++) {
struct dma_fence *fence = fence_list[i];
u64 hash, client_data = 0;
if (client_data_list)
client_data = client_data_list[i];
/* Process a Fence-Array */
array = to_dma_fence_array(fence);
if (array) {
ret = hw_fence_process_fence_array(hw_fence_drv_data, hw_fence_client,
array, &hash, client_data);
if (ret) {
HWFNC_ERR("Failed to process FenceArray\n");
goto error;
}
} else {
/* Process individual Fence */
ret = hw_fence_process_fence(hw_fence_drv_data, hw_fence_client, fence,
&hash, client_data);
if (ret) {
HWFNC_ERR("Failed to process Fence\n");
goto error;
}
}
if (handles)
handles[i] = hash;
}
HWFNC_DBG_H("-\n");
return 0;
error:
for (j = 0; j < i; j++) {
destroy_ret = hw_fence_destroy_with_hash(hw_fence_drv_data, hw_fence_client,
handles[j]);
if (destroy_ret)
HWFNC_ERR("Failed decr fence ref ctx:%llu seq:%llu h:%llu idx:%d ret:%d\n",
fence_list[j] ? fence_list[j]->context : -1, fence_list[j] ?
fence_list[j]->seqno : -1, handles[j], j, destroy_ret);
}
return ret;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_wait_update_v2);
int msm_hw_fence_wait_update(void *client_handle,
struct dma_fence **fence_list, u32 num_fences, bool create)
{
u64 handle;
int i, ret = 0;
for (i = 0; i < num_fences; i++) {
ret = msm_hw_fence_wait_update_v2(client_handle, &fence_list[i], &handle, NULL,
1, create);
if (ret) {
HWFNC_ERR("Failed reg for wait on fence ctx:%llu seq:%llu idx:%d ret:%d\n",
fence_list[i] ? fence_list[i]->context : -1,
fence_list[i] ? fence_list[i]->seqno : -1, i, ret);
return ret;
}
/* decrement reference on hw-fence acquired by msm_hw_fence_wait_update_v2 call */
ret = msm_hw_fence_destroy_with_handle(client_handle, handle);
if (ret) {
HWFNC_ERR("Failed decr fence ref ctx:%llu seq:%llu h:%llu idx:%d ret:%d\n",
fence_list[i] ? fence_list[i]->context : -1,
fence_list[i] ? fence_list[i]->seqno : -1, handle, i, ret);
return ret;
}
}
return ret;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_wait_update);
int msm_hw_fence_reset_client(void *client_handle, u32 reset_flags)
{
struct msm_hw_fence_client *hw_fence_client;
struct msm_hw_fence *hw_fences_tbl;
int i, ret;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
hw_fences_tbl = hw_fence_drv_data->hw_fences_tbl;
HWFNC_DBG_L("reset fences and queues for client:%d\n", hw_fence_client->client_id);
for (i = 0; i < hw_fence_drv_data->hw_fences_tbl_cnt; i++)
hw_fence_utils_cleanup_fence(hw_fence_drv_data, hw_fence_client,
&hw_fences_tbl[i], i, reset_flags);
hw_fence_utils_reset_queues(hw_fence_drv_data, hw_fence_client);
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_reset_client);
int msm_hw_fence_reset_client_by_id(enum hw_fence_client_id client_id_ext, u32 reset_flags)
{
enum hw_fence_client_id client_id;
int ret;
ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data);
if (ret)
return ret;
if (client_id_ext >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid client_id_ext:%d\n", client_id_ext);
return -EINVAL;
}
client_id = hw_fence_utils_get_client_id_priv(hw_fence_drv_data, client_id_ext);
if (client_id >= HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Invalid client_id:%d client_id_ext:%d\n", client_id, client_id_ext);
return -EINVAL;
}
return msm_hw_fence_reset_client(hw_fence_drv_data->clients[client_id],
reset_flags);
}
EXPORT_SYMBOL_GPL(msm_hw_fence_reset_client_by_id);
int msm_hw_fence_update_txq(void *client_handle, u64 handle, u64 flags, u32 error)
{
struct msm_hw_fence_client *hw_fence_client;
int ret;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
if (handle >= hw_fence_drv_data->hw_fences_tbl_cnt) {
HWFNC_ERR("Invalid handle:%llu max:%d\n", handle,
hw_fence_drv_data->hw_fences_tbl_cnt);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
/* Write to Tx queue */
hw_fence_update_queue(hw_fence_drv_data, hw_fence_client,
hw_fence_drv_data->hw_fences_tbl[handle].ctx_id,
hw_fence_drv_data->hw_fences_tbl[handle].seq_id, handle,
flags, 0, error, HW_FENCE_TX_QUEUE - 1);
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_update_txq);
int msm_hw_fence_update_txq_error(void *client_handle, u64 handle, u32 error, u32 update_flags)
{
struct msm_hw_fence_client *hw_fence_client;
int ret;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
if ((handle >= hw_fence_drv_data->hw_fences_tbl_cnt) || !error) {
HWFNC_ERR("Invalid fence handle:%llu max:%d or error:%d\n",
handle, hw_fence_drv_data->hw_fences_tbl_cnt, error);
return -EINVAL;
}
if (update_flags != MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE) {
HWFNC_ERR("invalid flags:0x%x expected:0x%lx no support of in-place error update\n",
update_flags, MSM_HW_FENCE_UPDATE_ERROR_WITH_MOVE);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
/* Write to Tx queue */
hw_fence_update_existing_txq_payload(hw_fence_drv_data, hw_fence_client,
handle, error);
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_update_txq_error);
/* tx client must be the physical id, rx client the virtual id */
int msm_hw_fence_trigger_signal(void *client_handle,
u32 tx_client_pid, u32 rx_client_vid,
u32 signal_id)
{
struct msm_hw_fence_client *hw_fence_client;
int ret;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
HWFNC_DBG_H("sending ipc for client:%d\n", hw_fence_client->client_id);
hw_fence_ipcc_trigger_signal(hw_fence_drv_data, tx_client_pid,
rx_client_vid, signal_id);
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_trigger_signal);
int msm_hw_fence_register_error_cb(void *client_handle, msm_hw_fence_error_cb_t cb, void *data)
{
struct msm_hw_fence_client *hw_fence_client;
int ret;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
if (IS_ERR_OR_NULL(cb) || IS_ERR_OR_NULL(data)) {
HWFNC_ERR("Invalid params cb_func:0x%pK data:0x%pK\n", cb, data);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (hw_fence_client->fence_error_cb) {
HWFNC_ERR("client_id:%d client_id_ext:%d already registered cb_func:%pK data:%pK\n",
hw_fence_client->client_id, hw_fence_client->client_id_ext,
hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata);
return -EINVAL;
}
hw_fence_client->fence_error_cb_userdata = data;
hw_fence_client->fence_error_cb = cb;
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_register_error_cb);
int msm_hw_fence_deregister_error_cb(void *client_handle)
{
struct msm_hw_fence_client *hw_fence_client;
int ret = 0;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (!mutex_trylock(&hw_fence_client->error_cb_lock)) {
HWFNC_ERR("client_id:%d is modifying or using fence_error_cb:0x%pK data:0x%pK\n",
hw_fence_client->client_id, hw_fence_client->fence_error_cb,
hw_fence_client->fence_error_cb_userdata);
return -EAGAIN;
}
if (!hw_fence_client->fence_error_cb) {
HWFNC_ERR("client_id:%d client_id_ext:%d did not register cb:%pK data:%pK\n",
hw_fence_client->client_id, hw_fence_client->client_id_ext,
hw_fence_client->fence_error_cb, hw_fence_client->fence_error_cb_userdata);
ret = -EINVAL;
goto exit;
}
hw_fence_client->fence_error_cb = NULL;
hw_fence_client->fence_error_cb_userdata = NULL;
exit:
mutex_unlock(&hw_fence_client->error_cb_lock);
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_deregister_error_cb);
#if IS_ENABLED(CONFIG_DEBUG_FS)
int msm_hw_fence_dump_debug_data(void *client_handle, u32 dump_flags, u32 dump_clients_mask)
{
struct msm_hw_fence_client *hw_fence_client;
int client_id, ret;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
if (dump_flags & MSM_HW_FENCE_DBG_DUMP_QUEUES) {
hw_fence_debug_dump_queues(hw_fence_drv_data, HW_FENCE_PRINTK, hw_fence_client);
if (dump_clients_mask)
for (client_id = 0; client_id < HW_FENCE_CLIENT_MAX; client_id++)
if ((dump_clients_mask & (1 << client_id)) &&
hw_fence_drv_data->clients[client_id])
hw_fence_debug_dump_queues(hw_fence_drv_data,
HW_FENCE_PRINTK,
hw_fence_drv_data->clients[client_id]);
}
if (dump_flags & MSM_HW_FENCE_DBG_DUMP_TABLE)
hw_fence_debug_dump_table(HW_FENCE_PRINTK, hw_fence_drv_data);
if (dump_flags & MSM_HW_FENCE_DBG_DUMP_EVENTS)
hw_fence_debug_dump_events(HW_FENCE_PRINTK, hw_fence_drv_data);
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_dump_debug_data);
int msm_hw_fence_dump_fence(void *client_handle, struct dma_fence *fence)
{
struct msm_hw_fence_client *hw_fence_client;
struct msm_hw_fence *hw_fence;
u64 hash;
int ret;
ret = hw_fence_check_valid_client(hw_fence_drv_data, client_handle);
if (ret)
return ret;
if (!test_bit(MSM_HW_FENCE_FLAG_ENABLED_BIT, &fence->flags)) {
HWFNC_ERR("DMA Fence is not a HW Fence ctx:%llu seqno:%llu flags:0x%lx\n",
fence->context, fence->seqno, fence->flags);
return -EINVAL;
}
hw_fence_client = (struct msm_hw_fence_client *)client_handle;
hw_fence = msm_hw_fence_find(hw_fence_drv_data, hw_fence_client, (u64)fence, fence->context,
fence->seqno, &hash);
if (!hw_fence) {
HWFNC_ERR("failed to find hw-fence client_id:%d fence:0x%pK ctx:%llu seqno:%llu\n",
hw_fence_client->client_id, fence, fence->context, fence->seqno);
return -EINVAL;
}
hw_fence_debug_dump_fence(HW_FENCE_PRINTK, hw_fence, hash, 0);
/* release refcount acquired by finding fence */
msm_hw_fence_destroy_with_handle(client_handle, hash);
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_dump_fence);
#endif /* CONFIG_DEBUG_FS */
/* Function used for simulation purposes only. */
int msm_hw_fence_driver_doorbell_sim(u64 db_mask)
{
int ret;
ret = hw_fence_check_hw_fence_driver(hw_fence_drv_data);
if (ret)
return ret;
HWFNC_DBG_IRQ("db callback sim-mode flags:0x%llx qtime:%llu\n",
db_mask, hw_fence_get_qtime(hw_fence_drv_data));
hw_fence_utils_process_signaled_clients_mask(hw_fence_drv_data, db_mask);
return 0;
}
EXPORT_SYMBOL_GPL(msm_hw_fence_driver_doorbell_sim);
static int msm_hw_fence_probe_init(struct platform_device *pdev)
{
int rc;
HWFNC_DBG_H("+\n");
hw_fence_drv_data = kzalloc(sizeof(*hw_fence_drv_data), GFP_KERNEL);
if (!hw_fence_drv_data)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, hw_fence_drv_data);
hw_fence_drv_data->dev = &pdev->dev;
if (hw_fence_driver_enable) {
/* Initialize HW Fence Driver resources */
rc = hw_fence_init(hw_fence_drv_data);
if (rc)
goto error;
mutex_init(&hw_fence_drv_data->clients_register_lock);
/* set ready value so clients can register */
hw_fence_drv_data->resources_ready = true;
} else {
/* check for presence of soccp */
hw_fence_drv_data->has_soccp =
of_property_read_bool(hw_fence_drv_data->dev->of_node, "soccp_controller");
/* Allocate hw fence driver mem pool and share it with HYP */
rc = hw_fence_utils_alloc_mem(hw_fence_drv_data);
if (rc) {
HWFNC_ERR_ONCE("failed to alloc base memory\n");
goto error;
}
HWFNC_DBG_INFO("hw fence driver not enabled\n");
}
HWFNC_DBG_H("-\n");
return rc;
error:
dev_set_drvdata(&pdev->dev, NULL);
kfree(hw_fence_drv_data->ipc_clients_table);
kfree(hw_fence_drv_data->hw_fence_client_queue_size);
if (hw_fence_drv_data->cpu_addr_cookie)
dma_free_attrs(hw_fence_drv_data->dev, hw_fence_drv_data->size,
hw_fence_drv_data->cpu_addr_cookie, hw_fence_drv_data->res.start,
DMA_ATTR_NO_KERNEL_MAPPING);
kfree(hw_fence_drv_data);
hw_fence_drv_data = (void *) -EPROBE_DEFER;
HWFNC_ERR_ONCE("error %d\n", rc);
return rc;
}
static int msm_hw_fence_probe(struct platform_device *pdev)
{
int rc = -EINVAL;
HWFNC_DBG_H("+\n");
if (!pdev) {
HWFNC_ERR("null platform dev\n");
return -EINVAL;
}
if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-hw-fence"))
rc = msm_hw_fence_probe_init(pdev);
if (rc)
goto err_exit;
HWFNC_DBG_H("-\n");
return 0;
err_exit:
HWFNC_ERR_ONCE("error %d\n", rc);
return rc;
}
static int msm_hw_fence_remove(struct platform_device *pdev)
{
struct hw_fence_soccp *soccp_props;
HWFNC_DBG_H("+\n");
if (!pdev) {
HWFNC_ERR("null platform dev\n");
return -EINVAL;
}
hw_fence_drv_data = dev_get_drvdata(&pdev->dev);
if (!hw_fence_drv_data) {
HWFNC_ERR("null driver data\n");
return -EINVAL;
}
soccp_props = &hw_fence_drv_data->soccp_props;
if (soccp_props->ssr_notifier) {
if (qcom_unregister_ssr_notifier(soccp_props->ssr_notifier,
&soccp_props->ssr_nb))
HWFNC_ERR("failed to unregister soccp ssr notifier\n");
}
/* indicate listener thread should stop listening for interrupts from soccp */
hw_fence_drv_data->has_soccp = false;
if (hw_fence_drv_data->soccp_listener_thread)
kthread_stop(hw_fence_drv_data->soccp_listener_thread);
dev_set_drvdata(&pdev->dev, NULL);
/* free memory allocations as part of hw_fence_drv_data */
kfree(hw_fence_drv_data->ipc_clients_table);
kfree(hw_fence_drv_data->hw_fence_client_queue_size);
kfree(hw_fence_drv_data->hlos_key_tbl);
if (hw_fence_drv_data->cpu_addr_cookie)
dma_free_attrs(hw_fence_drv_data->dev, hw_fence_drv_data->size,
hw_fence_drv_data->cpu_addr_cookie, hw_fence_drv_data->res.start,
DMA_ATTR_NO_KERNEL_MAPPING);
kfree(hw_fence_drv_data);
hw_fence_drv_data = (void *) -EPROBE_DEFER;
HWFNC_DBG_H("-\n");
return 0;
}
static const struct of_device_id msm_hw_fence_dt_match[] = {
{.compatible = "qcom,msm-hw-fence"},
{}
};
static struct platform_driver msm_hw_fence_driver = {
.probe = msm_hw_fence_probe,
.remove = msm_hw_fence_remove,
.driver = {
.name = "msm-hw-fence",
.of_match_table = of_match_ptr(msm_hw_fence_dt_match),
},
};
static int __init msm_hw_fence_init(void)
{
int rc = 0;
HWFNC_DBG_H("+\n");
rc = platform_driver_register(&msm_hw_fence_driver);
if (rc) {
HWFNC_ERR("%s: failed to register platform driver\n",
__func__);
return rc;
}
HWFNC_DBG_H("-\n");
return 0;
}
static void __exit msm_hw_fence_exit(void)
{
HWFNC_DBG_H("+\n");
platform_driver_unregister(&msm_hw_fence_driver);
HWFNC_DBG_H("-\n");
}
module_param_named(enable, hw_fence_driver_enable, bool, 0600);
MODULE_PARM_DESC(enable, "Enable hardware fences");
module_init(msm_hw_fence_init);
module_exit(msm_hw_fence_exit);
MODULE_DESCRIPTION("QTI HW Fence Driver");
MODULE_LICENSE("GPL v2");

View File

@@ -0,0 +1,525 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <synx_api.h>
#include <synx_hwfence.h>
#include <synx_interop.h>
#include "msm_hw_fence.h"
#include "hw_fence_drv_priv.h"
#include "hw_fence_drv_utils.h"
#include "hw_fence_drv_debug.h"
#include "hw_fence_drv_interop.h"
/**
* MAX_SUPPORTED_DPU0: Maximum number of dpu clients supported
* MAX_SUPPORTED_TEST: Maximum number of validation clients supported
*/
#define MAX_SUPPORTED_DPU0 (HW_FENCE_CLIENT_ID_CTL5 - HW_FENCE_CLIENT_ID_CTL0)
#define MAX_SUPPORTED_TEST (HW_FENCE_CLIENT_ID_VAL6 - HW_FENCE_CLIENT_ID_VAL0)
static enum hw_fence_client_id _get_hw_fence_client_id(enum synx_client_id synx_client_id)
{
enum hw_fence_client_id hw_fence_client_id;
switch ((int)synx_client_id) {
case SYNX_CLIENT_HW_FENCE_GFX_CTX0:
hw_fence_client_id = HW_FENCE_CLIENT_ID_CTX0;
break;
case SYNX_CLIENT_HW_FENCE_IPE_CTX0 ... SYNX_CLIENT_HW_FENCE_IPE_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT - 1:
hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IPE_CTX0 +
HW_FENCE_CLIENT_ID_IPE;
break;
case SYNX_CLIENT_HW_FENCE_VID_CTX0 ... SYNX_CLIENT_HW_FENCE_VID_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT - 1:
hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_VID_CTX0 +
HW_FENCE_CLIENT_ID_VPU;
break;
case SYNX_CLIENT_HW_FENCE_DPU0_CTL0 ... SYNX_CLIENT_HW_FENCE_DPU0_CTL0 + MAX_SUPPORTED_DPU0:
hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_DPU0_CTL0 +
HW_FENCE_CLIENT_ID_CTL0;
break;
case SYNX_CLIENT_HW_FENCE_IPA_CTX0 ... SYNX_CLIENT_HW_FENCE_IPA_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT - 1:
hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IPA_CTX0 +
HW_FENCE_CLIENT_ID_IPA;
break;
case SYNX_CLIENT_HW_FENCE_IFE0_CTX0 ... SYNX_CLIENT_HW_FENCE_IFE11_CTX0 +
SYNX_MAX_SIGNAL_PER_CLIENT - 1:
hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_IFE0_CTX0 +
HW_FENCE_CLIENT_ID_IFE0;
break;
case SYNX_CLIENT_HW_FENCE_TEST_CTX0 ... SYNX_CLIENT_HW_FENCE_TEST_CTX0 + MAX_SUPPORTED_TEST:
hw_fence_client_id = synx_client_id - SYNX_CLIENT_HW_FENCE_TEST_CTX0 +
HW_FENCE_CLIENT_ID_VAL0;
break;
default:
HWFNC_ERR("Unsupported hw-fence client for synx_id:%d\n", synx_client_id);
hw_fence_client_id = HW_FENCE_CLIENT_MAX;
break;
}
return hw_fence_client_id;
}
static bool is_hw_fence_client(enum synx_client_id synx_client_id)
{
return synx_client_id >= SYNX_HW_FENCE_CLIENT_START
&& synx_client_id < SYNX_HW_FENCE_CLIENT_END;
}
struct synx_session *synx_hwfence_initialize(struct synx_initialization_params *params)
{
struct synx_session *session = NULL;
enum hw_fence_client_id client_id;
void *client_handle;
if (!hw_fence_driver_enable)
return ERR_PTR(-SYNX_INVALID);
if (IS_ERR_OR_NULL(params)) {
HWFNC_ERR("invalid params:0x%pK\n", params);
return ERR_PTR(-SYNX_INVALID);
}
client_id = _get_hw_fence_client_id(params->id);
if (!is_hw_fence_client(params->id) || client_id == HW_FENCE_CLIENT_MAX) {
HWFNC_ERR("Initializing session for invalid synx_id:%d\n", params->id);
return ERR_PTR(-SYNX_INVALID);
}
session = kzalloc(sizeof(struct synx_session), GFP_KERNEL);
if (!session)
return ERR_PTR(-SYNX_NOMEM);
client_handle = msm_hw_fence_register(client_id,
(struct msm_hw_fence_mem_addr *)params->ptr);
if (IS_ERR_OR_NULL(client_handle)) {
kfree(session);
HWFNC_ERR("failed to initialize synx_id:%d ret:%ld\n", params->id,
PTR_ERR(client_handle));
return ERR_PTR(hw_fence_interop_to_synx_status(PTR_ERR(client_handle)));
}
session->client = client_handle;
session->type = params->id;
HWFNC_DBG_INIT("initialized session synx_id:%d hw_fence_id:%d\n", params->id, client_id);
return session;
}
EXPORT_SYMBOL_GPL(synx_hwfence_initialize);
static int synx_hwfence_uninitialize(struct synx_session *session)
{
int ret;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type);
return -SYNX_INVALID;
}
ret = msm_hw_fence_deregister(session->client);
if (ret)
HWFNC_ERR("Failed to deregister synx_id:%d ret:%d\n", session->type, ret);
else
kfree(session);
return hw_fence_interop_to_synx_status(ret);
}
static int synx_hwfence_create(struct synx_session *session, struct synx_create_params *params)
{
int ret = 0;
struct msm_hw_fence_create_params hwfence_params;
u64 handle;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) ||
IS_ERR_OR_NULL(params)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type, params);
return -SYNX_INVALID;
}
if (IS_ERR_OR_NULL(params->h_synx) || (params->flags > SYNX_CREATE_MAX_FLAGS) ||
(params->flags & SYNX_CREATE_CSL_FENCE)) {
HWFNC_ERR("synx_id:%d invalid create params h_synx:0x%pK flags:0x%x\n",
session->type, params->h_synx, params->flags);
return -SYNX_INVALID;
}
/* if SYNX_CREATE_DMA_FENCE specified and no dma-fence, fail */
if (!params->fence && (params->flags & SYNX_CREATE_DMA_FENCE)) {
HWFNC_ERR("synx_id:%d invalid fence:%pK params flags:0x%x\n",
session->type, params->fence, params->flags);
return -SYNX_INVALID;
}
hwfence_params.fence = params->fence;
hwfence_params.handle = &handle;
ret = msm_hw_fence_create(session->client, &hwfence_params);
if (ret) {
HWFNC_ERR("synx_id:%d failed create fence:0x%pK flags:0x%x ret:%d\n", session->type,
params->fence, params->flags, ret);
return hw_fence_interop_to_synx_status(ret);
}
if (handle > U32_MAX) {
HWFNC_ERR("synx_id:%d fence handle:%llu would overflow h_synx\n", session->type,
handle);
hw_fence_destroy_refcount(hw_fence_drv_data, handle, HW_FENCE_FCTL_REFCOUNT);
msm_hw_fence_destroy_with_handle(session->client, handle);
return -SYNX_INVALID;
}
*params->h_synx = SYNX_HW_FENCE_HANDLE_FLAG | handle;
return SYNX_SUCCESS;
}
static int synx_hwfence_release(struct synx_session *session, u32 h_synx)
{
int ret;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) ||
!(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type);
return -SYNX_INVALID;
}
h_synx &= HW_FENCE_HANDLE_INDEX_MASK;
ret = msm_hw_fence_destroy_with_handle(session->client, h_synx);
if (ret)
HWFNC_ERR("synx_id:%d failed to destroy fence h_synx:%u ret:%d\n", session->type,
h_synx, ret);
return hw_fence_interop_to_synx_status(ret);
}
static int synx_hwfence_signal(struct synx_session *session, u32 h_synx,
enum synx_signal_status status)
{
struct msm_hw_fence_client *hw_fence_client;
u32 error;
int ret;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) || !session->client ||
!(h_synx & SYNX_HW_FENCE_HANDLE_FLAG) ||
!(status == SYNX_STATE_SIGNALED_SUCCESS ||
status == SYNX_STATE_SIGNALED_CANCEL ||
status > SYNX_STATE_SIGNALED_MAX)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d client:0x%pK h_synx:%u status:%u\n",
session, IS_ERR_OR_NULL(session) ? -1 : session->type,
IS_ERR_OR_NULL(session) ? NULL : session->client, h_synx, status);
return -SYNX_INVALID;
}
error = hw_fence_interop_to_hw_fence_error(status);
h_synx &= HW_FENCE_HANDLE_INDEX_MASK;
ret = msm_hw_fence_update_txq(session->client, h_synx, 0, error);
if (ret) {
HWFNC_ERR("synx_id:%d failed to signal fence h_synx:%u status:%d ret:%d\n",
session->type, h_synx, status, ret);
goto error;
}
hw_fence_client = (struct msm_hw_fence_client *)session->client;
if (hw_fence_client->txq_update_send_ipc)
hw_fence_ipcc_trigger_signal(hw_fence_drv_data,
hw_fence_client->ipc_client_pid, hw_fence_drv_data->ipcc_fctl_vid,
hw_fence_client->ipc_signal_id);
error:
return hw_fence_interop_to_synx_status(ret);
}
static int synx_hwfence_wait(struct synx_session *session, u32 h_synx, u64 timeout_ms)
{
int ret = -EINVAL;
u32 error;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) ||
!(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type);
return -SYNX_INVALID;
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
if (session->type >= SYNX_CLIENT_HW_FENCE_TEST_CTX0
&& session->type <= SYNX_CLIENT_HW_FENCE_TEST_CTX0 + MAX_SUPPORTED_TEST)
ret = hw_fence_debug_wait_val(hw_fence_drv_data, session->client, NULL, h_synx,
HW_FENCE_HANDLE_INDEX_MASK, timeout_ms, &error);
#endif /* CONFIG_DEBUG_FS */
if (ret) {
HWFNC_ERR("synx_id:%d failed to wait on fence h_synx:%u timeout_ms:%llu\n",
session->type, h_synx, timeout_ms);
return hw_fence_interop_to_synx_status(ret);
}
return hw_fence_interop_to_synx_signal_status(MSM_HW_FENCE_FLAG_SIGNAL, error);
}
int synx_hwfence_recover(enum synx_client_id id)
{
int ret;
if (!is_hw_fence_client(id)) {
HWFNC_ERR("invalid synx_id:%d\n", id);
return -SYNX_INVALID;
}
ret = msm_hw_fence_reset_client_by_id(_get_hw_fence_client_id(id),
MSM_HW_FENCE_RESET_WITHOUT_DESTROY);
if (ret)
HWFNC_ERR("synx_id:%d failed to recover ret:%d\n", id, ret);
return hw_fence_interop_to_synx_status(ret);
}
EXPORT_SYMBOL_GPL(synx_hwfence_recover);
static void *synx_hwfence_get_fence(struct synx_session *session, u32 h_synx)
{
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) ||
!(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d h_synx:%u\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type, h_synx);
return ERR_PTR(-SYNX_INVALID);
}
return (void *)hw_fence_interop_get_fence(h_synx);
}
static int synx_hwfence_get_status(struct synx_session *session, u32 h_synx)
{
u64 flags;
u32 error;
int ret;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type) ||
!(h_synx & SYNX_HW_FENCE_HANDLE_FLAG)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d h_synx:%u\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type, h_synx);
return SYNX_STATE_INVALID;
}
h_synx &= HW_FENCE_HANDLE_INDEX_MASK;
ret = hw_fence_get_flags_error(hw_fence_drv_data, h_synx, &flags, &error);
if (ret) {
HWFNC_ERR("Failed to get status for client:%d h_synx:%u\n", session->type, h_synx);
return SYNX_STATE_INVALID;
}
return hw_fence_interop_to_synx_signal_status(flags, error);
}
static int synx_hwfence_import_fence(void *client, struct synx_import_indv_params *params)
{
struct dma_fence_array *array;
struct dma_fence *fence;
u64 handle;
int ret, i;
ret = hw_fence_check_valid_fctl(hw_fence_drv_data, client);
if (ret)
return hw_fence_interop_to_synx_status(ret);
fence = (struct dma_fence *)params->fence;
array = to_dma_fence_array(fence);
if (array) {
for (i = 0; i < array->num_fences; i++) {
if (dma_fence_is_array(array->fences[i])) {
HWFNC_ERR("nested fence arrays not supported idx:%d fence:0x%pK\n",
i, array->fences[i]);
ret = -SYNX_INVALID;
break;
}
params->fence = array->fences[i];
ret = hw_fence_interop_create_fence_from_import(params);
if (ret) {
HWFNC_ERR("failed to back dma_fence_array idx:%d fence:0x%pK\n",
i, array->fences[i]);
params->fence = fence;
break;
}
}
params->fence = fence;
} else {
ret = hw_fence_interop_create_fence_from_import(params);
}
if (ret) {
HWFNC_ERR("failed to back dma-fence:0x%pK with hw-fence(s) ret:%d\n",
params->fence, ret);
return ret;
}
ret = msm_hw_fence_wait_update_v2(client, (struct dma_fence **)&params->fence, &handle,
NULL, 1, true);
if (ret) {
HWFNC_ERR("failed to import fence:0x%pK flags:0x%x ret:%d\n", params->fence,
params->flags, ret);
goto error;
}
if (handle > U32_MAX) {
HWFNC_ERR("fence handle:%llu would overflow new_h_synx\n", handle);
msm_hw_fence_wait_update_v2(client, (struct dma_fence **)&params->fence, &handle,
NULL, 1, false);
return -SYNX_INVALID;
}
*params->new_h_synx = SYNX_HW_FENCE_HANDLE_FLAG | handle;
error:
return hw_fence_interop_to_synx_status(ret);
}
static int synx_hwfence_import_handle(void *client, struct synx_import_indv_params *params)
{
struct synx_import_indv_params fence_params;
u32 h_synx;
int ret;
if (!synx_interops.get_fence) {
HWFNC_ERR("invalid synx_get_fence:0x%pK\n", synx_interops.get_fence);
return -SYNX_INVALID;
}
h_synx = *(u32 *)params->fence;
if (h_synx & SYNX_HW_FENCE_HANDLE_FLAG)
fence_params.fence = hw_fence_interop_get_fence(h_synx);
else
fence_params.fence = synx_interops.get_fence(h_synx);
if (IS_ERR_OR_NULL(fence_params.fence)) {
HWFNC_ERR("failed to get native fence h_synx:%u ret:0x%pK\n", h_synx,
fence_params.fence);
return -SYNX_INVALID;
}
fence_params.new_h_synx = params->new_h_synx;
fence_params.flags = SYNX_IMPORT_DMA_FENCE;
ret = synx_hwfence_import_fence(client, &fence_params);
dma_fence_put(fence_params.fence); /* release dma-fence ref acquired by get_fence */
return ret;
}
static int synx_hwfence_import_indv(void *client, struct synx_import_indv_params *params)
{
if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) ||
IS_ERR_OR_NULL(params->new_h_synx) ||
!((params->flags & SYNX_IMPORT_DMA_FENCE) ||
(params->flags & SYNX_IMPORT_SYNX_FENCE)) ||
IS_ERR_OR_NULL(params->fence)) {
HWFNC_ERR("invalid client:0x%pK params:0x%pK h_synx:0x%pK flags:0x%x fence:0x%pK\n",
client, params, IS_ERR_OR_NULL(params) ? NULL : params->new_h_synx,
IS_ERR_OR_NULL(params) ? 0 : params->flags,
IS_ERR_OR_NULL(params) ? NULL : params->fence);
return -SYNX_INVALID;
}
if (params->flags & SYNX_IMPORT_DMA_FENCE)
return synx_hwfence_import_fence(client, params);
else if (params->flags & SYNX_IMPORT_SYNX_FENCE)
return synx_hwfence_import_handle(client, params);
HWFNC_ERR("invalid import flags:0x%x\n", params->flags);
return -SYNX_INVALID;
}
static int synx_hwfence_import_arr(void *client, struct synx_import_arr_params *params)
{
int i, ret;
if (IS_ERR_OR_NULL(client) || IS_ERR_OR_NULL(params) || !params->num_fences) {
HWFNC_ERR("invalid import arr client:0x%pK params:0x%pK num_fences:%u\n", client,
params, IS_ERR_OR_NULL(params) ? -1 : params->num_fences);
return -SYNX_INVALID;
}
for (i = 0; i < params->num_fences; i++) {
ret = synx_hwfence_import_indv(client, &params->list[i]);
if (ret) {
HWFNC_ERR("importing fence[%u] 0x%pK failed ret:%d\n", i,
params->list[i].fence, ret);
return ret;
}
}
return SYNX_SUCCESS;
}
int synx_hwfence_import(struct synx_session *session, struct synx_import_params *params)
{
int ret;
if (IS_ERR_OR_NULL(session) || !is_hw_fence_client(session->type)
|| IS_ERR_OR_NULL(params)) {
HWFNC_ERR("invalid session:0x%pK synx_id:%d params:0x%pK\n", session,
IS_ERR_OR_NULL(session) ? -1 : session->type, params);
return -SYNX_INVALID;
}
if (params->type == SYNX_IMPORT_ARR_PARAMS)
ret = synx_hwfence_import_arr(session->client, &params->arr);
else
ret = synx_hwfence_import_indv(session->client, &params->indv);
if (ret)
HWFNC_ERR("synx_id:%d failed to import type:%s fences ret:%d\n", session->type,
(params->type == SYNX_IMPORT_ARR_PARAMS) ? "arr" : "indv", ret);
return ret;
}
int synx_hwfence_init_ops(struct synx_ops *hwfence_ops)
{
if (IS_ERR_OR_NULL(hwfence_ops)) {
HWFNC_ERR("invalid ops\n");
return -SYNX_INVALID;
}
hwfence_ops->uninitialize = synx_hwfence_uninitialize;
hwfence_ops->create = synx_hwfence_create;
hwfence_ops->release = synx_hwfence_release;
hwfence_ops->signal = synx_hwfence_signal;
hwfence_ops->import = synx_hwfence_import;
hwfence_ops->get_fence = synx_hwfence_get_fence;
hwfence_ops->get_status = synx_hwfence_get_status;
hwfence_ops->wait = synx_hwfence_wait;
return SYNX_SUCCESS;
}
EXPORT_SYMBOL_GPL(synx_hwfence_init_ops);
int synx_hwfence_enable_resources(enum synx_client_id id, enum synx_resource_type resource,
bool enable)
{
int ret;
if (!hw_fence_driver_enable)
return -SYNX_INVALID;
if (IS_ERR_OR_NULL(hw_fence_drv_data) || !hw_fence_drv_data->resources_ready) {
HWFNC_ERR("hw fence driver not ready\n");
return -SYNX_INVALID;
}
if (!is_hw_fence_client(id) || !(resource == SYNX_RESOURCE_SOCCP)) {
HWFNC_ERR("enabling hw-fence resources for invalid client id:%d res:%d enable:%d\n",
id, resource, enable);
return -SYNX_INVALID;
}
if (!hw_fence_drv_data->has_soccp)
return SYNX_SUCCESS;
ret = hw_fence_utils_set_power_vote(hw_fence_drv_data, enable);
if (ret)
HWFNC_ERR("Failed to vote for SOCCP state:%d\n", enable);
return hw_fence_interop_to_synx_status(ret);
}
EXPORT_SYMBOL_GPL(synx_hwfence_enable_resources);
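A condensed sketch of how a caller might drive the ops table this file populates; the session setup and field names are taken from the code above, while the client id, flag values, and call ordering are illustrative only:

static int example_synx_hwfence_usage(void)
{
	struct synx_initialization_params init_params = {0};
	struct synx_create_params create_params = {0};
	struct synx_ops ops = {0};
	struct synx_session *session;
	u32 h_synx;
	int ret;

	/* wire up the hw-fence implementations of the synx operations */
	ret = synx_hwfence_init_ops(&ops);
	if (ret)
		return ret;

	/* open a session; id must be one of the hw-fence synx client ids */
	init_params.id = SYNX_CLIENT_HW_FENCE_GFX_CTX0;
	session = synx_hwfence_initialize(&init_params);
	if (IS_ERR_OR_NULL(session))
		return PTR_ERR(session);

	/* create, signal, and release one fence through the ops table */
	create_params.h_synx = &h_synx; /* receives SYNX_HW_FENCE_HANDLE_FLAG | index */
	ret = ops.create(session, &create_params);
	if (!ret) {
		ops.signal(session, h_synx, SYNX_STATE_SIGNALED_SUCCESS);
		ops.release(session, h_synx);
	}

	ops.uninitialize(session);
	return ret;
}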

View File

@@ -0,0 +1,2 @@
CONFIG_QTI_HW_FENCE=y
CONFIG_QTI_HW_FENCE_USE_SYNX=y

View File

@@ -0,0 +1,26 @@
# SPDX-License-Identifier: GPL-2.0-only
MM_DRV_DLKM_ENABLE := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
MM_DRV_DLKM_ENABLE := false
endif
endif
ifeq ($(MM_DRV_DLKM_ENABLE), true)
ifneq ($(TARGET_BOARD_AUTO),true)
ifeq ($(call is-board-platform-in-list,$(TARGET_BOARD_PLATFORM)),true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/msm_ext_display.ko
ifneq ($(TARGET_BOARD_PLATFORM), taro)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \
$(KERNEL_MODULES_OUT)/msm_hw_fence.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/sync_fence.ko \
$(KERNEL_MODULES_OUT)/msm_hw_fence.ko
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/sync_fence.ko \
$(KERNEL_MODULES_OUT)/msm_hw_fence.ko
endif
endif
endif
endif

View File

@@ -0,0 +1,17 @@
PRODUCT_PACKAGES += msm_ext_display.ko
MM_DRV_DLKM_ENABLE := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_MM_DRV_OVERRIDE), false)
MM_DRV_DLKM_ENABLE := false
endif
endif
ifeq ($(MM_DRV_DLKM_ENABLE), true)
ifneq ($(TARGET_BOARD_PLATFORM), taro)
PRODUCT_PACKAGES += sync_fence.ko msm_hw_fence.ko
endif
endif
DISPLAY_MM_DRIVER := msm_ext_display.ko sync_fence.ko msm_hw_fence.ko

View File

@@ -0,0 +1,95 @@
# Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
# Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import filecmp
import os
import re
import subprocess
import sys
def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h):
if not h.startswith(prefix):
print('error: expected prefix [%s] on header [%s]' % (prefix, h))
return False
out_h = os.path.join(gen_dir, h[len(prefix):])
(out_h_dirname, out_h_basename) = os.path.split(out_h)
env = os.environ.copy()
env["LOC_UNIFDEF"] = unifdef
cmd = ["sh", headers_install, h, out_h]
    if verbose:
        print('run_headers_install: cmd is %s' % cmd)
result = subprocess.call(cmd, env=env)
if result != 0:
print('error: run_headers_install: cmd %s failed %d' % (cmd, result))
return False
return True
def gen_mm_drivers_headers(verbose, gen_dir, headers_install, unifdef, mm_drivers_include_uapi):
error_count = 0
for h in mm_drivers_include_uapi:
mm_drivers_uapi_include_prefix = os.path.join(h.split('sync_fence/include/uapi')[0],
'sync_fence', 'include', 'uapi') + os.sep
        if not run_headers_install(
                verbose, gen_dir, headers_install, unifdef,
                mm_drivers_uapi_include_prefix, h):
            error_count += 1
return error_count
def main():
"""Parse command line arguments and perform top level control."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Arguments that apply to every invocation of this script.
parser.add_argument(
'--verbose', action='store_true',
help='Print output that describes the workings of this script.')
parser.add_argument(
'--header_arch', required=True,
help='The arch for which to generate headers.')
parser.add_argument(
'--gen_dir', required=True,
help='Where to place the generated files.')
parser.add_argument(
'--mm_drivers_include_uapi', required=True, nargs='*',
help='The list of techpack/*/include/uapi header files.')
parser.add_argument(
'--headers_install', required=True,
help='The headers_install tool to process input headers.')
parser.add_argument(
'--unifdef',
required=True,
help='The unifdef tool used by headers_install.')
args = parser.parse_args()
if args.verbose:
print('header_arch [%s]' % args.header_arch)
print('gen_dir [%s]' % args.gen_dir)
print('mm_drivers_include_uapi [%s]' % args.mm_drivers_include_uapi)
print('headers_install [%s]' % args.headers_install)
print('unifdef [%s]' % args.unifdef)
return gen_mm_drivers_headers(args.verbose, args.gen_dir,
args.headers_install, args.unifdef, args.mm_drivers_include_uapi)
if __name__ == '__main__':
sys.exit(main())

View File

@@ -0,0 +1,43 @@
LOCAL_PATH := $(call my-dir)
LOCAL_MODULE_DDK_BUILD := true
include $(CLEAR_VARS)
# This makefile is only for DLKM
ifneq ($(findstring vendor,$(LOCAL_PATH)),)
ifneq ($(findstring opensource,$(LOCAL_PATH)),)
MSM_EXT_DISPLAY_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/msm_ext_display
endif # opensource
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := MSM_EXT_DISPLAY_ROOT=$(MSM_EXT_DISPLAY_BLD_DIR)
KBUILD_OPTIONS += MODNAME=msm_ext_display
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
include $(CLEAR_VARS)
# For incremental compilation
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := msm-ext-disp-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := msm_ext_display.ko
LOCAL_MODULE_KBUILD_NAME := msm_ext_display.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
endif # DLKM check

View File

@@ -0,0 +1,16 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
load(":define_msm_ext_display.bzl", "define_msm_ext_display")
package(
default_visibility = [
"//visibility:public"
],
)
ddk_headers(
name = "msm_ext_display_headers",
hdrs = glob(["include/*.h"]),
includes = ["include"]
)
define_msm_ext_display()

View File

@@ -0,0 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdrivers.conf
LINUXINCLUDE += -include $(MSM_EXT_DISPLAY_ROOT)/config/kalamammdriversconf.h
obj-m += msm_ext_display.o
msm_ext_display-y := src/msm_ext_display.o
CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull

View File

@@ -0,0 +1,4 @@
config MSM_EXT_DISPLAY
bool "Enable msm_ext_display"
help
Enable msm_ext_display driver

View File

@@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
KBUILD_OPTIONS += MSM_EXT_DISPLAY_ROOT=$(KERNEL_SRC)/$(M)/../
all: modules
modules_install:
$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
clean:
rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
rm -rf .tmp_versions

View File

@@ -0,0 +1 @@
CONFIG_MSM_EXT_DISPLAY=y

View File

@@ -0,0 +1,31 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_module", "ddk_submodule")
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//msm-kernel:target_variants.bzl", "get_all_variants")
def _define_module(target, variant):
tv = "{}_{}".format(target, variant)
ddk_module(
name = "{}_msm_ext_display".format(tv),
srcs = ["src/msm_ext_display.c"],
out = "msm_ext_display.ko",
defconfig = "defconfig",
kconfig = "Kconfig",
deps = ["//msm-kernel:all_headers",
"//vendor/qcom/opensource/mm-drivers:mm_drivers_headers"],
kernel_build = "//msm-kernel:{}".format(tv),
)
copy_to_dist_dir(
name = "{}_msm_ext_display_dist".format(tv),
data = [":{}_msm_ext_display".format(tv)],
dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
flat = True,
wipe_dist_dir = False,
allow_duplicate_filenames = False,
mode_overrides = {"**/*": "644"},
log = "info",
)
def define_msm_ext_display():
for (t, v) in get_all_variants():
_define_module(t, v)

View File

@@ -0,0 +1,240 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _MSM_EXT_DISPLAY_H_
#define _MSM_EXT_DISPLAY_H_
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/extcon.h>
#define AUDIO_ACK_SET_ENABLE BIT(5)
#define AUDIO_ACK_ENABLE BIT(4)
#define AUDIO_ACK_CONNECT BIT(0)
#define MSM_EXT_DISP_MAX_CODECS 2
/*
* Flags to be used with the HPD operation of the external display
* interface:
* MSM_EXT_DISP_HPD_AUDIO: audio will be routed to external display
* MSM_EXT_DISP_HPD_VIDEO: video will be routed to external display
*/
#define MSM_EXT_DISP_HPD_AUDIO BIT(0)
#define MSM_EXT_DISP_HPD_VIDEO BIT(1)
/**
* struct ext_disp_cable_notify - cable notify handler structure
* @link: a link for the linked list
* @status: current status of HDMI/DP cable connection
* @hpd_notify: callback function to provide cable status
*/
struct ext_disp_cable_notify {
struct list_head link;
int status;
void (*hpd_notify)(struct ext_disp_cable_notify *h);
};
struct msm_ext_disp_audio_edid_blk {
u8 *audio_data_blk;
unsigned int audio_data_blk_size; /* in bytes */
u8 *spk_alloc_data_blk;
unsigned int spk_alloc_data_blk_size; /* in bytes */
};
struct msm_ext_disp_audio_setup_params {
u32 sample_rate_hz;
u32 num_of_channels;
u32 channel_allocation;
u32 level_shift;
bool down_mix;
u32 sample_present;
};
/*
 * External Display identifier used to determine which interface
* the audio driver is interacting with.
*/
enum msm_ext_disp_type {
EXT_DISPLAY_TYPE_HDMI = EXTCON_DISP_HDMI,
EXT_DISPLAY_TYPE_DP = EXTCON_DISP_DP,
EXT_DISPLAY_TYPE_MAX = 0xFFFFFFFF
};
/*
* External Display cable state used by display interface to indicate
* connect/disconnect of interface.
*/
enum msm_ext_disp_cable_state {
EXT_DISPLAY_CABLE_DISCONNECT,
EXT_DISPLAY_CABLE_CONNECT,
EXT_DISPLAY_CABLE_STATE_MAX
};
/*
* External Display power state used by display interface to indicate
* power on/off of the interface.
*/
enum msm_ext_disp_power_state {
EXT_DISPLAY_POWER_OFF,
EXT_DISPLAY_POWER_ON,
EXT_DISPLAY_POWER_MAX
};
/**
* struct msm_ext_disp_codec_id - codec information
* @type: external display type
* @ctrl_id: controller id
* @stream_id: stream_id
*/
struct msm_ext_disp_codec_id {
enum msm_ext_disp_type type;
int ctrl_id;
int stream_id;
};
/**
* struct msm_ext_disp_intf_ops - operations exposed to display interface
* @audio_config: configures the audio operations exposed to codec driver
* @audio_notify: notifies the audio connection state to user modules.
* @video_notify: notifies the video connection state to user modules.
*/
struct msm_ext_disp_intf_ops {
int (*audio_config)(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state state);
int (*audio_notify)(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state state);
int (*video_notify)(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state state);
};
/**
* struct msm_ext_disp_audio_codec_ops - operations exposed to audio codec
* @audio_info_setup: configure audio on interface
* @get_audio_edid_blk: retrieve audio edid block
* @cable_status: cable connected/disconnected
* @get_intf_id: id of connected interface
* @teardown_done: audio session teardown done by qdsp
* @acknowledge: acknowledge audio status received by user modules
* @ready: notify audio when codec driver is ready.
*/
struct msm_ext_disp_audio_codec_ops {
int (*audio_info_setup)(struct platform_device *pdev,
struct msm_ext_disp_audio_setup_params *params);
int (*get_audio_edid_blk)(struct platform_device *pdev,
struct msm_ext_disp_audio_edid_blk *blk);
int (*cable_status)(struct platform_device *pdev, u32 vote);
int (*get_intf_id)(struct platform_device *pdev);
void (*teardown_done)(struct platform_device *pdev);
int (*acknowledge)(struct platform_device *pdev, u32 ack);
int (*ready)(struct platform_device *pdev);
};
/**
* struct msm_ext_disp_init_data - data needed to register a display interface
* @type: external display type
* @intf_ops: external display interface operations
* @codec_ops: audio codec operations
* @pdev: platform device instance of the interface driver
* @intf_data: interface specific data
*/
struct msm_ext_disp_init_data {
struct msm_ext_disp_codec_id codec;
struct msm_ext_disp_intf_ops intf_ops;
struct msm_ext_disp_audio_codec_ops codec_ops;
struct platform_device *pdev;
void *intf_data;
};
/**
* struct msm_ext_disp_data - data needed by interface modules
* @intf_pdev: platform device instance of the interface
* @intf_data: data related to interface module
*/
struct msm_ext_disp_data {
struct platform_device *intf_pdev;
void *intf_data;
};
#if IS_ENABLED(CONFIG_MSM_EXT_DISPLAY)
/**
* msm_ext_disp_register_audio_codec() - audio codec registration
* @pdev: platform device pointer
* @codec_ops: audio codec operations
*/
int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops);
/**
* msm_ext_disp_select_audio_codec() - select audio codec
* @pdev: platform device pointer
* @codec: codec id information
*/
int msm_ext_disp_select_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec);
/**
* msm_hdmi_register_audio_codec() - wrapper for hdmi audio codec
* registration
* @pdev: platform device pointer
* @codec_ops: audio codec operations
*/
int msm_hdmi_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops);
/**
* msm_ext_disp_register_intf() - display interface registration
* @init_data: data needed to register the display interface
*/
int msm_ext_disp_register_intf(struct platform_device *pdev,
struct msm_ext_disp_init_data *init_data);
/**
* msm_ext_disp_deregister_intf() - display interface deregistration
* @init_data: data needed to deregister the display interface
*/
int msm_ext_disp_deregister_intf(struct platform_device *pdev,
struct msm_ext_disp_init_data *init_data);
#else
static inline int msm_ext_disp_register_audio_codec(
struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops)
{
return 0;
}
static inline int msm_ext_disp_select_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec)
{
return 0;
}
static inline int msm_hdmi_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops)
{
return 0;
}
static inline int msm_ext_disp_register_intf(struct platform_device *pdev,
struct msm_ext_disp_init_data *init_data)
{
return 0;
}
static inline int msm_ext_disp_deregister_intf(struct platform_device *pdev,
struct msm_ext_disp_init_data *init_data)
{
return 0;
}
#endif
#endif /*_MSM_EXT_DISPLAY_H_*/
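To make the registration contract concrete, a hedged sketch of a DP-style interface driver filling msm_ext_disp_init_data and registering with the external-display core; the stub callbacks and the way the core's platform device is obtained are assumptions, and the core (msm_ext_display.c, below) only accepts ctrl_id 0 and rejects init data whose codec_ops are not fully populated:

/* Illustrative stubs; a real interface wires these to its hardware. */
static int ex_audio_info_setup(struct platform_device *pdev,
		struct msm_ext_disp_audio_setup_params *params) { return 0; }
static int ex_get_audio_edid_blk(struct platform_device *pdev,
		struct msm_ext_disp_audio_edid_blk *blk) { return 0; }
static int ex_cable_status(struct platform_device *pdev, u32 vote) { return 0; }
static int ex_get_intf_id(struct platform_device *pdev) { return EXT_DISPLAY_TYPE_DP; }
static void ex_teardown_done(struct platform_device *pdev) { }
static int ex_acknowledge(struct platform_device *pdev, u32 ack) { return 0; }
static int ex_ready(struct platform_device *pdev) { return 0; }

static struct msm_ext_disp_init_data ex_init_data;

static int example_register_dp_intf(struct platform_device *ext_disp_pdev,
		struct platform_device *my_pdev)
{
	int ret;

	ex_init_data.codec.type = EXT_DISPLAY_TYPE_DP;
	ex_init_data.codec.ctrl_id = 0;	/* the core only accepts ctrl 0 */
	ex_init_data.codec.stream_id = 0;
	ex_init_data.pdev = my_pdev;
	ex_init_data.codec_ops.audio_info_setup = ex_audio_info_setup;
	ex_init_data.codec_ops.get_audio_edid_blk = ex_get_audio_edid_blk;
	ex_init_data.codec_ops.cable_status = ex_cable_status;
	ex_init_data.codec_ops.get_intf_id = ex_get_intf_id;
	ex_init_data.codec_ops.teardown_done = ex_teardown_done;
	ex_init_data.codec_ops.acknowledge = ex_acknowledge;
	ex_init_data.codec_ops.ready = ex_ready;

	ret = msm_ext_disp_register_intf(ext_disp_pdev, &ex_init_data);
	if (ret)
		return ret;

	/* on hotplug, notify audio through the ops filled in by the core */
	ex_init_data.intf_ops.audio_config(ext_disp_pdev, &ex_init_data.codec,
			EXT_DISPLAY_CABLE_CONNECT);
	ex_init_data.intf_ops.audio_notify(ext_disp_pdev, &ex_init_data.codec,
			EXT_DISPLAY_CABLE_CONNECT);
	return 0;
}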

View File

@@ -0,0 +1,702 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/iopoll.h>
#include <linux/types.h>
#include <linux/of_platform.h>
#include <linux/extcon-provider.h>
#include <msm_ext_display.h>
struct msm_ext_disp_list {
struct msm_ext_disp_init_data *data;
struct list_head list;
};
struct msm_ext_disp {
struct msm_ext_disp_data ext_disp_data;
struct platform_device *pdev;
struct msm_ext_disp_codec_id current_codec;
struct msm_ext_disp_audio_codec_ops *ops;
struct extcon_dev *audio_sdev[MSM_EXT_DISP_MAX_CODECS];
bool audio_session_on;
struct list_head display_list;
struct mutex lock;
bool update_audio;
};
static const unsigned int msm_ext_disp_supported_cable[] = {
EXTCON_DISP_DP,
EXTCON_DISP_HDMI,
EXTCON_NONE,
};
static int msm_ext_disp_extcon_register(struct msm_ext_disp *ext_disp, int id)
{
int ret = 0;
if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("invalid params\n");
return -EINVAL;
}
ext_disp->audio_sdev[id] = devm_extcon_dev_allocate(
&ext_disp->pdev->dev,
msm_ext_disp_supported_cable);
if (IS_ERR(ext_disp->audio_sdev[id]))
return PTR_ERR(ext_disp->audio_sdev[id]);
ret = devm_extcon_dev_register(&ext_disp->pdev->dev,
ext_disp->audio_sdev[id]);
if (ret) {
pr_err("audio registration failed\n");
return ret;
}
pr_debug("extcon registration done\n");
return ret;
}
static void msm_ext_disp_extcon_unregister(struct msm_ext_disp *ext_disp,
int id)
{
if (!ext_disp || !ext_disp->pdev || id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("Invalid params\n");
return;
}
devm_extcon_dev_unregister(&ext_disp->pdev->dev,
ext_disp->audio_sdev[id]);
}
static const char *msm_ext_disp_name(enum msm_ext_disp_type type)
{
switch (type) {
case EXT_DISPLAY_TYPE_HDMI:
return "EXT_DISPLAY_TYPE_HDMI";
case EXT_DISPLAY_TYPE_DP:
return "EXT_DISPLAY_TYPE_DP";
default: return "???";
}
}
static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_init_data *data)
{
struct msm_ext_disp_list *node;
if (!ext_disp || !data) {
pr_err("Invalid params\n");
return -EINVAL;
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
node->data = data;
list_add(&node->list, &ext_disp->display_list);
pr_debug("Added new display (%s) ctld (%d) stream (%d)\n",
msm_ext_disp_name(data->codec.type),
data->codec.ctrl_id, data->codec.stream_id);
return 0;
}
static int msm_ext_disp_remove_intf_data(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_init_data *data)
{
struct msm_ext_disp_list *node;
struct list_head *pos = NULL;
if (!ext_disp || !data) {
pr_err("Invalid params\n");
return -EINVAL;
}
list_for_each(pos, &ext_disp->display_list) {
node = list_entry(pos, struct msm_ext_disp_list, list);
if (node->data == data) {
list_del(pos);
pr_debug("Deleted the intf data\n");
kfree(node);
return 0;
}
}
pr_debug("Intf data not present for delete op\n");
return 0;
}
static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_codec_id *codec,
struct msm_ext_disp_init_data **data)
{
int ret = 0;
struct msm_ext_disp_list *node;
struct list_head *position = NULL;
if (!ext_disp || !data || !codec) {
pr_err("Invalid params\n");
ret = -EINVAL;
goto end;
}
*data = NULL;
list_for_each(position, &ext_disp->display_list) {
node = list_entry(position, struct msm_ext_disp_list, list);
if (node->data->codec.type == codec->type &&
node->data->codec.stream_id == codec->stream_id &&
node->data->codec.ctrl_id == codec->ctrl_id) {
*data = node->data;
break;
}
}
if (!*data)
ret = -ENODEV;
end:
return ret;
}
static int msm_ext_disp_process_audio(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state new_state)
{
int ret = 0;
int state;
struct extcon_dev *audio_sdev;
if (!ext_disp->ops) {
pr_err("codec not registered, skip notification\n");
ret = -EPERM;
goto end;
}
audio_sdev = ext_disp->audio_sdev[codec->stream_id];
state = extcon_get_state(audio_sdev, codec->type);
if (state == !!new_state) {
ret = -EEXIST;
pr_debug("same state\n");
goto end;
}
ret = extcon_set_state_sync(audio_sdev,
codec->type, !!new_state);
if (ret)
pr_err("Failed to set state. Error = %d\n", ret);
else
pr_debug("state changed to %d\n", new_state);
end:
return ret;
}
static struct msm_ext_disp *msm_ext_disp_validate_and_get(
struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state state)
{
struct msm_ext_disp_data *ext_disp_data;
struct msm_ext_disp *ext_disp;
if (!pdev) {
pr_err("invalid platform device\n");
goto err;
}
if (!codec ||
codec->type >= EXT_DISPLAY_TYPE_MAX ||
codec->ctrl_id != 0 ||
codec->stream_id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("invalid display codec id\n");
goto err;
}
if (state < EXT_DISPLAY_CABLE_DISCONNECT ||
state >= EXT_DISPLAY_CABLE_STATE_MAX) {
pr_err("invalid HPD state (%d)\n", state);
goto err;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("invalid drvdata\n");
goto err;
}
ext_disp = container_of(ext_disp_data,
struct msm_ext_disp, ext_disp_data);
return ext_disp;
err:
return ERR_PTR(-EINVAL);
}
static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
struct msm_ext_disp_codec_id *codec)
{
int ret = 0;
struct msm_ext_disp_init_data *data = NULL;
ret = msm_ext_disp_get_intf_data(ext_disp, codec, &data);
if (ret || !data) {
pr_err("Display not found (%s) ctld (%d) stream (%d)\n",
msm_ext_disp_name(codec->type),
codec->ctrl_id, codec->stream_id);
goto end;
}
if (ext_disp->ops) {
*ext_disp->ops = data->codec_ops;
ext_disp->current_codec = *codec;
/* update pdev for interface to use */
ext_disp->ext_disp_data.intf_pdev = data->pdev;
ext_disp->ext_disp_data.intf_data = data->intf_data;
}
end:
return ret;
}
static int msm_ext_disp_audio_config(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state state)
{
int ret = 0;
struct msm_ext_disp *ext_disp;
ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state);
if (IS_ERR(ext_disp)) {
ret = PTR_ERR(ext_disp);
goto end;
}
if (state == EXT_DISPLAY_CABLE_CONNECT) {
ret = msm_ext_disp_select_audio_codec(pdev, codec);
} else {
mutex_lock(&ext_disp->lock);
if (ext_disp->ops)
memset(ext_disp->ops, 0, sizeof(*ext_disp->ops));
pr_debug("codec ops cleared for %s\n",
msm_ext_disp_name(ext_disp->current_codec.type));
ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX;
mutex_unlock(&ext_disp->lock);
}
end:
return ret;
}
static int msm_ext_disp_audio_notify(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec,
enum msm_ext_disp_cable_state state)
{
int ret = 0;
struct msm_ext_disp *ext_disp;
ext_disp = msm_ext_disp_validate_and_get(pdev, codec, state);
if (IS_ERR(ext_disp)) {
ret = PTR_ERR(ext_disp);
goto end;
}
mutex_lock(&ext_disp->lock);
ret = msm_ext_disp_process_audio(ext_disp, codec, state);
mutex_unlock(&ext_disp->lock);
end:
return ret;
}
static void msm_ext_disp_ready_for_display(struct msm_ext_disp *ext_disp)
{
int ret;
struct msm_ext_disp_init_data *data = NULL;
if (!ext_disp) {
pr_err("invalid input\n");
return;
}
ret = msm_ext_disp_get_intf_data(ext_disp,
&ext_disp->current_codec, &data);
if (ret) {
pr_err("%s not found\n",
msm_ext_disp_name(ext_disp->current_codec.type));
return;
}
*ext_disp->ops = data->codec_ops;
data->codec_ops.ready(ext_disp->pdev);
}
int msm_hdmi_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops)
{
return msm_ext_disp_register_audio_codec(pdev, ops);
}
/**
 * msm_ext_disp_register_audio_codec() - register audio codec ops
 * @pdev: platform device of the external display driver
 * @ops: audio codec operations
 *
 * Register audio codec ops with the display driver for HDMI/DisplayPort
 * use-case support.
 *
 * Return: 0 on success, negative value on error
 */
int msm_ext_disp_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops)
{
int ret = 0;
struct msm_ext_disp *ext_disp = NULL;
struct msm_ext_disp_data *ext_disp_data = NULL;
if (!pdev || !ops) {
pr_err("Invalid params\n");
return -EINVAL;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("Invalid drvdata\n");
return -EINVAL;
}
ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
ext_disp_data);
mutex_lock(&ext_disp->lock);
if (ext_disp->ops) {
pr_err("Codec already registered\n");
ret = -EINVAL;
goto end;
}
ext_disp->ops = ops;
pr_debug("audio codec registered\n");
if (ext_disp->update_audio) {
ext_disp->update_audio = false;
msm_ext_disp_update_audio_ops(ext_disp, &ext_disp->current_codec);
msm_ext_disp_process_audio(ext_disp, &ext_disp->current_codec,
EXT_DISPLAY_CABLE_CONNECT);
}
end:
mutex_unlock(&ext_disp->lock);
if (ext_disp->current_codec.type != EXT_DISPLAY_TYPE_MAX)
msm_ext_disp_ready_for_display(ext_disp);
return ret;
}
EXPORT_SYMBOL_GPL(msm_ext_disp_register_audio_codec);
int msm_ext_disp_select_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_codec_id *codec)
{
int ret = 0;
struct msm_ext_disp *ext_disp = NULL;
struct msm_ext_disp_data *ext_disp_data = NULL;
if (!pdev || !codec) {
pr_err("Invalid params\n");
return -EINVAL;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("Invalid drvdata\n");
return -EINVAL;
}
ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
ext_disp_data);
mutex_lock(&ext_disp->lock);
if (!ext_disp->ops) {
pr_warn("Codec is not registered\n");
ext_disp->update_audio = true;
ext_disp->current_codec = *codec;
ret = -EINVAL;
goto end;
}
ret = msm_ext_disp_update_audio_ops(ext_disp, codec);
end:
mutex_unlock(&ext_disp->lock);
return ret;
}
EXPORT_SYMBOL_GPL(msm_ext_disp_select_audio_codec);
static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
{
struct msm_ext_disp_audio_codec_ops *ops;
if (!init_data) {
pr_err("Invalid init_data\n");
return -EINVAL;
}
if (!init_data->pdev) {
pr_err("Invalid display intf pdev\n");
return -EINVAL;
}
if (init_data->codec.type >= EXT_DISPLAY_TYPE_MAX ||
init_data->codec.ctrl_id != 0 ||
init_data->codec.stream_id >= MSM_EXT_DISP_MAX_CODECS) {
pr_err("Invalid codec info type(%d), ctrl(%d) stream(%d)\n",
init_data->codec.type,
init_data->codec.ctrl_id,
init_data->codec.stream_id);
return -EINVAL;
}
ops = &init_data->codec_ops;
if (!ops->audio_info_setup || !ops->get_audio_edid_blk ||
!ops->cable_status || !ops->get_intf_id ||
!ops->teardown_done || !ops->acknowledge ||
!ops->ready) {
pr_err("Invalid codec operation pointers\n");
return -EINVAL;
}
return 0;
}
int msm_ext_disp_register_intf(struct platform_device *pdev,
struct msm_ext_disp_init_data *init_data)
{
int ret = 0;
struct msm_ext_disp_init_data *data = NULL;
struct msm_ext_disp *ext_disp = NULL;
struct msm_ext_disp_data *ext_disp_data = NULL;
if (!pdev || !init_data) {
pr_err("Invalid params\n");
return -EINVAL;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("Invalid drvdata\n");
return -EINVAL;
}
ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
ext_disp_data);
mutex_lock(&ext_disp->lock);
ret = msm_ext_disp_validate_intf(init_data);
if (ret)
goto end;
ret = msm_ext_disp_get_intf_data(ext_disp, &init_data->codec, &data);
if (!ret) {
pr_err("%s already registered. ctrl(%d) stream(%d)\n",
msm_ext_disp_name(init_data->codec.type),
init_data->codec.ctrl_id,
init_data->codec.stream_id);
goto end;
}
ret = msm_ext_disp_add_intf_data(ext_disp, init_data);
if (ret)
goto end;
init_data->intf_ops.audio_config = msm_ext_disp_audio_config;
init_data->intf_ops.audio_notify = msm_ext_disp_audio_notify;
pr_debug("%s registered. ctrl(%d) stream(%d)\n",
msm_ext_disp_name(init_data->codec.type),
init_data->codec.ctrl_id,
init_data->codec.stream_id);
end:
mutex_unlock(&ext_disp->lock);
return ret;
}
EXPORT_SYMBOL_GPL(msm_ext_disp_register_intf);
int msm_ext_disp_deregister_intf(struct platform_device *pdev,
struct msm_ext_disp_init_data *init_data)
{
int ret = 0;
struct msm_ext_disp *ext_disp = NULL;
struct msm_ext_disp_data *ext_disp_data = NULL;
if (!pdev || !init_data) {
pr_err("Invalid params\n");
return -EINVAL;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("Invalid drvdata\n");
return -EINVAL;
}
ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
ext_disp_data);
mutex_lock(&ext_disp->lock);
ret = msm_ext_disp_remove_intf_data(ext_disp, init_data);
if (ret)
goto end;
init_data->intf_ops.audio_config = NULL;
init_data->intf_ops.audio_notify = NULL;
pr_debug("%s deregistered\n",
msm_ext_disp_name(init_data->codec.type));
end:
mutex_unlock(&ext_disp->lock);
return ret;
}
EXPORT_SYMBOL_GPL(msm_ext_disp_deregister_intf);
static int msm_ext_disp_probe(struct platform_device *pdev)
{
int ret = 0, id;
struct device_node *of_node = NULL;
struct msm_ext_disp *ext_disp = NULL;
if (!pdev) {
pr_err("No platform device found\n");
ret = -ENODEV;
goto end;
}
of_node = pdev->dev.of_node;
if (!of_node) {
pr_err("No device node found\n");
ret = -ENODEV;
goto end;
}
ext_disp = devm_kzalloc(&pdev->dev, sizeof(*ext_disp), GFP_KERNEL);
if (!ext_disp) {
ret = -ENOMEM;
goto end;
}
platform_set_drvdata(pdev, &ext_disp->ext_disp_data);
ext_disp->pdev = pdev;
for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++) {
ret = msm_ext_disp_extcon_register(ext_disp, id);
if (ret)
goto child_node_failure;
}
ret = of_platform_populate(of_node, NULL, NULL, &pdev->dev);
if (ret) {
pr_err("Failed to add child devices. Error = %d\n", ret);
goto child_node_failure;
} else {
pr_debug("%s: Added child devices.\n", __func__);
}
mutex_init(&ext_disp->lock);
INIT_LIST_HEAD(&ext_disp->display_list);
ext_disp->current_codec.type = EXT_DISPLAY_TYPE_MAX;
ext_disp->update_audio = false;
return ret;
child_node_failure:
for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++)
msm_ext_disp_extcon_unregister(ext_disp, id);
devm_kfree(&ext_disp->pdev->dev, ext_disp);
end:
return ret;
}
static int msm_ext_disp_remove(struct platform_device *pdev)
{
int ret = 0, id;
struct msm_ext_disp *ext_disp = NULL;
struct msm_ext_disp_data *ext_disp_data = NULL;
if (!pdev) {
pr_err("No platform device\n");
ret = -ENODEV;
goto end;
}
ext_disp_data = platform_get_drvdata(pdev);
if (!ext_disp_data) {
pr_err("No drvdata found\n");
ret = -ENODEV;
goto end;
}
ext_disp = container_of(ext_disp_data, struct msm_ext_disp,
ext_disp_data);
for (id = 0; id < MSM_EXT_DISP_MAX_CODECS; id++)
msm_ext_disp_extcon_unregister(ext_disp, id);
mutex_destroy(&ext_disp->lock);
devm_kfree(&ext_disp->pdev->dev, ext_disp);
end:
return ret;
}
static const struct of_device_id msm_ext_dt_match[] = {
{.compatible = "qcom,msm-ext-disp",},
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, msm_ext_dt_match);
static struct platform_driver this_driver = {
.probe = msm_ext_disp_probe,
.remove = msm_ext_disp_remove,
.driver = {
.name = "msm-ext-disp",
.of_match_table = msm_ext_dt_match,
},
};
static int __init msm_ext_disp_init(void)
{
int ret = 0;
ret = platform_driver_register(&this_driver);
if (ret)
pr_err("failed, ret = %d\n", ret);
return ret;
}
subsys_initcall(msm_ext_disp_init);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MSM External Display");

View File

@@ -0,0 +1,42 @@
LOCAL_PATH := $(call my-dir)
LOCAL_MODULE_DDK_BUILD := true
include $(CLEAR_VARS)
# This makefile is only for DLKM
ifneq ($(findstring vendor,$(LOCAL_PATH)),)
ifneq ($(findstring opensource,$(LOCAL_PATH)),)
SYNC_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/sync_fence
endif # opensource
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := SYNC_FENCE_ROOT=$(SYNC_FENCE_BLD_DIR)
KBUILD_OPTIONS += MODNAME=sync_fence
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := sync-fence-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := sync_fence.ko
LOCAL_MODULE_KBUILD_NAME := sync_fence.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
endif # DLKM check

View File

@@ -0,0 +1,22 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
load(":define_sync_fence.bzl", "define_sync_fence")
package(
default_visibility = [
"//visibility:public"
],
)
ddk_headers(
name = "sync_fence_uapi_headers",
hdrs = glob(["include/uapi/sync_fence/*.h"]),
includes = ["include"]
)
ddk_headers(
name = "sync_fence_headers",
hdrs = glob(["include/*.h"]),
includes = ["include"]
)
define_sync_fence()

View File

@@ -0,0 +1,16 @@
# SPDX-License-Identifier: GPL-2.0-only
KDIR := $(TOP)/kernel_platform/msm-kernel
LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/
include $(SYNC_FENCE_ROOT)/config/kalamammdrivers.conf
LINUXINCLUDE += -include $(SYNC_FENCE_ROOT)/config/kalamammdriversconf.h
ifdef CONFIG_QCOM_SPEC_SYNC
obj-m += sync_fence.o
sync_fence-y := src/qcom_sync_file.o
CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
endif
EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull

View File

@@ -0,0 +1,4 @@
config QCOM_SPEC_SYNC
bool "Enable spec fence"
help
Enable sync_fence driver

View File

@@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
KBUILD_OPTIONS += SYNC_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../
all: modules
modules_install:
$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)
clean:
rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
rm -rf .tmp_versions

View File

@@ -0,0 +1 @@
CONFIG_QCOM_SPEC_SYNC=y

View File

@@ -0,0 +1,33 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_module")
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//msm-kernel:target_variants.bzl", "get_all_variants")
def _define_module(target, variant):
tv = "{}_{}".format(target, variant)
ddk_module(
name = "{}_sync_fence".format(tv),
srcs = ["src/qcom_sync_file.c"],
out = "sync_fence.ko",
kconfig = "Kconfig",
defconfig = "defconfig",
deps = [
"//msm-kernel:all_headers",
"//vendor/qcom/opensource/mm-drivers:mm_drivers_headers",
],
kernel_build = "//msm-kernel:{}".format(tv),
)
copy_to_dist_dir(
name = "{}_sync_fence_dist".format(tv),
data = [":{}_sync_fence".format(tv)],
dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
flat = True,
wipe_dist_dir = False,
allow_duplicate_filenames = False,
mode_overrides = {"**/*": "644"},
log = "info",
)
def define_sync_fence():
for (t, v) in get_all_variants():
_define_module(t, v)

View File

@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _LINUX_QCOM_SPEC_SYNC_H
#define _LINUX_QCOM_SPEC_SYNC_H
#include <linux/dma-fence-array.h>
#define SPEC_FENCE_FLAG_FENCE_ARRAY 16 /* fence-array is speculative */
#define SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND 17 /* fence-array is bound */
#if IS_ENABLED(CONFIG_QCOM_SPEC_SYNC)
/**
* spec_sync_wait_bind_array() - Waits until the fence-array passed as parameter is bound.
* @fence_array: fence-array to wait-on until it is populated.
* @timeout_ms: timeout to wait.
*
 * This function will wait until the fence-array passed as parameter is bound; i.e. all the
 * dma-fences that make up the fence-array are populated by the spec-fence driver bind ioctl.
* Once this function returns success, all the fences in the array should be valid.
*
* Return: 0 on success or negative errno (-EINVAL)
*/
int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms);
#else
static inline int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms)
{
return -EINVAL;
}
#endif /* CONFIG_QCOM_SPEC_SYNC */
#endif /* _LINUX_QCOM_SPEC_SYNC_H */
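A short consumer-side sketch of the contract documented above: before waiting on a dma-fence that may be a speculative fence-array, wait for the array to be bound. The 100 ms timeout is an arbitrary example value:

#include <linux/dma-fence-array.h>
#include <qcom_sync_file.h>

static int example_wait_spec_fence(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);

	/* only speculative (not yet bound) arrays need the bind wait */
	if (array && test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence->flags)) {
		int ret = spec_sync_wait_bind_array(array, 100 /* ms */);

		if (ret)
			return ret; /* child fences were never populated */
	}

	/* all child fences are valid past this point */
	return dma_fence_wait(fence, true);
}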

View File

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
# Top-level Makefile calls into asm-$(ARCH)
# List only non-arch directories below
header-y += sync_fence/

View File

@@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#ifndef _UAPI_LINUX_SPEC_SYNC_H
#define _UAPI_LINUX_SPEC_SYNC_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define SPEC_FENCE_SIGNAL_ANY 0x1
#define SPEC_FENCE_SIGNAL_ALL 0x2
/**
* struct fence_bind_data - data passed to bind ioctl
 * @out_bind_fd: file descriptor of the fence array to bind the child fences to
* @fds: file descriptor list of child fences
*/
struct fence_bind_data {
__u32 out_bind_fd;
__u64 fds;
};
/**
* struct fence_create_data - detailed fence information
* @num_fences: Total fences that array needs to carry.
 * @flags: Flags specifying how to signal the array
* @out_bind_fd: Returns the fence fd.
*/
struct fence_create_data {
__u32 num_fences;
__u32 flags;
__u32 out_bind_fd;
};
#define SPEC_SYNC_MAGIC '>'
/**
 * DOC: SPEC_SYNC_IOC_BIND - bind child fences into a fence array
 *
 * Takes a struct fence_bind_data. Binds the child fds to the fence array
 * referenced by out_bind_fd.
*/
#define SPEC_SYNC_IOC_BIND _IOWR(SPEC_SYNC_MAGIC, 3, struct fence_bind_data)
/**
* DOC: SPEC_SYNC_IOC_CREATE_FENCE - Create a fence array
*
 * Takes a struct fence_create_data. If num_fences is > 0, a fence array is
 * created and its fd is returned in fence_create_data.out_bind_fd.
*/
#define SPEC_SYNC_IOC_CREATE_FENCE _IOWR(SPEC_SYNC_MAGIC, 4, struct fence_create_data)
/**
* DOC: SPEC_SYNC_IOC_GET_VER - Get Spec driver version
*
* Returns Spec driver version.
*/
#define SPEC_SYNC_IOC_GET_VER _IOWR(SPEC_SYNC_MAGIC, 5, __u64)
#endif /* _UAPI_LINUX_SPEC_SYNC_H */
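A userspace sketch of the two-step flow these ioctls describe: reserve a speculative array up front, then bind the real fence fds into it later. The /dev/spec_sync node name, the installed header path, and the fds-field-as-user-pointer convention are assumptions inferred from the driver, not documented guarantees:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <sync_fence/qcom_sync_file.h> /* assumed installed uapi path */

static int example_spec_sync(int child_fd0, int child_fd1)
{
	struct fence_create_data create = {0};
	struct fence_bind_data bind = {0};
	__u32 fds[2] = { (__u32)child_fd0, (__u32)child_fd1 };
	int dev_fd, ret;

	dev_fd = open("/dev/spec_sync", O_RDWR); /* assumed node name */
	if (dev_fd < 0)
		return -1;

	/* reserve an array of two fences; signal it when all children signal */
	create.num_fences = 2;
	create.flags = SPEC_FENCE_SIGNAL_ALL;
	ret = ioctl(dev_fd, SPEC_SYNC_IOC_CREATE_FENCE, &create);
	if (ret)
		goto out;

	/* later: bind the real fence fds into the speculative array */
	bind.out_bind_fd = create.out_bind_fd;
	bind.fds = (uintptr_t)fds; /* assumed: __u64 carrying a user pointer */
	ret = ioctl(dev_fd, SPEC_SYNC_IOC_BIND, &bind);
out:
	close(dev_fd);
	return ret;
}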

View File

@@ -0,0 +1,596 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/sync_file.h>
#include <uapi/sync_fence/qcom_sync_file.h>
#include <qcom_sync_file.h>
#include <linux/version.h>
#define CLASS_NAME "sync"
#define DRV_NAME "spec_sync"
#define DRV_VERSION 1
#define NAME_LEN 32
#define FENCE_MIN 1
#define FENCE_MAX 32
#if IS_ENABLED(CONFIG_DEBUG_FS)
#define MAX_DEVICE_SUPPORTED 2
#else
#define MAX_DEVICE_SUPPORTED 1
#endif
#define DUMMY_CONTEXT 0xfafadadafafadada
#define DUMMY_SEQNO 0xefa9ce00efa9ce00
struct dummy_spec_fence {
struct dma_fence fence;
spinlock_t lock;
};
struct sync_device {
/* device info */
struct class *dev_class;
dev_t dev_num;
struct device *dev;
struct cdev *cdev;
struct mutex lock;
struct dummy_spec_fence *dummy_fence;
/* device drv data */
atomic_t device_available;
char name[NAME_LEN];
uint32_t version;
struct mutex l_lock;
struct list_head fence_array_list;
wait_queue_head_t wait_queue;
};
struct fence_array_node {
struct dma_fence_array *fence_array;
struct list_head list;
};
/* Speculative Sync Device Driver State */
static struct sync_device sync_dev;
static const char *spec_fence_get_name_dummy(struct dma_fence *fence)
{
return "dummy_fence";
}
static const struct dma_fence_ops dummy_spec_fence_ops = {
.get_driver_name = spec_fence_get_name_dummy,
.get_timeline_name = spec_fence_get_name_dummy,
};
static bool sanitize_fence_array(struct dma_fence_array *fence)
{
struct fence_array_node *node;
int ret = false;
mutex_lock(&sync_dev.l_lock);
list_for_each_entry(node, &sync_dev.fence_array_list, list) {
if (node->fence_array == fence) {
ret = true;
break;
}
}
mutex_unlock(&sync_dev.l_lock);
return ret;
}
static void clear_fence_array_tracker(bool force_clear)
{
struct fence_array_node *node, *temp;
struct dma_fence_array *array;
struct dma_fence *fence;
bool is_signaled;
mutex_lock(&sync_dev.l_lock);
list_for_each_entry_safe(node, temp, &sync_dev.fence_array_list, list) {
array = node->fence_array;
fence = &array->base;
is_signaled = dma_fence_is_signaled(fence);
if (force_clear && !array->fences)
array->num_fences = 0;
pr_debug("force_clear:%d is_signaled:%d pending:%d\n", force_clear, is_signaled,
atomic_read(&array->num_pending));
if (force_clear && !is_signaled && atomic_dec_and_test(&array->num_pending))
dma_fence_signal(fence);
if (force_clear || is_signaled) {
dma_fence_put(fence);
list_del(&node->list);
kfree(node);
}
}
mutex_unlock(&sync_dev.l_lock);
}
static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name)
{
if (atomic_read(&obj->device_available) >= MAX_DEVICE_SUPPORTED) {
pr_err("number of device fds are limited to %d, device opened:%d\n",
MAX_DEVICE_SUPPORTED, atomic_read(&obj->device_available));
return NULL;
} else if (!atomic_read(&obj->device_available)) {
memset(obj->name, 0, NAME_LEN);
strscpy(obj->name, name, sizeof(obj->name));
}
atomic_inc(&obj->device_available);
return obj;
}
static int spec_sync_open(struct inode *inode, struct file *file)
{
char task_comm[TASK_COMM_LEN];
struct sync_device *obj = &sync_dev;
int ret = 0;
if (!inode || !inode->i_cdev || !file) {
pr_err("NULL pointer passed\n");
return -EINVAL;
}
mutex_lock(&sync_dev.lock);
get_task_comm(task_comm, current);
obj = spec_fence_init_locked(obj, task_comm);
if (!obj) {
pr_err("Spec device exists owner:%s caller:%s\n", sync_dev.name, task_comm);
ret = -EEXIST;
goto end;
}
file->private_data = obj;
end:
mutex_unlock(&sync_dev.lock);
return ret;
}
static int spec_sync_release(struct inode *inode, struct file *file)
{
int ret = 0;
struct sync_device *obj = file->private_data;
mutex_lock(&sync_dev.lock);
if (!atomic_read(&obj->device_available)) {
pr_err("no device to release!!\n");
ret = -ENODEV;
goto end;
}
atomic_dec(&obj->device_available);
if (!atomic_read(&obj->device_available))
clear_fence_array_tracker(true);
end:
mutex_unlock(&sync_dev.lock);
return ret;
}
static int spec_sync_ioctl_get_ver(struct sync_device *obj, unsigned long __user arg)
{
uint32_t version = obj->version;
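/*
 * Note: SPEC_SYNC_IOC_GET_VER is declared with a __u64 argument, but only
 * sizeof(uint32_t) bytes are written back; callers should zero the buffer.
 */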
if (copy_to_user((void __user *)arg, &version, sizeof(uint32_t)))
return -EFAULT;
return 0;
}
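/*
 * Build a dma_fence_array with @f->num_fences slots, each initially pointing
 * at the shared dummy fence, wrap it in a sync_file, track it for a later
 * bind, and return the new fd (or a negative error).
 */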
static int spec_sync_create_array(struct fence_create_data *f)
{
int fd = get_unused_fd_flags(O_CLOEXEC);
struct sync_file *sync_file;
struct dma_fence_array *fence_array;
struct fence_array_node *node;
struct dma_fence **fences;
struct dummy_spec_fence *dummy_fence_p = sync_dev.dummy_fence;
bool signal_any;
int i, ret = 0;
if (fd < 0) {
pr_err("failed to get_unused_fd_flags\n");
return fd;
}
if (f->num_fences < FENCE_MIN || f->num_fences > FENCE_MAX) {
pr_err("invalid arguments num_fences:%d\n", f->num_fences);
ret = -ERANGE;
goto error_args;
}
fences = kcalloc(f->num_fences, sizeof(*fences), GFP_KERNEL);
if (!fences) {
ret = -ENOMEM;
goto error_args;
}
for (i = 0; i < f->num_fences; i++) {
fences[i] = &dummy_fence_p->fence;
/*
 * Increase the dummy fence's refcount here. We must do this since any call
 * to the fence-array release while dummy fences are the children of the
 * fence-array will decrement the dummy fence's refcount. Therefore, to
 * prevent the release of the dummy fence, we must keep an extra refcount
 * for every time that fence-array->release can decrement its children's
 * refcount. The extra refcount is dropped implicitly when
 * dma_fence_put(&fence_array->base) is called.
 */
dma_fence_get(&dummy_fence_p->fence);
}
signal_any = !(f->flags & SPEC_FENCE_SIGNAL_ALL);
fence_array = dma_fence_array_create(f->num_fences, fences,
dma_fence_context_alloc(1), 0, signal_any);
if (!fence_array) {
/* fence-array create failed, remove extra refcounts */
for (i = 0; i < f->num_fences; i++)
dma_fence_put(&dummy_fence_p->fence);
kfree(fences);
ret = -EINVAL;
goto error_args;
}
/* Set the enable-signal bit so that signaling is not enabled during wait */
set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence_array->base.flags);
set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags);
sync_file = sync_file_create(&fence_array->base);
if (!sync_file) {
pr_err("sync_file_create fail\n");
ret = -EINVAL;
goto err;
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node) {
fput(sync_file->file);
ret = -ENOMEM;
goto err;
}
fd_install(fd, sync_file->file);
node->fence_array = fence_array;
mutex_lock(&sync_dev.l_lock);
list_add_tail(&node->list, &sync_dev.fence_array_list);
mutex_unlock(&sync_dev.l_lock);
pr_debug("spec fd:%d num_fences:%u\n", fd, f->num_fences);
return fd;
err:
dma_fence_put(&fence_array->base);
error_args:
put_unused_fd(fd);
return ret;
}
static int spec_sync_ioctl_create_fence(struct sync_device *obj, unsigned long __user arg)
{
struct fence_create_data f;
int fd;
if (copy_from_user(&f, (void __user *)arg, sizeof(f)))
return -EFAULT;
fd = spec_sync_create_array(&f);
if (fd < 0)
return fd;
f.out_bind_fd = fd;
if (copy_to_user((void __user *)arg, &f, sizeof(f)))
return -EFAULT;
return 0;
}
int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms)
{
int ret;
/* Check if fence-array is a speculative fence */
if (!fence_array || !test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags)) {
pr_err("invalid fence!\n");
return -EINVAL;
} else if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags)) {
/* This fence-array is already bound, just return success */
return 0;
}
/* Wait for the fence-array bind */
ret = wait_event_timeout(sync_dev.wait_queue,
test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags),
msecs_to_jiffies(timeout_ms));
if (!ret) {
pr_err("timed out waiting for bind fence-array %d\n", timeout_ms);
ret = -ETIMEDOUT;
} else {
ret = 0;
}
return ret;
}
EXPORT_SYMBOL_GPL(spec_sync_wait_bind_array);
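/*
 * Replace the dummy slots of a tracked, unsignaled fence array with the real
 * fences behind the user-supplied fds, then mark the array bound and wake
 * anyone blocked in spec_sync_wait_bind_array().
 */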
static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info)
{
struct dma_fence_array *fence_array;
struct dma_fence *fence = NULL;
struct dma_fence *user_fence = NULL;
int *user_fds, ret = 0, i;
u32 num_fences;
fence = sync_file_get_fence(sync_bind_info->out_bind_fd);
if (!fence) {
pr_err("dma fence failure out_fd:%d\n", sync_bind_info->out_bind_fd);
return -EINVAL;
}
if (dma_fence_is_signaled(fence)) {
pr_err("spec fence is already signaled, out_fd:%d\n",
sync_bind_info->out_bind_fd);
ret = -EINVAL;
goto end;
}
fence_array = container_of(fence, struct dma_fence_array, base);
if (!sanitize_fence_array(fence_array)) {
pr_err("spec fence not found in the registered list out_fd:%d\n",
sync_bind_info->out_bind_fd);
ret = -EINVAL;
goto end;
}
num_fences = fence_array->num_fences;
for (i = 0; i < num_fences; i++) {
if (!(fence_array->fences[i]->context == DUMMY_CONTEXT &&
fence_array->fences[i]->seqno == DUMMY_SEQNO)) {
pr_err("fence array already populated, spec fd:%d status:%d flags:0x%lx\n",
sync_bind_info->out_bind_fd, dma_fence_get_status(fence),
fence->flags);
ret = -EINVAL;
goto end;
}
}
user_fds = kcalloc(num_fences, sizeof(int), GFP_KERNEL);
if (!user_fds) {
ret = -ENOMEM;
goto end;
}
if (copy_from_user(user_fds, (void __user *)sync_bind_info->fds,
num_fences * sizeof(int))) {
ret = -EFAULT;
goto out;
}
spin_lock(fence->lock);
for (i = 0; i < num_fences; i++) {
user_fence = sync_file_get_fence(user_fds[i]);
if (!user_fence) {
pr_warn("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n",
user_fds[i], sync_bind_info->out_bind_fd);
ret = -EINVAL;
goto bind_invalid;
} else if (user_fence->context == fence_array->base.context &&
user_fence->seqno == fence_array->base.seqno) {
pr_err("invalid spec fence, ufd:%d o_b_fd:%d ctx:%lld seqno:%lld\n",
user_fds[i], sync_bind_info->out_bind_fd,
user_fence->context, user_fence->seqno);
ret = -EINVAL;
goto bind_invalid;
}
fence_array->fences[i] = user_fence;
/*
 * At this point the slot holds a valid fence and no longer the dummy
 * fence; therefore, release the extra refcount that the creation of the
 * speculative fence added to the dummy fence.
 */
dma_fence_put(&sync_dev.dummy_fence->fence);
pr_debug("spec fd:%d i:%d bind fd:%d error:%d\n", sync_bind_info->out_bind_fd,
i, user_fds[i], fence_array->fences[i]->error);
}
clear_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
spin_unlock(fence->lock);
dma_fence_enable_sw_signaling(&fence_array->base);
clear_fence_array_tracker(false);
bind_invalid:
set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags);
wake_up_all(&sync_dev.wait_queue);
if (ret) {
dma_fence_set_error(fence, -EINVAL);
spin_unlock(fence->lock);
dma_fence_signal(fence);
clear_fence_array_tracker(false);
}
out:
kfree(user_fds);
end:
dma_fence_put(fence);
return ret;
}
static int spec_sync_ioctl_bind(struct sync_device *obj, unsigned long __user arg)
{
struct fence_bind_data sync_bind_info;
if (copy_from_user(&sync_bind_info, (void __user *)arg, sizeof(struct fence_bind_data)))
return -EFAULT;
/* out_bind_fd is __u32; cast so a negative fd from userspace is caught */
if ((int)sync_bind_info.out_bind_fd < 0) {
pr_err("Invalid out_fd:%d\n", sync_bind_info.out_bind_fd);
return -EINVAL;
}
return spec_sync_bind_array(&sync_bind_info);
}
static long spec_sync_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct sync_device *obj = file->private_data;
int ret = 0;
switch (cmd) {
case SPEC_SYNC_IOC_CREATE_FENCE:
ret = spec_sync_ioctl_create_fence(obj, arg);
break;
case SPEC_SYNC_IOC_BIND:
ret = spec_sync_ioctl_bind(obj, arg);
break;
case SPEC_SYNC_IOC_GET_VER:
ret = spec_sync_ioctl_get_ver(obj, arg);
break;
default:
ret = -ENOTTY;
}
return ret;
}
const struct file_operations spec_sync_fops = {
.owner = THIS_MODULE,
.open = spec_sync_open,
.release = spec_sync_release,
.unlocked_ioctl = spec_sync_ioctl,
};
static int spec_sync_register_device(void)
{
struct dummy_spec_fence *dummy_fence_p = NULL;
int ret;
#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
sync_dev.dev_class = class_create(CLASS_NAME);
#else
sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME);
#endif
if (IS_ERR(sync_dev.dev_class)) {
pr_err("%s: class_create fail.\n", __func__);
goto res_err;
}
ret = alloc_chrdev_region(&sync_dev.dev_num, 0, 1, DRV_NAME);
if (ret) {
pr_err("%s: alloc_chrdev_region fail.\n", __func__);
goto alloc_chrdev_region_err;
}
sync_dev.dev = device_create(sync_dev.dev_class, NULL,
sync_dev.dev_num,
&sync_dev, DRV_NAME);
if (IS_ERR(sync_dev.dev)) {
pr_err("%s: device_create fail.\n", __func__);
goto device_create_err;
}
sync_dev.cdev = cdev_alloc();
if (sync_dev.cdev == NULL) {
pr_err("%s: cdev_alloc fail.\n", __func__);
goto cdev_alloc_err;
}
cdev_init(sync_dev.cdev, &spec_sync_fops);
sync_dev.cdev->owner = THIS_MODULE;
ret = cdev_add(sync_dev.cdev, sync_dev.dev_num, 1);
if (ret) {
pr_err("%s: cdev_add fail.\n", __func__);
goto cdev_add_err;
}
sync_dev.version = DRV_VERSION;
mutex_init(&sync_dev.lock);
mutex_init(&sync_dev.l_lock);
INIT_LIST_HEAD(&sync_dev.fence_array_list);
init_waitqueue_head(&sync_dev.wait_queue);
dummy_fence_p = kzalloc(sizeof(struct dummy_spec_fence), GFP_KERNEL);
if (!dummy_fence_p) {
ret = -ENOMEM;
goto cdev_add_err;
}
spin_lock_init(&dummy_fence_p->lock);
dma_fence_init(&dummy_fence_p->fence, &dummy_spec_fence_ops, &dummy_fence_p->lock,
DUMMY_CONTEXT, DUMMY_SEQNO);
sync_dev.dummy_fence = dummy_fence_p;
return 0;
cdev_add_err:
cdev_del(sync_dev.cdev);
cdev_alloc_err:
device_destroy(sync_dev.dev_class, sync_dev.dev_num);
device_create_err:
unregister_chrdev_region(sync_dev.dev_num, 1);
alloc_chrdev_region_err:
class_destroy(sync_dev.dev_class);
res_err:
return -ENODEV;
}
static int __init spec_sync_init(void)
{
int ret = 0;
ret = spec_sync_register_device();
if (ret) {
pr_err("%s: speculative sync driver register fail.\n", __func__);
return ret;
}
return ret;
}
static void __exit spec_sync_deinit(void)
{
cdev_del(sync_dev.cdev);
device_destroy(sync_dev.dev_class, sync_dev.dev_num);
unregister_chrdev_region(sync_dev.dev_num, 1);
class_destroy(sync_dev.dev_class);
dma_fence_put(&sync_dev.dummy_fence->fence);
}
module_init(spec_sync_init);
module_exit(spec_sync_deinit);
MODULE_DESCRIPTION("QCOM Speculative Sync Driver");
MODULE_LICENSE("GPL v2");
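
A hedged kernel-side sketch of a consumer of the exported spec_sync_wait_bind_array() helper above; the calling driver, the 100 ms timeout, and the way it obtained the fence are illustrative assumptions:

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
/* plus the header declaring spec_sync_wait_bind_array() */

static long example_wait_spec_fence(struct dma_fence *fence)
{
	/* to_dma_fence_array() returns NULL if @fence is not a fence array. */
	struct dma_fence_array *array = to_dma_fence_array(fence);
	int ret;

	if (!array)
		return -EINVAL;

	/* Wait up to 100 ms (illustrative) for userspace to issue the bind. */
	ret = spec_sync_wait_bind_array(array, 100);
	if (ret)
		return ret;

	/* The array now holds real child fences; wait on them interruptibly. */
	return dma_fence_wait(fence, true);
}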