sm8750: init kernel modules repo

This commit is contained in:
2025-08-11 12:21:01 +02:00
parent 2681143b87
commit facad83b01
8851 changed files with 6894561 additions and 0 deletions

View File

@@ -0,0 +1,42 @@
# Android build glue: builds sync_fence as a DLKM via the common dlkm rules.
LOCAL_PATH := $(call my-dir)
# NOTE(review): assigned before the $(CLEAR_VARS) below, and $(CLEAR_VARS)
# resets LOCAL_* variables -- confirm the DDK build flag is still in effect
# where the DLKM build consumes it.
LOCAL_MODULE_DDK_BUILD := true
include $(CLEAR_VARS)
# This makefile is only for DLKM
ifneq ($(findstring vendor,$(LOCAL_PATH)),)
ifneq ($(findstring opensource,$(LOCAL_PATH)),)
SYNC_FENCE_BLD_DIR := $(TOP)/vendor/qcom/opensource/mm-drivers/sync_fence
endif # opensource
DLKM_DIR := $(TOP)/device/qcom/common/dlkm
# Rebuild whenever anything under this directory changes.
LOCAL_ADDITIONAL_DEPENDENCIES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
###########################################################
# This is set once per LOCAL_PATH, not per (kernel) module
# KBUILD_OPTIONS is forwarded to the out-of-tree Kbuild file.
KBUILD_OPTIONS := SYNC_FENCE_ROOT=$(SYNC_FENCE_BLD_DIR)
KBUILD_OPTIONS += MODNAME=sync_fence
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
###########################################################
# Module 1: export Module.symvers so dependent DLKMs can link against us.
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := sync-fence-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
# Module 2: the sync_fence.ko kernel module itself.
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(wildcard $(LOCAL_PATH)/**/*) $(wildcard $(LOCAL_PATH)/*)
LOCAL_MODULE := sync_fence.ko
LOCAL_MODULE_KBUILD_NAME := sync_fence.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###########################################################
endif # DLKM check

View File

@@ -0,0 +1,22 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
load(":define_sync_fence.bzl", "define_sync_fence")

package(
    default_visibility = [
        "//visibility:public"
    ],
)

# UAPI headers exported for user-space / other packages.
ddk_headers(
    name = "sync_fence_uapi_headers",
    hdrs = glob(["include/uapi/sync_fence/*.h"]),
    includes = ["include"]
)

# Kernel-side headers for DLKMs that link against sync_fence.
ddk_headers(
    name = "sync_fence_headers",
    hdrs = glob(["include/*.h"]),
    includes = ["include"]
)

# Expands into per-target/variant module and dist rules (see define_sync_fence.bzl).
define_sync_fence()

View File

@@ -0,0 +1,16 @@
# SPDX-License-Identifier: GPL-2.0-only
# Kbuild for the sync_fence DLKM. SYNC_FENCE_ROOT is supplied through
# KBUILD_OPTIONS by the Makefile / Android.mk wrappers.
KDIR := $(TOP)/kernel_platform/msm-kernel
# NOTE(review): no '/' between $(SYNC_FENCE_ROOT) and 'sync_fence' -- this
# only resolves when SYNC_FENCE_ROOT ends in a trailing slash (the Makefile
# passes $(KERNEL_SRC)/$(M)/../). Confirm the Android.mk value matches.
LINUXINCLUDE += -I$(SYNC_FENCE_ROOT)sync_fence/include/
include $(SYNC_FENCE_ROOT)/config/kalamammdrivers.conf
# Force-include the generated config header so CONFIG_* macros are defined.
LINUXINCLUDE += -include $(SYNC_FENCE_ROOT)/config/kalamammdriversconf.h
ifdef CONFIG_QCOM_SPEC_SYNC
obj-m += sync_fence.o
sync_fence-y := src/qcom_sync_file.o
CDEFINES += -DBUILD_TIMESTAMP=\"$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')\"
endif
EXTRA_CFLAGS += -Wformat-extra-args -Wstrict-prototypes -Wformat-insufficient-args \
		-Wformat-invalid-specifier -Wformat-zero-length -Wnonnull

View File

@@ -0,0 +1,4 @@
# Speculative sync (spec-fence) driver: exposes fence-arrays whose child
# fences are bound later from user space via the bind ioctl.
config QCOM_SPEC_SYNC
	bool "Enable spec fence"
	help
	  Enable sync_fence driver

View File

@@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
# Out-of-tree wrapper Makefile: delegates every goal to the kernel build
# system for this module directory ($(M)), forwarding KBUILD_OPTIONS.
# SYNC_FENCE_ROOT points at the parent (mm-drivers) directory; the trailing
# slash is required by Kbuild's $(SYNC_FENCE_ROOT)sync_fence/include path.
KBUILD_OPTIONS += SYNC_FENCE_ROOT=$(KERNEL_SRC)/$(M)/../

# These goals are commands, not files; without .PHONY a stray file named
# "clean" or "all" would silently disable them.
.PHONY: all modules_install clean

all: modules

modules_install:
	$(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(M) modules_install

# Catch-all: forward any other goal (modules, etc.) to the kernel tree.
%:
	$(MAKE) -C $(KERNEL_SRC) M=$(M) $@ $(KBUILD_OPTIONS)

clean:
	rm -f *.o *.ko *.mod.c *.mod.o *~ .*.cmd Module.symvers
	rm -rf .tmp_versions

View File

@@ -0,0 +1 @@
CONFIG_QCOM_SPEC_SYNC=y

View File

@@ -0,0 +1,33 @@
load("//build/kernel/kleaf:kernel.bzl", "ddk_module")
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
load("//msm-kernel:target_variants.bzl", "get_all_variants")
def _define_module(target, variant):
    """Defines the sync_fence ddk_module plus its dist rule for one
    (target, variant) pair; rule names are prefixed "<target>_<variant>"."""
    tv = "{}_{}".format(target, variant)
    ddk_module(
        name = "{}_sync_fence".format(tv),
        srcs = ["src/qcom_sync_file.c"],
        out = "sync_fence.ko",
        kconfig = "Kconfig",
        defconfig = "defconfig",
        deps = [
            "//msm-kernel:all_headers",
            "//vendor/qcom/opensource/mm-drivers:mm_drivers_headers",
        ],
        kernel_build = "//msm-kernel:{}".format(tv),
    )
    # Copy the built .ko into the product's dlkm staging directory.
    copy_to_dist_dir(
        name = "{}_sync_fence_dist".format(tv),
        data = [":{}_sync_fence".format(tv)],
        dist_dir = "out/target/product/{}/dlkm/lib/modules".format(target),
        flat = True,
        wipe_dist_dir = False,
        allow_duplicate_filenames = False,
        mode_overrides = {"**/*": "644"},
        log = "info",
    )

def define_sync_fence():
    """Instantiates the module/dist rules for every supported target/variant."""
    for (t, v) in get_all_variants():
        _define_module(t, v)

View File

@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _LINUX_QCOM_SPEC_SYNC_H
#define _LINUX_QCOM_SPEC_SYNC_H

#include <linux/dma-fence-array.h>

/* Extra dma_fence flag bits used to tag a speculative fence-array. */
#define SPEC_FENCE_FLAG_FENCE_ARRAY	16	/* fence-array is speculative */
#define SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND	17	/* fence-array is bound */

#if IS_ENABLED(CONFIG_QCOM_SPEC_SYNC)
/**
 * spec_sync_wait_bind_array() - Waits until the fence-array passed as parameter is bound.
 * @fence_array: fence-array to wait-on until it is populated.
 * @timeout_ms: timeout to wait.
 *
 * This function will wait until the fence-array passed as parameter is bound; i.e. all the
 * dma-fences that make up the fence-array are populated by the spec-fence driver bind ioctl.
 * Once this function returns success, all the fences in the array should be valid.
 *
 * Return: 0 on success or negative errno (-EINVAL)
 */
int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms);
#else
/* Stub when the spec-fence driver is disabled: always reports failure. */
static inline int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms)
{
	return -EINVAL;
}
#endif /* CONFIG_QCOM_SPEC_SYNC */

#endif /* _LINUX_QCOM_SPEC_SYNC_H */

View File

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note
# Top-level Makefile calls into asm-$(ARCH)
# List only non-arch directories below
header-y += sync_fence/

View File

@@ -0,0 +1,63 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 */

#ifndef _UAPI_LINUX_SPEC_SYNC_H
#define _UAPI_LINUX_SPEC_SYNC_H

#include <linux/ioctl.h>
#include <linux/types.h>

/* Values for fence_create_data.flags: how the array signals. */
#define SPEC_FENCE_SIGNAL_ANY 0x1
#define SPEC_FENCE_SIGNAL_ALL 0x2

/**
 * struct fence_bind_data - data passed to bind ioctl
 * @out_bind_fd: fd of the speculative fence array (returned earlier by
 *               SPEC_SYNC_IOC_CREATE_FENCE) whose children will be bound
 * @fds: user-space pointer to an array of child fence file descriptors
 */
struct fence_bind_data {
	__u32 out_bind_fd;
	__u64 fds;
};

/**
 * struct fence_create_data - detailed fence information
 * @num_fences: Total fences that array needs to carry.
 * @flags: Flags specifying on how to signal the array
 * @out_bind_fd: Returns the fence fd.
 */
struct fence_create_data {
	__u32 num_fences;
	__u32 flags;
	__u32 out_bind_fd;
};

#define SPEC_SYNC_MAGIC '>'

/**
 * DOC: SPEC_SYNC_IOC_BIND - bind two fences
 *
 * Takes a struct fence_bind_data. Binds the child fds with the fence array
 * identified by out_bind_fd.
 */
#define SPEC_SYNC_IOC_BIND _IOWR(SPEC_SYNC_MAGIC, 3, struct fence_bind_data)

/**
 * DOC: SPEC_SYNC_IOC_CREATE_FENCE - Create a fence array
 *
 * Takes a struct fence_create_data. If num_fences is > 0, fence array will be
 * created and returns the array fd in fence_create_data.out_bind_fd
 */
#define SPEC_SYNC_IOC_CREATE_FENCE _IOWR(SPEC_SYNC_MAGIC, 4, struct fence_create_data)

/**
 * DOC: SPEC_SYNC_IOC_GET_VER - Get Spec driver version
 *
 * Returns Spec driver version.
 */
#define SPEC_SYNC_IOC_GET_VER _IOWR(SPEC_SYNC_MAGIC, 5, __u64)

#endif /* _UAPI_LINUX_SPEC_SYNC_H */

View File

@@ -0,0 +1,596 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/sync_file.h>
#include <uapi/sync_fence/qcom_sync_file.h>
#include <qcom_sync_file.h>
#include <linux/version.h>
#define CLASS_NAME "sync"
#define DRV_NAME "spec_sync"
#define DRV_VERSION 1
#define NAME_LEN 32
#define FENCE_MIN 1
#define FENCE_MAX 32
#if IS_ENABLED(CONFIG_DEBUG_FS)
#define MAX_DEVICE_SUPPORTED 2
#else
#define MAX_DEVICE_SUPPORTED 1
#endif
#define DUMMY_CONTEXT 0xfafadadafafadada
#define DUMMY_SEQNO 0xefa9ce00efa9ce00
/*
 * dummy_spec_fence - shared placeholder fence installed in every slot of a
 * newly created speculative fence-array until user space binds real fences.
 */
struct dummy_spec_fence {
	struct dma_fence fence;
	spinlock_t lock;	/* lock handed to dma_fence_init() for @fence */
};

/*
 * sync_device - driver-global state; a single instance (sync_dev) backs all
 * opens of the spec_sync device node.
 */
struct sync_device {
	/* device info */
	struct class *dev_class;
	dev_t dev_num;
	struct device *dev;
	struct cdev *cdev;
	struct mutex lock;	/* serializes open/release accounting */
	struct dummy_spec_fence *dummy_fence;	/* shared dummy child fence */

	/* device drv data */
	atomic_t device_available;	/* number of currently open fds */
	char name[NAME_LEN];	/* comm of the first opener (set on first open) */
	uint32_t version;	/* reported via SPEC_SYNC_IOC_GET_VER */
	struct mutex l_lock;	/* protects fence_array_list */
	struct list_head fence_array_list;	/* tracked speculative arrays */
	wait_queue_head_t wait_queue;	/* woken when an array gets bound */
};

/* One tracker entry per speculative fence-array created by this driver. */
struct fence_array_node {
	struct dma_fence_array *fence_array;
	struct list_head list;
};

/* Speculative Sync Device Driver State */
static struct sync_device sync_dev;
/* Name callback shared by both dma_fence_ops hooks of the dummy fence. */
static const char *spec_fence_get_name_dummy(struct dma_fence *fence)
{
	return "dummy_fence";
}

/* Minimal ops for the dummy placeholder fence; only names are provided. */
static const struct dma_fence_ops dummy_spec_fence_ops = {
	.get_driver_name = spec_fence_get_name_dummy,
	.get_timeline_name = spec_fence_get_name_dummy,
};
/*
 * sanitize_fence_array - check whether @fence is a fence-array that this
 * driver created and still tracks in sync_dev.fence_array_list.
 *
 * Returns true when the array is tracked, false otherwise.
 */
static bool sanitize_fence_array(struct dma_fence_array *fence)
{
	struct fence_array_node *entry;
	bool found = false;

	mutex_lock(&sync_dev.l_lock);
	list_for_each_entry(entry, &sync_dev.fence_array_list, list) {
		if (entry->fence_array != fence)
			continue;
		found = true;
		break;
	}
	mutex_unlock(&sync_dev.l_lock);

	return found;
}
/*
 * clear_fence_array_tracker - walk the tracked fence-array list and drop
 * entries whose arrays have signalled.
 * @force_clear: when true (last device fd released), additionally force
 *               pending arrays to signal and drop every entry.
 */
static void clear_fence_array_tracker(bool force_clear)
{
	struct fence_array_node *node, *temp;
	struct dma_fence_array *array;
	struct dma_fence *fence;
	bool is_signaled;

	mutex_lock(&sync_dev.l_lock);
	list_for_each_entry_safe(node, temp, &sync_dev.fence_array_list, list) {
		array = node->fence_array;
		fence = &array->base;
		is_signaled = dma_fence_is_signaled(fence);

		/* An array without a children table carries no real fences. */
		if (force_clear && !array->fences)
			array->num_fences = 0;

		pr_debug("force_clear:%d is_signaled:%d pending:%d\n", force_clear, is_signaled,
			atomic_read(&array->num_pending));

		/* Force-signal arrays that never got bound/signalled. */
		if (force_clear && !is_signaled && atomic_dec_and_test(&array->num_pending))
			dma_fence_signal(fence);

		if (force_clear || is_signaled) {
			/* Release our reference and stop tracking the array. */
			dma_fence_put(fence);
			list_del(&node->list);
			kfree(node);
		}
	}
	mutex_unlock(&sync_dev.l_lock);
}
/*
 * spec_fence_init_locked - account one more open of the device.
 * Caller must hold sync_dev.lock.
 *
 * Records @name as the owner on the first open. Returns @obj on success or
 * NULL once MAX_DEVICE_SUPPORTED opens are already outstanding.
 */
static struct sync_device *spec_fence_init_locked(struct sync_device *obj, const char *name)
{
	int open_count = atomic_read(&obj->device_available);

	if (open_count >= MAX_DEVICE_SUPPORTED) {
		pr_err("number of device fds are limited to %d, device opened:%d\n",
			MAX_DEVICE_SUPPORTED, open_count);
		return NULL;
	}

	if (open_count == 0) {
		/* First opener: remember its task name as the owner. */
		memset(obj->name, 0, NAME_LEN);
		strscpy(obj->name, name, sizeof(obj->name));
	}

	atomic_inc(&obj->device_available);

	return obj;
}
/*
 * spec_sync_open - file_operations.open handler.
 *
 * Registers the caller as an opener of the single global sync_dev; fails
 * with -EEXIST once the open limit is reached.
 */
static int spec_sync_open(struct inode *inode, struct file *file)
{
	char comm[TASK_COMM_LEN];
	struct sync_device *dev_obj;
	int ret = 0;

	if (!inode || !inode->i_cdev || !file) {
		pr_err("NULL pointer passed\n");
		return -EINVAL;
	}

	mutex_lock(&sync_dev.lock);

	get_task_comm(comm, current);
	dev_obj = spec_fence_init_locked(&sync_dev, comm);
	if (!dev_obj) {
		pr_err("Spec device exists owner:%s caller:%s\n", sync_dev.name, comm);
		ret = -EEXIST;
	} else {
		file->private_data = dev_obj;
	}

	mutex_unlock(&sync_dev.lock);

	return ret;
}
/*
 * spec_sync_release - file_operations.release handler.
 *
 * Drops one open count; when the last fd goes away, force-clears every
 * tracked fence-array so no waiter is left pending.
 */
static int spec_sync_release(struct inode *inode, struct file *file)
{
	struct sync_device *obj = file->private_data;
	int ret = 0;

	mutex_lock(&sync_dev.lock);
	if (atomic_read(&obj->device_available) == 0) {
		pr_err("no device to release!!\n");
		ret = -ENODEV;
	} else if (atomic_dec_return(&obj->device_available) == 0) {
		/* Last opener gone: tear down all tracked arrays. */
		clear_fence_array_tracker(true);
	}
	mutex_unlock(&sync_dev.lock);

	return ret;
}
/*
 * spec_sync_ioctl_get_ver - SPEC_SYNC_IOC_GET_VER handler.
 *
 * Copies the 32-bit driver version to user space. Returns 0 or -EFAULT.
 */
static int spec_sync_ioctl_get_ver(struct sync_device *obj, unsigned long __user arg)
{
	uint32_t version = obj->version;

	return copy_to_user((void __user *)arg, &version, sizeof(uint32_t)) ? -EFAULT : 0;
}
/*
 * spec_sync_create_array - create a speculative dma-fence-array wrapped in
 * a sync_file and return its fd.
 * @f: ioctl payload; @f->num_fences slots are created (FENCE_MIN..FENCE_MAX)
 *     and @f->flags selects signal-any vs signal-all semantics.
 *
 * Every slot initially points at the shared dummy fence; the real fences
 * are installed later by spec_sync_bind_array(). The new array is appended
 * to sync_dev.fence_array_list so bind can validate it.
 *
 * Returns the installed fd (>= 0) on success or a negative errno.
 */
static int spec_sync_create_array(struct fence_create_data *f)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	struct sync_file *sync_file;
	struct dma_fence_array *fence_array;
	struct fence_array_node *node;
	struct dma_fence **fences;
	struct dummy_spec_fence *dummy_fence_p = sync_dev.dummy_fence;
	bool signal_any;
	int i, ret = 0;

	if (fd < 0) {
		pr_err("failed to get_unused_fd_flags\n");
		return fd;
	}

	if (f->num_fences < FENCE_MIN || f->num_fences > FENCE_MAX) {
		pr_err("invalid arguments num_fences:%d\n", f->num_fences);
		ret = -ERANGE;
		goto error_args;
	}

	fences = kmalloc_array(f->num_fences, sizeof(void *), GFP_KERNEL|__GFP_ZERO);
	if (!fences) {
		ret = -ENOMEM;
		goto error_args;
	}

	/* Pre-populate every slot with the shared dummy fence. */
	for (i = 0; i < f->num_fences; i++) {
		fences[i] = &dummy_fence_p->fence;
		/*
		 * Increase dummy-fences refcount here, we must do this since any call to
		 * fence-array release while dummy-fences are the children of the fence-array
		 * will decrement the dummy_fence refcount. Therefore, to prevent the release
		 * of the dummy_fence fences, we must keep an extra refcount for every time that
		 * the fence-array->release can decrement its children's refcount. the extra
		 * refcount will be decreased implicitly when dma_fence_put(&fence_array->base)
		 * called.
		 */
		dma_fence_get(&dummy_fence_p->fence);
	}

	/* SPEC_FENCE_SIGNAL_ALL clears signal-on-any; default is signal-any. */
	signal_any = f->flags & SPEC_FENCE_SIGNAL_ALL ? false : true;
	fence_array = dma_fence_array_create(f->num_fences, fences,
				dma_fence_context_alloc(1), 0, signal_any);
	if (!fence_array) {
		/* fence-array create failed, remove extra refcounts */
		for (i = 0; i < f->num_fences; i++)
			dma_fence_put(&dummy_fence_p->fence);
		kfree(fences);
		ret = -EINVAL;
		goto error_args;
	}

	/* Set the enable signal such that signalling is not done during wait*/
	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence_array->base.flags);
	/* Tag the array as speculative so bind/wait can recognize it. */
	set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags);

	sync_file = sync_file_create(&fence_array->base);
	if (!sync_file) {
		pr_err("sync_file_create fail\n");
		ret = -EINVAL;
		goto err;
	}

	node = kzalloc((sizeof(struct fence_array_node)), GFP_KERNEL);
	if (!node) {
		fput(sync_file->file);
		ret = -ENOMEM;
		goto err;
	}
	fd_install(fd, sync_file->file);

	/* Track the array so the bind ioctl can validate its fd later. */
	node->fence_array = fence_array;
	mutex_lock(&sync_dev.l_lock);
	list_add_tail(&node->list, &sync_dev.fence_array_list);
	mutex_unlock(&sync_dev.l_lock);

	pr_debug("spec fd:%d num_fences:%u\n", fd, f->num_fences);
	return fd;

err:
	dma_fence_put(&fence_array->base);
error_args:
	put_unused_fd(fd);
	return ret;
}
/*
 * spec_sync_ioctl_create_fence - SPEC_SYNC_IOC_CREATE_FENCE handler.
 *
 * Copies the request in, creates the speculative array, and writes the new
 * fd back through out_bind_fd. Returns 0 or a negative errno.
 */
static int spec_sync_ioctl_create_fence(struct sync_device *obj, unsigned long __user arg)
{
	struct fence_create_data req;
	int fd;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	fd = spec_sync_create_array(&req);
	if (fd < 0)
		return fd;

	req.out_bind_fd = fd;

	return copy_to_user((void __user *)arg, &req, sizeof(req)) ? -EFAULT : 0;
}
/* Exported API; see the kernel-doc in include/qcom_sync_file.h. */
int spec_sync_wait_bind_array(struct dma_fence_array *fence_array, u32 timeout_ms)
{
	int ret;

	/* Check if fence-array is a speculative fence */
	if (!fence_array || !test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY, &fence_array->base.flags)) {
		pr_err("invalid fence!\n");
		return -EINVAL;
	} else if (test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags)) {
		/* This fence-array is already bound, just return success */
		return 0;
	}

	/* Wait for the fence-array bind */
	ret = wait_event_timeout(sync_dev.wait_queue,
		test_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags),
		msecs_to_jiffies(timeout_ms));
	if (!ret) {
		/* wait_event_timeout() returns 0 only when the timeout elapsed. */
		pr_err("timed out waiting for bind fence-array %d\n", timeout_ms);
		ret = -ETIMEDOUT;
	} else {
		/* Positive return means the condition became true; report success. */
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spec_sync_wait_bind_array);
/*
 * spec_sync_bind_array - replace the dummy children of a speculative
 * fence-array with the real fences supplied by user space.
 * @sync_bind_info: out_bind_fd identifies the speculative array; fds is a
 *                  user pointer to num_fences child sync_file fds.
 *
 * On a bind failure inside the swap loop the array is marked with an error
 * and signalled so waiters are not left hanging. Returns 0 or -errno.
 */
static int spec_sync_bind_array(struct fence_bind_data *sync_bind_info)
{
	struct dma_fence_array *fence_array;
	struct dma_fence *fence = NULL;
	struct dma_fence *user_fence = NULL;
	int *user_fds, ret = 0, i;
	u32 num_fences;

	fence = sync_file_get_fence(sync_bind_info->out_bind_fd);
	if (!fence) {
		pr_err("dma fence failure out_fd:%d\n", sync_bind_info->out_bind_fd);
		return -EINVAL;
	}

	if (dma_fence_is_signaled(fence)) {
		pr_err("spec fence is already signaled, out_fd:%d\n",
			sync_bind_info->out_bind_fd);
		ret = -EINVAL;
		goto end;
	}

	fence_array = container_of(fence, struct dma_fence_array, base);
	/* Only arrays created (and still tracked) by this driver may be bound. */
	if (!sanitize_fence_array(fence_array)) {
		pr_err("spec fence not found in the registered list out_fd:%d\n",
			sync_bind_info->out_bind_fd);
		ret = -EINVAL;
		goto end;
	}

	num_fences = fence_array->num_fences;
	/* Every slot must still hold the dummy fence, i.e. not yet bound. */
	for (i = 0; i < num_fences; i++) {
		if (!(fence_array->fences[i]->context == DUMMY_CONTEXT &&
			fence_array->fences[i]->seqno == DUMMY_SEQNO)) {
			pr_err("fence array already populated, spec fd:%d status:%d flags:0x%lx\n",
				sync_bind_info->out_bind_fd, dma_fence_get_status(fence),
				fence->flags);
			ret = -EINVAL;
			goto end;
		}
	}

	user_fds = kzalloc(num_fences * (sizeof(int)), GFP_KERNEL);
	if (!user_fds) {
		ret = -ENOMEM;
		goto end;
	}

	if (copy_from_user(user_fds, (void __user *)sync_bind_info->fds,
			num_fences * sizeof(int))) {
		ret = -EFAULT;
		goto out;
	}

	/* Swap the dummy children for the user fences under the array's lock. */
	spin_lock(fence->lock);
	for (i = 0; i < num_fences; i++) {
		user_fence = sync_file_get_fence(user_fds[i]);
		if (!user_fence) {
			pr_warn("bind fences are invalid !! user_fd:%d out_bind_fd:%d\n",
				user_fds[i], sync_bind_info->out_bind_fd);
			ret = -EINVAL;
			goto bind_invalid;
		} else if (user_fence->context == fence_array->base.context &&
				user_fence->seqno == fence_array->base.seqno) {
			/* A child must not be the speculative array itself. */
			pr_err("invalid spec fence, ufd:%d o_b_fd:%d ctx:%lld seqno:%lld\n",
				user_fds[i], sync_bind_info->out_bind_fd,
				user_fence->context, user_fence->seqno);
			ret = -EINVAL;
			goto bind_invalid;
		}
		fence_array->fences[i] = user_fence;
		/*
		 * At this point the fence-array fully contains valid fences and no more the
		 * dummy-fence, therefore, we must release the extra refcount that the
		 * creation of the speculative fence added to the dummy-fence.
		 */
		dma_fence_put(&sync_dev.dummy_fence->fence);
		pr_debug("spec fd:%d i:%d bind fd:%d error:%d\n", sync_bind_info->out_bind_fd,
			i, user_fds[i], fence_array->fences[i]->error);
	}
	clear_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags);
	spin_unlock(fence->lock);

	dma_fence_enable_sw_signaling(&fence_array->base);
	/* Drop tracker entries for arrays that have signalled meanwhile. */
	clear_fence_array_tracker(false);

bind_invalid:
	/* NOTE: reached with fence->lock still held on the error paths above;
	 * the success path releases it before falling through.
	 */
	set_bit(SPEC_FENCE_FLAG_FENCE_ARRAY_BOUND, &fence_array->base.flags);
	wake_up_all(&sync_dev.wait_queue);

	if (ret) {
		/* Propagate the bind failure to anyone waiting on the array. */
		dma_fence_set_error(fence, -EINVAL);
		spin_unlock(fence->lock);
		dma_fence_signal(fence);
		clear_fence_array_tracker(false);
	}
out:
	kfree(user_fds);
end:
	dma_fence_put(fence);
	return ret;
}
/*
 * spec_sync_ioctl_bind - SPEC_SYNC_IOC_BIND handler.
 *
 * Copies the bind request from user space, validates out_bind_fd, and
 * delegates to spec_sync_bind_array(). Returns 0 or a negative errno.
 */
static int spec_sync_ioctl_bind(struct sync_device *obj, unsigned long __user arg)
{
	struct fence_bind_data sync_bind_info;

	if (copy_from_user(&sync_bind_info, (void __user *)arg, sizeof(struct fence_bind_data)))
		return -EFAULT;

	/*
	 * out_bind_fd is __u32, so the original "< 0" test was always false
	 * (dead code). Compare as int so values above INT_MAX -- which can
	 * never be a valid fd -- are rejected here instead of deeper down.
	 */
	if ((int)sync_bind_info.out_bind_fd < 0) {
		pr_err("Invalid out_fd:%d\n", sync_bind_info.out_bind_fd);
		return -EINVAL;
	}

	return spec_sync_bind_array(&sync_bind_info);
}
/*
 * spec_sync_ioctl - file_operations.unlocked_ioctl dispatcher.
 *
 * Routes the three supported commands to their handlers; anything else is
 * answered with -ENOTTY.
 */
static long spec_sync_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct sync_device *obj = file->private_data;

	switch (cmd) {
	case SPEC_SYNC_IOC_CREATE_FENCE:
		return spec_sync_ioctl_create_fence(obj, arg);
	case SPEC_SYNC_IOC_BIND:
		return spec_sync_ioctl_bind(obj, arg);
	case SPEC_SYNC_IOC_GET_VER:
		return spec_sync_ioctl_get_ver(obj, arg);
	default:
		return -ENOTTY;
	}
}
/* File operations for the spec_sync character device. */
const struct file_operations spec_sync_fops = {
	.owner = THIS_MODULE,
	.open = spec_sync_open,
	.release = spec_sync_release,
	.unlocked_ioctl = spec_sync_ioctl,
};
/*
 * spec_sync_register_device - create the spec_sync char device and
 * initialize the driver-global state (locks, tracker list, dummy fence).
 *
 * Returns 0 on success, -ENODEV on failure; partially created resources
 * are unwound through the cascading error labels before returning.
 */
static int spec_sync_register_device(void)
{
	struct dummy_spec_fence *dummy_fence_p = NULL;
	int ret;

#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
	sync_dev.dev_class = class_create(CLASS_NAME);
#else
	sync_dev.dev_class = class_create(THIS_MODULE, CLASS_NAME);
#endif
	/*
	 * Fix: class_create() reports failure via ERR_PTR(), never NULL, so
	 * the previous "== NULL" test could not detect a failure and the
	 * error pointer would have been passed to device_create() below.
	 */
	if (IS_ERR_OR_NULL(sync_dev.dev_class)) {
		pr_err("%s: class_create fail.\n", __func__);
		goto res_err;
	}

	ret = alloc_chrdev_region(&sync_dev.dev_num, 0, 1, DRV_NAME);
	if (ret) {
		pr_err("%s: alloc_chrdev_region fail.\n", __func__);
		goto alloc_chrdev_region_err;
	}

	sync_dev.dev = device_create(sync_dev.dev_class, NULL,
		sync_dev.dev_num,
		&sync_dev, DRV_NAME);
	if (IS_ERR(sync_dev.dev)) {
		pr_err("%s: device_create fail.\n", __func__);
		goto device_create_err;
	}

	sync_dev.cdev = cdev_alloc();
	if (sync_dev.cdev == NULL) {
		pr_err("%s: cdev_alloc fail.\n", __func__);
		goto cdev_alloc_err;
	}

	cdev_init(sync_dev.cdev, &spec_sync_fops);
	sync_dev.cdev->owner = THIS_MODULE;
	ret = cdev_add(sync_dev.cdev, sync_dev.dev_num, 1);
	if (ret) {
		pr_err("%s: cdev_add fail.\n", __func__);
		goto cdev_add_err;
	}

	sync_dev.version = DRV_VERSION;
	mutex_init(&sync_dev.lock);
	mutex_init(&sync_dev.l_lock);
	INIT_LIST_HEAD(&sync_dev.fence_array_list);
	init_waitqueue_head(&sync_dev.wait_queue);

	/* Shared dummy fence used to pre-populate speculative arrays. */
	dummy_fence_p = kzalloc(sizeof(struct dummy_spec_fence), GFP_KERNEL);
	if (!dummy_fence_p) {
		ret = -ENOMEM;
		goto cdev_add_err;
	}

	spin_lock_init(&dummy_fence_p->lock);
	dma_fence_init(&dummy_fence_p->fence, &dummy_spec_fence_ops, &dummy_fence_p->lock,
		DUMMY_CONTEXT, DUMMY_SEQNO);
	sync_dev.dummy_fence = dummy_fence_p;

	return 0;

cdev_add_err:
	cdev_del(sync_dev.cdev);
cdev_alloc_err:
	device_destroy(sync_dev.dev_class, sync_dev.dev_num);
device_create_err:
	unregister_chrdev_region(sync_dev.dev_num, 1);
alloc_chrdev_region_err:
	class_destroy(sync_dev.dev_class);
res_err:
	return -ENODEV;
}
/*
 * spec_sync_init - module entry point; registers the spec_sync device.
 *
 * Returns 0 on success or the error from spec_sync_register_device().
 */
static int __init spec_sync_init(void)
{
	int ret = spec_sync_register_device();

	if (ret)
		pr_err("%s: speculative sync driver register fail.\n", __func__);

	return ret;
}
/*
 * spec_sync_deinit - module exit point; unwinds everything registered in
 * spec_sync_register_device().
 */
static void __exit spec_sync_deinit(void)
{
	cdev_del(sync_dev.cdev);
	device_destroy(sync_dev.dev_class, sync_dev.dev_num);
	unregister_chrdev_region(sync_dev.dev_num, 1);
	class_destroy(sync_dev.dev_class);
	/* Drop the initial reference taken by dma_fence_init() on the dummy fence. */
	dma_fence_put(&sync_dev.dummy_fence->fence);
}

module_init(spec_sync_init);
module_exit(spec_sync_deinit);

MODULE_DESCRIPTION("QCOM Speculative Sync Driver");
MODULE_LICENSE("GPL v2");