Replace common Qualcomm (qcom) sources with Samsung ones

This commit is contained in:
SaschaNes
2025-08-12 22:13:00 +02:00
parent ba24dcded9
commit 6f7753de11
5682 changed files with 2450203 additions and 103634 deletions

View File

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2008-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __ADRENO_H
#define __ADRENO_H
@@ -79,41 +79,6 @@
/* ADRENO_GPUREV - Return the GPU ID for the given adreno_device */
#define ADRENO_GPUREV(_a) ((_a)->gpucore->gpurev)
/*
* Disable local interrupts and CPU preemption to avoid interruptions
* while holding the CP semaphore; otherwise, it could stall the CP.
* Make sure to call ADRENO_RELEASE_CP_SEMAPHORE after calling the
* below macro to reenable CPU interrupts.
*/
#define ADRENO_ACQUIRE_CP_SEMAPHORE(_adreno_dev, _flags) \
({ \
bool ret = true; \
if ((_adreno_dev)->gpucore->gpudev->acquire_cp_semaphore) { \
local_irq_save(_flags); \
preempt_disable(); \
ret = (_adreno_dev)->gpucore->gpudev->acquire_cp_semaphore(_adreno_dev); \
if (!ret) { \
preempt_enable(); \
local_irq_restore(_flags); \
dev_err_ratelimited(KGSL_DEVICE(_adreno_dev)->dev, \
"Timed out waiting to acquire CP semaphore:" \
" status=0x%08x\n", ret); \
} \
} \
ret; \
})
#define ADRENO_RELEASE_CP_SEMAPHORE(_adreno_dev, _flags) \
({ \
do { \
if ((_adreno_dev)->gpucore->gpudev->release_cp_semaphore) { \
(_adreno_dev)->gpucore->gpudev->release_cp_semaphore(_adreno_dev); \
preempt_enable(); \
local_irq_restore(_flags); \
} \
} while (0);\
})
/*
* ADRENO_FEATURE - return true if the specified feature is supported by the GPU
* core
@@ -286,18 +251,16 @@ enum adreno_gpurev {
ADRENO_REV_GEN7_9_1 = ADRENO_GPUREV_VALUE(7, 9, 1),
ADRENO_REV_GEN7_14_0 = ADRENO_GPUREV_VALUE(7, 14, 0),
ADRENO_REV_GEN7_11_0 = ADRENO_GPUREV_VALUE(7, 11, 0),
ADRENO_REV_GEN7_17_0 = ADRENO_GPUREV_VALUE(7, 17, 0),
ADRENO_REV_GEN8_0_0 = ADRENO_GPUREV_VALUE(8, 0, 0),
ADRENO_REV_GEN8_0_1 = ADRENO_GPUREV_VALUE(8, 0, 1),
ADRENO_REV_GEN8_3_0 = ADRENO_GPUREV_VALUE(8, 3, 0),
ADRENO_REV_GEN8_4_0 = ADRENO_GPUREV_VALUE(8, 4, 0),
ADRENO_REV_GEN8_6_0 = ADRENO_GPUREV_VALUE(8, 6, 0),
};
#define ADRENO_SOFT_FAULT BIT(0)
#define ADRENO_HARD_FAULT BIT(1)
#define ADRENO_TIMEOUT_FAULT BIT(2)
#define ADRENO_IOMMU_STALL_ON_PAGE_FAULT BIT(3)
#define ADRENO_IOMMU_PAGE_FAULT BIT(3)
#define ADRENO_PREEMPT_FAULT BIT(4)
#define ADRENO_GMU_FAULT BIT(5)
#define ADRENO_CTX_DETATCH_TIMEOUT_FAULT BIT(6)
@@ -801,15 +764,8 @@ struct adreno_device {
/** @scheduler_work: work_struct to put the gpu command scheduler in a work queue */
struct kthread_work scheduler_work;
/** @scheduler_fault: Atomic to trigger scheduler based fault recovery */
atomic_t scheduler_fault;
/** @dcvs_tuning_mutex: Mutex taken during dcvs tuning */
struct mutex dcvs_tuning_mutex;
/** @dcvs_tuning_mingap_lvl: Current DCVS tuning level for mingap */
u32 dcvs_tuning_mingap_lvl;
/** @dcvs_tuning_penalty_lvl: Current DCVS tuning level for penalty */
u32 dcvs_tuning_penalty_lvl;
/** @dcvs_tuning_numbusy_lvl: Current DCVS tuning level for numbusy */
u32 dcvs_tuning_numbusy_lvl;
atomic_t scheduler_fault;
bool opcode_err;
};
/* Time to wait for suspend recovery gate to complete */
@@ -1038,14 +994,6 @@ struct adreno_gpudev {
* @lpac_fault_header: Print LPAC fault header
*/
void (*lpac_fault_header)(struct adreno_device *adreno_dev, struct kgsl_drawobj *drawobj);
/**
* @acquire_cp_semaphore: Return true if CP semaphore is acquired, otherwise false
*/
bool (*acquire_cp_semaphore)(struct adreno_device *adreno_dev);
/**
* @release_cp_semaphore: Release CP semaphore
*/
void (*release_cp_semaphore)(struct adreno_device *adreno_dev);
};
/**
@@ -1333,12 +1281,10 @@ ADRENO_TARGET(gen7_9_0, ADRENO_REV_GEN7_9_0)
ADRENO_TARGET(gen7_9_1, ADRENO_REV_GEN7_9_1)
ADRENO_TARGET(gen7_14_0, ADRENO_REV_GEN7_14_0)
ADRENO_TARGET(gen7_11_0, ADRENO_REV_GEN7_11_0)
ADRENO_TARGET(gen7_17_0, ADRENO_REV_GEN7_17_0)
ADRENO_TARGET(gen8_0_0, ADRENO_REV_GEN8_0_0)
ADRENO_TARGET(gen8_0_1, ADRENO_REV_GEN8_0_1)
ADRENO_TARGET(gen8_3_0, ADRENO_REV_GEN8_3_0)
ADRENO_TARGET(gen8_4_0, ADRENO_REV_GEN8_4_0)
ADRENO_TARGET(gen8_6_0, ADRENO_REV_GEN8_6_0)
static inline int adreno_is_gen7_9_x(struct adreno_device *adreno_dev)
{
@@ -1351,28 +1297,23 @@ static inline int adreno_is_gen7_0_x_family(struct adreno_device *adreno_dev)
adreno_is_gen7_4_0(adreno_dev) || adreno_is_gen7_3_0(adreno_dev);
}
static inline int adreno_is_gen7_14_0_family(struct adreno_device *adreno_dev)
{
return adreno_is_gen7_14_0(adreno_dev) || adreno_is_gen7_17_0(adreno_dev);
}
static inline int adreno_is_gen7_2_x_family(struct adreno_device *adreno_dev)
{
return adreno_is_gen7_2_0(adreno_dev) || adreno_is_gen7_2_1(adreno_dev) ||
adreno_is_gen7_6_0(adreno_dev) || adreno_is_gen7_9_x(adreno_dev) ||
adreno_is_gen7_14_0_family(adreno_dev) || adreno_is_gen7_11_0(adreno_dev);
adreno_is_gen7_14_0(adreno_dev) || adreno_is_gen7_11_0(adreno_dev);
}
static inline int adreno_is_gen8_0_x_family(struct adreno_device *adreno_dev)
{
return adreno_is_gen8_0_0(adreno_dev) || adreno_is_gen8_0_1(adreno_dev) ||
adreno_is_gen8_4_0(adreno_dev) || adreno_is_gen8_6_0(adreno_dev);
adreno_is_gen8_4_0(adreno_dev);
}
/* Gen7 targets which does not support concurrent binning */
static inline int adreno_is_gen7_no_cb_family(struct adreno_device *adreno_dev)
{
return adreno_is_gen7_14_0_family(adreno_dev) || adreno_is_gen7_3_0(adreno_dev);
return adreno_is_gen7_14_0(adreno_dev) || adreno_is_gen7_3_0(adreno_dev);
}
/*
@@ -1977,7 +1918,7 @@ void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev);
*/
void adreno_get_bus_counters(struct adreno_device *adreno_dev);
/**
/**
* adreno_gmu_bus_ab_quantize - Calculate the AB vote that needs to be sent to GMU
* @adreno_dev: Handle to the adreno device
* @ab: ab request that needs to be scaled in MBps