replace common qcom sources with samsung ones
@@ -1208,7 +1208,7 @@ static int kgsl_iommu_fault_handler(struct kgsl_mmu *mmu,
         struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
         u64 ptbase;
         u32 contextidr;
-        bool stall, terminate;
+        bool stall;
         struct kgsl_process_private *private;
         struct kgsl_context *context;
 
@@ -1219,14 +1219,23 @@ static int kgsl_iommu_fault_handler(struct kgsl_mmu *mmu,
         context = kgsl_context_get(device, contextidr);
 
         stall = kgsl_iommu_check_stall_on_fault(ctx, mmu, flags);
-        terminate = test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &mmu->pfpolicy) &&
-                test_bit(KGSL_MMU_PAGEFAULT_TERMINATE, &mmu->features);
 
+        atomic_inc(&device->gpu_exception_count[GPU_PAGE_FAULT]);
+
         kgsl_iommu_print_fault(mmu, ctx, addr, ptbase, contextidr, flags, private,
                 context);
         kgsl_iommu_add_fault_info(context, addr, flags);
+
+#if IS_ENABLED(CONFIG_SEC_ABC)
+#if IS_ENABLED(CONFIG_SEC_FACTORY)
+        sec_abc_send_event("MODULE=gpu_qc@INFO=gpu_page_fault");
+#else
+        sec_abc_send_event("MODULE=gpu_qc@WARN=gpu_page_fault");
+#endif
+#endif
 
-        if (stall || terminate) {
+        if (stall) {
+                struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
                 u32 sctlr;
 
                 /*
@@ -1237,18 +1246,11 @@ static int kgsl_iommu_fault_handler(struct kgsl_mmu *mmu,
                 sctlr &= ~(0x1 << KGSL_IOMMU_SCTLR_CFIE_SHIFT);
                 KGSL_IOMMU_SET_CTX_REG(ctx, KGSL_IOMMU_CTX_SCTLR, sctlr);
 
                 /* Make sure the above write goes through before we return */
                 wmb();
-
-                /* This is used by reset/recovery path */
-                if (stall) {
-                        struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
-                        ctx->stalled_on_fault = true;
-
-                        /* Go ahead with recovery*/
-                        adreno_scheduler_fault(adreno_dev, ADRENO_IOMMU_STALL_ON_PAGE_FAULT);
-                }
+                ctx->stalled_on_fault = true;
+                /* Go ahead with recovery*/
+                adreno_scheduler_fault(adreno_dev, ADRENO_IOMMU_PAGE_FAULT);
         }
 
         kgsl_context_put(context);
@@ -1258,11 +1260,11 @@ static int kgsl_iommu_fault_handler(struct kgsl_mmu *mmu,
          * Fallback to smmu fault handler during globals faults to print useful
          * debug information.
          */
-        if ((!(stall || terminate)) && kgsl_iommu_addr_is_global(mmu, addr))
+        if (!stall && kgsl_iommu_addr_is_global(mmu, addr))
                 return -ENOSYS;
 
-        /* Return -EBUSY to keep the IOMMU driver from resuming on a stall or terminate */
-        return (stall || terminate) ? -EBUSY : 0;
+        /* Return -EBUSY to keep the IOMMU driver from resuming on a stall */
+        return stall ? -EBUSY : 0;
 }
 
 static int kgsl_iommu_default_fault_handler(struct iommu_domain *domain,
@@ -1802,48 +1804,6 @@ static void kgsl_iommu_configure_gpu_sctlr(struct kgsl_mmu *mmu,
         KGSL_IOMMU_SET_CTX_REG(ctx, KGSL_IOMMU_CTX_SCTLR, sctlr_val);
 }
 
-static bool _ctx_terminated_on_fault(struct kgsl_mmu *mmu, struct kgsl_iommu_context *ctx)
-{
-        u32 fsr;
-
-        /*
-         * We only need this if SMMU is configured to be in TERMINATE mode in the presence of an
-         * outstanding fault
-         */
-        if (!test_bit(KGSL_MMU_PAGEFAULT_TERMINATE, &mmu->features) ||
-                !test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE, &mmu->pfpolicy))
-                return false;
-
-        kgsl_iommu_enable_clk(mmu);
-
-        fsr = KGSL_IOMMU_GET_CTX_REG(ctx, KGSL_IOMMU_CTX_FSR);
-
-        /* Make sure the above read finishes before we compare it */
-        rmb();
-
-        kgsl_iommu_disable_clk(mmu);
-
-        /* See if there is an outstanding fault */
-        if (fsr & ~KGSL_IOMMU_FSR_TRANSLATION_FORMAT_MASK)
-                return true;
-
-        return false;
-}
-
-/*
- * kgsl_iommu_ctx_terminated_on_fault - Detect if GC SMMU is terminating transactions in the
- * presense of an outstanding fault.
- */
-static bool kgsl_iommu_ctx_terminated_on_fault(struct kgsl_mmu *mmu)
-{
-        struct kgsl_iommu *iommu = &mmu->iommu;
-
-        if (_ctx_terminated_on_fault(mmu, &iommu->user_context))
-                return true;
-
-        return false;
-}
-
 static int kgsl_iommu_start(struct kgsl_mmu *mmu)
 {
         struct kgsl_iommu *iommu = &mmu->iommu;
@@ -1881,13 +1841,8 @@ static void kgsl_iommu_context_clear_fsr(struct kgsl_mmu *mmu, struct kgsl_iommu
 {
         unsigned int sctlr_val;
 
-        if (ctx->stalled_on_fault || _ctx_terminated_on_fault(mmu, ctx)) {
-                struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
-
+        if (ctx->stalled_on_fault) {
                 kgsl_iommu_enable_clk(mmu);
-
-                dev_err_ratelimited(device->dev, "Clearing pagefault bits in FSR\n");
-
                 KGSL_IOMMU_SET_CTX_REG(ctx, KGSL_IOMMU_CTX_FSR, 0xffffffff);
                 /*
                  * Re-enable context fault interrupts after clearing
@@ -1903,8 +1858,7 @@ static void kgsl_iommu_context_clear_fsr(struct kgsl_mmu *mmu, struct kgsl_iommu
                  */
                 wmb();
                 kgsl_iommu_disable_clk(mmu);
-                if (ctx->stalled_on_fault)
-                        ctx->stalled_on_fault = false;
+                ctx->stalled_on_fault = false;
         }
 }
 
@@ -3001,7 +2955,6 @@ static void kgsl_iommu_sysfs_init(struct kgsl_mmu *mmu)
 static const struct kgsl_mmu_ops kgsl_iommu_ops = {
         .mmu_close = kgsl_iommu_close,
         .mmu_start = kgsl_iommu_start,
-        .mmu_ctx_terminated_on_fault = kgsl_iommu_ctx_terminated_on_fault,
         .mmu_clear_fsr = kgsl_iommu_clear_fsr,
         .mmu_get_current_ttbr0 = kgsl_iommu_get_current_ttbr0,
         .mmu_enable_clk = kgsl_iommu_enable_clk,
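For context, the Samsung-specific piece this diff adds to kgsl_iommu_fault_handler() is the SEC ABC event report guarded by CONFIG_SEC_ABC / CONFIG_SEC_FACTORY in the hunk at -1219,14 +1219,23. A minimal sketch of that reporting pattern follows; the helper name and the include path are assumptions for illustration only, while the sec_abc_send_event() strings and the Kconfig guards are taken verbatim from the diff above.

#include <linux/kconfig.h>
#if IS_ENABLED(CONFIG_SEC_ABC)
#include <linux/sti/abc_common.h>       /* assumed location of sec_abc_send_event() */
#endif

/* Hypothetical helper mirroring the reporting added in the fault handler above. */
static void report_gpu_page_fault(void)
{
#if IS_ENABLED(CONFIG_SEC_ABC)
#if IS_ENABLED(CONFIG_SEC_FACTORY)
        /* Factory binaries log the page fault as an informational ABC event */
        sec_abc_send_event("MODULE=gpu_qc@INFO=gpu_page_fault");
#else
        /* User binaries raise it as a warning-level ABC event */
        sec_abc_send_event("MODULE=gpu_qc@WARN=gpu_page_fault");
#endif
#endif
}

Splitting the severity this way presumably keeps factory test runs from tripping warning-level health reporting while still recording every GPU page fault on user builds.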