Add Samsung-specific changes

This commit is contained in:
2025-08-11 14:29:00 +02:00
parent c66122e619
commit 4d134a1294
2688 changed files with 1127995 additions and 11475 deletions

View File

@@ -8,6 +8,17 @@
# Santosh Yaraganavi <santosh.sy@samsung.com>
# Vinayak Holikatti <h.vinayak@samsung.com>
config UFS_DBG
	bool "Universal Flash Storage Debug Mode"
	depends on SCSI_UFSHCD
	help
	  This selects the debug mode for ufs vendor driver.
	  Enables tracing for QTI ufs driver.
	  This also enables ftrace for ufs core and SCSI.
	  Enabling this would have an adverse performance impact on disk IO.
	  If unsure, say N.
config SCSI_UFSHCD_PCI
tristate "PCI bus based UFS Controller support"
depends on PCI
@@ -142,3 +153,33 @@ config SCSI_UFS_SPRD
Select this if you have UFS controller on Unisoc chipset.
If unsure, say N.
config SCSI_UFS_CRYPTO_QTI
	tristate "UFS Crypto Engine Support for QCOM Hardware Wrapped Keys"
	depends on SCSI_UFS_CRYPTO && SCSI_UFS_QCOM && QCOM_INLINE_CRYPTO_ENGINE
	help
	  Enable Crypto Engine Support in UFS for QCOM chipsets supporting
	  wrapped keys. This is explicitly required until wrapped keys support
	  for QCOM chipsets is part of Linux upstream.
	  Select this to enable QCOM Hardware Wrapped Key support for the
	  ICE hardware.
config SEC_UFS_FEATURE
	bool "SEC specific UFS feature"
	default n
	depends on SCSI_UFSHCD
	help
	  Enable Samsung feature support
	  Enabling this allows kernel to use SEC specific feature
	  defined and implemented by SEC.
config SCSI_UFS_TEST_MODE
	bool "Samsung UFS TEST feature"
	default n
	depends on SEC_UFS_FEATURE
	help
	  This selects support for test mode for debugging.
	  Select this option if this feature is needed on working.
	  If unsure, say N.

View File

@@ -3,7 +3,13 @@
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
# With SEC_UFS_FEATURE enabled, build the QCOM UFS driver as a composite
# "ufs_qcom" module that also links in the Samsung-specific feature and
# sysfs objects; otherwise build the stock single-object ufs-qcom driver.
ifeq ($(CONFIG_SEC_UFS_FEATURE),y)
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs_qcom.o
ufs_qcom-y = ufs-qcom.o ufs-sec-feature.o ufs-sec-sysfs.o
else
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
endif
obj-$(CONFIG_SCSI_UFS_CRYPTO_QTI) += ufshcd-crypto-qti.o
obj-$(CONFIG_SCSI_UFS_EXYNOS) += ufs-exynos.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o

View File

@@ -0,0 +1,349 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

/* ftrace tracepoint definitions for the Qualcomm UFS host driver. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ufsqcom

#if !defined(_TRACE_UFS_QCOM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_UFS_QCOM_H

#include <linux/tracepoint.h>

/* Render common SCSI opcodes symbolically in trace output. */
#define str_opcode(opcode) \
	__print_symbolic(opcode, \
		{ WRITE_16, "WRITE_16" }, \
		{ WRITE_10, "WRITE_10" }, \
		{ READ_16, "READ_16" }, \
		{ READ_10, "READ_10" }, \
		{ SYNCHRONIZE_CACHE, "SYNC" }, \
		{ UNMAP, "UNMAP" })

/* Enum -> string tables used below via __print_symbolic(). */
#define UFS_NOTIFY_CHANGE_STATUS \
	EM(PRE_CHANGE, "PRE_CHANGE") \
	EMe(POST_CHANGE, "POST_CHANGE")

#define UFS_PM_OP \
	EM(UFS_RUNTIME_PM, "UFS_RUNTIME_PM") \
	EM(UFS_SYSTEM_PM, "UFS_SYSTEM_PM") \
	EMe(UFS_SHUTDOWN_PM, "UFS_SHUTDOWN_PM")

#define UFS_LINK_STATES \
	EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \
	EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \
	EM(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE") \
	EMe(UIC_LINK_BROKEN_STATE, "UIC_LINK_BROKEN_STATE")

#define UFS_PWR_MODES \
	EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \
	EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \
	EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \
	EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE")

#define UFS_CMD_TRACE_STRINGS \
	EM(UFS_CMD_SEND, "send_req") \
	EM(UFS_CMD_COMP, "complete_rsp") \
	EM(UFS_DEV_COMP, "dev_complete") \
	EM(UFS_QUERY_SEND, "query_send") \
	EM(UFS_QUERY_COMP, "query_complete") \
	EM(UFS_QUERY_ERR, "query_complete_err") \
	EM(UFS_TM_SEND, "tm_send") \
	EM(UFS_TM_COMP, "tm_complete") \
	EMe(UFS_TM_ERR, "tm_complete_err")

/* Enums require being exported to userspace, for user tool parsing */
#undef EM
#undef EMe
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define EMe(a, b) TRACE_DEFINE_ENUM(a);

/* First expansion: register each enum value with the trace infrastructure. */
UFS_NOTIFY_CHANGE_STATUS;
UFS_PM_OP;
UFS_LINK_STATES;
UFS_PWR_MODES;
UFS_CMD_TRACE_STRINGS;

/*
 * Now redefine the EM() and EMe() macros to map the enums to the strings
 * that will be printed in the output.
 */
#undef EM
#undef EMe
#define EM(a, b) {a, b},
#define EMe(a, b) {a, b}
/*
 * ufs_qcom_pwr_change_notify - traces the power-mode-change callback,
 * recording the notification stage (PRE_/POST_CHANGE), the requested RX
 * gear/power mode, the HS rate and the callback's return code.
 * NOTE(review): gear_rx/pwr_rx/hs_rate are u32 but printed with %d —
 * harmless for the small values involved, but %u would be exact.
 */
TRACE_EVENT(ufs_qcom_pwr_change_notify,

	TP_PROTO(const char *dev_name, int status, u32 gear_rx, u32 pwr_rx,
		u32 hs_rate, int err),

	TP_ARGS(dev_name, status, gear_rx, pwr_rx, hs_rate, err),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__field(int, status)
		__field(u32, gear_rx)
		__field(u32, pwr_rx)
		__field(u32, hs_rate)
		__field(int, err)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__entry->status = status;
		__entry->gear_rx = gear_rx;
		__entry->pwr_rx = pwr_rx;
		__entry->hs_rate = hs_rate;
		__entry->err = err;
	),

	TP_printk("%s: status = %s, gear_rx = %d, pwr_rx = %d, hs_rate = %d, err = %d",
		__get_str(dev_name),
		__print_symbolic(__entry->status, UFS_NOTIFY_CHANGE_STATUS),
		__entry->gear_rx,
		__entry->pwr_rx,
		__entry->hs_rate,
		__entry->err)
);

/*
 * ufs_qcom_command - traces one command lifecycle event (send/complete,
 * query, task management - see UFS_CMD_TRACE_STRINGS) with its tag,
 * doorbell snapshot, transfer size and SCSI opcode.
 */
TRACE_EVENT(ufs_qcom_command,

	TP_PROTO(const char *dev_name, int cmd_t, u8 opcode,
		unsigned int tag, u32 doorbell, int size),

	TP_ARGS(dev_name, cmd_t, opcode, tag, doorbell, size),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__field(int, cmd_t)
		__field(u8, opcode)
		__field(int, tag)
		__field(u32, doorbell)
		__field(int, size)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__entry->cmd_t = cmd_t;
		__entry->opcode = opcode;
		__entry->tag = tag;
		__entry->doorbell = doorbell;
		__entry->size = size;
	),

	TP_printk(
		"%s: %s: tag: %d, DB: 0x%x, size: %d, opcode: 0x%x (%s)",
		__get_str(dev_name),
		__print_symbolic(__entry->cmd_t, UFS_CMD_TRACE_STRINGS),
		__entry->tag,
		__entry->doorbell,
		__entry->size,
		(u32)__entry->opcode,
		str_opcode(__entry->opcode)
	)
);
/*
 * ufs_qcom_uic - traces a UIC command (send/complete) with its command
 * word and the three UIC argument registers.
 */
TRACE_EVENT(ufs_qcom_uic,
	TP_PROTO(const char *dev_name, int cmd_t, u32 cmd,
		u32 arg1, u32 arg2, u32 arg3),

	TP_ARGS(dev_name, cmd_t, cmd, arg1, arg2, arg3),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__field(int, cmd_t)
		__field(u32, cmd)
		__field(u32, arg1)
		__field(u32, arg2)
		__field(u32, arg3)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__entry->cmd_t = cmd_t;
		__entry->cmd = cmd;
		__entry->arg1 = arg1;
		__entry->arg2 = arg2;
		__entry->arg3 = arg3;
	),

	TP_printk(
		"%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
		__get_str(dev_name),
		__print_symbolic(__entry->cmd_t, UFS_CMD_TRACE_STRINGS),
		__entry->cmd,
		__entry->arg1,
		__entry->arg2,
		__entry->arg3
	)
);

/*
 * ufs_qcom_hook_check_int_errors - traces the interrupt-error check hook,
 * recording the controller error status and UIC error value.
 */
TRACE_EVENT(ufs_qcom_hook_check_int_errors,
	TP_PROTO(const char *dev_name, u32 err, u32 uic_err),

	TP_ARGS(dev_name, err, uic_err),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__field(u32, err)
		__field(u32, uic_err)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__entry->err = err;
		__entry->uic_err = uic_err;
	),

	TP_printk(
		"%s: err: 0x%x, uic_err: 0x%x",
		__get_str(dev_name),
		__entry->err,
		__entry->uic_err
	)
);

/* ufs_qcom_shutdown - marks entry into the controller shutdown path. */
TRACE_EVENT(ufs_qcom_shutdown,
	TP_PROTO(const char *dev_name),

	TP_ARGS(dev_name),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
	),

	TP_printk(
		"%s: Going to Shutdown!",
		__get_str(dev_name)
	)
);
/*
 * ufs_qcom_clk_template - event class shared by the clock callbacks
 * (setup_clocks and clk_scale_notify), logging the PRE_/POST_CHANGE
 * stage, the on/off (or scale-up/down) flag and the return code.
 */
DECLARE_EVENT_CLASS(ufs_qcom_clk_template,
	TP_PROTO(const char *dev_name, int status, bool on, int err),

	TP_ARGS(dev_name, status, on, err),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__field(int, status)
		__field(bool, on)
		__field(int, err)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__entry->status = status;
		__entry->on = on;
		__entry->err = err;
	),

	TP_printk("%s: status = %s, on= %d, err = %d",
		__get_str(dev_name),
		__print_symbolic(__entry->status, UFS_NOTIFY_CHANGE_STATUS),
		__entry->on,
		__entry->err)
);

DEFINE_EVENT(ufs_qcom_clk_template, ufs_qcom_setup_clocks,
	TP_PROTO(const char *dev_name, int status, bool on, int err),
	TP_ARGS(dev_name, status, on, err));

DEFINE_EVENT(ufs_qcom_clk_template, ufs_qcom_clk_scale_notify,
	TP_PROTO(const char *dev_name, int status, bool on, int err),
	TP_ARGS(dev_name, status, on, err));
/*
 * ufs_qcom_notify_template - event class shared by the PRE_/POST_CHANGE
 * notification callbacks (HCE enable and link startup), logging the
 * notification stage and the callback's return code.
 *
 * Renamed from the misspelled "ufs_qcom_noify_template"; the class name
 * is local to this header's DEFINE_EVENT users, so the generated trace
 * event names are unchanged.
 */
DECLARE_EVENT_CLASS(ufs_qcom_notify_template,
	TP_PROTO(const char *dev_name, int status, int err),

	TP_ARGS(dev_name, status, err),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__field(int, status)
		__field(int, err)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__entry->status = status;
		__entry->err = err;
	),

	TP_printk("%s: status = %s, err = %d",
		__get_str(dev_name),
		__print_symbolic(__entry->status, UFS_NOTIFY_CHANGE_STATUS),
		__entry->err)
);

DEFINE_EVENT(ufs_qcom_notify_template, ufs_qcom_hce_enable_notify,
	TP_PROTO(const char *dev_name, int status, int err),
	TP_ARGS(dev_name, status, err));

DEFINE_EVENT(ufs_qcom_notify_template, ufs_qcom_link_startup_notify,
	TP_PROTO(const char *dev_name, int status, int err),
	TP_ARGS(dev_name, status, err));
/*
 * ufs_qcom_pm_template - event class for suspend/resume tracing: records
 * the PM operation type, runtime/system PM levels, UIC link state, device
 * power mode and the operation's return code.
 */
DECLARE_EVENT_CLASS(ufs_qcom_pm_template,
	TP_PROTO(const char *dev_name, int pm_op, int rpm_lvl, int spm_lvl,
		int uic_link_state, int curr_dev_pwr_mode, int err),

	TP_ARGS(dev_name, pm_op, rpm_lvl, spm_lvl, uic_link_state,
		curr_dev_pwr_mode, err),

	TP_STRUCT__entry(
		__string(dev_name, dev_name)
		__field(int, pm_op)
		__field(int, rpm_lvl)
		__field(int, spm_lvl)
		__field(int, uic_link_state)
		__field(int, curr_dev_pwr_mode)
		__field(int, err)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name);
		__entry->pm_op = pm_op;
		__entry->rpm_lvl = rpm_lvl;
		__entry->spm_lvl = spm_lvl;
		__entry->uic_link_state = uic_link_state;
		__entry->curr_dev_pwr_mode = curr_dev_pwr_mode;
		__entry->err = err;
	),

	TP_printk(
		"%s: pm_op = %s, rpm_lvl = %d, spm_lvl = %d, link_state = %s, dev_pwr_mode = %s, err = %d",
		__get_str(dev_name),
		__print_symbolic(__entry->pm_op, UFS_PM_OP),
		__entry->rpm_lvl,
		__entry->spm_lvl,
		__print_symbolic(__entry->uic_link_state, UFS_LINK_STATES),
		__print_symbolic(__entry->curr_dev_pwr_mode, UFS_PWR_MODES),
		__entry->err
	)
);

DEFINE_EVENT(ufs_qcom_pm_template, ufs_qcom_suspend,
	TP_PROTO(const char *dev_name, int pm_op, int rpm_lvl, int spm_lvl,
		int uic_link_state, int curr_dev_pwr_mode, int err),
	TP_ARGS(dev_name, pm_op, rpm_lvl, spm_lvl, uic_link_state,
		curr_dev_pwr_mode, err));

DEFINE_EVENT(ufs_qcom_pm_template, ufs_qcom_resume,
	TP_PROTO(const char *dev_name, int pm_op, int rpm_lvl, int spm_lvl,
		int uic_link_state, int curr_dev_pwr_mode, int err),
	TP_ARGS(dev_name, pm_op, rpm_lvl, spm_lvl, uic_link_state,
		curr_dev_pwr_mode, err));
#endif /* if !defined(_TRACE_UFS_QCOM_H) || defined(TRACE_HEADER_MULTI_READ) */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/ufs/host
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE ufs-qcom-trace
/* This part must be outside protection */
#include <trace/define_trace.h>

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef UFS_QCOM_H_
@@ -7,12 +8,18 @@
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/phy/phy.h>
#include <linux/pm_qos.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <soc/qcom/ice.h>
#include <ufs/ufshcd.h>
#include <ufs/unipro.h>
#define MAX_UFS_QCOM_HOSTS 1
#define MAX_UFS_QCOM_HOSTS 2
#define MAX_U32 (~(u32)0)
#define MPHY_TX_FSM_STATE 0x41
#define MPHY_RX_FSM_STATE 0xC1
#define TX_FSM_HIBERN8 0x1
#define HBRN8_POLL_TOUT_MS 100
#define DEFAULT_CLK_RATE_HZ 1000000
@@ -23,11 +30,42 @@
#define UFS_HW_VER_MINOR_MASK GENMASK(27, 16)
#define UFS_HW_VER_STEP_MASK GENMASK(15, 0)
/* vendor specific pre-defined parameters */
#define UFS_VENDOR_MICRON 0x12C
#define SLOW 1
#define FAST 2
enum ufs_qcom_phy_submode {
UFS_QCOM_PHY_SUBMODE_NON_G4,
UFS_QCOM_PHY_SUBMODE_G4,
UFS_QCOM_PHY_SUBMODE_G5,
};
enum ufs_qcom_ber_mode {
UFS_QCOM_BER_MODE_G1_G4,
UFS_QCOM_BER_MODE_G5,
UFS_QCOM_BER_MODE_MAX,
};
#define UFS_QCOM_LIMIT_NUM_LANES_RX 2
#define UFS_QCOM_LIMIT_NUM_LANES_TX 2
#define UFS_QCOM_LIMIT_HSGEAR_RX UFS_HS_G4
#define UFS_QCOM_LIMIT_HSGEAR_TX UFS_HS_G4
#define UFS_QCOM_LIMIT_PWMGEAR_RX UFS_PWM_G4
#define UFS_QCOM_LIMIT_PWMGEAR_TX UFS_PWM_G4
#define UFS_QCOM_LIMIT_RX_PWR_PWM SLOW_MODE
#define UFS_QCOM_LIMIT_TX_PWR_PWM SLOW_MODE
#define UFS_QCOM_LIMIT_RX_PWR_HS FAST_MODE
#define UFS_QCOM_LIMIT_TX_PWR_HS FAST_MODE
#define UFS_QCOM_LIMIT_HS_RATE PA_HS_MODE_B
#define UFS_QCOM_LIMIT_DESIRED_MODE FAST
#define UFS_QCOM_LIMIT_PHY_SUBMODE UFS_QCOM_PHY_SUBMODE_G4
#define UFS_MEM_REG_PA_ERR_CODE 0xCC
/* default value of auto suspend is 3 seconds */
#define UFS_QCOM_AUTO_SUSPEND_DELAY 3000
#define UFS_QCOM_CLK_GATING_DELAY_MS_PWR_SAVE 20
#define UFS_QCOM_CLK_GATING_DELAY_MS_PERF 50
/* QCOM UFS host controller vendor specific registers */
enum {
@@ -54,6 +92,9 @@ enum {
* added in HW Version 3.0.0
*/
UFS_AH8_CFG = 0xFC,
UFS_RD_REG_MCQ = 0xD00,
UFS_MEM_ICE = 0x2600,
REG_UFS_DEBUG_SPARE_CFG = 0x284C,
REG_UFS_CFG3 = 0x271C,
};
@@ -69,12 +110,23 @@ enum {
UFS_DBG_RD_REG_TMRLUT = 0x700,
UFS_UFS_DBG_RD_REG_OCSC = 0x800,
UFS_UFS_DBG_RAM_CTL = 0x1000,
UFS_UFS_DBG_RAM_RD_FATA_DWn = 0x1024,
UFS_UFS_DBG_RD_DESC_RAM = 0x1500,
UFS_UFS_DBG_RD_PRDT_RAM = 0x1700,
UFS_UFS_DBG_RD_RESP_RAM = 0x1800,
UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
};
/* QCOM UFS host controller vendor specific H8 count registers */
enum {
REG_UFS_HW_H8_ENTER_CNT = 0x2700,
REG_UFS_SW_H8_ENTER_CNT = 0x2704,
REG_UFS_SW_AFTER_HW_H8_ENTER_CNT = 0x2708,
REG_UFS_HW_H8_EXIT_CNT = 0x270C,
REG_UFS_SW_H8_EXIT_CNT = 0x2710,
};
enum {
UFS_MEM_CQIS_VS = 0x8,
};
@@ -84,6 +136,7 @@ enum {
/* bit definitions for REG_UFS_CFG0 register */
#define QUNIPRO_G4_SEL BIT(5)
#define HCI_UAWM_OOO_DIS BIT(0)
/* bit definitions for REG_UFS_CFG1 register */
#define QUNIPRO_SEL BIT(0)
@@ -106,10 +159,6 @@ enum {
#define TMRLUT_HW_CGC_EN BIT(6)
#define OCSC_HW_CGC_EN BIT(7)
/* bit definitions for REG_UFS_PARAM0 */
#define MAX_HS_GEAR_MASK GENMASK(6, 4)
#define UFS_QCOM_MAX_GEAR(x) FIELD_GET(MAX_HS_GEAR_MASK, (x))
/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
#define TEST_BUS_SUB_SEL_MASK GENMASK(4, 0) /* All XXX_SEL fields are 5 bits wide */
@@ -118,20 +167,101 @@ enum {
DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
/* bit definitions for UFS_AH8_CFG register */
#define CC_UFS_HCLK_REQ_EN BIT(1)
#define CC_UFS_SYS_CLK_REQ_EN BIT(2)
#define CC_UFS_ICE_CORE_CLK_REQ_EN BIT(3)
#define CC_UFS_UNIPRO_CORE_CLK_REQ_EN BIT(4)
#define CC_UFS_AUXCLK_REQ_EN BIT(5)
#define UNUSED_UNIPRO_CORE_CGC_EN BIT(11)
#define UNUSED_UNIPRO_SYMB_CGC_EN BIT(12)
#define UNUSED_UNIPRO_CLK_GATED (UNUSED_UNIPRO_CORE_CGC_EN |\
UNUSED_UNIPRO_SYMB_CGC_EN)
#define UFS_HW_CLK_CTRL_EN (CC_UFS_SYS_CLK_REQ_EN |\
CC_UFS_ICE_CORE_CLK_REQ_EN |\
CC_UFS_UNIPRO_CORE_CLK_REQ_EN |\
CC_UFS_AUXCLK_REQ_EN)
/* UFS_MEM_PARAM0 register */
#define UFS_MAX_HS_GEAR_SHIFT (4)
#define UFS_MAX_HS_GEAR_MASK (0x7 << UFS_MAX_HS_GEAR_SHIFT)
#define UFS_QCOM_MAX_HS_GEAR(x) (((x) & UFS_MAX_HS_GEAR_MASK) >>\
UFS_MAX_HS_GEAR_SHIFT)
/* bit offset */
#define OFFSET_CLK_NS_REG 0xa
/* bit masks */
enum {
MASK_UFS_PHY_SOFT_RESET = 0x2,
};
enum ufs_qcom_phy_init_type {
UFS_PHY_INIT_FULL,
UFS_PHY_INIT_CFG_RESTORE,
};
/* QCOM UFS debug print bit mask */
#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0)
#define MASK_TX_SYMBOL_CLK_1US_REG GENMASK(9, 0)
#define MASK_CLK_NS_REG GENMASK(23, 10)
/* QUniPro Vendor specific attributes */
#define PA_VS_CONFIG_REG1 0x9000
#define DME_VS_CORE_CLK_CTRL 0xD002
#define BIT_TX_EOB_COND BIT(23)
#define PA_VS_CONFIG_REG2 0x9005
#define H8_ENTER_COND_OFFSET 0x6
#define H8_ENTER_COND_MASK GENMASK(7, 6)
#define BIT_RX_EOB_COND BIT(5)
#define BIT_LINKCFG_WAIT_LL1_RX_CFG_RDY BIT(26)
#define SAVECONFIGTIME_MODE_MASK 0x6000
#define DME_VS_CORE_CLK_CTRL 0xD002
#define TX_HS_EQUALIZER 0x0037
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF
#define PA_VS_CLK_CFG_REG 0x9004
#define PA_VS_CLK_CFG_REG_MASK 0x1FF
#define PA_VS_CORE_CLK_40NS_CYCLES 0x9007
#define PA_VS_CORE_CLK_40NS_CYCLES_MASK 0x3F
#define DL_VS_CLK_CFG 0xA00B
#define DL_VS_CLK_CFG_MASK 0x3FF
#define DME_VS_CORE_CLK_CTRL 0xD002
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK_V4 0xFFF
#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_OFFSET_V4 0x10
#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF
#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9)
/* Device Quirks */
/*
* Some ufs devices may need more time to be in hibern8 before exiting.
* Enable this quirk to give it an additional 100us.
*/
#define UFS_DEVICE_QUIRK_PA_HIBER8TIME (1 << 15)
/*
* Some ufs device vendors need a different TSync length.
* Enable this quirk to give an additional TX_HS_SYNC_LENGTH.
*/
#define UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH (1 << 16)
/*
* Some ufs device vendors need a different Deemphasis setting.
* Enable this quirk to tune TX Deemphasis parameters.
*/
#define UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING (1 << 17)
static inline void
ufs_qcom_get_controller_revision(struct ufs_hba *hba,
u8 *major, u16 *minor, u16 *step)
@@ -167,6 +297,11 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
mb();
}
struct ufs_qcom_bus_vote {
bool is_max_bw_needed;
struct device_attribute max_bus_bw;
};
/* Host controller hardware version: major.minor.step */
struct ufs_hw_version {
u16 step;
@@ -181,6 +316,181 @@ struct ufs_qcom_testbus {
struct gpio_desc;
struct qos_cpu_group {
cpumask_t mask;
unsigned int *votes;
struct dev_pm_qos_request *qos_req;
bool voted;
struct work_struct vwork;
struct ufs_qcom_host *host;
unsigned int curr_vote;
bool perf_core;
};
struct ufs_qcom_qos_req {
struct qos_cpu_group *qcg;
unsigned int num_groups;
struct workqueue_struct *workq;
};
/* Check for QOS_POWER when added to DT */
enum constraint {
QOS_PERF,
QOS_POWER,
QOS_MAX,
};
enum ufs_qcom_therm_lvl {
UFS_QCOM_LVL_NO_THERM, /* No thermal mitigation */
UFS_QCOM_LVL_AGGR_THERM, /* Aggressive thermal mitigation */
UFS_QCOM_LVL_MAX_THERM, /* Max thermal mitigation */
};
struct ufs_qcom_thermal {
struct thermal_cooling_device *tcd;
unsigned long curr_state;
};
/* Algorithm Selection */
#define STATIC_ALLOC_ALG1 0x0
#define FLOOR_BASED_ALG2 BIT(0)
#define INSTANTANEOUS_ALG3 BIT(1)
enum {
REG_UFS_MEM_ICE_NUM_AES_CORES = 0x2608,
REG_UFS_MEM_SHARED_ICE_CONFIG = 0x260C,
REG_UFS_MEM_SHARED_ICE_ALG1_NUM_CORE = 0x2610,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_CORE_0 = 0x2614,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_TASK_0 = 0x2618,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_CORE_1 = 0x261C,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_TASK_1 = 0x2620,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_CORE_2 = 0x2624,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_TASK_2 = 0x2628,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_CORE_3 = 0x262C,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_TASK_3 = 0x2630,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_CORE_4 = 0x2634,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_TASK_4 = 0x2638,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_CORE_5 = 0x263C,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_TASK_5 = 0x2640,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_CORE_6 = 0x2644,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_TASK_6 = 0x2648,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_CORE_7 = 0x264C,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_TASK_7 = 0x2650,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_CORE_8 = 0x2654,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_TASK_8 = 0x2658,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_CORE_9 = 0x265C,
REG_UFS_MEM_SHARED_ICE_ALG2_NUM_TASK_9 = 0x2660,
REG_UFS_MEM_SHARED_ICE_ALG3_NUM_CORE = 0x2664,
};
struct shared_ice_alg2_config {
/* group names */
char name[3];
/*
* num_core_tx_stream, num_core_rx_stream, num_wr_task_max,
* num_wr_task_min, num_rd_task_max, num_rd_task_min
*/
unsigned int val[6];
};
/*
 * Default overrides for the floor-based (ALG2) shared-ICE algorithm:
 * ten parameter groups G0..G9. Each entry holds, in order:
 * num_core_tx_stream, num_core_rx_stream, num_wr_task_max,
 * num_wr_task_min, num_rd_task_max, num_rd_task_min
 * (see struct shared_ice_alg2_config).
 * NOTE(review): defined in a header, so every including TU gets its own
 * copy of this table — consider moving the definition to a .c file.
 */
static struct shared_ice_alg2_config alg2_config[] = {
	{"G0", {5, 12, 0, 0, 32, 0}},
	{"G1", {12, 5, 32, 0, 0, 0}},
	{"G2", {6, 11, 4, 1, 32, 1}},
	{"G3", {6, 11, 7, 1, 32, 1}},
	{"G4", {7, 10, 11, 1, 32, 1}},
	{"G5", {7, 10, 14, 1, 32, 1}},
	{"G6", {8, 9, 18, 1, 32, 1}},
	{"G7", {9, 8, 21, 1, 32, 1}},
	{"G8", {10, 7, 24, 1, 32, 1}},
	{"G9", {10, 7, 32, 1, 32, 1}},
};
/*
 * __get_alg2_grp_params - pack one shared_ice_alg2_config entry into the
 * register values used by the floor-based (ALG2) shared-ICE programming.
 * @val: the six per-group parameters (see struct shared_ice_alg2_config)
 * @c:   packed core-count value; bit 31 is always set (presumably a
 *       valid/enable bit — TODO confirm against the register spec)
 * @t:   packed task-count value (wr_max:wr_min:rd_max:rd_min, 8 bits each)
 *
 * Fix: the original used (1 << 31), which left-shifts a signed int into
 * the sign bit — undefined behavior in C. Use 1U and cast the result.
 */
static inline void __get_alg2_grp_params(unsigned int *val, int *c, int *t)
{
	*c = (int)((val[0] << 8) | val[1] | (1U << 31));
	*t = (int)((val[2] << 24) | (val[3] << 16) | (val[4] << 8) | val[5]);
}
/*
 * get_alg2_grp_params - look up group @group in the alg2_config table and
 * pack its parameters into the @core / @task register values.
 * NOTE(review): @group is not range-checked against ARRAY_SIZE(alg2_config);
 * callers must pass 0..9.
 */
static inline void get_alg2_grp_params(unsigned int group, int *core, int *task)
{
	struct shared_ice_alg2_config *p = &alg2_config[group];

	__get_alg2_grp_params(p->val, core, task);
}
/**
 * struct ufs_qcom_ber_hist - record the detail of each BER event.
 * @pos: index of event.
 * @uec_pa: PA error type.
 * @err_code: error code, only needed for PA error.
 * @gear: the gear info when PHY PA occurs.
 * @tstamp: record timestamp.
 * @run_time: valid running time since last event.
 * @full_time: total time since last event.
 * @cnt: total error count.
 * @name: mode name.
 */
struct ufs_qcom_ber_hist {
#define UFS_QCOM_EVT_LEN 32
	int pos;
	u32 uec_pa[UFS_QCOM_EVT_LEN];
	u32 err_code[UFS_QCOM_EVT_LEN];
	u32 gear[UFS_QCOM_EVT_LEN];
	ktime_t tstamp[UFS_QCOM_EVT_LEN];
	s64 run_time[UFS_QCOM_EVT_LEN];
	s64 full_time[UFS_QCOM_EVT_LEN];
	u32 cnt;
	char *name;
};

/* Per-mode BER threshold (one entry per enum ufs_qcom_ber_mode). */
struct ufs_qcom_ber_table {
	enum ufs_qcom_ber_mode mode;
	u32 ber_threshold;
};

/* A saved register dump: @len words captured under @prefix, kept on a list. */
struct ufs_qcom_regs {
	struct list_head list;
	const char *prefix;
	u32 *ptr;
	size_t len;
};
/**
 * struct cpu_freq_info - keep CPUs frequency info
 * @cpu: the cpu to bump up when requests on perf core exceeds the threshold
 * @min_cpu_scale_freq: the minimal frequency of the cpu
 * @max_cpu_scale_freq: the maximal frequency of the cpu
 */
struct cpu_freq_info {
	u32 cpu;
	unsigned int min_cpu_scale_freq;
	unsigned int max_cpu_scale_freq;
};

/*
 * Host-side power/link capability limits used when negotiating the power
 * mode with the device (see the UFS_QCOM_LIMIT_* defaults above).
 */
struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	int phy_submode;	/* gear number */
	u32 desired_working_mode;
};
struct ufs_qcom_host {
/*
* Set this capability if host controller supports the QUniPro mode
@@ -195,10 +505,27 @@ struct ufs_qcom_host {
* configuration even after UFS controller core power collapse.
*/
#define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE 0x2
/*
* Set this capability if host controller supports Qunipro internal
* clock gating.
*/
#define UFS_QCOM_CAP_QUNIPRO_CLK_GATING 0x4
/*
* Set this capability if host controller supports SVS2 frequencies.
*/
#define UFS_QCOM_CAP_SVS2 0x8
/*
* Set this capability if host controller supports shared ICE.
*/
#define UFS_QCOM_CAP_SHARED_ICE BIT(4)
u32 caps;
struct phy *generic_phy;
struct ufs_hba *hba;
struct ufs_qcom_bus_vote bus_vote;
struct ufs_pa_layer_attr dev_req_params;
struct clk *rx_l0_sync_clk;
struct clk *tx_l0_sync_clk;
@@ -217,8 +544,11 @@ struct ufs_qcom_host {
bool is_dev_ref_clk_enabled;
struct ufs_hw_version hw_ver;
bool reset_in_progress;
u32 dev_ref_clk_en_mask;
/* Bitmask for enabling debug prints */
u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
/* Reset control of HCI */
@@ -227,9 +557,69 @@ struct ufs_qcom_host {
struct gpio_desc *device_reset;
u32 hs_gear;
struct ufs_qcom_dev_params host_pwr_cap;
bool disable_lpm;
bool vdd_hba_pc;
struct notifier_block vdd_hba_reg_nb;
struct ufs_vreg *vddp_ref_clk;
struct ufs_vreg *vccq_parent;
struct ufs_vreg *vccq_proxy_client;
bool work_pending;
bool bypass_g4_cfgready;
bool is_dt_pm_level_read;
bool is_phy_pwr_on;
/* Protect the usage of is_phy_pwr_on against racing */
struct mutex phy_mutex;
struct ufs_qcom_qos_req *ufs_qos;
struct ufs_qcom_thermal uqt;
/* FlashPVL entries */
bool err_occurred;
bool crash_on_err;
atomic_t scale_up;
atomic_t clks_on;
unsigned long load_delay_ms;
#define NUM_REQS_HIGH_THRESH 64
#define NUM_REQS_LOW_THRESH 32
atomic_t num_reqs_threshold;
bool cur_freq_vote;
struct delayed_work fwork;
bool cpufreq_dis;
struct cpu_freq_info *cpu_info;
/* number of CPUs to bump up */
int num_cpus;
void *ufs_ipc_log_ctx;
bool dbg_en;
struct device_node *np;
int chosen_algo;
struct ufs_clk_info *ref_clki;
struct ufs_clk_info *core_unipro_clki;
atomic_t hi_pri_en;
atomic_t therm_mitigation;
cpumask_t perf_mask;
cpumask_t def_mask;
u32 *esi_affinity_mask;
bool disable_wb_support;
struct ufs_qcom_ber_hist ber_hist[UFS_QCOM_BER_MODE_MAX];
struct list_head regs_list_head;
bool ber_th_exceeded;
bool irq_affinity_support;
bool esi_enabled;
bool bypass_pbl_rst_wa;
atomic_t cqhp_update_pending;
struct notifier_block ufs_qcom_panic_nb;
bool broken_ahit_wa;
unsigned long active_cmds;
u32 hs_gear;
u32 max_cpus;
#if IS_ENABLED(CONFIG_SEC_UFS_FEATURE)
bool skip_flush;
#endif
};
static inline u32
@@ -246,10 +636,120 @@ ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
const char *str, void *priv));
/* True if the host advertises QUniPro mode support. */
static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
{
	return host->caps & UFS_QCOM_CAP_QUNIPRO;
}

/* True if the host supports QUniPro internal clock gating. */
static inline bool ufs_qcom_cap_qunipro_clk_gating(struct ufs_qcom_host *host)
{
	return !!(host->caps & UFS_QCOM_CAP_QUNIPRO_CLK_GATING);
}

/* True if the host supports SVS2 (low-voltage) frequencies. */
static inline bool ufs_qcom_cap_svs2(struct ufs_qcom_host *host)
{
	return !!(host->caps & UFS_QCOM_CAP_SVS2);
}

/* True if the host supports the shared ICE (inline crypto engine). */
static inline bool is_shared_ice_supported(struct ufs_qcom_host *host)
{
	return !!(host->caps & UFS_QCOM_CAP_SHARED_ICE);
}
/**
 * ufshcd_dme_rmw - read-modify-write a DME attribute
 * @hba: per-adapter instance
 * @mask: mask to apply on the value read back
 * @val: value to write into the masked bits
 * @attr: DME attribute id
 *
 * Reads @attr, clears the bits in @mask, ORs in (@val & @mask) and writes
 * the result back. Returns 0 on success or the error from the underlying
 * DME get/set call (the attribute is left unchanged if the read fails).
 */
static inline int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask,
	u32 val, u32 attr)
{
	u32 cfg = 0;
	int err = 0;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg);
	if (err)
		goto out;

	cfg &= ~mask;
	cfg |= (val & mask);

	err = ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg);

out:
	return err;
}
/*
 * IOCTL opcode for ufs queries has the following opcode after
 * SCSI_IOCTL_GET_PCI
 */
#define UFS_IOCTL_QUERY 0x5388

/**
 * struct ufs_ioctl_query_data - used to transfer data to and from user via
 * ioctl
 * @opcode: type of data to query (descriptor/attribute/flag)
 * @idn: id of the data structure
 * @buf_size: number of allocated bytes/data size on return
 * @buffer: data location
 *
 * Received: buffer and buf_size (available space for transferred data)
 * Submitted: opcode, idn, length, buf_size
 */
struct ufs_ioctl_query_data {
	/*
	 * User should select one of the opcode defined in "enum query_opcode".
	 * Please check include/uapi/scsi/ufs/ufs.h for the definition of it.
	 * Note that only UPIU_QUERY_OPCODE_READ_DESC,
	 * UPIU_QUERY_OPCODE_READ_ATTR & UPIU_QUERY_OPCODE_READ_FLAG are
	 * supported as of now. All other query_opcode would be considered
	 * invalid.
	 * As of now only read query operations are supported.
	 */
	__u32 opcode;
	/*
	 * User should select one of the idn from "enum flag_idn" or "enum
	 * attr_idn" or "enum desc_idn" based on whether opcode above is
	 * attribute, flag or descriptor.
	 * Please check include/uapi/scsi/ufs/ufs.h for the definition of it.
	 */
	__u8 idn;
	/*
	 * User should specify the size of the buffer (buffer[0] below) where
	 * it wants to read the query data (attribute/flag/descriptor).
	 * As we might end up reading less data then what is specified in
	 * buf_size. So we are updating buf_size to what exactly we have read.
	 */
	__u16 buf_size;
	/*
	 * placeholder for the start of the data buffer where kernel will copy
	 * the query data (attribute/flag/descriptor) read from the UFS device
	 * Note:
	 * For Read/Write Attribute you will have to allocate 4 bytes
	 * For Read/Write Flag you will have to allocate 1 byte
	 *
	 * Converted from the zero-length-array GNU extension (buffer[0]) to a
	 * C99 flexible array member; layout and sizeof are unchanged.
	 */
	__u8 buffer[];
};
/* ufs-qcom-ice.c */
#ifdef CONFIG_SCSI_UFS_CRYPTO
void ufs_qcom_ice_disable(struct ufs_qcom_host *host);
void ufs_qcom_ice_debug(struct ufs_qcom_host *host);
#else
/* No-op stubs when inline crypto support is compiled out. */
static inline void ufs_qcom_ice_disable(struct ufs_qcom_host *host)
{
}

static inline void ufs_qcom_ice_debug(struct ufs_qcom_host *host)
{
}
#endif /* !CONFIG_SCSI_UFS_CRYPTO */
#endif /* UFS_QCOM_H_ */

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,242 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Samsung Specific feature
*
* Copyright (C) 2023 Samsung Electronics Co., Ltd.
*
* Authors:
* Storage Driver <storage.sec@samsung.com>
*/
#ifndef __UFS_SEC_FEATURE_H__
#define __UFS_SEC_FEATURE_H__

#include "../core/ufshcd-priv.h"
#include <ufs/ufshci.h>
#include <linux/sched/clock.h>
#include <linux/notifier.h>

/* unique number */
#define UFS_UN_20_DIGITS 20
/* +1 — presumably for the NUL terminator of the UN string; see unique_number[] */
#define UFS_UN_MAX_DIGITS (UFS_UN_20_DIGITS + 1)
#define SERIAL_NUM_SIZE 7

/* Timeout used for SEC-issued SCSI requests (10s in jiffies). */
#define SCSI_UFS_TIMEOUT (10 * HZ)

/* Sizes of the s_info / shi text buffers in struct ufs_vendor_dev_info. */
#define UFS_S_INFO_SIZE 512
#define UFS_SHI_SIZE 256

/*
 * Health-descriptor parameter offsets for the FLT value.
 * NOTE(review): SEC/KIC/MIC/SKH look like per-vendor offsets
 * (Samsung/Kioxia/Micron/SK hynix) — confirm against vendor specs.
 */
#define HEALTH_DESC_PARAM_SEC_FLT 0x22
#define HEALTH_DESC_PARAM_KIC_FLT 0x11
#define HEALTH_DESC_PARAM_MIC_FLT 0x5
#define HEALTH_DESC_PARAM_SKH_FLT 0x5
/* Per-device vendor info collected from descriptors and runtime state. */
struct ufs_vendor_dev_info {
	struct ufs_hba *hba;		/* back-pointer to the host adapter */
	char unique_number[UFS_UN_MAX_DIGITS];	/* device "unique number" string */
	u8 lt;				/* device LifeTime (LT) value — from health desc */
	u16 flt;			/* vendor FLT value (HEALTH_DESC_PARAM_*_FLT) */
	u8 eli;				/* presumably another health-desc field — TODO confirm */
	unsigned int ic;		/* meaning not visible here — TODO confirm */
	char s_info[UFS_S_INFO_SIZE];	/* free-form device info string */
	char shi[UFS_SHI_SIZE];		/* free-form health info string */
	bool device_stuck;		/* set when the device stops responding */
};

/* Decoded fields of one SCSI command, used for SEC logging/accounting. */
struct ufs_sec_cmd_info {
	u8 opcode;		/* SCSI opcode */
	u32 lba;		/* starting LBA */
	int transfer_len;	/* transfer length */
	u8 lun;			/* logical unit number */
};
/* WriteBooster on/off state tracked by the SEC WB statistics. */
enum ufs_sec_wb_state {
	WB_OFF = 0,
	WB_ON
};

/* WriteBooster usage statistics (times presumably in ms, amounts in KB). */
struct ufs_sec_wb_info {
	bool support;		/* device supports WriteBooster */
	u64 state_ts;		/* timestamp of the last state change */
	u64 enable_ms;		/* accumulated time spent enabled */
	u64 disable_ms;		/* accumulated time spent disabled */
	u64 amount_kb;		/* data amount written with WB on */
	u64 enable_cnt;		/* number of enable transitions */
	u64 disable_cnt;	/* number of disable transitions */
	u64 err_cnt;		/* number of WB control failures */
};
/*
 * HCGC (presumably Host-Controlled Garbage Collection) operation codes
 * written to the wHCGCOperation attribute.
 */
enum ufs_sec_hcgc_op {
	HCGC_OP_nop = 0,
	HCGC_OP_stop,
	HCGC_OP_analyze,
	HCGC_OP_execute,
	HCGC_OP_max,
};

/* HCGC : vendor specific flag_idn */
/* NOTE(review): "ANALYSYS" is a misspelling of "ANALYSIS", but the
 * identifier is part of the vendor ABI surface — do not rename casually. */
enum {
	QUERY_FLAG_IDN_SEC_HCGC_ANALYSYS = 0x13,
	QUERY_FLAG_IDN_SEC_HCGC_EXECUTE = 0x14,
};

/* HCGC : vendor specific attr_idn */
enum {
	QUERY_ATTR_IDN_SEC_HCGC_STATE = 0xF0,	// bHCGCState, bHCGCProgressStatus
	QUERY_ATTR_IDN_SEC_HCGC_SIZE = 0xFA,	// wHCGCSize
	QUERY_ATTR_IDN_SEC_HCGC_AVAIL_SIZE = 0xFC,	// wHCGCAvailSize, bHCGCFreeBlockMaxSize
	QUERY_ATTR_IDN_SEC_HCGC_RATIO = 0xFE,	// bHCGCRatio, bHCGCFreeBlockLevel
	QUERY_ATTR_IDN_SEC_HCGC_OPERATION = 0xFF,	// wHCGCOperation
};

/* HCGC : vendor specific desc_idn */
enum {
	QUERY_DESC_IDN_VENDOR_DEVICE = 0xF0
};

/* HCGC : vendor specific device_desc_param */
enum {
	DEVICE_DESC_PARAM_VENDOR_FEA_SUP = 0xFB
};

/* HCGC : Possible values for dExtendedUFSFeaturesSupport */
enum {
	UFS_SEC_EXT_HCGC_SUPPORT = BIT(10),
};

/* HCGC : Possible values for dVendorSpecificFeaturesSupport */
enum {
	UFS_VENDOR_DEV_HCGC = BIT(0),
	UFS_VENDOR_DEV_STREAMID = BIT(4),
	UFS_VENDOR_DEV_PSA = BIT(5),
};

/* Values of the bHCGCState attribute. */
enum ufs_sec_hcgc_status {
	HCGC_STATE_need_to_analyze = 0,
	HCGC_STATE_analyzing,
	HCGC_STATE_need_to_execute,
	HCGC_STATE_executing,
	HCGC_STATE_done,
	HCGC_STATE_max,
};

/* Cached HCGC capability, attribute values and per-op statistics. */
struct ufs_sec_hcgc_info {
	bool support;		/* UFS : feature support */
	bool allow;		/* Host : feature allow */

	int disable_threshold_lt;	/* LT threshold that HCGC is not allowed */

	u32 bHCGCState;
	u32 wHCGCAvailSize;
	u32 wHCGCSize;
	u32 bHCGCRatio;
	u32 bHCGCOperation;

	atomic_t hcgc_op_cnt[HCGC_OP_max];	/* HCGC op count */
	atomic_t hcgc_op_err_cnt[HCGC_OP_max];	/* HCGC op error count */
};
enum ufs_sec_log_str_t {
UFS_SEC_CMD_SEND,
UFS_SEC_CMD_COMP,
UFS_SEC_QUERY_SEND,
UFS_SEC_QUERY_COMP,
UFS_SEC_NOP_SEND,
UFS_SEC_NOP_COMP,
UFS_SEC_TM_SEND,
UFS_SEC_TM_COMP,
UFS_SEC_TM_ERR,
UFS_SEC_UIC_SEND,
UFS_SEC_UIC_COMP,
};
static const char * const ufs_sec_log_str[] = {
[UFS_SEC_CMD_SEND] = "scsi_send",
[UFS_SEC_CMD_COMP] = "scsi_cmpl",
[UFS_SEC_QUERY_SEND] = "query_send",
[UFS_SEC_QUERY_COMP] = "query_cmpl",
[UFS_SEC_NOP_SEND] = "nop_send",
[UFS_SEC_NOP_COMP] = "nop_cmpl",
[UFS_SEC_TM_SEND] = "tm_send",
[UFS_SEC_TM_COMP] = "tm_cmpl",
[UFS_SEC_TM_ERR] = "tm_err",
[UFS_SEC_UIC_SEND] = "uic_send",
[UFS_SEC_UIC_COMP] = "uic_cmpl",
};
/* One entry of the SEC command trace ring buffer */
struct ufs_sec_cmd_log_entry {
	const char *str;	/* event name, points into ufs_sec_log_str[] */
	u8 lun;
	u8 cmd_id;		/* SCSI opcode / UIC command / TM function */
	u32 lba;
	int transfer_len;
	u8 idn;			/* used only for query idn */
	unsigned long outstanding_reqs;	/* doorbell snapshot at log time */
	unsigned int tag;
	u64 tstamp;
};

#define UFS_SEC_CMD_LOGGING_MAX 200	/* ring buffer depth (entries) */
#define UFS_SEC_CMD_LOGNODE_MAX 64

/* Ring buffer of command log entries; pos is the next slot to write */
struct ufs_sec_cmd_log_info {
	struct ufs_sec_cmd_log_entry *entries;
	int pos;
};
/*
 * Top-level container for all SEC UFS feature state. A single global
 * instance is shared by the feature and sysfs code (see ufs_sec_features
 * extern in ufs-sec-sysfs.h).
 */
struct ufs_sec_feature_info {
	struct ufs_vendor_dev_info *vdi;
	struct ufs_sec_wb_info *ufs_wb;			/* write-booster stats, current boot */
	struct ufs_sec_wb_info *ufs_wb_backup;		/* write-booster stats, accumulated */
	struct ufs_sec_hcgc_info *ufs_hcgc;
	struct ufs_sec_err_info *ufs_err;		/* error counters, live */
	struct ufs_sec_err_info *ufs_err_backup;	/* error counters, backed up at reset */
	struct ufs_sec_err_info *ufs_err_hist;		/* error counters from previous boots */
	struct ufs_sec_cmd_log_info *ufs_cmd_log;

	struct notifier_block reboot_notify;
	struct delayed_work noti_work;

	u32 ext_ufs_feature_sup;	/* cached dExtendedUFSFeaturesSupport */
	u32 vendor_spec_feature_sup;	/* cached dVendorSpecificFeaturesSupport */

	/* Last UIC command and completion flag */
	u32 last_ucmd;
	bool ucmd_complete;

	/* Last query command, its dev_cmd type and completion flag */
	enum query_opcode last_qcmd;
	enum dev_cmd_type qcmd_type;
	bool qcmd_complete;
};
extern struct device *sec_ufs_node_dev;

void ufs_sec_get_health_desc(struct ufs_hba *hba);
/*
 * NOTE(review): 'inline' on a declaration without a visible definition is
 * unusual for a header — confirm the definition is marked inline too.
 */
inline bool ufs_sec_is_err_cnt_allowed(void);

/* call by vendor module */
void ufs_sec_config_features(struct ufs_hba *hba);
void ufs_sec_adjust_caps_quirks(struct ufs_hba *hba);
void ufs_sec_init_logging(struct device *dev);
void ufs_sec_set_features(struct ufs_hba *hba);
void ufs_sec_remove_features(struct ufs_hba *hba);
void ufs_sec_register_vendor_hooks(void);

/* error accounting / recovery helpers */
void ufs_sec_check_device_stuck(void);
void ufs_sec_inc_hwrst_cnt(void);
void ufs_sec_inc_op_err(struct ufs_hba *hba, enum ufs_event_type evt, void *data);
void ufs_sec_print_err(void);

/* SEC next WB */
bool ufs_sec_is_wb_supported(void);
int ufs_sec_wb_ctrl(bool enable);
void ufs_sec_wb_register_reset_notify(void *func);

/* SEC HCGC */
bool ufs_sec_is_hcgc_allowed(void);
int ufs_sec_hcgc_query_attr(struct ufs_hba *hba,
		enum query_opcode opcode, enum attr_idn idn, u32 *val);
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,258 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Samsung Specific feature : sysfs-nodes
*
* Copyright (C) 2023 Samsung Electronics Co., Ltd.
*
* Authors:
* Storage Driver <storage.sec@samsung.com>
*/
#ifndef __UFS_SEC_SYSFS_H__
#define __UFS_SEC_SYSFS_H__
#include "ufs-sec-feature.h"
#include <linux/delay.h>
#include <linux/sec_class.h>
#include <linux/sec_debug.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_proto.h>
void ufs_sec_add_sysfs_nodes(struct ufs_hba *hba);
void ufs_sec_remove_sysfs_nodes(struct ufs_hba *hba);

/* Single global feature-state instance, defined in the feature module */
extern struct ufs_sec_feature_info ufs_sec_features;

/* Shorthand accessor for fields of the vendor device info */
#define get_vdi_member(member) ufs_sec_features.vdi->member
/* SEC error info : begin */
/*
 * UFSHCD UIC layer error flags — local copy of the private enum in
 * ufshcd.c; values must stay in sync with the core driver.
 */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};
/* Operational event counters (resets, link startups, hibern8 transitions) */
struct SEC_UFS_op_cnt {
	unsigned int HW_RESET_cnt;
	unsigned int link_startup_cnt;
	unsigned int Hibern8_enter_cnt;
	unsigned int Hibern8_exit_cnt;
	unsigned int AH8_err_cnt;	/* auto-hibern8 errors */
	unsigned int HB_hist_cnt;	/* hibern8 errors carried over from history */
	unsigned int op_err;		/* running total of all of the above */
};

/* Per-UIC-command error counters; u8 fields saturate at their cap */
struct SEC_UFS_UIC_cmd_cnt {
	u8 DME_GET_err;
	u8 DME_SET_err;
	u8 DME_PEER_GET_err;
	u8 DME_PEER_SET_err;
	u8 DME_POWERON_err;
	u8 DME_POWEROFF_err;
	u8 DME_ENABLE_err;
	u8 DME_RESET_err;
	u8 DME_END_PT_RST_err;
	u8 DME_LINK_STARTUP_err;
	u8 DME_HIBER_ENTER_err;
	u8 DME_HIBER_EXIT_err;
	u8 DME_TEST_MODE_err;
	unsigned int UIC_cmd_err;	/* running total */
};

/* UIC layer error counters, split by UniPro layer */
struct SEC_UFS_UIC_err_cnt {
	u8 PAERR_cnt;
	u8 DL_PA_INIT_ERR_cnt;
	u8 DL_NAC_RCVD_ERR_cnt;
	u8 DL_TC_REPLAY_ERR_cnt;
	u8 DL_FC_PROTECT_ERR_cnt;
	u8 NLERR_cnt;
	u8 TLERR_cnt;
	u8 DMEERR_cnt;
	unsigned int DLERR_cnt;
	unsigned int UIC_err;		/* running total */
	unsigned int PAERR_linereset;
	unsigned int PAERR_lane[3];	/* per-lane PA error counts */
};

/* Fatal error counters (see UFSHCI fatal interrupt sources) */
struct SEC_UFS_Fatal_err_cnt {
	u8 DFE;		/* Device_Fatal */
	u8 CFE;		/* Controller_Fatal */
	u8 SBFE;	/* System_Bus_Fatal */
	u8 CEFE;	/* Crypto_Engine_Fatal */
	u8 LLE;		/* Link Lost */
	unsigned int Fatal_err;	/* running total */
};

/* UTP transfer/task-management error counters, split by request type */
struct SEC_UFS_UTP_cnt {
	u8 UTMR_query_task_cnt;
	u8 UTMR_abort_task_cnt;
	u8 UTMR_logical_reset_cnt;
	u8 UTR_read_err;
	u8 UTR_write_err;
	u8 UTR_sync_cache_err;
	u8 UTR_unmap_err;
	u8 UTR_etc_err;		/* any other SCSI opcode */
	unsigned int UTP_err;	/* running total */
};
/* Query-request error counters, split by query opcode */
struct SEC_UFS_QUERY_cnt {
	u8 NOP_err;
	u8 R_Desc_err;
	u8 W_Desc_err;
	u8 R_Attr_err;
	u8 W_Attr_err;
	u8 R_Flag_err;
	u8 Set_Flag_err;
	u8 Clear_Flag_err;
	u8 Toggle_Flag_err;
	unsigned int Query_err;	/* running total */
};

/* SCSI sense-key derived error counters */
struct SEC_SCSI_SENSE_cnt {
	unsigned int scsi_medium_err;
	unsigned int scsi_hw_err;
};

/*
 * Aggregate of all SEC error counters. Three instances exist: live,
 * backup (accumulated across resets) and history (previous boots) —
 * see the ufs_err* pointers in struct ufs_sec_feature_info.
 */
struct ufs_sec_err_info {
	struct SEC_UFS_op_cnt op_cnt;
	struct SEC_UFS_UIC_cmd_cnt UIC_cmd_cnt;
	struct SEC_UFS_UIC_err_cnt UIC_err_cnt;
	struct SEC_UFS_Fatal_err_cnt Fatal_err_cnt;
	struct SEC_UFS_UTP_cnt UTP_cnt;
	struct SEC_UFS_QUERY_cnt Query_cnt;
	struct SEC_SCSI_SENSE_cnt sense_cnt;
};
/* Shorthand accessors for the live / backup / history error-info instances */
#define get_err_member(member) ufs_sec_features.ufs_err->member
#define get_err_backup_member(member) ufs_sec_features.ufs_err_backup->member
#define get_err_hist_member(member) ufs_sec_features.ufs_err_hist->member

/* Fold the live counter into the backup area, then clear the live one */
#define SEC_UFS_ERR_INFO_BACKUP(err_cnt, member) ({ \
		get_err_backup_member(err_cnt).member += get_err_member(err_cnt).member; \
		get_err_member(err_cnt).member = 0; })

/* Get the sum of error count about current booting */
#define SEC_UFS_ERR_INFO_GET_VALUE(err_cnt, member) \
	(get_err_backup_member(err_cnt).member + get_err_member(err_cnt).member)

/* Get the sum of error count about current and previous booting */
#define SEC_UFS_ERR_INFO_HIST_SUM_GET_VALUE(err_cnt, member) \
	(SEC_UFS_ERR_INFO_GET_VALUE(err_cnt, member) + get_err_hist_member(err_cnt).member)

/* 'value' is an ASCII digit character; store its numeric value */
#define SEC_UFS_ERR_INFO_HIST_SET_VALUE(err_cnt, member, value) \
	(get_err_hist_member(err_cnt).member = ((value) - '0'))

/* Counting errors: increment 'count' by one, saturating at 'max' */
#define SEC_UFS_ERR_CNT_INC(count, max) ((count) += ((count) < (max)) ? 1 : 0)

/* Bump one op_cnt member (saturating) and the op_err running total */
#define SEC_UFS_OP_ERR_CNT_INC(member, max) ({ \
		struct SEC_UFS_op_cnt *op_cnt = &get_err_member(op_cnt); \
		\
		SEC_UFS_ERR_CNT_INC(op_cnt->member, max); \
		SEC_UFS_ERR_CNT_INC(op_cnt->op_err, UINT_MAX); \
	})

/*
 * Add 'value' to 'count', saturating at 'max'. All arguments are fully
 * parenthesized so expression arguments expand correctly (the original
 * expansion used bare count/value/max in several places).
 */
#define SEC_UFS_ERR_CNT_ADD(count, value, max) \
	((count) += ((count) < (max)) ? \
		((((count) + (value)) < (max)) ? (value) : ((max) - (count))) : 0)
/* device attributes : begin */
/*
 * Generate a read-only device attribute whose show() simply prints the
 * given printf-style arguments.
 * NOTE(review): sysfs convention now prefers sysfs_emit() over sprintf();
 * kept as-is to avoid changing generated code.
 */
#define SEC_UFS_DATA_ATTR_RO(name, fmt, args...) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
	return sprintf(buf, fmt, args); \
} \
static DEVICE_ATTR_RO(name)

/* Same, but 0664: the corresponding name##_store function has to be defined */
#define SEC_UFS_DATA_ATTR_RW(name, fmt, args...) \
static ssize_t name##_show(struct device *dev, struct device_attribute *attr, char *buf) \
{ \
	return sprintf(buf, fmt, args); \
} \
static DEVICE_ATTR(name, 0664, name##_show, name##_store)
/* device attributes : end */
/* Clamp a current-boot error count to min_val (used to cap digits at 9) */
#define get_min_errinfo(type, min_val, err_cnt, member) \
	min_t(type, min_val, SEC_UFS_ERR_INFO_GET_VALUE(err_cnt, member))

/* Clamped sum of all hibern8-related error counts, current boot */
#define get_min_HB_errinfo(type, min_val) \
	min_t(type, min_val, \
			SEC_UFS_ERR_INFO_GET_VALUE(op_cnt, Hibern8_enter_cnt) + \
			SEC_UFS_ERR_INFO_GET_VALUE(op_cnt, Hibern8_exit_cnt) + \
			SEC_UFS_ERR_INFO_GET_VALUE(op_cnt, AH8_err_cnt))

/* Clamped error count including previous-boot history */
#define get_min_errinfo_hist(type, min_val, err_cnt, member) \
	min_t(type, min_val, SEC_UFS_ERR_INFO_HIST_SUM_GET_VALUE(err_cnt, member))

/* Clamped hibern8 error sum including previous-boot history */
#define get_min_HB_errinfo_hist(type, min_val) \
	min_t(type, min_val, \
			SEC_UFS_ERR_INFO_HIST_SUM_GET_VALUE(op_cnt, Hibern8_enter_cnt) + \
			SEC_UFS_ERR_INFO_HIST_SUM_GET_VALUE(op_cnt, Hibern8_exit_cnt) + \
			SEC_UFS_ERR_INFO_HIST_SUM_GET_VALUE(op_cnt, AH8_err_cnt) + \
			SEC_UFS_ERR_INFO_HIST_SUM_GET_VALUE(op_cnt, HB_hist_cnt))

/* Output sizes of the SEC_UFS_ERR_SUM / SEC_UFS_ERR_HIST_SUM strings */
#define ERR_SUM_SIZE 28
#define ERR_HIST_SUM_SIZE 29
/**
 * UFS Error Information
 *
 * Format : U0I0H0L0X0Q0R0W0F0SM0SH0HB0
 * U : UTP cmd error count
 * I : UIC error count
 * H : HWRESET count
 * L : Link startup failure count
 * X : Link Lost Error count
 * Q : UTMR QUERY_TASK error count
 * R : READ error count
 * W : WRITE error count
 * F : Device Fatal Error count
 * SM : Sense Medium error count
 * SH : Sense Hardware error count
 * HB : Hibern8 enter/exit error count + Auto-H8 error count
 *
 * Each digit is clamped to 9 so the string length stays fixed
 * (ERR_SUM_SIZE). Counts cover the current boot only.
 **/
#define SEC_UFS_ERR_SUM(buf) \
	sprintf(buf, "U%uI%uH%uL%uX%uQ%uR%uW%uF%uSM%uSH%uHB%u", \
			get_min_errinfo(u32, 9, UTP_cnt, UTP_err), \
			get_min_errinfo(u32, 9, UIC_err_cnt, UIC_err), \
			get_min_errinfo(u32, 9, op_cnt, HW_RESET_cnt), \
			get_min_errinfo(u32, 9, op_cnt, link_startup_cnt), \
			get_min_errinfo(u8, 9, Fatal_err_cnt, LLE), \
			get_min_errinfo(u8, 9, UTP_cnt, UTMR_query_task_cnt), \
			get_min_errinfo(u8, 9, UTP_cnt, UTR_read_err), \
			get_min_errinfo(u8, 9, UTP_cnt, UTR_write_err), \
			get_min_errinfo(u8, 9, Fatal_err_cnt, DFE), \
			get_min_errinfo(u32, 9, sense_cnt, scsi_medium_err), \
			get_min_errinfo(u32, 9, sense_cnt, scsi_hw_err), \
			get_min_HB_errinfo(u32, 9))
/**
 * UFS Error Information
 * previous boot's error count + current boot's error count
 *
 * Same layout as SEC_UFS_ERR_SUM plus a trailing newline
 * (hence ERR_HIST_SUM_SIZE = ERR_SUM_SIZE + 1).
 **/
#define SEC_UFS_ERR_HIST_SUM(buf) \
	sprintf(buf, "U%uI%uH%uL%uX%uQ%uR%uW%uF%uSM%uSH%uHB%u\n", \
			get_min_errinfo_hist(u32, 9, UTP_cnt, UTP_err), \
			get_min_errinfo_hist(u32, 9, UIC_err_cnt, UIC_err), \
			get_min_errinfo_hist(u32, 9, op_cnt, HW_RESET_cnt), \
			get_min_errinfo_hist(u32, 9, op_cnt, link_startup_cnt), \
			get_min_errinfo_hist(u8, 9, Fatal_err_cnt, LLE), \
			get_min_errinfo_hist(u8, 9, UTP_cnt, UTMR_query_task_cnt), \
			get_min_errinfo_hist(u8, 9, UTP_cnt, UTR_read_err), \
			get_min_errinfo_hist(u8, 9, UTP_cnt, UTR_write_err), \
			get_min_errinfo_hist(u8, 9, Fatal_err_cnt, DFE), \
			get_min_errinfo_hist(u32, 9, sense_cnt, scsi_medium_err), \
			get_min_errinfo_hist(u32, 9, sense_cnt, scsi_hw_err), \
			get_min_HB_errinfo_hist(u32, 9))
/* SEC error info : end */

/* SEC next WB : begin */
/* Fold a live write-booster counter into the backup copy, then clear it */
#define SEC_UFS_WB_INFO_BACKUP(member) ({ \
		ufs_sec_features.ufs_wb_backup->member += ufs_sec_features.ufs_wb->member; \
		ufs_sec_features.ufs_wb->member = 0; })
/* SEC next WB : end */
#endif

View File

@@ -0,0 +1,232 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <ufs/ufshcd.h>
#include <ufs/ufshcd-crypto.h>
#include "ufs-qcom.h"
#include <soc/qcom/ice.h>
/*
 * Blk-crypto modes supported by UFS crypto.
 * Maps each blk-crypto mode number to the matching UFS crypto algorithm
 * and key size; unlisted modes are zero (UFS_CRYPTO_KEY_SIZE_INVALID).
 */
static const struct ufs_crypto_alg_entry {
	enum ufs_crypto_alg ufs_alg;
	enum ufs_crypto_key_size ufs_key_size;
} ufs_crypto_algs[BLK_ENCRYPTION_MODE_MAX] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.ufs_alg = UFS_CRYPTO_ALG_AES_XTS,
		.ufs_key_size = UFS_CRYPTO_KEY_SIZE_256,
	},
};
/*
 * Program @key into keyslot @slot of the QCOM ICE, handling both raw and
 * HW-wrapped keys. Returns 0 on success or a negative errno.
 */
static int ufshcd_qti_crypto_keyslot_program(struct blk_crypto_profile *profile,
					     const struct blk_crypto_key *key,
					     unsigned int slot)
{
	struct ufs_hba *hba =
		container_of(profile, struct ufs_hba, crypto_profile);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array;
	const struct ufs_crypto_alg_entry *alg =
		&ufs_crypto_algs[key->crypto_cfg.crypto_mode];
	u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512;
	int i;
	int cap_idx = -1;
	union ufs_crypto_cfg_entry cfg = {};
	int err;
	u8 ice_key_size;

	BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);

	/* Find a controller capability matching alg, key size and DU size */
	for (i = 0; i < hba->crypto_capabilities.num_crypto_cap; i++) {
		if (ccap_array[i].algorithm_id == alg->ufs_alg &&
		    ccap_array[i].key_size == alg->ufs_key_size &&
		    (ccap_array[i].sdus_mask & data_unit_mask)) {
			cap_idx = i;
			break;
		}
	}

	if (WARN_ON(cap_idx < 0))
		return -EOPNOTSUPP;

	cfg.data_unit_size = data_unit_mask;
	cfg.crypto_cap_idx = cap_idx;
	cfg.config_enable = UFS_CRYPTO_CONFIGURATION_ENABLE;

	/*
	 * For raw (non-wrapped) keys, stage the key material in cfg.
	 * NOTE(review): cfg is only zeroized below and never written to the
	 * controller in this function; the key itself is handed to the ICE
	 * via qcom_ice_program_key_hwkm() — confirm cfg staging is needed.
	 */
	if (key->crypto_cfg.key_type != BLK_CRYPTO_KEY_TYPE_HW_WRAPPED) {
		if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) {
			/* In XTS mode, the blk_crypto_key's size is already doubled */
			memcpy(cfg.crypto_key, key->raw, key->size / 2);
			memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE / 2,
			       key->raw + key->size / 2, key->size / 2);
		} else {
			memcpy(cfg.crypto_key, key->raw, key->size);
		}
	}

	/* Wrapped keys are larger than raw 256-bit keys */
	if (key->crypto_cfg.key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
		ice_key_size = QCOM_ICE_CRYPTO_KEY_SIZE_WRAPPED;
	else
		ice_key_size = QCOM_ICE_CRYPTO_KEY_SIZE_256;

	/* Refuse to touch the ICE while the host is being reset */
	if (host->reset_in_progress) {
		pr_err("UFS host reset in progress, state = 0x%x\n",
		       hba->ufshcd_state);
		return -EINVAL;
	}

	/* Keep the controller powered/clocked while programming the key */
	ufshcd_hold(hba);
	err = qcom_ice_program_key_hwkm(host->ice,
					QCOM_ICE_CRYPTO_ALG_AES_XTS,
					ice_key_size, key,
					cfg.data_unit_size, slot);
	if (err)
		pr_err("%s: error programming key, err = %d, slot = %d\n",
		       __func__, err, slot);
	ufshcd_release(hba);

	/* Scrub the staged key material from the stack */
	memzero_explicit(&cfg, sizeof(cfg));
	return err;
}
/*
 * Evict the key in keyslot @slot from the QCOM ICE.
 * Returns 0 on success or a negative errno.
 */
static int ufshcd_qti_crypto_keyslot_evict(struct blk_crypto_profile *profile,
					   const struct blk_crypto_key *key,
					   unsigned int slot)
{
	struct ufs_hba *hba = container_of(profile, struct ufs_hba,
					   crypto_profile);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int ret = 0;

	/* Do not touch the ICE while the host is being reset */
	if (host->reset_in_progress) {
		pr_err("UFS host reset in progress, state = 0x%x\n",
		       hba->ufshcd_state);
		return -EINVAL;
	}

	/* Keep the controller powered/clocked while evicting the key */
	ufshcd_hold(hba);
	ret = qcom_ice_evict_key(host->ice, slot);
	if (ret)
		pr_err("%s: error evicting key, err = %d, slot = %d\n",
		       __func__, ret, slot);
	ufshcd_release(hba);

	return ret;
}
/*
 * Derive the software secret from a HW-wrapped key via the QCOM ICE.
 * Returns 0 on success or a negative errno.
 */
static int ufshcd_qti_crypto_derive_sw_secret(struct blk_crypto_profile *profile,
					      const u8 wkey[], size_t wkey_size,
					      u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
{
	struct ufs_hba *hba = container_of(profile, struct ufs_hba,
					   crypto_profile);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int ret;

	ret = qcom_ice_derive_sw_secret(host->ice, wkey, wkey_size, sw_secret);
	if (ret)
		pr_err("%s: error deriving software secret, err = %d\n",
		       __func__, ret);

	return ret;
}
/* Low-level blk-crypto operations backed by the QCOM ICE */
static const struct blk_crypto_ll_ops ufshcd_qti_crypto_ops = {
	.keyslot_program	= ufshcd_qti_crypto_keyslot_program,
	.keyslot_evict		= ufshcd_qti_crypto_keyslot_evict,
	.derive_sw_secret	= ufshcd_qti_crypto_derive_sw_secret,
};
static enum blk_crypto_mode_num
ufshcd_find_blk_crypto_mode(union ufs_crypto_cap_entry cap)
{
int i;
for (i = 0; i < ARRAY_SIZE(ufs_crypto_algs); i++) {
BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
if (ufs_crypto_algs[i].ufs_alg == cap.algorithm_id &&
ufs_crypto_algs[i].ufs_key_size == cap.key_size) {
return i;
}
}
return BLK_ENCRYPTION_MODE_INVALID;
}
/**
 * ufshcd_qti_hba_init_crypto_capabilities - Read crypto capabilities, init
 * crypto fields in hba
 * @hba: Per adapter instance
 *
 * Reads the controller's crypto capability registers, sets up the
 * blk_crypto_profile with the QTI (wrapped-key) ops and advertises the
 * supported modes/data-unit sizes to the block layer. On any failure
 * UFSHCD_CAP_CRYPTO is cleared so the core driver runs without crypto.
 *
 * Return: 0 if crypto was initialized or is not supported, else a -errno value.
 */
int ufshcd_qti_hba_init_crypto_capabilities(struct ufs_hba *hba)
{
	int cap_idx;
	int err = 0;
	enum blk_crypto_mode_num blk_mode_num;

	/*
	 * Don't use crypto if either the hardware doesn't advertise the
	 * standard crypto capability bit *or* if the vendor specific driver
	 * hasn't advertised that crypto is supported.
	 */
	if (!(ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES) &
	      MASK_CRYPTO_SUPPORT))
		goto out;

	if (!(hba->caps & UFSHCD_CAP_CRYPTO))
		goto out;

	hba->crypto_capabilities.reg_val =
		cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP));
	/* Config array lives at CFGPTR * 0x100 from the register base */
	hba->crypto_cfg_register =
		(u32)hba->crypto_capabilities.config_array_ptr * 0x100;
	hba->crypto_cap_array =
		devm_kcalloc(hba->dev, hba->crypto_capabilities.num_crypto_cap,
			     sizeof(hba->crypto_cap_array[0]), GFP_KERNEL);
	if (!hba->crypto_cap_array) {
		err = -ENOMEM;
		goto out;
	}

	/* The actual number of configurations supported is (CFGC+1) */
	err = devm_blk_crypto_profile_init(hba->dev, &hba->crypto_profile,
					   hba->crypto_capabilities.config_count + 1);
	if (err)
		goto out;

	hba->crypto_profile.ll_ops = ufshcd_qti_crypto_ops;
	/* UFS only supports 8 bytes for any DUN */
	hba->crypto_profile.max_dun_bytes_supported = 8;
	/* This profile handles HW-wrapped keys only */
	hba->crypto_profile.key_types_supported = BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
	hba->crypto_profile.dev = hba->dev;

	/*
	 * Cache all the UFS crypto capabilities and advertise the supported
	 * crypto modes and data unit sizes to the block layer.
	 */
	for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap;
	     cap_idx++) {
		hba->crypto_cap_array[cap_idx].reg_val =
			cpu_to_le32(ufshcd_readl(hba,
						 REG_UFS_CRYPTOCAP +
						 cap_idx * sizeof(__le32)));
		blk_mode_num =
			ufshcd_find_blk_crypto_mode(hba->crypto_cap_array[cap_idx]);
		if (blk_mode_num != BLK_ENCRYPTION_MODE_INVALID)
			hba->crypto_profile.modes_supported[blk_mode_num] |=
				hba->crypto_cap_array[cap_idx].sdus_mask * 512;
	}

	return 0;

out:
	/* Indicate that init failed by clearing UFSHCD_CAP_CRYPTO */
	hba->caps &= ~UFSHCD_CAP_CRYPTO;
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_qti_hba_init_crypto_capabilities);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("UFS Crypto ops QTI implementation");