Add Samsung-specific changes

This commit is contained in:
2025-08-11 14:29:00 +02:00
parent c66122e619
commit 4d134a1294
2688 changed files with 1127995 additions and 11475 deletions

View File

@@ -1071,3 +1071,25 @@ config MMC_LITEX
module will be called litex_mmc.
If unsure, say N.
# Debug-only tracing hook for the sdhci-msm host controller driver.
config SDHCI_MSM_DBG
bool "Qualcomm Technologies, Inc. SDHCI_MSM_DBG Driver"
depends on MMC_SDHCI_MSM
help
This enables the IPC tracing events for sdhci_msm driver.
Tracing sdhci_msm events helps to trace significant events
used for debugging purpose.
If unsure, say N here.
# NOTE(review): this symbol is consumed as "cqhci-$(CONFIG_MMC_CRYPTO_QTI) += ..."
# in the Makefile. With tristate, setting it to 'm' expands to "cqhci-m", which
# kbuild ignores, so the object would be silently dropped — confirm whether this
# should be 'bool' (or 'depends on MMC_CQHCI') instead.
config MMC_CRYPTO_QTI
tristate "MMC Crypto Engine Support for QCOM Hardware Wrapped Keys"
depends on MMC_CRYPTO && QCOM_INLINE_CRYPTO_ENGINE
help
Enable Crypto Engine Support in MMC for QCOM chipsets supporting
wrapped keys. This is explicitly required until wrapped keys support
for QCOM chipsets is part of Linux upstream.
Select this to enable QCOM Hardware Wrapped Key support for the
ICE hardware.

View File

@@ -98,6 +98,7 @@ obj-$(CONFIG_MMC_SDHCI_SPRD) += sdhci-sprd.o
obj-$(CONFIG_MMC_SUNPLUS) += sunplus-mmc.o
obj-$(CONFIG_MMC_CQHCI) += cqhci.o
cqhci-y += cqhci-core.o
cqhci-$(CONFIG_MMC_CRYPTO_QTI) += cqhci-crypto-qti.o
cqhci-$(CONFIG_MMC_CRYPTO) += cqhci-crypto.o
obj-$(CONFIG_MMC_HSQ) += mmc_hsq.o
obj-$(CONFIG_MMC_LITEX) += litex_mmc.o

View File

@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@@ -19,6 +20,7 @@
#include "cqhci.h"
#include "cqhci-crypto.h"
#include "cqhci-crypto-qti.h"
#define DCMD_SLOT 31
#define NUM_SLOTS 32
@@ -108,6 +110,10 @@ static void cqhci_set_irqs(struct cqhci_host *cq_host, u32 set)
static void cqhci_dumpregs(struct cqhci_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
int offset = 0;
if (cq_host->offset_changed)
offset = CQE_V5_VENDOR_CFG;
CQHCI_DUMP("============ CQHCI REGISTER DUMP ===========\n");
@@ -144,6 +150,8 @@ static void cqhci_dumpregs(struct cqhci_host *cq_host)
CQHCI_DUMP("Resp idx: 0x%08x | Resp arg: 0x%08x\n",
cqhci_readl(cq_host, CQHCI_CRI),
cqhci_readl(cq_host, CQHCI_CRA));
CQHCI_DUMP("Vendor cfg 0x%08x\n",
cqhci_readl(cq_host, CQHCI_VENDOR_CFG + offset));
if (cq_host->ops->dumpregs)
cq_host->ops->dumpregs(mmc);
@@ -351,6 +359,9 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
__cqhci_enable(cq_host);
if (cq_host->ops->enhanced_strobe_mask)
cq_host->ops->enhanced_strobe_mask(mmc, true);
cq_host->enabled = true;
#ifdef DEBUG
@@ -405,6 +416,9 @@ static void cqhci_disable(struct mmc_host *mmc)
__cqhci_disable(cq_host);
if (cq_host->ops->enhanced_strobe_mask)
cq_host->ops->enhanced_strobe_mask(mmc, false);
dmam_free_coherent(mmc_dev(mmc), cq_host->data_size,
cq_host->trans_desc_base,
cq_host->trans_desc_dma_base);
@@ -781,6 +795,10 @@ static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
struct cqhci_slot *slot = &cq_host->slot[tag];
struct mmc_request *mrq = slot->mrq;
struct mmc_data *data;
int offset = 0;
if (cq_host->offset_changed)
offset = CQE_V5_VENDOR_CFG;
if (!mrq) {
WARN_ONCE(1, "%s: cqhci: spurious TCN for tag %d\n",
@@ -804,6 +822,11 @@ static void cqhci_finish_mrq(struct mmc_host *mmc, unsigned int tag)
data->bytes_xfered = 0;
else
data->bytes_xfered = data->blksz * data->blocks;
} else {
cqhci_writel(cq_host, cqhci_readl(cq_host,
CQHCI_VENDOR_CFG + offset) |
CMDQ_SEND_STATUS_TRIGGER,
CQHCI_VENDOR_CFG + offset);
}
mmc_cqe_request_done(mmc, mrq);
@@ -817,7 +840,6 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
struct cqhci_host *cq_host = mmc->cqe_private;
status = cqhci_readl(cq_host, CQHCI_IS);
cqhci_writel(cq_host, status, CQHCI_IS);
pr_debug("%s: cqhci: IRQ status: 0x%08x\n", mmc_hostname(mmc), status);
@@ -829,7 +851,14 @@ irqreturn_t cqhci_irq(struct mmc_host *mmc, u32 intmask, int cmd_error,
mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_GCE);
if (status & CQHCI_IS_ICCE)
mmc_debugfs_err_stats_inc(mmc, MMC_ERR_CMDQ_ICCE);
pr_err("%s: cqhci: error IRQ status: 0x%08x cmd error %d data error %d\n",
mmc_hostname(mmc), status, cmd_error, data_error);
cqhci_dumpregs(cq_host);
cqhci_writel(cq_host, status, CQHCI_IS);
cqhci_error_irq(mmc, status, cmd_error, data_error);
} else {
/* Clear interrupt */
cqhci_writel(cq_host, status, CQHCI_IS);
}
if (status & CQHCI_IS_TCC) {
@@ -1196,7 +1225,11 @@ int cqhci_init(struct cqhci_host *cq_host, struct mmc_host *mmc,
goto out_err;
}
#if IS_ENABLED(CONFIG_MMC_CRYPTO_QTI)
err = cqhci_qti_crypto_init(cq_host);
#else
err = cqhci_crypto_init(cq_host);
#endif
if (err) {
pr_err("%s: CQHCI crypto initialization failed\n",
mmc_hostname(mmc));

View File

@@ -0,0 +1,227 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/algapi.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
#include "cqhci-crypto-qti.h"
#include <linux/blk-crypto-profile.h>
#include <soc/qcom/ice.h>
#define RAW_SECRET_SIZE 32
#define MINIMUM_DUN_SIZE 512
#define MAXIMUM_DUN_SIZE 65536
#define SDCC_CE 20
/*
 * Map from blk-crypto mode numbers to the matching CQHCI crypto algorithm
 * and key size. Only AES-256-XTS is supported by this driver; every other
 * entry stays zero-initialized (CQHCI_CRYPTO_KEY_SIZE_INVALID == 0), which
 * cqhci_find_blk_crypto_mode() relies on to reject unsupported modes.
 */
static const struct cqhci_crypto_alg_entry {
enum cqhci_crypto_alg alg;
enum cqhci_crypto_key_size key_size;
} cqhci_crypto_algs[BLK_ENCRYPTION_MODE_MAX] = {
[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
.alg = CQHCI_CRYPTO_ALG_AES_XTS,
.key_size = CQHCI_CRYPTO_KEY_SIZE_256,
},
};
/* Resolve the cqhci_host that owns @profile via its enclosing mmc_host. */
static inline struct cqhci_host *
cqhci_host_from_crypto(struct blk_crypto_profile *profile)
{
	struct mmc_host *host = container_of(profile, struct mmc_host,
					     crypto_profile);

	return host->cqe_private;
}
/*
 * Program @key into hardware keyslot @slot through the QCOM ICE.
 *
 * Looks up a controller crypto capability matching the key's algorithm,
 * key size and data-unit size, then hands the key to the ICE driver.
 * Returns 0 on success, -EINVAL for a NULL key, -EOPNOTSUPP when no
 * capability matches, or the ICE driver's error code.
 */
static int cqhci_crypto_qti_keyslot_program(struct blk_crypto_profile *profile,
					    const struct blk_crypto_key *key,
					    unsigned int slot)
{
	struct cqhci_host *cq_host = cqhci_host_from_crypto(profile);
	const union cqhci_crypto_cap_entry *caps = cq_host->crypto_cap_array;
	const struct cqhci_crypto_alg_entry *alg;
	int cap_idx = -1;
	u8 dun_mask;
	u8 ice_key_size;
	int err;
	int i;

	if (!key) {
		pr_err("Invalid/no key present\n");
		return -EINVAL;
	}

	alg = &cqhci_crypto_algs[key->crypto_cfg.crypto_mode];
	/* Data-unit size is expressed as a multiple of 512 bytes. */
	dun_mask = key->crypto_cfg.data_unit_size / MINIMUM_DUN_SIZE;

	BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0);
	for (i = 0; i < cq_host->crypto_capabilities.num_crypto_cap; i++) {
		if (caps[i].algorithm_id == alg->alg &&
		    caps[i].key_size == alg->key_size &&
		    (caps[i].sdus_mask & dun_mask)) {
			cap_idx = i;
			break;
		}
	}
	/* blk-crypto should never ask for a mode we didn't advertise. */
	if (WARN_ON(cap_idx < 0))
		return -EOPNOTSUPP;

	if (key->crypto_cfg.key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
		ice_key_size = QCOM_ICE_CRYPTO_KEY_SIZE_WRAPPED;
	else
		ice_key_size = QCOM_ICE_CRYPTO_KEY_SIZE_256;

	err = qcom_ice_program_key_hwkm(cq_host->ice,
					QCOM_ICE_CRYPTO_ALG_AES_XTS,
					ice_key_size, key, dun_mask, slot);
	if (err)
		pr_err("%s: failed with error %d\n", __func__, err);

	return err;
}
static int cqhci_crypto_qti_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
int err = 0;
struct cqhci_host *host = cqhci_host_from_crypto(profile);
err = qcom_ice_evict_key(host->ice, slot);
if (err)
pr_err("%s: failed with error %d\n", __func__, err);
return err;
}
/*
 * Derive the software secret for a hardware-wrapped key by delegating to
 * the ICE driver; @sw_secret receives BLK_CRYPTO_SW_SECRET_SIZE bytes.
 */
static int cqhci_crypto_qti_derive_raw_secret(struct blk_crypto_profile *profile,
					      const u8 *wrapped_key,
					      size_t wrapped_key_size,
					      u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
{
	struct cqhci_host *cq_host = cqhci_host_from_crypto(profile);
	int ret;

	ret = qcom_ice_derive_sw_secret(cq_host->ice, wrapped_key,
					wrapped_key_size, sw_secret);
	if (ret)
		pr_err("%s: failed with error %d\n", __func__, ret);

	return ret;
}
/* blk-crypto low-level operations, all backed by the QCOM ICE hardware. */
static const struct blk_crypto_ll_ops cqhci_crypto_qti_ops = {
.keyslot_program = cqhci_crypto_qti_keyslot_program,
.keyslot_evict = cqhci_crypto_qti_keyslot_evict,
.derive_sw_secret = cqhci_crypto_qti_derive_raw_secret
};
static enum blk_crypto_mode_num
cqhci_find_blk_crypto_mode(union cqhci_crypto_cap_entry cap)
{
int i = 0;
for (i = 0; i < ARRAY_SIZE(cqhci_crypto_algs); i++) {
BUILD_BUG_ON(CQHCI_CRYPTO_KEY_SIZE_INVALID != 0);
if (cqhci_crypto_algs[i].alg == cap.algorithm_id &&
cqhci_crypto_algs[i].key_size == cap.key_size)
return i;
}
return BLK_ENCRYPTION_MODE_INVALID;
}
/**
 * cqhci_qti_crypto_init - initialize CQHCI crypto support via QCOM ICE
 * @cq_host: a cqhci host
 *
 * If the driver previously set MMC_CAP2_CRYPTO and the CQE declares
 * CQHCI_CAP_CS, initialize the crypto support. This involves reading the
 * crypto capability registers, initializing the keyslot manager, clearing all
 * keyslots, and enabling 128-bit task descriptors. Unlike the generic
 * cqhci_crypto_init(), the blk-crypto profile is wired to the QCOM ICE ops
 * and only hardware-wrapped keys are advertised.
 *
 * Return: 0 if crypto was initialized or isn't supported; whether
 * MMC_CAP2_CRYPTO remains set indicates which one of those cases it is.
 * Also can return a negative errno value on unexpected error.
 */
int cqhci_qti_crypto_init(struct cqhci_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
struct device *dev = mmc_dev(mmc);
struct blk_crypto_profile *profile = &mmc->crypto_profile;
unsigned int num_keyslots = 0;
unsigned int cap_idx = 0;
enum blk_crypto_mode_num blk_mode_num = 0;
unsigned int slot = 0;
int err = 0;
/* Bail out (err == 0) when the host or CQE offers no crypto support. */
if (!(mmc->caps2 & MMC_CAP2_CRYPTO) ||
!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
goto out;
cq_host->crypto_capabilities.reg_val =
cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP));
/* CCAP.CFGPTR is in units of 256 bytes from the CQE base. */
cq_host->crypto_cfg_register =
(u32)cq_host->crypto_capabilities.config_array_ptr * 0x100;
cq_host->crypto_cap_array =
devm_kcalloc(dev, cq_host->crypto_capabilities.num_crypto_cap,
sizeof(cq_host->crypto_cap_array[0]), GFP_KERNEL);
if (!cq_host->crypto_cap_array) {
err = -ENOMEM;
goto out;
}
/*
 * CCAP.CFGC is off by one, so the actual number of crypto
 * configurations (a.k.a. keyslots) is CCAP.CFGC + 1.
 */
num_keyslots = cq_host->crypto_capabilities.config_count + 1;
err = devm_blk_crypto_profile_init(dev, profile, num_keyslots);
if (err)
goto out;
profile->ll_ops = cqhci_crypto_qti_ops;
profile->dev = dev;
/* Unfortunately, CQHCI crypto only supports 32 DUN bits. */
profile->max_dun_bytes_supported = 4;
/* Only ICE hardware-wrapped keys are supported; raw keys are not. */
profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
/*
 * Cache all the crypto capabilities and advertise the supported crypto
 * modes and data unit sizes to the block layer.
 */
for (cap_idx = 0; cap_idx < cq_host->crypto_capabilities.num_crypto_cap;
cap_idx++) {
cq_host->crypto_cap_array[cap_idx].reg_val =
cpu_to_le32(cqhci_readl(cq_host,
CQHCI_CRYPTOCAP +
cap_idx * sizeof(__le32)));
blk_mode_num = cqhci_find_blk_crypto_mode(
cq_host->crypto_cap_array[cap_idx]);
if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
continue;
/* sdus_mask bit N means a data-unit size of 512 << N bytes. */
profile->modes_supported[blk_mode_num] |=
cq_host->crypto_cap_array[cap_idx].sdus_mask * 512;
}
/* Clear all the keyslots so that we start in a known state. */
for (slot = 0; slot < num_keyslots; slot++)
profile->ll_ops.keyslot_evict(profile, NULL, slot);
/* CQHCI crypto requires the use of 128-bit task descriptors. */
cq_host->caps |= CQHCI_TASK_DESC_SZ_128;
return 0;
out:
/* Crypto unsupported or setup failed: withdraw the capability flag. */
mmc->caps2 &= ~MMC_CAP2_CRYPTO;
return err;
}
MODULE_DESCRIPTION("Vendor specific CQHCI Crypto Engine Support");
MODULE_LICENSE("GPL");

View File

@@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UFSHCD_CRYPTO_QTI_H
#define _UFSHCD_CRYPTO_QTI_H
#include "cqhci-crypto.h"
#if IS_ENABLED(CONFIG_MMC_CRYPTO_QTI)
int cqhci_qti_crypto_init(struct cqhci_host *cq_host);
#else
int cqhci_qti_crypto_init(struct cqhci_host *cq_host)
{
return 0;
}
#endif /* CONFIG_MMC_CRYPTO_QTI) */
#endif /* _UFSHCD_ICE_QTI_H */

View File

@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef LINUX_MMC_CQHCI_H
#define LINUX_MMC_CQHCI_H
@@ -119,6 +120,14 @@
/* command response argument */
#define CQHCI_CRA 0x5C
/*
* Add new macro for updated CQ vendor specific
* register address for SDHC v5.0 onwards.
*/
#define CQE_V5_VENDOR_CFG 0x900
#define CQHCI_VENDOR_CFG 0x100
#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
/* crypto capabilities */
#define CQHCI_CCAP 0x100
#define CQHCI_CRYPTOCAP 0x104
@@ -246,6 +255,7 @@ struct cqhci_host {
bool activated;
bool waiting_for_idle;
bool recovery_halt;
bool offset_changed;
size_t desc_size;
size_t data_size;
@@ -276,6 +286,7 @@ struct cqhci_host {
union cqhci_crypto_capabilities crypto_capabilities;
union cqhci_crypto_cap_entry *crypto_cap_array;
u32 crypto_cfg_register;
struct qcom_ice *ice;
#endif
};
@@ -293,6 +304,7 @@ struct cqhci_host_ops {
int (*program_key)(struct cqhci_host *cq_host,
const union cqhci_crypto_cfg_entry *cfg, int slot);
#endif
void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set);
};
static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)

File diff suppressed because it is too large Load Diff