Merge 485104cd63 ("scsi: ufs: qcom: fix dev reference leaked through of_qcom_ice_get") into android15-6.6-lts

Steps on the way to 6.6.89

Change-Id: Id61f327e85906ff1c3f5230a9c618a98b8cb3f09
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -196,13 +196,6 @@
wakeup-event-action = <EV_ACT_ASSERTED>;
wakeup-source;
};

key-suspend {
label = "Suspend";
gpios = <&gpio TEGRA234_MAIN_GPIO(G, 2) GPIO_ACTIVE_LOW>;
linux,input-type = <EV_KEY>;
linux,code = <KEY_SLEEP>;
};
};

fan: pwm-fan {
@@ -229,9 +229,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP);
#define _ASM_EXTABLE_UA(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_UACCESS)

#define _ASM_EXTABLE_CPY(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_COPY)

#define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_FAULT)
@@ -36,7 +36,7 @@
#define EX_TYPE_DEFAULT 1
#define EX_TYPE_FAULT 2
#define EX_TYPE_UACCESS 3
#define EX_TYPE_COPY 4
/* unused, was: #define EX_TYPE_COPY 4 */
#define EX_TYPE_CLEAR_FS 5
#define EX_TYPE_FPU_RESTORE 6
#define EX_TYPE_BPF 7
@@ -288,14 +288,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs)
copy_user = is_copy_from_user(regs);
instrumentation_end();

switch (fixup_type) {
case EX_TYPE_UACCESS:
case EX_TYPE_COPY:
if (!copy_user)
return IN_KERNEL;
m->kflags |= MCE_IN_KERNEL_COPYIN;
fallthrough;
if (copy_user) {
m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV;
return IN_KERNEL_RECOV;
}

switch (fixup_type) {
case EX_TYPE_FAULT_MCE_SAFE:
case EX_TYPE_DEFAULT_MCE_SAFE:
m->kflags |= MCE_IN_KERNEL_RECOV;
@@ -163,13 +163,6 @@ static bool ex_handler_uaccess(const struct exception_table_entry *fixup,
return ex_handler_default(fixup, regs);
}

static bool ex_handler_copy(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
return ex_handler_fault(fixup, regs, trapnr);
}

static bool ex_handler_msr(const struct exception_table_entry *fixup,
struct pt_regs *regs, bool wrmsr, bool safe, int reg)
{

@@ -267,8 +260,6 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
return ex_handler_fault(e, regs, trapnr);
case EX_TYPE_UACCESS:
return ex_handler_uaccess(e, regs, trapnr, fault_addr);
case EX_TYPE_COPY:
return ex_handler_copy(e, regs, trapnr);
case EX_TYPE_CLEAR_FS:
return ex_handler_clear_fs(e, regs);
case EX_TYPE_FPU_RESTORE:
@@ -313,13 +313,13 @@ static int hd44780_probe(struct platform_device *pdev)
fail3:
kfree(hd);
fail2:
kfree(lcd);
charlcd_free(lcd);
fail1:
kfree(hdc);
return ret;
}

static int hd44780_remove(struct platform_device *pdev)
static void hd44780_remove(struct platform_device *pdev)
{
struct charlcd *lcd = platform_get_drvdata(pdev);
struct hd44780_common *hdc = lcd->drvdata;

@@ -328,8 +328,7 @@ static int hd44780_remove(struct platform_device *pdev)
kfree(hdc->hd44780);
kfree(lcd->drvdata);

kfree(lcd);
return 0;
charlcd_free(lcd);
}

static const struct of_device_id hd44780_of_match[] = {

@@ -340,7 +339,7 @@ MODULE_DEVICE_TABLE(of, hd44780_of_match);

static struct platform_driver hd44780_driver = {
.probe = hd44780_probe,
.remove = hd44780_remove,
.remove_new = hd44780_remove,
.driver = {
.name = "hd44780",
.of_match_table = hd44780_of_match,
@@ -14,6 +14,17 @@

#include "rzg2l-cpg.h"

/* Specific registers. */
#define CPG_PL2SDHI_DSEL (0x218)

/* Clock select configuration. */
#define SEL_SDHI0 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 0, 2)
#define SEL_SDHI1 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 4, 2)

/* Clock status configuration. */
#define SEL_SDHI0_STS SEL_PLL_PACK(CPG_CLKSTATUS, 28, 1)
#define SEL_SDHI1_STS SEL_PLL_PACK(CPG_CLKSTATUS, 29, 1)

enum clk_ids {
/* Core Clock Outputs exported to DT */
LAST_DT_CORE_CLK = R9A07G043_CLK_P0_DIV2,

@@ -75,8 +86,12 @@ static const struct clk_div_table dtable_1_32[] = {

/* Mux clock tables */
static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
#ifdef CONFIG_ARM64
static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
#endif
static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" };

static const u32 mtable_sdhi[] = { 1, 2, 3 };

static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
/* External Clock Inputs */

@@ -120,11 +135,18 @@ static const struct cpg_core_clk r9a07g043_core_clks[] __initconst = {
DEF_DIV("P2", R9A07G043_CLK_P2, CLK_PLL3_DIV2_4_2, DIVPL3A, dtable_1_32),
DEF_FIXED("M0", R9A07G043_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
DEF_FIXED("ZT", R9A07G043_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
#ifdef CONFIG_ARM64
DEF_MUX("HP", R9A07G043_CLK_HP, SEL_PLL6_2, sel_pll6_2),
#endif
#ifdef CONFIG_RISCV
DEF_FIXED("HP", R9A07G043_CLK_HP, CLK_PLL6_250, 1, 1),
#endif
DEF_FIXED("SPI0", R9A07G043_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
DEF_FIXED("SPI1", R9A07G043_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, sel_shdi),
DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, sel_shdi),
DEF_SD_MUX("SD0", R9A07G043_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
DEF_SD_MUX("SD1", R9A07G043_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_sdhi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G043_CLK_SD0, 1, 4),
DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G043_CLK_SD1, 1, 4),
};
@@ -15,6 +15,17 @@

#include "rzg2l-cpg.h"

/* Specific registers. */
#define CPG_PL2SDHI_DSEL (0x218)

/* Clock select configuration. */
#define SEL_SDHI0 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 0, 2)
#define SEL_SDHI1 SEL_PLL_PACK(CPG_PL2SDHI_DSEL, 4, 2)

/* Clock status configuration. */
#define SEL_SDHI0_STS SEL_PLL_PACK(CPG_CLKSTATUS, 28, 1)
#define SEL_SDHI1_STS SEL_PLL_PACK(CPG_CLKSTATUS, 29, 1)

enum clk_ids {
/* Core Clock Outputs exported to DT */
LAST_DT_CORE_CLK = R9A07G054_CLK_DRP_A,

@@ -95,9 +106,11 @@ static const struct clk_div_table dtable_16_128[] = {
static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
static const char * const sel_pll5_4[] = { ".pll5_foutpostdiv", ".pll5_fout1ph0" };
static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
static const char * const sel_sdhi[] = { ".clk_533", ".clk_400", ".clk_266" };
static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" };

static const u32 mtable_sdhi[] = { 1, 2, 3 };

static const struct {
struct cpg_core_clk common[56];
#ifdef CONFIG_CLK_R9A07G054

@@ -163,8 +176,10 @@ static const struct {
DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2, sel_pll6_2),
DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, sel_shdi),
DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, sel_shdi),
DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0, SEL_SDHI0_STS, sel_sdhi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1, SEL_SDHI1_STS, sel_sdhi,
mtable_sdhi, 0, rzg2l_cpg_sd_clk_mux_notifier),
DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8),
@@ -56,15 +56,37 @@
|
||||
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
|
||||
#define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)
|
||||
|
||||
#define CPG_WEN_BIT BIT(16)
|
||||
|
||||
#define MAX_VCLK_FREQ (148500000)
|
||||
|
||||
struct sd_hw_data {
|
||||
/**
|
||||
* struct clk_hw_data - clock hardware data
|
||||
* @hw: clock hw
|
||||
* @conf: clock configuration (register offset, shift, width)
|
||||
* @sconf: clock status configuration (register offset, shift, width)
|
||||
* @priv: CPG private data structure
|
||||
*/
|
||||
struct clk_hw_data {
|
||||
struct clk_hw hw;
|
||||
u32 conf;
|
||||
u32 sconf;
|
||||
struct rzg2l_cpg_priv *priv;
|
||||
};
|
||||
|
||||
#define to_sd_hw_data(_hw) container_of(_hw, struct sd_hw_data, hw)
|
||||
#define to_clk_hw_data(_hw) container_of(_hw, struct clk_hw_data, hw)
|
||||
|
||||
/**
|
||||
* struct sd_mux_hw_data - SD MUX clock hardware data
|
||||
* @hw_data: clock hw data
|
||||
* @mtable: clock mux table
|
||||
*/
|
||||
struct sd_mux_hw_data {
|
||||
struct clk_hw_data hw_data;
|
||||
const u32 *mtable;
|
||||
};
|
||||
|
||||
#define to_sd_mux_hw_data(_hw) container_of(_hw, struct sd_mux_hw_data, hw_data)
|
||||
|
||||
struct rzg2l_pll5_param {
|
||||
u32 pl5_fracin;
|
||||
@@ -121,6 +143,76 @@ static void rzg2l_cpg_del_clk_provider(void *data)
|
||||
of_clk_del_provider(data);
|
||||
}
|
||||
|
||||
/* Must be called in atomic context. */
|
||||
static int rzg2l_cpg_wait_clk_update_done(void __iomem *base, u32 conf)
|
||||
{
|
||||
u32 bitmask = GENMASK(GET_WIDTH(conf) - 1, 0) << GET_SHIFT(conf);
|
||||
u32 off = GET_REG_OFFSET(conf);
|
||||
u32 val;
|
||||
|
||||
return readl_poll_timeout_atomic(base + off, val, !(val & bitmask), 10, 200);
|
||||
}
|
||||
|
||||
int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event,
|
||||
void *data)
|
||||
{
|
||||
struct clk_notifier_data *cnd = data;
|
||||
struct clk_hw *hw = __clk_get_hw(cnd->clk);
|
||||
struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
|
||||
struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
|
||||
u32 off = GET_REG_OFFSET(clk_hw_data->conf);
|
||||
u32 shift = GET_SHIFT(clk_hw_data->conf);
|
||||
const u32 clk_src_266 = 3;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
if (event != PRE_RATE_CHANGE || (cnd->new_rate / MEGA == 266))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
spin_lock_irqsave(&priv->rmw_lock, flags);
|
||||
|
||||
/*
|
||||
* As per the HW manual, we should not directly switch from 533 MHz to
|
||||
* 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
|
||||
* to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
|
||||
* and then switch to the target setting (2’b01 (533 MHz) or 2’b10
|
||||
* (400 MHz)).
|
||||
* Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
|
||||
* switching register is prohibited.
|
||||
* The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
|
||||
* the index to value mapping is done by adding 1 to the index.
|
||||
*/
|
||||
|
||||
writel((CPG_WEN_BIT | clk_src_266) << shift, priv->base + off);
|
||||
|
||||
/* Wait for the update done. */
|
||||
ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
|
||||
|
||||
spin_unlock_irqrestore(&priv->rmw_lock, flags);
|
||||
|
||||
if (ret)
|
||||
dev_err(priv->dev, "failed to switch to safe clk source\n");
|
||||
|
||||
return notifier_from_errno(ret);
|
||||
}
|
||||
|
||||
static int rzg2l_register_notifier(struct clk_hw *hw, const struct cpg_core_clk *core,
|
||||
struct rzg2l_cpg_priv *priv)
|
||||
{
|
||||
struct notifier_block *nb;
|
||||
|
||||
if (!core->notifier)
|
||||
return 0;
|
||||
|
||||
nb = devm_kzalloc(priv->dev, sizeof(*nb), GFP_KERNEL);
|
||||
if (!nb)
|
||||
return -ENOMEM;
|
||||
|
||||
nb->notifier_call = core->notifier;
|
||||
|
||||
return clk_notifier_register(hw->clk, nb);
|
||||
}
|
||||
|
||||
static struct clk * __init
|
||||
rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
|
||||
struct clk **clks,
|
||||
@@ -183,63 +275,44 @@ rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
|
||||
|
||||
static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
|
||||
{
|
||||
struct sd_hw_data *hwdata = to_sd_hw_data(hw);
|
||||
struct rzg2l_cpg_priv *priv = hwdata->priv;
|
||||
u32 off = GET_REG_OFFSET(hwdata->conf);
|
||||
u32 shift = GET_SHIFT(hwdata->conf);
|
||||
const u32 clk_src_266 = 2;
|
||||
u32 msk, val, bitmask;
|
||||
struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
|
||||
struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
|
||||
struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
|
||||
u32 off = GET_REG_OFFSET(clk_hw_data->conf);
|
||||
u32 shift = GET_SHIFT(clk_hw_data->conf);
|
||||
unsigned long flags;
|
||||
u32 val;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* As per the HW manual, we should not directly switch from 533 MHz to
|
||||
* 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
|
||||
* to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
|
||||
* and then switch to the target setting (2’b01 (533 MHz) or 2’b10
|
||||
* (400 MHz)).
|
||||
* Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
|
||||
* switching register is prohibited.
|
||||
* The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
|
||||
* the index to value mapping is done by adding 1 to the index.
|
||||
*/
|
||||
bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
|
||||
msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
|
||||
val = clk_mux_index_to_val(sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, index);
|
||||
|
||||
spin_lock_irqsave(&priv->rmw_lock, flags);
|
||||
if (index != clk_src_266) {
|
||||
writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
|
||||
|
||||
ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
|
||||
!(val & msk), 10,
|
||||
CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
}
|
||||
writel((CPG_WEN_BIT | val) << shift, priv->base + off);
|
||||
|
||||
writel(bitmask | ((index + 1) << shift), priv->base + off);
|
||||
/* Wait for the update done. */
|
||||
ret = rzg2l_cpg_wait_clk_update_done(priv->base, clk_hw_data->sconf);
|
||||
|
||||
ret = readl_poll_timeout_atomic(priv->base + CPG_CLKSTATUS, val,
|
||||
!(val & msk), 10,
|
||||
CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
|
||||
unlock:
|
||||
spin_unlock_irqrestore(&priv->rmw_lock, flags);
|
||||
|
||||
if (ret)
|
||||
dev_err(priv->dev, "failed to switch clk source\n");
|
||||
dev_err(priv->dev, "Failed to switch parent\n");
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
|
||||
{
|
||||
struct sd_hw_data *hwdata = to_sd_hw_data(hw);
|
||||
struct rzg2l_cpg_priv *priv = hwdata->priv;
|
||||
u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));
|
||||
struct clk_hw_data *clk_hw_data = to_clk_hw_data(hw);
|
||||
struct sd_mux_hw_data *sd_mux_hw_data = to_sd_mux_hw_data(clk_hw_data);
|
||||
struct rzg2l_cpg_priv *priv = clk_hw_data->priv;
|
||||
u32 val;
|
||||
|
||||
val >>= GET_SHIFT(hwdata->conf);
|
||||
val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
|
||||
val = readl(priv->base + GET_REG_OFFSET(clk_hw_data->conf));
|
||||
val >>= GET_SHIFT(clk_hw_data->conf);
|
||||
val &= GENMASK(GET_WIDTH(clk_hw_data->conf) - 1, 0);
|
||||
|
||||
return val ? val - 1 : 0;
|
||||
return clk_mux_val_to_index(hw, sd_mux_hw_data->mtable, CLK_MUX_ROUND_CLOSEST, val);
|
||||
}
|
||||
|
||||
static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
|
||||
@@ -253,31 +326,40 @@ rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
|
||||
void __iomem *base,
|
||||
struct rzg2l_cpg_priv *priv)
|
||||
{
|
||||
struct sd_hw_data *clk_hw_data;
|
||||
struct sd_mux_hw_data *sd_mux_hw_data;
|
||||
struct clk_init_data init;
|
||||
struct clk_hw *clk_hw;
|
||||
int ret;
|
||||
|
||||
clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
|
||||
if (!clk_hw_data)
|
||||
sd_mux_hw_data = devm_kzalloc(priv->dev, sizeof(*sd_mux_hw_data), GFP_KERNEL);
|
||||
if (!sd_mux_hw_data)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
clk_hw_data->priv = priv;
|
||||
clk_hw_data->conf = core->conf;
|
||||
sd_mux_hw_data->hw_data.priv = priv;
|
||||
sd_mux_hw_data->hw_data.conf = core->conf;
|
||||
sd_mux_hw_data->hw_data.sconf = core->sconf;
|
||||
sd_mux_hw_data->mtable = core->mtable;
|
||||
|
||||
init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
|
||||
init.ops = &rzg2l_cpg_sd_clk_mux_ops;
|
||||
init.flags = 0;
|
||||
init.flags = core->flag;
|
||||
init.num_parents = core->num_parents;
|
||||
init.parent_names = core->parent_names;
|
||||
|
||||
clk_hw = &clk_hw_data->hw;
|
||||
clk_hw = &sd_mux_hw_data->hw_data.hw;
|
||||
clk_hw->init = &init;
|
||||
|
||||
ret = devm_clk_hw_register(priv->dev, clk_hw);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
ret = rzg2l_register_notifier(clk_hw, core, priv);
|
||||
if (ret) {
|
||||
dev_err(priv->dev, "Failed to register notifier for %s\n",
|
||||
core->name);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
return clk_hw->clk;
|
||||
}
|
||||
|
||||
|
@@ -9,6 +9,8 @@
|
||||
#ifndef __RENESAS_RZG2L_CPG_H__
|
||||
#define __RENESAS_RZG2L_CPG_H__
|
||||
|
||||
#include <linux/notifier.h>
|
||||
|
||||
#define CPG_SIPLL5_STBY (0x140)
|
||||
#define CPG_SIPLL5_CLK1 (0x144)
|
||||
#define CPG_SIPLL5_CLK3 (0x14C)
|
||||
@@ -19,7 +21,6 @@
|
||||
#define CPG_PL2_DDIV (0x204)
|
||||
#define CPG_PL3A_DDIV (0x208)
|
||||
#define CPG_PL6_DDIV (0x210)
|
||||
#define CPG_PL2SDHI_DSEL (0x218)
|
||||
#define CPG_CLKSTATUS (0x280)
|
||||
#define CPG_PL3_SSEL (0x408)
|
||||
#define CPG_PL6_SSEL (0x414)
|
||||
@@ -43,8 +44,6 @@
|
||||
#define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
|
||||
#define CPG_CLKSTATUS_SELSDHI1_STS BIT(29)
|
||||
|
||||
#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 200
|
||||
|
||||
/* n = 0/1/2 for PLL1/4/6 */
|
||||
#define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n))
|
||||
#define CPG_SAMPLL_CLK2(n) (0x08 + (16 * n))
|
||||
@@ -69,9 +68,6 @@
|
||||
#define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1)
|
||||
#define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1)
|
||||
|
||||
#define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2)
|
||||
#define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2)
|
||||
|
||||
#define EXTAL_FREQ_IN_MEGA_HZ (24)
|
||||
|
||||
/**
|
||||
@@ -90,10 +86,13 @@ struct cpg_core_clk {
|
||||
unsigned int mult;
|
||||
unsigned int type;
|
||||
unsigned int conf;
|
||||
unsigned int sconf;
|
||||
const struct clk_div_table *dtable;
|
||||
const u32 *mtable;
|
||||
const char * const *parent_names;
|
||||
int flag;
|
||||
int mux_flags;
|
||||
notifier_fn_t notifier;
|
||||
u32 flag;
|
||||
u32 mux_flags;
|
||||
int num_parents;
|
||||
};
|
||||
|
||||
@@ -151,10 +150,11 @@ enum clk_types {
|
||||
.parent_names = _parent_names, \
|
||||
.num_parents = ARRAY_SIZE(_parent_names), \
|
||||
.mux_flags = CLK_MUX_READ_ONLY)
|
||||
#define DEF_SD_MUX(_name, _id, _conf, _parent_names) \
|
||||
DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, \
|
||||
#define DEF_SD_MUX(_name, _id, _conf, _sconf, _parent_names, _mtable, _clk_flags, _notifier) \
|
||||
DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, .sconf = _sconf, \
|
||||
.parent_names = _parent_names, \
|
||||
.num_parents = ARRAY_SIZE(_parent_names))
|
||||
.num_parents = ARRAY_SIZE(_parent_names), \
|
||||
.mtable = _mtable, .flag = _clk_flags, .notifier = _notifier)
|
||||
#define DEF_PLL5_FOUTPOSTDIV(_name, _id, _parent) \
|
||||
DEF_TYPE(_name, _id, CLK_TYPE_SIPLL5, .parent = _parent)
|
||||
#define DEF_PLL5_4_MUX(_name, _id, _conf, _parent_names) \
|
||||
@@ -273,4 +273,6 @@ extern const struct rzg2l_cpg_info r9a07g044_cpg_info;
|
||||
extern const struct rzg2l_cpg_info r9a07g054_cpg_info;
|
||||
extern const struct rzg2l_cpg_info r9a09g011_cpg_info;
|
||||
|
||||
int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block *nb, unsigned long event, void *data);
|
||||
|
||||
#endif
|
||||
|
@@ -142,7 +142,7 @@ static const struct iio_chan_spec ad7768_channels[] = {
.channel = 0,
.scan_index = 0,
.scan_type = {
.sign = 'u',
.sign = 's',
.realbits = 24,
.storagebits = 32,
.shift = 8,

@@ -370,12 +370,11 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
return ret;

ret = ad7768_scan_direct(indio_dev);
if (ret >= 0)
*val = ret;

iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
*val = sign_extend32(ret, chan->scan_type.realbits - 1);

return IIO_VAL_INT;
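The scan type is now declared signed and the raw read path sign-extends bit 23. A minimal illustration of that arithmetic (not taken from the patch; the helper name is hypothetical):

#include <linux/bitops.h>
#include <linux/types.h>

/* Hypothetical helper, only to show what sign_extend32() does for the
 * 24-bit AD7768-1 samples: realbits = 24, so bit 23 is the sign bit. */
static s32 ad7768_example_extend(u32 raw24)
{
	return sign_extend32(raw24, 23);
}

/*
 * ad7768_example_extend(0x7fffff) ==  8388607  (largest positive code)
 * ad7768_example_extend(0x800000) == -8388608  (most negative code)
 */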
@@ -59,6 +59,12 @@ static void vimc_streamer_pipeline_terminate(struct vimc_stream *stream)
continue;

sd = media_entity_to_v4l2_subdev(ved->ent);
/*
* Do not call .s_stream() to stop an already
* stopped/unstarted subdev.
*/
if (!v4l2_subdev_is_streaming(sd))
continue;
v4l2_subdev_call(sd, video, s_stream, 0);
}
}
@@ -363,12 +363,8 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
|
||||
* The .s_stream() operation must never be called to start or stop an
|
||||
* already started or stopped subdev. Catch offenders but don't return
|
||||
* an error yet to avoid regressions.
|
||||
*
|
||||
* As .s_stream() is mutually exclusive with the .enable_streams() and
|
||||
* .disable_streams() operation, we can use the enabled_streams field
|
||||
* to store the subdev streaming state.
|
||||
*/
|
||||
if (WARN_ON(!!sd->enabled_streams == !!enable))
|
||||
if (WARN_ON(sd->s_stream_enabled == !!enable))
|
||||
return 0;
|
||||
|
||||
ret = sd->ops->video->s_stream(sd, enable);
|
||||
@@ -379,7 +375,7 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
|
||||
}
|
||||
|
||||
if (!ret) {
|
||||
sd->enabled_streams = enable ? BIT(0) : 0;
|
||||
sd->s_stream_enabled = enable;
|
||||
|
||||
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
|
||||
if (!IS_ERR_OR_NULL(sd->privacy_led)) {
|
||||
@@ -1929,37 +1925,43 @@ static int v4l2_subdev_enable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
|
||||
u64 streams_mask)
|
||||
{
|
||||
struct device *dev = sd->entity.graph_obj.mdev->dev;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* The subdev doesn't implement pad-based stream enable, fall back
|
||||
* on the .s_stream() operation. This can only be done for subdevs that
|
||||
* have a single source pad, as sd->enabled_streams is global to the
|
||||
* subdev.
|
||||
* to the .s_stream() operation.
|
||||
*/
|
||||
if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
for (i = 0; i < sd->entity.num_pads; ++i) {
|
||||
if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
|
||||
/*
|
||||
* .s_stream() means there is no streams support, so the only allowed
|
||||
* stream is the implicit stream 0.
|
||||
*/
|
||||
if (streams_mask != BIT_ULL(0))
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (sd->enabled_streams & streams_mask) {
|
||||
dev_dbg(dev, "set of streams %#llx already enabled on %s:%u\n",
|
||||
streams_mask, sd->entity.name, pad);
|
||||
/*
|
||||
* We use a 64-bit bitmask for tracking enabled pads, so only subdevices
|
||||
* with 64 pads or less can be supported.
|
||||
*/
|
||||
if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (sd->enabled_pads & BIT_ULL(pad)) {
|
||||
dev_dbg(dev, "pad %u already enabled on %s\n",
|
||||
pad, sd->entity.name);
|
||||
return -EALREADY;
|
||||
}
|
||||
|
||||
/* Start streaming when the first streams are enabled. */
|
||||
if (!sd->enabled_streams) {
|
||||
/* Start streaming when the first pad is enabled. */
|
||||
if (!sd->enabled_pads) {
|
||||
ret = v4l2_subdev_call(sd, video, s_stream, 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
sd->enabled_streams |= streams_mask;
|
||||
sd->enabled_pads |= BIT_ULL(pad);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -2046,37 +2048,43 @@ static int v4l2_subdev_disable_streams_fallback(struct v4l2_subdev *sd, u32 pad,
|
||||
u64 streams_mask)
|
||||
{
|
||||
struct device *dev = sd->entity.graph_obj.mdev->dev;
|
||||
unsigned int i;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* If the subdev doesn't implement pad-based stream enable, fall back
|
||||
* on the .s_stream() operation. This can only be done for subdevs that
|
||||
* have a single source pad, as sd->enabled_streams is global to the
|
||||
* subdev.
|
||||
* to the .s_stream() operation.
|
||||
*/
|
||||
if (!(sd->entity.pads[pad].flags & MEDIA_PAD_FL_SOURCE))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
for (i = 0; i < sd->entity.num_pads; ++i) {
|
||||
if (i != pad && sd->entity.pads[i].flags & MEDIA_PAD_FL_SOURCE)
|
||||
/*
|
||||
* .s_stream() means there is no streams support, so the only allowed
|
||||
* stream is the implicit stream 0.
|
||||
*/
|
||||
if (streams_mask != BIT_ULL(0))
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if ((sd->enabled_streams & streams_mask) != streams_mask) {
|
||||
dev_dbg(dev, "set of streams %#llx already disabled on %s:%u\n",
|
||||
streams_mask, sd->entity.name, pad);
|
||||
/*
|
||||
* We use a 64-bit bitmask for tracking enabled pads, so only subdevices
|
||||
* with 64 pads or less can be supported.
|
||||
*/
|
||||
if (pad >= sizeof(sd->enabled_pads) * BITS_PER_BYTE)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!(sd->enabled_pads & BIT_ULL(pad))) {
|
||||
dev_dbg(dev, "pad %u already disabled on %s\n",
|
||||
pad, sd->entity.name);
|
||||
return -EALREADY;
|
||||
}
|
||||
|
||||
/* Stop streaming when the last streams are disabled. */
|
||||
if (!(sd->enabled_streams & ~streams_mask)) {
|
||||
if (!(sd->enabled_pads & ~BIT_ULL(pad))) {
|
||||
ret = v4l2_subdev_call(sd, video, s_stream, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
sd->enabled_streams &= ~streams_mask;
|
||||
sd->enabled_pads &= ~BIT_ULL(pad);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -2232,6 +2240,31 @@ void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
|
||||
|
||||
bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd)
|
||||
{
|
||||
struct v4l2_subdev_state *state;
|
||||
|
||||
if (!v4l2_subdev_has_op(sd, pad, enable_streams))
|
||||
return sd->s_stream_enabled;
|
||||
|
||||
if (!(sd->flags & V4L2_SUBDEV_FL_STREAMS))
|
||||
return !!sd->enabled_pads;
|
||||
|
||||
state = v4l2_subdev_get_locked_active_state(sd);
|
||||
|
||||
for (unsigned int i = 0; i < state->stream_configs.num_configs; ++i) {
|
||||
const struct v4l2_subdev_stream_config *cfg;
|
||||
|
||||
cfg = &state->stream_configs.configs[i];
|
||||
|
||||
if (cfg->enabled)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(v4l2_subdev_is_streaming);
|
||||
|
||||
int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
|
||||
{
|
||||
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
|
||||
|
@@ -1866,7 +1866,7 @@ static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
return 0;

ice = of_qcom_ice_get(dev);
ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
@@ -5071,8 +5071,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
|
||||
.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
|
||||
.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
|
||||
.reset = mv88e6352_g1_reset,
|
||||
.vtu_getnext = mv88e6185_g1_vtu_getnext,
|
||||
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
|
||||
.vtu_getnext = mv88e6352_g1_vtu_getnext,
|
||||
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
|
||||
.gpio_ops = &mv88e6352_gpio_ops,
|
||||
.avb_ops = &mv88e6352_avb_ops,
|
||||
.ptp_ops = &mv88e6352_ptp_ops,
|
||||
@@ -5120,8 +5120,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
|
||||
.hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
|
||||
.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
|
||||
.reset = mv88e6352_g1_reset,
|
||||
.vtu_getnext = mv88e6185_g1_vtu_getnext,
|
||||
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
|
||||
.vtu_getnext = mv88e6352_g1_vtu_getnext,
|
||||
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
|
||||
.gpio_ops = &mv88e6352_gpio_ops,
|
||||
.avb_ops = &mv88e6352_avb_ops,
|
||||
.ptp_ops = &mv88e6352_ptp_ops,
|
||||
@@ -6114,7 +6114,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
|
||||
.num_databases = 4096,
|
||||
.num_macs = 8192,
|
||||
.num_ports = 7,
|
||||
.num_internal_phys = 5,
|
||||
.num_internal_phys = 2,
|
||||
.internal_phys_offset = 3,
|
||||
.num_gpio = 15,
|
||||
.max_vid = 4095,
|
||||
.port_base_addr = 0x10,
|
||||
@@ -6139,7 +6140,8 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
|
||||
.num_databases = 4096,
|
||||
.num_macs = 8192,
|
||||
.num_ports = 7,
|
||||
.num_internal_phys = 5,
|
||||
.num_internal_phys = 2,
|
||||
.internal_phys_offset = 3,
|
||||
.num_gpio = 15,
|
||||
.max_vid = 4095,
|
||||
.port_base_addr = 0x10,
|
||||
|
@@ -262,25 +262,22 @@ static int adjust_local_phandle_references(struct device_node *local_fixups,
*/
int of_resolve_phandles(struct device_node *overlay)
{
struct device_node *child, *local_fixups, *refnode;
struct device_node *tree_symbols, *overlay_fixups;
struct device_node *child, *refnode;
struct device_node *overlay_fixups;
struct device_node __free(device_node) *local_fixups = NULL;
struct property *prop;
const char *refpath;
phandle phandle, phandle_delta;
int err;

tree_symbols = NULL;

if (!overlay) {
pr_err("null overlay\n");
err = -EINVAL;
goto out;
return -EINVAL;
}

if (!of_node_check_flag(overlay, OF_DETACHED)) {
pr_err("overlay not detached\n");
err = -EINVAL;
goto out;
return -EINVAL;
}

phandle_delta = live_tree_max_phandle() + 1;

@@ -292,7 +289,7 @@ int of_resolve_phandles(struct device_node *overlay)

err = adjust_local_phandle_references(local_fixups, overlay, phandle_delta);
if (err)
goto out;
return err;

overlay_fixups = NULL;

@@ -301,16 +298,13 @@ int of_resolve_phandles(struct device_node *overlay)
overlay_fixups = child;
}

if (!overlay_fixups) {
err = 0;
goto out;
}
if (!overlay_fixups)
return 0;

tree_symbols = of_find_node_by_path("/__symbols__");
struct device_node __free(device_node) *tree_symbols = of_find_node_by_path("/__symbols__");
if (!tree_symbols) {
pr_err("no symbols in root of device tree.\n");
err = -EINVAL;
goto out;
return -EINVAL;
}

for_each_property_of_node(overlay_fixups, prop) {

@@ -324,14 +318,12 @@ int of_resolve_phandles(struct device_node *overlay)
if (err) {
pr_err("node label '%s' not found in live devicetree symbols table\n",
prop->name);
goto out;
return err;
}

refnode = of_find_node_by_path(refpath);
if (!refnode) {
err = -ENOENT;
goto out;
}
if (!refnode)
return -ENOENT;

phandle = refnode->phandle;
of_node_put(refnode);

@@ -341,11 +333,8 @@ int of_resolve_phandles(struct device_node *overlay)
break;
}

out:
if (err)
pr_err("overlay phandle fixup failed: %d\n", err);
of_node_put(tree_symbols);

return err;
}
EXPORT_SYMBOL_GPL(of_resolve_phandles);
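The rewrite above relies on the scope-based cleanup helpers from <linux/cleanup.h>: a pointer annotated with __free(device_node) has of_node_put() run on it automatically when it goes out of scope, which is what lets each error path return directly instead of jumping to a shared "out:" label. A minimal sketch of the pattern, with a hypothetical helper name (count_children is not part of the patch):

#include <linux/cleanup.h>
#include <linux/of.h>

/* Hypothetical example: count the children of a node looked up by path. */
static int count_children(const char *path)
{
	/* of_node_put(np) runs automatically on every return path. */
	struct device_node __free(device_node) *np = of_find_node_by_path(path);
	struct device_node *child;
	int n = 0;

	if (!np)
		return -ENOENT;

	for_each_child_of_node(np, child)
		n++;

	return n;
}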
@@ -885,6 +885,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
resource_size_t offset, next_offset;
LIST_HEAD(resources);
struct resource *res, *next_res;
bool bus_registered = false;
char addr[64], *fmt;
const char *name;
int err;

@@ -948,6 +949,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
name = dev_name(&bus->dev);

err = device_register(&bus->dev);
bus_registered = true;
if (err)
goto unregister;

@@ -1031,12 +1033,15 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
unregister:
put_device(&bridge->dev);
device_del(&bridge->dev);

free:
#ifdef CONFIG_PCI_DOMAINS_GENERIC
pci_bus_release_domain_nr(bus, parent);
#endif
if (bus_registered)
put_device(&bus->dev);
else
kfree(bus);

return err;
}
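The bus_registered flag encodes a general driver-core rule: once device_register() (or device_initialize()) has run, the embedded kobject owns the allocation, so an error path must drop the reference with put_device() and let the release callback free it; only before that point is a plain kfree() safe. A hedged sketch of that rule with a made-up example device (foo_dev and foo_create are not from the patch):

#include <linux/device.h>
#include <linux/slab.h>

struct foo_dev {			/* hypothetical example type */
	struct device dev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_dev, dev));
}

static int foo_create(struct device *parent, int id)
{
	struct foo_dev *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	int err;

	if (!foo)
		return -ENOMEM;

	device_initialize(&foo->dev);		/* kobject now owns 'foo' */
	foo->dev.parent = parent;
	foo->dev.release = foo_release;

	err = dev_set_name(&foo->dev, "foo%d", id);
	if (!err)
		err = device_add(&foo->dev);
	if (err) {
		/* Not kfree(foo): drop the reference, foo_release() frees it. */
		put_device(&foo->dev);
		return err;
	}

	return 0;
}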
@@ -10,6 +10,7 @@
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_platform.h>
|
||||
@@ -328,6 +329,53 @@ out:
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(of_qcom_ice_get);
|
||||
|
||||
static void qcom_ice_put(const struct qcom_ice *ice)
|
||||
{
|
||||
struct platform_device *pdev = to_platform_device(ice->dev);
|
||||
|
||||
if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice"))
|
||||
platform_device_put(pdev);
|
||||
}
|
||||
|
||||
static void devm_of_qcom_ice_put(struct device *dev, void *res)
|
||||
{
|
||||
qcom_ice_put(*(struct qcom_ice **)res);
|
||||
}
|
||||
|
||||
/**
|
||||
* devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from
|
||||
* a DT node.
|
||||
* @dev: device pointer for the consumer device.
|
||||
*
|
||||
* This function will provide an ICE instance either by creating one for the
|
||||
* consumer device if its DT node provides the 'ice' reg range and the 'ice'
|
||||
* clock (for legacy DT style). On the other hand, if consumer provides a
|
||||
* phandle via 'qcom,ice' property to an ICE DT, the ICE instance will already
|
||||
* be created and so this function will return that instead.
|
||||
*
|
||||
* Return: ICE pointer on success, NULL if there is no ICE data provided by the
|
||||
* consumer or ERR_PTR() on error.
|
||||
*/
|
||||
struct qcom_ice *devm_of_qcom_ice_get(struct device *dev)
|
||||
{
|
||||
struct qcom_ice *ice, **dr;
|
||||
|
||||
dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL);
|
||||
if (!dr)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ice = of_qcom_ice_get(dev);
|
||||
if (!IS_ERR_OR_NULL(ice)) {
|
||||
*dr = ice;
|
||||
devres_add(dev, dr);
|
||||
} else {
|
||||
devres_free(dr);
|
||||
}
|
||||
|
||||
return ice;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get);
|
||||
|
||||
static int qcom_ice_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct qcom_ice *engine;
|
||||
|
@@ -121,7 +121,7 @@ static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
struct device *dev = hba->dev;
struct qcom_ice *ice;

ice = of_qcom_ice_get(dev);
ice = devm_of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
@@ -1038,10 +1038,11 @@ struct v4l2_subdev_platform_data {
|
||||
* @active_state: Active state for the subdev (NULL for subdevs tracking the
|
||||
* state internally). Initialized by calling
|
||||
* v4l2_subdev_init_finalize().
|
||||
* @enabled_streams: Bitmask of enabled streams used by
|
||||
* v4l2_subdev_enable_streams() and
|
||||
* v4l2_subdev_disable_streams() helper functions for fallback
|
||||
* cases.
|
||||
* @enabled_pads: Bitmask of enabled pads used by v4l2_subdev_enable_streams()
|
||||
* and v4l2_subdev_disable_streams() helper functions for
|
||||
* fallback cases.
|
||||
* @s_stream_enabled: Tracks whether streaming has been enabled with s_stream.
|
||||
* This is only for call_s_stream() internal use.
|
||||
*
|
||||
* Each instance of a subdev driver should create this struct, either
|
||||
* stand-alone or embedded in a larger struct.
|
||||
@@ -1089,7 +1090,8 @@ struct v4l2_subdev {
|
||||
* doesn't support it.
|
||||
*/
|
||||
struct v4l2_subdev_state *active_state;
|
||||
u64 enabled_streams;
|
||||
u64 enabled_pads;
|
||||
bool s_stream_enabled;
|
||||
};
|
||||
|
||||
|
||||
@@ -1916,4 +1918,17 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
|
||||
void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
|
||||
const struct v4l2_event *ev);
|
||||
|
||||
/**
|
||||
* v4l2_subdev_is_streaming() - Returns if the subdevice is streaming
|
||||
* @sd: The subdevice
|
||||
*
|
||||
* v4l2_subdev_is_streaming() tells if the subdevice is currently streaming.
|
||||
* "Streaming" here means whether .s_stream() or .enable_streams() has been
|
||||
* successfully called, and the streaming has not yet been disabled.
|
||||
*
|
||||
* If the subdevice implements .enable_streams() this function must be called
|
||||
* while holding the active state lock.
|
||||
*/
|
||||
bool v4l2_subdev_is_streaming(struct v4l2_subdev *sd);
|
||||
|
||||
#endif /* _V4L2_SUBDEV_H */
|
||||
|
@@ -34,4 +34,6 @@ int qcom_ice_program_key(struct qcom_ice *ice,
int slot);
int qcom_ice_evict_key(struct qcom_ice *ice, int slot);
struct qcom_ice *of_qcom_ice_get(struct device *dev);
struct qcom_ice *devm_of_qcom_ice_get(struct device *dev);

#endif /* __QCOM_ICE_H__ */
@@ -119,6 +119,14 @@
trace_print_array_seq(p, array, count, el_size); \
})

#undef __print_dynamic_array
#define __print_dynamic_array(array, el_size) \
({ \
__print_array(__get_dynamic_array(array), \
__get_dynamic_array_len(array) / (el_size), \
(el_size)); \
})

#undef __print_hex_dump
#define __print_hex_dump(prefix_str, prefix_type, \
rowsize, groupsize, buf, len, ascii) \
@@ -22,6 +22,7 @@
#undef __get_rel_cpumask
#undef __get_rel_sockaddr
#undef __print_array
#undef __print_dynamic_array
#undef __print_hex_dump
#undef __get_buf
@@ -256,6 +256,7 @@ comment "Do not forget to sign required modules with scripts/sign-file"
choice
prompt "Which hash algorithm should modules be signed with?"
depends on MODULE_SIG || IMA_APPRAISE_MODSIG
default MODULE_SIG_SHA512
help
This determines which sort of hashing algorithm will be used during
signature generation. This algorithm _must_ be built into the kernel
@@ -458,6 +458,7 @@ static void test_event_printk(struct trace_event_call *call)
case '%':
continue;
case 'p':
do_pointer:
/* Find dereferencing fields */
switch (fmt[i + 1]) {
case 'B': case 'R': case 'r':

@@ -486,6 +487,12 @@ static void test_event_printk(struct trace_event_call *call)
continue;
if (fmt[i + j] == '*') {
star = true;
/* Handle %*pbl case */
if (!j && fmt[i + 1] == 'p') {
arg++;
i++;
goto do_pointer;
}
continue;
}
if ((fmt[i + j] == 's')) {
@@ -302,6 +302,7 @@ TRACE_EVENT(foo_bar,
|
||||
__bitmask( cpus, num_possible_cpus() )
|
||||
__cpumask( cpum )
|
||||
__vstring( vstr, fmt, va )
|
||||
__string_len( lstr, foo, bar / 2 < strlen(foo) ? bar / 2 : strlen(foo) )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
@@ -310,12 +311,14 @@ TRACE_EVENT(foo_bar,
|
||||
memcpy(__get_dynamic_array(list), lst,
|
||||
__length_of(lst) * sizeof(int));
|
||||
__assign_str(str, string);
|
||||
__assign_str(lstr, foo);
|
||||
__assign_vstr(vstr, fmt, va);
|
||||
__assign_bitmask(cpus, cpumask_bits(mask), num_possible_cpus());
|
||||
__assign_cpumask(cpum, cpumask_bits(mask));
|
||||
),
|
||||
|
||||
TP_printk("foo %s %d %s %s %s %s (%s) (%s) %s", __entry->foo, __entry->bar,
|
||||
TP_printk("foo %s %d %s %s %s %s %s %s (%s) (%s) %s [%d] %*pbl",
|
||||
__entry->foo, __entry->bar,
|
||||
|
||||
/*
|
||||
* Notice here the use of some helper functions. This includes:
|
||||
@@ -359,8 +362,17 @@ TRACE_EVENT(foo_bar,
|
||||
__print_array(__get_dynamic_array(list),
|
||||
__get_dynamic_array_len(list) / sizeof(int),
|
||||
sizeof(int)),
|
||||
__get_str(str), __get_bitmask(cpus), __get_cpumask(cpum),
|
||||
__get_str(vstr))
|
||||
|
||||
/* A shortcut is to use __print_dynamic_array for dynamic arrays */
|
||||
|
||||
__print_dynamic_array(list, sizeof(int)),
|
||||
|
||||
__get_str(str), __get_str(lstr),
|
||||
__get_bitmask(cpus), __get_cpumask(cpum),
|
||||
__get_str(vstr),
|
||||
__get_dynamic_array_len(cpus),
|
||||
__get_dynamic_array_len(cpus),
|
||||
__get_dynamic_array(cpus))
|
||||
);
|
||||
|
||||
/*
|
||||
|
@@ -343,4 +343,4 @@ module_platform_driver(apq8016_sbc_platform_driver);
|
||||
|
||||
MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
|
||||
MODULE_DESCRIPTION("APQ8016 ASoC Machine Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -142,4 +142,4 @@ static struct platform_driver msm_snd_apq8096_driver = {
|
||||
module_platform_driver(msm_snd_apq8096_driver);
|
||||
MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
|
||||
MODULE_DESCRIPTION("APQ8096 ASoC Machine Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -239,4 +239,4 @@ int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_snd_wcd_jack_setup);
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -300,10 +300,10 @@ static struct platform_driver apq8016_lpass_cpu_platform_driver = {
|
||||
.of_match_table = of_match_ptr(apq8016_lpass_cpu_device_id),
|
||||
},
|
||||
.probe = asoc_qcom_lpass_cpu_platform_probe,
|
||||
.remove = asoc_qcom_lpass_cpu_platform_remove,
|
||||
.remove_new = asoc_qcom_lpass_cpu_platform_remove,
|
||||
};
|
||||
module_platform_driver(apq8016_lpass_cpu_platform_driver);
|
||||
|
||||
MODULE_DESCRIPTION("APQ8016 LPASS CPU Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
|
@@ -1284,15 +1284,12 @@ err:
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_probe);
|
||||
|
||||
int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev)
|
||||
void asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct lpass_data *drvdata = platform_get_drvdata(pdev);
|
||||
|
||||
if (drvdata->variant->exit)
|
||||
drvdata->variant->exit(pdev);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_remove);
|
||||
|
||||
@@ -1307,4 +1304,4 @@ void asoc_qcom_lpass_cpu_platform_shutdown(struct platform_device *pdev)
|
||||
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_platform_shutdown);
|
||||
|
||||
MODULE_DESCRIPTION("QTi LPASS CPU Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -251,4 +251,4 @@ const struct snd_soc_dai_ops asoc_qcom_lpass_hdmi_dai_ops = {
|
||||
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_hdmi_dai_ops);
|
||||
|
||||
MODULE_DESCRIPTION("QTi LPASS HDMI Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -172,9 +172,9 @@ static struct platform_driver ipq806x_lpass_cpu_platform_driver = {
|
||||
.of_match_table = of_match_ptr(ipq806x_lpass_cpu_device_id),
|
||||
},
|
||||
.probe = asoc_qcom_lpass_cpu_platform_probe,
|
||||
.remove = asoc_qcom_lpass_cpu_platform_remove,
|
||||
.remove_new = asoc_qcom_lpass_cpu_platform_remove,
|
||||
};
|
||||
module_platform_driver(ipq806x_lpass_cpu_platform_driver);
|
||||
|
||||
MODULE_DESCRIPTION("QTi LPASS CPU Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -1383,4 +1383,4 @@ int asoc_qcom_lpass_platform_register(struct platform_device *pdev)
|
||||
EXPORT_SYMBOL_GPL(asoc_qcom_lpass_platform_register);
|
||||
|
||||
MODULE_DESCRIPTION("QTi LPASS Platform Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -315,11 +315,11 @@ static struct platform_driver sc7180_lpass_cpu_platform_driver = {
|
||||
.pm = &sc7180_lpass_pm_ops,
|
||||
},
|
||||
.probe = asoc_qcom_lpass_cpu_platform_probe,
|
||||
.remove = asoc_qcom_lpass_cpu_platform_remove,
|
||||
.remove_new = asoc_qcom_lpass_cpu_platform_remove,
|
||||
.shutdown = asoc_qcom_lpass_cpu_platform_shutdown,
|
||||
};
|
||||
|
||||
module_platform_driver(sc7180_lpass_cpu_platform_driver);
|
||||
|
||||
MODULE_DESCRIPTION("SC7180 LPASS CPU DRIVER");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -445,7 +445,7 @@ static struct platform_driver sc7280_lpass_cpu_platform_driver = {
|
||||
.pm = &sc7280_lpass_pm_ops,
|
||||
},
|
||||
.probe = asoc_qcom_lpass_cpu_platform_probe,
|
||||
.remove = asoc_qcom_lpass_cpu_platform_remove,
|
||||
.remove_new = asoc_qcom_lpass_cpu_platform_remove,
|
||||
.shutdown = asoc_qcom_lpass_cpu_platform_shutdown,
|
||||
};
|
||||
|
||||
|
@@ -399,8 +399,8 @@ struct lpass_pcm_data {
|
||||
};
|
||||
|
||||
/* register the platform driver from the CPU DAI driver */
|
||||
int asoc_qcom_lpass_platform_register(struct platform_device *);
|
||||
int asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev);
|
||||
int asoc_qcom_lpass_platform_register(struct platform_device *pdev);
|
||||
void asoc_qcom_lpass_cpu_platform_remove(struct platform_device *pdev);
|
||||
void asoc_qcom_lpass_cpu_platform_shutdown(struct platform_device *pdev);
|
||||
int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev);
|
||||
extern const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops;
|
||||
|
@@ -64,20 +64,16 @@ struct q6apm_dai_rtd {
|
||||
phys_addr_t phys;
|
||||
unsigned int pcm_size;
|
||||
unsigned int pcm_count;
|
||||
unsigned int pos; /* Buffer position */
|
||||
unsigned int periods;
|
||||
unsigned int bytes_sent;
|
||||
unsigned int bytes_received;
|
||||
unsigned int copied_total;
|
||||
uint16_t bits_per_sample;
|
||||
uint16_t source; /* Encoding source bit mask */
|
||||
uint16_t session_id;
|
||||
snd_pcm_uframes_t queue_ptr;
|
||||
bool next_track;
|
||||
enum stream_state state;
|
||||
struct q6apm_graph *graph;
|
||||
spinlock_t lock;
|
||||
uint32_t initial_samples_drop;
|
||||
uint32_t trailing_samples_drop;
|
||||
bool notify_on_drain;
|
||||
};
|
||||
|
||||
@@ -127,25 +123,16 @@ static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, vo
|
||||
{
|
||||
struct q6apm_dai_rtd *prtd = priv;
|
||||
struct snd_pcm_substream *substream = prtd->substream;
|
||||
unsigned long flags;
|
||||
|
||||
switch (opcode) {
|
||||
case APM_CLIENT_EVENT_CMD_EOS_DONE:
|
||||
prtd->state = Q6APM_STREAM_STOPPED;
|
||||
break;
|
||||
case APM_CLIENT_EVENT_DATA_WRITE_DONE:
|
||||
spin_lock_irqsave(&prtd->lock, flags);
|
||||
prtd->pos += prtd->pcm_count;
|
||||
spin_unlock_irqrestore(&prtd->lock, flags);
|
||||
snd_pcm_period_elapsed(substream);
|
||||
if (prtd->state == Q6APM_STREAM_RUNNING)
|
||||
q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
|
||||
|
||||
break;
|
||||
case APM_CLIENT_EVENT_DATA_READ_DONE:
|
||||
spin_lock_irqsave(&prtd->lock, flags);
|
||||
prtd->pos += prtd->pcm_count;
|
||||
spin_unlock_irqrestore(&prtd->lock, flags);
|
||||
snd_pcm_period_elapsed(substream);
|
||||
if (prtd->state == Q6APM_STREAM_RUNNING)
|
||||
q6apm_read(prtd->graph);
|
||||
@@ -251,7 +238,6 @@ static int q6apm_dai_prepare(struct snd_soc_component *component,
|
||||
}
|
||||
|
||||
prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
|
||||
prtd->pos = 0;
|
||||
/* rate and channels are sent to audio driver */
|
||||
ret = q6apm_graph_media_format_shmem(prtd->graph, &cfg);
|
||||
if (ret < 0) {
|
||||
@@ -297,6 +283,27 @@ static int q6apm_dai_prepare(struct snd_soc_component *component,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int q6apm_dai_ack(struct snd_soc_component *component, struct snd_pcm_substream *substream)
|
||||
{
|
||||
struct snd_pcm_runtime *runtime = substream->runtime;
|
||||
struct q6apm_dai_rtd *prtd = runtime->private_data;
|
||||
int i, ret = 0, avail_periods;
|
||||
|
||||
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
|
||||
avail_periods = (runtime->control->appl_ptr - prtd->queue_ptr)/runtime->period_size;
|
||||
for (i = 0; i < avail_periods; i++) {
|
||||
ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, NO_TIMESTAMP);
|
||||
if (ret < 0) {
|
||||
dev_err(component->dev, "Error queuing playback buffer %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
prtd->queue_ptr += runtime->period_size;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int q6apm_dai_trigger(struct snd_soc_component *component,
|
||||
struct snd_pcm_substream *substream, int cmd)
|
||||
{
|
||||
@@ -308,9 +315,6 @@ static int q6apm_dai_trigger(struct snd_soc_component *component,
|
||||
case SNDRV_PCM_TRIGGER_START:
|
||||
case SNDRV_PCM_TRIGGER_RESUME:
|
||||
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
|
||||
/* start writing buffers for playback only as we already queued capture buffers */
|
||||
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
|
||||
ret = q6apm_write_async(prtd->graph, prtd->pcm_count, 0, 0, 0);
|
||||
break;
|
||||
case SNDRV_PCM_TRIGGER_STOP:
|
||||
/* TODO support be handled via SoftPause Module */
|
||||
@@ -432,16 +436,12 @@ static snd_pcm_uframes_t q6apm_dai_pointer(struct snd_soc_component *component,
|
||||
struct snd_pcm_runtime *runtime = substream->runtime;
|
||||
struct q6apm_dai_rtd *prtd = runtime->private_data;
|
||||
snd_pcm_uframes_t ptr;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&prtd->lock, flags);
|
||||
if (prtd->pos == prtd->pcm_size)
|
||||
prtd->pos = 0;
|
||||
ptr = q6apm_get_hw_pointer(prtd->graph, substream->stream) * runtime->period_size;
|
||||
if (ptr)
|
||||
return ptr - 1;
|
||||
|
||||
ptr = bytes_to_frames(runtime, prtd->pos);
|
||||
spin_unlock_irqrestore(&prtd->lock, flags);
|
||||
|
||||
return ptr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int q6apm_dai_hw_params(struct snd_soc_component *component,
|
||||
@@ -656,8 +656,6 @@ static int q6apm_dai_compr_set_params(struct snd_soc_component *component,
|
||||
prtd->pcm_size = runtime->fragments * runtime->fragment_size;
|
||||
prtd->bits_per_sample = 16;
|
||||
|
||||
prtd->pos = 0;
|
||||
|
||||
if (prtd->next_track != true) {
|
||||
memcpy(&prtd->codec, codec, sizeof(*codec));
|
||||
|
||||
@@ -721,14 +719,12 @@ static int q6apm_dai_compr_set_metadata(struct snd_soc_component *component,
|
||||
|
||||
switch (metadata->key) {
|
||||
case SNDRV_COMPRESS_ENCODER_PADDING:
|
||||
prtd->trailing_samples_drop = metadata->value[0];
|
||||
q6apm_remove_trailing_silence(component->dev, prtd->graph,
|
||||
prtd->trailing_samples_drop);
|
||||
metadata->value[0]);
|
||||
break;
|
||||
case SNDRV_COMPRESS_ENCODER_DELAY:
|
||||
prtd->initial_samples_drop = metadata->value[0];
|
||||
q6apm_remove_initial_silence(component->dev, prtd->graph,
|
||||
prtd->initial_samples_drop);
|
||||
metadata->value[0]);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
@@ -840,6 +836,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = {
|
||||
.hw_params = q6apm_dai_hw_params,
|
||||
.pointer = q6apm_dai_pointer,
|
||||
.trigger = q6apm_dai_trigger,
|
||||
.ack = q6apm_dai_ack,
|
||||
.compress_ops = &q6apm_dai_compress_ops,
|
||||
.use_dai_pcm_id = true,
|
||||
};
|
||||
|
@@ -545,6 +545,7 @@ static struct audioreach_module *audioreach_parse_common_tokens(struct q6apm *ap
|
||||
|
||||
if (mod) {
|
||||
int pn, id = 0;
|
||||
|
||||
mod->module_id = module_id;
|
||||
mod->max_ip_port = max_ip_port;
|
||||
mod->max_op_port = max_op_port;
|
||||
@@ -1271,7 +1272,7 @@ int audioreach_tplg_init(struct snd_soc_component *component)
|
||||
|
||||
ret = request_firmware(&fw, tplg_fw_name, dev);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "tplg firmware loading %s failed %d \n", tplg_fw_name, ret);
|
||||
dev_err(dev, "tplg firmware loading %s failed %d\n", tplg_fw_name, ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
|
@@ -428,4 +428,4 @@ static struct platform_driver sc7180_snd_driver = {
|
||||
module_platform_driver(sc7180_snd_driver);
|
||||
|
||||
MODULE_DESCRIPTION("sc7180 ASoC Machine Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -174,4 +174,4 @@ static struct platform_driver snd_sc8280xp_driver = {
|
||||
module_platform_driver(snd_sc8280xp_driver);
|
||||
MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
|
||||
MODULE_DESCRIPTION("SC8280XP ASoC Machine Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -625,4 +625,4 @@ static struct platform_driver sdm845_snd_driver = {
|
||||
module_platform_driver(sdm845_snd_driver);
|
||||
|
||||
MODULE_DESCRIPTION("sdm845 ASoC Machine Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -117,4 +117,4 @@ int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -170,4 +170,4 @@ static struct platform_driver snd_sm8250_driver = {
|
||||
module_platform_driver(snd_sm8250_driver);
|
||||
MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
|
||||
MODULE_DESCRIPTION("SM8250 ASoC Machine Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@@ -140,4 +140,4 @@ static struct platform_driver storm_platform_driver = {
|
||||
module_platform_driver(storm_platform_driver);
|
||||
|
||||
MODULE_DESCRIPTION("QTi IPQ806x-based Storm Machine Driver");
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_LICENSE("GPL");
|
||||
|