Merge af6cfcd0ef ("mm/hugetlb: unshare page tables during VMA split, not before") into android15-6.6-lts

Steps on the way to 6.6.95

Change-Id: I54cc4e48e80ae5d348fc23deb29ac91179e2e73d
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

@@ -6044,8 +6044,6 @@

Selecting 'on' will also enable the mitigation
against user space to user space task attacks.
Selecting specific mitigation does not force enable
user mitigations.

Selecting 'off' will disable both the kernel and
the user space protections.

@@ -48,6 +48,7 @@
#define CLKDM_NO_AUTODEPS (1 << 4)
#define CLKDM_ACTIVE_WITH_MPU (1 << 5)
#define CLKDM_MISSING_IDLE_REPORTING (1 << 6)
#define CLKDM_STANDBY_FORCE_WAKEUP BIT(7)

#define CLKDM_CAN_HWSUP (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
#define CLKDM_CAN_SWSUP (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)

@@ -19,7 +19,7 @@ static struct clockdomain l4ls_am33xx_clkdm = {
.pwrdm = { .name = "per_pwrdm" },
.cm_inst = AM33XX_CM_PER_MOD,
.clkdm_offs = AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET,
.flags = CLKDM_CAN_SWSUP,
.flags = CLKDM_CAN_SWSUP | CLKDM_STANDBY_FORCE_WAKEUP,
};

static struct clockdomain l3s_am33xx_clkdm = {

@@ -20,6 +20,9 @@
#include "cm-regbits-34xx.h"
#include "cm-regbits-33xx.h"
#include "prm33xx.h"
#if IS_ENABLED(CONFIG_SUSPEND)
#include <linux/suspend.h>
#endif

/*
* CLKCTRL_IDLEST_*: possible values for the CM_*_CLKCTRL.IDLEST bitfield:

@@ -328,8 +331,17 @@ static int am33xx_clkdm_clk_disable(struct clockdomain *clkdm)
{
bool hwsup = false;

#if IS_ENABLED(CONFIG_SUSPEND)
/*
* In case of standby, Don't put the l4ls clk domain to sleep.
* Since CM3 PM FW doesn't wake-up/enable the l4ls clk domain
* upon wake-up, CM3 PM FW fails to wake-up th MPU.
*/
if (pm_suspend_target_state == PM_SUSPEND_STANDBY &&
(clkdm->flags & CLKDM_STANDBY_FORCE_WAKEUP))
return 0;
#endif
hwsup = am33xx_cm_is_clkdm_in_hwsup(clkdm->cm_inst, clkdm->clkdm_offs);

if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP))
am33xx_clkdm_sleep(clkdm);

@@ -264,7 +264,11 @@ int __init omap4_cpcap_init(void)

static int __init cpcap_late_init(void)
{
omap4_vc_set_pmic_signaling(PWRDM_POWER_RET);
if (!of_find_compatible_node(NULL, NULL, "motorola,cpcap"))
return 0;

if (soc_is_omap443x() || soc_is_omap446x() || soc_is_omap447x())
omap4_vc_set_pmic_signaling(PWRDM_POWER_RET);

return 0;
}

@@ -515,7 +515,5 @@ void __init early_ioremap_init(void)
bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
unsigned long flags)
{
unsigned long pfn = PHYS_PFN(offset);

return memblock_is_map_memory(pfn);
return memblock_is_map_memory(offset);
}

@@ -304,13 +304,14 @@ static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
}

/*
* If mprotect/munmap/etc occurs during TLB batched flushing, we need to
* synchronise all the TLBI issued with a DSB to avoid the race mentioned in
* flush_tlb_batched_pending().
* If mprotect/munmap/etc occurs during TLB batched flushing, we need to ensure
* all the previously issued TLBIs targeting mm have completed. But since we
* can be executing on a remote CPU, a DSB cannot guarantee this like it can
* for arch_tlbbatch_flush(). Our only option is to flush the entire mm.
*/
static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
dsb(ish);
flush_tlb_mm(mm);
}

/*

@@ -1253,7 +1253,8 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
next = addr;
end = addr + PUD_SIZE;
do {
pmd_free_pte_page(pmdp, next);
if (pmd_present(pmdp_get(pmdp)))
pmd_free_pte_page(pmdp, next);
} while (pmdp++, next += PMD_SIZE, next != end);

pud_clear(pudp);

@@ -14,40 +14,48 @@
static inline void arch_local_irq_enable(void)
{
u32 flags = CSR_CRMD_IE;
register u32 mask asm("t0") = CSR_CRMD_IE;

__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
: [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}

static inline void arch_local_irq_disable(void)
{
u32 flags = 0;
register u32 mask asm("t0") = CSR_CRMD_IE;

__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
: [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}

static inline unsigned long arch_local_irq_save(void)
{
u32 flags = 0;
register u32 mask asm("t0") = CSR_CRMD_IE;

__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
: [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
return flags;
}

static inline void arch_local_irq_restore(unsigned long flags)
{
register u32 mask asm("t0") = CSR_CRMD_IE;

__asm__ __volatile__(
"csrxchg %[val], %[mask], %[reg]\n\t"
: [val] "+r" (flags)
: [mask] "r" (CSR_CRMD_IE), [reg] "i" (LOONGARCH_CSR_CRMD)
: [mask] "r" (mask), [reg] "i" (LOONGARCH_CSR_CRMD)
: "memory");
}

@@ -47,7 +47,8 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
pmd = pmd_offset(pud, addr);
}
}
return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
return (!pmd || pmd_none(pmdp_get(pmd))) ? NULL : (pte_t *) pmd;
}

int pmd_huge(pmd_t pmd)

@@ -30,6 +30,7 @@ endif
# offsets.
cflags-vdso := $(ccflags-vdso) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
$(filter -std=%,$(KBUILD_CFLAGS)) \
-O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
-mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \
-fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \

@@ -22,6 +22,7 @@ KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
ifndef CONFIG_64BIT
KBUILD_CFLAGS += -mfast-indirect-calls
endif
KBUILD_CFLAGS += -std=gnu11

LDFLAGS_vmlinux := -X -e startup --as-needed -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(addprefix $(obj)/, $(OBJECTS)) $(LIBGCC) FORCE

@@ -24,7 +24,7 @@
#define DPRINTF(fmt, args...)
#endif

#define RFMT "%#08lx"
#define RFMT "0x%08lx"

/* 1111 1100 0000 0000 0001 0011 1100 0000 */
#define OPCODE1(a,b,c) ((a)<<26|(b)<<12|(c)<<6)

@@ -183,7 +183,7 @@
/*
* Used to name C functions called from asm
*/
#ifdef CONFIG_PPC_KERNEL_PCREL
#if defined(__powerpc64__) && defined(CONFIG_PPC_KERNEL_PCREL)
#define CFUNC(name) name@notoc
#else
#define CFUNC(name) name

@@ -1516,6 +1516,8 @@ int eeh_pe_configure(struct eeh_pe *pe)
/* Invalid PE ? */
if (!pe)
return -ENODEV;
else
ret = eeh_ops->configure_bridge(pe);

return ret;
}

@@ -50,7 +50,7 @@ ldflags-$(CONFIG_LD_ORPHAN_WARN) += -Wl,--orphan-handling=$(CONFIG_LD_ORPHAN_WAR
ldflags-y += $(filter-out $(CC_AUTO_VAR_INIT_ZERO_ENABLER) $(CC_FLAGS_FTRACE) -Wa$(comma)%, $(KBUILD_CFLAGS))

CC32FLAGS := -m32
CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc
CC32FLAGSREMOVE := -mcmodel=medium -mabi=elfv1 -mabi=elfv2 -mcall-aixdesc -mpcrel
ifdef CONFIG_CC_IS_CLANG
# This flag is supported by clang for 64-bit but not 32-bit so it will cause
# an unused command line flag warning for this file.

@@ -519,7 +519,12 @@ static struct msi_domain_info pseries_msi_domain_info = {

static void pseries_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
__pci_read_msi_msg(irq_data_get_msi_desc(data), msg);
struct pci_dev *dev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data));

if (dev->current_state == PCI_D0)
__pci_read_msi_msg(irq_data_get_msi_desc(data), msg);
else
get_cached_msi_msg(data->irq, msg);
}

static struct irq_chip pseries_msi_irq_chip = {

@@ -490,7 +490,7 @@ enum prot_type {
PROT_TYPE_DAT = 3,
PROT_TYPE_IEP = 4,
/* Dummy value for passing an initialized value when code != PGM_PROTECTION */
PROT_NONE,
PROT_TYPE_DUMMY,
};

static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,

@@ -506,7 +506,7 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
switch (code) {
case PGM_PROTECTION:
switch (prot) {
case PROT_NONE:
case PROT_TYPE_DUMMY:
/* We should never get here, acts like termination */
WARN_ON_ONCE(1);
break;

@@ -976,7 +976,7 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
gpa = kvm_s390_real_to_abs(vcpu, ga);
if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
rc = PGM_ADDRESSING;
prot = PROT_NONE;
prot = PROT_TYPE_DUMMY;
}
}
if (rc)

@@ -1134,7 +1134,7 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
if (rc == PGM_PROTECTION)
prot = PROT_TYPE_KEYC;
else
prot = PROT_NONE;
prot = PROT_TYPE_DUMMY;
rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
}
out_unlock:

@@ -1442,13 +1442,9 @@ static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void)
{
enum spectre_v2_user_cmd mode;
char arg[20];
int ret, i;

mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ?
SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;

switch (spectre_v2_cmd) {
case SPECTRE_V2_CMD_NONE:
return SPECTRE_V2_USER_CMD_NONE;

@@ -1461,7 +1457,7 @@ spectre_v2_parse_user_cmdline(void)
ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
arg, sizeof(arg));
if (ret < 0)
return mode;
return SPECTRE_V2_USER_CMD_AUTO;

for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
if (match_option(arg, ret, v2_user_options[i].option)) {

@@ -1471,8 +1467,8 @@ spectre_v2_parse_user_cmdline(void)
}
}

pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
return mode;
pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)

@@ -718,6 +718,8 @@ int arch_memory_failure(unsigned long pfn, int flags)
goto out;
}

sgx_unmark_page_reclaimable(page);

/*
* TBD: Add additional plumbing to enable pre-emptive
* action for asynchronous poison notification. Until

@@ -1503,7 +1503,7 @@ static void svm_clear_current_vmcb(struct vmcb *vmcb)
{
int i;

for_each_online_cpu(i)
for_each_possible_cpu(i)
cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL);
}

@@ -787,8 +787,11 @@ static void vmx_emergency_disable(void)
return;

list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
loaded_vmcss_on_cpu_link)
loaded_vmcss_on_cpu_link) {
vmcs_clear(v->vmcs);
if (v->shadow_vmcs)
vmcs_clear(v->shadow_vmcs);
}

kvm_cpu_vmxoff();
}

@@ -668,6 +668,8 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
u32 arg_count = 0;
u32 index = walk_state->num_operands;
u32 prev_num_operands = walk_state->num_operands;
u32 new_num_operands;
u32 i;

ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);

@@ -696,6 +698,7 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,

/* Create the interpreter arguments, in reverse order */

new_num_operands = index;
index--;
for (i = 0; i < arg_count; i++) {
arg = arguments[index];

@@ -720,7 +723,11 @@ cleanup:
* pop everything off of the operand stack and delete those
* objects
*/
acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
walk_state->num_operands = i;
acpi_ds_obj_stack_pop_and_delete(new_num_operands, walk_state);

/* Restore operand count */
walk_state->num_operands = prev_num_operands;

ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index));
return_ACPI_STATUS(status);

@@ -636,7 +636,8 @@ acpi_status
acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op, acpi_status status)
{
acpi_status status2;
acpi_status return_status = status;
u8 ascending = TRUE;

ACPI_FUNCTION_TRACE_PTR(ps_complete_final_op, walk_state);

@@ -650,7 +651,7 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
op));
do {
if (op) {
if (walk_state->ascending_callback != NULL) {
if (ascending && walk_state->ascending_callback != NULL) {
walk_state->op = op;
walk_state->op_info =
acpi_ps_get_opcode_info(op->common.

@@ -672,49 +673,26 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,
}

if (status == AE_CTRL_TERMINATE) {
status = AE_OK;

/* Clean up */
do {
if (op) {
status2 =
acpi_ps_complete_this_op
(walk_state, op);
if (ACPI_FAILURE
(status2)) {
return_ACPI_STATUS
(status2);
}
}

acpi_ps_pop_scope(&
(walk_state->
parser_state),
&op,
&walk_state->
arg_types,
&walk_state->
arg_count);

} while (op);

return_ACPI_STATUS(status);
ascending = FALSE;
return_status = AE_CTRL_TERMINATE;
}

else if (ACPI_FAILURE(status)) {

/* First error is most important */

(void)
acpi_ps_complete_this_op(walk_state,
op);
return_ACPI_STATUS(status);
ascending = FALSE;
return_status = status;
}
}

status2 = acpi_ps_complete_this_op(walk_state, op);
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
status = acpi_ps_complete_this_op(walk_state, op);
if (ACPI_FAILURE(status)) {
ascending = FALSE;
if (ACPI_SUCCESS(return_status) ||
return_status == AE_CTRL_TERMINATE) {
return_status = status;
}
}
}

@@ -724,5 +702,5 @@ acpi_ps_complete_final_op(struct acpi_walk_state *walk_state,

} while (op);

return_ACPI_STATUS(status);
return_ACPI_STATUS(return_status);
}

@@ -333,11 +333,8 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)

pos = string;

if (size != ACPI_UINT32_MAX) {
end = string + size;
} else {
end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX);
}
size = ACPI_MIN(size, ACPI_PTR_DIFF(ACPI_MAX_PTR, string));
end = string + size;

for (; *format; ++format) {
if (*format != '%') {

@@ -243,10 +243,23 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) {
ret = -ENODEV;
else
val->intval = battery->rate_now * 1000;
break;
}

val->intval = battery->rate_now * 1000;
/*
* When discharging, the current should be reported as a
* negative number as per the power supply class interface
* definition.
*/
if (psp == POWER_SUPPLY_PROP_CURRENT_NOW &&
(battery->state & ACPI_BATTERY_STATE_DISCHARGING) &&
acpi_battery_handle_discharging(battery)
== POWER_SUPPLY_STATUS_DISCHARGING)
val->intval = -val->intval;

break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:

@@ -1396,8 +1396,10 @@ static int __init acpi_init(void)
}

acpi_kobj = kobject_create_and_add("acpi", firmware_kobj);
if (!acpi_kobj)
pr_debug("%s: kset create error\n", __func__);
if (!acpi_kobj) {
pr_err("Failed to register kobject\n");
return -ENOMEM;
}

init_prmt();
acpi_init_pcc();

@@ -368,7 +368,8 @@ static unsigned int via_mode_filter(struct ata_device *dev, unsigned int mask)
}

if (dev->class == ATA_DEV_ATAPI &&
dmi_check_system(no_atapi_dma_dmi_table)) {
(dmi_check_system(no_atapi_dma_dmi_table) ||
config->id == PCI_DEVICE_ID_VIA_6415)) {
ata_dev_warn(dev, "controller locks up on ATAPI DMA, forcing PIO\n");
mask &= ATA_MASK_PIO;
}

@@ -518,7 +518,7 @@ software_node_get_reference_args(const struct fwnode_handle *fwnode,
if (prop->is_inline)
return -EINVAL;

if (index * sizeof(*ref) >= prop->length)
if ((index + 1) * sizeof(*ref) > prop->length)
return -ENOENT;

ref_array = prop->pointer;

@@ -275,13 +275,13 @@ static struct fsl_mc_cmd_desc fsl_mc_accepted_cmds[] = {
.size = 8,
},
[DPSW_GET_TAILDROP] = {
.cmdid_value = 0x0A80,
.cmdid_value = 0x0A90,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 14,
},
[DPSW_SET_TAILDROP] = {
.cmdid_value = 0x0A90,
.cmdid_value = 0x0A80,
.cmdid_mask = 0xFFF0,
.token = true,
.size = 24,

@@ -214,12 +214,19 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
goto error_cleanup_resource;

dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
&dpmcp_dev->dev,
DL_FLAG_AUTOREMOVE_CONSUMER);
if (!dpmcp_dev->consumer_link) {
error = -EINVAL;
goto error_cleanup_mc_io;
/* If the DPRC device itself tries to allocate a portal (usually for
* UAPI interaction), don't add a device link between them since the
* DPMCP device is an actual child device of the DPRC and a reverse
* dependency is not allowed.
*/
if (mc_dev != mc_bus_dev) {
dpmcp_dev->consumer_link = device_link_add(&mc_dev->dev,
&dpmcp_dev->dev,
DL_FLAG_AUTOREMOVE_CONSUMER);
if (!dpmcp_dev->consumer_link) {
error = -EINVAL;
goto error_cleanup_mc_io;
}
}

*new_mc_io = mc_io;

@@ -19,7 +19,7 @@
/*
* Timeout in milliseconds to wait for the completion of an MC command
*/
#define MC_CMD_COMPLETION_TIMEOUT_MS 500
#define MC_CMD_COMPLETION_TIMEOUT_MS 15000

/*
* usleep_range() min and max values used to throttle down polling

@@ -131,19 +131,23 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e
}

old_offset = ring->rd_offset;
mhi_ep_ring_inc_index(ring);

dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
buf_info.dev_addr = el;
buf_info.size = sizeof(*el);

ret = mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
if (ret)
return ret;

mhi_ep_ring_inc_index(ring);

/* Update rp in ring context */
rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase);
memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64));

buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el));
buf_info.dev_addr = el;
buf_info.size = sizeof(*el);

return mhi_cntrl->write_sync(mhi_cntrl, &buf_info);
return ret;
}

void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)

@@ -586,6 +586,7 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
struct mhi_cmd *mhi_cmd;
struct mhi_event_ctxt *er_ctxt;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
bool reset_device = false;
int ret, i;

dev_dbg(dev, "Transitioning from PM state: %s to: %s\n",

@@ -614,8 +615,23 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
/* Wake up threads waiting for state transition */
wake_up_all(&mhi_cntrl->state_event);

/* Trigger MHI RESET so that the device will not access host memory */
if (MHI_REG_ACCESS_VALID(prev_state)) {
/*
* If the device is in PBL or SBL, it will only respond to
* RESET if the device is in SYSERR state. SYSERR might
* already be cleared at this point.
*/
enum mhi_state cur_state = mhi_get_mhi_state(mhi_cntrl);
enum mhi_ee_type cur_ee = mhi_get_exec_env(mhi_cntrl);

if (cur_state == MHI_STATE_SYS_ERR)
reset_device = true;
else if (cur_ee != MHI_EE_PBL && cur_ee != MHI_EE_SBL)
reset_device = true;
}

/* Trigger MHI RESET so that the device will not access host memory */
if (reset_device) {
u32 in_reset = -1;
unsigned long timeout = msecs_to_jiffies(mhi_cntrl->timeout_ms);

@@ -667,51 +667,6 @@ static int sysc_parse_and_check_child_range(struct sysc *ddata)
return 0;
}

/* Interconnect instances to probe before l4_per instances */
static struct resource early_bus_ranges[] = {
/* am3/4 l4_wkup */
{ .start = 0x44c00000, .end = 0x44c00000 + 0x300000, },
/* omap4/5 and dra7 l4_cfg */
{ .start = 0x4a000000, .end = 0x4a000000 + 0x300000, },
/* omap4 l4_wkup */
{ .start = 0x4a300000, .end = 0x4a300000 + 0x30000, },
/* omap5 and dra7 l4_wkup without dra7 dcan segment */
{ .start = 0x4ae00000, .end = 0x4ae00000 + 0x30000, },
};

static atomic_t sysc_defer = ATOMIC_INIT(10);

/**
* sysc_defer_non_critical - defer non_critical interconnect probing
* @ddata: device driver data
*
* We want to probe l4_cfg and l4_wkup interconnect instances before any
* l4_per instances as l4_per instances depend on resources on l4_cfg and
* l4_wkup interconnects.
*/
static int sysc_defer_non_critical(struct sysc *ddata)
{
struct resource *res;
int i;

if (!atomic_read(&sysc_defer))
return 0;

for (i = 0; i < ARRAY_SIZE(early_bus_ranges); i++) {
res = &early_bus_ranges[i];
if (ddata->module_pa >= res->start &&
ddata->module_pa <= res->end) {
atomic_set(&sysc_defer, 0);

return 0;
}
}

atomic_dec_if_positive(&sysc_defer);

return -EPROBE_DEFER;
}

static struct device_node *stdout_path;

static void sysc_init_stdout_path(struct sysc *ddata)

@@ -937,10 +892,6 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
if (error)
return error;

error = sysc_defer_non_critical(ddata);
if (error)
return error;

sysc_check_children(ddata);

if (!of_property_present(np, "reg"))

@@ -3971,6 +3971,7 @@ static const struct clk_parent_data spicc_sclk_parent_data[] = {
{ .hw = &g12a_clk81.hw },
{ .hw = &g12a_fclk_div4.hw },
{ .hw = &g12a_fclk_div3.hw },
{ .hw = &g12a_fclk_div2.hw },
{ .hw = &g12a_fclk_div5.hw },
{ .hw = &g12a_fclk_div7.hw },
};

@@ -431,6 +431,7 @@ static const char *const rk3036_critical_clocks[] __initconst = {
"hclk_peri",
"pclk_peri",
"pclk_ddrupctl",
"ddrphy",
};

static void __init rk3036_clk_init(struct device_node *np)

@@ -323,6 +323,40 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.register_em = scmi_cpufreq_register_em,
};

static bool scmi_dev_used_by_cpus(struct device *scmi_dev)
{
struct device_node *scmi_np = dev_of_node(scmi_dev);
struct device_node *cpu_np, *np;
struct device *cpu_dev;
int cpu, idx;

if (!scmi_np)
return false;

for_each_possible_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
continue;

cpu_np = dev_of_node(cpu_dev);

np = of_parse_phandle(cpu_np, "clocks", 0);
of_node_put(np);

if (np == scmi_np)
return true;

idx = of_property_match_string(cpu_np, "power-domain-names", "perf");
np = of_parse_phandle(cpu_np, "power-domains", idx);
of_node_put(np);

if (np == scmi_np)
return true;
}

return false;
}

static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
int ret;

@@ -331,7 +365,7 @@ static int scmi_cpufreq_probe(struct scmi_device *sdev)

handle = sdev->handle;

if (!handle)
if (!handle || !scmi_dev_used_by_cpus(dev))
return -ENODEV;

perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);

@@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)

static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
{
if (engine->chain.first && engine->chain.last)
if (engine->chain_hw.first && engine->chain_hw.last)
return mv_cesa_tdma_process(engine, status);

return mv_cesa_std_process(engine, status);

@@ -440,8 +440,10 @@ struct mv_cesa_dev {
* SRAM
* @queue: fifo of the pending crypto requests
* @load: engine load counter, useful for load balancing
* @chain: list of the current tdma descriptors being processed
* by this engine.
* @chain_hw: list of the current tdma descriptors being processed
* by the hardware.
* @chain_sw: list of the current tdma descriptors that will be
* submitted to the hardware.
* @complete_queue: fifo of the processed requests by the engine
*
* Structure storing CESA engine information.

@@ -463,7 +465,8 @@ struct mv_cesa_engine {
struct gen_pool *pool;
struct crypto_queue queue;
atomic_t load;
struct mv_cesa_tdma_chain chain;
struct mv_cesa_tdma_chain chain_hw;
struct mv_cesa_tdma_chain chain_sw;
struct list_head complete_queue;
int irq;
};

@@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
struct mv_cesa_engine *engine = dreq->engine;

spin_lock_bh(&engine->lock);
if (engine->chain_sw.first == dreq->chain.first) {
engine->chain_sw.first = NULL;
engine->chain_sw.last = NULL;
}
engine->chain_hw.first = dreq->chain.first;
engine->chain_hw.last = dreq->chain.last;
spin_unlock_bh(&engine->lock);

writel_relaxed(0, engine->regs + CESA_SA_CFG);

mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);

@@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
struct mv_cesa_req *dreq)
{
if (engine->chain.first == NULL && engine->chain.last == NULL) {
engine->chain.first = dreq->chain.first;
engine->chain.last = dreq->chain.last;
} else {
struct mv_cesa_tdma_desc *last;
struct mv_cesa_tdma_desc *last = engine->chain_sw.last;

last = engine->chain.last;
/*
* Break the DMA chain if the request being queued needs the IV
* regs to be set before lauching the request.
*/
if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE)
engine->chain_sw.first = dreq->chain.first;
else {
last->next = dreq->chain.first;
engine->chain.last = dreq->chain.last;

/*
* Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
* the last element of the current chain, or if the request
* being queued needs the IV regs to be set before lauching
* the request.
*/
if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
!(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
}
last = dreq->chain.last;
engine->chain_sw.last = last;
/*
* Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
* the last element of the current chain.
*/
if (last->flags & CESA_TDMA_BREAK_CHAIN) {
engine->chain_sw.first = NULL;
engine->chain_sw.last = NULL;
}
}

@@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)

tdma_cur = readl(engine->regs + CESA_TDMA_CUR);

for (tdma = engine->chain.first; tdma; tdma = next) {
for (tdma = engine->chain_hw.first; tdma; tdma = next) {
spin_lock_bh(&engine->lock);
next = tdma->next;
spin_unlock_bh(&engine->lock);

@@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
&backlog);

/* Re-chaining to the next request */
engine->chain.first = tdma->next;
engine->chain_hw.first = tdma->next;
tdma->next = NULL;

/* If this is the last request, clear the chain */
if (engine->chain.first == NULL)
engine->chain.last = NULL;
if (engine->chain_hw.first == NULL)
engine->chain_hw.last = NULL;
spin_unlock_bh(&engine->lock);

ctx = crypto_tfm_ctx(req->tfm);

@@ -161,8 +161,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
ubuf->sg = NULL;
}
} else {
dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
direction);
dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
}

return ret;

@@ -177,7 +176,7 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
if (!ubuf->sg)
return -EINVAL;

dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
return 0;
}

@@ -1756,9 +1756,9 @@ altr_edac_a10_device_trig(struct file *file, const char __user *user_buf,

local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR)
writel(priv->ue_set_mask, set_addr);
writew(priv->ue_set_mask, set_addr);
else
writel(priv->ce_set_mask, set_addr);
writew(priv->ce_set_mask, set_addr);

/* Ensure the interrupt test bits are set */
wmb();

@@ -1788,7 +1788,7 @@ altr_edac_a10_device_trig2(struct file *file, const char __user *user_buf,

local_irq_save(flags);
if (trig_type == ALTR_UE_TRIGGER_CHAR) {
writel(priv->ue_set_mask, set_addr);
writew(priv->ue_set_mask, set_addr);
} else {
/* Setup read/write of 4 bytes */
writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);

@@ -203,6 +203,15 @@ static void of_gpio_try_fixup_polarity(const struct device_node *np,
*/
{ "lantiq,pci-xway", "gpio-reset", false },
#endif
#if IS_ENABLED(CONFIG_REGULATOR_S5M8767)
/*
* According to S5M8767, the DVS and DS pin are
* active-high signals. However, exynos5250-spring.dts use
* active-low setting.
*/
{ "samsung,s5m8767-pmic", "s5m8767,pmic-buck-dvs-gpios", true },
{ "samsung,s5m8767-pmic", "s5m8767,pmic-buck-ds-gpios", true },
#endif
#if IS_ENABLED(CONFIG_TOUCHSCREEN_TSC2005)
/*
* DTS for Nokia N900 incorrectly specified "active high"

@@ -207,11 +207,20 @@ int vmbus_connect(void)
INIT_LIST_HEAD(&vmbus_connection.chn_list);
mutex_init(&vmbus_connection.channel_mutex);

/*
* The following Hyper-V interrupt and monitor pages can be used by
* UIO for mapping to user-space, so they should always be allocated on
* system page boundaries. The system page size must be >= the Hyper-V
* page size.
*/
BUILD_BUG_ON(PAGE_SIZE < HV_HYP_PAGE_SIZE);

/*
* Setup the vmbus event connection for channel interrupt
* abstraction stuff
*/
vmbus_connection.int_page = hv_alloc_hyperv_zeroed_page();
vmbus_connection.int_page =
(void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
if (vmbus_connection.int_page == NULL) {
ret = -ENOMEM;
goto cleanup;

@@ -226,8 +235,8 @@ int vmbus_connect(void)
* Setup the monitor notification facility. The 1st page for
* parent->child and the 2nd page for child->parent
*/
vmbus_connection.monitor_pages[0] = hv_alloc_hyperv_page();
vmbus_connection.monitor_pages[1] = hv_alloc_hyperv_page();
vmbus_connection.monitor_pages[0] = (void *)__get_free_page(GFP_KERNEL);
vmbus_connection.monitor_pages[1] = (void *)__get_free_page(GFP_KERNEL);
if ((vmbus_connection.monitor_pages[0] == NULL) ||
(vmbus_connection.monitor_pages[1] == NULL)) {
ret = -ENOMEM;

@@ -343,21 +352,23 @@ void vmbus_disconnect(void)
destroy_workqueue(vmbus_connection.work_queue);

if (vmbus_connection.int_page) {
hv_free_hyperv_page(vmbus_connection.int_page);
free_page((unsigned long)vmbus_connection.int_page);
vmbus_connection.int_page = NULL;
}

if (vmbus_connection.monitor_pages[0]) {
if (!set_memory_encrypted(
(unsigned long)vmbus_connection.monitor_pages[0], 1))
hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
free_page((unsigned long)
vmbus_connection.monitor_pages[0]);
vmbus_connection.monitor_pages[0] = NULL;
}

if (vmbus_connection.monitor_pages[1]) {
if (!set_memory_encrypted(
(unsigned long)vmbus_connection.monitor_pages[1], 1))
hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
free_page((unsigned long)
vmbus_connection.monitor_pages[1]);
vmbus_connection.monitor_pages[1] = NULL;
}
}

@@ -423,13 +423,16 @@ static int fts_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_auto_channels_temp:
if (data->fan_source[channel] == FTS_FAN_SOURCE_INVALID)
case hwmon_pwm_auto_channels_temp: {
u8 fan_source = data->fan_source[channel];

if (fan_source == FTS_FAN_SOURCE_INVALID || fan_source >= BITS_PER_LONG)
*val = 0;
else
*val = BIT(data->fan_source[channel]);
*val = BIT(fan_source);

return 0;
}
default:
break;
}

@@ -91,7 +91,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
i2c_dw_disable(dev);
synchronize_irq(dev->irq);
dev->slave = NULL;
pm_runtime_put(dev->dev);
pm_runtime_put_sync_suspend(dev->dev);

return 0;
}

@@ -1971,10 +1971,14 @@ static int npcm_i2c_init_module(struct npcm_i2c *bus, enum i2c_mode mode,

/* Check HW is OK: SDA and SCL should be high at this point. */
if ((npcm_i2c_get_SDA(&bus->adap) == 0) || (npcm_i2c_get_SCL(&bus->adap) == 0)) {
dev_err(bus->dev, "I2C%d init fail: lines are low\n", bus->num);
dev_err(bus->dev, "SDA=%d SCL=%d\n", npcm_i2c_get_SDA(&bus->adap),
npcm_i2c_get_SCL(&bus->adap));
return -ENXIO;
dev_warn(bus->dev, " I2C%d SDA=%d SCL=%d, attempting to recover\n", bus->num,
npcm_i2c_get_SDA(&bus->adap), npcm_i2c_get_SCL(&bus->adap));
if (npcm_i2c_recovery_tgclk(&bus->adap)) {
dev_err(bus->dev, "I2C%d init fail: SDA=%d SCL=%d\n",
bus->num, npcm_i2c_get_SDA(&bus->adap),
npcm_i2c_get_SCL(&bus->adap));
return -ENXIO;
}
}

npcm_i2c_int_enable(bus, true);

@@ -1397,6 +1397,11 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], MSG_END_CONTINUE);
if (ret)
break;

/* Validate message length before proceeding */
if (msgs[i].buf[0] == 0 || msgs[i].buf[0] > I2C_SMBUS_BLOCK_MAX)
break;

/* Set the msg length from first byte */
msgs[i].len += msgs[i].buf[0];
dev_dbg(i2c_dev->dev, "reading %d bytes\n", msgs[i].len);

@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
#include <linux/units.h>

#include <linux/iio/buffer.h>
#include <linux/iio/events.h>

@@ -434,8 +435,16 @@ static int fxls8962af_read_raw(struct iio_dev *indio_dev,
*val = FXLS8962AF_TEMP_CENTER_VAL;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
return fxls8962af_read_full_scale(data, val2);
switch (chan->type) {
case IIO_TEMP:
*val = MILLIDEGREE_PER_DEGREE;
return IIO_VAL_INT;
case IIO_ACCEL:
*val = 0;
return fxls8962af_read_full_scale(data, val2);
default:
return -EINVAL;
}
case IIO_CHAN_INFO_SAMP_FREQ:
return fxls8962af_read_samp_freq(data, val, val2);
default:

@@ -734,9 +743,11 @@ static const struct iio_event_spec fxls8962af_event[] = {
.type = IIO_TEMP, \
.address = FXLS8962AF_TEMP_OUT, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_OFFSET),\
.scan_index = -1, \
.scan_type = { \
.sign = 's', \
.realbits = 8, \
.storagebits = 8, \
}, \

@@ -151,7 +151,7 @@ static int ad7606_spi_reg_write(struct ad7606_state *st,
struct spi_device *spi = to_spi_device(st->dev);

st->d16[0] = cpu_to_be16((st->bops->rd_wr_cmd(addr, 1) << 8) |
(val & 0x1FF));
(val & 0xFF));

return spi_write(spi, &st->d16[0], sizeof(st->d16[0]));
}

@@ -67,16 +67,18 @@ int inv_icm42600_temp_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
/*
* T°C = (temp / 132.48) + 25
* Tm°C = 1000 * ((temp * 100 / 13248) + 25)
* Tm°C = 1000 * ((temp / 132.48) + 25)
* Tm°C = 7.548309 * temp + 25000
* Tm°C = (temp + 3312) * 7.548309
* scale: 100000 / 13248 ~= 7.548309
* offset: 25000
* offset: 3312
*/
case IIO_CHAN_INFO_SCALE:
*val = 7;
*val2 = 548309;
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_OFFSET:
*val = 25000;
*val = 3312;
return IIO_VAL_INT;
default:
return -EINVAL;

@@ -367,12 +367,9 @@ EXPORT_SYMBOL(iw_cm_disconnect);
/*
* CM_ID <-- DESTROYING
*
* Clean up all resources associated with the connection and release
* the initial reference taken by iw_create_cm_id.
*
* Returns true if and only if the last cm_id_priv reference has been dropped.
* Clean up all resources associated with the connection.
*/
static bool destroy_cm_id(struct iw_cm_id *cm_id)
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
struct ib_qp *qp;

@@ -441,20 +438,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id)
iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
}

return iwcm_deref_id(cm_id_priv);
}

/*
* This function is only called by the application thread and cannot
* be called by the event thread. The function will wait for all
* references to be released on the cm_id and then kfree the cm_id
* object.
* Destroy cm_id. If the cm_id still has other references, wait for all
* references to be released on the cm_id and then release the initial
* reference taken by iw_create_cm_id.
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
if (!destroy_cm_id(cm_id))
struct iwcm_id_private *cm_id_priv;

cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
destroy_cm_id(cm_id);
if (refcount_read(&cm_id_priv->refcount) > 1)
flush_workqueue(iwcm_wq);
iwcm_deref_id(cm_id_priv);
}
EXPORT_SYMBOL(iw_destroy_cm_id);

@@ -1037,8 +1036,10 @@ static void cm_work_handler(struct work_struct *_work)

if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
ret = process_event(cm_id_priv, &levent);
if (ret)
WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
if (ret) {
destroy_cm_id(&cm_id_priv->id);
WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
}
} else
pr_debug("dropping event %d\n", levent.event);
if (iwcm_deref_id(cm_id_priv))

@@ -455,6 +455,8 @@ static enum hrtimer_restart gpio_keys_irq_timer(struct hrtimer *t)
release_timer);
struct input_dev *input = bdata->input;

guard(spinlock_irqsave)(&bdata->lock);

if (bdata->key_pressed) {
input_report_key(input, *bdata->code, 0);
input_sync(input);

@@ -845,6 +845,12 @@ static int ims_pcu_flash_firmware(struct ims_pcu *pcu,
addr = be32_to_cpu(rec->addr) / 2;
len = be16_to_cpu(rec->len);

if (len > sizeof(pcu->cmd_buf) - 1 - sizeof(*fragment)) {
dev_err(pcu->dev,
"Invalid record length in firmware: %d\n", len);
return -EINVAL;
}

fragment = (void *)&pcu->cmd_buf[1];
put_unaligned_le32(addr, &fragment->addr);
fragment->len = len;

@@ -75,9 +75,14 @@ static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int
return -1;

switch (code) {
case SND_BELL: if (value) value = 1000;
case SND_TONE: break;
default: return -1;
case SND_BELL:
if (value)
value = 1000;
break;
case SND_TONE:
break;
default:
return -1;
}

if (value > 20 && value < 32767)

@@ -113,9 +118,14 @@ static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned
return -1;

switch (code) {
case SND_BELL: if (value) value = 1000;
case SND_TONE: break;
default: return -1;
case SND_BELL:
if (value)
value = 1000;
break;
case SND_TONE:
break;
default:
return -1;
}

if (value > 20 && value < 32767)

@@ -780,6 +780,14 @@ int amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
{
iommu_ga_log_notifier = notifier;

/*
* Ensure all in-flight IRQ handlers run to completion before returning
* to the caller, e.g. to ensure module code isn't unloaded while it's
* being executed in the IRQ handler.
*/
if (!notifier)
synchronize_rcu();

return 0;
}
EXPORT_SYMBOL(amd_iommu_register_ga_log_notifier);

@@ -133,10 +133,9 @@ static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
spin_lock_irqsave(&ms->lock, flags);
should_wake = !(bl->head);
bio_list_add(bl, bio);
spin_unlock_irqrestore(&ms->lock, flags);

if (should_wake)
wakeup_mirrord(ms);
spin_unlock_irqrestore(&ms->lock, flags);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)

@@ -646,9 +645,9 @@ static void write_callback(unsigned long error, void *context)
if (!ms->failures.head)
should_wake = 1;
bio_list_add(&ms->failures, bio);
spin_unlock_irqrestore(&ms->lock, flags);
if (should_wake)
wakeup_mirrord(ms);
spin_unlock_irqrestore(&ms->lock, flags);
}

static void do_write(struct mirror_set *ms, struct bio *bio)

@@ -601,6 +601,10 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
(*argc)--;

if (!strcasecmp(arg_name, DM_VERITY_OPT_FEC_DEV)) {
if (v->fec->dev) {
ti->error = "FEC device already specified";
return -EINVAL;
}
r = dm_get_device(ti, arg_value, BLK_OPEN_READ, &v->fec->dev);
if (r) {
ti->error = "FEC device lookup failed";

@@ -1079,6 +1079,9 @@ static int verity_alloc_most_once(struct dm_verity *v)
{
struct dm_target *ti = v->ti;

if (v->validated_blocks)
return 0;

/* the bitset can only handle INT_MAX blocks */
if (v->data_blocks > INT_MAX) {
ti->error = "device too large to use check_at_most_once";

@@ -1102,6 +1105,9 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
struct dm_verity_io *io;
u8 *zero_data;

if (v->zero_digest)
return 0;

v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);

if (!v->zero_digest)

@@ -1512,7 +1518,7 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}

/* Root hash signature is a optional parameter*/
/* Root hash signature is an optional parameter */
r = verity_verify_root_hash(root_hash_digest_to_validate,
strlen(root_hash_digest_to_validate),
verify_args.sig,

@@ -71,9 +71,14 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as,
const char *arg_name)
{
struct dm_target *ti = v->ti;
int ret = 0;
int ret;
const char *sig_key = NULL;

if (v->signature_key_desc) {
ti->error = DM_VERITY_VERIFY_ERR("root_hash_sig_key_desc already specified");
return -EINVAL;
}

if (!*argc) {
ti->error = DM_VERITY_VERIFY_ERR("Signature key not specified");
return -EINVAL;

@@ -83,14 +88,18 @@ int verity_verify_sig_parse_opt_args(struct dm_arg_set *as,
(*argc)--;

ret = verity_verify_get_sig_from_key(sig_key, sig_opts);
if (ret < 0)
if (ret < 0) {
ti->error = DM_VERITY_VERIFY_ERR("Invalid key specified");
return ret;
}

v->signature_key_desc = kstrdup(sig_key, GFP_KERNEL);
if (!v->signature_key_desc)
if (!v->signature_key_desc) {
ti->error = DM_VERITY_VERIFY_ERR("Could not allocate memory for signature key");
return -ENOMEM;
}

return ret;
return 0;
}

/*

@@ -469,7 +469,7 @@ vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
struct vb2_dma_sg_buf *buf = dbuf->priv;
struct sg_table *sgt = buf->dma_sgt;

dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
return 0;
}

@@ -480,7 +480,7 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
struct vb2_dma_sg_buf *buf = dbuf->priv;
struct sg_table *sgt = buf->dma_sgt;

dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
return 0;
}

@@ -312,6 +312,11 @@ __ccs_pll_calculate_vt_tree(struct device *dev,
dev_dbg(dev, "more_mul2: %u\n", more_mul);

pll_fr->pll_multiplier = mul * more_mul;
if (pll_fr->pll_multiplier > lim_fr->max_pll_multiplier) {
dev_dbg(dev, "pll multiplier %u too high\n",
pll_fr->pll_multiplier);
return -EINVAL;
}

if (pll_fr->pll_multiplier * pll_fr->pll_ip_clk_freq_hz >
lim_fr->max_pll_op_clk_freq_hz)

@@ -397,6 +402,8 @@ static int ccs_pll_calculate_vt_tree(struct device *dev,
min_pre_pll_clk_div = max_t(u16, min_pre_pll_clk_div,
pll->ext_clk_freq_hz /
lim_fr->max_pll_ip_clk_freq_hz);
if (!(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER))
min_pre_pll_clk_div = clk_div_even(min_pre_pll_clk_div);

dev_dbg(dev, "vt min/max_pre_pll_clk_div: %u,%u\n",
min_pre_pll_clk_div, max_pre_pll_clk_div);

@@ -792,7 +799,7 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
op_lim_fr->min_pre_pll_clk_div, op_lim_fr->max_pre_pll_clk_div);
max_op_pre_pll_clk_div =
min_t(u16, op_lim_fr->max_pre_pll_clk_div,
clk_div_even(pll->ext_clk_freq_hz /
DIV_ROUND_UP(pll->ext_clk_freq_hz,
op_lim_fr->min_pll_ip_clk_freq_hz));
min_op_pre_pll_clk_div =
max_t(u16, op_lim_fr->min_pre_pll_clk_div,

@@ -815,6 +822,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim,
one_or_more(
DIV_ROUND_UP(op_lim_fr->max_pll_op_clk_freq_hz,
pll->ext_clk_freq_hz))));
if (!(pll->flags & CCS_PLL_FLAG_EXT_IP_PLL_DIVIDER))
min_op_pre_pll_clk_div = clk_div_even(min_op_pre_pll_clk_div);
dev_dbg(dev, "pll_op check: min / max op_pre_pll_clk_div: %u / %u\n",
min_op_pre_pll_clk_div, max_op_pre_pll_clk_div);

@@ -453,10 +453,10 @@ static int ub913_set_fmt(struct v4l2_subdev *sd,
if (!fmt)
return -EINVAL;

format->format.code = finfo->outcode;

*fmt = format->format;

fmt->code = finfo->outcode;

return 0;
}

@@ -1339,11 +1339,8 @@ static int ov5675_probe(struct i2c_client *client)
return -ENOMEM;

ret = ov5675_get_hwcfg(ov5675, &client->dev);
if (ret) {
dev_err(&client->dev, "failed to get HW configuration: %d",
ret);
if (ret)
return ret;
}

v4l2_i2c_subdev_init(&ov5675->sd, client, &ov5675_subdev_ops);

@@ -2323,8 +2323,8 @@ static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
if (!is_acpi_node(fwnode)) {
ov8856->xvclk = devm_clk_get(dev, "xvclk");
if (IS_ERR(ov8856->xvclk)) {
dev_err(dev, "could not get xvclk clock (%pe)\n",
ov8856->xvclk);
dev_err_probe(dev, PTR_ERR(ov8856->xvclk),
"could not get xvclk clock\n");
return PTR_ERR(ov8856->xvclk);
}

@@ -2429,11 +2429,8 @@ static int ov8856_probe(struct i2c_client *client)
return -ENOMEM;

ret = ov8856_get_hwcfg(ov8856, &client->dev);
if (ret) {
dev_err(&client->dev, "failed to get HW configuration: %d",
ret);
if (ret)
return ret;
}

v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);

@@ -821,7 +821,7 @@ static int vdec_hevc_slice_setup_core_buffer(struct vdec_hevc_slice_inst *inst,
inst->vsi_core->fb.y.dma_addr = y_fb_dma;
inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[0];
inst->vsi_core->fb.c.dma_addr = c_fb_dma;
inst->vsi_core->fb.y.size = ctx->picinfo.fb_sz[1];
inst->vsi_core->fb.c.size = ctx->picinfo.fb_sz[1];

inst->vsi_core->dec.vdec_fb_va = (unsigned long)fb;

@@ -752,6 +752,32 @@ static int mxc_get_free_slot(struct mxc_jpeg_slot_data *slot_data)
return -1;
}

static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg)
{
/* free descriptor for decoding/encoding phase */
dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
jpeg->slot_data.desc,
jpeg->slot_data.desc_handle);
jpeg->slot_data.desc = NULL;
jpeg->slot_data.desc_handle = 0;

/* free descriptor for encoder configuration phase / decoder DHT */
dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
jpeg->slot_data.cfg_desc,
jpeg->slot_data.cfg_desc_handle);
jpeg->slot_data.cfg_desc_handle = 0;
jpeg->slot_data.cfg_desc = NULL;

/* free configuration stream */
dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM,
jpeg->slot_data.cfg_stream_vaddr,
jpeg->slot_data.cfg_stream_handle);
jpeg->slot_data.cfg_stream_vaddr = NULL;
jpeg->slot_data.cfg_stream_handle = 0;

jpeg->slot_data.used = false;
}

static bool mxc_jpeg_alloc_slot_data(struct mxc_jpeg_dev *jpeg)
{
struct mxc_jpeg_desc *desc;

@@ -794,30 +820,11 @@ skip_alloc:
return true;
err:
dev_err(jpeg->dev, "Could not allocate descriptors for slot %d", jpeg->slot_data.slot);
mxc_jpeg_free_slot_data(jpeg);

return false;
}

static void mxc_jpeg_free_slot_data(struct mxc_jpeg_dev *jpeg)
{
/* free descriptor for decoding/encoding phase */
dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
jpeg->slot_data.desc,
jpeg->slot_data.desc_handle);

/* free descriptor for encoder configuration phase / decoder DHT */
dma_free_coherent(jpeg->dev, sizeof(struct mxc_jpeg_desc),
jpeg->slot_data.cfg_desc,
jpeg->slot_data.cfg_desc_handle);

/* free configuration stream */
dma_free_coherent(jpeg->dev, MXC_JPEG_MAX_CFG_STREAM,
jpeg->slot_data.cfg_stream_vaddr,
jpeg->slot_data.cfg_stream_handle);

jpeg->slot_data.used = false;
}

static void mxc_jpeg_check_and_set_last_buffer(struct mxc_jpeg_ctx *ctx,
struct vb2_v4l2_buffer *src_buf,
struct vb2_v4l2_buffer *dst_buf)

@@ -1913,9 +1920,19 @@ static void mxc_jpeg_buf_queue(struct vb2_buffer *vb)
jpeg_src_buf = vb2_to_mxc_buf(vb);
jpeg_src_buf->jpeg_parse_error = false;
ret = mxc_jpeg_parse(ctx, vb);
if (ret)
if (ret) {
jpeg_src_buf->jpeg_parse_error = true;

/*
* if the capture queue is not setup, the device_run() won't be scheduled,
* need to drop the error buffer, so that the decoding can continue
*/
if (!vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx))) {
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
return;
}
}

end:
v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

@@ -43,6 +43,7 @@ struct mxc_isi_m2m_ctx_queue_data {
struct v4l2_pix_format_mplane format;
const struct mxc_isi_format_info *info;
u32 sequence;
bool streaming;
};

struct mxc_isi_m2m_ctx {
@@ -486,15 +487,18 @@ static int mxc_isi_m2m_streamon(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
const struct v4l2_pix_format_mplane *out_pix = &ctx->queues.out.format;
const struct v4l2_pix_format_mplane *cap_pix = &ctx->queues.cap.format;
const struct mxc_isi_format_info *cap_info = ctx->queues.cap.info;
const struct mxc_isi_format_info *out_info = ctx->queues.out.info;
struct mxc_isi_m2m *m2m = ctx->m2m;
bool bypass;

int ret;

if (q->streaming)
return 0;

mutex_lock(&m2m->lock);

if (m2m->usage_count == INT_MAX) {
@@ -547,6 +551,8 @@ static int mxc_isi_m2m_streamon(struct file *file, void *fh,
goto unchain;
}

q->streaming = true;

return 0;

unchain:
@@ -569,10 +575,14 @@ static int mxc_isi_m2m_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type)
{
struct mxc_isi_m2m_ctx *ctx = to_isi_m2m_ctx(fh);
struct mxc_isi_m2m_ctx_queue_data *q = mxc_isi_m2m_ctx_qdata(ctx, type);
struct mxc_isi_m2m *m2m = ctx->m2m;

v4l2_m2m_ioctl_streamoff(file, fh, type);

if (!q->streaming)
return 0;

mutex_lock(&m2m->lock);

/*
@@ -598,6 +608,8 @@ static int mxc_isi_m2m_streamoff(struct file *file, void *fh,

mutex_unlock(&m2m->lock);

q->streaming = false;

return 0;
}

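The new per-queue streaming flag makes repeated VIDIOC_STREAMON/STREAMOFF calls idempotent, so the shared usage count and the chaining setup are touched exactly once per transition. A sketch of the guard, with a hypothetical counter standing in for m2m->usage_count:

	#include <assert.h>
	#include <stdbool.h>

	static int usage_count;	/* stand-in for the shared m2m usage count */

	struct queue { bool streaming; };

	static void stream_on(struct queue *q)
	{
		if (q->streaming)	/* repeated STREAMON: counted once */
			return;
		usage_count++;
		q->streaming = true;
	}

	static void stream_off(struct queue *q)
	{
		if (!q->streaming)	/* STREAMOFF before STREAMON: no underflow */
			return;
		usage_count--;
		q->streaming = false;
	}

	int main(void)
	{
		struct queue q = { false };

		stream_on(&q);
		stream_on(&q);
		stream_off(&q);
		stream_off(&q);
		assert(usage_count == 0);
		return 0;
	}
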
@@ -348,7 +348,7 @@ static int venus_probe(struct platform_device *pdev)

ret = v4l2_device_register(dev, &core->v4l2_dev);
if (ret)
goto err_core_deinit;
goto err_hfi_destroy;

platform_set_drvdata(pdev, core);

@@ -380,24 +380,24 @@ static int venus_probe(struct platform_device *pdev)

ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_DEC);
if (ret)
goto err_venus_shutdown;
goto err_core_deinit;

ret = venus_enumerate_codecs(core, VIDC_SESSION_TYPE_ENC);
if (ret)
goto err_venus_shutdown;
goto err_core_deinit;

ret = pm_runtime_put_sync(dev);
if (ret) {
pm_runtime_get_noresume(dev);
goto err_dev_unregister;
goto err_core_deinit;
}

venus_dbgfs_init(core);

return 0;

err_dev_unregister:
v4l2_device_unregister(&core->v4l2_dev);
err_core_deinit:
hfi_core_deinit(core, false);
err_venus_shutdown:
venus_shutdown(core);
err_firmware_deinit:
@@ -408,9 +408,9 @@ err_runtime_disable:
pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
v4l2_device_unregister(&core->v4l2_dev);
err_hfi_destroy:
hfi_destroy(core);
err_core_deinit:
hfi_core_deinit(core, false);
err_core_put:
if (core->pm_ops->core_put)
core->pm_ops->core_put(core);

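The relabelled gotos restore the usual probe() unwind invariant: a failing step jumps to the label that undoes only what already succeeded, in reverse order. A compact sketch of the pattern, using hypothetical resources a and b:

	#include <stdlib.h>

	static int probe(int fail_late)
	{
		void *a, *b;

		a = malloc(8);
		if (!a)
			goto err;

		b = malloc(8);
		if (!b)
			goto err_free_a;	/* b never existed: skip its cleanup */

		if (fail_late)
			goto err_free_b;	/* both exist: unwind both */

		free(b);
		free(a);
		return 0;

	err_free_b:
		free(b);
	err_free_a:
		free(a);
	err:
		return -1;
	}

	int main(void)
	{
		return probe(1) ? EXIT_FAILURE : EXIT_SUCCESS;
	}

A mislabelled jump either skips a cleanup or releases something that was never acquired, which is what the relabelling above corrects.
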
@@ -505,7 +505,7 @@ static int vpif_probe(struct platform_device *pdev)
pdev_display = kzalloc(sizeof(*pdev_display), GFP_KERNEL);
if (!pdev_display) {
ret = -ENOMEM;
goto err_put_pdev_capture;
goto err_del_pdev_capture;
}

pdev_display->name = "vpif_display";
@@ -528,6 +528,8 @@ static int vpif_probe(struct platform_device *pdev)

err_put_pdev_display:
platform_device_put(pdev_display);
err_del_pdev_capture:
platform_device_del(pdev_capture);
err_put_pdev_capture:
platform_device_put(pdev_capture);
err_put_rpm:

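The corrected label follows the platform_device ownership rules: a device that has already been added must be torn down with platform_device_del() before platform_device_put(), while one that was only allocated takes the put alone. Since pdev_capture was already registered when the pdev_display allocation fails, the unwind has to start at err_del_pdev_capture. A kernel-style sketch of the rule (not the vpif code):

	pdev = platform_device_alloc("foo", -1);	/* holds a refcount only */
	ret = platform_device_add(pdev);		/* now also registered */
	if (ret) {
		platform_device_put(pdev);	/* not yet added: put alone */
		return ret;
	}
	if (later_failure) {
		platform_device_del(pdev);	/* undo _add() */
		platform_device_put(pdev);	/* undo _alloc() */
	}

platform_device_unregister() combines the del and put steps for the fully registered case.
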
@@ -446,8 +446,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
if (ret < 0)
goto done;

dma_sync_sg_for_cpu(isp->dev, req->table.sgt.sgl,
req->table.sgt.nents, DMA_TO_DEVICE);
dma_sync_sgtable_for_cpu(isp->dev, &req->table.sgt,
DMA_TO_DEVICE);

if (copy_from_user(req->table.addr, config->lsc,
req->config.size)) {
@@ -455,8 +455,8 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
goto done;
}

dma_sync_sg_for_device(isp->dev, req->table.sgt.sgl,
req->table.sgt.nents, DMA_TO_DEVICE);
dma_sync_sgtable_for_device(isp->dev, &req->table.sgt,
DMA_TO_DEVICE);
}

spin_lock_irqsave(&ccdc->lsc.req_lock, flags);

@@ -161,8 +161,7 @@ static void isp_stat_buf_sync_for_device(struct ispstat *stat,
if (ISP_STAT_USES_DMAENGINE(stat))
return;

dma_sync_sg_for_device(stat->isp->dev, buf->sgt.sgl,
buf->sgt.nents, DMA_FROM_DEVICE);
dma_sync_sgtable_for_device(stat->isp->dev, &buf->sgt, DMA_FROM_DEVICE);
}

static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
@@ -171,8 +170,7 @@ static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
if (ISP_STAT_USES_DMAENGINE(stat))
return;

dma_sync_sg_for_cpu(stat->isp->dev, buf->sgt.sgl,
buf->sgt.nents, DMA_FROM_DEVICE);
dma_sync_sgtable_for_cpu(stat->isp->dev, &buf->sgt, DMA_FROM_DEVICE);
}

static void isp_stat_buf_clear(struct ispstat *stat)

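The dma_sync_sgtable_*() helpers are not just shorthand: for syncs they pass the sg_table's orig_nents, the CPU-side entry count that the dma_sync_sg_*() API expects, whereas the replaced calls passed sgt.nents, the possibly smaller mapped-entry count returned by dma_map_sg(). From include/linux/dma-mapping.h, the CPU-direction helper is essentially:

	static inline void dma_sync_sgtable_for_cpu(struct device *dev,
			struct sg_table *sgt, enum dma_data_direction dir)
	{
		dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
	}

so switching to the sgtable variants fixes the nents/orig_nents confusion without open-coding it at every call site.
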
@@ -497,7 +497,7 @@ free_sdt:
vidtv_psi_sdt_table_destroy(m->si.sdt);
free_pat:
vidtv_psi_pat_table_destroy(m->si.pat);
return 0;
return -EINVAL;
}

void vidtv_channel_si_destroy(struct vidtv_mux *m)

@@ -954,8 +954,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
if (dev->has_compose_cap) {
v4l2_rect_set_min_size(compose, &min_rect);
v4l2_rect_set_max_size(compose, &max_rect);
v4l2_rect_map_inside(compose, &fmt);
}
v4l2_rect_map_inside(compose, &fmt);
dev->fmt_cap_rect = fmt;
tpg_s_buf_height(&dev->tpg, fmt.height);
} else if (dev->has_compose_cap) {

@@ -119,9 +119,8 @@ static void cxusb_gpio_tuner(struct dvb_usb_device *d, int onoff)

o[0] = GPIO_TUNER;
o[1] = onoff;
cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1);

if (i != 0x01)
if (!cxusb_ctrl_msg(d, CMD_GPIO_WRITE, o, 2, &i, 1) && i != 0x01)
dev_info(&d->udev->dev, "gpio_write failed.\n");

st->gpio_write_state[GPIO_TUNER] = onoff;

@@ -520,12 +520,13 @@ static int hdcs_init(struct sd *sd)
static int hdcs_dump(struct sd *sd)
{
u16 reg, val;
int err = 0;

pr_info("Dumping sensor registers:\n");

for (reg = HDCS_IDENT; reg <= HDCS_ROWEXPH; reg++) {
stv06xx_read_sensor(sd, reg, &val);
for (reg = HDCS_IDENT; reg <= HDCS_ROWEXPH && !err; reg++) {
err = stv06xx_read_sensor(sd, reg, &val);
pr_info("reg 0x%02x = 0x%02x\n", reg, val);
}
return 0;
return (err < 0) ? err : 0;
}

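hdcs_dump() previously ignored every stv06xx_read_sensor() return value and always reported success; the rewrite latches the first error, stops the loop, and propagates it. The shape of the idiom, in a standalone sketch with a fake register reader:

	#include <stdio.h>

	static int read_reg(int reg, int *val)
	{
		if (reg == 3)		/* pretend register 3 fails */
			return -5;	/* -EIO */
		*val = reg * 2;
		return 0;
	}

	static int dump_regs(void)
	{
		int err = 0, reg, val = 0;

		for (reg = 0; reg < 8 && !err; reg++) {
			err = read_reg(reg, &val);
			printf("reg 0x%02x = 0x%02x\n", reg, val);
		}
		return (err < 0) ? err : 0;
	}

	int main(void)
	{
		return dump_regs() ? 1 : 0;
	}

As in the driver code, the iteration in which the read fails still prints, so that one value may be stale; the error return is what the caller acts on.
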
@@ -1642,7 +1642,9 @@ static bool uvc_ctrl_xctrls_has_control(const struct v4l2_ext_control *xctrls,
}

static void uvc_ctrl_send_events(struct uvc_fh *handle,
const struct v4l2_ext_control *xctrls, unsigned int xctrls_count)
struct uvc_entity *entity,
const struct v4l2_ext_control *xctrls,
unsigned int xctrls_count)
{
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
@@ -1653,6 +1655,9 @@ static void uvc_ctrl_send_events(struct uvc_fh *handle,
u32 changes = V4L2_EVENT_CTRL_CH_VALUE;

ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping);
if (ctrl->entity != entity)
continue;

if (ctrl->info.flags & UVC_CTRL_FLAG_ASYNCHRONOUS)
/* Notification will be sent from an Interrupt event. */
continue;
@@ -1783,12 +1788,17 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
return mutex_lock_interruptible(&chain->ctrl_mutex) ? -ERESTARTSYS : 0;
}

/*
* Returns the number of uvc controls that have been correctly set, or a
* negative number if there has been an error.
*/
static int uvc_ctrl_commit_entity(struct uvc_device *dev,
struct uvc_fh *handle,
struct uvc_entity *entity,
int rollback,
struct uvc_control **err_ctrl)
{
unsigned int processed_ctrls = 0;
struct uvc_control *ctrl;
unsigned int i;
int ret;
@@ -1823,6 +1833,9 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
else
ret = 0;

if (!ret)
processed_ctrls++;

if (rollback || ret < 0)
memcpy(uvc_ctrl_data(ctrl, UVC_CTRL_DATA_CURRENT),
uvc_ctrl_data(ctrl, UVC_CTRL_DATA_BACKUP),
@@ -1841,7 +1854,7 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
uvc_ctrl_set_handle(handle, ctrl, handle);
}

return 0;
return processed_ctrls;
}

static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity,
@@ -1883,11 +1896,13 @@ int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
uvc_ctrl_find_ctrl_idx(entity, ctrls,
err_ctrl);
goto done;
} else if (ret > 0 && !rollback) {
uvc_ctrl_send_events(handle, entity,
ctrls->controls, ctrls->count);
}
}

if (!rollback)
uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count);
ret = 0;
done:
mutex_unlock(&chain->ctrl_mutex);
return ret;

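uvc_ctrl_commit_entity() now follows the "count applied, or negative error" convention, which lets __uvc_ctrl_commit() distinguish "nothing was applied" (no events needed) from "some controls changed" (send events for this entity only). A generic sketch of the convention, with hypothetical names:

	#include <stdio.h>

	/* Apply items; return how many were applied (>= 0) or a negative
	 * errno-style code on a hard failure. Illustrative, not uvcvideo. */
	static int commit_items(const int *want, int n)
	{
		int applied = 0, i;

		for (i = 0; i < n; i++) {
			if (want[i] < 0)
				return -22;	/* -EINVAL: abort */
			if (want[i] > 0)	/* zero means nothing to do */
				applied++;
		}
		return applied;
	}

	int main(void)
	{
		int want[] = { 1, 0, 2 };
		int ret = commit_items(want, 3);

		if (ret < 0)
			printf("failed: %d\n", ret);
		else if (ret > 0)	/* like "ret > 0 && !rollback" above */
			printf("notify %d changes\n", ret);
		return 0;
	}
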
@@ -2217,13 +2217,16 @@ static int uvc_probe(struct usb_interface *intf,
#endif

/* Parse the Video Class control descriptor. */
if (uvc_parse_control(dev) < 0) {
ret = uvc_parse_control(dev);
if (ret < 0) {
ret = -ENODEV;
uvc_dbg(dev, PROBE, "Unable to parse UVC descriptors\n");
goto error;
}

/* Parse the associated GPIOs. */
if (uvc_gpio_parse(dev) < 0) {
ret = uvc_gpio_parse(dev);
if (ret < 0) {
uvc_dbg(dev, PROBE, "Unable to parse UVC GPIOs\n");
goto error;
}
@@ -2249,24 +2252,32 @@ static int uvc_probe(struct usb_interface *intf,
}

/* Register the V4L2 device. */
if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
ret = v4l2_device_register(&intf->dev, &dev->vdev);
if (ret < 0)
goto error;

/* Scan the device for video chains. */
if (uvc_scan_device(dev) < 0)
if (uvc_scan_device(dev) < 0) {
ret = -ENODEV;
goto error;
}

/* Initialize controls. */
if (uvc_ctrl_init_device(dev) < 0)
if (uvc_ctrl_init_device(dev) < 0) {
ret = -ENODEV;
goto error;
}

/* Register video device nodes. */
if (uvc_register_chains(dev) < 0)
if (uvc_register_chains(dev) < 0) {
ret = -ENODEV;
goto error;
}

#ifdef CONFIG_MEDIA_CONTROLLER
/* Register the media device node */
if (media_device_register(&dev->mdev) < 0)
ret = media_device_register(&dev->mdev);
if (ret < 0)
goto error;
#endif
/* Save our data pointer in the interface data. */
@@ -2300,7 +2311,7 @@ static int uvc_probe(struct usb_interface *intf,
error:
uvc_unregister_video(dev);
kref_put(&dev->ref, uvc_delete);
return -ENODEV;
return ret;
}

static void uvc_disconnect(struct usb_interface *intf)

@@ -1033,25 +1033,25 @@ int __video_register_device(struct video_device *vdev,
vdev->dev.class = &video_class;
vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor);
vdev->dev.parent = vdev->dev_parent;
vdev->dev.release = v4l2_device_release;
dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num);

/* Increase v4l2_device refcount */
v4l2_device_get(vdev->v4l2_dev);

mutex_lock(&videodev_lock);
ret = device_register(&vdev->dev);
if (ret < 0) {
mutex_unlock(&videodev_lock);
pr_err("%s: device_register failed\n", __func__);
goto cleanup;
put_device(&vdev->dev);
return ret;
}
/* Register the release callback that will be called when the last
reference to the device goes away. */
vdev->dev.release = v4l2_device_release;

if (nr != -1 && nr != vdev->num && warn_if_nr_in_use)
pr_warn("%s: requested %s%d, got %s\n", __func__,
name_base, nr, video_device_node_name(vdev));

/* Increase v4l2_device refcount */
v4l2_device_get(vdev->v4l2_dev);

/* Part 5: Register the entity. */
ret = video_register_media_controller(vdev);

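This follows the rule documented for device_register(): once a struct device has been initialized, a failed registration must be unwound with put_device(), not by freeing or ad-hoc cleanup, because the embedded kobject may already be reachable and only its release callback (here v4l2_device_release, now assigned before registration) may free the object. A userspace analogue of why the error path drops a reference instead of freeing:

	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int refs;
		void (*release)(struct obj *);
	};

	static void obj_put(struct obj *o)
	{
		if (--o->refs == 0)
			o->release(o);	/* the only place the memory dies */
	}

	static void obj_release(struct obj *o)
	{
		printf("released\n");
		free(o);
	}

	static int obj_register(struct obj *o)
	{
		(void)o;
		return -1;		/* pretend registration failed late */
	}

	int main(void)
	{
		struct obj *o = calloc(1, sizeof(*o));

		if (!o)
			return 1;
		o->refs = 1;
		o->release = obj_release;
		if (obj_register(o) < 0)
			obj_put(o);	/* not free(o): release owns freeing */
		return 0;
	}
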
@@ -86,6 +86,7 @@ struct mmc_fixup {
#define CID_MANFID_MICRON 0x13
#define CID_MANFID_SAMSUNG 0x15
#define CID_MANFID_APACER 0x27
#define CID_MANFID_SWISSBIT 0x5D
#define CID_MANFID_KINGSTON 0x70
#define CID_MANFID_HYNIX 0x90
#define CID_MANFID_KINGSTON_SD 0x9F
@@ -291,4 +292,9 @@ static inline int mmc_card_broken_sd_poweroff_notify(const struct mmc_card *c)
return c->quirks & MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY;
}

static inline int mmc_card_no_uhs_ddr50_tuning(const struct mmc_card *c)
{
return c->quirks & MMC_QUIRK_NO_UHS_DDR50_TUNING;
}

#endif

@@ -34,6 +34,16 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = {
MMC_QUIRK_BROKEN_SD_CACHE | MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY,
EXT_CSD_REV_ANY),

/*
* Swissbit series S46-u cards throw I/O errors during tuning requests
* after the initial tuning request expectedly times out. This has
* only been observed on cards manufactured on 01/2019 that are using
* Bay Trail host controllers.
*/
_FIXUP_EXT("0016G", CID_MANFID_SWISSBIT, 0x5342, 2019, 1,
0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd,
MMC_QUIRK_NO_UHS_DDR50_TUNING, EXT_CSD_REV_ANY),

END_FIXUP
};

@@ -618,6 +618,29 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
return 0;
}

/*
* Determine if the card should tune or not.
*/
static bool mmc_sd_use_tuning(struct mmc_card *card)
{
/*
* SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
*/
if (mmc_host_is_spi(card->host))
return false;

switch (card->host->ios.timing) {
case MMC_TIMING_UHS_SDR50:
case MMC_TIMING_UHS_SDR104:
return true;
case MMC_TIMING_UHS_DDR50:
return !mmc_card_no_uhs_ddr50_tuning(card);
}

return false;
}

/*
* UHS-I specific initialization procedure
*/
@@ -661,14 +684,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
if (err)
goto out;

/*
* SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
*/
if (!mmc_host_is_spi(card->host) &&
(card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
if (mmc_sd_use_tuning(card)) {
err = mmc_execute_tuning(card);

/*

@@ -2917,7 +2917,7 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
write_reg_dma(nandc, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
}

nandc->buf_count = len;
nandc->buf_count = 512;
memset(nandc->data_buffer, 0xff, nandc->buf_count);

config_nand_single_cw_page_read(chip, false, 0);

@@ -817,6 +817,7 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct nand_chip *nand,
if (ret)
return ret;

sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
nfc->regs + NFC_REG_CMD);
@@ -1049,6 +1050,7 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct nand_chip *nand,
if (ret)
return ret;

sunxi_nfc_randomizer_config(nand, page, false);
sunxi_nfc_randomizer_enable(nand);
sunxi_nfc_hw_ecc_set_prot_oob_bytes(nand, oob, 0, bbm, page);

@@ -385,10 +385,11 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
priv = cdev_to_priv(mcan_class);

priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_m_can_class_free_dev;
} else {
if (IS_ERR(priv->power)) {
if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_m_can_class_free_dev;
}
priv->power = NULL;
}

@@ -123,7 +123,6 @@ static netdev_tx_t aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *nd
}
#endif

skb_tx_timestamp(skb);
return aq_nic_xmit(aq_nic, skb);
}

@@ -898,6 +898,8 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)

frags = aq_nic_map_skb(self, skb, ring);

skb_tx_timestamp(skb);

if (likely(frags)) {
err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
ring, frags);

@@ -5070,7 +5070,11 @@ static int macb_probe(struct platform_device *pdev)

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
if (err) {
dev_err(&pdev->dev, "failed to set DMA mask\n");
goto err_out_free_netdev;
}
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif

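dma_set_mask_and_coherent() can legitimately fail, for example when the platform cannot provide 44-bit addressing, and ignoring the return value leaves the device with a mask it cannot honour. Checking it is the minimal fix; a common wider idiom (a sketch, not what macb does here) is to try the large mask first and fall back:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		return err;
	}
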
@@ -1148,6 +1148,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
struct gmac_txdesc *txd;
skb_frag_t *skb_frag;
dma_addr_t mapping;
bool tcp = false;
void *buffer;
u16 mss;
int ret;
@@ -1155,6 +1156,13 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
word1 = skb->len;
word3 = SOF_BIT;

/* Determine if we are doing TCP */
if (skb->protocol == htons(ETH_P_IP))
tcp = (ip_hdr(skb)->protocol == IPPROTO_TCP);
else
/* IPv6 */
tcp = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP);

mss = skb_shinfo(skb)->gso_size;
if (mss) {
/* This means we are dealing with TCP and skb->len is the
@@ -1167,8 +1175,26 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
mss, skb->len);
word1 |= TSS_MTU_ENABLE_BIT;
word3 |= mss;
} else if (tcp) {
/* Even if we are not using TSO, use the hardware offloader
* for transferring the TCP frame: this hardware has partial
* TCP awareness (called TOE - TCP Offload Engine) and will
* according to the datasheet put packets belonging to the
* same TCP connection in the same queue for the TOE/TSO
* engine to process. The engine will deal with chopping
* up frames that exceed ETH_DATA_LEN which the
* checksumming engine cannot handle (see below) into
* manageable chunks. It flawlessly deals with quite big
* frames and frames containing custom DSA EtherTypes.
*/
mss = netdev->mtu + skb_tcp_all_headers(skb);
mss = min(mss, skb->len);
netdev_dbg(netdev, "TOE/TSO len %04x mtu %04x mss %04x\n",
skb->len, netdev->mtu, mss);
word1 |= TSS_MTU_ENABLE_BIT;
word3 |= mss;
} else if (skb->len >= ETH_FRAME_LEN) {
/* Hardware offloaded checksumming isn't working on frames
/* Hardware offloaded checksumming isn't working on non-TCP frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
* bigger they get truncated, or the last few bytes get
@@ -1185,21 +1211,16 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
}

if (skb->ip_summed == CHECKSUM_PARTIAL) {
int tcp = 0;

/* We do not switch off the checksumming on non TCP/UDP
* frames: as is shown from tests, the checksumming engine
* is smart enough to see that a frame is not actually TCP
* or UDP and then just pass it through without any changes
* to the frame.
*/
if (skb->protocol == htons(ETH_P_IP)) {
if (skb->protocol == htons(ETH_P_IP))
word1 |= TSS_IP_CHKSUM_BIT;
tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
} else { /* IPv6 */
else
word1 |= TSS_IPV6_ENABLE_BIT;
tcp = ipv6_hdr(skb)->nexthdr == IPPROTO_TCP;
}

word1 |= tcp ? TSS_TCP_CHKSUM_BIT : TSS_UDP_CHKSUM_BIT;
}

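For the non-TSO TCP path above, the TOE segment limit is the MTU plus everything in front of the payload, clamped to the frame length. Assuming a 1500-byte MTU and minimal Ethernet + IPv4 + TCP headers (14 + 20 + 20 bytes), skb_tcp_all_headers() would come to 54, so:

	mss = 1500 + 54;		/* 1554: full-size segments clear the limit */
	mss = min(mss, skb->len);	/* a 200-byte frame clamps mss to 200 */

which tells the engine a per-segment size never larger than the frame itself.
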
@@ -146,6 +146,8 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
np->ioaddr = ioaddr;
np->chip_id = chip_idx;
np->pdev = pdev;

spin_lock_init(&np->stats_lock);
spin_lock_init (&np->tx_lock);
spin_lock_init (&np->rx_lock);

@@ -866,7 +868,6 @@ tx_error (struct net_device *dev, int tx_status)
frame_id = (tx_status & 0xffff0000);
printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
dev->name, tx_status, frame_id);
dev->stats.tx_errors++;
/* Ttransmit Underrun */
if (tx_status & 0x10) {
dev->stats.tx_fifo_errors++;
@@ -903,9 +904,15 @@ tx_error (struct net_device *dev, int tx_status)
rio_set_led_mode(dev);
/* Let TxStartThresh stay default value */
}

spin_lock(&np->stats_lock);
/* Maximum Collisions */
if (tx_status & 0x08)
dev->stats.collisions++;

dev->stats.tx_errors++;
spin_unlock(&np->stats_lock);

/* Restart the Tx */
dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
}
@@ -1074,7 +1081,9 @@ get_stats (struct net_device *dev)
int i;
#endif
unsigned int stat_reg;
unsigned long flags;

spin_lock_irqsave(&np->stats_lock, flags);
/* All statistics registers need to be acknowledged,
else statistic overflow could cause problems */

@@ -1124,6 +1133,9 @@ get_stats (struct net_device *dev)
dr16(TCPCheckSumErrors);
dr16(UDPCheckSumErrors);
dr16(IPCheckSumErrors);

spin_unlock_irqrestore(&np->stats_lock, flags);

return &dev->stats;
}

@@ -372,6 +372,8 @@ struct netdev_private {
struct pci_dev *pdev;
void __iomem *ioaddr;
void __iomem *eeprom_addr;
// To ensure synchronization when stats are updated.
spinlock_t stats_lock;
spinlock_t tx_lock;
spinlock_t rx_lock;
unsigned int rx_buf_sz; /* Based on MTU+slack. */

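The new stats_lock serializes the read-and-acknowledge statistics window in get_stats() against the counter increments in tx_error(), which runs from interrupt context; that is why only the process-context side needs the _irqsave form. A reduced sketch of the split:

	/* interrupt path (tx_error): already atomic, plain spin_lock */
	spin_lock(&np->stats_lock);
	dev->stats.tx_errors++;
	spin_unlock(&np->stats_lock);

	/* process context (get_stats): must also mask local interrupts */
	spin_lock_irqsave(&np->stats_lock, flags);
	/* ... read/acknowledge the hardware counters ... */
	spin_unlock_irqrestore(&np->stats_lock, flags);

Taking the _irqsave form in process context is what prevents the interrupt path from deadlocking on a lock its own CPU already holds.
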
@@ -1609,7 +1609,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
/* version 1 of the cmd is not supported only by BE2 */
if (BE2_chip(adapter))
hdr->version = 0;
if (BE3_chip(adapter) || lancer_chip(adapter))
else if (BE3_chip(adapter) || lancer_chip(adapter))
hdr->version = 1;
else
hdr->version = 2;

@@ -31,6 +31,7 @@ config FTGMAC100
depends on ARM || COMPILE_TEST
depends on !64BIT || BROKEN
select PHYLIB
select FIXED_PHY
select MDIO_ASPEED if MACH_ASPEED_G6
select CRC32
help

@@ -1067,10 +1067,11 @@ int i40e_pf_reset(struct i40e_hw *hw)
void i40e_clear_hw(struct i40e_hw *hw)
{
u32 num_queues, base_queue;
u32 num_pf_int;
u32 num_vf_int;
s32 num_pf_int;
s32 num_vf_int;
u32 num_vfs;
u32 i, j;
s32 i;
u32 j;
u32 val;
u32 eol = 0x7ff;

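The type changes matter because i40e_clear_hw() derives loop bounds by subtraction: with u32, a bound like num_pf_int - 2 wraps to a huge value when the hardware reports fewer interrupts than expected, and the loop then writes far past the intended register range. With s32 the same comparison is signed and the loop body simply never runs. A sketch of the hazard (the driver's exact loops are not shown in this hunk):

	#include <stdio.h>

	int main(void)
	{
		unsigned int num = 1;	/* fewer resources than expected */
		int i;

		/* Unsigned arithmetic: num - 2 == 0xffffffff. */
		printf("u32 bound would be %u iterations\n", num - 2);

		/* Signed comparison: bound is -1, body never executes. */
		for (i = 0; i < (int)num - 2; i++)
			printf("touch %d\n", i);
		return 0;
	}
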
@@ -3024,7 +3024,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle_arr[2];

/* A rule already exists with the new VSI being added */
if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
return -EEXIST;

vsi_handle_arr[0] = cur_fltr->vsi_handle;
@@ -5991,7 +5991,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,

/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
return 0;
return -EEXIST;

/* Update the previously created VSI list set with
* the new VSI ID passed in

@@ -352,9 +352,12 @@ int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
mutex_lock(&pfvf->mbox.lock);

/* Remove RQ's policer mapping */
for (qidx = 0; qidx < hw->rx_queues; qidx++)
cn10k_map_unmap_rq_policer(pfvf, qidx,
hw->matchall_ipolicer, false);
for (qidx = 0; qidx < hw->rx_queues; qidx++) {
rc = cn10k_map_unmap_rq_policer(pfvf, qidx, hw->matchall_ipolicer, false);
if (rc)
dev_warn(pfvf->dev, "Failed to unmap RQ %d's policer (error %d).",
qidx, rc);
}

rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);

@@ -1916,6 +1916,7 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
info->so_timestamping |=
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;

@@ -444,19 +444,22 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
int err;

out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;

mlx5_query_nic_vport_context(mdev, 0, out);
err = mlx5_query_nic_vport_context(mdev, 0, out);
if (err)
goto out;

*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.node_guid);

out:
kvfree(out);

return 0;
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

@@ -498,19 +501,22 @@ int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
int err;

out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;

mlx5_query_nic_vport_context(mdev, 0, out);
err = mlx5_query_nic_vport_context(mdev, 0, out);
if (err)
goto out;

*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
nic_vport_context.qkey_violation_counter);

out:
kvfree(out);

return 0;
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

@@ -18,6 +18,8 @@
#define EEPROM_MAC_OFFSET (0x01)
#define MAX_EEPROM_SIZE (512)
#define MAX_OTP_SIZE (1024)
#define MAX_HS_OTP_SIZE (8 * 1024)
#define MAX_HS_EEPROM_SIZE (64 * 1024)
#define OTP_INDICATOR_1 (0xF3)
#define OTP_INDICATOR_2 (0xF7)

@@ -272,6 +274,9 @@ static int lan743x_hs_otp_read(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;

if (offset + length > MAX_HS_OTP_SIZE)
return -EINVAL;

ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -320,6 +325,9 @@ static int lan743x_hs_otp_write(struct lan743x_adapter *adapter, u32 offset,
int ret;
int i;

if (offset + length > MAX_HS_OTP_SIZE)
return -EINVAL;

ret = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (ret < 0)
return ret;
@@ -497,6 +505,9 @@ static int lan743x_hs_eeprom_read(struct lan743x_adapter *adapter,
u32 val;
int i;

if (offset + length > MAX_HS_EEPROM_SIZE)
return -EINVAL;

retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -539,6 +550,9 @@ static int lan743x_hs_eeprom_write(struct lan743x_adapter *adapter,
u32 val;
int i;

if (offset + length > MAX_HS_EEPROM_SIZE)
return -EINVAL;

retval = lan743x_hs_syslock_acquire(adapter, LOCK_TIMEOUT_MAX_CNT);
if (retval < 0)
return retval;
@@ -604,9 +618,9 @@ static int lan743x_ethtool_get_eeprom_len(struct net_device *netdev)
struct lan743x_adapter *adapter = netdev_priv(netdev);

if (adapter->flags & LAN743X_ADAPTER_FLAG_OTP)
return MAX_OTP_SIZE;
return adapter->is_pci11x1x ? MAX_HS_OTP_SIZE : MAX_OTP_SIZE;

return MAX_EEPROM_SIZE;
return adapter->is_pci11x1x ? MAX_HS_EEPROM_SIZE : MAX_EEPROM_SIZE;
}

static int lan743x_ethtool_get_eeprom(struct net_device *netdev,

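The added range checks reject ethtool accesses past the (now larger) PCI11x1x OTP/EEPROM windows before any hardware is touched. One caveat when writing such guards: offset and length are u32 here, so offset + length can in principle wrap; a wrap-proof formulation compares against the remaining space instead. A standalone sketch of that variant (illustrative only; the driver uses the direct form):

	#include <stdio.h>

	#define MAX_HS_OTP_SIZE (8 * 1024)

	/* Wrap-proof bounds check: no addition that could overflow. */
	static int check_range(unsigned int offset, unsigned int length)
	{
		if (offset > MAX_HS_OTP_SIZE ||
		    length > MAX_HS_OTP_SIZE - offset)
			return -22;	/* -EINVAL */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_range(8 * 1024 - 4, 4));	/* 0: fits exactly */
		printf("%d\n", check_range(8 * 1024 - 4, 8));	/* -22: past the end */
		printf("%d\n", check_range(0xfffffff0u, 0x20));	/* -22: would wrap */
		return 0;
	}
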