Merge 5e8c311318 ("Revert "drm/meson: vclk: fix calculation of 59.94 fractional rates"") into android15-6.6-lts

Steps on the way to 6.6.90

Resolves merge conflicts in:
	drivers/cpufreq/cpufreq.c
	include/linux/bpf_verifier.h

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I911407e27dff5c0a91f3bfdc4d51aeb4052df875
@@ -40,6 +40,9 @@
 			reg = <1>;
 			interrupt-parent = <&gpio4>;
 			interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+			micrel,led-mode = <1>;
+			clocks = <&clks IMX6UL_CLK_ENET_REF>;
+			clock-names = "rmii-ref";
 			status = "okay";
 		};
 	};

@@ -73,14 +73,13 @@
 	};

 	intc: interrupt-controller@4ac00000 {
-		compatible = "arm,cortex-a7-gic";
+		compatible = "arm,gic-400";
 		#interrupt-cells = <3>;
-		#address-cells = <1>;
 		interrupt-controller;
 		reg = <0x0 0x4ac10000 0x0 0x1000>,
-		      <0x0 0x4ac20000 0x0 0x2000>,
-		      <0x0 0x4ac40000 0x0 0x2000>,
-		      <0x0 0x4ac60000 0x0 0x2000>;
+		      <0x0 0x4ac20000 0x0 0x20000>,
+		      <0x0 0x4ac40000 0x0 0x20000>,
+		      <0x0 0x4ac60000 0x0 0x20000>;
 	};

 	psci {
@@ -234,10 +234,8 @@ fi

 # suppress some warnings in recent ld versions
 nowarn="-z noexecstack"
-if ! ld_is_lld; then
-	if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then
-		nowarn="$nowarn --no-warn-rwx-segments"
-	fi
+if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then
+	nowarn="$nowarn --no-warn-rwx-segments"
 fi

 platformo=$object/"$platform".o
@@ -1056,6 +1056,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 	pmd_t *pmd;
 	pte_t *pte;

+	/*
+	 * Make sure we align the start vmemmap addr so that we calculate
+	 * the correct start_pfn in altmap boundary check to decided whether
+	 * we should use altmap or RAM based backing memory allocation. Also
+	 * the address need to be aligned for set_pte operation.
+	 *
+	 * If the start addr is already PMD_SIZE aligned we will try to use
+	 * a pmd mapping. We don't want to be too aggressive here beacause
+	 * that will cause more allocations in RAM. So only if the namespace
+	 * vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
+	 */
+
+	start = ALIGN_DOWN(start, PAGE_SIZE);
 	for (addr = start; addr < end; addr = next) {
 		next = pmd_addr_end(addr, end);

@@ -1081,8 +1094,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
 		 * in altmap block allocation failures, in which case
 		 * we fallback to RAM for vmemmap allocation.
 		 */
-		if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
+		if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
 		    altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
 			/*
 			 * make sure we don't create altmap mappings
 			 * covering things outside the device.
@@ -9,7 +9,7 @@
 int patch_insn_write(void *addr, const void *insn, size_t len);
 int patch_text_nosync(void *addr, const void *insns, size_t len);
 int patch_text_set_nosync(void *addr, u8 c, size_t len);
-int patch_text(void *addr, u32 *insns, int ninsns);
+int patch_text(void *addr, u32 *insns, size_t len);

 extern int riscv_patch_in_stop_machine;

@@ -19,7 +19,7 @@
 struct patch_insn {
 	void *addr;
 	u32 *insns;
-	int ninsns;
+	size_t len;
 	atomic_t cpu_count;
 };

@@ -234,14 +234,10 @@ NOKPROBE_SYMBOL(patch_text_nosync);
 static int patch_text_cb(void *data)
 {
 	struct patch_insn *patch = data;
-	unsigned long len;
-	int i, ret = 0;
+	int ret = 0;

 	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
-		for (i = 0; ret == 0 && i < patch->ninsns; i++) {
-			len = GET_INSN_LENGTH(patch->insns[i]);
-			ret = patch_insn_write(patch->addr + i * len, &patch->insns[i], len);
-		}
+		ret = patch_insn_write(patch->addr, patch->insns, patch->len);
 		/*
 		 * Make sure the patching store is effective *before* we
 		 * increment the counter which releases all waiting CPUs

@@ -262,13 +258,13 @@ static int patch_text_cb(void *data)
 }
 NOKPROBE_SYMBOL(patch_text_cb);

-int patch_text(void *addr, u32 *insns, int ninsns)
+int patch_text(void *addr, u32 *insns, size_t len)
 {
 	int ret;
 	struct patch_insn patch = {
 		.addr = addr,
 		.insns = insns,
-		.ninsns = ninsns,
+		.len = len,
 		.cpu_count = ATOMIC_INIT(0),
 	};
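The riscv hunks above change patch_text()'s third argument from an instruction count (ninsns) to a byte length (len): with the C extension an instruction is 2 or 4 bytes, so "1 instruction" does not pin down how many bytes to write. A minimal caller-side sketch of the new convention — patch_one_insn() is a hypothetical helper; GET_INSN_LENGTH() and patch_text() are used exactly as in the hunks:

	/* Hypothetical helper: patch a single, possibly compressed, instruction.
	 * GET_INSN_LENGTH() yields 2 for a 16-bit opcode and 4 otherwise, so
	 * the write size is explicit instead of an ambiguous "1 instruction".
	 */
	static int patch_one_insn(void *addr, u32 opcode)
	{
		size_t len = GET_INSN_LENGTH(opcode);	/* 2 or 4 bytes */

		/* old API: patch_text(addr, &opcode, 1); */
		return patch_text(addr, &opcode, len);
	}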
@@ -23,13 +23,13 @@ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
+	size_t len = GET_INSN_LENGTH(p->opcode);
 	u32 insn = __BUG_INSN_32;
-	unsigned long offset = GET_INSN_LENGTH(p->opcode);

-	p->ainsn.api.restore = (unsigned long)p->addr + offset;
+	p->ainsn.api.restore = (unsigned long)p->addr + len;

-	patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1);
-	patch_text_nosync((void *)p->ainsn.api.insn + offset, &insn, 1);
+	patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
+	patch_text_nosync((void *)p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
 }

 static void __kprobes arch_prepare_simulate(struct kprobe *p)

@@ -116,16 +116,18 @@ void *alloc_insn_page(void)
 /* install breakpoint in text */
 void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
-	u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ?
-		   __BUG_INSN_32 : __BUG_INSN_16;
+	size_t len = GET_INSN_LENGTH(p->opcode);
+	u32 insn = len == 4 ? __BUG_INSN_32 : __BUG_INSN_16;

-	patch_text(p->addr, &insn, 1);
+	patch_text(p->addr, &insn, len);
 }

 /* remove breakpoint from text */
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
-	patch_text(p->addr, &p->opcode, 1);
+	size_t len = GET_INSN_LENGTH(p->opcode);
+
+	patch_text(p->addr, &p->opcode, len);
 }

 void __kprobes arch_remove_kprobe(struct kprobe *p)
@@ -14,6 +14,7 @@
 #include "bpf_jit.h"

 #define RV_FENTRY_NINSNS 2
+#define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4)

 #define RV_REG_TCC RV_REG_A6
 #define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */

@@ -681,7 +682,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
 	if (ret)
 		return ret;

-	if (memcmp(ip, old_insns, RV_FENTRY_NINSNS * 4))
+	if (memcmp(ip, old_insns, RV_FENTRY_NBYTES))
 		return -EFAULT;

 	ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call);

@@ -690,8 +691,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,

 	cpus_read_lock();
 	mutex_lock(&text_mutex);
-	if (memcmp(ip, new_insns, RV_FENTRY_NINSNS * 4))
-		ret = patch_text(ip, new_insns, RV_FENTRY_NINSNS);
+	if (memcmp(ip, new_insns, RV_FENTRY_NBYTES))
+		ret = patch_text(ip, new_insns, RV_FENTRY_NBYTES);
 	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
@@ -48,6 +48,7 @@ KVM_X86_OP(set_idt)
 KVM_X86_OP(get_gdt)
 KVM_X86_OP(set_gdt)
 KVM_X86_OP(sync_dirty_debug_regs)
+KVM_X86_OP(set_dr6)
 KVM_X86_OP(set_dr7)
 KVM_X86_OP(cache_reg)
 KVM_X86_OP(get_rflags)

@@ -1595,6 +1595,7 @@ struct kvm_x86_ops {
 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
+	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
 	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);

@@ -2014,11 +2014,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 	svm->asid = sd->next_asid++;
 }

-static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
+static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
 {
-	struct vmcb *vmcb = svm->vmcb;
+	struct vmcb *vmcb = to_svm(vcpu)->vmcb;

-	if (svm->vcpu.arch.guest_state_protected)
+	if (vcpu->arch.guest_state_protected)
 		return;

 	if (unlikely(value != vmcb->save.dr6)) {

@@ -4220,10 +4220,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * Run with all-zero DR6 unless needed, so that we can get the exact cause
 	 * of a #DB.
 	 */
-	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
-		svm_set_dr6(svm, vcpu->arch.dr6);
-	else
-		svm_set_dr6(svm, DR6_ACTIVE_LOW);
+	if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
+		svm_set_dr6(vcpu, DR6_ACTIVE_LOW);

 	clgi();
 	kvm_load_guest_xsave_state(vcpu);

@@ -5002,6 +5000,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.set_idt = svm_set_idt,
 	.get_gdt = svm_get_gdt,
 	.set_gdt = svm_set_gdt,
+	.set_dr6 = svm_set_dr6,
 	.set_dr7 = svm_set_dr7,
 	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
 	.cache_reg = svm_cache_reg,

@@ -5617,6 +5617,12 @@ static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
 	set_debugreg(DR6_RESERVED, 6);
 }

+static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	lockdep_assert_irqs_disabled();
+	set_debugreg(vcpu->arch.dr6, 6);
+}
+
 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
 {
 	vmcs_writel(GUEST_DR7, val);

@@ -7356,10 +7362,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		vmx->loaded_vmcs->host_state.cr4 = cr4;
 	}

-	/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
-	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
-		set_debugreg(vcpu->arch.dr6, 6);
-
 	/* When single-stepping over STI and MOV SS, we must clear the
 	 * corresponding interruptibility bits in the guest state. Otherwise
 	 * vmentry fails as it then expects bit 14 (BS) in pending debug

@@ -8292,6 +8294,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_idt = vmx_set_idt,
 	.get_gdt = vmx_get_gdt,
 	.set_gdt = vmx_set_gdt,
+	.set_dr6 = vmx_set_dr6,
 	.set_dr7 = vmx_set_dr7,
 	.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
 	.cache_reg = vmx_cache_reg,

@@ -10772,6 +10772,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		set_debugreg(vcpu->arch.eff_db[1], 1);
 		set_debugreg(vcpu->arch.eff_db[2], 2);
 		set_debugreg(vcpu->arch.eff_db[3], 3);
+		/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
+		if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+			static_call(kvm_x86_set_dr6)(vcpu, vcpu->arch.dr6);
 	} else if (unlikely(hw_breakpoint_active())) {
 		set_debugreg(0, 7);
 	}
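The KVM hunks above introduce a per-vendor set_dr6() hook: as the hunks show, SVM stores the value into the VMCB save area while VMX must load the real DR6 register right before entry (hence the lockdep_assert_irqs_disabled() in vmx_set_dr6()), so the common vcpu_enter_guest() path now dispatches through a static call. A comment-only sketch of the wiring, using only names that appear in the hunks:

	/*
	 * KVM_X86_OP(set_dr6)                                  declare the hook
	 * void (*set_dr6)(struct kvm_vcpu *, unsigned long);   kvm_x86_ops slot
	 * .set_dr6 = svm_set_dr6 / vmx_set_dr6                 vendor ops tables
	 * static_call(kvm_x86_set_dr6)(vcpu, vcpu->arch.dr6);  common entry path
	 */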
@@ -3521,22 +3521,16 @@ static void btusb_coredump_qca(struct hci_dev *hdev)
 		bt_dev_err(hdev, "%s: triggle crash failed (%d)", __func__, err);
 }

-/*
- * ==0: not a dump pkt.
- * < 0: fails to handle a dump pkt
- * > 0: otherwise.
- */
+/* Return: 0 on success, negative errno on failure. */
 static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	int ret = 1;
+	int ret = 0;
 	u8 pkt_type;
 	u8 *sk_ptr;
 	unsigned int sk_len;
 	u16 seqno;
 	u32 dump_size;

-	struct hci_event_hdr *event_hdr;
-	struct hci_acl_hdr *acl_hdr;
 	struct qca_dump_hdr *dump_hdr;
 	struct btusb_data *btdata = hci_get_drvdata(hdev);
 	struct usb_device *udev = btdata->udev;

@@ -3546,30 +3540,14 @@ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb)
 	sk_len = skb->len;

 	if (pkt_type == HCI_ACLDATA_PKT) {
-		acl_hdr = hci_acl_hdr(skb);
-		if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
-			return 0;
 		sk_ptr += HCI_ACL_HDR_SIZE;
 		sk_len -= HCI_ACL_HDR_SIZE;
-		event_hdr = (struct hci_event_hdr *)sk_ptr;
-	} else {
-		event_hdr = hci_event_hdr(skb);
 	}

-	if ((event_hdr->evt != HCI_VENDOR_PKT)
-	    || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
-		return 0;
-
 	sk_ptr += HCI_EVENT_HDR_SIZE;
 	sk_len -= HCI_EVENT_HDR_SIZE;

 	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
-	if ((sk_len < offsetof(struct qca_dump_hdr, data))
-	    || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS)
-	    || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
-		return 0;
-
-	/*it is dump pkt now*/
 	seqno = le16_to_cpu(dump_hdr->seqno);
 	if (seqno == 0) {
 		set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags);

@@ -3643,17 +3621,84 @@ out:
 	return ret;
 }

+/* Return: true if the ACL packet is a dump packet, false otherwise. */
+static bool acl_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	u8 *sk_ptr;
+	unsigned int sk_len;
+
+	struct hci_event_hdr *event_hdr;
+	struct hci_acl_hdr *acl_hdr;
+	struct qca_dump_hdr *dump_hdr;
+
+	sk_ptr = skb->data;
+	sk_len = skb->len;
+
+	acl_hdr = hci_acl_hdr(skb);
+	if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE)
+		return false;
+
+	sk_ptr += HCI_ACL_HDR_SIZE;
+	sk_len -= HCI_ACL_HDR_SIZE;
+	event_hdr = (struct hci_event_hdr *)sk_ptr;
+
+	if ((event_hdr->evt != HCI_VENDOR_PKT) ||
+	    (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+		return false;
+
+	sk_ptr += HCI_EVENT_HDR_SIZE;
+	sk_len -= HCI_EVENT_HDR_SIZE;
+
+	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
+	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+		return false;
+
+	return true;
+}
+
+/* Return: true if the event packet is a dump packet, false otherwise. */
+static bool evt_pkt_is_dump_qca(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	u8 *sk_ptr;
+	unsigned int sk_len;
+
+	struct hci_event_hdr *event_hdr;
+	struct qca_dump_hdr *dump_hdr;
+
+	sk_ptr = skb->data;
+	sk_len = skb->len;
+
+	event_hdr = hci_event_hdr(skb);
+
+	if ((event_hdr->evt != HCI_VENDOR_PKT)
+	    || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE)))
+		return false;
+
+	sk_ptr += HCI_EVENT_HDR_SIZE;
+	sk_len -= HCI_EVENT_HDR_SIZE;
+
+	dump_hdr = (struct qca_dump_hdr *)sk_ptr;
+	if ((sk_len < offsetof(struct qca_dump_hdr, data)) ||
+	    (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) ||
+	    (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE))
+		return false;
+
+	return true;
+}
+
 static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	if (handle_dump_pkt_qca(hdev, skb))
-		return 0;
+	if (acl_pkt_is_dump_qca(hdev, skb))
+		return handle_dump_pkt_qca(hdev, skb);
 	return hci_recv_frame(hdev, skb);
 }

 static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb)
 {
-	if (handle_dump_pkt_qca(hdev, skb))
-		return 0;
+	if (evt_pkt_is_dump_qca(hdev, skb))
+		return handle_dump_pkt_qca(hdev, skb);
 	return hci_recv_frame(hdev, skb);
 }
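The btusb hunks above split packet classification out of handle_dump_pkt_qca(): the *_is_dump_qca() predicates are side-effect free, so non-dump traffic falls through to hci_recv_frame() untouched, and the handler's return value (now 0/-errno instead of a tri-state) propagates instead of being collapsed. A hypothetical condensed form of the resulting receive shape, where is_dump() stands in for acl_pkt_is_dump_qca()/evt_pkt_is_dump_qca():

	/* Sketch only: both receive paths share this classify-then-handle shape. */
	if (is_dump(hdev, skb))
		return handle_dump_pkt_qca(hdev, skb);	/* 0 or -errno */
	return hci_recv_frame(hdev, skb);		/* normal HCI input */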
@@ -538,18 +538,20 @@ void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

 static unsigned int __resolve_freq(struct cpufreq_policy *policy,
-				   unsigned int target_freq, unsigned int relation)
+				   unsigned int target_freq,
+				   unsigned int min, unsigned int max,
+				   unsigned int relation)
 {
 	unsigned int idx;
 	unsigned int old_target_freq = target_freq;

-	target_freq = clamp_val(target_freq, policy->min, policy->max);
+	target_freq = clamp_val(target_freq, min, max);
 	trace_android_vh_cpufreq_resolve_freq(policy, &target_freq, old_target_freq);

 	if (!policy->freq_table)
 		return target_freq;

-	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
+	idx = cpufreq_frequency_table_target(policy, target_freq, min, max, relation);
 	policy->cached_resolved_idx = idx;
 	policy->cached_target_freq = target_freq;
 	return policy->freq_table[idx].frequency;

@@ -569,7 +571,21 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
 					 unsigned int target_freq)
 {
-	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
+	unsigned int min = READ_ONCE(policy->min);
+	unsigned int max = READ_ONCE(policy->max);
+
+	/*
+	 * If this function runs in parallel with cpufreq_set_policy(), it may
+	 * read policy->min before the update and policy->max after the update
+	 * or the other way around, so there is no ordering guarantee.
+	 *
+	 * Resolve this by always honoring the max (in case it comes from
+	 * thermal throttling or similar).
+	 */
+	if (unlikely(min > max))
+		min = max;
+
+	return __resolve_freq(policy, target_freq, min, max, CPUFREQ_RELATION_LE);
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
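The two cpufreq hunks above make the limits an explicit (min, max) argument pair so that cpufreq_driver_resolve_freq(), which can run concurrently with cpufreq_set_policy(), works on one coherent snapshot. The reader side reduces to the following idiom, lifted from the hunk and shown only to spell out the failure mode — a torn read can observe min from before an update and max from after it, or vice versa:

	unsigned int min = READ_ONCE(policy->min);
	unsigned int max = READ_ONCE(policy->max);

	/* One torn pair can momentarily yield min > max; repair toward max,
	 * since max may encode a thermal cap that must not be exceeded.
	 */
	if (unlikely(min > max))
		min = max;

	target_freq = clamp_val(target_freq, min, max);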
@@ -2355,7 +2371,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (cpufreq_disabled())
 		return -ENODEV;

-	target_freq = __resolve_freq(policy, target_freq, relation);
+	target_freq = __resolve_freq(policy, target_freq, policy->min,
+				     policy->max, relation);

 	trace_android_vh_cpufreq_target(policy, &target_freq, old_target_freq);

@@ -2647,11 +2664,18 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
 	 * Resolve policy min/max to available frequencies. It ensures
 	 * no frequency resolution will neither overshoot the requested maximum
 	 * nor undershoot the requested minimum.
+	 *
+	 * Avoid storing intermediate values in policy->max or policy->min and
+	 * compiler optimizations around them because they may be accessed
+	 * concurrently by cpufreq_driver_resolve_freq() during the update.
 	 */
-	policy->min = new_data.min;
-	policy->max = new_data.max;
-	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
-	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
+	WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max,
+					       new_data.min, new_data.max,
+					       CPUFREQ_RELATION_H));
+	new_data.min = __resolve_freq(policy, new_data.min, new_data.min,
+				      new_data.max, CPUFREQ_RELATION_L);
+	WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);

 	trace_cpu_frequency_limits(policy);

 	policy->cached_target_freq = UINT_MAX;

@@ -77,7 +77,8 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
 		return freq_next;
 	}

-	index = cpufreq_frequency_table_target(policy, freq_next, relation);
+	index = cpufreq_frequency_table_target(policy, freq_next, policy->min,
+					       policy->max, relation);
 	freq_req = freq_table[index].frequency;
 	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
 	freq_avg = freq_req - freq_reduc;

@@ -118,8 +118,8 @@ int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy)
 EXPORT_SYMBOL_GPL(cpufreq_generic_frequency_table_verify);

 int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
-				 unsigned int target_freq,
-				 unsigned int relation)
+				 unsigned int target_freq, unsigned int min,
+				 unsigned int max, unsigned int relation)
 {
 	struct cpufreq_frequency_table optimal = {
 		.driver_data = ~0,

@@ -150,7 +150,7 @@ int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
 	cpufreq_for_each_valid_entry_idx(pos, table, i) {
 		freq = pos->frequency;

-		if ((freq < policy->min) || (freq > policy->max))
+		if (freq < min || freq > max)
 			continue;
 		if (freq == target_freq) {
 			optimal.driver_data = i;
@@ -251,7 +251,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
 			memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
 			       buf_sz);

-	ffa_rx_release();
+	if (!(flags & PARTITION_INFO_GET_RETURN_COUNT_ONLY))
+		ffa_rx_release();

 	mutex_unlock(&drv_info->rx_lock);

@@ -240,6 +240,9 @@ static struct scmi_device *scmi_child_dev_find(struct device *parent,
 	if (!dev)
 		return NULL;

+	/* Drop the refcnt bumped implicitly by device_find_child */
+	put_device(dev);
+
 	return to_scmi_dev(dev);
 }

@@ -25,6 +25,7 @@ int intel_pxp_gsccs_init(struct intel_pxp *pxp);

 int intel_pxp_gsccs_create_session(struct intel_pxp *pxp, int arb_session_id);
 void intel_pxp_gsccs_end_arb_fw_session(struct intel_pxp *pxp, u32 arb_session_id);
+bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);

 #else
 static inline void intel_pxp_gsccs_fini(struct intel_pxp *pxp)

@@ -36,8 +37,11 @@ static inline int intel_pxp_gsccs_init(struct intel_pxp *pxp)
 	return 0;
 }

+static inline bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp)
+{
+	return false;
+}
+
 #endif

-bool intel_pxp_gsccs_is_ready_for_sessions(struct intel_pxp *pxp);
-
 #endif /*__INTEL_PXP_GSCCS_H__ */
@@ -790,13 +790,13 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
 				 FREQ_1000_1001(params[i].pixel_freq));
 		DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
 				 i, params[i].phy_freq,
-				 FREQ_1000_1001(params[i].phy_freq/1000)*1000);
+				 FREQ_1000_1001(params[i].phy_freq/10)*10);
 		/* Match strict frequency */
 		if (phy_freq == params[i].phy_freq &&
 		    vclk_freq == params[i].vclk_freq)
 			return MODE_OK;
 		/* Match 1000/1001 variant */
-		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/1000)*1000) &&
+		if (phy_freq == (FREQ_1000_1001(params[i].phy_freq/10)*10) &&
 		    vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
 			return MODE_OK;
 	}

@@ -1070,7 +1070,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,

 	for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
 		if ((phy_freq == params[freq].phy_freq ||
-		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/1000)*1000) &&
+		     phy_freq == FREQ_1000_1001(params[freq].phy_freq/10)*10) &&
 		    (vclk_freq == params[freq].vclk_freq ||
 		     vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
 			if (vclk_freq != params[freq].vclk_freq)
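For the meson hunks above: FREQ_1000_1001() derives the 59.94-family rate from the nominal one, and params[].phy_freq is stored in kHz. Dividing by 1000 first rounds to whole MHz, which for these clocks makes the 1000/1001 variant collapse back onto the nominal value; dividing by 10 keeps enough digits to tell the two apart. A worked example, assuming the mainline definition FREQ_1000_1001(f) = DIV_ROUND_CLOSEST((f) * 1000, 1001):

	/* 297 MHz TMDS/PHY clock, stored as 297000 kHz */
	FREQ_1000_1001(297000 / 1000) * 1000	/* rounds in MHz: 297 -> 297, gives 297000 — variant lost */
	FREQ_1000_1001(297000 / 10) * 10	/* rounds in 10 kHz: gives 296700 ~ 296703.3 — variant kept */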
@@ -566,6 +566,18 @@ int iommu_probe_device(struct device *dev)
 	mutex_lock(&iommu_probe_device_lock);
 	ret = __iommu_probe_device(dev, NULL);
 	mutex_unlock(&iommu_probe_device_lock);
+
+	/*
+	 * The dma_configure replay paths need bus_iommu_probe() to
+	 * finish before they can call arch_setup_dma_ops()
+	 */
+	if (IS_ENABLED(CONFIG_IOMMU_DMA) && !ret && dev->iommu_group) {
+		mutex_lock(&dev->iommu_group->mutex);
+		if (!dev->iommu_group->default_domain &&
+		    !dev_iommu_ops(dev)->set_platform_dma_ops)
+			ret = -EPROBE_DEFER;
+		mutex_unlock(&dev->iommu_group->mutex);
+	}
 	if (ret)
 		return ret;

@@ -3149,6 +3161,12 @@ int iommu_device_use_default_domain(struct device *dev)
 		return 0;

 	mutex_lock(&group->mutex);
+	/* We may race against bus_iommu_probe() finalising groups here */
+	if (IS_ENABLED(CONFIG_IOMMU_DMA) && !group->default_domain &&
+	    !dev_iommu_ops(dev)->set_platform_dma_ops) {
+		ret = -EPROBE_DEFER;
+		goto unlock_out;
+	}
 	if (group->owner_cnt) {
 		if (group->owner || !iommu_is_default_domain(group) ||
 		    !xa_empty(&group->pasid_array)) {
@@ -1543,7 +1543,7 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
 	struct tc_taprio_qopt_offload *taprio;
 	struct ocelot_port *ocelot_port;
 	struct timespec64 base_ts;
-	int port;
+	int i, port;
 	u32 val;

 	mutex_lock(&ocelot->fwd_domain_lock);

@@ -1575,6 +1575,9 @@ static void vsc9959_tas_clock_adjust(struct ocelot *ocelot)
 			       QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M,
 			       QSYS_PARAM_CFG_REG_3);

+		for (i = 0; i < taprio->num_entries; i++)
+			vsc9959_tas_gcl_set(ocelot, i, &taprio->entries[i]);
+
 		ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
 			   QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE,
 			   QSYS_TAS_PARAM_CFG_CTRL);
@@ -172,48 +172,57 @@ static struct pds_auxiliary_dev *pdsc_auxbus_dev_register(struct pdsc *cf,
 	return padev;
 }

-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf)
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+			 struct pds_auxiliary_dev **pd_ptr)
 {
 	struct pds_auxiliary_dev *padev;
-	int err = 0;
+
+	if (!*pd_ptr)
+		return;

 	mutex_lock(&pf->config_lock);

-	padev = pf->vfs[cf->vf_id].padev;
-	if (padev) {
-		pds_client_unregister(pf, padev->client_id);
-		auxiliary_device_delete(&padev->aux_dev);
-		auxiliary_device_uninit(&padev->aux_dev);
-		padev->client_id = 0;
-	}
-	pf->vfs[cf->vf_id].padev = NULL;
+	padev = *pd_ptr;
+	pds_client_unregister(pf, padev->client_id);
+	auxiliary_device_delete(&padev->aux_dev);
+	auxiliary_device_uninit(&padev->aux_dev);
+	*pd_ptr = NULL;

 	mutex_unlock(&pf->config_lock);
-	return err;
 }

-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf)
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+			enum pds_core_vif_types vt,
+			struct pds_auxiliary_dev **pd_ptr)
 {
 	struct pds_auxiliary_dev *padev;
-	enum pds_core_vif_types vt;
 	char devname[PDS_DEVNAME_LEN];
+	unsigned long mask;
 	u16 vt_support;
 	int client_id;
 	int err = 0;

+	if (!cf)
+		return -ENODEV;
+
+	if (vt >= PDS_DEV_TYPE_MAX)
+		return -EINVAL;
+
 	mutex_lock(&pf->config_lock);

-	/* We only support vDPA so far, so it is the only one to
-	 * be verified that it is available in the Core device and
-	 * enabled in the devlink param. In the future this might
-	 * become a loop for several VIF types.
-	 */
+	mask = BIT_ULL(PDSC_S_FW_DEAD) |
+	       BIT_ULL(PDSC_S_STOPPING_DRIVER);
+	if (cf->state & mask) {
+		dev_err(pf->dev, "%s: can't add dev, VF client in bad state %#lx\n",
+			__func__, cf->state);
+		err = -ENXIO;
+		goto out_unlock;
+	}
+
 	/* Verify that the type is supported and enabled. It is not
 	 * an error if there is no auxbus device support for this
 	 * VF, it just means something else needs to happen with it.
 	 */
-	vt = PDS_DEV_TYPE_VDPA;
 	vt_support = !!le16_to_cpu(pf->dev_ident.vif_types[vt]);
 	if (!(vt_support &&
 	      pf->viftype_status[vt].supported &&

@@ -239,7 +248,7 @@ int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
 		err = PTR_ERR(padev);
 		goto out_unlock;
 	}
-	pf->vfs[cf->vf_id].padev = padev;
+	*pd_ptr = padev;

 out_unlock:
 	mutex_unlock(&pf->config_lock);

@@ -300,8 +300,11 @@ void pdsc_health_thread(struct work_struct *work);
 int pdsc_register_notify(struct notifier_block *nb);
 void pdsc_unregister_notify(struct notifier_block *nb);
 void pdsc_notify(unsigned long event, void *data);
-int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf);
-int pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf);
+int pdsc_auxbus_dev_add(struct pdsc *cf, struct pdsc *pf,
+			enum pds_core_vif_types vt,
+			struct pds_auxiliary_dev **pd_ptr);
+void pdsc_auxbus_dev_del(struct pdsc *cf, struct pdsc *pf,
+			 struct pds_auxiliary_dev **pd_ptr);

 void pdsc_process_adminq(struct pdsc_qcq *qcq);
 void pdsc_work_thread(struct work_struct *work);

@@ -42,6 +42,8 @@ int pdsc_err_to_errno(enum pds_core_status_code code)
 		return -ERANGE;
 	case PDS_RC_BAD_ADDR:
 		return -EFAULT;
+	case PDS_RC_BAD_PCI:
+		return -ENXIO;
 	case PDS_RC_EOPCODE:
 	case PDS_RC_EINTR:
 	case PDS_RC_DEV_CMD:

@@ -65,7 +67,7 @@ bool pdsc_is_fw_running(struct pdsc *pdsc)
 	/* Firmware is useful only if the running bit is set and
 	 * fw_status != 0xff (bad PCI read)
 	 */
-	return (pdsc->fw_status != 0xff) &&
+	return (pdsc->fw_status != PDS_RC_BAD_PCI) &&
 		(pdsc->fw_status & PDS_CORE_FW_STS_F_RUNNING);
 }

@@ -131,6 +133,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
 	unsigned long max_wait;
 	unsigned long duration;
 	int timeout = 0;
+	bool running;
 	int done = 0;
 	int err = 0;
 	int status;

@@ -139,6 +142,10 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
 	max_wait = start_time + (max_seconds * HZ);

 	while (!done && !timeout) {
+		running = pdsc_is_fw_running(pdsc);
+		if (!running)
+			break;
+
 		done = pdsc_devcmd_done(pdsc);
 		if (done)
 			break;

@@ -155,7 +162,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
 	dev_dbg(dev, "DEVCMD %d %s after %ld secs\n",
 		opcode, pdsc_devcmd_str(opcode), duration / HZ);

-	if (!done || timeout) {
+	if ((!done || timeout) && running) {
 		dev_err(dev, "DEVCMD %d %s timeout, done %d timeout %d max_seconds=%d\n",
 			opcode, pdsc_devcmd_str(opcode), done, timeout,
 			max_seconds);

@@ -55,8 +55,11 @@ int pdsc_dl_enable_set(struct devlink *dl, u32 id,
 	for (vf_id = 0; vf_id < pdsc->num_vfs; vf_id++) {
 		struct pdsc *vf = pdsc->vfs[vf_id].vf;

-		err = ctx->val.vbool ? pdsc_auxbus_dev_add(vf, pdsc) :
-				       pdsc_auxbus_dev_del(vf, pdsc);
+		if (ctx->val.vbool)
+			err = pdsc_auxbus_dev_add(vf, pdsc, vt_entry->vif_id,
+						  &pdsc->vfs[vf_id].padev);
+		else
+			pdsc_auxbus_dev_del(vf, pdsc, &pdsc->vfs[vf_id].padev);
 	}

 	return err;

@@ -189,7 +189,8 @@ static int pdsc_init_vf(struct pdsc *vf)
 	devl_unlock(dl);

 	pf->vfs[vf->vf_id].vf = vf;
-	err = pdsc_auxbus_dev_add(vf, pf);
+	err = pdsc_auxbus_dev_add(vf, pf, PDS_DEV_TYPE_VDPA,
+				  &pf->vfs[vf->vf_id].padev);
 	if (err) {
 		devl_lock(dl);
 		devl_unregister(dl);

@@ -415,7 +416,7 @@ static void pdsc_remove(struct pci_dev *pdev)

 		pf = pdsc_get_pf_struct(pdsc->pdev);
 		if (!IS_ERR(pf)) {
-			pdsc_auxbus_dev_del(pdsc, pf);
+			pdsc_auxbus_dev_del(pdsc, pf, &pf->vfs[pdsc->vf_id].padev);
 			pf->vfs[pdsc->vf_id].vf = NULL;
 		}
 	} else {

@@ -475,6 +476,15 @@ static void pdsc_reset_prepare(struct pci_dev *pdev)
 	pdsc_stop_health_thread(pdsc);
 	pdsc_fw_down(pdsc);

+	if (pdev->is_virtfn) {
+		struct pdsc *pf;
+
+		pf = pdsc_get_pf_struct(pdsc->pdev);
+		if (!IS_ERR(pf))
+			pdsc_auxbus_dev_del(pdsc, pf,
+					    &pf->vfs[pdsc->vf_id].padev);
+	}
+
 	pdsc_unmap_bars(pdsc);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);

@@ -510,6 +520,15 @@ static void pdsc_reset_done(struct pci_dev *pdev)

 	pdsc_fw_up(pdsc);
 	pdsc_restart_health_thread(pdsc);
+
+	if (pdev->is_virtfn) {
+		struct pdsc *pf;
+
+		pf = pdsc_get_pf_struct(pdsc->pdev);
+		if (!IS_ERR(pf))
+			pdsc_auxbus_dev_add(pdsc, pf, PDS_DEV_TYPE_VDPA,
+					    &pf->vfs[pdsc->vf_id].padev);
+	}
 }

 static const struct pci_error_handlers pdsc_err_handler = {
@@ -66,20 +66,30 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
 			}
 		}

-		if (info->dest_buf) {
-			if ((info->seg_start + off + len) <=
-			    BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
-				memcpy(info->dest_buf + off, dma_buf, len);
-			} else {
-				rc = -ENOBUFS;
-				break;
-			}
-		}
-
 		if (cmn_req->req_type ==
 		    cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
 			info->dest_buf_size += len;

+		if (info->dest_buf) {
+			if ((info->seg_start + off + len) <=
+			    BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+				u16 copylen = min_t(u16, len,
+						    info->dest_buf_size - off);
+
+				memcpy(info->dest_buf + off, dma_buf, copylen);
+				if (copylen < len)
+					break;
+			} else {
+				rc = -ENOBUFS;
+				if (cmn_req->req_type ==
+				    cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+					kfree(info->dest_buf);
+					info->dest_buf = NULL;
+				}
+				break;
+			}
+		}
+
 		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
 			break;

@@ -1393,6 +1393,17 @@ static int bnxt_get_regs_len(struct net_device *dev)
 	return reg_len;
 }

+#define BNXT_PCIE_32B_ENTRY(start, end)			\
+	{ offsetof(struct pcie_ctx_hw_stats, start),	\
+	  offsetof(struct pcie_ctx_hw_stats, end) }
+
+static const struct {
+	u16 start;
+	u16 end;
+} bnxt_pcie_32b_entries[] = {
+	BNXT_PCIE_32B_ENTRY(pcie_ltssm_histogram[0], pcie_ltssm_histogram[3]),
+};
+
 static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 			  void *_p)
 {

@@ -1424,12 +1435,27 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr);
 	rc = hwrm_req_send(bp, req);
 	if (!rc) {
-		__le64 *src = (__le64 *)hw_pcie_stats;
-		u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN);
-		int i;
+		u8 *dst = (u8 *)(_p + BNXT_PXP_REG_LEN);
+		u8 *src = (u8 *)hw_pcie_stats;
+		int i, j;

-		for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++)
-			dst[i] = le64_to_cpu(src[i]);
+		for (i = 0, j = 0; i < sizeof(*hw_pcie_stats); ) {
+			if (i >= bnxt_pcie_32b_entries[j].start &&
+			    i <= bnxt_pcie_32b_entries[j].end) {
+				u32 *dst32 = (u32 *)(dst + i);
+
+				*dst32 = le32_to_cpu(*(__le32 *)(src + i));
+				i += 4;
+				if (i > bnxt_pcie_32b_entries[j].end &&
+				    j < ARRAY_SIZE(bnxt_pcie_32b_entries) - 1)
+					j++;
+			} else {
+				u64 *dst64 = (u64 *)(dst + i);
+
+				*dst64 = le64_to_cpu(*(__le64 *)(src + i));
+				i += 8;
+			}
+		}
 	}
 	hwrm_req_drop(bp, req);
 }
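For the bnxt_get_regs() hunk above: pcie_ctx_hw_stats is mostly __le64 counters, but the LTSSM histogram range holds __le32 entries, so a flat le64 loop would swap each pair of 32-bit counters on big-endian hosts (on little-endian both forms are byte-identical). The new loop walks byte offsets and picks the width per range; a minimal sketch of the same walk, where tbl/n are hypothetical stand-ins for bnxt_pcie_32b_entries and its ARRAY_SIZE():

	for (i = 0, j = 0; i < len; ) {
		if (i >= tbl[j].start && i <= tbl[j].end) {	/* 32-bit region */
			*(u32 *)(dst + i) = le32_to_cpu(*(__le32 *)(src + i));
			i += 4;
			if (i > tbl[j].end && j < n - 1)
				j++;				/* next 32-bit range */
		} else {					/* default: 64-bit */
			*(u64 *)(dst + i) = le64_to_cpu(*(__le64 *)(src + i));
			i += 8;
		}
	}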
@@ -352,7 +352,7 @@ parse_eeprom (struct net_device *dev)
 	eth_hw_addr_set(dev, psrom->mac_addr);

 	if (np->chip_id == CHIP_IP1000A) {
-		np->led_mode = psrom->led_mode;
+		np->led_mode = le16_to_cpu(psrom->led_mode);
 		return 0;
 	}

@@ -335,7 +335,7 @@ typedef struct t_SROM {
 	u16 sub_system_id;	/* 0x06 */
 	u16 pci_base_1;		/* 0x08 (IP1000A only) */
 	u16 pci_base_2;		/* 0x0a (IP1000A only) */
-	u16 led_mode;		/* 0x0c (IP1000A only) */
+	__le16 led_mode;	/* 0x0c (IP1000A only) */
 	u16 reserved1[9];	/* 0x0e-0x1f */
 	u8 mac_addr[6];		/* 0x20-0x25 */
 	u8 reserved2[10];	/* 0x26-0x2f */
@@ -695,7 +695,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	txq->bd.cur = bdp;

 	/* Trigger transmission start */
-	writel(0, txq->bd.reg_desc_active);
+	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
+	    !readl(txq->bd.reg_desc_active) ||
+	    !readl(txq->bd.reg_desc_active) ||
+	    !readl(txq->bd.reg_desc_active) ||
+	    !readl(txq->bd.reg_desc_active))
+		writel(0, txq->bd.reg_desc_active);

 	return 0;
 }
@@ -60,7 +60,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
|
|||||||
.name = "tm_qset",
|
.name = "tm_qset",
|
||||||
.cmd = HNAE3_DBG_CMD_TM_QSET,
|
.cmd = HNAE3_DBG_CMD_TM_QSET,
|
||||||
.dentry = HNS3_DBG_DENTRY_TM,
|
.dentry = HNS3_DBG_DENTRY_TM,
|
||||||
.buf_len = HNS3_DBG_READ_LEN,
|
.buf_len = HNS3_DBG_READ_LEN_1MB,
|
||||||
.init = hns3_dbg_common_file_init,
|
.init = hns3_dbg_common_file_init,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
@@ -473,20 +473,14 @@ static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
|
|||||||
writel(mask_en, tqp_vector->mask_addr);
|
writel(mask_en, tqp_vector->mask_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
|
static void hns3_irq_enable(struct hns3_enet_tqp_vector *tqp_vector)
|
||||||
{
|
{
|
||||||
napi_enable(&tqp_vector->napi);
|
napi_enable(&tqp_vector->napi);
|
||||||
enable_irq(tqp_vector->vector_irq);
|
enable_irq(tqp_vector->vector_irq);
|
||||||
|
|
||||||
/* enable vector */
|
|
||||||
hns3_mask_vector_irq(tqp_vector, 1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
|
static void hns3_irq_disable(struct hns3_enet_tqp_vector *tqp_vector)
|
||||||
{
|
{
|
||||||
/* disable vector */
|
|
||||||
hns3_mask_vector_irq(tqp_vector, 0);
|
|
||||||
|
|
||||||
disable_irq(tqp_vector->vector_irq);
|
disable_irq(tqp_vector->vector_irq);
|
||||||
napi_disable(&tqp_vector->napi);
|
napi_disable(&tqp_vector->napi);
|
||||||
cancel_work_sync(&tqp_vector->rx_group.dim.work);
|
cancel_work_sync(&tqp_vector->rx_group.dim.work);
|
||||||
@@ -707,11 +701,42 @@ static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
|
||||||
|
{
|
||||||
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
||||||
|
struct hnae3_handle *h = priv->ae_handle;
|
||||||
|
u16 i;
|
||||||
|
|
||||||
|
for (i = 0; i < priv->vector_num; i++)
|
||||||
|
hns3_irq_enable(&priv->tqp_vector[i]);
|
||||||
|
|
||||||
|
for (i = 0; i < priv->vector_num; i++)
|
||||||
|
hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
|
||||||
|
|
||||||
|
for (i = 0; i < h->kinfo.num_tqps; i++)
|
||||||
|
hns3_tqp_enable(h->kinfo.tqp[i]);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void hns3_disable_irqs_and_tqps(struct net_device *netdev)
|
||||||
|
{
|
||||||
|
struct hns3_nic_priv *priv = netdev_priv(netdev);
|
||||||
|
struct hnae3_handle *h = priv->ae_handle;
|
+    u16 i;
+
+    for (i = 0; i < h->kinfo.num_tqps; i++)
+        hns3_tqp_disable(h->kinfo.tqp[i]);
+
+    for (i = 0; i < priv->vector_num; i++)
+        hns3_mask_vector_irq(&priv->tqp_vector[i], 0);
+
+    for (i = 0; i < priv->vector_num; i++)
+        hns3_irq_disable(&priv->tqp_vector[i]);
+}
+
 static int hns3_nic_net_up(struct net_device *netdev)
 {
     struct hns3_nic_priv *priv = netdev_priv(netdev);
     struct hnae3_handle *h = priv->ae_handle;
-    int i, j;
     int ret;

     ret = hns3_nic_reset_all_ring(h);
@@ -720,23 +745,13 @@ static int hns3_nic_net_up(struct net_device *netdev)

     clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

-    /* enable the vectors */
-    for (i = 0; i < priv->vector_num; i++)
-        hns3_vector_enable(&priv->tqp_vector[i]);
-
-    /* enable rcb */
-    for (j = 0; j < h->kinfo.num_tqps; j++)
-        hns3_tqp_enable(h->kinfo.tqp[j]);
+    hns3_enable_irqs_and_tqps(netdev);

     /* start the ae_dev */
     ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
     if (ret) {
         set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
-        while (j--)
-            hns3_tqp_disable(h->kinfo.tqp[j]);
-
-        for (j = i - 1; j >= 0; j--)
-            hns3_vector_disable(&priv->tqp_vector[j]);
+        hns3_disable_irqs_and_tqps(netdev);
     }

     return ret;
@@ -823,17 +838,9 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
 static void hns3_nic_net_down(struct net_device *netdev)
 {
     struct hns3_nic_priv *priv = netdev_priv(netdev);
-    struct hnae3_handle *h = hns3_get_handle(netdev);
     const struct hnae3_ae_ops *ops;
-    int i;

-    /* disable vectors */
-    for (i = 0; i < priv->vector_num; i++)
-        hns3_vector_disable(&priv->tqp_vector[i]);
-
-    /* disable rcb */
-    for (i = 0; i < h->kinfo.num_tqps; i++)
-        hns3_tqp_disable(h->kinfo.tqp[i]);
+    hns3_disable_irqs_and_tqps(netdev);

     /* stop ae_dev */
     ops = priv->ae_handle->ae_algo->ops;
@@ -5870,8 +5877,6 @@ int hns3_set_channels(struct net_device *netdev,
 void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
 {
     struct hns3_nic_priv *priv = netdev_priv(ndev);
-    struct hnae3_handle *h = priv->ae_handle;
-    int i;

     if (!if_running)
         return;
@@ -5882,11 +5887,7 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
     netif_carrier_off(ndev);
     netif_tx_disable(ndev);

-    for (i = 0; i < priv->vector_num; i++)
-        hns3_vector_disable(&priv->tqp_vector[i]);
-
-    for (i = 0; i < h->kinfo.num_tqps; i++)
-        hns3_tqp_disable(h->kinfo.tqp[i]);
+    hns3_disable_irqs_and_tqps(ndev);

     /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
      * during reset process, because driver may not be able
@@ -5902,7 +5903,6 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
 {
     struct hns3_nic_priv *priv = netdev_priv(ndev);
     struct hnae3_handle *h = priv->ae_handle;
-    int i;

     if (!if_running)
         return;
@@ -5918,11 +5918,7 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)

     clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

-    for (i = 0; i < priv->vector_num; i++)
-        hns3_vector_enable(&priv->tqp_vector[i]);
-
-    for (i = 0; i < h->kinfo.num_tqps; i++)
-        hns3_tqp_enable(h->kinfo.tqp[i]);
+    hns3_enable_irqs_and_tqps(ndev);

     netif_tx_wake_all_queues(ndev);
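The four hns3 call sites above now funnel into one helper pair. Only the disable helper's body is visible in this excerpt; the enable counterpart it calls is presumably the mirror image, roughly as sketched below (reconstructed from the removed call sites, not quoted from the patch):

static void hns3_enable_irqs_and_tqps(struct net_device *netdev)
{
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    struct hnae3_handle *h = priv->ae_handle;
    u16 i;

    /* Mirror image of the disable helper: bring the queues up first,
     * then enable and unmask the vectors. */
    for (i = 0; i < h->kinfo.num_tqps; i++)
        hns3_tqp_enable(h->kinfo.tqp[i]);

    for (i = 0; i < priv->vector_num; i++)
        hns3_irq_enable(&priv->tqp_vector[i]);

    for (i = 0; i < priv->vector_num; i++)
        hns3_mask_vector_irq(&priv->tqp_vector[i], 1);
}

Centralizing the loops keeps every up/down path performing the steps in the same order, which is what the error path of hns3_nic_net_up previously got wrong with its partial `while (j--)` rollback.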
@@ -440,6 +440,13 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
     ptp->info.settime64 = hclge_ptp_settime;

     ptp->info.n_alarm = 0;

+    spin_lock_init(&ptp->lock);
+    ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
+    ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+    ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+    hdev->ptp = ptp;
+
     ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
     if (IS_ERR(ptp->clock)) {
         dev_err(&hdev->pdev->dev,
@@ -451,12 +458,6 @@ static int hclge_ptp_create_clock(struct hclge_dev *hdev)
         return -ENODEV;
     }

-    spin_lock_init(&ptp->lock);
-    ptp->io_base = hdev->hw.hw.io_base + HCLGE_PTP_REG_OFFSET;
-    ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
-    ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
-    hdev->ptp = ptp;
-
     return 0;
 }
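The reordering matters because ptp_clock_register() makes the clock visible to user space immediately: a clock_gettime() racing with probe could take an uninitialized ptp->lock under the old ordering. A minimal sketch of the init-before-publish rule this hunk enforces, with hypothetical names:

/* Everything a PTP callback may touch is set up before the clock goes
 * live; after ptp_clock_register() returns, callbacks can run any time. */
static int example_create_clock(struct example_dev *dev, struct example_ptp *ptp)
{
    spin_lock_init(&ptp->lock);        /* taken from .gettime64/.settime64 */
    ptp->io_base = dev->io_base + EXAMPLE_PTP_REG_OFFSET;
    dev->ptp = ptp;                    /* publish the driver-internal pointer */

    ptp->clock = ptp_clock_register(&ptp->info, dev->dev);
    if (IS_ERR(ptp->clock))
        return PTR_ERR(ptp->clock);

    return 0;
}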
@@ -1257,9 +1257,8 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
     rtnl_unlock();
 }

-static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+static int hclgevf_en_hw_strip_rxvtag_cmd(struct hclgevf_dev *hdev, bool enable)
 {
-    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
     struct hclge_vf_to_pf_msg send_msg;

     hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@@ -1268,6 +1267,19 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
     return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
 }

+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+    struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+    int ret;
+
+    ret = hclgevf_en_hw_strip_rxvtag_cmd(hdev, enable);
+    if (ret)
+        return ret;
+
+    hdev->rxvtag_strip_en = enable;
+    return 0;
+}
+
 static int hclgevf_reset_tqp(struct hnae3_handle *handle)
 {
 #define HCLGEVF_RESET_ALL_QUEUE_DONE	1U
@@ -2143,12 +2155,13 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
                    tc_valid, tc_size);
 }

-static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev,
+                                    bool rxvtag_strip_en)
 {
     struct hnae3_handle *nic = &hdev->nic;
     int ret;

-    ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+    ret = hclgevf_en_hw_strip_rxvtag(nic, rxvtag_strip_en);
     if (ret) {
         dev_err(&hdev->pdev->dev,
             "failed to enable rx vlan offload, ret = %d\n", ret);
@@ -2815,7 +2828,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
     if (ret)
         return ret;

-    ret = hclgevf_init_vlan_config(hdev);
+    ret = hclgevf_init_vlan_config(hdev, hdev->rxvtag_strip_en);
     if (ret) {
         dev_err(&hdev->pdev->dev,
             "failed(%d) to initialize VLAN config\n", ret);
@@ -2928,7 +2941,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
         goto err_config;
     }

-    ret = hclgevf_init_vlan_config(hdev);
+    ret = hclgevf_init_vlan_config(hdev, true);
     if (ret) {
         dev_err(&hdev->pdev->dev,
             "failed(%d) to initialize VLAN config\n", ret);

@@ -253,6 +253,7 @@ struct hclgevf_dev {
     int *vector_irq;

     bool gro_en;
+    bool rxvtag_strip_en;

     unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
@@ -1824,6 +1824,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
     pf = vf->pf;
     dev = ice_pf_to_dev(pf);
     vf_vsi = ice_get_vf_vsi(vf);
+    if (!vf_vsi) {
+        dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+        v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+        goto err_exit;
+    }

 #define ICE_VF_MAX_FDIR_FILTERS	128
     if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
@@ -1237,6 +1237,8 @@ void igc_ptp_reset(struct igc_adapter *adapter)
     /* reset the tstamp_config */
     igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);

+    mutex_lock(&adapter->ptm_lock);
+
     spin_lock_irqsave(&adapter->tmreg_lock, flags);

     switch (adapter->hw.mac.type) {
@@ -1255,7 +1257,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
         if (!igc_is_crosststamp_supported(adapter))
             break;

-        mutex_lock(&adapter->ptm_lock);
         wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT);
         wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT);

@@ -1279,7 +1280,6 @@ void igc_ptp_reset(struct igc_adapter *adapter)
             netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n");

         igc_ptm_reset(hw);
-        mutex_unlock(&adapter->ptm_lock);
         break;
     default:
         /* No work to do. */
@@ -1296,5 +1296,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
 out:
     spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

+    mutex_unlock(&adapter->ptm_lock);
+
     wrfl();
 }
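The igc hunks read most clearly as a lock-nesting change: previously the sleeping ptm_lock mutex was acquired inside the tmreg_lock spinlock's critical section, which is not allowed in atomic context. After the change, the nesting is (illustrative sketch, not the full function):

void example_ptp_reset(struct igc_adapter *adapter)
{
    unsigned long flags;

    mutex_lock(&adapter->ptm_lock);                 /* sleeping lock first */
    spin_lock_irqsave(&adapter->tmreg_lock, flags); /* then the atomic one */
    /* ... per-MAC PTM setup, nothing that sleeps in here ... */
    spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
    mutex_unlock(&adapter->ptm_lock);
}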
@@ -917,7 +917,7 @@ static void octep_hb_timeout_task(struct work_struct *work)
            miss_cnt);
     rtnl_lock();
     if (netif_running(oct->netdev))
-        octep_stop(oct->netdev);
+        dev_close(oct->netdev);
     rtnl_unlock();
 }
@@ -2180,14 +2180,18 @@ skip_rx:
         ring->data[idx] = new_data;
         rxd->rxd1 = (unsigned int)dma_addr;
 release_desc:
+        if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
+            if (unlikely(dma_addr == DMA_MAPPING_ERROR))
+                addr64 = FIELD_GET(RX_DMA_ADDR64_MASK,
+                                   rxd->rxd2);
+            else
+                addr64 = RX_DMA_PREP_ADDR64(dma_addr);
+        }
+
         if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
             rxd->rxd2 = RX_DMA_LSO;
         else
-            rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+            rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size) | addr64;

-        if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
-            likely(dma_addr != DMA_MAPPING_ERROR))
-            rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
-
         ring->calc_idx = idx;
         done++;
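The point of computing addr64 up front: rxd2 carries both the length field and the upper bits of a 36-bit buffer address, and it is rebuilt wholesale on this path. The old code only re-merged the high bits on mapping success, so a DMA_MAPPING_ERROR silently zeroed them. A sketch of the split, with the field position assumed purely for illustration:

#include <linux/bitfield.h>

#define EXAMPLE_ADDR64_MASK    GENMASK(3, 0)    /* assumed: bits 35:32 live in rxd2 */

struct example_rxd { u32 rxd1; u32 rxd2; };

static void example_arm_rx_desc(struct example_rxd *rxd, dma_addr_t dma_addr,
                                u32 plen0)
{
    u32 addr64 = FIELD_PREP(EXAMPLE_ADDR64_MASK, upper_32_bits(dma_addr));

    rxd->rxd1 = lower_32_bits(dma_addr);    /* low 32 bits of the buffer */
    /* rxd2 is rewritten completely, so addr64 must be merged back in
     * every time - forgetting it on the error path is the bug above. */
    rxd->rxd2 = plen0 | addr64;
}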
@@ -1163,6 +1163,7 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
     struct net_device *ndev = priv->ndev;
     unsigned int head = ring->head;
     unsigned int entry = ring->tail;
+    unsigned long flags;

     while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
         ret = mtk_star_tx_complete_one(priv);
@@ -1182,9 +1183,9 @@ static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
         netif_wake_queue(ndev);

     if (napi_complete(napi)) {
-        spin_lock(&priv->lock);
+        spin_lock_irqsave(&priv->lock, flags);
         mtk_star_enable_dma_irq(priv, false, true);
-        spin_unlock(&priv->lock);
+        spin_unlock_irqrestore(&priv->lock, flags);
     }

     return 0;
@@ -1341,16 +1342,16 @@ push_new_skb:
 static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
 {
     struct mtk_star_priv *priv;
+    unsigned long flags;
     int work_done = 0;

     priv = container_of(napi, struct mtk_star_priv, rx_napi);

     work_done = mtk_star_rx(priv, budget);
-    if (work_done < budget) {
-        napi_complete_done(napi, work_done);
-        spin_lock(&priv->lock);
+    if (work_done < budget && napi_complete_done(napi, work_done)) {
+        spin_lock_irqsave(&priv->lock, flags);
         mtk_star_enable_dma_irq(priv, true, false);
-        spin_unlock(&priv->lock);
+        spin_unlock_irqrestore(&priv->lock, flags);
     }

     return work_done;
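The switch to the _irqsave variants follows the standard rule: if a lock can also be taken from the hard interrupt handler, a NAPI poll (softirq context) must disable local interrupts while holding it, or the IRQ handler can deadlock against the poller on the same CPU. A minimal sketch of the pattern, with hypothetical names:

static int example_poll(struct napi_struct *napi, int budget)
{
    struct example_priv *priv = container_of(napi, struct example_priv, napi);
    unsigned long flags;
    int work_done;

    work_done = example_rx(priv, budget);
    if (work_done < budget && napi_complete_done(napi, work_done)) {
        spin_lock_irqsave(&priv->lock, flags);  /* IRQ-safe variant */
        example_enable_irq(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
    }
    return work_done;
}

Gating the lock on napi_complete_done()'s return value, as the rx hunk does, also avoids re-enabling the device interrupt when NAPI was not actually completed.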
@@ -3499,7 +3499,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
     int err;

     mutex_init(&esw->offloads.termtbl_mutex);
-    mlx5_rdma_enable_roce(esw->dev);
+    err = mlx5_rdma_enable_roce(esw->dev);
+    if (err)
+        goto err_roce;

     err = mlx5_esw_host_number_init(esw);
     if (err)
@@ -3560,6 +3562,7 @@ err_vport_metadata:
     esw_offloads_metadata_uninit(esw);
 err_metadata:
     mlx5_rdma_disable_roce(esw->dev);
+err_roce:
     mutex_destroy(&esw->offloads.termtbl_mutex);
     return err;
 }

@@ -118,8 +118,8 @@ static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *

 static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
 {
+    u8 mac[ETH_ALEN] = {};
     union ib_gid gid;
-    u8 mac[ETH_ALEN];

     mlx5_rdma_make_default_gid(dev, &gid);
     return mlx5_core_roce_gid_set(dev, 0,
@@ -140,17 +140,17 @@ void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
     mlx5_nic_vport_disable_roce(dev);
 }

-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
 {
     int err;

     if (!MLX5_CAP_GEN(dev, roce))
-        return;
+        return 0;

     err = mlx5_nic_vport_enable_roce(dev);
     if (err) {
         mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
-        return;
+        return err;
     }

     err = mlx5_rdma_add_roce_addr(dev);
@@ -165,10 +165,11 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
         goto del_roce_addr;
     }

-    return;
+    return err;

 del_roce_addr:
     mlx5_rdma_del_roce_addr(dev);
 disable_roce:
     mlx5_nic_vport_disable_roce(dev);
+    return err;
 }

@@ -8,12 +8,12 @@

 #ifdef CONFIG_MLX5_ESWITCH

-void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
 void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);

 #else /* CONFIG_MLX5_ESWITCH */

-static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline int mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) { return 0; }
 static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}

 #endif /* CONFIG_MLX5_ESWITCH */
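Taken together, the mlx5 hunks convert a void enable path into one that reports failure and extend the goto-unwind chain accordingly. The generic shape of that pattern, as a sketch with hypothetical names:

int example_enable(struct example_dev *dev)
{
    int err;

    mutex_init(&dev->lock);

    err = example_enable_roce(dev);    /* now returns an error instead of void */
    if (err)
        goto err_roce;

    err = example_init_metadata(dev);
    if (err)
        goto err_metadata;

    return 0;

err_metadata:
    example_disable_roce(dev);
err_roce:
    mutex_destroy(&dev->lock);
    return err;
}

Each successfully acquired resource gets a label, and every new failure point jumps to the label that releases exactly what was acquired before it.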
@@ -1949,6 +1949,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
     if (nr_frags <= 0) {
         tx->frame_data0 |= TX_DESC_DATA0_LS_;
         tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+        tx->frame_last = tx->frame_first;
     }
     tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
     tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
@@ -2018,6 +2019,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
         tx->frame_first = 0;
         tx->frame_data0 = 0;
         tx->frame_tail = 0;
+        tx->frame_last = 0;
         return -ENOMEM;
     }

@@ -2058,16 +2060,18 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
         TX_DESC_DATA0_DTYPE_DATA_) {
         tx->frame_data0 |= TX_DESC_DATA0_LS_;
         tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+        tx->frame_last = tx->frame_tail;
     }

-    tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
-    buffer_info = &tx->buffer_info[tx->frame_tail];
+    tx_descriptor = &tx->ring_cpu_ptr[tx->frame_last];
+    buffer_info = &tx->buffer_info[tx->frame_last];
     buffer_info->skb = skb;
     if (time_stamp)
         buffer_info->flags |= TX_BUFFER_INFO_FLAG_TIMESTAMP_REQUESTED;
     if (ignore_sync)
         buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;

+    tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
     tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
     tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
     tx->last_tail = tx->frame_tail;

@@ -974,6 +974,7 @@ struct lan743x_tx {
     u32 frame_first;
     u32 frame_data0;
     u32 frame_tail;
+    u32 frame_last;

     struct lan743x_tx_buffer_info *buffer_info;

@@ -453,9 +453,158 @@ static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
     return VLAN_N_VID - bridge_num - 1;
 }

+/**
+ * ocelot_update_vlan_reclassify_rule() - Make switch aware only to bridge VLAN TPID
+ *
+ * @ocelot: Switch private data structure
+ * @port: Index of ingress port
+ *
+ * IEEE 802.1Q-2018 clauses "5.5 C-VLAN component conformance" and "5.6 S-VLAN
+ * component conformance" suggest that a C-VLAN component should only recognize
+ * and filter on C-Tags, and an S-VLAN component should only recognize and
+ * process based on S-Tags.
+ *
+ * In Linux, as per commit 1a0b20b25732 ("Merge branch 'bridge-next'"), C-VLAN
+ * components are largely represented by a bridge with vlan_protocol 802.1Q,
+ * and S-VLAN components by a bridge with vlan_protocol 802.1ad.
+ *
+ * Currently the driver only offloads vlan_protocol 802.1Q, but the hardware
+ * design is non-conformant, because the switch assigns each frame to a VLAN
+ * based on an entirely different question, as detailed in figure "Basic VLAN
+ * Classification Flow" from its manual and reproduced below.
+ *
+ * Set TAG_TYPE, PCP, DEI, VID to port-default values in VLAN_CFG register
+ * if VLAN_AWARE_ENA[port] and frame has outer tag then:
+ *   if VLAN_INNER_TAG_ENA[port] and frame has inner tag then:
+ *     TAG_TYPE = (Frame.InnerTPID <> 0x8100)
+ *     Set PCP, DEI, VID to values from inner VLAN header
+ *   else:
+ *     TAG_TYPE = (Frame.OuterTPID <> 0x8100)
+ *     Set PCP, DEI, VID to values from outer VLAN header
+ *   if VID == 0 then:
+ *     VID = VLAN_CFG.VLAN_VID
+ *
+ * Summarized, the switch will recognize both 802.1Q and 802.1ad TPIDs as VLAN
+ * "with equal rights", and just set the TAG_TYPE bit to 0 (if 802.1Q) or to 1
+ * (if 802.1ad). It will classify based on whichever of the tags is "outer", no
+ * matter what TPID that may have (or "inner", if VLAN_INNER_TAG_ENA[port]).
+ *
+ * In the VLAN Table, the TAG_TYPE information is not accessible - just the
+ * classified VID is - so it is as if each VLAN Table entry is for 2 VLANs:
+ * C-VLAN X, and S-VLAN X.
+ *
+ * Whereas the Linux bridge behavior is to only filter on frames with a TPID
+ * equal to the vlan_protocol, and treat everything else as VLAN-untagged.
+ *
+ * Consider an ingress packet tagged with 802.1ad VID=3 and 802.1Q VID=5,
+ * received on a bridge vlan_filtering=1 vlan_protocol=802.1Q port. This frame
+ * should be treated as 802.1Q-untagged, and classified to the PVID of that
+ * bridge port. Not to VID=3, and not to VID=5.
+ *
+ * The VCAP IS1 TCAM has everything we need to overwrite the choices made in
+ * the basic VLAN classification pipeline: it can match on TAG_TYPE in the key,
+ * and it can modify the classified VID in the action. Thus, for each port
+ * under a vlan_filtering bridge, we can insert a rule in VCAP IS1 lookup 0 to
+ * match on 802.1ad tagged frames and modify their classified VID to the 802.1Q
+ * PVID of the port. This effectively makes it appear to the outside world as
+ * if those packets were processed as VLAN-untagged.
+ *
+ * The rule needs to be updated each time the bridge PVID changes, and needs
+ * to be deleted if the bridge PVID is deleted, or if the port becomes
+ * VLAN-unaware.
+ */
+static int ocelot_update_vlan_reclassify_rule(struct ocelot *ocelot, int port)
+{
+    unsigned long cookie = OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port);
+    struct ocelot_vcap_block *block_vcap_is1 = &ocelot->block[VCAP_IS1];
+    struct ocelot_port *ocelot_port = ocelot->ports[port];
+    const struct ocelot_bridge_vlan *pvid_vlan;
+    struct ocelot_vcap_filter *filter;
+    int err, val, pcp, dei;
+    bool vid_replace_ena;
+    u16 vid;
+
+    pvid_vlan = ocelot_port->pvid_vlan;
+    vid_replace_ena = ocelot_port->vlan_aware && pvid_vlan;
+
+    filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, cookie,
+                                                 false);
+    if (!vid_replace_ena) {
+        /* If the reclassification filter doesn't need to exist, delete
+         * it if it was previously installed, and exit doing nothing
+         * otherwise.
+         */
+        if (filter)
+            return ocelot_vcap_filter_del(ocelot, filter);
+
+        return 0;
+    }
+
+    /* The reclassification rule must apply. See if it already exists
+     * or if it must be created.
+     */
+
+    /* Treating as VLAN-untagged means using as classified VID equal to
+     * the bridge PVID, and PCP/DEI set to the port default QoS values.
+     */
+    vid = pvid_vlan->vid;
+    val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+    pcp = ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
+    dei = !!(val & ANA_PORT_QOS_CFG_DP_DEFAULT_VAL);
+
+    if (filter) {
+        bool changed = false;
+
+        /* Filter exists, just update it */
+        if (filter->action.vid != vid) {
+            filter->action.vid = vid;
+            changed = true;
+        }
+        if (filter->action.pcp != pcp) {
+            filter->action.pcp = pcp;
+            changed = true;
+        }
+        if (filter->action.dei != dei) {
+            filter->action.dei = dei;
+            changed = true;
+        }
+
+        if (!changed)
+            return 0;
+
+        return ocelot_vcap_filter_replace(ocelot, filter);
+    }
+
+    /* Filter doesn't exist, create it */
+    filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+    if (!filter)
+        return -ENOMEM;
+
+    filter->key_type = OCELOT_VCAP_KEY_ANY;
+    filter->ingress_port_mask = BIT(port);
+    filter->vlan.tpid = OCELOT_VCAP_BIT_1;
+    filter->prio = 1;
+    filter->id.cookie = cookie;
+    filter->id.tc_offload = false;
+    filter->block_id = VCAP_IS1;
+    filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+    filter->lookup = 0;
+    filter->action.vid_replace_ena = true;
+    filter->action.pcp_dei_ena = true;
+    filter->action.vid = vid;
+    filter->action.pcp = pcp;
+    filter->action.dei = dei;
+
+    err = ocelot_vcap_filter_add(ocelot, filter, NULL);
+    if (err)
+        kfree(filter);
+
+    return err;
+}
+
 /* Default vlan to clasify for untagged frames (may be zero) */
-static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
+static int ocelot_port_set_pvid(struct ocelot *ocelot, int port,
                                 const struct ocelot_bridge_vlan *pvid_vlan)
 {
     struct ocelot_port *ocelot_port = ocelot->ports[port];
     u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
@@ -475,15 +624,23 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
      * happens automatically), but also 802.1p traffic which gets
      * classified to VLAN 0, but that is always in our RX filter, so it
      * would get accepted were it not for this setting.
+     *
+     * Also, we only support the bridge 802.1Q VLAN protocol, so
+     * 802.1ad-tagged frames (carrying S-Tags) should be considered
+     * 802.1Q-untagged, and also dropped.
      */
     if (!pvid_vlan && ocelot_port->vlan_aware)
         val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
-              ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+              ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
+              ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA;

     ocelot_rmw_gix(ocelot, val,
                    ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
-                   ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+                   ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
+                   ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA,
                    ANA_PORT_DROP_CFG, port);
+
+    return ocelot_update_vlan_reclassify_rule(ocelot, port);
 }

 static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
@@ -631,7 +788,10 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
                    ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
                    ANA_PORT_VLAN_CFG, port);

-    ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
+    err = ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
+    if (err)
+        return err;

     ocelot_port_manage_port_tag(ocelot, port);

     return 0;
@@ -670,6 +830,7 @@ EXPORT_SYMBOL(ocelot_vlan_prepare);
 int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
                     bool untagged)
 {
+    struct ocelot_port *ocelot_port = ocelot->ports[port];
     int err;

     /* Ignore VID 0 added to our RX filter by the 8021q module, since
@@ -684,9 +845,17 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
         return err;

     /* Default ingress vlan classification */
-    if (pvid)
-        ocelot_port_set_pvid(ocelot, port,
+    if (pvid) {
+        err = ocelot_port_set_pvid(ocelot, port,
                                    ocelot_bridge_vlan_find(ocelot, vid));
+        if (err)
+            return err;
+    } else if (ocelot_port->pvid_vlan &&
+               ocelot_bridge_vlan_find(ocelot, vid) == ocelot_port->pvid_vlan) {
+        err = ocelot_port_set_pvid(ocelot, port, NULL);
+        if (err)
+            return err;
+    }

     /* Untagged egress vlan clasification */
     ocelot_port_manage_port_tag(ocelot, port);
@@ -712,8 +881,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
         return err;

     /* Ingress */
-    if (del_pvid)
-        ocelot_port_set_pvid(ocelot, port, NULL);
+    if (del_pvid) {
+        err = ocelot_port_set_pvid(ocelot, port, NULL);
+        if (err)
+            return err;
+    }

     /* Egress */
     ocelot_port_manage_port_tag(ocelot, port);
@@ -2607,7 +2779,7 @@ int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
                    ANA_PORT_QOS_CFG,
                    port);

-    return 0;
+    return ocelot_update_vlan_reclassify_rule(ocelot, port);
 }
 EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);

@@ -695,6 +695,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
     vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc);
     vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc);
     vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged);
+    vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TPID, tag->tpid);
     vcap_key_set(vcap, &data, VCAP_IS1_HK_VID,
                  tag->vid.value, tag->vid.mask);
     vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP,
@@ -6,6 +6,7 @@

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

+#include <linux/if_vlan.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -33,7 +34,7 @@
 #define CMD_CTR		(0x2 << CMD_SHIFT)

 #define CMD_MASK	GENMASK(15, CMD_SHIFT)
-#define LEN_MASK	GENMASK(CMD_SHIFT - 1, 0)
+#define LEN_MASK	GENMASK(CMD_SHIFT - 2, 0)

 #define DET_CMD_LEN	4
 #define DET_SOF_LEN	2
@@ -262,7 +263,7 @@ static int mse102x_tx_frame_spi(struct mse102x_net *mse, struct sk_buff *txp,
 }

 static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
-                                unsigned int frame_len)
+                                unsigned int frame_len, bool drop)
 {
     struct mse102x_net_spi *mses = to_mse102x_spi(mse);
     struct spi_transfer *xfer = &mses->spi_xfer;
@@ -280,6 +281,9 @@ static int mse102x_rx_frame_spi(struct mse102x_net *mse, u8 *buff,
         netdev_err(mse->ndev, "%s: spi_sync() failed: %d\n",
                    __func__, ret);
         mse->stats.xfer_err++;
+    } else if (drop) {
+        netdev_dbg(mse->ndev, "%s: Drop frame\n", __func__);
+        ret = -EINVAL;
     } else if (*sof != cpu_to_be16(DET_SOF)) {
         netdev_dbg(mse->ndev, "%s: SPI start of frame is invalid (0x%04x)\n",
                    __func__, *sof);
@@ -307,6 +311,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
     struct sk_buff *skb;
     unsigned int rxalign;
     unsigned int rxlen;
+    bool drop = false;
     __be16 rx = 0;
     u16 cmd_resp;
     u8 *rxpkt;
@@ -329,7 +334,8 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
             net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
                                 __func__, cmd_resp);
             mse->stats.invalid_rts++;
-            return;
+            drop = true;
+            goto drop;
         }

         net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
@@ -337,12 +343,20 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
     }

     rxlen = cmd_resp & LEN_MASK;
-    if (!rxlen) {
-        net_dbg_ratelimited("%s: No frame length defined\n", __func__);
+    if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN) {
+        net_dbg_ratelimited("%s: Invalid frame length: %d\n", __func__,
+                            rxlen);
         mse->stats.invalid_len++;
-        return;
+        drop = true;
     }

+    /* In case of a invalid CMD_RTS, the frame must be consumed anyway.
+     * So assume the maximum possible frame length.
+     */
+drop:
+    if (drop)
+        rxlen = VLAN_ETH_FRAME_LEN;
+
     rxalign = ALIGN(rxlen + DET_SOF_LEN + DET_DFT_LEN, 4);
     skb = netdev_alloc_skb_ip_align(mse->ndev, rxalign);
     if (!skb)
@@ -353,7 +367,7 @@ static void mse102x_rx_pkt_spi(struct mse102x_net *mse)
      * They are copied, but ignored.
      */
     rxpkt = skb_put(skb, rxlen) - DET_SOF_LEN;
-    if (mse102x_rx_frame_spi(mse, rxpkt, rxlen)) {
+    if (mse102x_rx_frame_spi(mse, rxpkt, rxlen, drop)) {
         mse->ndev->stats.rx_errors++;
         dev_kfree_skb(skb);
         return;
@@ -509,6 +523,7 @@ static irqreturn_t mse102x_irq(int irq, void *_mse)
 static int mse102x_net_open(struct net_device *ndev)
 {
     struct mse102x_net *mse = netdev_priv(ndev);
+    struct mse102x_net_spi *mses = to_mse102x_spi(mse);
     int ret;

     ret = request_threaded_irq(ndev->irq, NULL, mse102x_irq, IRQF_ONESHOT,
@@ -524,6 +539,13 @@ static int mse102x_net_open(struct net_device *ndev)

     netif_carrier_on(ndev);

+    /* The SPI interrupt can stuck in case of pending packet(s).
+     * So poll for possible packet(s) to re-arm the interrupt.
+     */
+    mutex_lock(&mses->lock);
+    mse102x_rx_pkt_spi(mse);
+    mutex_unlock(&mses->lock);
+
     netif_dbg(mse, ifup, ndev, "network device up\n");

     return 0;
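The key idea in the mse102x hunks: an implausible CMD_RTS length can no longer be trusted, but the frame still sits in the device and must be clocked out over SPI before the link can make progress, so it is read at worst-case size and discarded. A standalone illustration of the new length check; CMD_SHIFT = 12 is assumed here for the arithmetic, since its value is not shown in this excerpt:

#include <stdio.h>

#define GENMASK(h, l)		((~0U << (l)) & (~0U >> (31 - (h))))
#define CMD_SHIFT		12
#define LEN_MASK		GENMASK(CMD_SHIFT - 2, 0)	/* 0x7ff */
#define ETH_ZLEN		60
#define VLAN_ETH_FRAME_LEN	1522

int main(void)
{
    unsigned int cmd_resp = 0x2010;    /* example CMD_RTS word */
    unsigned int rxlen = cmd_resp & LEN_MASK;

    /* Out-of-range lengths mark the frame for the drop path: it is
     * still consumed from the device, just at the maximum size. */
    if (rxlen < ETH_ZLEN || rxlen > VLAN_ETH_FRAME_LEN)
        rxlen = VLAN_ETH_FRAME_LEN;

    printf("LEN_MASK=0x%x rxlen=%u\n", LEN_MASK, rxlen);
    return 0;
}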
@@ -17,6 +17,7 @@
 #define REG2_LEDACT	GENMASK(23, 22)
 #define REG2_LEDLINK	GENMASK(25, 24)
 #define REG2_DIV4SEL	BIT(27)
+#define REG2_REVERSED	BIT(28)
 #define REG2_ADCBYPASS	BIT(30)
 #define REG2_CLKINSEL	BIT(31)
 #define ETH_REG3	0x4
@@ -65,7 +66,7 @@ static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv)
      * The only constraint is that it must match the one in
      * drivers/net/phy/meson-gxl.c to properly match the PHY.
      */
-    writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
+    writel(REG2_REVERSED | FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
            priv->regs + ETH_REG2);

     /* Enable the internal phy */
@@ -627,7 +627,11 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
      * default dst remote_ip previously added for this vni
      */
     if (!vxlan_addr_any(&vninode->remote_ip) ||
-        !vxlan_addr_any(&dst->remote_ip))
+        !vxlan_addr_any(&dst->remote_ip)) {
+        u32 hash_index = fdb_head_index(vxlan, all_zeros_mac,
+                                        vninode->vni);
+
+        spin_lock_bh(&vxlan->hash_lock[hash_index]);
         __vxlan_fdb_delete(vxlan, all_zeros_mac,
                            (vxlan_addr_any(&vninode->remote_ip) ?
                             dst->remote_ip : vninode->remote_ip),
@@ -635,6 +639,8 @@ static void vxlan_vni_delete_group(struct vxlan_dev *vxlan,
                            vninode->vni, vninode->vni,
                            dst->remote_ifindex,
                            true);
+        spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+    }

     if (vxlan->dev->flags & IFF_UP) {
         if (vxlan_addr_multicast(&vninode->remote_ip) &&
|
|||||||
void plfxlc_mac_release(struct plfxlc_mac *mac)
|
void plfxlc_mac_release(struct plfxlc_mac *mac)
|
||||||
{
|
{
|
||||||
plfxlc_chip_release(&mac->chip);
|
plfxlc_chip_release(&mac->chip);
|
||||||
lockdep_assert_held(&mac->lock);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int plfxlc_op_start(struct ieee80211_hw *hw)
|
int plfxlc_op_start(struct ieee80211_hw *hw)
|
||||||
|
@@ -3377,7 +3377,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)

     dev_info(dev->ctrl.device, "restart after slot reset\n");
     pci_restore_state(pdev);
-    if (!nvme_try_sched_reset(&dev->ctrl))
+    if (nvme_try_sched_reset(&dev->ctrl))
         nvme_unquiesce_io_queues(&dev->ctrl);
     return PCI_ERS_RESULT_RECOVERED;
 }
@@ -1710,7 +1710,7 @@ static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
     cancel_work_sync(&queue->io_work);
 }

-static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+static void nvme_tcp_stop_queue_nowait(struct nvme_ctrl *nctrl, int qid)
 {
     struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
     struct nvme_tcp_queue *queue = &ctrl->queues[qid];
@@ -1724,6 +1724,31 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
     mutex_unlock(&queue->queue_lock);
 }

+static void nvme_tcp_wait_queue(struct nvme_ctrl *nctrl, int qid)
+{
+    struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+    struct nvme_tcp_queue *queue = &ctrl->queues[qid];
+    int timeout = 100;
+
+    while (timeout > 0) {
+        if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) ||
+            !sk_wmem_alloc_get(queue->sock->sk))
+            return;
+        msleep(2);
+        timeout -= 2;
+    }
+    dev_warn(nctrl->device,
+             "qid %d: timeout draining sock wmem allocation expired\n",
+             qid);
+}
+
+static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
+{
+    nvme_tcp_stop_queue_nowait(nctrl, qid);
+    nvme_tcp_wait_queue(nctrl, qid);
+}
+
 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
 {
     write_lock_bh(&queue->sock->sk->sk_callback_lock);
@@ -1790,7 +1815,9 @@ static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
     int i;

     for (i = 1; i < ctrl->queue_count; i++)
-        nvme_tcp_stop_queue(ctrl, i);
+        nvme_tcp_stop_queue_nowait(ctrl, i);
+    for (i = 1; i < ctrl->queue_count; i++)
+        nvme_tcp_wait_queue(ctrl, i);
 }

 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
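Splitting stop into a non-blocking part and a wait turns a serial teardown into a pipelined one: with the old code, a controller with N queues could block up to N times the per-queue drain timeout; now the drains overlap. The shape of that change, as a sketch with hypothetical names:

static void example_stop_all_queues(struct example_ctrl *ctrl)
{
    int i;

    /* Pass 1: initiate shutdown on every queue (cheap, non-blocking). */
    for (i = 1; i < ctrl->queue_count; i++)
        example_stop_queue_nowait(ctrl, i);

    /* Pass 2: wait for the socket buffers to drain; the per-queue
     * timeouts (about 100 ms each in the hunk above) now run
     * concurrently instead of back to back. */
    for (i = 1; i < ctrl->queue_count; i++)
        example_wait_queue(ctrl, i);
}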
@@ -1283,7 +1283,8 @@ static int imx6_pcie_probe(struct platform_device *pdev)
     case IMX8MQ_EP:
         if (dbi_base->start == IMX8MQ_PCIE2_BASE_ADDR)
             imx6_pcie->controller_id = 1;
-
+        fallthrough;
+    case IMX7D:
         imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev,
                                                                     "pciephy");
         if (IS_ERR(imx6_pcie->pciephy_reset)) {
@@ -1427,6 +1427,7 @@ struct bpf_prog_aux {
     bool sleepable;
     bool tail_call_reachable;
     bool xdp_has_frags;
+    bool changes_pkt_data;
     /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
     const struct btf_type *attach_func_proto;
     /* function name for valid attach_btf_id */

@@ -574,6 +574,7 @@ struct bpf_subprog_info {
     bool tail_call_reachable;
     bool has_ld_abs;
     bool is_async_cb;
+    bool changes_pkt_data;

     ANDROID_KABI_RESERVE(1);
 };
@@ -787,8 +787,8 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
 int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);

 int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
-                                 unsigned int target_freq,
-                                 unsigned int relation);
+                                 unsigned int target_freq, unsigned int min,
+                                 unsigned int max, unsigned int relation);
 int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
                                       unsigned int freq);

@@ -853,12 +853,12 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy,
     return best;
 }

-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
-                                             unsigned int target_freq,
+static inline int find_index_l(struct cpufreq_policy *policy,
+                               unsigned int target_freq,
+                               unsigned int min, unsigned int max,
                                bool efficiencies)
 {
-    target_freq = clamp_val(target_freq, policy->min, policy->max);
+    target_freq = clamp_val(target_freq, min, max);

     if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
         return cpufreq_table_find_index_al(policy, target_freq,
@@ -868,6 +868,14 @@ static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
                                            efficiencies);
 }

+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_l(struct cpufreq_policy *policy,
+                                             unsigned int target_freq,
+                                             bool efficiencies)
+{
+    return find_index_l(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
 /* Find highest freq at or below target in a table in ascending order */
 static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy,
                                               unsigned int target_freq,
@@ -921,12 +929,12 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy,
     return best;
 }

-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
-                                             unsigned int target_freq,
+static inline int find_index_h(struct cpufreq_policy *policy,
+                               unsigned int target_freq,
+                               unsigned int min, unsigned int max,
                                bool efficiencies)
 {
-    target_freq = clamp_val(target_freq, policy->min, policy->max);
+    target_freq = clamp_val(target_freq, min, max);

     if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
         return cpufreq_table_find_index_ah(policy, target_freq,
@@ -936,6 +944,14 @@ static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
                                            efficiencies);
 }

+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_h(struct cpufreq_policy *policy,
+                                             unsigned int target_freq,
+                                             bool efficiencies)
+{
+    return find_index_h(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
 /* Find closest freq to target in a table in ascending order */
 static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy,
                                               unsigned int target_freq,
@@ -1006,12 +1022,12 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy,
     return best;
 }

-/* Works only on sorted freq-tables */
-static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
-                                             unsigned int target_freq,
+static inline int find_index_c(struct cpufreq_policy *policy,
+                               unsigned int target_freq,
+                               unsigned int min, unsigned int max,
                                bool efficiencies)
 {
-    target_freq = clamp_val(target_freq, policy->min, policy->max);
+    target_freq = clamp_val(target_freq, min, max);

     if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING)
         return cpufreq_table_find_index_ac(policy, target_freq,
@@ -1021,7 +1037,17 @@ static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
                                            efficiencies);
 }

-static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)
+/* Works only on sorted freq-tables */
+static inline int cpufreq_table_find_index_c(struct cpufreq_policy *policy,
+                                             unsigned int target_freq,
+                                             bool efficiencies)
+{
+    return find_index_c(policy, target_freq, policy->min, policy->max, efficiencies);
+}
+
+static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy,
+                                        unsigned int min, unsigned int max,
+                                        int idx)
 {
     unsigned int freq;

@@ -1030,11 +1056,13 @@ static inline bool cpufreq_is_in_limits(struct cpufreq_policy *policy, int idx)

     freq = policy->freq_table[idx].frequency;

-    return freq == clamp_val(freq, policy->min, policy->max);
+    return freq == clamp_val(freq, min, max);
 }

 static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
                                                  unsigned int target_freq,
+                                                 unsigned int min,
+                                                 unsigned int max,
                                                  unsigned int relation)
 {
     bool efficiencies = policy->efficiencies_available &&
@@ -1045,29 +1073,26 @@ static inline int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
         relation &= ~CPUFREQ_RELATION_E;

     if (unlikely(policy->freq_table_sorted == CPUFREQ_TABLE_UNSORTED))
-        return cpufreq_table_index_unsorted(policy, target_freq,
-                                            relation);
+        return cpufreq_table_index_unsorted(policy, target_freq, min,
+                                            max, relation);
 retry:
     switch (relation) {
     case CPUFREQ_RELATION_L:
-        idx = cpufreq_table_find_index_l(policy, target_freq,
-                                         efficiencies);
+        idx = find_index_l(policy, target_freq, min, max, efficiencies);
         break;
     case CPUFREQ_RELATION_H:
-        idx = cpufreq_table_find_index_h(policy, target_freq,
-                                         efficiencies);
+        idx = find_index_h(policy, target_freq, min, max, efficiencies);
         break;
     case CPUFREQ_RELATION_C:
-        idx = cpufreq_table_find_index_c(policy, target_freq,
-                                         efficiencies);
+        idx = find_index_c(policy, target_freq, min, max, efficiencies);
        break;
     default:
         WARN_ON_ONCE(1);
         return 0;
     }

-    /* Limit frequency index to honor policy->min/max */
-    if (!cpufreq_is_in_limits(policy, idx) && efficiencies) {
+    /* Limit frequency index to honor min and max */
+    if (!cpufreq_is_in_limits(policy, min, max, idx) && efficiencies) {
         efficiencies = false;
         goto retry;
     }
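What the extra min/max parameters buy: the caller samples the policy limits once, so the index search and the in-limits check cannot observe two different values if sysfs rewrites policy->min or policy->max concurrently. An illustrative caller (sketch only; the real consumer of this API lives in drivers/cpufreq/cpufreq.c, whose merge conflict this commit resolves):

static unsigned int example_resolve_freq(struct cpufreq_policy *policy,
                                         unsigned int target_freq,
                                         unsigned int relation)
{
    unsigned int min = READ_ONCE(policy->min);    /* one coherent snapshot */
    unsigned int max = READ_ONCE(policy->max);
    int idx;

    target_freq = clamp_val(target_freq, min, max);
    idx = cpufreq_frequency_table_target(policy, target_freq,
                                         min, max, relation);
    return policy->freq_table[idx].frequency;
}

A stale snapshot is harmless here; a torn min/max pair is what the change rules out.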
@@ -915,7 +915,7 @@ bool bpf_jit_needs_zext(void);
 bool bpf_jit_supports_subprog_tailcalls(void);
 bool bpf_jit_supports_kfunc_call(void);
 bool bpf_jit_supports_far_kfunc_call(void);
-bool bpf_helper_changes_pkt_data(void *func);
+bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id);

 static inline bool bpf_dump_raw_ok(const struct cred *cred)
 {
@@ -79,6 +79,7 @@ enum pds_core_status_code {
     PDS_RC_EVFID	= 31,	/* VF ID does not exist */
     PDS_RC_BAD_FW	= 32,	/* FW file is invalid or corrupted */
     PDS_RC_ECLIENT	= 33,	/* No such client id */
+    PDS_RC_BAD_PCI	= 255,	/* Broken PCI when reading status */
 };

 /**
@@ -691,6 +691,11 @@ typedef unsigned int sk_buff_data_t;
 typedef unsigned char *sk_buff_data_t;
 #endif
 
+enum skb_tstamp_type {
+	SKB_CLOCK_REALTIME,
+	SKB_CLOCK_MONOTONIC,
+};
+
 /**
  * DOC: Basic sk_buff geometry
  *
@@ -810,10 +815,8 @@ typedef unsigned char *sk_buff_data_t;
  *	@dst_pending_confirm: need to confirm neighbour
  *	@decrypted: Decrypted SKB
  *	@slow_gro: state present at GRO time, slower prepare step required
- *	@mono_delivery_time: When set, skb->tstamp has the
- *		delivery_time in mono clock base (i.e. EDT).  Otherwise, the
- *		skb->tstamp has the (rcv) timestamp at ingress and
- *		delivery_time at egress.
+ *	@tstamp_type: When set, skb->tstamp has the
+ *		delivery_time clock base of skb->tstamp.
  *	@napi_id: id of the NAPI struct this skb came from
  *	@sender_cpu: (aka @napi_id) source CPU in XPS
  *	@alloc_cpu: CPU which did the skb allocation.
@@ -941,7 +944,7 @@ struct sk_buff {
 	/* private: */
 	__u8			__mono_tc_offset[0];
 	/* public: */
-	__u8			mono_delivery_time:1;	/* See SKB_MONO_DELIVERY_TIME_MASK */
+	__u8			tstamp_type:1;	/* See skb_tstamp_type */
 #ifdef CONFIG_NET_XGRESS
 	__u8			tc_at_ingress:1;	/* See TC_AT_INGRESS_MASK */
 	__u8			tc_skip_classify:1;
@@ -4198,7 +4201,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb,
 static inline void __net_timestamp(struct sk_buff *skb)
 {
 	skb->tstamp = ktime_get_real();
-	skb->mono_delivery_time = 0;
+	skb->tstamp_type = SKB_CLOCK_REALTIME;
 }
 
 static inline ktime_t net_timedelta(ktime_t t)
@@ -4207,10 +4210,33 @@ static inline ktime_t net_timedelta(ktime_t t)
 }
 
 static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
-					 bool mono)
+					 u8 tstamp_type)
 {
 	skb->tstamp = kt;
-	skb->mono_delivery_time = kt && mono;
+
+	if (kt)
+		skb->tstamp_type = tstamp_type;
+	else
+		skb->tstamp_type = SKB_CLOCK_REALTIME;
+}
+
+static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb,
+						    ktime_t kt, clockid_t clockid)
+{
+	u8 tstamp_type = SKB_CLOCK_REALTIME;
+
+	switch (clockid) {
+	case CLOCK_REALTIME:
+		break;
+	case CLOCK_MONOTONIC:
+		tstamp_type = SKB_CLOCK_MONOTONIC;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		kt = 0;
+	}
+
+	skb_set_delivery_time(skb, kt, tstamp_type);
 }
 
 DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
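Note: skb_set_delivery_time() now takes a clock base instead of a bool, and a zero timestamp always falls back to SKB_CLOCK_REALTIME (receive semantics). A short caller-side sketch of the migration:

	/* Illustrative only: mapping the old bool API onto the new enum. */
	skb_set_delivery_time(skb, ktime_get(), SKB_CLOCK_MONOTONIC); /* was: mono = true  */
	skb_set_delivery_time(skb, kt, SKB_CLOCK_REALTIME);           /* was: mono = false */
	skb_set_delivery_time(skb, 0, SKB_CLOCK_MONOTONIC);           /* kt == 0 -> type reset to REALTIME */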
@@ -4220,8 +4246,8 @@ DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
  */
 static inline void skb_clear_delivery_time(struct sk_buff *skb)
 {
-	if (skb->mono_delivery_time) {
-		skb->mono_delivery_time = 0;
+	if (skb->tstamp_type) {
+		skb->tstamp_type = SKB_CLOCK_REALTIME;
 		if (static_branch_unlikely(&netstamp_needed_key))
 			skb->tstamp = ktime_get_real();
 		else
@@ -4231,7 +4257,7 @@ static inline void skb_clear_delivery_time(struct sk_buff *skb)
 
 static inline void skb_clear_tstamp(struct sk_buff *skb)
 {
-	if (skb->mono_delivery_time)
+	if (skb->tstamp_type)
 		return;
 
 	skb->tstamp = 0;
@@ -4239,7 +4265,7 @@ static inline void skb_clear_tstamp(struct sk_buff *skb)
 
 static inline ktime_t skb_tstamp(const struct sk_buff *skb)
 {
-	if (skb->mono_delivery_time)
+	if (skb->tstamp_type)
 		return 0;
 
 	return skb->tstamp;
@@ -4247,7 +4273,7 @@ static inline ktime_t skb_tstamp(const struct sk_buff *skb)
 
 static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
 {
-	if (!skb->mono_delivery_time && skb->tstamp)
+	if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp)
 		return skb->tstamp;
 
 	if (static_branch_unlikely(&netstamp_needed_key) || cond)
@@ -76,7 +76,7 @@ struct frag_v6_compare_key {
  * @stamp: timestamp of the last received fragment
  * @len: total length of the original datagram
  * @meat: length of received fragments so far
- * @mono_delivery_time: stamp has a mono delivery time (EDT)
+ * @tstamp_type: stamp has a mono delivery time (EDT)
  * @flags: fragment queue flags
  * @max_size: maximum received fragment size
  * @fqdir: pointer to struct fqdir
@@ -97,7 +97,7 @@ struct inet_frag_queue {
 	ktime_t			stamp;
 	int			len;
 	int			meat;
-	u8			mono_delivery_time;
+	u8			tstamp_type;
 	__u8			flags;
 	u16			max_size;
 	struct fqdir		*fqdir;
@@ -13,6 +13,7 @@
  */
 #define OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream) ((upstream) << 16 | (port))
 #define OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port)           (port)
+#define OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port)            ((ocelot)->num_phys_ports + (port))
 #define OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port)           (port)
 #define OCELOT_VCAP_IS2_MRP_REDIRECT(ocelot, port)               ((ocelot)->num_phys_ports + (port))
 #define OCELOT_VCAP_IS2_MRP_TRAP(ocelot)                         ((ocelot)->num_phys_ports * 2)
@@ -499,6 +500,7 @@ struct ocelot_vcap_key_vlan {
 	struct ocelot_vcap_u8 pcp;  /* PCP (3 bit) */
 	enum ocelot_vcap_bit dei;    /* DEI */
 	enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
+	enum ocelot_vcap_bit tpid;
 };
 
 struct ocelot_vcap_key_etype {
@@ -19,7 +19,7 @@ struct ump_cvt_to_ump_bank {
 /* context for converting from MIDI1 byte stream to UMP packet */
 struct ump_cvt_to_ump {
 	/* MIDI1 intermediate buffer */
-	unsigned char buf[4];
+	unsigned char buf[6]; /* up to 6 bytes for SysEx */
 	int len;
 	int cmd_bytes;
@@ -2874,7 +2874,7 @@ void __weak bpf_jit_compile(struct bpf_prog *prog)
 {
 }
 
-bool __weak bpf_helper_changes_pkt_data(void *func)
+bool __weak bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
 {
 	return false;
 }
@@ -2636,16 +2636,36 @@ static int cmp_subprogs(const void *a, const void *b)
 	       ((struct bpf_subprog_info *)b)->start;
 }
 
+/* Find subprogram that contains instruction at 'off' */
+static struct bpf_subprog_info *find_containing_subprog(struct bpf_verifier_env *env, int off)
+{
+	struct bpf_subprog_info *vals = env->subprog_info;
+	int l, r, m;
+
+	if (off >= env->prog->len || off < 0 || env->subprog_cnt == 0)
+		return NULL;
+
+	l = 0;
+	r = env->subprog_cnt - 1;
+	while (l < r) {
+		m = l + (r - l + 1) / 2;
+		if (vals[m].start <= off)
+			l = m;
+		else
+			r = m - 1;
+	}
+	return &vals[l];
+}
+
+/* Find subprogram that starts exactly at 'off' */
 static int find_subprog(struct bpf_verifier_env *env, int off)
 {
 	struct bpf_subprog_info *p;
 
-	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
-		    sizeof(env->subprog_info[0]), cmp_subprogs);
-	if (!p)
+	p = find_containing_subprog(env, off);
+	if (!p || p->start != off)
 		return -ENOENT;
 	return p - env->subprog_info;
 
 }
 
 static int add_subprog(struct bpf_verifier_env *env, int off)
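Note: the search above returns the rightmost subprogram whose start is <= off; the upward-rounding midpoint is what keeps the loop terminating. A standalone model of the same search (illustrative, not kernel code):

	/* Standalone model of find_containing_subprog()'s binary search. */
	static int containing_idx(const int *starts, int n, int off)
	{
		int l = 0, r = n - 1, m;

		while (l < r) {
			m = l + (r - l + 1) / 2; /* round up so "l = m" always progresses */
			if (starts[m] <= off)
				l = m;           /* candidate: rightmost start <= off */
			else
				r = m - 1;
		}
		return l; /* e.g. starts = {0, 10, 25}, off = 12 -> index 1 */
	}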
@@ -9378,6 +9398,8 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 
 		if (env->log.level & BPF_LOG_LEVEL)
 			verbose(env, "Func#%d is global and valid. Skipping.\n", subprog);
+		if (env->subprog_info[subprog].changes_pkt_data)
+			clear_all_pkt_pointers(env);
 		clear_caller_saved_regs(env, caller->regs);
 
 		/* All global functions return a 64-bit SCALAR_VALUE */
@@ -10021,7 +10043,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
 	}
 
 	/* With LD_ABS/IND some JITs save/restore skb from r1. */
-	changes_data = bpf_helper_changes_pkt_data(fn->func);
+	changes_data = bpf_helper_changes_pkt_data(func_id);
 	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
 		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
 			func_id_name(func_id), func_id);
@@ -15128,6 +15150,29 @@ static int check_return_code(struct bpf_verifier_env *env)
 	return 0;
 }
 
+static void mark_subprog_changes_pkt_data(struct bpf_verifier_env *env, int off)
+{
+	struct bpf_subprog_info *subprog;
+
+	subprog = find_containing_subprog(env, off);
+	subprog->changes_pkt_data = true;
+}
+
+/* 't' is an index of a call-site.
+ * 'w' is a callee entry point.
+ * Eventually this function would be called when env->cfg.insn_state[w] == EXPLORED.
+ * Rely on DFS traversal order and absence of recursive calls to guarantee that
+ * callee's change_pkt_data marks would be correct at that moment.
+ */
+static void merge_callee_effects(struct bpf_verifier_env *env, int t, int w)
+{
+	struct bpf_subprog_info *caller, *callee;
+
+	caller = find_containing_subprog(env, t);
+	callee = find_containing_subprog(env, w);
+	caller->changes_pkt_data |= callee->changes_pkt_data;
+}
+
 /* non-recursive DFS pseudo code
  * 1  procedure DFS-iterative(G,v):
  * 2      label v as discovered
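Note: one way to see why a single OR per call edge suffices here (comment-style sketch, assuming a call graph main -> f -> g where only g calls a packet-changing helper):

	/* Propagation order under DFS, illustrative only:
	 *   visit g's helper call : g.changes_pkt_data = true
	 *   merge edge f -> g     : f.changes_pkt_data |= g.changes_pkt_data
	 *   merge edge main -> f  : main.changes_pkt_data |= f.changes_pkt_data
	 * The callee is fully EXPLORED before its call edge is merged, and
	 * recursion is rejected, so no fixpoint iteration is needed.
	 */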
@@ -15261,6 +15306,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 				bool visit_callee)
 {
 	int ret, insn_sz;
+	int w;
 
 	insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
 	ret = push_insn(t, t + insn_sz, FALLTHROUGH, env);
@@ -15272,8 +15318,10 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 		mark_jmp_point(env, t + insn_sz);
 
 	if (visit_callee) {
+		w = t + insns[t].imm + 1;
 		mark_prune_point(env, t);
-		ret = push_insn(t, t + insns[t].imm + 1, BRANCH, env);
+		merge_callee_effects(env, t, w);
+		ret = push_insn(t, w, BRANCH, env);
 	}
 	return ret;
 }
@@ -15325,6 +15373,8 @@ static int visit_insn(int t, struct bpf_verifier_env *env)
 			mark_prune_point(env, t);
 			mark_jmp_point(env, t);
 		}
+		if (bpf_helper_call(insn) && bpf_helper_changes_pkt_data(insn->imm))
+			mark_subprog_changes_pkt_data(env, t);
 		if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
 			struct bpf_kfunc_call_arg_meta meta;
 
@@ -15446,6 +15496,7 @@ static int check_cfg(struct bpf_verifier_env *env)
 		}
 	}
 	ret = 0; /* cfg looks good */
+	env->prog->aux->changes_pkt_data = env->subprog_info[0].changes_pkt_data;
 
 err_free:
 	kvfree(insn_state);
@@ -18604,6 +18655,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		}
 		func[i]->aux->num_exentries = num_exentries;
 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
+		func[i]->aux->changes_pkt_data = env->subprog_info[i].changes_pkt_data;
 		func[i] = bpf_int_jit_compile(func[i]);
 		if (!func[i]->jited) {
 			err = -ENOTSUPP;
@@ -19888,6 +19940,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
 	}
 	if (tgt_prog) {
 		struct bpf_prog_aux *aux = tgt_prog->aux;
+		bool tgt_changes_pkt_data;
 
 		if (bpf_prog_is_dev_bound(prog->aux) &&
 		    !bpf_prog_dev_bound_match(prog, tgt_prog)) {
@@ -19916,6 +19969,14 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
 				"Extension programs should be JITed\n");
 			return -EINVAL;
 		}
+		tgt_changes_pkt_data = aux->func
+				       ? aux->func[subprog]->aux->changes_pkt_data
+				       : aux->changes_pkt_data;
+		if (prog->aux->changes_pkt_data && !tgt_changes_pkt_data) {
+			bpf_log(log,
+				"Extension program changes packet data, while original does not\n");
+			return -EINVAL;
+		}
 	}
 	if (!tgt_prog->jited) {
 		bpf_log(log, "Can attach to only JITed progs\n");
@@ -20375,10 +20436,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 	if (ret < 0)
 		goto skip_full_check;
 
-	ret = check_attach_btf_id(env);
-	if (ret)
-		goto skip_full_check;
-
 	ret = resolve_pseudo_ldimm64(env);
 	if (ret < 0)
 		goto skip_full_check;
@@ -20393,6 +20450,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
 	if (ret < 0)
 		goto skip_full_check;
 
+	ret = check_attach_btf_id(env);
+	if (ret)
+		goto skip_full_check;
+
 	ret = do_check_subprogs(env);
 	ret = ret ?: do_check_main(env);
@@ -7152,13 +7152,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 		/* Copy the data into the page, so we can start over. */
 		ret = trace_seq_to_buffer(&iter->seq,
 					  page_address(spd.pages[i]),
-					  trace_seq_used(&iter->seq));
+					  min((size_t)trace_seq_used(&iter->seq),
+					      PAGE_SIZE));
 		if (ret < 0) {
 			__free_page(spd.pages[i]);
 			break;
 		}
 		spd.partial[i].offset = 0;
-		spd.partial[i].len = trace_seq_used(&iter->seq);
+		spd.partial[i].len = ret;
 
 		trace_seq_init(&iter->seq);
 	}
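Note: trace_seq_used() can report more bytes than fit in one page, so the copy is clamped to PAGE_SIZE and the splice segment length uses the count actually copied. The shape of the bug being fixed, in miniature (illustrative):

	/* Before: 'used' could exceed PAGE_SIZE and overrun the spd page. */
	size_t used = trace_seq_used(&iter->seq);
	size_t n = min(used, (size_t)PAGE_SIZE); /* copy at most one page     */
	/* ...and report n, not 'used', as spd.partial[i].len afterwards.    */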
@@ -7386,6 +7386,9 @@ static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
 			return -ENOMEM;
 		/* Init rx_len */
 		conn->rx_len = len;
+
+		skb_set_delivery_time(conn->rx_skb, skb->tstamp,
+				      skb->tstamp_type);
 	}
 
 	/* Copy as much as the rx_skb can hold */
@@ -32,7 +32,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 				  struct sk_buff *))
 {
 	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
-	bool mono_delivery_time = skb->mono_delivery_time;
+	u8 tstamp_type = skb->tstamp_type;
 	unsigned int hlen, ll_rs, mtu;
 	ktime_t tstamp = skb->tstamp;
 	struct ip_frag_state state;
@@ -82,7 +82,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 			if (iter.frag)
 				ip_fraglist_prepare(skb, &iter);
 
-			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+			skb_set_delivery_time(skb, tstamp, tstamp_type);
 			err = output(net, sk, data, skb);
 			if (err || !iter.frag)
 				break;
@@ -113,7 +113,7 @@ slow_path:
 			goto blackhole;
 		}
 
-		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
+		skb_set_delivery_time(skb2, tstamp, tstamp_type);
 		err = output(net, sk, data, skb2);
 		if (err)
 			goto blackhole;
@@ -2197,7 +2197,7 @@ EXPORT_SYMBOL(net_disable_timestamp);
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
 	skb->tstamp = 0;
-	skb->mono_delivery_time = 0;
+	skb->tstamp_type = SKB_CLOCK_REALTIME;
 	if (static_branch_unlikely(&netstamp_needed_key))
 		skb->tstamp = ktime_get_real();
 }
@@ -7752,13 +7752,13 @@ BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb,
 		if (!tstamp)
 			return -EINVAL;
 		skb->tstamp = tstamp;
-		skb->mono_delivery_time = 1;
+		skb->tstamp_type = SKB_CLOCK_MONOTONIC;
 		break;
 	case BPF_SKB_TSTAMP_UNSPEC:
 		if (tstamp)
 			return -EINVAL;
 		skb->tstamp = 0;
-		skb->mono_delivery_time = 0;
+		skb->tstamp_type = SKB_CLOCK_REALTIME;
 		break;
 	default:
 		return -EINVAL;
@@ -7886,42 +7886,37 @@ static const struct bpf_func_proto bpf_tcp_raw_check_syncookie_ipv6_proto = {
 
 #endif /* CONFIG_INET */
 
-bool bpf_helper_changes_pkt_data(void *func)
+bool bpf_helper_changes_pkt_data(enum bpf_func_id func_id)
 {
-	if (func == bpf_skb_vlan_push ||
-	    func == bpf_skb_vlan_pop ||
-	    func == bpf_skb_store_bytes ||
-	    func == bpf_skb_change_proto ||
-	    func == bpf_skb_change_head ||
-	    func == sk_skb_change_head ||
-	    func == bpf_skb_change_tail ||
-	    func == sk_skb_change_tail ||
-	    func == bpf_skb_adjust_room ||
-	    func == sk_skb_adjust_room ||
-	    func == bpf_skb_pull_data ||
-	    func == sk_skb_pull_data ||
-	    func == bpf_clone_redirect ||
-	    func == bpf_l3_csum_replace ||
-	    func == bpf_l4_csum_replace ||
-	    func == bpf_xdp_adjust_head ||
-	    func == bpf_xdp_adjust_meta ||
-	    func == bpf_msg_pull_data ||
-	    func == bpf_msg_push_data ||
-	    func == bpf_msg_pop_data ||
-	    func == bpf_xdp_adjust_tail ||
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
-	    func == bpf_lwt_seg6_store_bytes ||
-	    func == bpf_lwt_seg6_adjust_srh ||
-	    func == bpf_lwt_seg6_action ||
-#endif
-#ifdef CONFIG_INET
-	    func == bpf_sock_ops_store_hdr_opt ||
-#endif
-	    func == bpf_lwt_in_push_encap ||
-	    func == bpf_lwt_xmit_push_encap)
+	switch (func_id) {
+	case BPF_FUNC_clone_redirect:
+	case BPF_FUNC_l3_csum_replace:
+	case BPF_FUNC_l4_csum_replace:
+	case BPF_FUNC_lwt_push_encap:
+	case BPF_FUNC_lwt_seg6_action:
+	case BPF_FUNC_lwt_seg6_adjust_srh:
+	case BPF_FUNC_lwt_seg6_store_bytes:
+	case BPF_FUNC_msg_pop_data:
+	case BPF_FUNC_msg_pull_data:
+	case BPF_FUNC_msg_push_data:
+	case BPF_FUNC_skb_adjust_room:
+	case BPF_FUNC_skb_change_head:
+	case BPF_FUNC_skb_change_proto:
+	case BPF_FUNC_skb_change_tail:
+	case BPF_FUNC_skb_pull_data:
+	case BPF_FUNC_skb_store_bytes:
+	case BPF_FUNC_skb_vlan_pop:
+	case BPF_FUNC_skb_vlan_push:
+	case BPF_FUNC_store_hdr_opt:
+	case BPF_FUNC_xdp_adjust_head:
+	case BPF_FUNC_xdp_adjust_meta:
+	case BPF_FUNC_xdp_adjust_tail:
+	/* tail-called program could call any of the above */
+	case BPF_FUNC_tail_call:
 		return true;
-
-	return false;
+	default:
+		return false;
+	}
 }
 
 const struct bpf_func_proto bpf_event_output_data_proto __weak;
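Note: keying the predicate off BPF func IDs instead of resolved function pointers is what lets the verifier's CFG pass query it straight from the instruction stream, before fn->func is ever looked up (cf. visit_insn() in the verifier hunk above):

	/* The insn->imm of a helper call *is* the enum bpf_func_id. */
	if (bpf_helper_call(insn) && bpf_helper_changes_pkt_data(insn->imm))
		mark_subprog_changes_pkt_data(env, t);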
@@ -9461,7 +9456,7 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog,
 					TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK);
 		*insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg,
 					TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK, 2);
-		/* skb->tc_at_ingress && skb->mono_delivery_time,
+		/* skb->tc_at_ingress && skb->tstamp_type,
 		 * read 0 as the (rcv) timestamp.
 		 */
 		*insn++ = BPF_MOV64_IMM(value_reg, 0);
@@ -9486,7 +9481,7 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
 	 * the bpf prog is aware the tstamp could have delivery time.
 	 * Thus, write skb->tstamp as is if tstamp_type_access is true.
 	 * Otherwise, writing at ingress will have to clear the
-	 * mono_delivery_time bit also.
+	 * skb->tstamp_type bit also.
 	 */
 	if (!prog->tstamp_type_access) {
 		__u8 tmp_reg = BPF_REG_AX;
@@ -9496,7 +9491,7 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
 		*insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1);
 		/* goto <store> */
 		*insn++ = BPF_JMP_A(2);
-		/* <clear>: mono_delivery_time */
+		/* <clear>: skb->tstamp_type */
 		*insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK);
 		*insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, SKB_BF_MONO_TC_OFFSET);
 	}
@@ -130,7 +130,7 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
 		goto err;
 
 	fq->q.stamp = skb->tstamp;
-	fq->q.mono_delivery_time = skb->mono_delivery_time;
+	fq->q.tstamp_type = skb->tstamp_type;
 	if (frag_type == LOWPAN_DISPATCH_FRAG1)
 		fq->q.flags |= INET_FRAG_FIRST_IN;
 
@@ -619,7 +619,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
 	skb_mark_not_on_list(head);
 	head->prev = NULL;
 	head->tstamp = q->stamp;
-	head->mono_delivery_time = q->mono_delivery_time;
+	head->tstamp_type = q->tstamp_type;
 
 	if (sk)
 		refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);
@@ -360,7 +360,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		qp->iif = dev->ifindex;
 
 	qp->q.stamp = skb->tstamp;
-	qp->q.mono_delivery_time = skb->mono_delivery_time;
+	qp->q.tstamp_type = skb->tstamp_type;
 	qp->q.meat += skb->len;
 	qp->ecn |= ecn;
 	add_frag_mem_limit(qp->q.fqdir, skb->truesize);
@@ -764,7 +764,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 {
 	struct iphdr *iph;
 	struct sk_buff *skb2;
-	bool mono_delivery_time = skb->mono_delivery_time;
+	u8 tstamp_type = skb->tstamp_type;
 	struct rtable *rt = skb_rtable(skb);
 	unsigned int mtu, hlen, ll_rs;
 	struct ip_fraglist_iter iter;
@@ -856,7 +856,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 			}
 		}
 
-		skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+		skb_set_delivery_time(skb, tstamp, tstamp_type);
 		err = output(net, sk, skb);
 
 		if (!err)
@@ -912,7 +912,7 @@ slow_path:
 		/*
 		 *	Put this fragment into the sending queue.
 		 */
-		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
+		skb_set_delivery_time(skb2, tstamp, tstamp_type);
 		err = output(net, sk, skb2);
 		if (err)
 			goto fail;
@@ -1648,7 +1648,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
 								arg->csum));
 		nskb->ip_summed = CHECKSUM_NONE;
-		nskb->mono_delivery_time = !!transmit_time;
+		if (transmit_time)
+			nskb->tstamp_type = SKB_CLOCK_MONOTONIC;
 		if (txhash)
 			skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
 		ip_push_pending_frames(sk, &fl4);
@@ -1272,7 +1272,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 	tp = tcp_sk(sk);
 	prior_wstamp = tp->tcp_wstamp_ns;
 	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
-	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
+	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
 	if (clone_it) {
 		oskb = skb;
 
@@ -1613,7 +1613,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 
 	skb_split(skb, buff, len);
 
-	skb_set_delivery_time(buff, skb->tstamp, true);
+	skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC);
 	tcp_fragment_tstamp(skb, buff);
 
 	old_factor = tcp_skb_pcount(skb);
@@ -2709,7 +2709,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
 			tp->tcp_wstamp_ns = tp->tcp_clock_cache;
-			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
+			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
 			tcp_init_tso_segs(skb, mss_now);
 			goto repair; /* Skip network transmission */
@@ -3695,11 +3695,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
 		skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
-				      true);
+				      SKB_CLOCK_MONOTONIC);
 	else
 #endif
 	{
-		skb_set_delivery_time(skb, now, true);
+		skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
 	}
@@ -3748,7 +3748,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
 				synack_type, &opts);
 
-	skb_set_delivery_time(skb, now, true);
+	skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
 	tcp_add_tx_delay(skb, tp);
 
 	return skb;
@@ -3930,7 +3930,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 
 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
 
-	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true);
+	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC);
 
 	/* Now full SYN+DATA was cloned and sent (or not),
 	 * remove the SYN from the original skb (syn_data)
@@ -247,6 +247,62 @@ static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
 	return segs;
 }
 
+static void __udpv6_gso_segment_csum(struct sk_buff *seg,
+				     struct in6_addr *oldip,
+				     const struct in6_addr *newip,
+				     __be16 *oldport, __be16 newport)
+{
+	struct udphdr *uh = udp_hdr(seg);
+
+	if (ipv6_addr_equal(oldip, newip) && *oldport == newport)
+		return;
+
+	if (uh->check) {
+		inet_proto_csum_replace16(&uh->check, seg, oldip->s6_addr32,
+					  newip->s6_addr32, true);
+
+		inet_proto_csum_replace2(&uh->check, seg, *oldport, newport,
+					 false);
+		if (!uh->check)
+			uh->check = CSUM_MANGLED_0;
+	}
+
+	*oldip = *newip;
+	*oldport = newport;
+}
+
+static struct sk_buff *__udpv6_gso_segment_list_csum(struct sk_buff *segs)
+{
+	const struct ipv6hdr *iph;
+	const struct udphdr *uh;
+	struct ipv6hdr *iph2;
+	struct sk_buff *seg;
+	struct udphdr *uh2;
+
+	seg = segs;
+	uh = udp_hdr(seg);
+	iph = ipv6_hdr(seg);
+	uh2 = udp_hdr(seg->next);
+	iph2 = ipv6_hdr(seg->next);
+
+	if (!(*(const u32 *)&uh->source ^ *(const u32 *)&uh2->source) &&
+	    ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
+	    ipv6_addr_equal(&iph->daddr, &iph2->daddr))
+		return segs;
+
+	while ((seg = seg->next)) {
+		uh2 = udp_hdr(seg);
+		iph2 = ipv6_hdr(seg);
+
+		__udpv6_gso_segment_csum(seg, &iph2->saddr, &iph->saddr,
+					 &uh2->source, uh->source);
+		__udpv6_gso_segment_csum(seg, &iph2->daddr, &iph->daddr,
+					 &uh2->dest, uh->dest);
+	}
+
+	return segs;
+}
+
 static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
 					      netdev_features_t features,
 					      bool is_ipv6)
@@ -259,7 +315,10 @@ static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
 
 	udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);
 
-	return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
+	if (is_ipv6)
+		return __udpv6_gso_segment_list_csum(skb);
+	else
+		return __udpv4_gso_segment_list_csum(skb);
 }
 
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
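Note: the fixup above is an incremental checksum update in the RFC 1624 style: only the words that changed (addresses and ports) are folded out of and into the existing checksum, so the payload is never re-summed; a computed 0 must then be sent as CSUM_MANGLED_0 (0xffff), since 0 means "checksum absent" for UDP. The core identity in miniature, as a standalone sketch (illustrative, not kernel code):

	/* RFC 1624: HC' = ~(~HC + ~m + m') for each changed 16-bit word. */
	static inline unsigned short csum_update16(unsigned short csum,
						   unsigned short old,
						   unsigned short new)
	{
		unsigned long sum = (unsigned short)~csum +
				    (unsigned short)~old + new;

		sum = (sum & 0xffff) + (sum >> 16); /* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
		return (unsigned short)~sum;
	}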
@@ -864,7 +864,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
 				inet6_sk(skb->sk) : NULL;
-	bool mono_delivery_time = skb->mono_delivery_time;
+	u8 tstamp_type = skb->tstamp_type;
 	struct ip6_frag_state state;
 	unsigned int mtu, hlen, nexthdr_offset;
 	ktime_t tstamp = skb->tstamp;
@@ -958,7 +958,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 			if (iter.frag)
 				ip6_fraglist_prepare(skb, &iter);
 
-			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+			skb_set_delivery_time(skb, tstamp, tstamp_type);
 			err = output(net, sk, skb);
 			if (!err)
 				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
@@ -1019,7 +1019,7 @@ slow_path:
 		/*
 		 *	Put this fragment into the sending queue.
 		 */
-		skb_set_delivery_time(frag, tstamp, mono_delivery_time);
+		skb_set_delivery_time(frag, tstamp, tstamp_type);
 		err = output(net, sk, frag);
 		if (err)
 			goto fail;
@@ -126,7 +126,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 			    struct sk_buff *))
 {
 	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
-	bool mono_delivery_time = skb->mono_delivery_time;
+	u8 tstamp_type = skb->tstamp_type;
 	ktime_t tstamp = skb->tstamp;
 	struct ip6_frag_state state;
 	u8 *prevhdr, nexthdr = 0;
@@ -192,7 +192,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 			if (iter.frag)
 				ip6_fraglist_prepare(skb, &iter);
 
-			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
+			skb_set_delivery_time(skb, tstamp, tstamp_type);
 			err = output(net, sk, data, skb);
 			if (err || !iter.frag)
 				break;
@@ -225,7 +225,7 @@ slow_path:
 			goto blackhole;
 		}
 
-		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
+		skb_set_delivery_time(skb2, tstamp, tstamp_type);
 		err = output(net, sk, data, skb2);
 		if (err)
 			goto blackhole;
@@ -268,7 +268,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 		fq->iif = dev->ifindex;
 
 	fq->q.stamp = skb->tstamp;
-	fq->q.mono_delivery_time = skb->mono_delivery_time;
+	fq->q.tstamp_type = skb->tstamp_type;
 	fq->q.meat += skb->len;
 	fq->ecn |= ecn;
 	if (payload_len > fq->q.max_size)
@@ -198,7 +198,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 		fq->iif = dev->ifindex;
 
 	fq->q.stamp = skb->tstamp;
-	fq->q.mono_delivery_time = skb->mono_delivery_time;
+	fq->q.tstamp_type = skb->tstamp_type;
 	fq->q.meat += skb->len;
 	fq->ecn |= ecn;
 	add_frag_mem_limit(fq->q.fqdir, skb->truesize);
@@ -937,7 +937,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 			mark = inet_twsk(sk)->tw_mark;
 		else
 			mark = READ_ONCE(sk->sk_mark);
-		skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
+		skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_CLOCK_MONOTONIC);
 	}
 	if (txhash) {
 		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */
@@ -54,8 +54,8 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
 		bpf_compute_data_pointers(skb);
 		filter_res = bpf_prog_run(filter, skb);
 	}
-	if (unlikely(!skb->tstamp && skb->mono_delivery_time))
-		skb->mono_delivery_time = 0;
+	if (unlikely(!skb->tstamp && skb->tstamp_type))
+		skb->tstamp_type = SKB_CLOCK_REALTIME;
 	if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
 		skb_orphan(skb);
 
@@ -104,8 +104,8 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
 		bpf_compute_data_pointers(skb);
 		filter_res = bpf_prog_run(prog->filter, skb);
 	}
-	if (unlikely(!skb->tstamp && skb->mono_delivery_time))
-		skb->mono_delivery_time = 0;
+	if (unlikely(!skb->tstamp && skb->tstamp_type))
+		skb->tstamp_type = SKB_CLOCK_REALTIME;
 
 	if (prog->exts_integrated) {
 		res->class = 0;
@@ -35,6 +35,11 @@ struct drr_sched {
 	struct Qdisc_class_hash		clhash;
 };
 
+static bool cl_is_active(struct drr_class *cl)
+{
+	return !list_empty(&cl->alist);
+}
+
 static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
 {
 	struct drr_sched *q = qdisc_priv(sch);
@@ -105,6 +110,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 		return -ENOBUFS;
 
 	gnet_stats_basic_sync_init(&cl->bstats);
+	INIT_LIST_HEAD(&cl->alist);
 	cl->common.classid = classid;
 	cl->quantum = quantum;
 	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
@@ -229,7 +235,7 @@ static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
 {
 	struct drr_class *cl = (struct drr_class *)arg;
 
-	list_del(&cl->alist);
+	list_del_init(&cl->alist);
 }
 
 static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
@@ -336,7 +342,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
 	int err = 0;
-	bool first;
 
 	cl = drr_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -346,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
-	first = !cl->qdisc->q.qlen;
 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -356,7 +360,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
-	if (first) {
+	if (!cl_is_active(cl)) {
 		list_add_tail(&cl->alist, &q->active);
 		cl->deficit = cl->quantum;
 	}
@@ -390,7 +394,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
 		if (unlikely(skb == NULL))
 			goto out;
 		if (cl->qdisc->q.qlen == 0)
-			list_del(&cl->alist);
+			list_del_init(&cl->alist);
 
 		bstats_update(&cl->bstats, skb);
 		qdisc_bstats_update(sch, skb);
@@ -431,7 +435,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (cl->qdisc->q.qlen)
-				list_del(&cl->alist);
+				list_del_init(&cl->alist);
 			qdisc_reset(cl->qdisc);
 		}
 	}
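Note: the switch to list_del_init() is what makes "is this class on the active list?" answerable at all: plain list_del() poisons the node, so neither a second removal nor a list_empty() test is defined afterwards, while list_del_init() leaves the node pointing at itself. A standalone illustration (not kernel code, assuming the node was INIT_LIST_HEAD'd first):

	LIST_HEAD(active);
	struct drr_class c = { .alist = LIST_HEAD_INIT(c.alist) };

	list_add_tail(&c.alist, &active);
	list_del_init(&c.alist);        /* node now points at itself      */
	WARN_ON(!list_empty(&c.alist)); /* safe: test is well-defined     */
	list_del_init(&c.alist);        /* safe: second removal is a no-op */

The same pattern recurs in sch_ets and sch_qfq below, each paired with a cl_is_active() helper that replaces the old "was the child queue empty?" heuristic.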
@@ -74,6 +74,11 @@ static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
 	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
 };
 
+static bool cl_is_active(struct ets_class *cl)
+{
+	return !list_empty(&cl->alist);
+}
+
 static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
 			     unsigned int *quantum,
 			     struct netlink_ext_ack *extack)
@@ -293,7 +298,7 @@ static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
 	 * to remove them.
 	 */
 	if (!ets_class_is_strict(q, cl) && sch->q.qlen)
-		list_del(&cl->alist);
+		list_del_init(&cl->alist);
 }
 
 static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
@@ -416,7 +421,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	struct ets_sched *q = qdisc_priv(sch);
 	struct ets_class *cl;
 	int err = 0;
-	bool first;
 
 	cl = ets_classify(skb, sch, &err);
 	if (!cl) {
@@ -426,7 +430,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
-	first = !cl->qdisc->q.qlen;
 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -436,7 +439,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		return err;
 	}
 
-	if (first && !ets_class_is_strict(q, cl)) {
+	if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) {
 		list_add_tail(&cl->alist, &q->active);
 		cl->deficit = cl->quantum;
 	}
@@ -488,7 +491,7 @@ static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
 		if (unlikely(!skb))
 			goto out;
 		if (cl->qdisc->q.qlen == 0)
-			list_del(&cl->alist);
+			list_del_init(&cl->alist);
 		return ets_qdisc_dequeue_skb(sch, skb);
 	}
 
@@ -657,7 +660,7 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
 	}
 	for (i = q->nbands; i < oldbands; i++) {
 		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
-			list_del(&q->classes[i].alist);
+			list_del_init(&q->classes[i].alist);
 		qdisc_tree_flush_backlog(q->classes[i].qdisc);
 	}
 	q->nstrict = nstrict;
@@ -713,7 +716,7 @@ static void ets_qdisc_reset(struct Qdisc *sch)
 
 	for (band = q->nstrict; band < q->nbands; band++) {
 		if (q->classes[band].qdisc->q.qlen)
-			list_del(&q->classes[band].alist);
+			list_del_init(&q->classes[band].alist);
 	}
 	for (band = 0; band < q->nbands; band++)
 		qdisc_reset(q->classes[band].qdisc);
@@ -203,7 +203,10 @@ eltree_insert(struct hfsc_class *cl)
 static inline void
 eltree_remove(struct hfsc_class *cl)
 {
-	rb_erase(&cl->el_node, &cl->sched->eligible);
+	if (!RB_EMPTY_NODE(&cl->el_node)) {
+		rb_erase(&cl->el_node, &cl->sched->eligible);
+		RB_CLEAR_NODE(&cl->el_node);
+	}
 }
 
 static inline void
@@ -1224,7 +1227,8 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
 	/* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from vttree.
	 */
-	update_vf(cl, 0, 0);
+	if (cl->cl_nactive)
+		update_vf(cl, 0, 0);
 	if (cl->cl_flags & HFSC_RSC)
 		eltree_remove(cl);
 }
@@ -1566,7 +1570,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 		return err;
 	}
 
-	if (first) {
+	if (first && !cl->cl_nactive) {
 		if (cl->cl_flags & HFSC_RSC)
 			init_ed(cl, len);
 		if (cl->cl_flags & HFSC_FSC)
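Note: this is the rbtree counterpart of the list_del_init() pattern above: RB_EMPTY_NODE() guards against erasing a node that is not linked (which corrupts the tree), and RB_CLEAR_NODE() marks it detached again. The pattern in miniature (illustrative; it assumes every node is also RB_CLEAR_NODE'd at init time, otherwise the emptiness test is meaningless):

	/* Make rbtree removal idempotent. */
	static void detach_once(struct rb_node *node, struct rb_root *root)
	{
		if (!RB_EMPTY_NODE(node)) {   /* only if currently linked   */
			rb_erase(node, root);
			RB_CLEAR_NODE(node);  /* mark detached for next time */
		}
	}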
@@ -1494,6 +1494,8 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
 
+	if (!cl->prio_activity)
+		return;
 	htb_deactivate(qdisc_priv(sch), cl);
 }
 
@@ -202,6 +202,11 @@ struct qfq_sched {
  */
 enum update_reason {enqueue, requeue};
 
+static bool cl_is_active(struct qfq_class *cl)
+{
+	return !list_empty(&cl->alist);
+}
+
 static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
@@ -347,7 +352,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
 {
 	struct qfq_aggregate *agg = cl->agg;
 
-	list_del(&cl->alist); /* remove from RR queue of the aggregate */
+	list_del_init(&cl->alist); /* remove from RR queue of the aggregate */
 	if (list_empty(&agg->active)) /* agg is now inactive */
 		qfq_deactivate_agg(q, agg);
 }
@@ -477,6 +482,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	gnet_stats_basic_sync_init(&cl->bstats);
 	cl->common.classid = classid;
 	cl->deficit = lmax;
+	INIT_LIST_HEAD(&cl->alist);
 
 	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
 				      classid, NULL);
@@ -985,7 +991,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
 	cl->deficit -= (int) len;
 
 	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
-		list_del(&cl->alist);
+		list_del_init(&cl->alist);
 	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
 		cl->deficit += agg->lmax;
 		list_move_tail(&cl->alist, &agg->active);
@@ -1217,7 +1223,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	struct qfq_class *cl;
 	struct qfq_aggregate *agg;
 	int err = 0;
-	bool first;
 
 	cl = qfq_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -1239,7 +1244,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	}
 
 	gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
-	first = !cl->qdisc->q.qlen;
 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1255,8 +1259,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	++sch->q.qlen;
 
 	agg = cl->agg;
-	/* if the queue was not empty, then done here */
-	if (!first) {
+	/* if the class is active, then done here */
+	if (cl_is_active(cl)) {
 		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
 		    list_first_entry(&agg->active, struct qfq_class, alist)
 		    == cl && cl->deficit < len)
@@ -1418,6 +1422,8 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl = (struct qfq_class *)arg;
 
+	if (list_empty(&cl->alist))
+		return;
 	qfq_deactivate_class(q, cl);
 }
 
@@ -840,14 +840,14 @@ static void ak4613_parse_of(struct ak4613_priv *priv,
 	/* Input 1 - 2 */
 	for (i = 0; i < 2; i++) {
 		snprintf(prop, sizeof(prop), "asahi-kasei,in%d-single-end", i + 1);
-		if (!of_get_property(np, prop, NULL))
+		if (!of_property_read_bool(np, prop))
 			priv->ic |= 1 << i;
 	}
 
 	/* Output 1 - 6 */
 	for (i = 0; i < 6; i++) {
 		snprintf(prop, sizeof(prop), "asahi-kasei,out%d-single-end", i + 1);
-		if (!of_get_property(np, prop, NULL))
+		if (!of_property_read_bool(np, prop))
 			priv->oc |= 1 << i;
 	}
 
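Both conversions above are behavior-preserving: of_get_property() returns a pointer (NULL when the property is absent), and the old code used that pointer purely as a presence test, which is exactly what of_property_read_bool() expresses. A minimal sketch of the equivalence, reusing np and prop from the hunk:

	/* DT boolean properties carry no value; presence alone means true. */
	bool old_style = !!of_get_property(np, prop, NULL);	/* pointer -> bool */
	bool new_style = of_property_read_bool(np, prop);	/* dedicated API   */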
@@ -2935,7 +2935,7 @@ int snd_soc_of_parse_pin_switches(struct snd_soc_card *card, const char *prop)
 	unsigned int i, nb_controls;
 	int ret;
 
-	if (!of_property_read_bool(dev->of_node, prop))
+	if (!of_property_present(dev->of_node, prop))
 		return 0;
 
 	strings = devm_kcalloc(dev, nb_controls_max,
@@ -3009,23 +3009,17 @@ int snd_soc_of_parse_tdm_slot(struct device_node *np,
 	if (rx_mask)
 		snd_soc_of_get_slot_mask(np, "dai-tdm-slot-rx-mask", rx_mask);
 
-	if (of_property_read_bool(np, "dai-tdm-slot-num")) {
-		ret = of_property_read_u32(np, "dai-tdm-slot-num", &val);
-		if (ret)
-			return ret;
-
-		if (slots)
-			*slots = val;
-	}
-
-	if (of_property_read_bool(np, "dai-tdm-slot-width")) {
-		ret = of_property_read_u32(np, "dai-tdm-slot-width", &val);
-		if (ret)
-			return ret;
-
-		if (slot_width)
-			*slot_width = val;
-	}
+	ret = of_property_read_u32(np, "dai-tdm-slot-num", &val);
+	if (ret && ret != -EINVAL)
+		return ret;
+	if (!ret && slots)
+		*slots = val;
+
+	ret = of_property_read_u32(np, "dai-tdm-slot-width", &val);
+	if (ret && ret != -EINVAL)
+		return ret;
+	if (!ret && slot_width)
+		*slot_width = val;
 
 	return 0;
 }
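The rewrite above drops the separate of_property_read_bool() probe and lets of_property_read_u32() report absence directly: it returns -EINVAL when the property does not exist, so treating -EINVAL as "keep the caller's default" preserves the optional semantics while still propagating genuine failures such as -ENODATA or -EOVERFLOW. The pattern in isolation (np, slots and val as in the hunk):

	u32 val;
	int ret = of_property_read_u32(np, "dai-tdm-slot-num", &val);

	if (ret && ret != -EINVAL)	/* absent (-EINVAL) is not an error  */
		return ret;		/* a malformed property, however, is */
	if (!ret && slots)		/* assign only when a value was read */
		*slots = val;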
@@ -3249,10 +3243,10 @@ unsigned int snd_soc_daifmt_parse_format(struct device_node *np,
 	 * SND_SOC_DAIFMT_INV_MASK area
 	 */
 	snprintf(prop, sizeof(prop), "%sbitclock-inversion", prefix);
-	bit = !!of_get_property(np, prop, NULL);
+	bit = of_property_read_bool(np, prop);
 
 	snprintf(prop, sizeof(prop), "%sframe-inversion", prefix);
-	frame = !!of_get_property(np, prop, NULL);
+	frame = of_property_read_bool(np, prop);
 
 	switch ((bit << 4) + frame) {
 	case 0x11:
@@ -3289,12 +3283,12 @@ unsigned int snd_soc_daifmt_parse_clock_provider_raw(struct device_node *np,
 	 * check "[prefix]frame-master"
 	 */
 	snprintf(prop, sizeof(prop), "%sbitclock-master", prefix);
-	bit = !!of_get_property(np, prop, NULL);
+	bit = of_property_present(np, prop);
 	if (bit && bitclkmaster)
 		*bitclkmaster = of_parse_phandle(np, prop, 0);
 
 	snprintf(prop, sizeof(prop), "%sframe-master", prefix);
-	frame = !!of_get_property(np, prop, NULL);
+	frame = of_property_present(np, prop);
 	if (frame && framemaster)
 		*framemaster = of_parse_phandle(np, prop, 0);
 
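Note the split across these soc-core.c conversions: properties that are true DT booleans (the inversion flags) become of_property_read_bool(), while "[prefix]bitclock-master" and "[prefix]frame-master" become of_property_present(), apparently because they are valued properties (phandles) whose existence merely gates the of_parse_phandle() read. Condensed (a sketch; prop holds the formatted name as in the hunks):

	bit = of_property_read_bool(np, prop);	/* empty flag: presence == true */

	frame = of_property_present(np, prop);	/* valued property: only test    */
	if (frame && framemaster)		/* existence, then read phandle  */
		*framemaster = of_parse_phandle(np, prop, 0);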
@@ -1542,10 +1542,13 @@ static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
 		/*
 		 * Filter for systems with 'component_chaining' enabled.
 		 * This helps to avoid unnecessary re-configuration of an
-		 * already active BE on such systems.
+		 * already active BE on such systems and ensures the BE DAI
+		 * widget is powered ON after hw_params() BE DAI callback.
 		 */
 		if (fe->card->component_chaining &&
 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) &&
+		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
+		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
 		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE))
 			continue;
 
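Read positively, the widened filter means a chained BE is only (re)configured while it is still early in its lifecycle; anything past hw_params is left untouched. The same condition as a standalone predicate (an illustrative sketch, not a helper the patch introduces; field names follow the hunk):

	static bool be_still_configurable(struct snd_soc_pcm_runtime *be,
					  int stream)
	{
		enum snd_soc_dpcm_state s = be->dpcm[stream].state;

		/* NEW/OPEN/HW_PARAMS/CLOSE: dpcm_add_paths() must still
		 * apply path changes to this BE.
		 */
		return s == SND_SOC_DPCM_STATE_NEW ||
		       s == SND_SOC_DPCM_STATE_OPEN ||
		       s == SND_SOC_DPCM_STATE_HW_PARAMS ||
		       s == SND_SOC_DPCM_STATE_CLOSE;
	}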
tools/testing/selftests/bpf/prog_tests/changes_pkt_data.c (new file, 107 lines)
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "bpf/libbpf.h"
+#include "changes_pkt_data_freplace.skel.h"
+#include "changes_pkt_data.skel.h"
+#include <test_progs.h>
+
+static void print_verifier_log(const char *log)
+{
+	if (env.verbosity >= VERBOSE_VERY)
+		fprintf(stdout, "VERIFIER LOG:\n=============\n%s=============\n", log);
+}
+
+static void test_aux(const char *main_prog_name,
+		     const char *to_be_replaced,
+		     const char *replacement,
+		     bool expect_load)
+{
+	struct changes_pkt_data_freplace *freplace = NULL;
+	struct bpf_program *freplace_prog = NULL;
+	struct bpf_program *main_prog = NULL;
+	LIBBPF_OPTS(bpf_object_open_opts, opts);
+	struct changes_pkt_data *main = NULL;
+	char log[16*1024];
+	int err;
+
+	opts.kernel_log_buf = log;
+	opts.kernel_log_size = sizeof(log);
+	if (env.verbosity >= VERBOSE_SUPER)
+		opts.kernel_log_level = 1 | 2 | 4;
+	main = changes_pkt_data__open_opts(&opts);
+	if (!ASSERT_OK_PTR(main, "changes_pkt_data__open"))
+		goto out;
+	main_prog = bpf_object__find_program_by_name(main->obj, main_prog_name);
+	if (!ASSERT_OK_PTR(main_prog, "main_prog"))
+		goto out;
+	bpf_program__set_autoload(main_prog, true);
+	err = changes_pkt_data__load(main);
+	print_verifier_log(log);
+	if (!ASSERT_OK(err, "changes_pkt_data__load"))
+		goto out;
+	freplace = changes_pkt_data_freplace__open_opts(&opts);
+	if (!ASSERT_OK_PTR(freplace, "changes_pkt_data_freplace__open"))
+		goto out;
+	freplace_prog = bpf_object__find_program_by_name(freplace->obj, replacement);
+	if (!ASSERT_OK_PTR(freplace_prog, "freplace_prog"))
+		goto out;
+	bpf_program__set_autoload(freplace_prog, true);
+	bpf_program__set_autoattach(freplace_prog, true);
+	bpf_program__set_attach_target(freplace_prog,
+				       bpf_program__fd(main_prog),
+				       to_be_replaced);
+	err = changes_pkt_data_freplace__load(freplace);
+	print_verifier_log(log);
+	if (expect_load) {
+		ASSERT_OK(err, "changes_pkt_data_freplace__load");
+	} else {
+		ASSERT_ERR(err, "changes_pkt_data_freplace__load");
+		ASSERT_HAS_SUBSTR(log, "Extension program changes packet data", "error log");
+	}
+
+out:
+	changes_pkt_data_freplace__destroy(freplace);
+	changes_pkt_data__destroy(main);
+}
+
+/* There are two global subprograms in both changes_pkt_data.skel.h:
+ * - one changes packet data;
+ * - another does not.
+ * It is ok to freplace subprograms that change packet data with those
+ * that either do or do not. It is only ok to freplace subprograms
+ * that do not change packet data with those that do not as well.
+ * The below tests check outcomes for each combination of such freplace.
+ * Also test a case when main subprogram itself is replaced and is a single
+ * subprogram in a program.
+ */
+void test_changes_pkt_data_freplace(void)
+{
+	struct {
+		const char *main;
+		const char *to_be_replaced;
+		bool changes;
+	} mains[] = {
+		{ "main_with_subprogs", "changes_pkt_data", true },
+		{ "main_with_subprogs", "does_not_change_pkt_data", false },
+		{ "main_changes", "main_changes", true },
+		{ "main_does_not_change", "main_does_not_change", false },
+	};
+	struct {
+		const char *func;
+		bool changes;
+	} replacements[] = {
+		{ "changes_pkt_data", true },
+		{ "does_not_change_pkt_data", false }
+	};
+	char buf[64];
+
+	for (int i = 0; i < ARRAY_SIZE(mains); ++i) {
+		for (int j = 0; j < ARRAY_SIZE(replacements); ++j) {
+			snprintf(buf, sizeof(buf), "%s_with_%s",
+				 mains[i].to_be_replaced, replacements[j].func);
+			if (!test__start_subtest(buf))
+				continue;
+			test_aux(mains[i].main, mains[i].to_be_replaced, replacements[j].func,
+				 mains[i].changes || !replacements[j].changes);
+		}
+	}
+}
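The expect_load argument computed in the loop above encodes the freplace compatibility rule from the comment: a target that changes packet data accepts any replacement, while a target that does not change packet data only accepts replacements that also do not. Spelled out as a predicate (a sketch mirroring the test, not kernel code):

	/* May `repl` replace subprogram `target`?
	 *
	 *   target changes pkt data | repl changes | load
	 *   ------------------------+--------------+---------
	 *   yes                     | yes or no    | ok
	 *   no                      | no           | ok
	 *   no                      | yes          | rejected
	 */
	static bool freplace_should_load(bool target_changes, bool repl_changes)
	{
		return target_changes || !repl_changes;
	}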
tools/testing/selftests/bpf/progs/changes_pkt_data.c (new file, 39 lines)
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__noinline
+long changes_pkt_data(struct __sk_buff *sk)
+{
+	return bpf_skb_pull_data(sk, 0);
+}
+
+__noinline __weak
+long does_not_change_pkt_data(struct __sk_buff *sk)
+{
+	return 0;
+}
+
+SEC("?tc")
+int main_with_subprogs(struct __sk_buff *sk)
+{
+	changes_pkt_data(sk);
+	does_not_change_pkt_data(sk);
+	return 0;
+}
+
+SEC("?tc")
+int main_changes(struct __sk_buff *sk)
+{
+	bpf_skb_pull_data(sk, 0);
+	return 0;
+}
+
+SEC("?tc")
+int main_does_not_change(struct __sk_buff *sk)
+{
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
tools/testing/selftests/bpf/progs/changes_pkt_data_freplace.c (new file, 18 lines; path inferred from the skeleton include above)
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+SEC("?freplace")
+long changes_pkt_data(struct __sk_buff *sk)
+{
+	return bpf_skb_pull_data(sk, 0);
+}
+
+SEC("?freplace")
+long does_not_change_pkt_data(struct __sk_buff *sk)
+{
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";
@@ -50,6 +50,13 @@ struct {
 	__uint(map_flags, BPF_F_NO_PREALLOC);
 } sk_storage_map SEC(".maps");
 
+struct {
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
 SEC("cgroup/skb")
 __description("skb->sk: no NULL check")
 __failure __msg("invalid mem access 'sock_common_or_null'")
@@ -977,4 +984,53 @@ l1_%=: r0 = *(u8*)(r7 + 0); \
 	: __clobber_all);
 }
+
+__noinline
+long skb_pull_data2(struct __sk_buff *sk, __u32 len)
+{
+	return bpf_skb_pull_data(sk, len);
+}
+
+__noinline
+long skb_pull_data1(struct __sk_buff *sk, __u32 len)
+{
+	return skb_pull_data2(sk, len);
+}
+
+/* global function calls bpf_skb_pull_data(), which invalidates packet
+ * pointers established before global function call.
+ */
+SEC("tc")
+__failure __msg("invalid mem access")
+int invalidate_pkt_pointers_from_global_func(struct __sk_buff *sk)
+{
+	int *p = (void *)(long)sk->data;
+
+	if ((void *)(p + 1) > (void *)(long)sk->data_end)
+		return TCX_DROP;
+	skb_pull_data1(sk, 0);
+	*p = 42; /* this is unsafe */
+	return TCX_PASS;
+}
+
+__noinline
+int tail_call(struct __sk_buff *sk)
+{
+	bpf_tail_call_static(sk, &jmp_table, 0);
+	return 0;
+}
+
+/* Tail calls invalidate packet pointers. */
+SEC("tc")
+__failure __msg("invalid mem access")
+int invalidate_pkt_pointers_by_tail_call(struct __sk_buff *sk)
+{
+	int *p = (void *)(long)sk->data;
+
+	if ((void *)(p + 1) > (void *)(long)sk->data_end)
+		return TCX_DROP;
+	tail_call(sk);
+	*p = 42; /* this is unsafe */
+	return TCX_PASS;
+}
 
 char _license[] SEC("license") = "GPL";
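Both new negative tests write through a packet pointer derived before a pointer-invalidating event. The accepted counterpart re-derives and re-checks the pointers after the call, along these lines (a sketch in the style of the tests above, not part of the patch):

	SEC("tc")
	int revalidate_after_pull(struct __sk_buff *sk)
	{
		int *p;

		skb_pull_data1(sk, 0);		/* may invalidate pkt pointers */
		p = (void *)(long)sk->data;	/* re-derive ...               */
		if ((void *)(p + 1) > (void *)(long)sk->data_end)
			return TCX_DROP;	/* ... and re-check bounds     */
		*p = 42;			/* now provably in bounds      */
		return TCX_PASS;
	}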