Merge 6.6.99 into android15-6.6-lts

Changes in 6.6.99
	eventpoll: don't decrement ep refcount while still holding the ep mutex
	drm/exynos: exynos7_drm_decon: add vblank check in IRQ handling
	ASoC: fsl_asrc: use internal measured ratio for non-ideal ratio mode
	perf/core: Fix the WARN_ON_ONCE is out of lock protected region
	perf: Revert to requiring CAP_SYS_ADMIN for uprobes
	ASoC: cs35l56: probe() should fail if the device ID is not recognized
	Bluetooth: hci_sync: Fix not disabling advertising instance
	Bluetooth: hci_event: Fix not marking Broadcast Sink BIS as connected
	pinctrl: amd: Clear GPIO debounce for suspend
	fix proc_sys_compare() handling of in-lookup dentries
	netlink: Fix wraparounds of sk->sk_rmem_alloc.
	vsock: fix `vsock_proto` declaration
	tipc: Fix use-after-free in tipc_conn_close().
	tcp: Correct signedness in skb remaining space calculation
	vsock: Fix transport_{g2h,h2g} TOCTOU
	vsock: Fix transport_* TOCTOU
	vsock: Fix IOCTL_VM_SOCKETS_GET_LOCAL_CID to check also `transport_local`
	net: stmmac: Fix interrupt handling for level-triggered mode in DWC_XGMAC2
	net: phy: smsc: Fix Auto-MDIX configuration when disabled by strap
	net: phy: smsc: Force predictable MDI-X state on LAN87xx
	net: phy: smsc: Fix link failure in forced mode with Auto-MDIX
	atm: clip: Fix potential null-ptr-deref in to_atmarpd().
	atm: clip: Fix memory leak of struct clip_vcc.
	atm: clip: Fix infinite recursive call of clip_push().
	atm: clip: Fix NULL pointer dereference in vcc_sendmsg()
	net/sched: Abort __tc_modify_qdisc if parent class does not exist
	rxrpc: Fix bug due to prealloc collision
	maple_tree: fix MA_STATE_PREALLOC flag in mas_preallocate()
	perf: build: Setup PKG_CONFIG_LIBDIR for cross compilation
	Bluetooth: HCI: Set extended advertising data synchronously
	rxrpc: Fix oops due to non-existence of prealloc backlog struct
	ipmi:msghandler: Fix potential memory corruption in ipmi_create_user()
	x86/mce/amd: Add default names for MCA banks and blocks
	x86/mce/amd: Fix threshold limit reset
	x86/mce: Don't remove sysfs if thresholding sysfs init fails
	x86/mce: Make sure CMCI banks are cleared during shutdown on Intel
	KVM: x86/xen: Allow 'out of range' event channel ports in IRQ routing table.
	KVM: SVM: Reject SEV{-ES} intra host migration if vCPU creation is in-flight
	gre: Fix IPv6 multicast route creation.
	md/md-bitmap: fix GPF in bitmap_get_stats()
	pinctrl: qcom: msm: mark certain pins as invalid for interrupts
	wifi: prevent A-MSDU attacks in mesh networks
	drm/gem: Acquire references on GEM handles for framebuffers
	drm/sched: Increment job count before swapping tail spsc queue
	drm/ttm: fix error handling in ttm_buffer_object_transfer
	drm/gem: Fix race in drm_gem_handle_create_tail()
	usb: gadget: u_serial: Fix race condition in TTY wakeup
	Revert "usb: gadget: u_serial: Add null pointer check in gs_start_io"
	drm/framebuffer: Acquire internal references on GEM handles
	Revert "ACPI: battery: negate current when discharging"
	kallsyms: fix build without execinfo
	maple_tree: fix mt_destroy_walk() on root leaf node
	mm: fix the inaccurate memory statistics issue for users
	scripts/gdb: fix interrupts display after MCP on x86
	scripts/gdb: de-reference per-CPU MCE interrupts
	scripts/gdb: fix interrupts.py after maple tree conversion
	mm/vmalloc: leave lazy MMU mode on PTE mapping error
	pwm: mediatek: Ensure to disable clocks in error path
	x86/rdrand: Disable RDSEED on AMD Cyan Skillfish
	x86/mm: Disable hugetlb page table sharing on 32-bit
	smb: server: make use of rdma_destroy_qp()
	ksmbd: fix a mount write count leak in ksmbd_vfs_kern_path_locked()
	erofs: fix to add missing tracepoint in erofs_read_folio()
	netlink: Fix rmem check in netlink_broadcast_deliver().
	netlink: make sure we allow at least one dump skb
	btrfs: remove noinline from btrfs_update_inode()
	btrfs: remove redundant root argument from btrfs_update_inode_fallback()
	btrfs: remove redundant root argument from fixup_inode_link_count()
	btrfs: return a btrfs_inode from btrfs_iget_logging()
	btrfs: fix inode lookup error handling during log replay
	usb:cdnsp: remove TRB_FLUSH_ENDPOINT command
	usb: cdnsp: Replace snprintf() with the safer scnprintf() variant
	usb: cdnsp: Fix issue with CV Bad Descriptor test
	usb: dwc3: Abort suspend on soft disconnect failure
	smb: client: avoid unnecessary reconnects when refreshing referrals
	smb: client: fix DFS interlink failover
	cifs: all initializations for tcon should happen in tcon_info_alloc
	wifi: zd1211rw: Fix potential NULL pointer dereference in zd_mac_tx_to_dev()
	drm/tegra: nvdec: Fix dma_alloc_coherent error check
	md/raid1: Fix stack memory use after return in raid1_reshape
	raid10: cleanup memleak at raid10_make_request
	nbd: fix uaf in nbd_genl_connect() error path
	netfilter: flowtable: account for Ethernet header in nf_flow_pppoe_proto()
	net: appletalk: Fix device refcount leak in atrtr_create()
	ibmvnic: Fix hardcoded NUM_RX_STATS/NUM_TX_STATS with dynamic sizeof
	net: phy: microchip: limit 100M workaround to link-down events on LAN88xx
	can: m_can: m_can_handle_lost_msg(): downgrade msg lost in rx message to debug level
	net: ll_temac: Fix missing tx_pending check in ethtools_set_ringparam()
	bnxt_en: Fix DCB ETS validation
	bnxt_en: Set DMA unmap len correctly for XDP_REDIRECT
	ublk: sanity check add_dev input for underflow
	atm: idt77252: Add missing `dma_map_error()`
	um: vector: Reduce stack usage in vector_eth_configure()
	io_uring: make fallocate be hashed work
	ASoC: amd: yc: add quirk for Acer Nitro ANV15-41 internal mic
	ALSA: hda/realtek - Enable mute LED on HP Pavilion Laptop 15-eg100
	net: usb: qmi_wwan: add SIMCom 8230C composition
	HID: lenovo: Add support for ThinkPad X1 Tablet Thin Keyboard Gen2
	net: mana: Record doorbell physical address in PF mode
	btrfs: fix assertion when building free space tree
	vt: add missing notification when switching back to text mode
	bpf: Adjust free target to avoid global starvation of LRU map
	HID: Add IGNORE quirk for SMARTLINKTECHNOLOGY
	HID: quirks: Add quirk for 2 Chicony Electronics HP 5MP Cameras
	Input: atkbd - do not skip atkbd_deactivate() when skipping ATKBD_CMD_GETID
	selftests/bpf: adapt one more case in test_lru_map to the new target_free
	smb: client: fix potential race in cifs_put_tcon()
	kasan: remove kasan_find_vm_area() to prevent possible deadlock
	ksmbd: fix potential use-after-free in oplock/lease break ack
	crypto: ecdsa - Harden against integer overflows in DIV_ROUND_UP()
	rseq: Fix segfault on registration when rseq_cs is non-zero
	Linux 6.6.99

Change-Id: If006db3205eee54f5e3f425a90608fddcf8d5801
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

@@ -233,10 +233,16 @@ attempts in order to enforce the LRU property which have increasing impacts on
other CPUs involved in the following operation attempts:
- Attempt to use CPU-local state to batch operations
- Attempt to fetch free nodes from global lists
- Attempt to fetch ``target_free`` free nodes from global lists
- Attempt to pull any node from a global list and remove it from the hashmap
- Attempt to pull any node from any CPU's list and remove it from the hashmap
The number of nodes to borrow from the global list in a batch, ``target_free``,
depends on the size of the map. Larger batch size reduces lock contention, but
may also exhaust the global structure. The value is computed at map init to
avoid exhaustion, by limiting aggregate reservation by all CPUs to half the map
size, with a minimum of a single element and a maximum budget of 128 at a time.
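For illustration, a minimal sketch of that init-time computation (names follow
the upstream patch; clamp() is the kernel helper from <linux/minmax.h> and
LOCAL_FREE_TARGET is the 128-element cap):

	/* Half the map, split across CPUs, clamped to [1, LOCAL_FREE_TARGET]. */
	lru->target_free = clamp((nr_elems / num_possible_cpus()) / 2,
				 1, LOCAL_FREE_TARGET);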
This algorithm is described visually in the following diagram. See the
description in commit 3a08c2fd7634 ("bpf: LRU List") for a full explanation of
the corresponding operations:


@@ -35,18 +35,18 @@ digraph {
fn_bpf_lru_list_pop_free_to_local [shape=rectangle,fillcolor=2,
label="Flush local pending,
Rotate Global list, move
LOCAL_FREE_TARGET
target_free
from global -> local"]
// Also corresponds to:
// fn__local_list_flush()
// fn_bpf_lru_list_rotate()
fn___bpf_lru_node_move_to_free[shape=diamond,fillcolor=2,
label="Able to free\nLOCAL_FREE_TARGET\nnodes?"]
label="Able to free\ntarget_free\nnodes?"]
fn___bpf_lru_list_shrink_inactive [shape=rectangle,fillcolor=3,
label="Shrink inactive list
up to remaining
LOCAL_FREE_TARGET
target_free
(global LRU -> local)"]
fn___bpf_lru_list_shrink [shape=diamond,fillcolor=2,
label="> 0 entries in\nlocal free list?"]


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 98
SUBLEVEL = 99
EXTRAVERSION =
NAME = Pinguïn Aangedreven


@@ -1600,35 +1600,19 @@ static void vector_eth_configure(
device->dev = dev;
*vp = ((struct vector_private)
{
.list = LIST_HEAD_INIT(vp->list),
.dev = dev,
.unit = n,
.options = get_transport_options(def),
.rx_irq = 0,
.tx_irq = 0,
.parsed = def,
.max_packet = get_mtu(def) + ETH_HEADER_OTHER,
/* TODO - we need to calculate headroom so that ip header
* is 16 byte aligned all the time
*/
.headroom = get_headroom(def),
.form_header = NULL,
.verify_header = NULL,
.header_rxbuffer = NULL,
.header_txbuffer = NULL,
.header_size = 0,
.rx_header_size = 0,
.rexmit_scheduled = false,
.opened = false,
.transport_data = NULL,
.in_write_poll = false,
.coalesce = 2,
.req_size = get_req_size(def),
.in_error = false,
.bpf = NULL
});
INIT_LIST_HEAD(&vp->list);
vp->dev = dev;
vp->unit = n;
vp->options = get_transport_options(def);
vp->parsed = def;
vp->max_packet = get_mtu(def) + ETH_HEADER_OTHER;
/*
* TODO - we need to calculate headroom so that ip header
* is 16 byte aligned all the time
*/
vp->headroom = get_headroom(def);
vp->coalesce = 2;
vp->req_size = get_req_size(def);
dev->features = dev->hw_features = (NETIF_F_SG | NETIF_F_FRAGLIST);
INIT_WORK(&vp->reset_tx, vector_reset_tx);


@@ -128,7 +128,7 @@ config X86
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANTS_NO_INSTR
select ARCH_WANT_GENERAL_HUGETLB
select ARCH_WANT_HUGE_PMD_SHARE
select ARCH_WANT_HUGE_PMD_SHARE if X86_64
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_DAX_VMEMMAP if X86_64
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP if X86_64


@@ -575,6 +575,7 @@
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD_PPIN_CTL 0xc00102f0
#define MSR_AMD_PPIN 0xc00102f1
#define MSR_AMD64_CPUID_FN_7 0xc0011002
#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022


@@ -1154,6 +1154,13 @@ static void init_amd_zen2(struct cpuinfo_x86 *c)
{
fix_erratum_1386(c);
zen2_zenbleed_check(c);
/* Disable RDSEED on AMD Cyan Skillfish because of an error. */
if (c->x86_model == 0x47 && c->x86_stepping == 0x0) {
clear_cpu_cap(c, X86_FEATURE_RDSEED);
msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
pr_emerg("RDSEED is not reliable on this platform; disabling.\n");
}
}
static void init_amd_zen3(struct cpuinfo_x86 *c)


@@ -335,7 +335,6 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
struct thresh_restart {
struct threshold_block *b;
int reset;
int set_lvt_off;
int lvt_off;
u16 old_limit;
@@ -430,13 +429,13 @@ static void threshold_restart_bank(void *_tr)
rdmsr(tr->b->address, lo, hi);
if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
tr->reset = 1; /* limit cannot be lower than err count */
if (tr->reset) { /* reset err count and overflow bit */
hi =
(hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
(THRESHOLD_MAX - tr->b->threshold_limit);
/*
* Reset error count and overflow bit.
* This is done during init or after handling an interrupt.
*/
if (hi & MASK_OVERFLOW_HI || tr->set_lvt_off) {
hi &= ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI);
hi |= THRESHOLD_MAX - tr->b->threshold_limit;
} else if (tr->old_limit) { /* change limit w/o reset */
int new_count = (hi & THRESHOLD_MAX) +
(tr->old_limit - tr->b->threshold_limit);
@@ -1049,13 +1048,20 @@ static const char *get_name(unsigned int cpu, unsigned int bank, struct threshol
}
bank_type = smca_get_bank_type(cpu, bank);
if (bank_type >= N_SMCA_BANK_TYPES)
return NULL;
if (b && (bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2)) {
if (b->block < ARRAY_SIZE(smca_umc_block_names))
return smca_umc_block_names[b->block];
return NULL;
}
if (b && b->block) {
snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "th_block_%u", b->block);
return buf_mcatype;
}
if (bank_type >= N_SMCA_BANK_TYPES) {
snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN, "th_bank_%u", bank);
return buf_mcatype;
}
if (per_cpu(smca_bank_counts, cpu)[bank_type] == 1)


@@ -2704,15 +2704,9 @@ static int mce_cpu_dead(unsigned int cpu)
static int mce_cpu_online(unsigned int cpu)
{
struct timer_list *t = this_cpu_ptr(&mce_timer);
int ret;
mce_device_create(cpu);
ret = mce_threshold_create_device(cpu);
if (ret) {
mce_device_remove(cpu);
return ret;
}
mce_threshold_create_device(cpu);
mce_reenable_cpu();
mce_start_timer(t);
return 0;


@@ -517,6 +517,7 @@ void mce_intel_feature_init(struct cpuinfo_x86 *c)
void mce_intel_feature_clear(struct cpuinfo_x86 *c)
{
intel_clear_lmce();
cmci_clear();
}
bool intel_filter_mce(struct mce *m)


@@ -1782,6 +1782,10 @@ static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
struct kvm_vcpu *src_vcpu;
unsigned long i;
if (src->created_vcpus != atomic_read(&src->online_vcpus) ||
dst->created_vcpus != atomic_read(&dst->online_vcpus))
return -EBUSY;
if (!sev_es_guest(src))
return 0;


@@ -1737,8 +1737,19 @@ int kvm_xen_setup_evtchn(struct kvm *kvm,
{
struct kvm_vcpu *vcpu;
if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
return -EINVAL;
/*
* Don't check for the port being within range of max_evtchn_port().
* Userspace can configure whatever targets it likes; events just won't
* be delivered if/while the target is invalid, just like userspace can
* configure MSIs which target non-existent APICs.
*
* This allows the IRQ routing table to be restored on Live Migration and
* Live Update *independently* of other things like creating vCPUs, without
* imposing an ordering dependency on userspace. In this
* particular case, the problematic ordering would be with setting the
* Xen 'long mode' flag, which changes max_evtchn_port() to allow 4096
* instead of 1024 event channels.
*/
/* We only support 2 level event channels for now */
if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)


@@ -69,7 +69,7 @@ EXPORT_SYMBOL(ecc_get_curve);
void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
u64 *out, unsigned int ndigits)
{
int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64));
int diff = ndigits - DIV_ROUND_UP_POW2(nbytes, sizeof(u64));
unsigned int o = nbytes & 7;
__be64 msd = 0;
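For a power-of-two divisor, round-up division can be done without the
"n + d - 1" addition that makes the classic DIV_ROUND_UP() prone to integer
overflow for large n. A minimal sketch of one such formulation (the macro name
and the kernel's exact DIV_ROUND_UP_POW2() definition are assumptions here):

	/* Overflow-safe ceil(n / d) for power-of-two d: shift, then round up. */
	#define DIV_ROUND_UP_POW2_SKETCH(n, d) \
		(((n) >> __builtin_ctzll(d)) + !!((n) & ((d) - 1)))

	/* Example: ceil(9 / 8) == 2, ceil(8 / 8) == 1, with no wraparound. */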


@@ -243,23 +243,10 @@ static int acpi_battery_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
case POWER_SUPPLY_PROP_POWER_NOW:
if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) {
if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN)
ret = -ENODEV;
break;
}
val->intval = battery->rate_now * 1000;
/*
* When discharging, the current should be reported as a
* negative number as per the power supply class interface
* definition.
*/
if (psp == POWER_SUPPLY_PROP_CURRENT_NOW &&
(battery->state & ACPI_BATTERY_STATE_DISCHARGING) &&
acpi_battery_handle_discharging(battery)
== POWER_SUPPLY_STATUS_DISCHARGING)
val->intval = -val->intval;
else
val->intval = battery->rate_now * 1000;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:


@@ -852,6 +852,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc,
IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&card->pcidev->dev, IDT77252_PRV_PADDR(skb)))
return -ENOMEM;
error = -EINVAL;
@@ -1857,6 +1859,8 @@ add_rx_skb(struct idt77252_dev *card, int queue,
paddr = dma_map_single(&card->pcidev->dev, skb->data,
skb_end_pointer(skb) - skb->data,
DMA_FROM_DEVICE);
if (dma_mapping_error(&card->pcidev->dev, paddr))
goto outpoolrm;
IDT77252_PRV_PADDR(skb) = paddr;
if (push_rx_skb(card, skb, queue)) {
@@ -1871,6 +1875,7 @@ outunmap:
dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb),
skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE);
outpoolrm:
handle = IDT77252_PRV_POOL(skb);
card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL;


@@ -2120,9 +2120,7 @@ again:
goto out;
}
}
ret = nbd_start_device(nbd);
if (ret)
goto out;
if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
GFP_KERNEL);
@@ -2138,6 +2136,8 @@ again:
goto out;
}
set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);
ret = nbd_start_device(nbd);
out:
mutex_unlock(&nbd->config_lock);
if (!ret) {


@@ -2323,7 +2323,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || info.nr_hw_queues > UBLK_MAX_NR_QUEUES)
if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth ||
info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues)
return -EINVAL;
if (capable(CAP_SYS_ADMIN))


@@ -1241,7 +1241,7 @@ int ipmi_create_user(unsigned int if_num,
}
/* Not found, return an error */
rv = -EINVAL;
goto out_kfree;
goto out_unlock;
found:
if (atomic_add_return(1, &intf->nr_users) > max_users) {
@@ -1283,6 +1283,7 @@ int ipmi_create_user(unsigned int if_num,
out_kfree:
atomic_dec(&intf->nr_users);
out_unlock:
srcu_read_unlock(&ipmi_interfaces_srcu, index);
vfree(new_user);
return rv;


@@ -844,11 +844,23 @@ void drm_framebuffer_free(struct kref *kref)
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs)
{
unsigned int i;
int ret;
bool exists;
if (WARN_ON_ONCE(fb->dev != dev || !fb->format))
return -EINVAL;
for (i = 0; i < fb->format->num_planes; i++) {
if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)))
fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
if (fb->obj[i]) {
exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]);
if (exists)
fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
}
}
INIT_LIST_HEAD(&fb->filp_head);
fb->funcs = funcs;
@@ -857,7 +869,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB,
false, drm_framebuffer_free);
if (ret)
goto out;
goto err;
mutex_lock(&dev->mode_config.fb_lock);
dev->mode_config.num_fb++;
@@ -865,7 +877,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
mutex_unlock(&dev->mode_config.fb_lock);
drm_mode_object_register(dev, &fb->base);
out:
return 0;
err:
for (i = 0; i < fb->format->num_planes; i++) {
if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) {
drm_gem_object_handle_put_unlocked(fb->obj[i]);
fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
}
}
return ret;
}
EXPORT_SYMBOL(drm_framebuffer_init);
@@ -942,6 +963,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
unsigned int i;
for (i = 0; i < fb->format->num_planes; i++) {
if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
drm_gem_object_handle_put_unlocked(fb->obj[i]);
}
mutex_lock(&dev->mode_config.fb_lock);
list_del(&fb->head);


@@ -186,6 +186,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_private_object_fini);
static void drm_gem_object_handle_get(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));
if (obj->handle_count++ == 0)
drm_gem_object_get(obj);
}
/**
* drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
* @obj: GEM object
*
* Acquires a reference on the GEM buffer object's handle. Required to keep
* the GEM object alive. Call drm_gem_object_handle_put_unlocked() to
* release the reference. Does nothing if the buffer object has no handle.
*
* Returns:
* True if a handle exists, or false otherwise
*/
bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
guard(mutex)(&dev->object_name_lock);
/*
* First ref taken during GEM object creation, if any. Some
* drivers set up internal framebuffers with GEM objects that
* do not have a GEM handle. Hence, this counter can be zero.
*/
if (!obj->handle_count)
return false;
drm_gem_object_handle_get(obj);
return true;
}
/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
@@ -216,20 +256,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
}
}
static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
/**
* drm_gem_object_handle_put_unlocked - releases reference on user-space handle
* @obj: GEM object
*
* Releases a reference on the GEM buffer object's handle. Possibly releases
* the GEM buffer object and associated dma-buf objects.
*/
void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
bool final = false;
if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
return;
/*
* Must bump handle count first as this may be the last
* ref, in which case the object would disappear before we
* checked for a name
*/
* Must bump handle count first as this may be the last
* ref, in which case the object would disappear before
* we checked for a name.
*/
mutex_lock(&dev->object_name_lock);
if (--obj->handle_count == 0) {
@@ -253,6 +299,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
if (drm_WARN_ON(obj->dev, !data))
return 0;
if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
@@ -363,8 +412,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
int ret;
WARN_ON(!mutex_is_locked(&dev->object_name_lock));
if (obj->handle_count++ == 0)
drm_gem_object_get(obj);
drm_gem_object_handle_get(obj);
/*
* Get the user-visible handle using idr. Preload and perform
@@ -373,7 +422,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
idr_preload(GFP_KERNEL);
spin_lock(&file_priv->table_lock);
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT);
spin_unlock(&file_priv->table_lock);
idr_preload_end();
@@ -394,6 +443,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
goto err_revoke;
}
/* mirrors drm_gem_handle_delete to avoid races */
spin_lock(&file_priv->table_lock);
obj = idr_replace(&file_priv->object_idr, obj, handle);
WARN_ON(obj != NULL);
spin_unlock(&file_priv->table_lock);
*handlep = handle;
return 0;


@@ -155,6 +155,8 @@ void drm_sysfs_lease_event(struct drm_device *dev);
/* drm_gem.c */
int drm_gem_init(struct drm_device *dev);
bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj);
void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj);
int drm_gem_handle_create_tail(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);


@@ -601,6 +601,10 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
if (!ctx->drm_dev)
goto out;
/* check if crtc and vblank have been initialized properly */
if (!drm_dev_has_vblank(ctx->drm_dev))
goto out;
if (!ctx->i80_if) {
drm_crtc_handle_vblank(&ctx->crtc->base);


@@ -261,10 +261,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec)
if (!client->group) {
virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
err = dma_mapping_error(nvdec->dev, iova);
if (err < 0)
return err;
if (!virt)
return -ENOMEM;
} else {
virt = tegra_drm_alloc(tegra, size, &iova);
if (IS_ERR(virt))


@@ -254,6 +254,13 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
if (ret) {
dma_resv_unlock(&fbo->base.base._resv);
kfree(fbo);
return ret;
}
if (fbo->base.resource) {
ttm_resource_set_bo(fbo->base.resource, &fbo->base);
bo->resource = NULL;
@@ -262,12 +269,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.bulk_move = NULL;
}
ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
if (ret) {
kfree(fbo);
return ret;
}
ttm_bo_get(bo);
fbo->bo = bo;


@@ -305,6 +305,8 @@
#define USB_DEVICE_ID_ASUS_AK1D 0x1125
#define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408
#define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421
#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA 0xb824
#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2 0xb82c
#define USB_VENDOR_ID_CHUNGHWAT 0x2247
#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
@@ -807,6 +809,7 @@
#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
#define USB_DEVICE_ID_LENOVO_X1_TAB2 0x60a4
#define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
#define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
#define USB_DEVICE_ID_LENOVO_X12_TAB2 0x61ae
@@ -1501,4 +1504,7 @@
#define USB_VENDOR_ID_SIGNOTEC 0x2133
#define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011 0x0018
#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY 0x4c4a
#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155 0x4155
#endif


@@ -473,6 +473,7 @@ static int lenovo_input_mapping(struct hid_device *hdev,
return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field,
usage, bit, max);
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max);
default:
@@ -587,6 +588,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
if (ret)
@@ -782,6 +784,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
return lenovo_event_cptkbd(hdev, field, usage, value);
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
return lenovo_event_tp10ubkbd(hdev, field, usage, value);
default:
@@ -1065,6 +1068,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
break;
@@ -1296,6 +1300,7 @@ static int lenovo_probe(struct hid_device *hdev,
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
ret = lenovo_probe_tp10ubkbd(hdev);
break;
@@ -1383,6 +1388,7 @@ static void lenovo_remove(struct hid_device *hdev)
break;
case USB_DEVICE_ID_LENOVO_TP10UBKBD:
case USB_DEVICE_ID_LENOVO_X1_TAB:
case USB_DEVICE_ID_LENOVO_X1_TAB2:
case USB_DEVICE_ID_LENOVO_X1_TAB3:
lenovo_remove_tp10ubkbd(hdev);
break;
@@ -1433,6 +1439,8 @@ static const struct hid_device_id lenovo_devices[] = {
*/
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB2) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) },
{ }


@@ -2110,12 +2110,18 @@ static const struct hid_device_id mt_devices[] = {
HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC,
USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) },
/* Lenovo X1 TAB Gen 2 */
/* Lenovo X1 TAB Gen 1 */
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_LENOVO,
USB_DEVICE_ID_LENOVO_X1_TAB) },
/* Lenovo X1 TAB Gen 2 */
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
USB_VENDOR_ID_LENOVO,
USB_DEVICE_ID_LENOVO_X1_TAB2) },
/* Lenovo X1 TAB Gen 3 */
{ .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,


@@ -747,6 +747,8 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) },
@@ -894,6 +896,7 @@ static const struct hid_device_id hid_ignore_list[] = {
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) },
{ }
};


@@ -826,7 +826,7 @@ static int atkbd_probe(struct atkbd *atkbd)
if (atkbd_skip_getid(atkbd)) {
atkbd->id = 0xab83;
return 0;
goto deactivate_kbd;
}
/*
@@ -863,6 +863,7 @@ static int atkbd_probe(struct atkbd *atkbd)
return -1;
}
deactivate_kbd:
/*
* Make sure nothing is coming from the keyboard and disturbs our
* internal state.


@@ -2119,8 +2119,7 @@ int md_bitmap_get_stats(struct bitmap *bitmap, struct md_bitmap_stats *stats)
if (!bitmap)
return -ENOENT;
if (!bitmap->mddev->bitmap_info.external &&
!bitmap->storage.sb_page)
if (!bitmap->storage.sb_page)
return -EINVAL;
sb = kmap_local_page(bitmap->storage.sb_page);
stats->sync_size = le64_to_cpu(sb->sync_size);


@@ -3297,6 +3297,7 @@ static int raid1_reshape(struct mddev *mddev)
/* ok, everything is stopped */
oldpool = conf->r1bio_pool;
conf->r1bio_pool = newpool;
init_waitqueue_head(&conf->r1bio_pool.wait);
for (d = d2 = 0; d < conf->raid_disks; d++) {
struct md_rdev *rdev = conf->mirrors[d].rdev;


@@ -1205,8 +1205,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
rcu_read_unlock();
}
if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) {
raid_end_bio_io(r10_bio);
return;
}
rdev = read_balance(conf, r10_bio, &max_sectors);
if (!rdev) {
if (err_rdev) {
@@ -1428,8 +1431,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
}
sectors = r10_bio->sectors;
if (!regular_request_wait(mddev, conf, bio, sectors))
if (!regular_request_wait(mddev, conf, bio, sectors)) {
raid_end_bio_io(r10_bio);
return;
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
(mddev->reshape_backwards
? (bio->bi_iter.bi_sector < conf->reshape_safe &&


@@ -606,7 +606,7 @@ static int m_can_handle_lost_msg(struct net_device *dev)
struct can_frame *frame;
u32 timestamp = 0;
netdev_err(dev, "msg lost in rxf0\n");
netdev_dbg(dev, "msg lost in rxf0\n");
stats->rx_errors++;
stats->rx_over_errors++;


@@ -487,7 +487,9 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc)
if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc)
return -EINVAL;
}
for (i = 0; i < max_tc; i++) {
switch (ets->tc_tsa[i]) {
case IEEE_8021QAZ_TSA_STRICT:
break;


@@ -115,7 +115,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
tx_buf->action = XDP_REDIRECT;
tx_buf->xdpf = xdpf;
dma_unmap_addr_set(tx_buf, mapping, mapping);
dma_unmap_len_set(tx_buf, len, 0);
dma_unmap_len_set(tx_buf, len, len);
}
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)


@@ -211,7 +211,6 @@ struct ibmvnic_statistics {
u8 reserved[72];
} __packed __aligned(8);
#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
u64 batched_packets;
u64 direct_packets;
@@ -219,13 +218,18 @@ struct ibmvnic_tx_queue_stats {
u64 dropped_packets;
};
#define NUM_RX_STATS 3
#define NUM_TX_STATS \
(sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64))
struct ibmvnic_rx_queue_stats {
u64 packets;
u64 bytes;
u64 interrupts;
};
#define NUM_RX_STATS \
(sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64))
struct ibmvnic_acl_buffer {
__be32 len;
__be32 version;


@@ -28,6 +28,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev)
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
gc->phys_db_page_base = gc->bar0_pa +
mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
sriov_base_va = gc->bar0_va + sriov_base_off;


@@ -364,19 +364,17 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
if (likely(intr_status & XGMAC_RI)) {
u64_stats_update_begin(&stats->syncp);
u64_stats_inc(&stats->rx_normal_irq_n[chan]);
u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
u64_stats_update_begin(&stats->syncp);
u64_stats_inc(&stats->tx_normal_irq_n[chan]);
u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
if (likely(intr_status & XGMAC_RI)) {
u64_stats_update_begin(&stats->syncp);
u64_stats_inc(&stats->rx_normal_irq_n[chan]);
u64_stats_update_end(&stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
u64_stats_update_begin(&stats->syncp);
u64_stats_inc(&stats->tx_normal_irq_n[chan]);
u64_stats_update_end(&stats->syncp);
ret |= handle_tx;
}
/* Clear interrupts */


@@ -1309,7 +1309,7 @@ ll_temac_ethtools_set_ringparam(struct net_device *ndev,
if (ering->rx_pending > RX_BD_NUM_MAX ||
ering->rx_mini_pending ||
ering->rx_jumbo_pending ||
ering->rx_pending > TX_BD_NUM_MAX)
ering->tx_pending > TX_BD_NUM_MAX)
return -EINVAL;
if (netif_running(ndev))


@@ -310,7 +310,7 @@ static void lan88xx_link_change_notify(struct phy_device *phydev)
* As workaround, set to 10 before setting to 100
* at forced 100 F/H mode.
*/
if (!phydev->autoneg && phydev->speed == 100) {
if (phydev->state == PHY_NOLINK && !phydev->autoneg && phydev->speed == 100) {
/* disable phy interrupt */
temp = phy_read(phydev, LAN88XX_INT_MASK);
temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;


@@ -155,10 +155,29 @@ static int smsc_phy_reset(struct phy_device *phydev)
static int lan87xx_config_aneg(struct phy_device *phydev)
{
int rc;
u8 mdix_ctrl;
int val;
int rc;
switch (phydev->mdix_ctrl) {
/* When auto-negotiation is disabled (forced mode), the PHY's
* Auto-MDIX will continue toggling the TX/RX pairs.
*
* To establish a stable link, we must select a fixed MDI mode.
* If the user has not specified a fixed MDI mode (i.e., mdix_ctrl is
* 'auto'), we default to ETH_TP_MDI. This choice of ETH_TP_MDI
* mirrors the behavior the hardware would exhibit if the AUTOMDIX_EN
* strap were configured for a fixed MDI connection.
*/
if (phydev->autoneg == AUTONEG_DISABLE) {
if (phydev->mdix_ctrl == ETH_TP_MDI_AUTO)
mdix_ctrl = ETH_TP_MDI;
else
mdix_ctrl = phydev->mdix_ctrl;
} else {
mdix_ctrl = phydev->mdix_ctrl;
}
switch (mdix_ctrl) {
case ETH_TP_MDI:
val = SPECIAL_CTRL_STS_OVRRD_AMDIX_;
break;
@@ -167,7 +186,8 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
SPECIAL_CTRL_STS_AMDIX_STATE_;
break;
case ETH_TP_MDI_AUTO:
val = SPECIAL_CTRL_STS_AMDIX_ENABLE_;
val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
SPECIAL_CTRL_STS_AMDIX_ENABLE_;
break;
default:
return genphy_config_aneg(phydev);
@@ -183,7 +203,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
rc |= val;
phy_write(phydev, SPECIAL_CTRL_STS, rc);
phydev->mdix = phydev->mdix_ctrl;
phydev->mdix = mdix_ctrl;
return genphy_config_aneg(phydev);
}
@@ -261,6 +281,33 @@ int lan87xx_read_status(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(lan87xx_read_status);
static int lan87xx_phy_config_init(struct phy_device *phydev)
{
int rc;
/* The LAN87xx PHY's initial MDI-X mode is determined by the AUTOMDIX_EN
* hardware strap, but the driver cannot read the strap's status. This
* creates an unpredictable initial state.
*
* To ensure consistent and reliable behavior across all boards,
* override the strap configuration on initialization and force the PHY
* into a known state with Auto-MDIX enabled, which is the expected
* default for modern hardware.
*/
rc = phy_modify(phydev, SPECIAL_CTRL_STS,
SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
SPECIAL_CTRL_STS_AMDIX_STATE_,
SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
SPECIAL_CTRL_STS_AMDIX_ENABLE_);
if (rc < 0)
return rc;
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
return smsc_phy_config_init(phydev);
}
static int lan874x_phy_config_init(struct phy_device *phydev)
{
u16 val;
@@ -696,7 +743,7 @@ static struct phy_driver smsc_phy_driver[] = {
/* basic functions */
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.config_init = lan87xx_phy_config_init,
.soft_reset = smsc_phy_reset,
.config_aneg = lan87xx_config_aneg,


@@ -1432,6 +1432,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9071, 3)}, /* SIMCom 8230C ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */


@@ -583,7 +583,11 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
skb_queue_tail(q, skb);
while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
zd_mac_tx_status(hw, skb_dequeue(q),
skb = skb_dequeue(q);
if (!skb)
break;
zd_mac_tx_status(hw, skb,
mac->ack_pending ? mac->ack_signal : 0,
NULL);
mac->ack_pending = 0;


@@ -933,6 +933,17 @@ static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend
pin, is_suspend ? "suspend" : "hibernate");
}
/*
* Debounce enabled over suspend has shown issues with a GPIO
* being unable to wake the system; as we're only interested in
* the actual wakeup event, clear it.
*/
if (gpio_dev->saved_regs[i] & (DB_CNTRl_MASK << DB_CNTRL_OFF)) {
amd_gpio_set_debounce(gpio_dev, pin, 0);
pm_pr_dbg("Clearing debounce for GPIO #%d during %s.\n",
pin, is_suspend ? "suspend" : "hibernate");
}
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}


@@ -1031,6 +1031,25 @@ static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d,
test_bit(d->hwirq, pctrl->skip_wake_irqs);
}
static void msm_gpio_irq_init_valid_mask(struct gpio_chip *gc,
unsigned long *valid_mask,
unsigned int ngpios)
{
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
int i;
bitmap_fill(valid_mask, ngpios);
for (i = 0; i < ngpios; i++) {
g = &pctrl->soc->groups[i];
if (g->intr_detection_width != 1 &&
g->intr_detection_width != 2)
clear_bit(i, valid_mask);
}
}
static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1392,6 +1411,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
girq->parents[0] = pctrl->irq;
girq->init_valid_mask = msm_gpio_irq_init_valid_mask;
ret = gpiochip_add_data(&pctrl->chip, pctrl);
if (ret) {


@@ -133,8 +133,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
return ret;
clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]);
if (!clk_rate)
return -EINVAL;
if (!clk_rate) {
ret = -EINVAL;
goto out;
}
/* Make sure we use the bus clock and not the 26MHz clock */
if (pc->soc->has_ck_26m_sel)
@@ -153,9 +155,9 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
if (clkdiv > PWM_CLK_DIV_MAX) {
pwm_mediatek_clk_disable(chip, pwm);
dev_err(chip->dev, "period of %d ns not supported\n", period_ns);
return -EINVAL;
ret = -EINVAL;
goto out;
}
if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) {
@@ -172,9 +174,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period);
pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty);
out:
pwm_mediatek_clk_disable(chip, pwm);
return 0;
return ret;
}
static int pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm)


@@ -4392,6 +4392,7 @@ void do_unblank_screen(int leaving_gfx)
set_palette(vc);
set_cursor(vc);
vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num);
notify_update(vc);
}
EXPORT_SYMBOL(do_unblank_screen);


@@ -131,8 +131,6 @@ static inline const char *cdnsp_trb_type_string(u8 type)
return "Endpoint Not ready";
case TRB_HALT_ENDPOINT:
return "Halt Endpoint";
case TRB_FLUSH_ENDPOINT:
return "FLush Endpoint";
default:
return "UNKNOWN";
}
@@ -189,203 +187,203 @@ static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0,
switch (type) {
case TRB_LINK:
ret = snprintf(str, size,
"LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
field1, field0, GET_INTR_TARGET(field2),
cdnsp_trb_type_string(type),
field3 & TRB_IOC ? 'I' : 'i',
field3 & TRB_CHAIN ? 'C' : 'c',
field3 & TRB_TC ? 'T' : 't',
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
field1, field0, GET_INTR_TARGET(field2),
cdnsp_trb_type_string(type),
field3 & TRB_IOC ? 'I' : 'i',
field3 & TRB_CHAIN ? 'C' : 'c',
field3 & TRB_TC ? 'T' : 't',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_TRANSFER:
case TRB_COMPLETION:
case TRB_PORT_STATUS:
case TRB_HC_EVENT:
ret = snprintf(str, size,
"ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
" len %ld slot %ld flags %c:%c",
ep_num, ep_id % 2 ? "out" : "in",
TRB_TO_EP_INDEX(field3),
cdnsp_trb_type_string(type), field1, field0,
cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
field3 & EVENT_DATA ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
" len %ld slot %ld flags %c:%c",
ep_num, ep_id % 2 ? "out" : "in",
TRB_TO_EP_INDEX(field3),
cdnsp_trb_type_string(type), field1, field0,
cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
field3 & EVENT_DATA ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_MFINDEX_WRAP:
ret = snprintf(str, size, "%s: flags %c",
cdnsp_trb_type_string(type),
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size, "%s: flags %c",
cdnsp_trb_type_string(type),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_SETUP:
ret = snprintf(str, size,
"type '%s' bRequestType %02x bRequest %02x "
"wValue %02x%02x wIndex %02x%02x wLength %d "
"length %ld TD size %ld intr %ld Setup ID %ld "
"flags %c:%c:%c",
cdnsp_trb_type_string(type),
field0 & 0xff,
(field0 & 0xff00) >> 8,
(field0 & 0xff000000) >> 24,
(field0 & 0xff0000) >> 16,
(field1 & 0xff00) >> 8,
field1 & 0xff,
(field1 & 0xff000000) >> 16 |
(field1 & 0xff0000) >> 16,
TRB_LEN(field2), GET_TD_SIZE(field2),
GET_INTR_TARGET(field2),
TRB_SETUPID_TO_TYPE(field3),
field3 & TRB_IDT ? 'D' : 'd',
field3 & TRB_IOC ? 'I' : 'i',
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"type '%s' bRequestType %02x bRequest %02x "
"wValue %02x%02x wIndex %02x%02x wLength %d "
"length %ld TD size %ld intr %ld Setup ID %ld "
"flags %c:%c:%c",
cdnsp_trb_type_string(type),
field0 & 0xff,
(field0 & 0xff00) >> 8,
(field0 & 0xff000000) >> 24,
(field0 & 0xff0000) >> 16,
(field1 & 0xff00) >> 8,
field1 & 0xff,
(field1 & 0xff000000) >> 16 |
(field1 & 0xff0000) >> 16,
TRB_LEN(field2), GET_TD_SIZE(field2),
GET_INTR_TARGET(field2),
TRB_SETUPID_TO_TYPE(field3),
field3 & TRB_IDT ? 'D' : 'd',
field3 & TRB_IOC ? 'I' : 'i',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_DATA:
ret = snprintf(str, size,
"type '%s' Buffer %08x%08x length %ld TD size %ld "
"intr %ld flags %c:%c:%c:%c:%c:%c:%c",
cdnsp_trb_type_string(type),
field1, field0, TRB_LEN(field2),
GET_TD_SIZE(field2),
GET_INTR_TARGET(field2),
field3 & TRB_IDT ? 'D' : 'i',
field3 & TRB_IOC ? 'I' : 'i',
field3 & TRB_CHAIN ? 'C' : 'c',
field3 & TRB_NO_SNOOP ? 'S' : 's',
field3 & TRB_ISP ? 'I' : 'i',
field3 & TRB_ENT ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"type '%s' Buffer %08x%08x length %ld TD size %ld "
"intr %ld flags %c:%c:%c:%c:%c:%c:%c",
cdnsp_trb_type_string(type),
field1, field0, TRB_LEN(field2),
GET_TD_SIZE(field2),
GET_INTR_TARGET(field2),
field3 & TRB_IDT ? 'D' : 'i',
field3 & TRB_IOC ? 'I' : 'i',
field3 & TRB_CHAIN ? 'C' : 'c',
field3 & TRB_NO_SNOOP ? 'S' : 's',
field3 & TRB_ISP ? 'I' : 'i',
field3 & TRB_ENT ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_STATUS:
ret = snprintf(str, size,
"Buffer %08x%08x length %ld TD size %ld intr"
"%ld type '%s' flags %c:%c:%c:%c",
field1, field0, TRB_LEN(field2),
GET_TD_SIZE(field2),
GET_INTR_TARGET(field2),
cdnsp_trb_type_string(type),
field3 & TRB_IOC ? 'I' : 'i',
field3 & TRB_CHAIN ? 'C' : 'c',
field3 & TRB_ENT ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"Buffer %08x%08x length %ld TD size %ld intr"
"%ld type '%s' flags %c:%c:%c:%c",
field1, field0, TRB_LEN(field2),
GET_TD_SIZE(field2),
GET_INTR_TARGET(field2),
cdnsp_trb_type_string(type),
field3 & TRB_IOC ? 'I' : 'i',
field3 & TRB_CHAIN ? 'C' : 'c',
field3 & TRB_ENT ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_NORMAL:
case TRB_ISOC:
case TRB_EVENT_DATA:
case TRB_TR_NOOP:
ret = snprintf(str, size,
"type '%s' Buffer %08x%08x length %ld "
"TD size %ld intr %ld "
"flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
cdnsp_trb_type_string(type),
field1, field0, TRB_LEN(field2),
GET_TD_SIZE(field2),
GET_INTR_TARGET(field2),
field3 & TRB_BEI ? 'B' : 'b',
field3 & TRB_IDT ? 'T' : 't',
field3 & TRB_IOC ? 'I' : 'i',
field3 & TRB_CHAIN ? 'C' : 'c',
field3 & TRB_NO_SNOOP ? 'S' : 's',
field3 & TRB_ISP ? 'I' : 'i',
field3 & TRB_ENT ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c',
!(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
ret = scnprintf(str, size,
"type '%s' Buffer %08x%08x length %ld "
"TD size %ld intr %ld "
"flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
cdnsp_trb_type_string(type),
field1, field0, TRB_LEN(field2),
GET_TD_SIZE(field2),
GET_INTR_TARGET(field2),
field3 & TRB_BEI ? 'B' : 'b',
field3 & TRB_IDT ? 'T' : 't',
field3 & TRB_IOC ? 'I' : 'i',
field3 & TRB_CHAIN ? 'C' : 'c',
field3 & TRB_NO_SNOOP ? 'S' : 's',
field3 & TRB_ISP ? 'I' : 'i',
field3 & TRB_ENT ? 'E' : 'e',
field3 & TRB_CYCLE ? 'C' : 'c',
!(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
break;
case TRB_CMD_NOOP:
case TRB_ENABLE_SLOT:
ret = snprintf(str, size, "%s: flags %c",
cdnsp_trb_type_string(type),
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size, "%s: flags %c",
cdnsp_trb_type_string(type),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_DISABLE_SLOT:
ret = snprintf(str, size, "%s: slot %ld flags %c",
cdnsp_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size, "%s: slot %ld flags %c",
cdnsp_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_ADDR_DEV:
ret = snprintf(str, size,
"%s: ctx %08x%08x slot %ld flags %c:%c",
cdnsp_trb_type_string(type), field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_BSR ? 'B' : 'b',
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"%s: ctx %08x%08x slot %ld flags %c:%c",
cdnsp_trb_type_string(type), field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_BSR ? 'B' : 'b',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_CONFIG_EP:
ret = snprintf(str, size,
"%s: ctx %08x%08x slot %ld flags %c:%c",
cdnsp_trb_type_string(type), field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_DC ? 'D' : 'd',
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"%s: ctx %08x%08x slot %ld flags %c:%c",
cdnsp_trb_type_string(type), field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_DC ? 'D' : 'd',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_EVAL_CONTEXT:
ret = snprintf(str, size,
"%s: ctx %08x%08x slot %ld flags %c",
cdnsp_trb_type_string(type), field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"%s: ctx %08x%08x slot %ld flags %c",
cdnsp_trb_type_string(type), field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_RESET_EP:
case TRB_HALT_ENDPOINT:
case TRB_FLUSH_ENDPOINT:
ret = snprintf(str, size,
"%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
cdnsp_trb_type_string(type),
ep_num, ep_id % 2 ? "out" : "in",
TRB_TO_EP_INDEX(field3), field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c %c",
cdnsp_trb_type_string(type),
ep_num, ep_id % 2 ? "out" : "in",
TRB_TO_EP_INDEX(field3), field1, field0,
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c',
field3 & TRB_ESP ? 'P' : 'p');
break;
case TRB_STOP_RING:
ret = snprintf(str, size,
"%s: ep%d%s(%d) slot %ld sp %d flags %c",
cdnsp_trb_type_string(type),
ep_num, ep_id % 2 ? "out" : "in",
TRB_TO_EP_INDEX(field3),
TRB_TO_SLOT_ID(field3),
TRB_TO_SUSPEND_PORT(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"%s: ep%d%s(%d) slot %ld sp %d flags %c",
cdnsp_trb_type_string(type),
ep_num, ep_id % 2 ? "out" : "in",
TRB_TO_EP_INDEX(field3),
TRB_TO_SLOT_ID(field3),
TRB_TO_SUSPEND_PORT(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_SET_DEQ:
ret = snprintf(str, size,
"%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c",
cdnsp_trb_type_string(type),
ep_num, ep_id % 2 ? "out" : "in",
TRB_TO_EP_INDEX(field3), field1, field0,
TRB_TO_STREAM_ID(field2),
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld flags %c",
cdnsp_trb_type_string(type),
ep_num, ep_id % 2 ? "out" : "in",
TRB_TO_EP_INDEX(field3), field1, field0,
TRB_TO_STREAM_ID(field2),
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_RESET_DEV:
ret = snprintf(str, size, "%s: slot %ld flags %c",
cdnsp_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size, "%s: slot %ld flags %c",
cdnsp_trb_type_string(type),
TRB_TO_SLOT_ID(field3),
field3 & TRB_CYCLE ? 'C' : 'c');
break;
case TRB_ENDPOINT_NRDY:
temp = TRB_TO_HOST_STREAM(field2);
ret = snprintf(str, size,
"%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
cdnsp_trb_type_string(type),
ep_num, ep_id % 2 ? "out" : "in",
TRB_TO_EP_INDEX(field3), temp,
temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
temp == STREAM_REJECTED ? "(REJECTED)" : "",
TRB_TO_DEV_STREAM(field0),
field3 & TRB_STAT ? 'S' : 's',
field3 & TRB_CYCLE ? 'C' : 'c');
ret = scnprintf(str, size,
"%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
cdnsp_trb_type_string(type),
ep_num, ep_id % 2 ? "out" : "in",
TRB_TO_EP_INDEX(field3), temp,
temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
temp == STREAM_REJECTED ? "(REJECTED)" : "",
TRB_TO_DEV_STREAM(field0),
field3 & TRB_STAT ? 'S' : 's',
field3 & TRB_CYCLE ? 'C' : 'c');
break;
default:
ret = snprintf(str, size,
"type '%s' -> raw %08x %08x %08x %08x",
cdnsp_trb_type_string(type),
field0, field1, field2, field3);
ret = scnprintf(str, size,
"type '%s' -> raw %08x %08x %08x %08x",
cdnsp_trb_type_string(type),
field0, field1, field2, field3);
}
if (ret >= size)
pr_info("CDNSP: buffer overflowed.\n");
if (ret == size - 1)
pr_info("CDNSP: buffer may be truncated.\n");
return str;
}
@@ -468,32 +466,32 @@ static inline const char *cdnsp_decode_portsc(char *str, size_t size,
{
int ret;
ret = snprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ",
portsc & PORT_POWER ? "Powered" : "Powered-off",
portsc & PORT_CONNECT ? "Connected" : "Not-connected",
portsc & PORT_PED ? "Enabled" : "Disabled",
cdnsp_portsc_link_state_string(portsc),
DEV_PORT_SPEED(portsc));
ret = scnprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ",
portsc & PORT_POWER ? "Powered" : "Powered-off",
portsc & PORT_CONNECT ? "Connected" : "Not-connected",
portsc & PORT_PED ? "Enabled" : "Disabled",
cdnsp_portsc_link_state_string(portsc),
DEV_PORT_SPEED(portsc));
if (portsc & PORT_RESET)
ret += snprintf(str + ret, size - ret, "In-Reset ");
ret += scnprintf(str + ret, size - ret, "In-Reset ");
ret += snprintf(str + ret, size - ret, "Change: ");
ret += scnprintf(str + ret, size - ret, "Change: ");
if (portsc & PORT_CSC)
ret += snprintf(str + ret, size - ret, "CSC ");
ret += scnprintf(str + ret, size - ret, "CSC ");
if (portsc & PORT_WRC)
ret += snprintf(str + ret, size - ret, "WRC ");
ret += scnprintf(str + ret, size - ret, "WRC ");
if (portsc & PORT_RC)
ret += snprintf(str + ret, size - ret, "PRC ");
ret += scnprintf(str + ret, size - ret, "PRC ");
if (portsc & PORT_PLC)
ret += snprintf(str + ret, size - ret, "PLC ");
ret += scnprintf(str + ret, size - ret, "PLC ");
if (portsc & PORT_CEC)
ret += snprintf(str + ret, size - ret, "CEC ");
ret += snprintf(str + ret, size - ret, "Wake: ");
ret += scnprintf(str + ret, size - ret, "CEC ");
ret += scnprintf(str + ret, size - ret, "Wake: ");
if (portsc & PORT_WKCONN_E)
ret += snprintf(str + ret, size - ret, "WCE ");
ret += scnprintf(str + ret, size - ret, "WCE ");
if (portsc & PORT_WKDISC_E)
ret += snprintf(str + ret, size - ret, "WDE ");
ret += scnprintf(str + ret, size - ret, "WDE ");
return str;
}
@@ -565,20 +563,20 @@ static inline const char *cdnsp_decode_ep_context(char *str, size_t size,
avg = EP_AVG_TRB_LENGTH(tx_info);
ret = snprintf(str, size, "State %s mult %d max P. Streams %d %s",
cdnsp_ep_state_string(ep_state), mult,
max_pstr, lsa ? "LSA " : "");
ret = scnprintf(str, size, "State %s mult %d max P. Streams %d %s",
cdnsp_ep_state_string(ep_state), mult,
max_pstr, lsa ? "LSA " : "");
ret += snprintf(str + ret, size - ret,
"interval %d us max ESIT payload %d CErr %d ",
(1 << interval) * 125, esit, cerr);
ret += scnprintf(str + ret, size - ret,
"interval %d us max ESIT payload %d CErr %d ",
(1 << interval) * 125, esit, cerr);
ret += snprintf(str + ret, size - ret,
"Type %s %sburst %d maxp %d deq %016llx ",
cdnsp_ep_type_string(ep_type), hid ? "HID" : "",
burst, maxp, deq);
ret += scnprintf(str + ret, size - ret,
"Type %s %sburst %d maxp %d deq %016llx ",
cdnsp_ep_type_string(ep_type), hid ? "HID" : "",
burst, maxp, deq);
ret += snprintf(str + ret, size - ret, "avg trb len %d", avg);
ret += scnprintf(str + ret, size - ret, "avg trb len %d", avg);
return str;
}


@@ -414,6 +414,7 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
void cdnsp_setup_analyze(struct cdnsp_device *pdev)
{
struct usb_ctrlrequest *ctrl = &pdev->setup;
struct cdnsp_ep *pep;
int ret = -EINVAL;
u16 len;
@@ -427,10 +428,21 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
goto out;
}
pep = &pdev->eps[0];
/* Restore the ep0 to Stopped/Running state. */
if (pdev->eps[0].ep_state & EP_HALTED) {
trace_cdnsp_ep0_halted("Restore to normal state");
cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
if (pep->ep_state & EP_HALTED) {
if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_HALTED)
cdnsp_halt_endpoint(pdev, pep, 0);
/*
* For ep0 on SSP2, the Halt Endpoint command preserves the current
* endpoint state, and the driver has to synchronize the
* software endpoint state with endpoint output context
* state.
*/
pep->ep_state &= ~EP_HALTED;
pep->ep_state |= EP_STOPPED;
}
/*


@@ -1061,10 +1061,8 @@ static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
pep->ep_state |= EP_DIS_IN_RROGRESS;
/* Endpoint was unconfigured by Reset Device command. */
if (!(pep->ep_state & EP_UNCONFIGURED)) {
if (!(pep->ep_state & EP_UNCONFIGURED))
cdnsp_cmd_stop_ep(pdev, pep);
cdnsp_cmd_flush_ep(pdev, pep);
}
/* Remove all queued USB requests. */
while (!list_empty(&pep->pending_list)) {
@@ -1461,8 +1459,6 @@ static void cdnsp_stop(struct cdnsp_device *pdev)
{
u32 temp;
cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);
/* Remove internally queued request for ep0. */
if (!list_empty(&pdev->eps[0].pending_list)) {
struct cdnsp_request *req;


@@ -987,6 +987,12 @@ enum cdnsp_setup_dev {
#define STREAM_ID_FOR_TRB(p) ((((p)) << 16) & GENMASK(31, 16))
#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
/*
* Halt Endpoint Command TRB field.
* The ESP bit only exists in the SSP2 controller.
*/
#define TRB_ESP BIT(9)
/* Link TRB specific fields. */
#define TRB_TC BIT(1)
@@ -1138,8 +1144,6 @@ union cdnsp_trb {
#define TRB_HALT_ENDPOINT 54
/* Doorbell Overflow Event. */
#define TRB_DRB_OVERFLOW 57
-/* Flush Endpoint Command. */
-#define TRB_FLUSH_ENDPOINT 58
#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
@@ -1552,8 +1556,6 @@ void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index);
void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev,
unsigned int ep_index);
-void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
-unsigned int ep_index);
void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num);
void cdnsp_queue_reset_device(struct cdnsp_device *pdev);
void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
@@ -1587,7 +1589,6 @@ void cdnsp_irq_reset(struct cdnsp_device *pdev);
int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
struct cdnsp_ep *pep, int value);
int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
-int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
void cdnsp_setup_analyze(struct cdnsp_device *pdev);
int cdnsp_status_stage(struct cdnsp_device *pdev);
int cdnsp_reset_device(struct cdnsp_device *pdev);


@@ -2159,19 +2159,6 @@ ep_stopped:
return ret;
}
-int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
-{
-int ret;
-cdnsp_queue_flush_endpoint(pdev, pep->idx);
-cdnsp_ring_cmd_db(pdev);
-ret = cdnsp_wait_for_cmd_compl(pdev);
-trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);
-return ret;
-}
/*
* The transfer burst count field of the isochronous TRB defines the number of
* bursts that are required to move all packets in this TD. Only SuperSpeed
@@ -2498,18 +2485,8 @@ void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
{
cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
SLOT_ID_FOR_TRB(pdev->slot_id) |
-EP_ID_FOR_TRB(ep_index));
-}
-/*
-* Queue a flush endpoint request on the command ring.
-*/
-void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
-unsigned int ep_index)
-{
-cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
-SLOT_ID_FOR_TRB(pdev->slot_id) |
-EP_ID_FOR_TRB(ep_index));
+EP_ID_FOR_TRB(ep_index) |
+(!ep_index ? TRB_ESP : 0));
}
void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)


@@ -2177,12 +2177,15 @@ assert_reset:
static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
u32 reg;
+int ret;
switch (dwc->current_dr_role) {
case DWC3_GCTL_PRTCAP_DEVICE:
if (pm_runtime_suspended(dwc->dev))
break;
-dwc3_gadget_suspend(dwc);
+ret = dwc3_gadget_suspend(dwc);
+if (ret)
+return ret;
synchronize_irq(dwc->irq_gadget);
dwc3_core_exit(dwc);
break;
@@ -2213,7 +2216,9 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
break;
if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) {
-dwc3_gadget_suspend(dwc);
+ret = dwc3_gadget_suspend(dwc);
+if (ret)
+return ret;
synchronize_irq(dwc->irq_gadget);
}


@@ -4804,8 +4804,15 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
int ret;
ret = dwc3_gadget_soft_disconnect(dwc);
-if (ret)
-goto err;
+/*
+* Attempt to reset the controller's state. Likely no
+* communication can be established until the host
+* performs a port reset.
+*/
+if (ret && dwc->softconnect) {
+dwc3_gadget_soft_connect(dwc);
+return -EAGAIN;
+}
spin_lock_irqsave(&dwc->lock, flags);
if (dwc->gadget_driver)
@@ -4813,17 +4820,6 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
-err:
-/*
-* Attempt to reset the controller's state. Likely no
-* communication can be established until the host
-* performs a port reset.
-*/
-if (dwc->softconnect)
-dwc3_gadget_soft_connect(dwc);
-return ret;
}
int dwc3_gadget_resume(struct dwc3 *dwc)


@@ -539,20 +539,16 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
static int gs_start_io(struct gs_port *port)
{
struct list_head *head = &port->read_pool;
-struct usb_ep *ep;
+struct usb_ep *ep = port->port_usb->out;
int status;
unsigned started;
-if (!port->port_usb || !port->port.tty)
-return -EIO;
/* Allocate RX and TX I/O buffers. We can't easily do this much
* earlier (with GFP_KERNEL) because the requests are coupled to
* endpoints, as are the packet sizes we'll be using. Different
* configurations may use different endpoints with a given port;
* and high speed vs full speed changes packet sizes too.
*/
-ep = port->port_usb->out;
status = gs_alloc_requests(ep, head, gs_read_complete,
&port->read_allocated);
if (status)


@@ -488,7 +488,7 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_inode *inode);
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
-struct btrfs_root *root, struct btrfs_inode *inode);
+struct btrfs_inode *inode);
int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct btrfs_inode *inode);
int btrfs_orphan_cleanup(struct btrfs_root *root);
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size);


@@ -1104,11 +1104,21 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
if (ret < 0)
goto out_locked;
-ASSERT(ret == 0);
+/*
+* If ret is 1 (no key found), it means this is an empty block group,
+* without any extents allocated from it and there's no block group
+* item (key BTRFS_BLOCK_GROUP_ITEM_KEY) located in the extent tree
+* because we are using the block group tree feature, so block group
+* items are stored in the block group tree. It also means there are no
+* extents allocated for block groups with a start offset beyond this
+* block group's end offset (this is the last, highest, block group).
+*/
+if (!btrfs_fs_compat_ro(trans->fs_info, BLOCK_GROUP_TREE))
+ASSERT(ret == 0);
start = block_group->start;
end = block_group->start + block_group->length;
-while (1) {
+while (ret == 0) {
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
if (key.type == BTRFS_EXTENT_ITEM_KEY ||
@@ -1138,8 +1148,6 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
ret = btrfs_next_item(extent_root, path);
if (ret < 0)
goto out_locked;
-if (ret)
-break;
}
if (start < end) {
ret = __add_to_free_space_tree(trans, block_group, path2,


@@ -3077,7 +3077,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
goto out;
}
trans->block_rsv = &inode->block_rsv;
-ret = btrfs_update_inode_fallback(trans, root, inode);
+ret = btrfs_update_inode_fallback(trans, inode);
if (ret) /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
goto out;
@@ -3143,7 +3143,7 @@ int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
&cached_state);
btrfs_inode_safe_disk_i_size_write(inode, 0);
-ret = btrfs_update_inode_fallback(trans, root, inode);
+ret = btrfs_update_inode_fallback(trans, inode);
if (ret) { /* -ENOMEM or corruption */
btrfs_abort_transaction(trans, ret);
goto out;
@@ -4014,9 +4014,9 @@ failed:
/*
* copy everything in the in-memory inode into the btree.
*/
-noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
-struct btrfs_root *root,
-struct btrfs_inode *inode)
+int btrfs_update_inode(struct btrfs_trans_handle *trans,
+struct btrfs_root *root,
+struct btrfs_inode *inode)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
@@ -4043,13 +4043,13 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
}
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
-struct btrfs_root *root, struct btrfs_inode *inode)
+struct btrfs_inode *inode)
{
int ret;
-ret = btrfs_update_inode(trans, root, inode);
+ret = btrfs_update_inode(trans, inode->root, inode);
if (ret == -ENOSPC)
-return btrfs_update_inode_item(trans, root, inode);
+return btrfs_update_inode_item(trans, inode->root, inode);
return ret;
}
@@ -4327,7 +4327,7 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
inode_inc_iversion(&dir->vfs_inode);
dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode);
-ret = btrfs_update_inode_fallback(trans, root, dir);
+ret = btrfs_update_inode_fallback(trans, dir);
if (ret)
btrfs_abort_transaction(trans, ret);
out:


@@ -1860,7 +1860,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size +
fname.disk_name.len * 2);
parent_inode->i_mtime = inode_set_ctime_current(parent_inode);
-ret = btrfs_update_inode_fallback(trans, parent_root, BTRFS_I(parent_inode));
+ret = btrfs_update_inode_fallback(trans, BTRFS_I(parent_inode));
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;


@@ -140,11 +140,14 @@ static void wait_log_commit(struct btrfs_root *root, int transid);
* and once to do all the other items.
*/
-static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
+static struct btrfs_inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
{
unsigned int nofs_flag;
struct inode *inode;
+/* Only meant to be called for subvolume roots and not for log roots. */
+ASSERT(is_fstree(btrfs_root_id(root)));
/*
* We're holding a transaction handle whether we are logging or
* replaying a log tree, so we must make sure NOFS semantics apply
@@ -156,7 +159,10 @@ static struct inode *btrfs_iget_logging(u64 objectid, struct btrfs_root *root)
inode = btrfs_iget(root->fs_info->sb, objectid, root);
memalloc_nofs_restore(nofs_flag);
-return inode;
+if (IS_ERR(inode))
+return ERR_CAST(inode);
+return BTRFS_I(inode);
}
/*
@@ -613,20 +619,6 @@ static int read_alloc_one_name(struct extent_buffer *eb, void *start, int len,
return 0;
}
-/*
-* simple helper to read an inode off the disk from a given root
-* This can only be called for subvolume roots and not for the log
-*/
-static noinline struct inode *read_one_inode(struct btrfs_root *root,
-u64 objectid)
-{
-struct inode *inode;
-inode = btrfs_iget_logging(objectid, root);
-if (IS_ERR(inode))
-inode = NULL;
-return inode;
-}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
* subvolume 'root'. path is released on entry and should be released
@@ -681,10 +673,15 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
goto out;
}
-inode = read_one_inode(root, key->objectid);
-if (!inode) {
-ret = -EIO;
-goto out;
+{
+struct btrfs_inode *btrfs_inode;
+btrfs_inode = btrfs_iget_logging(key->objectid, root);
+if (IS_ERR(btrfs_inode)) {
+ret = PTR_ERR(btrfs_inode);
+goto out;
+}
+inode = &btrfs_inode->vfs_inode;
}
/*
@@ -963,10 +960,16 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
-inode = read_one_inode(root, location.objectid);
-if (!inode) {
-ret = -EIO;
-goto out;
+{
+struct btrfs_inode *btrfs_inode;
+btrfs_inode = btrfs_iget_logging(location.objectid, root);
+if (IS_ERR(btrfs_inode)) {
+ret = PTR_ERR(btrfs_inode);
+inode = NULL;
+goto out;
+}
+inode = &btrfs_inode->vfs_inode;
}
ret = link_to_fixup_dir(trans, root, path, location.objectid);
@@ -1183,18 +1186,21 @@ again:
kfree(victim_name.name);
return ret;
} else if (!ret) {
-ret = -ENOENT;
-victim_parent = read_one_inode(root,
-parent_objectid);
-if (victim_parent) {
+struct btrfs_inode *btrfs_victim;
+btrfs_victim = btrfs_iget_logging(parent_objectid, root);
+if (IS_ERR(btrfs_victim)) {
+ret = PTR_ERR(btrfs_victim);
+} else {
+victim_parent = &btrfs_victim->vfs_inode;
inc_nlink(&inode->vfs_inode);
btrfs_release_path(path);
ret = unlink_inode_for_log_replay(trans,
BTRFS_I(victim_parent),
inode, &victim_name);
+iput(victim_parent);
}
-iput(victim_parent);
kfree(victim_name.name);
if (ret)
return ret;
@@ -1331,11 +1337,16 @@ again:
struct inode *dir;
btrfs_release_path(path);
-dir = read_one_inode(root, parent_id);
-if (!dir) {
-ret = -ENOENT;
-kfree(name.name);
-goto out;
+{
+struct btrfs_inode *btrfs_dir;
+btrfs_dir = btrfs_iget_logging(parent_id, root);
+if (IS_ERR(btrfs_dir)) {
+ret = PTR_ERR(btrfs_dir);
+kfree(name.name);
+goto out;
+}
+dir = &btrfs_dir->vfs_inode;
}
ret = unlink_inode_for_log_replay(trans, BTRFS_I(dir),
inode, &name);
@@ -1406,16 +1417,28 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
* copy the back ref in. The link count fixup code will take
* care of the rest
*/
-dir = read_one_inode(root, parent_objectid);
-if (!dir) {
-ret = -ENOENT;
-goto out;
+{
+struct btrfs_inode *btrfs_dir;
+btrfs_dir = btrfs_iget_logging(parent_objectid, root);
+if (IS_ERR(btrfs_dir)) {
+ret = PTR_ERR(btrfs_dir);
+dir = NULL;
+goto out;
+}
+dir = &btrfs_dir->vfs_inode;
}
-inode = read_one_inode(root, inode_objectid);
-if (!inode) {
-ret = -EIO;
-goto out;
+{
+struct btrfs_inode *btrfs_inode;
+btrfs_inode = btrfs_iget_logging(inode_objectid, root);
+if (IS_ERR(btrfs_inode)) {
+ret = PTR_ERR(btrfs_inode);
+inode = NULL;
+goto out;
+}
+inode = &btrfs_inode->vfs_inode;
}
while (ref_ptr < ref_end) {
@@ -1426,11 +1449,16 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
* parent object can change from one array
* item to another.
*/
-if (!dir)
-dir = read_one_inode(root, parent_objectid);
if (!dir) {
-ret = -ENOENT;
-goto out;
+struct btrfs_inode *btrfs_dir;
+btrfs_dir = btrfs_iget_logging(parent_objectid, root);
+if (IS_ERR(btrfs_dir)) {
+ret = PTR_ERR(btrfs_dir);
+dir = NULL;
+goto out;
+}
+dir = &btrfs_dir->vfs_inode;
}
} else {
ret = ref_get_fields(eb, ref_ptr, &name, &ref_index);
@@ -1504,8 +1532,7 @@ out:
return ret;
}
-static int count_inode_extrefs(struct btrfs_root *root,
-struct btrfs_inode *inode, struct btrfs_path *path)
+static int count_inode_extrefs(struct btrfs_inode *inode, struct btrfs_path *path)
{
int ret = 0;
int name_len;
@@ -1519,8 +1546,8 @@ static int count_inode_extrefs(struct btrfs_root *root,
struct extent_buffer *leaf;
while (1) {
-ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
-&extref, &offset);
+ret = btrfs_find_one_extref(inode->root, inode_objectid, offset,
+path, &extref, &offset);
if (ret)
break;
@@ -1548,8 +1575,7 @@ static int count_inode_extrefs(struct btrfs_root *root,
return nlink;
}
-static int count_inode_refs(struct btrfs_root *root,
-struct btrfs_inode *inode, struct btrfs_path *path)
+static int count_inode_refs(struct btrfs_inode *inode, struct btrfs_path *path)
{
int ret;
struct btrfs_key key;
@@ -1564,7 +1590,7 @@ static int count_inode_refs(struct btrfs_root *root,
key.offset = (u64)-1;
while (1) {
-ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ret = btrfs_search_slot(NULL, inode->root, &key, path, 0, 0);
if (ret < 0)
break;
if (ret > 0) {
@@ -1616,9 +1642,9 @@ process_slot:
* will free the inode.
*/
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
-struct btrfs_root *root,
struct inode *inode)
{
+struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
int ret;
u64 nlink = 0;
@@ -1628,13 +1654,13 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
-ret = count_inode_refs(root, BTRFS_I(inode), path);
+ret = count_inode_refs(BTRFS_I(inode), path);
if (ret < 0)
goto out;
nlink = ret;
-ret = count_inode_extrefs(root, BTRFS_I(inode), path);
+ret = count_inode_extrefs(BTRFS_I(inode), path);
if (ret < 0)
goto out;
@@ -1700,13 +1726,18 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
break;
btrfs_release_path(path);
-inode = read_one_inode(root, key.offset);
-if (!inode) {
-ret = -EIO;
-break;
+{
+struct btrfs_inode *btrfs_inode;
+btrfs_inode = btrfs_iget_logging(key.offset, root);
+if (IS_ERR(btrfs_inode)) {
+ret = PTR_ERR(btrfs_inode);
+break;
+}
+inode = &btrfs_inode->vfs_inode;
}
-ret = fixup_inode_link_count(trans, root, inode);
+ret = fixup_inode_link_count(trans, inode);
iput(inode);
if (ret)
break;
@@ -1737,9 +1768,14 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
int ret = 0;
struct inode *inode;
-inode = read_one_inode(root, objectid);
-if (!inode)
-return -EIO;
+{
+struct btrfs_inode *btrfs_inode;
+btrfs_inode = btrfs_iget_logging(objectid, root);
+if (IS_ERR(btrfs_inode))
+return PTR_ERR(btrfs_inode);
+inode = &btrfs_inode->vfs_inode;
+}
key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
@@ -1777,14 +1813,24 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
struct inode *dir;
int ret;
-inode = read_one_inode(root, location->objectid);
-if (!inode)
-return -ENOENT;
+{
+struct btrfs_inode *btrfs_inode;
-dir = read_one_inode(root, dirid);
-if (!dir) {
-iput(inode);
-return -EIO;
+btrfs_inode = btrfs_iget_logging(location->objectid, root);
+if (IS_ERR(btrfs_inode))
+return PTR_ERR(btrfs_inode);
+inode = &btrfs_inode->vfs_inode;
+}
+{
+struct btrfs_inode *btrfs_dir;
+btrfs_dir = btrfs_iget_logging(dirid, root);
+if (IS_ERR(btrfs_dir)) {
+iput(inode);
+return PTR_ERR(btrfs_dir);
+}
+dir = &btrfs_dir->vfs_inode;
+}
ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
@@ -1862,9 +1908,14 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
bool update_size = true;
bool name_added = false;
-dir = read_one_inode(root, key->objectid);
-if (!dir)
-return -EIO;
+{
+struct btrfs_inode *btrfs_dir;
+btrfs_dir = btrfs_iget_logging(key->objectid, root);
+if (IS_ERR(btrfs_dir))
+return PTR_ERR(btrfs_dir);
+dir = &btrfs_dir->vfs_inode;
+}
ret = read_alloc_one_name(eb, di + 1, btrfs_dir_name_len(eb, di), &name);
if (ret)
@@ -2166,10 +2217,16 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
btrfs_dir_item_key_to_cpu(eb, di, &location);
btrfs_release_path(path);
btrfs_release_path(log_path);
-inode = read_one_inode(root, location.objectid);
-if (!inode) {
-ret = -EIO;
-goto out;
+{
+struct btrfs_inode *btrfs_inode;
+btrfs_inode = btrfs_iget_logging(location.objectid, root);
+if (IS_ERR(btrfs_inode)) {
+ret = PTR_ERR(btrfs_inode);
+inode = NULL;
+goto out;
+}
+inode = &btrfs_inode->vfs_inode;
}
ret = link_to_fixup_dir(trans, root, path, location.objectid);
@@ -2320,14 +2377,22 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
if (!log_path)
return -ENOMEM;
-dir = read_one_inode(root, dirid);
-/* it isn't an error if the inode isn't there, that can happen
-* because we replay the deletes before we copy in the inode item
-* from the log
-*/
-if (!dir) {
-btrfs_free_path(log_path);
-return 0;
+{
+struct btrfs_inode *btrfs_dir;
+btrfs_dir = btrfs_iget_logging(dirid, root);
+/*
+* It isn't an error if the inode isn't there, that can happen because
+* we replay the deletes before we copy in the inode item from the log.
+*/
+if (IS_ERR(btrfs_dir)) {
+btrfs_free_path(log_path);
+ret = PTR_ERR(btrfs_dir);
+if (ret == -ENOENT)
+ret = 0;
+return ret;
+}
+dir = &btrfs_dir->vfs_inode;
}
range_start = 0;
@@ -2486,10 +2551,15 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
struct inode *inode;
u64 from;
-inode = read_one_inode(root, key.objectid);
-if (!inode) {
-ret = -EIO;
-break;
+{
+struct btrfs_inode *btrfs_inode;
+btrfs_inode = btrfs_iget_logging(key.objectid, root);
+if (IS_ERR(btrfs_inode)) {
+ret = PTR_ERR(btrfs_inode);
+break;
+}
+inode = &btrfs_inode->vfs_inode;
}
from = ALIGN(i_size_read(inode),
root->fs_info->sectorsize);
@@ -5421,7 +5491,6 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
ihold(&curr_inode->vfs_inode);
while (true) {
-struct inode *vfs_inode;
struct btrfs_key key;
struct btrfs_key found_key;
u64 next_index;
@@ -5437,7 +5506,7 @@ again:
struct extent_buffer *leaf = path->nodes[0];
struct btrfs_dir_item *di;
struct btrfs_key di_key;
-struct inode *di_inode;
+struct btrfs_inode *di_inode;
int log_mode = LOG_INODE_EXISTS;
int type;
@@ -5464,17 +5533,16 @@ again:
goto out;
}
-if (!need_log_inode(trans, BTRFS_I(di_inode))) {
-btrfs_add_delayed_iput(BTRFS_I(di_inode));
+if (!need_log_inode(trans, di_inode)) {
+btrfs_add_delayed_iput(di_inode);
break;
}
ctx->log_new_dentries = false;
if (type == BTRFS_FT_DIR)
log_mode = LOG_INODE_ALL;
-ret = btrfs_log_inode(trans, BTRFS_I(di_inode),
-log_mode, ctx);
-btrfs_add_delayed_iput(BTRFS_I(di_inode));
+ret = btrfs_log_inode(trans, di_inode, log_mode, ctx);
+btrfs_add_delayed_iput(di_inode);
if (ret)
goto out;
if (ctx->log_new_dentries) {
@@ -5516,14 +5584,13 @@ again:
kfree(dir_elem);
btrfs_add_delayed_iput(curr_inode);
curr_inode = NULL;
-vfs_inode = btrfs_iget_logging(ino, root);
-if (IS_ERR(vfs_inode)) {
-ret = PTR_ERR(vfs_inode);
+curr_inode = btrfs_iget_logging(ino, root);
+if (IS_ERR(curr_inode)) {
+ret = PTR_ERR(curr_inode);
curr_inode = NULL;
break;
}
-curr_inode = BTRFS_I(vfs_inode);
}
out:
btrfs_free_path(path);
@@ -5601,7 +5668,7 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
struct btrfs_log_ctx *ctx)
{
struct btrfs_ino_list *ino_elem;
-struct inode *inode;
+struct btrfs_inode *inode;
/*
* It's rare to have a lot of conflicting inodes, in practice it is not
@@ -5692,12 +5759,12 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
* inode in LOG_INODE_EXISTS mode and rename operations update the log,
* so that the log ends up with the new name and without the old name.
*/
-if (!need_log_inode(trans, BTRFS_I(inode))) {
-btrfs_add_delayed_iput(BTRFS_I(inode));
+if (!need_log_inode(trans, inode)) {
+btrfs_add_delayed_iput(inode);
return 0;
}
-btrfs_add_delayed_iput(BTRFS_I(inode));
+btrfs_add_delayed_iput(inode);
ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
if (!ino_elem)
@@ -5733,7 +5800,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
*/
while (!list_empty(&ctx->conflict_inodes)) {
struct btrfs_ino_list *curr;
-struct inode *inode;
+struct btrfs_inode *inode;
u64 ino;
u64 parent;
@@ -5769,9 +5836,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
* dir index key range logged for the directory. So we
* must make sure the deletion is recorded.
*/
-ret = btrfs_log_inode(trans, BTRFS_I(inode),
-LOG_INODE_ALL, ctx);
-btrfs_add_delayed_iput(BTRFS_I(inode));
+ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx);
+btrfs_add_delayed_iput(inode);
if (ret)
break;
continue;
@@ -5787,8 +5853,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
* it again because if some other task logged the inode after
* that, we can avoid doing it again.
*/
-if (!need_log_inode(trans, BTRFS_I(inode))) {
-btrfs_add_delayed_iput(BTRFS_I(inode));
+if (!need_log_inode(trans, inode)) {
+btrfs_add_delayed_iput(inode);
continue;
}
@@ -5799,8 +5865,8 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
* well because during a rename we pin the log and update the
* log with the new name before we unpin it.
*/
-ret = btrfs_log_inode(trans, BTRFS_I(inode), LOG_INODE_EXISTS, ctx);
-btrfs_add_delayed_iput(BTRFS_I(inode));
+ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx);
+btrfs_add_delayed_iput(inode);
if (ret)
break;
}
@@ -6292,7 +6358,7 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
list_for_each_entry(item, delayed_ins_list, log_list) {
struct btrfs_dir_item *dir_item;
-struct inode *di_inode;
+struct btrfs_inode *di_inode;
struct btrfs_key key;
int log_mode = LOG_INODE_EXISTS;
@@ -6308,8 +6374,8 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
break;
}
-if (!need_log_inode(trans, BTRFS_I(di_inode))) {
-btrfs_add_delayed_iput(BTRFS_I(di_inode));
+if (!need_log_inode(trans, di_inode)) {
+btrfs_add_delayed_iput(di_inode);
continue;
}
@@ -6317,12 +6383,12 @@ static int log_new_delayed_dentries(struct btrfs_trans_handle *trans,
log_mode = LOG_INODE_ALL;
ctx->log_new_dentries = false;
-ret = btrfs_log_inode(trans, BTRFS_I(di_inode), log_mode, ctx);
+ret = btrfs_log_inode(trans, di_inode, log_mode, ctx);
if (!ret && ctx->log_new_dentries)
-ret = log_new_dir_dentries(trans, BTRFS_I(di_inode), ctx);
+ret = log_new_dir_dentries(trans, di_inode, ctx);
-btrfs_add_delayed_iput(BTRFS_I(di_inode));
+btrfs_add_delayed_iput(di_inode);
if (ret)
break;
@@ -6730,7 +6796,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
ptr = btrfs_item_ptr_offset(leaf, slot);
while (cur_offset < item_size) {
struct btrfs_key inode_key;
-struct inode *dir_inode;
+struct btrfs_inode *dir_inode;
inode_key.type = BTRFS_INODE_ITEM_KEY;
inode_key.offset = 0;
@@ -6779,18 +6845,16 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
goto out;
}
-if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
-btrfs_add_delayed_iput(BTRFS_I(dir_inode));
+if (!need_log_inode(trans, dir_inode)) {
+btrfs_add_delayed_iput(dir_inode);
continue;
}
ctx->log_new_dentries = false;
-ret = btrfs_log_inode(trans, BTRFS_I(dir_inode),
-LOG_INODE_ALL, ctx);
+ret = btrfs_log_inode(trans, dir_inode, LOG_INODE_ALL, ctx);
if (!ret && ctx->log_new_dentries)
-ret = log_new_dir_dentries(trans,
-BTRFS_I(dir_inode), ctx);
-btrfs_add_delayed_iput(BTRFS_I(dir_inode));
+ret = log_new_dir_dentries(trans, dir_inode, ctx);
+btrfs_add_delayed_iput(dir_inode);
if (ret)
goto out;
}
@@ -6815,7 +6879,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
int slot;
struct btrfs_key search_key;
-struct inode *inode;
+struct btrfs_inode *inode;
u64 ino;
int ret = 0;
@@ -6830,11 +6894,10 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
if (IS_ERR(inode))
return PTR_ERR(inode);
-if (BTRFS_I(inode)->generation >= trans->transid &&
-need_log_inode(trans, BTRFS_I(inode)))
-ret = btrfs_log_inode(trans, BTRFS_I(inode),
-LOG_INODE_EXISTS, ctx);
-btrfs_add_delayed_iput(BTRFS_I(inode));
+if (inode->generation >= trans->transid &&
+need_log_inode(trans, inode))
+ret = btrfs_log_inode(trans, inode, LOG_INODE_EXISTS, ctx);
+btrfs_add_delayed_iput(inode);
if (ret)
return ret;


@@ -358,6 +358,8 @@ int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
*/
static int erofs_read_folio(struct file *file, struct folio *folio)
{
+trace_erofs_read_folio(folio, true);
return iomap_read_folio(folio, &erofs_iomap_ops);
}


@@ -774,7 +774,7 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
call_rcu(&epi->rcu, epi_rcu_free);
percpu_counter_dec(&ep->user->epoll_watches);
-return ep_refcount_dec_and_test(ep);
+return true;
}
/*
@@ -782,14 +782,14 @@ static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)
*/
static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
{
-WARN_ON_ONCE(__ep_remove(ep, epi, false));
+if (__ep_remove(ep, epi, false))
+WARN_ON_ONCE(ep_refcount_dec_and_test(ep));
}
static void ep_clear_and_put(struct eventpoll *ep)
{
struct rb_node *rbp, *next;
struct epitem *epi;
-bool dispose;
/* We need to release all tasks waiting for these file */
if (waitqueue_active(&ep->poll_wait))
@@ -822,10 +822,8 @@ static void ep_clear_and_put(struct eventpoll *ep)
cond_resched();
}
-dispose = ep_refcount_dec_and_test(ep);
mutex_unlock(&ep->mtx);
-if (dispose)
+if (ep_refcount_dec_and_test(ep))
ep_free(ep);
}
@@ -1005,7 +1003,7 @@ again:
dispose = __ep_remove(ep, epi, true);
mutex_unlock(&ep->mtx);
-if (dispose)
+if (dispose && ep_refcount_dec_and_test(ep))
ep_free(ep);
goto again;
}
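The shape of the eventpoll fix above is a general pattern: never drop the last reference to an object while holding a lock embedded in it, or the free path can destroy a mutex the caller still holds. A hedged pthreads sketch of the same discipline (obj, obj_put() and obj_detach() are illustrative names, not kernel API):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct obj {
		pthread_mutex_t mtx;
		atomic_int refcount;
	};

	/* Returns true when the caller held the last reference. */
	static bool obj_put(struct obj *o)
	{
		return atomic_fetch_sub(&o->refcount, 1) == 1;
	}

	static void obj_detach(struct obj *o)
	{
		pthread_mutex_lock(&o->mtx);
		/* ... unhook o from shared structures under the lock ... */
		pthread_mutex_unlock(&o->mtx);

		/*
		 * Drop the reference only after unlocking: if it is the last
		 * one, freeing earlier would destroy a mutex that is still
		 * held, which is exactly the bug the epoll patch removes.
		 */
		if (obj_put(o)) {
			pthread_mutex_destroy(&o->mtx);
			free(o);
		}
	}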


@@ -42,7 +42,7 @@ static void proc_evict_inode(struct inode *inode)
head = ei->sysctl;
if (head) {
-RCU_INIT_POINTER(ei->sysctl, NULL);
+WRITE_ONCE(ei->sysctl, NULL);
proc_sys_evict_inode(inode, head);
}
}


@@ -920,17 +920,21 @@ static int proc_sys_compare(const struct dentry *dentry,
struct ctl_table_header *head;
struct inode *inode;
-/* Although proc doesn't have negative dentries, rcu-walk means
-* that inode here can be NULL */
-/* AV: can it, indeed? */
-inode = d_inode_rcu(dentry);
-if (!inode)
-return 1;
if (name->len != len)
return 1;
if (memcmp(name->name, str, len))
return 1;
-head = rcu_dereference(PROC_I(inode)->sysctl);
+// false positive is fine here - we'll recheck anyway
+if (d_in_lookup(dentry))
+return 0;
+inode = d_inode_rcu(dentry);
+// we just might have run into dentry in the middle of __dentry_kill()
+if (!inode)
+return 1;
+head = READ_ONCE(PROC_I(inode)->sysctl);
return !head || !sysctl_is_seen(head);
}


@@ -36,9 +36,9 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
unsigned long text, lib, swap, anon, file, shmem;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
-anon = get_mm_counter(mm, MM_ANONPAGES);
-file = get_mm_counter(mm, MM_FILEPAGES);
-shmem = get_mm_counter(mm, MM_SHMEMPAGES);
+anon = get_mm_counter_sum(mm, MM_ANONPAGES);
+file = get_mm_counter_sum(mm, MM_FILEPAGES);
+shmem = get_mm_counter_sum(mm, MM_SHMEMPAGES);
/*
* Note: to minimize their overhead, mm maintains hiwater_vm and
@@ -59,7 +59,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
text = min(text, mm->exec_vm << PAGE_SHIFT);
lib = (mm->exec_vm << PAGE_SHIFT) - text;
-swap = get_mm_counter(mm, MM_SWAPENTS);
+swap = get_mm_counter_sum(mm, MM_SWAPENTS);
SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
@@ -92,12 +92,12 @@ unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
{
-*shared = get_mm_counter(mm, MM_FILEPAGES) +
-get_mm_counter(mm, MM_SHMEMPAGES);
+*shared = get_mm_counter_sum(mm, MM_FILEPAGES) +
+get_mm_counter_sum(mm, MM_SHMEMPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->data_vm + mm->stack_vm;
-*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
+*resident = *shared + get_mm_counter_sum(mm, MM_ANONPAGES);
return mm->total_vm;
}


@@ -830,6 +830,7 @@ struct TCP_Server_Info {
* format: \\HOST\SHARE[\OPTIONAL PATH]
*/
char *leaf_fullpath;
+bool dfs_conn:1;
};
static inline bool is_smb1(struct TCP_Server_Info *server)
@@ -1065,6 +1066,7 @@ struct cifs_ses {
struct list_head smb_ses_list;
struct list_head rlist; /* reconnect list */
struct list_head tcon_list;
+struct list_head dlist; /* dfs list */
struct cifs_tcon *tcon_ipc;
spinlock_t ses_lock; /* protect anything here that is not protected */
struct mutex session_mutex;
@@ -1294,6 +1296,7 @@ struct cifs_tcon {
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
struct delayed_work dfs_cache_work;
+struct list_head dfs_ses_list;
#endif
struct delayed_work query_interfaces; /* query interfaces workqueue job */
char *origin_fullpath; /* canonical copy of smb3_fs_context::source */


@@ -136,6 +136,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
struct smb_hdr *out_buf,
int *bytes_returned);
+void smb2_query_server_interfaces(struct work_struct *work);
void
cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
bool all_channels);
@@ -737,15 +738,9 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
/* Put references of @ses and its children */
static inline void cifs_put_smb_ses(struct cifs_ses *ses)
{
-struct cifs_ses *next;
-do {
-next = ses->dfs_root_ses;
-__cifs_put_smb_ses(ses);
-} while ((ses = next));
+__cifs_put_smb_ses(ses);
}
/* Get an active reference of @ses and its children.
@@ -759,9 +754,7 @@ static inline void cifs_put_smb_ses(struct cifs_ses *ses)
static inline void cifs_smb_ses_inc_refcount(struct cifs_ses *ses)
{
lockdep_assert_held(&cifs_tcp_ses_lock);
-for (; ses; ses = ses->dfs_root_ses)
-ses->ses_count++;
+ses->ses_count++;
}
static inline bool dfs_src_pathname_equal(const char *s1, const char *s2)


@@ -113,7 +113,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
return rc;
}
-static void smb2_query_server_interfaces(struct work_struct *work)
+void smb2_query_server_interfaces(struct work_struct *work)
{
int rc;
int xid;
@@ -1551,6 +1551,9 @@ static int match_server(struct TCP_Server_Info *server,
if (server->nosharesock)
return 0;
+if (!match_super && (ctx->dfs_conn || server->dfs_conn))
+return 0;
/* If multidialect negotiation see if existing sessions match one */
if (strcmp(ctx->vals->version_string, SMB3ANY_VERSION_STRING) == 0) {
if (server->vals->protocol_id < SMB30_PROT_ID)
@@ -1740,6 +1743,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
if (ctx->nosharesock)
tcp_ses->nosharesock = true;
+tcp_ses->dfs_conn = ctx->dfs_conn;
tcp_ses->ops = ctx->ops;
tcp_ses->vals = ctx->vals;
@@ -1890,12 +1894,14 @@ out_err:
}
/* this function must be called with ses_lock and chan_lock held */
-static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
+static int match_session(struct cifs_ses *ses,
+struct smb3_fs_context *ctx,
+bool match_super)
{
struct TCP_Server_Info *server = ses->server;
enum securityEnum ctx_sec, ses_sec;
-if (ctx->dfs_root_ses != ses->dfs_root_ses)
+if (!match_super && ctx->dfs_root_ses != ses->dfs_root_ses)
return 0;
/*
@@ -2047,7 +2053,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
continue;
}
spin_lock(&ses->chan_lock);
-if (match_session(ses, ctx)) {
+if (match_session(ses, ctx, false)) {
spin_unlock(&ses->chan_lock);
spin_unlock(&ses->ses_lock);
ret = ses;
@@ -2450,8 +2456,6 @@ retry_new_session:
* need to lock before changing something in the session.
*/
spin_lock(&cifs_tcp_ses_lock);
-if (ctx->dfs_root_ses)
-cifs_smb_ses_inc_refcount(ctx->dfs_root_ses);
ses->dfs_root_ses = ctx->dfs_root_ses;
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2528,6 +2532,7 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
unsigned int xid;
struct cifs_ses *ses;
+LIST_HEAD(ses_list);
/*
* IPC tcon share the lifetime of their session and are
@@ -2559,6 +2564,7 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
cancel_delayed_work_sync(&tcon->query_interfaces);
#ifdef CONFIG_CIFS_DFS_UPCALL
cancel_delayed_work_sync(&tcon->dfs_cache_work);
+list_replace_init(&tcon->dfs_ses_list, &ses_list);
#endif
if (tcon->use_witness) {
@@ -2579,6 +2585,9 @@ cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
cifs_fscache_release_super_cookie(tcon);
tconInfoFree(tcon, netfs_trace_tcon_ref_free);
cifs_put_smb_ses(ses);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+dfs_put_root_smb_sessions(&ses_list);
+#endif
}
/**
@@ -2807,20 +2816,14 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
tcon->max_cached_dirs = ctx->max_cached_dirs;
tcon->nodelete = ctx->nodelete;
tcon->local_lease = ctx->local_lease;
-INIT_LIST_HEAD(&tcon->pending_opens);
tcon->status = TID_GOOD;
-INIT_DELAYED_WORK(&tcon->query_interfaces,
-smb2_query_server_interfaces);
if (ses->server->dialect >= SMB30_PROT_ID &&
(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
/* schedule query interfaces poll */
queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
(SMB_INTERFACE_POLL_INTERVAL * HZ));
}
-#ifdef CONFIG_CIFS_DFS_UPCALL
-INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
-#endif
spin_lock(&cifs_tcp_ses_lock);
list_add(&tcon->tcon_list, &ses->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2962,7 +2965,7 @@ cifs_match_super(struct super_block *sb, void *data)
spin_lock(&ses->chan_lock);
spin_lock(&tcon->tc_lock);
if (!match_server(tcp_srv, ctx, true) ||
-!match_session(ses, ctx) ||
+!match_session(ses, ctx, true) ||
!match_tcon(tcon, ctx) ||
!match_prepath(sb, tcon, mnt_data)) {
rc = 0;
@@ -3712,13 +3715,12 @@ out:
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
struct cifs_mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
-bool isdfs;
int rc;
-rc = dfs_mount_share(&mnt_ctx, &isdfs);
+rc = dfs_mount_share(&mnt_ctx);
if (rc)
goto error;
-if (!isdfs)
+if (!ctx->dfs_conn)
goto out;
/*
@@ -4135,7 +4137,7 @@ cifs_set_vol_auth(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
static struct cifs_tcon *
__cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
{
int rc;
struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
@@ -4233,17 +4235,6 @@ out:
return tcon;
}
-static struct cifs_tcon *
-cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
-{
-struct cifs_tcon *ret;
-cifs_mount_lock();
-ret = __cifs_construct_tcon(cifs_sb, fsuid);
-cifs_mount_unlock();
-return ret;
-}
struct cifs_tcon *
cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
{


@@ -69,7 +69,7 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path)
* Get an active reference of @ses so that next call to cifs_put_tcon() won't
* release it as any new DFS referrals must go through its IPC tcon.
*/
-static void add_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
+static void set_root_smb_session(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct cifs_ses *ses = mnt_ctx->ses;
@@ -95,7 +95,7 @@ static inline int parse_dfs_target(struct smb3_fs_context *ctx,
return rc;
}
-static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx,
+static int setup_dfs_ref(struct cifs_mount_ctx *mnt_ctx,
struct dfs_info3_param *tgt,
struct dfs_ref_walk *rw)
{
@@ -120,6 +120,7 @@ static int set_ref_paths(struct cifs_mount_ctx *mnt_ctx,
}
ref_walk_path(rw) = ref_path;
ref_walk_fpath(rw) = full_path;
+ref_walk_ses(rw) = ctx->dfs_root_ses;
return 0;
}
@@ -128,11 +129,11 @@ static int __dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
struct dfs_info3_param tgt = {};
bool is_refsrv;
int rc = -ENOENT;
again:
do {
+ctx->dfs_root_ses = ref_walk_ses(rw);
if (ref_walk_empty(rw)) {
rc = dfs_get_referral(mnt_ctx, ref_walk_path(rw) + 1,
NULL, ref_walk_tl(rw));
@@ -158,10 +159,7 @@ again:
if (rc)
continue;
is_refsrv = tgt.server_type == DFS_TYPE_ROOT ||
DFS_INTERLINK(tgt.flags);
ref_walk_set_tgt_hint(rw);
if (tgt.flags & DFSREF_STORAGE_SERVER) {
rc = cifs_mount_get_tcon(mnt_ctx);
if (!rc)
@@ -172,12 +170,10 @@ again:
continue;
}
if (is_refsrv)
-add_root_smb_session(mnt_ctx);
+set_root_smb_session(mnt_ctx);
rc = ref_walk_advance(rw);
if (!rc) {
-rc = set_ref_paths(mnt_ctx, &tgt, rw);
+rc = setup_dfs_ref(mnt_ctx, &tgt, rw);
if (!rc) {
rc = -EREMOTE;
goto again;
@@ -193,20 +189,22 @@ out:
return rc;
}
-static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx)
+static int dfs_referral_walk(struct cifs_mount_ctx *mnt_ctx,
+struct dfs_ref_walk **rw)
{
-struct dfs_ref_walk *rw;
int rc;
-rw = ref_walk_alloc();
-if (IS_ERR(rw))
-return PTR_ERR(rw);
+*rw = ref_walk_alloc();
+if (IS_ERR(*rw)) {
+rc = PTR_ERR(*rw);
+*rw = NULL;
+return rc;
+}
-ref_walk_init(rw);
-rc = set_ref_paths(mnt_ctx, NULL, rw);
+ref_walk_init(*rw);
+rc = setup_dfs_ref(mnt_ctx, NULL, *rw);
if (!rc)
-rc = __dfs_referral_walk(mnt_ctx, rw);
-ref_walk_free(rw);
+rc = __dfs_referral_walk(mnt_ctx, *rw);
return rc;
}
@@ -214,16 +212,16 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
{
struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+struct dfs_ref_walk *rw = NULL;
struct cifs_tcon *tcon;
char *origin_fullpath;
-bool new_tcon = true;
int rc;
origin_fullpath = dfs_get_path(cifs_sb, ctx->source);
if (IS_ERR(origin_fullpath))
return PTR_ERR(origin_fullpath);
-rc = dfs_referral_walk(mnt_ctx);
+rc = dfs_referral_walk(mnt_ctx, &rw);
if (!rc) {
/*
* Prevent superblock from being created with any missing
@@ -241,21 +239,16 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
tcon = mnt_ctx->tcon;
spin_lock(&tcon->tc_lock);
-if (!tcon->origin_fullpath) {
-tcon->origin_fullpath = origin_fullpath;
-origin_fullpath = NULL;
-} else {
-new_tcon = false;
-}
+tcon->origin_fullpath = origin_fullpath;
+origin_fullpath = NULL;
+ref_walk_set_tcon(rw, tcon);
spin_unlock(&tcon->tc_lock);
-if (new_tcon) {
-queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
-dfs_cache_get_ttl() * HZ);
-}
+queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
+dfs_cache_get_ttl() * HZ);
out:
kfree(origin_fullpath);
+ref_walk_free(rw);
return rc;
}
@@ -279,7 +272,7 @@ static int update_fs_context_dstaddr(struct smb3_fs_context *ctx)
return rc;
}
-int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
+int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx)
{
struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
bool nodfs = ctx->nodfs;
@@ -289,7 +282,6 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
if (rc)
return rc;
-*isdfs = false;
rc = get_session(mnt_ctx, NULL);
if (rc)
return rc;
@@ -317,10 +309,15 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
return rc;
}
-*isdfs = true;
-add_root_smb_session(mnt_ctx);
-rc = __dfs_mount_share(mnt_ctx);
-dfs_put_root_smb_sessions(mnt_ctx);
+if (!ctx->dfs_conn) {
+ctx->dfs_conn = true;
+cifs_mount_put_conns(mnt_ctx);
+rc = get_session(mnt_ctx, NULL);
+}
+if (!rc) {
+set_root_smb_session(mnt_ctx);
+rc = __dfs_mount_share(mnt_ctx);
+}
return rc;
}


@@ -19,6 +19,7 @@
struct dfs_ref {
char *path;
char *full_path;
+struct cifs_ses *ses;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *tit;
};
@@ -38,6 +39,7 @@ struct dfs_ref_walk {
#define ref_walk_path(w) (ref_walk_cur(w)->path)
#define ref_walk_fpath(w) (ref_walk_cur(w)->full_path)
#define ref_walk_tl(w) (&ref_walk_cur(w)->tl)
+#define ref_walk_ses(w) (ref_walk_cur(w)->ses)
static inline struct dfs_ref_walk *ref_walk_alloc(void)
{
@@ -60,14 +62,19 @@ static inline void __ref_walk_free(struct dfs_ref *ref)
kfree(ref->path);
kfree(ref->full_path);
dfs_cache_free_tgts(&ref->tl);
+if (ref->ses)
+cifs_put_smb_ses(ref->ses);
memset(ref, 0, sizeof(*ref));
}
static inline void ref_walk_free(struct dfs_ref_walk *rw)
{
-struct dfs_ref *ref = ref_walk_start(rw);
+struct dfs_ref *ref;
-for (; ref <= ref_walk_end(rw); ref++)
+if (!rw)
+return;
+for (ref = ref_walk_start(rw); ref <= ref_walk_end(rw); ref++)
__ref_walk_free(ref);
kfree(rw);
}
@@ -116,9 +123,22 @@ static inline void ref_walk_set_tgt_hint(struct dfs_ref_walk *rw)
ref_walk_tit(rw));
}
+static inline void ref_walk_set_tcon(struct dfs_ref_walk *rw,
+struct cifs_tcon *tcon)
+{
+struct dfs_ref *ref = ref_walk_start(rw);
+for (; ref <= ref_walk_cur(rw); ref++) {
+if (WARN_ON_ONCE(!ref->ses))
+continue;
+list_add(&ref->ses->dlist, &tcon->dfs_ses_list);
+ref->ses = NULL;
+}
+}
int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref,
struct smb3_fs_context *ctx);
-int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs);
+int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx);
static inline char *dfs_get_path(struct cifs_sb_info *cifs_sb, const char *path)
{
@@ -142,20 +162,14 @@ static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *p
* references of all DFS root sessions that were used across the mount process
* in dfs_mount_share().
*/
-static inline void dfs_put_root_smb_sessions(struct cifs_mount_ctx *mnt_ctx)
+static inline void dfs_put_root_smb_sessions(struct list_head *head)
{
-const struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
-struct cifs_ses *ses = ctx->dfs_root_ses;
-struct cifs_ses *cur;
+struct cifs_ses *ses, *n;
-if (!ses)
-return;
-for (cur = ses; cur; cur = cur->dfs_root_ses) {
-if (cur->dfs_root_ses)
-cifs_put_smb_ses(cur->dfs_root_ses);
+list_for_each_entry_safe(ses, n, head, dlist) {
+list_del_init(&ses->dlist);
+cifs_put_smb_ses(ses);
}
-cifs_put_smb_ses(ses);
}
#endif /* _CIFS_DFS_H */


@@ -1095,16 +1095,18 @@ int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it,
return 0;
}
-static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
+static bool target_share_equal(struct cifs_tcon *tcon, const char *s1)
{
-char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
-const char *host;
-size_t hostlen;
+struct TCP_Server_Info *server = tcon->ses->server;
struct sockaddr_storage ss;
+const char *host;
+const char *s2 = &tcon->tree_name[1];
+size_t hostlen;
+char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
bool match;
int rc;
-if (strcasecmp(s1, s2))
+if (strcasecmp(s2, s1))
return false;
/*
@@ -1128,34 +1130,6 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
return match;
}
-/*
-* Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
-* target shares in @refs.
-*/
-static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
-const char *path,
-struct dfs_cache_tgt_list *old_tl,
-struct dfs_cache_tgt_list *new_tl)
-{
-struct dfs_cache_tgt_iterator *oit, *nit;
-for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
-oit = dfs_cache_get_next_tgt(old_tl, oit)) {
-for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
-nit = dfs_cache_get_next_tgt(new_tl, nit)) {
-if (target_share_equal(server,
-dfs_cache_get_tgt_name(oit),
-dfs_cache_get_tgt_name(nit))) {
-dfs_cache_noreq_update_tgthint(path, nit);
-return;
-}
-}
-}
-cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
-cifs_signal_cifsd_for_reconnect(server, true);
-}
static bool is_ses_good(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
@@ -1172,41 +1146,35 @@ static bool is_ses_good(struct cifs_ses *ses)
return ret;
}
-/* Refresh dfs referral of @ses and mark it for reconnect if needed */
-static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
+static char *get_ses_refpath(struct cifs_ses *ses)
{
struct TCP_Server_Info *server = ses->server;
-DFS_CACHE_TGT_LIST(old_tl);
-DFS_CACHE_TGT_LIST(new_tl);
-bool needs_refresh = false;
-struct cache_entry *ce;
-unsigned int xid;
-char *path = NULL;
-int rc = 0;
-xid = get_xid();
+char *path = ERR_PTR(-ENOENT);
mutex_lock(&server->refpath_lock);
if (server->leaf_fullpath) {
path = kstrdup(server->leaf_fullpath + 1, GFP_ATOMIC);
if (!path)
-rc = -ENOMEM;
+path = ERR_PTR(-ENOMEM);
}
mutex_unlock(&server->refpath_lock);
-if (!path)
-goto out;
+return path;
+}
-down_read(&htable_rw_lock);
-ce = lookup_cache_entry(path);
-needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
-if (!IS_ERR(ce)) {
-rc = get_targets(ce, &old_tl);
-cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
-}
-up_read(&htable_rw_lock);
+/* Refresh dfs referral of @ses */
+static void refresh_ses_referral(struct cifs_ses *ses)
+{
+struct cache_entry *ce;
+unsigned int xid;
+char *path;
+int rc = 0;
-if (!needs_refresh) {
-rc = 0;
+xid = get_xid();
+path = get_ses_refpath(ses);
+if (IS_ERR(path)) {
+rc = PTR_ERR(path);
+path = NULL;
goto out;
}
@@ -1217,29 +1185,106 @@ static void __refresh_ses_referral(struct cifs_ses *ses, bool force_refresh)
goto out;
}
-ce = cache_refresh_path(xid, ses, path, true);
-if (!IS_ERR(ce)) {
-rc = get_targets(ce, &new_tl);
+ce = cache_refresh_path(xid, ses, path, false);
+if (!IS_ERR(ce))
+up_read(&htable_rw_lock);
-cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
-mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
+else
+rc = PTR_ERR(ce);
out:
free_xid(xid);
kfree(path);
}
+static int __refresh_tcon_referral(struct cifs_tcon *tcon,
+const char *path,
+struct dfs_info3_param *refs,
+int numrefs, bool force_refresh)
+{
+struct cache_entry *ce;
+bool reconnect = force_refresh;
+int rc = 0;
+int i;
+if (unlikely(!numrefs))
+return 0;
+if (force_refresh) {
+for (i = 0; i < numrefs; i++) {
+/* TODO: include prefix paths in the matching */
+if (target_share_equal(tcon, refs[i].node_name)) {
+reconnect = false;
+break;
+}
+}
+}
+down_write(&htable_rw_lock);
+ce = lookup_cache_entry(path);
+if (!IS_ERR(ce)) {
+if (force_refresh || cache_entry_expired(ce))
+rc = update_cache_entry_locked(ce, refs, numrefs);
+} else if (PTR_ERR(ce) == -ENOENT) {
+ce = add_cache_entry_locked(refs, numrefs);
+}
+up_write(&htable_rw_lock);
+if (IS_ERR(ce))
+rc = PTR_ERR(ce);
+if (reconnect) {
+cifs_tcon_dbg(FYI, "%s: mark for reconnect\n", __func__);
+cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+}
+return rc;
+}
+static void refresh_tcon_referral(struct cifs_tcon *tcon, bool force_refresh)
+{
+struct dfs_info3_param *refs = NULL;
+struct cache_entry *ce;
+struct cifs_ses *ses;
+unsigned int xid;
+bool needs_refresh;
+char *path;
+int numrefs = 0;
+int rc = 0;
+xid = get_xid();
+ses = tcon->ses;
+path = get_ses_refpath(ses);
+if (IS_ERR(path)) {
+rc = PTR_ERR(path);
+path = NULL;
+goto out;
+}
+down_read(&htable_rw_lock);
+ce = lookup_cache_entry(path);
+needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
+if (!needs_refresh) {
+up_read(&htable_rw_lock);
+goto out;
+}
+up_read(&htable_rw_lock);
ses = CIFS_DFS_ROOT_SES(ses);
if (!is_ses_good(ses)) {
cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
__func__);
goto out;
}
+rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
+if (!rc) {
+rc = __refresh_tcon_referral(tcon, path, refs,
+numrefs, force_refresh);
+}
out:
free_xid(xid);
-dfs_cache_free_tgts(&old_tl);
-dfs_cache_free_tgts(&new_tl);
kfree(path);
}
-static inline void refresh_ses_referral(struct cifs_ses *ses)
-{
-__refresh_ses_referral(ses, false);
-}
-static inline void force_refresh_ses_referral(struct cifs_ses *ses)
-{
-__refresh_ses_referral(ses, true);
+free_dfs_info_array(refs, numrefs);
}
/**
@@ -1280,7 +1325,7 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
*/
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
-force_refresh_ses_referral(tcon->ses);
+refresh_tcon_referral(tcon, true);
return 0;
}
@@ -1292,8 +1337,9 @@ void dfs_cache_refresh(struct work_struct *work)
tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
-for (ses = tcon->ses; ses; ses = ses->dfs_root_ses)
+list_for_each_entry(ses, &tcon->dfs_ses_list, dlist)
refresh_ses_referral(ses);
+refresh_tcon_referral(tcon, false);
queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
atomic_read(&dfs_cache_ttl) * HZ);


@@ -287,6 +287,7 @@ struct smb3_fs_context {
struct cifs_ses *dfs_root_ses;
bool dfs_automount:1; /* set for dfs automount only */
enum cifs_reparse_type reparse_type;
+bool dfs_conn:1; /* set for dfs mounts */
};
extern const struct fs_parameter_spec smb3_fs_parameters[];


@@ -145,6 +145,15 @@ tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
mutex_init(&ret_buf->fscache_lock);
#endif
trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
+#endif
+INIT_LIST_HEAD(&ret_buf->pending_opens);
+INIT_DELAYED_WORK(&ret_buf->query_interfaces,
+smb2_query_server_interfaces);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+INIT_DELAYED_WORK(&ret_buf->dfs_cache_work, dfs_cache_refresh);
+#endif
return ret_buf;
}


@@ -260,7 +260,7 @@ static struct vfsmount *cifs_do_automount(struct path *path)
ctx->source = NULL;
goto out;
}
-ctx->dfs_automount = is_dfs_mount(mntpt);
+ctx->dfs_automount = ctx->dfs_conn = is_dfs_mount(mntpt);
cifs_dbg(FYI, "%s: ctx: source=%s UNC=%s prepath=%s dfs_automount=%d\n",
__func__, ctx->source, ctx->UNC, ctx->prepath, ctx->dfs_automount);


@@ -8503,11 +8503,6 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
goto err_out;
}
-opinfo->op_state = OPLOCK_STATE_NONE;
-wake_up_interruptible_all(&opinfo->oplock_q);
-opinfo_put(opinfo);
-ksmbd_fd_put(work, fp);
rsp->StructureSize = cpu_to_le16(24);
rsp->OplockLevel = rsp_oplevel;
rsp->Reserved = 0;
@@ -8515,16 +8510,15 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
rsp->VolatileFid = volatile_id;
rsp->PersistentFid = persistent_id;
ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break));
-if (!ret)
-return;
+if (ret) {
err_out:
+smb2_set_err_rsp(work);
+}
+opinfo->op_state = OPLOCK_STATE_NONE;
+wake_up_interruptible_all(&opinfo->oplock_q);
+opinfo_put(opinfo);
+ksmbd_fd_put(work, fp);
-smb2_set_err_rsp(work);
}
static int check_lease_state(struct lease *lease, __le32 req_state)
@@ -8654,11 +8648,6 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
}
lease_state = lease->state;
-opinfo->op_state = OPLOCK_STATE_NONE;
-wake_up_interruptible_all(&opinfo->oplock_q);
-atomic_dec(&opinfo->breaking_cnt);
-wake_up_interruptible_all(&opinfo->oplock_brk);
-opinfo_put(opinfo);
rsp->StructureSize = cpu_to_le16(36);
rsp->Reserved = 0;
@@ -8667,16 +8656,16 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
rsp->LeaseState = lease_state;
rsp->LeaseDuration = 0;
ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack));
-if (!ret)
-return;
+if (ret) {
err_out:
+smb2_set_err_rsp(work);
+}
+opinfo->op_state = OPLOCK_STATE_NONE;
+wake_up_interruptible_all(&opinfo->oplock_q);
+atomic_dec(&opinfo->breaking_cnt);
+wake_up_interruptible_all(&opinfo->oplock_brk);
+opinfo_put(opinfo);
-smb2_set_err_rsp(work);
}
/**


@@ -426,7 +426,8 @@ static void free_transport(struct smb_direct_transport *t)
if (t->qp) {
ib_drain_qp(t->qp);
ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
-ib_destroy_qp(t->qp);
+t->qp = NULL;
+rdma_destroy_qp(t->cm_id);
}
ksmbd_debug(RDMA, "drain the reassembly queue\n");
@@ -1934,8 +1935,8 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
return 0;
err:
if (t->qp) {
-ib_destroy_qp(t->qp);
+t->qp = NULL;
+rdma_destroy_qp(t->cm_id);
}
if (t->recv_cq) {
ib_destroy_cq(t->recv_cq);


@@ -1293,6 +1293,7 @@ out1:
err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
if (err) {
mnt_drop_write(parent_path->mnt);
+path_put(path);
path_put(parent_path);
}


@@ -302,6 +302,9 @@ struct drm_file {
*
* Mapping of mm object handles to object pointers. Used by the GEM
* subsystem. Protected by @table_lock.
+*
+* Note that allocated entries might be NULL as a transient state when
+* creating or deleting a handle.
*/
struct idr object_idr;


@@ -23,6 +23,7 @@
#ifndef __DRM_FRAMEBUFFER_H__
#define __DRM_FRAMEBUFFER_H__
+#include <linux/bits.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/sched.h>
@@ -100,6 +101,8 @@ struct drm_framebuffer_funcs {
unsigned num_clips);
};
+#define DRM_FRAMEBUFFER_HAS_HANDLE_REF(_i) BIT(0u + (_i))
/**
* struct drm_framebuffer - frame buffer object
*
@@ -188,6 +191,10 @@ struct drm_framebuffer {
* DRM_MODE_FB_MODIFIERS.
*/
int flags;
+/**
+* @internal_flags: Framebuffer flags like DRM_FRAMEBUFFER_HAS_HANDLE_REF.
+*/
+unsigned int internal_flags;
/**
* @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor
* IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR


@@ -70,9 +70,11 @@ static inline bool spsc_queue_push(struct spsc_queue *queue, struct spsc_node *n
preempt_disable();
+atomic_inc(&queue->job_count);
+smp_mb__after_atomic();
tail = (struct spsc_node **)atomic_long_xchg(&queue->tail, (long)&node->next);
WRITE_ONCE(*tail, node);
-atomic_inc(&queue->job_count);
/*
* In case of first element verify new node will be visible to the consumer


@@ -34,6 +34,18 @@
*/
#define round_down(x, y) ((x) & ~__round_mask(x, y))
+/**
+* DIV_ROUND_UP_POW2 - divide and round up
+* @n: numerator
+* @d: denominator (must be a power of 2)
+*
+* Divides @n by @d and rounds up to next multiple of @d (which must be a power
+* of 2). Avoids integer overflows that may occur with __KERNEL_DIV_ROUND_UP().
+* Performance is roughly equivalent to __KERNEL_DIV_ROUND_UP().
+*/
+#define DIV_ROUND_UP_POW2(n, d) \
+((n) / (d) + !!((n) & ((d) - 1)))
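The overflow that DIV_ROUND_UP_POW2() avoids is easy to reproduce: the usual (n + d - 1) / d wraps when n is near the type's maximum, while n / d + !!(n & (d - 1)) cannot. A small standalone sketch (values picked for illustration):

	#include <stdio.h>

	#define DIV_ROUND_UP_POW2(n, d)	((n) / (d) + !!((n) & ((d) - 1)))

	int main(void)
	{
		unsigned int n = 0xFFFFFFF5u;	/* close to UINT_MAX */
		unsigned int d = 16;		/* power of 2 */

		/* n + 15 wraps around, so the naive form yields 0. */
		printf("naive: %u\n", (n + d - 1) / d);
		/* 268435456, the correct rounded-up quotient. */
		printf("pow2 : %u\n", DIV_ROUND_UP_POW2(n, d));
		return 0;
	}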
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
#define DIV_ROUND_DOWN_ULL(ll, d) \


@@ -2625,6 +2625,11 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
return percpu_counter_read_positive(&mm->rss_stat[member]);
}
+static inline unsigned long get_mm_counter_sum(struct mm_struct *mm, int member)
+{
+return percpu_counter_sum_positive(&mm->rss_stat[member]);
+}
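get_mm_counter_sum() trades speed for accuracy: percpu_counter_read_positive() only looks at the cached central value, while percpu_counter_sum_positive() folds in every per-CPU delta, which is what lets /proc report exact RSS. A toy userspace analogue of the two read paths (the struct below is a simplified stand-in, not the kernel's percpu_counter):

	#include <stdio.h>

	#define NR_CPUS 4

	/* Toy percpu counter: a cached global plus per-CPU deltas. */
	struct percpu_counter {
		long count;			/* cached, folded in batches */
		long cpu_delta[NR_CPUS];	/* not yet folded into count */
	};

	/* Fast, approximate: only looks at the cached value. */
	static long counter_read(struct percpu_counter *c)
	{
		return c->count < 0 ? 0 : c->count;
	}

	/* Slower, precise: folds in every per-CPU delta. */
	static long counter_sum(struct percpu_counter *c)
	{
		long sum = c->count;

		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			sum += c->cpu_delta[cpu];
		return sum < 0 ? 0 : sum;
	}

	int main(void)
	{
		struct percpu_counter rss = { .count = 100,
					      .cpu_delta = { 3, -1, 7, 0 } };

		printf("read (approx): %ld\n", counter_read(&rss));	/* 100 */
		printf("sum  (exact) : %ld\n", counter_sum(&rss));	/* 109 */
		return 0;
	}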
void mm_trace_rss_stat(struct mm_struct *mm, int member);
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)


@@ -236,8 +236,8 @@ int __vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-#ifdef CONFIG_BPF_SYSCALL
extern struct proto vsock_proto;
+#ifdef CONFIG_BPF_SYSCALL
int vsock_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void __init vsock_bpf_build_proto(void);
#else


@@ -353,7 +353,7 @@ static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
{
-if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
+if (!pskb_may_pull(skb, ETH_HLEN + PPPOE_SES_HLEN))
return false;
*inner_proto = __nf_flow_pppoe_proto(skb);


@@ -202,6 +202,7 @@ const struct io_issue_def io_issue_defs[] = {
},
[IORING_OP_FALLOCATE] = {
.needs_file = 1,
+.hash_reg_file = 1,
.prep = io_fallocate_prep,
.issue = io_fallocate,
},


@@ -337,12 +337,12 @@ static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
list) {
__bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
BPF_LRU_LOCAL_LIST_T_FREE);
-if (++nfree == LOCAL_FREE_TARGET)
+if (++nfree == lru->target_free)
break;
}
-if (nfree < LOCAL_FREE_TARGET)
-__bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
+if (nfree < lru->target_free)
+__bpf_lru_list_shrink(lru, l, lru->target_free - nfree,
local_free_list(loc_l),
BPF_LRU_LOCAL_LIST_T_FREE);
@@ -577,6 +577,9 @@ static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
buf += elem_size;
}
lru->target_free = clamp((nr_elems / num_possible_cpus()) / 2,
1, LOCAL_FREE_TARGET);
}
static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
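What the clamp works out to for a few assumed map and machine sizes (LOCAL_FREE_TARGET is 128):

    #include <stdio.h>

    #define LOCAL_FREE_TARGET 128
    /* stand-in for the kernel's clamp() */
    #define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

    int main(void)
    {
            unsigned int nr_cpus = 16;      /* assumed num_possible_cpus() */
            unsigned int sizes[] = { 64, 4096, 1u << 20 };

            for (int i = 0; i < 3; i++)
                    printf("nr_elems=%-8u target_free=%u\n", sizes[i],
                           clamp(sizes[i] / nr_cpus / 2, 1u,
                                 (unsigned int)LOCAL_FREE_TARGET));
            return 0;       /* 64 -> 2, 4096 -> 128, 1M -> 128 */
    }

Small maps thus stop handing up to LOCAL_FREE_TARGET elements to a single CPU's local free list.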


@@ -58,6 +58,7 @@ struct bpf_lru {
del_from_htab_func del_from_htab;
void *del_arg;
unsigned int hash_offset;
unsigned int target_free;
unsigned int nr_scans;
bool percpu;
};


@@ -874,8 +874,6 @@ static void perf_cgroup_switch(struct task_struct *task)
if (READ_ONCE(cpuctx->cgrp) == NULL)
return;
WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
cgrp = perf_cgroup_from_task(task, NULL);
if (READ_ONCE(cpuctx->cgrp) == cgrp)
return;
@@ -887,6 +885,8 @@ static void perf_cgroup_switch(struct task_struct *task)
if (READ_ONCE(cpuctx->cgrp) == NULL)
return;
WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
perf_ctx_disable(&cpuctx->ctx, true);
ctx_sched_out(&cpuctx->ctx, EVENT_ALL|EVENT_CGROUP);
@@ -10428,7 +10428,7 @@ static int perf_uprobe_event_init(struct perf_event *event)
if (event->attr.type != perf_uprobe.type)
return -ENOENT;
if (!perfmon_capable())
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
/*
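The two checks differ in scope; perfmon_capable() is defined in include/linux/capability.h as:

    static inline bool perfmon_capable(void)
    {
            return capable(CAP_PERFMON) || capable(CAP_SYS_ADMIN);
    }

so the revert narrows uprobe creation from CAP_PERFMON-or-CAP_SYS_ADMIN back to CAP_SYS_ADMIN only, since installing a uprobe patches another process's executable pages.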


@@ -149,6 +149,29 @@ static int rseq_reset_rseq_cpu_node_id(struct task_struct *t)
return 0;
}
/*
* Get the user-space pointer value stored in the 'rseq_cs' field.
*/
static int rseq_get_rseq_cs_ptr_val(struct rseq __user *rseq, u64 *rseq_cs)
{
if (!rseq_cs)
return -EFAULT;
#ifdef CONFIG_64BIT
if (get_user(*rseq_cs, &rseq->rseq_cs))
return -EFAULT;
#else
if (copy_from_user(rseq_cs, &rseq->rseq_cs, sizeof(*rseq_cs)))
return -EFAULT;
#endif
return 0;
}
/*
* If the rseq_cs field of 'struct rseq' contains a valid pointer to
* user-space, copy 'struct rseq_cs' from user-space and validate its fields.
*/
static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
{
struct rseq_cs __user *urseq_cs;
@@ -157,17 +180,16 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
u32 sig;
int ret;
#ifdef CONFIG_64BIT
if (get_user(ptr, &t->rseq->rseq_cs))
return -EFAULT;
#else
if (copy_from_user(&ptr, &t->rseq->rseq_cs, sizeof(ptr)))
return -EFAULT;
#endif
ret = rseq_get_rseq_cs_ptr_val(t->rseq, &ptr);
if (ret)
return ret;
/* If the rseq_cs pointer is NULL, return a cleared struct rseq_cs. */
if (!ptr) {
memset(rseq_cs, 0, sizeof(*rseq_cs));
return 0;
}
/* Check that the pointer value fits in the user-space process space. */
if (ptr >= TASK_SIZE)
return -EINVAL;
urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
@@ -243,7 +265,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
return !!event_mask;
}
static int clear_rseq_cs(struct task_struct *t)
static int clear_rseq_cs(struct rseq __user *rseq)
{
/*
* The rseq_cs field is set to NULL on preemption or signal
@@ -254,9 +276,9 @@ static int clear_rseq_cs(struct task_struct *t)
* Set rseq_cs to NULL.
*/
#ifdef CONFIG_64BIT
return put_user(0UL, &t->rseq->rseq_cs);
return put_user(0UL, &rseq->rseq_cs);
#else
if (clear_user(&t->rseq->rseq_cs, sizeof(t->rseq->rseq_cs)))
if (clear_user(&rseq->rseq_cs, sizeof(rseq->rseq_cs)))
return -EFAULT;
return 0;
#endif
@@ -288,11 +310,11 @@ static int rseq_ip_fixup(struct pt_regs *regs)
* Clear the rseq_cs pointer and return.
*/
if (!in_rseq_cs(ip, &rseq_cs))
return clear_rseq_cs(t);
return clear_rseq_cs(t->rseq);
ret = rseq_need_restart(t, rseq_cs.flags);
if (ret <= 0)
return ret;
ret = clear_rseq_cs(t);
ret = clear_rseq_cs(t->rseq);
if (ret)
return ret;
trace_rseq_ip_fixup(ip, rseq_cs.start_ip, rseq_cs.post_commit_offset,
@@ -366,6 +388,7 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
int, flags, u32, sig)
{
int ret;
u64 rseq_cs;
if (flags & RSEQ_FLAG_UNREGISTER) {
if (flags & ~RSEQ_FLAG_UNREGISTER)
@@ -420,6 +443,19 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
return -EINVAL;
if (!access_ok(rseq, rseq_len))
return -EFAULT;
/*
* If the rseq_cs pointer is non-NULL on registration, clear it to
* avoid a potential segfault on return to user-space. The proper thing
* to do would have been to fail the registration but this would break
* older libcs that reuse the rseq area for new threads without
* clearing the fields.
*/
if (rseq_get_rseq_cs_ptr_val(rseq, &rseq_cs))
return -EFAULT;
if (rseq_cs && clear_rseq_cs(rseq))
return -EFAULT;
current->rseq = rseq;
current->rseq_len = rseq_len;
current->rseq_sig = sig;
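How the registration path uses the new helper, condensed from the hunk above:

    u64 rseq_cs;

    /* rseq->rseq_cs is a 64-bit field even for 32-bit tasks; get_user()
     * on a u64 is not available on every 32-bit architecture, hence the
     * copy_from_user() fallback inside the helper. */
    if (rseq_get_rseq_cs_ptr_val(rseq, &rseq_cs))
            return -EFAULT;
    /* defuse a stale pointer left behind by a libc that reuses the
     * rseq area for new threads without clearing it */
    if (rseq_cs && clear_rseq_cs(rseq))
            return -EFAULT;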


@@ -5282,6 +5282,7 @@ static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
struct maple_enode *start;
if (mte_is_leaf(enode)) {
mte_set_node_dead(enode);
node->type = mte_node_type(enode);
goto free_leaf;
}
@@ -5509,7 +5510,7 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
/* At this point, we are at the leaf node that needs to be altered. */
/* Exact fit, no nodes needed. */
if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
return 0;
goto set_flag;
mas_wr_end_piv(&wr_mas);
node_size = mas_wr_new_end(&wr_mas);
@@ -5518,10 +5519,10 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
if (node_size == wr_mas.node_end) {
/* reuse node */
if (!mt_in_rcu(mas->tree))
return 0;
goto set_flag;
/* shifting boundary */
if (wr_mas.offset_end - mas->offset == 1)
return 0;
goto set_flag;
}
if (node_size >= mt_slots[wr_mas.type]) {
@@ -5540,10 +5541,13 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
/* node store, slot store needs one node */
ask_now:
mas->mas_flags &= ~MA_STATE_PREALLOC;
mas_node_count_gfp(mas, request, gfp);
mas->mas_flags |= MA_STATE_PREALLOC;
if (likely(!mas_is_err(mas)))
if (likely(!mas_is_err(mas))) {
set_flag:
mas->mas_flags |= MA_STATE_PREALLOC;
return 0;
}
mas_set_alloc_req(mas, 0);
ret = xa_err(mas->node);


@@ -385,17 +385,8 @@ static void print_address_description(void *addr, u8 tag,
}
if (is_vmalloc_addr(addr)) {
struct vm_struct *va = find_vm_area(addr);
if (va) {
pr_err("The buggy address belongs to the virtual mapping at\n"
" [%px, %px) created by:\n"
" %pS\n",
va->addr, va->addr + va->size, va->caller);
pr_err("\n");
page = vmalloc_to_page(addr);
}
pr_err("The buggy address %px belongs to a vmalloc virtual mapping\n", addr);
page = vmalloc_to_page(addr);
}
if (page) {


@@ -468,6 +468,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr,
pgtbl_mod_mask *mask)
{
int err = 0;
pte_t *pte;
/*
@@ -481,18 +482,25 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
do {
struct page *page = pages[*nr];
if (WARN_ON(!pte_none(ptep_get(pte))))
return -EBUSY;
if (WARN_ON(!page))
return -ENOMEM;
if (WARN_ON(!pfn_valid(page_to_pfn(page))))
return -EINVAL;
if (WARN_ON(!pte_none(ptep_get(pte)))) {
err = -EBUSY;
break;
}
if (WARN_ON(!page)) {
err = -ENOMEM;
break;
}
if (WARN_ON(!pfn_valid(page_to_pfn(page)))) {
err = -EINVAL;
break;
}
set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
(*nr)++;
} while (pte++, addr += PAGE_SIZE, addr != end);
*mask |= PGTBL_PTE_MODIFIED;
return 0;
return err;
}
static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
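Reduced to its pattern: the fix swaps early returns for breaks so the loop always falls through to a single exit; per the upstream changelog this matters because the loop runs in lazy MMU mode, which must be left before returning. A sketch under that assumption:

    arch_enter_lazy_mmu_mode();
    do {
            if (WARN_ON(page_is_bad)) {     /* placeholder condition */
                    err = -EINVAL;
                    break;  /* not 'return': that would skip the leave */
            }
            /* ... set_pte_at() for one page ... */
    } while (pte++, addr += PAGE_SIZE, addr != end);
    arch_leave_lazy_mmu_mode();     /* reached on success and failure */
    return err;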


@@ -563,6 +563,7 @@ static int atrtr_create(struct rtentry *r, struct net_device *devhint)
/* Fill in the routing entry */
rt->target = ta->sat_addr;
dev_put(rt->dev); /* Release old device */
dev_hold(devhint);
rt->dev = devhint;
rt->flags = r->rt_flags;
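The added dev_hold() closes a reference imbalance: the old code dropped the previous device's reference and published devhint without taking one, so the new device could be freed while the route still pointed at it. The invariant, spelled out:

    /* rt->dev owns a device reference, so swapping devices is
     * put-old, hold-new, then publish:
     *
     *      dev_put(rt->dev);
     *      dev_hold(devhint);
     *      rt->dev = devhint;
     */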


@@ -45,7 +45,8 @@
#include <net/atmclip.h>
static struct net_device *clip_devs;
static struct atm_vcc *atmarpd;
static struct atm_vcc __rcu *atmarpd;
static DEFINE_MUTEX(atmarpd_lock);
static struct timer_list idle_timer;
static const struct neigh_ops clip_neigh_ops;
@@ -53,24 +54,35 @@ static int to_atmarpd(enum atmarp_ctrl_type type, int itf, __be32 ip)
{
struct sock *sk;
struct atmarp_ctrl *ctrl;
struct atm_vcc *vcc;
struct sk_buff *skb;
int err = 0;
pr_debug("(%d)\n", type);
if (!atmarpd)
return -EUNATCH;
rcu_read_lock();
vcc = rcu_dereference(atmarpd);
if (!vcc) {
err = -EUNATCH;
goto unlock;
}
skb = alloc_skb(sizeof(struct atmarp_ctrl), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
if (!skb) {
err = -ENOMEM;
goto unlock;
}
ctrl = skb_put(skb, sizeof(struct atmarp_ctrl));
ctrl->type = type;
ctrl->itf_num = itf;
ctrl->ip = ip;
atm_force_charge(atmarpd, skb->truesize);
atm_force_charge(vcc, skb->truesize);
sk = sk_atm(atmarpd);
sk = sk_atm(vcc);
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk);
return 0;
unlock:
rcu_read_unlock();
return err;
}
static void link_vcc(struct clip_vcc *clip_vcc, struct atmarp_entry *entry)
@@ -417,6 +429,8 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
if (!vcc->push)
return -EBADFD;
if (vcc->user_back)
return -EINVAL;
clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
if (!clip_vcc)
return -ENOMEM;
@@ -607,17 +621,27 @@ static void atmarpd_close(struct atm_vcc *vcc)
{
pr_debug("\n");
rtnl_lock();
atmarpd = NULL;
mutex_lock(&atmarpd_lock);
RCU_INIT_POINTER(atmarpd, NULL);
mutex_unlock(&atmarpd_lock);
synchronize_rcu();
skb_queue_purge(&sk_atm(vcc)->sk_receive_queue);
rtnl_unlock();
pr_debug("(done)\n");
module_put(THIS_MODULE);
}
static int atmarpd_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
atm_return_tx(vcc, skb);
dev_kfree_skb_any(skb);
return 0;
}
static const struct atmdev_ops atmarpd_dev_ops = {
.close = atmarpd_close
.close = atmarpd_close,
.send = atmarpd_send
};
@@ -631,15 +655,18 @@ static struct atm_dev atmarpd_dev = {
static int atm_init_atmarp(struct atm_vcc *vcc)
{
rtnl_lock();
if (vcc->push == clip_push)
return -EINVAL;
mutex_lock(&atmarpd_lock);
if (atmarpd) {
rtnl_unlock();
mutex_unlock(&atmarpd_lock);
return -EADDRINUSE;
}
mod_timer(&idle_timer, jiffies + CLIP_CHECK_INTERVAL * HZ);
atmarpd = vcc;
rcu_assign_pointer(atmarpd, vcc);
set_bit(ATM_VF_META, &vcc->flags);
set_bit(ATM_VF_READY, &vcc->flags);
/* allow replies and avoid getting closed if signaling dies */
@@ -648,13 +675,14 @@ static int atm_init_atmarp(struct atm_vcc *vcc)
vcc->push = NULL;
vcc->pop = NULL; /* crash */
vcc->push_oam = NULL; /* crash */
rtnl_unlock();
mutex_unlock(&atmarpd_lock);
return 0;
}
static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct atm_vcc *vcc = ATM_SD(sock);
struct sock *sk = sock->sk;
int err = 0;
switch (cmd) {
@@ -675,14 +703,18 @@ static int clip_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
err = clip_create(arg);
break;
case ATMARPD_CTRL:
lock_sock(sk);
err = atm_init_atmarp(vcc);
if (!err) {
sock->state = SS_CONNECTED;
__module_get(THIS_MODULE);
}
release_sock(sk);
break;
case ATMARP_MKIP:
lock_sock(sk);
err = clip_mkip(vcc, arg);
release_sock(sk);
break;
case ATMARP_SETENTRY:
err = clip_setentry(vcc, (__force __be32)arg);
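The conversion follows the standard RCU-pointer lifecycle; a generic sketch, not additional clip.c code (queue_ctrl_msg() is a hypothetical user):

    /* register (writer) */
    mutex_lock(&atmarpd_lock);
    rcu_assign_pointer(atmarpd, vcc);       /* publish */
    mutex_unlock(&atmarpd_lock);

    /* use (reader) */
    rcu_read_lock();
    vcc = rcu_dereference(atmarpd);
    if (vcc)
            queue_ctrl_msg(vcc);            /* live until unlock */
    rcu_read_unlock();

    /* unregister (writer) */
    mutex_lock(&atmarpd_lock);
    RCU_INIT_POINTER(atmarpd, NULL);
    mutex_unlock(&atmarpd_lock);
    synchronize_rcu();                      /* drain in-flight readers */
    /* only now is tearing down the vcc safe */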


@@ -2139,40 +2139,6 @@ static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
return rp->status;
}
static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
struct hci_rp_le_set_ext_adv_params *rp = data;
struct hci_cp_le_set_ext_adv_params *cp;
struct adv_info *adv_instance;
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
if (rp->status)
return rp->status;
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
if (!cp)
return rp->status;
hci_dev_lock(hdev);
hdev->adv_addr_type = cp->own_addr_type;
if (!cp->handle) {
/* Store in hdev for instance 0 */
hdev->adv_tx_power = rp->tx_power;
} else {
adv_instance = hci_find_adv_instance(hdev, cp->handle);
if (adv_instance)
adv_instance->tx_power = rp->tx_power;
}
/* Update adv data as tx power is known now */
hci_update_adv_data(hdev, cp->handle);
hci_dev_unlock(hdev);
return rp->status;
}
static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
struct sk_buff *skb)
{
@@ -4153,8 +4119,6 @@ static const struct hci_cc {
HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
hci_cc_le_read_num_adv_sets,
sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
sizeof(struct hci_rp_le_set_ext_adv_params)),
HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
hci_cc_le_set_ext_adv_enable),
HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
@@ -6916,7 +6880,10 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu);
if (!ev->status) {
bis->state = BT_CONNECTED;
set_bit(HCI_CONN_BIG_SYNC, &bis->flags);
hci_debugfs_create_conn(bis);
hci_conn_add_sysfs(bis);
hci_iso_setup_path(bis);
}
}


@@ -1224,9 +1224,129 @@ static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
static int
hci_set_ext_adv_params_sync(struct hci_dev *hdev, struct adv_info *adv,
const struct hci_cp_le_set_ext_adv_params *cp,
struct hci_rp_le_set_ext_adv_params *rp)
{
struct sk_buff *skb;
skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(*cp),
cp, HCI_CMD_TIMEOUT);
/* If command return a status event, skb will be set to -ENODATA */
if (skb == ERR_PTR(-ENODATA))
return 0;
if (IS_ERR(skb)) {
bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld",
HCI_OP_LE_SET_EXT_ADV_PARAMS, PTR_ERR(skb));
return PTR_ERR(skb);
}
if (skb->len != sizeof(*rp)) {
bt_dev_err(hdev, "Invalid response length for 0x%4.4x: %u",
HCI_OP_LE_SET_EXT_ADV_PARAMS, skb->len);
kfree_skb(skb);
return -EIO;
}
memcpy(rp, skb->data, sizeof(*rp));
kfree_skb(skb);
if (!rp->status) {
hdev->adv_addr_type = cp->own_addr_type;
if (!cp->handle) {
/* Store in hdev for instance 0 */
hdev->adv_tx_power = rp->tx_power;
} else if (adv) {
adv->tx_power = rp->tx_power;
}
}
return rp->status;
}
static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
struct {
struct hci_cp_le_set_ext_adv_data cp;
u8 data[HCI_MAX_EXT_AD_LENGTH];
} pdu;
u8 len;
struct adv_info *adv = NULL;
int err;
memset(&pdu, 0, sizeof(pdu));
if (instance) {
adv = hci_find_adv_instance(hdev, instance);
if (!adv || !adv->adv_data_changed)
return 0;
}
len = eir_create_adv_data(hdev, instance, pdu.data);
pdu.cp.length = len;
pdu.cp.handle = instance;
pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
sizeof(pdu.cp) + len, &pdu.cp,
HCI_CMD_TIMEOUT);
if (err)
return err;
/* Update data if the command succeed */
if (adv) {
adv->adv_data_changed = false;
} else {
memcpy(hdev->adv_data, pdu.data, len);
hdev->adv_data_len = len;
}
return 0;
}
static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
struct hci_cp_le_set_adv_data cp;
u8 len;
memset(&cp, 0, sizeof(cp));
len = eir_create_adv_data(hdev, instance, cp.data);
/* There's nothing to do if the data hasn't changed */
if (hdev->adv_data_len == len &&
memcmp(cp.data, hdev->adv_data, len) == 0)
return 0;
memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
hdev->adv_data_len = len;
cp.length = len;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return 0;
if (ext_adv_capable(hdev))
return hci_set_ext_adv_data_sync(hdev, instance);
return hci_set_adv_data_sync(hdev, instance);
}
int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
{
struct hci_cp_le_set_ext_adv_params cp;
struct hci_rp_le_set_ext_adv_params rp;
bool connectable;
u32 flags;
bdaddr_t random_addr;
@@ -1247,7 +1367,7 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
* Command Disallowed error, so we must first disable the
* instance if it is active.
*/
if (adv && !adv->pending) {
if (adv) {
err = hci_disable_ext_adv_instance_sync(hdev, instance);
if (err)
return err;
@@ -1333,8 +1453,12 @@ int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
cp.secondary_phy = HCI_ADV_PHY_1M;
}
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
err = hci_set_ext_adv_params_sync(hdev, adv, &cp, &rp);
if (err)
return err;
/* Update adv data as tx power is known now */
err = hci_set_ext_adv_data_sync(hdev, cp.handle);
if (err)
return err;
@@ -1859,82 +1983,6 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
struct {
struct hci_cp_le_set_ext_adv_data cp;
u8 data[HCI_MAX_EXT_AD_LENGTH];
} pdu;
u8 len;
struct adv_info *adv = NULL;
int err;
memset(&pdu, 0, sizeof(pdu));
if (instance) {
adv = hci_find_adv_instance(hdev, instance);
if (!adv || !adv->adv_data_changed)
return 0;
}
len = eir_create_adv_data(hdev, instance, pdu.data);
pdu.cp.length = len;
pdu.cp.handle = instance;
pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
sizeof(pdu.cp) + len, &pdu.cp,
HCI_CMD_TIMEOUT);
if (err)
return err;
/* Update data if the command succeed */
if (adv) {
adv->adv_data_changed = false;
} else {
memcpy(hdev->adv_data, pdu.data, len);
hdev->adv_data_len = len;
}
return 0;
}
static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
struct hci_cp_le_set_adv_data cp;
u8 len;
memset(&cp, 0, sizeof(cp));
len = eir_create_adv_data(hdev, instance, cp.data);
/* There's nothing to do if the data hasn't changed */
if (hdev->adv_data_len == len &&
memcmp(cp.data, hdev->adv_data, len) == 0)
return 0;
memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
hdev->adv_data_len = len;
cp.length = len;
return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}
int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
return 0;
if (ext_adv_capable(hdev))
return hci_set_ext_adv_data_sync(hdev, instance);
return hci_set_adv_data_sync(hdev, instance);
}
int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
bool force)
{
@@ -6253,6 +6301,7 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
struct hci_conn *conn)
{
struct hci_cp_le_set_ext_adv_params cp;
struct hci_rp_le_set_ext_adv_params rp;
int err;
bdaddr_t random_addr;
u8 own_addr_type;
@@ -6294,8 +6343,12 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
if (err)
return err;
err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
sizeof(cp), &cp, HCI_CMD_TIMEOUT);
err = hci_set_ext_adv_params_sync(hdev, NULL, &cp, &rp);
if (err)
return err;
/* Update adv data as tx power is known now */
err = hci_set_ext_adv_data_sync(hdev, cp.handle);
if (err)
return err;
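The new helper centralizes the response contract its callers rely on:

    /* __hci_cmd_sync() contract as used above (sketch):
     *  - returns the Command Complete skb on success;
     *  - returns ERR_PTR(-ENODATA) when the controller answered with a
     *    bare Command Status event, which this opcode treats as success
     *    with nothing to parse;
     *  - a payload shorter than the expected rp is malformed (-EIO).
     */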


@@ -1135,7 +1135,7 @@ restart:
goto do_error;
while (msg_data_left(msg)) {
ssize_t copy = 0;
int copy = 0;
skb = tcp_write_queue_tail(sk);
if (skb)
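The promotion bug in miniature, as a stand-alone program (values are assumptions; unsigned int stands in for skb->len's type):

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            int size_goal = 1400;           /* desired bytes per skb */
            unsigned int skb_len = 2000;    /* skb already larger */

            /* int - unsigned int is computed as unsigned, so the
             * negative result wraps to a huge positive value ... */
            ssize_t as_ssize = size_goal - skb_len; /* 4294966696 */
            /* ... unless converted straight back to int (typical
             * two's-complement systems): */
            int as_int = size_goal - skb_len;       /* -600 */

            printf("ssize_t copy = %zd\n", as_ssize);
            printf("int copy     = %d\n", as_int);
            return 0;
    }

With ssize_t, the later 'copy <= 0' test never fires, so TCP believes the full skb still has room.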


@@ -3521,11 +3521,9 @@ static void addrconf_gre_config(struct net_device *dev)
ASSERT_RTNL();
idev = ipv6_find_idev(dev);
if (IS_ERR(idev)) {
pr_debug("%s: add_dev failed\n", __func__);
idev = addrconf_add_dev(dev);
if (IS_ERR(idev))
return;
}
/* Generate the IPv6 link-local address using addrconf_addr_gen(),
* unless we have an IPv4 GRE device not bound to an IP address and
@@ -3539,9 +3537,6 @@ static void addrconf_gre_config(struct net_device *dev)
}
add_v4_addrs(idev);
if (dev->flags & IFF_POINTOPOINT)
addrconf_add_mroute(dev);
}
#endif


@@ -387,7 +387,6 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
WARN_ON(skb->sk != NULL);
skb->sk = sk;
skb->destructor = netlink_skb_destructor;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
sk_mem_charge(sk, skb->truesize);
}
@@ -1227,41 +1226,48 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
long *timeo, struct sock *ssk)
{
DECLARE_WAITQUEUE(wait, current);
struct netlink_sock *nlk;
unsigned int rmem;
nlk = nlk_sk(sk);
rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(NETLINK_S_CONGESTED, &nlk->state))) {
DECLARE_WAITQUEUE(wait, current);
if (!*timeo) {
if (!ssk || netlink_is_kernel(ssk))
netlink_overrun(sk);
sock_put(sk);
kfree_skb(skb);
return -EAGAIN;
}
__set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&nlk->wait, &wait);
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
*timeo = schedule_timeout(*timeo);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&nlk->wait, &wait);
sock_put(sk);
if (signal_pending(current)) {
kfree_skb(skb);
return sock_intr_errno(*timeo);
}
return 1;
if ((rmem == skb->truesize || rmem < READ_ONCE(sk->sk_rcvbuf)) &&
!test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
netlink_skb_set_owner_r(skb, sk);
return 0;
}
netlink_skb_set_owner_r(skb, sk);
return 0;
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
if (!*timeo) {
if (!ssk || netlink_is_kernel(ssk))
netlink_overrun(sk);
sock_put(sk);
kfree_skb(skb);
return -EAGAIN;
}
__set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&nlk->wait, &wait);
rmem = atomic_read(&sk->sk_rmem_alloc);
if (((rmem && rmem + skb->truesize > READ_ONCE(sk->sk_rcvbuf)) ||
test_bit(NETLINK_S_CONGESTED, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
*timeo = schedule_timeout(*timeo);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&nlk->wait, &wait);
sock_put(sk);
if (signal_pending(current)) {
kfree_skb(skb);
return sock_intr_errno(*timeo);
}
return 1;
}
static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
@@ -1321,6 +1327,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
ret = -ECONNREFUSED;
if (nlk->netlink_rcv != NULL) {
ret = skb->len;
atomic_add(skb->truesize, &sk->sk_rmem_alloc);
netlink_skb_set_owner_r(skb, sk);
NETLINK_CB(skb).sk = ssk;
netlink_deliver_tap_kernel(sk, ssk, skb);
@@ -1397,13 +1404,19 @@ EXPORT_SYMBOL_GPL(netlink_strict_get_check);
static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
struct netlink_sock *nlk = nlk_sk(sk);
unsigned int rmem, rcvbuf;
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
rcvbuf = READ_ONCE(sk->sk_rcvbuf);
if ((rmem == skb->truesize || rmem <= rcvbuf) &&
!test_bit(NETLINK_S_CONGESTED, &nlk->state)) {
netlink_skb_set_owner_r(skb, sk);
__netlink_sendskb(sk, skb);
return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
return rmem > (rcvbuf >> 1);
}
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
return -1;
}
@@ -2190,6 +2203,7 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
struct netlink_ext_ack extack = {};
struct netlink_callback *cb;
struct sk_buff *skb = NULL;
unsigned int rmem, rcvbuf;
size_t max_recvmsg_len;
struct module *module;
int err = -ENOBUFS;
@@ -2203,9 +2217,6 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
goto errout_skb;
}
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
goto errout_skb;
/* NLMSG_GOODSIZE is small to avoid high order allocations being
* required, but it makes sense to _attempt_ a 16K bytes allocation
* to reduce number of system calls on dump operations, if user
@@ -2228,6 +2239,13 @@ static int netlink_dump(struct sock *sk, bool lock_taken)
if (!skb)
goto errout_skb;
rcvbuf = READ_ONCE(sk->sk_rcvbuf);
rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
if (rmem != skb->truesize && rmem >= rcvbuf) {
atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
goto errout_skb;
}
/* Trim skb to allocated size. User is expected to provide buffer as
* large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at
* netlink_recvmsg())). dump will pack as many smaller messages as
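Every receive path in this patch follows the same wraparound-safe shape; distilled to a sketch:

    /* charge first, then test the post-add value, back out on failure */
    rmem = atomic_add_return(skb->truesize, &sk->sk_rmem_alloc);
    if (rmem == skb->truesize ||            /* queue was empty: accept */
        rmem <= READ_ONCE(sk->sk_rcvbuf)) {
            netlink_skb_set_owner_r(skb, sk);       /* keep the charge */
    } else {
            atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
            /* reject, or sleep and retry */
    }

Testing the value returned by the add, instead of re-reading the counter after a separate atomic_add(), closes the window in which many concurrent senders could all pass a read-then-charge check and push sk_rmem_alloc past INT_MAX, wrapping it negative.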


@@ -149,6 +149,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
id_in_use:
write_unlock(&rx->call_lock);
rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EBADSLT);
rxrpc_cleanup_call(call);
_leave(" = -EBADSLT");
return -EBADSLT;
@@ -253,6 +254,9 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
unsigned short call_tail, conn_tail, peer_tail;
unsigned short call_count, conn_count;
if (!b)
return NULL;
/* #calls >= #conns >= #peers must hold true. */
call_head = smp_load_acquire(&b->call_backlog_head);
call_tail = b->call_backlog_tail;
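Two separate fixes meet in this file: the id_in_use path previously marked the call dead with rxrpc_prefail_call() but never released what rxrpc_cleanup_call() frees, and the new NULL check covers a service socket whose preallocation backlog was never set up:

    /* Sketch: without the check, an incoming call on a socket that
     * never ran rxrpc_service_prealloc() would dereference a NULL
     * backlog pointer below. */
    if (!b)
            return NULL;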
