Merge bceae1daf3 ("x86/traps: Initialize DR6 by writing its architectural reset value") into android15-6.6-lts

Steps on the way to 6.6.97

Resolves merge conflicts in:
	include/net/bluetooth/hci_core.h

Change-Id: I5ccd718e029617df074d9e4ad65d9e841c406516
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Author: Greg Kroah-Hartman
Date: 2025-07-14 12:20:25 +00:00
29 changed files with 235 additions and 100 deletions

View File

@@ -15,7 +15,26 @@
which debugging register was responsible for the trap. The other bits
are either reserved or not of interest to us. */
-/* Define reserved bits in DR6 which are always set to 1 */
+/*
+ * Define bits in DR6 which are set to 1 by default.
+ *
+ * This is also the DR6 architectural value following Power-up, Reset or INIT.
+ *
+ * Note, with the introduction of Bus Lock Detection (BLD) and Restricted
+ * Transactional Memory (RTM), the DR6 register has been modified:
+ *
+ * 1) BLD flag (bit 11) is no longer reserved to 1 if the CPU supports
+ * Bus Lock Detection. The assertion of a bus lock could clear it.
+ *
+ * 2) RTM flag (bit 16) is no longer reserved to 1 if the CPU supports
+ * restricted transactional memory. #DB occurred inside an RTM region
+ * could clear it.
+ *
+ * Apparently, DR6.BLD and DR6.RTM are active low bits.
+ *
+ * As a result, DR6_RESERVED is an incorrect name now, but it is kept for
+ * compatibility.
+ */
#define DR6_RESERVED (0xFFFF0FF0)
#define DR_TRAP0 (0x1) /* db0 */

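Since BLD and RTM are active-low, a raw DR6 value cannot be consumed directly; the handler XORs it with DR6_RESERVED so that every set bit then means "this condition fired". A standalone demonstration of that flip (plain C with made-up register values, not kernel code; DR6_RESERVED copied from the header above):

#include <stdio.h>

#define DR6_RESERVED 0xFFFF0FF0UL

int main(void)
{
	/* Raw DR6 as a bus-lock #DB that also hit breakpoint 0 might leave
	 * it: the reset value, plus B0 set, minus the active-low BLD bit. */
	unsigned long raw = (DR6_RESERVED | 0x1) & ~(1UL << 11);
	unsigned long dr6 = raw ^ DR6_RESERVED;	/* flip to positive polarity */

	/* Prints raw=0xffff07f1 positive=0x801: bits 0 (B0) and 11 (BLD). */
	printf("raw=0x%lx positive=0x%lx\n", raw, dr6);
	return 0;
}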
View File

@@ -2218,17 +2218,16 @@ EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
/*
* Clear all 6 debug registers:
*/
-static void clear_all_debug_regs(void)
+static void initialize_debug_regs(void)
{
-int i;
-for (i = 0; i < 8; i++) {
-/* Ignore db4, db5 */
-if ((i == 4) || (i == 5))
-continue;
-set_debugreg(0, i);
-}
+/* Control register first -- to make sure everything is disabled. */
+set_debugreg(0, 7);
+set_debugreg(DR6_RESERVED, 6);
+/* dr5 and dr4 don't exist */
+set_debugreg(0, 3);
+set_debugreg(0, 2);
+set_debugreg(0, 1);
+set_debugreg(0, 0);
}
#ifdef CONFIG_KGDB
@@ -2371,7 +2370,7 @@ void cpu_init(void)
load_mm_ldt(&init_mm);
-clear_all_debug_regs();
+initialize_debug_regs();
dbg_restore_debug_regs();
doublefault_init_cpu_tss();

View File

@@ -975,24 +975,32 @@ static bool is_sysenter_singlestep(struct pt_regs *regs)
#endif
}
-static __always_inline unsigned long debug_read_clear_dr6(void)
+static __always_inline unsigned long debug_read_reset_dr6(void)
{
unsigned long dr6;
+get_debugreg(dr6, 6);
+dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
/*
* The Intel SDM says:
*
-* Certain debug exceptions may clear bits 0-3. The remaining
-* contents of the DR6 register are never cleared by the
-* processor. To avoid confusion in identifying debug
-* exceptions, debug handlers should clear the register before
-* returning to the interrupted task.
+* Certain debug exceptions may clear bits 0-3 of DR6.
*
-* Keep it simple: clear DR6 immediately.
+* BLD induced #DB clears DR6.BLD and any other debug
+* exception doesn't modify DR6.BLD.
+*
+* RTM induced #DB clears DR6.RTM and any other debug
+* exception sets DR6.RTM.
+*
+* To avoid confusion in identifying debug exceptions,
+* debug handlers should set DR6.BLD and DR6.RTM, and
+* clear other DR6 bits before returning.
+*
+* Keep it simple: write DR6 with its architectural reset
+* value 0xFFFF0FF0, defined as DR6_RESERVED, immediately.
*/
-get_debugreg(dr6, 6);
set_debugreg(DR6_RESERVED, 6);
-dr6 ^= DR6_RESERVED; /* Flip to positive polarity */
return dr6;
}
@@ -1188,19 +1196,19 @@ out:
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
-exc_debug_kernel(regs, debug_read_clear_dr6());
+exc_debug_kernel(regs, debug_read_reset_dr6());
}
/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
-exc_debug_user(regs, debug_read_clear_dr6());
+exc_debug_user(regs, debug_read_reset_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
-unsigned long dr6 = debug_read_clear_dr6();
+unsigned long dr6 = debug_read_reset_dr6();
if (user_mode(regs))
exc_debug_user(regs, dr6);

View File

@@ -2013,7 +2013,7 @@ static int eb_capture_stage(struct i915_execbuffer *eb)
continue;
if (i915_gem_context_is_recoverable(eb->gem_context) &&
-GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 10))
+(IS_DGFX(eb->i915) || GRAPHICS_VER_FULL(eb->i915) > IP_VER(12, 0)))
return -EINVAL;
for_each_batch_create_order(eb, j) {

View File

@@ -298,7 +298,7 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
if (gt->gsc.intf[intf_id].irq < 0)
return;
-ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
+ret = generic_handle_irq_safe(gt->gsc.intf[intf_id].irq);
if (ret)
drm_err_ratelimited(&gt->i915->drm, "error handling GSC irq: %d\n", ret);
}

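generic_handle_irq() may only be invoked from hard-interrupt context with interrupts disabled; generic_handle_irq_safe() does its own local_irq_save() and is therefore legal from threaded handlers and workqueues, which is the situation this GSC interrupt relay runs in. A minimal kernel-style sketch of the calling pattern (demo_thread_fn and the demuxed child IRQ are hypothetical, not driver symbols):

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/printk.h>

/* Hypothetical IRQ thread that demuxes to a child interrupt. */
static irqreturn_t demo_thread_fn(int irq, void *data)
{
	int child_irq = *(int *)data;	/* hypothetical child IRQ number */
	int ret;

	/* Safe from any context; plain generic_handle_irq() would not be. */
	ret = generic_handle_irq_safe(child_irq);
	if (ret)
		pr_err_ratelimited("demo: error handling child irq: %d\n", ret);

	return IRQ_HANDLED;
}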
View File

@@ -573,7 +573,6 @@ static int ring_context_alloc(struct intel_context *ce)
/* One ringbuffer to rule them all */
GEM_BUG_ON(!engine->legacy.ring);
ce->ring = engine->legacy.ring;
-ce->timeline = intel_timeline_get(engine->legacy.timeline);
GEM_BUG_ON(ce->state);
if (engine->context_size) {
@@ -586,6 +585,8 @@ static int ring_context_alloc(struct intel_context *ce)
ce->state = vma;
}
+ce->timeline = intel_timeline_get(engine->legacy.timeline);
return 0;
}

View File

@@ -1373,6 +1373,8 @@
#define MDIO_VEND2_CTRL1_SS13 BIT(13)
#endif
+#define XGBE_VEND2_MAC_AUTO_SW BIT(9)
/* MDIO mask values */
#define XGBE_AN_CL73_INT_CMPLT BIT(0)
#define XGBE_AN_CL73_INC_LINK BIT(1)

View File

@@ -375,6 +375,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
reg |= MDIO_VEND2_CTRL1_AN_RESTART;
XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL);
+reg |= XGBE_VEND2_MAC_AUTO_SW;
+XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg);
}
static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
@@ -1003,6 +1007,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
(pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
+reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+reg &= ~MDIO_AN_CTRL1_ENABLE;
+XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
}
static void xgbe_an73_init(struct xgbe_prv_data *pdata)
@@ -1404,6 +1413,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
&an_restart);
+/* bail out if the link status register read fails */
+if (pdata->phy.link < 0)
+return;
if (an_restart) {
xgbe_phy_config_aneg(pdata);
goto adjust_link;

View File

@@ -2855,8 +2855,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
-unsigned int reg;
-int ret;
+int reg, ret;
*an_restart = 0;
@@ -2890,11 +2889,20 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
-/* Link status is latched low, so read once to clear
-* and then read again to get current state
-*/
-reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+if (reg < 0)
+return reg;
+/* Link status is latched low so that momentary link drops
+* can be detected. If link was already down read again
+* to get the latest state.
+*/
+if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) {
+reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+if (reg < 0)
+return reg;
+}
if (pdata->en_rx_adap) {
/* if the link is available and adaptation is done,
@@ -2913,9 +2921,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
xgbe_phy_set_mode(pdata, phy_data->cur_mode);
}
-/* check again for the link and adaptation status */
-reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
-if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+if (pdata->rx_adapt_done)
return 1;
} else if (reg & MDIO_STAT1_LSTATUS)
return 1;

View File

@@ -292,12 +292,12 @@
#define XGBE_LINK_TIMEOUT 5
#define XGBE_KR_TRAINING_WAIT_ITER 50
-#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define XGBE_SGMII_AN_LINK_DUPLEX BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
#define XGBE_SGMII_AN_LINK_SPEED_10 0x00
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
-#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+#define XGBE_SGMII_AN_LINK_STATUS BIT(4)
/* ECC correctable error notification window (seconds) */
#define XGBE_ECC_LIMIT 60

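The last two defines swap places: on this hardware the duplex flag lives in bit 1 and the link-up flag in bit 4, not the other way around. A small standalone sketch showing how the corrected layout decodes an AN status word (plain C; decode_sgmii_an and the sample value are illustrative, not driver code):

#include <stdio.h>

#define XGBE_SGMII_AN_LINK_DUPLEX     (1u << 1)
#define XGBE_SGMII_AN_LINK_SPEED      ((1u << 2) | (1u << 3))
#define XGBE_SGMII_AN_LINK_SPEED_100  0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
#define XGBE_SGMII_AN_LINK_STATUS     (1u << 4)

static void decode_sgmii_an(unsigned int lstat)
{
	printf("link=%s duplex=%s speed=%s\n",
	       (lstat & XGBE_SGMII_AN_LINK_STATUS) ? "up" : "down",
	       (lstat & XGBE_SGMII_AN_LINK_DUPLEX) ? "full" : "half",
	       (lstat & XGBE_SGMII_AN_LINK_SPEED) ==
			XGBE_SGMII_AN_LINK_SPEED_1000 ? "1000" :
	       (lstat & XGBE_SGMII_AN_LINK_SPEED) ==
			XGBE_SGMII_AN_LINK_SPEED_100 ? "100" : "10");
}

int main(void)
{
	decode_sgmii_an(0x1a);	/* prints: link=up duplex=full speed=1000 */
	return 0;
}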
View File

@@ -2491,6 +2491,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
{
struct bnxt_napi *bnapi = cpr->bnapi;
u32 raw_cons = cpr->cp_raw_cons;
+bool flush_xdp = false;
u32 cons;
int tx_pkts = 0;
int rx_pkts = 0;
@@ -2528,6 +2529,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
else
rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
&event);
+if (event & BNXT_REDIRECT_EVENT)
+flush_xdp = true;
if (likely(rc >= 0))
rx_pkts += rc;
/* Increment rx_pkts when rc is -ENOMEM to count towards
@@ -2555,8 +2558,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
-if (event & BNXT_REDIRECT_EVENT)
+if (flush_xdp) {
xdp_do_flush();
+event &= ~BNXT_REDIRECT_EVENT;
+}
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring;

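The fix latches the redirect event into a local flush_xdp flag so that xdp_do_flush() still runs exactly once after the loop, even though the event mask is consumed per iteration. A simplified sketch of the shape of that pattern (demo_ring, demo_rx_one and DEMO_REDIRECT_EVENT are hypothetical stand-ins, not bnxt symbols):

#include <linux/filter.h>	/* xdp_do_flush() */
#include <linux/bits.h>

#define DEMO_REDIRECT_EVENT	BIT(0)

struct demo_ring { int dummy; };		/* hypothetical ring state */

/* Hypothetical per-entry receive step; pretends every packet redirected. */
static int demo_rx_one(struct demo_ring *cpr, unsigned int *event)
{
	*event |= DEMO_REDIRECT_EVENT;
	return 1;
}

static int demo_poll_work(struct demo_ring *cpr, int budget)
{
	bool flush_xdp = false;
	unsigned int event = 0;
	int rx_pkts = 0;

	while (rx_pkts < budget) {
		rx_pkts += demo_rx_one(cpr, &event);
		if (event & DEMO_REDIRECT_EVENT)
			flush_xdp = true;	/* latch; don't flush per packet */
		event &= ~DEMO_REDIRECT_EVENT;
	}

	if (flush_xdp)
		xdp_do_flush();		/* exactly one flush per poll */

	return rx_pkts;
}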
View File

@@ -2057,10 +2057,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
-if (netdev->mtu > enic->port_mtu)
+if (new_mtu > enic->port_mtu)
netdev_warn(netdev,
"interface MTU (%d) set higher than port MTU (%d)\n",
-netdev->mtu, enic->port_mtu);
+new_mtu, enic->port_mtu);
return _enic_change_mtu(netdev, new_mtu);
}

View File

@@ -6772,6 +6772,10 @@ static int igc_probe(struct pci_dev *pdev,
adapter->port_num = hw->bus.func;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+/* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+if (igc_is_device_id_i226(hw))
+pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
err = pci_save_state(pdev);
if (err)
goto err_ioremap;
@@ -7144,6 +7148,9 @@ static int __maybe_unused igc_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+if (igc_is_device_id_i226(hw))
+pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
if (igc_init_interrupt_scheme(adapter, true)) {
netdev_err(netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
@@ -7259,6 +7266,9 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
+if (igc_is_device_id_i226(hw))
+pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2);
/* In case of PCI error, adapter loses its HW address
* so we should re-assign it here.
*/

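pci_disable_link_state() masks chosen ASPM (sub)states for a single device, so the calls above forbid only the problematic L1.2 substate on I226 while leaving the other power states usable. A hedged fragment of the call pattern (demo_block_l1_2 is illustrative, not driver code):

#include <linux/pci.h>

/* Forbid only the ASPM L1.2 substate; L0s/L1 power saving stays intact. */
static void demo_block_l1_2(struct pci_dev *pdev)
{
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
}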
View File

@@ -3336,7 +3336,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
addr = np->ops->map_page(np->device, page, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
-if (!addr) {
+if (np->ops->mapping_error(np->device, addr)) {
__free_page(page);
return -ENOMEM;
}
@@ -6672,6 +6672,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
len = skb_headlen(skb);
mapping = np->ops->map_single(np->device, skb->data,
len, DMA_TO_DEVICE);
+if (np->ops->mapping_error(np->device, mapping))
+goto out_drop;
prod = rp->prod;
@@ -6713,6 +6715,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
mapping = np->ops->map_page(np->device, skb_frag_page(frag),
skb_frag_off(frag), len,
DMA_TO_DEVICE);
+if (np->ops->mapping_error(np->device, mapping))
+goto out_unmap;
rp->tx_buffs[prod].skb = NULL;
rp->tx_buffs[prod].mapping = mapping;
@@ -6737,6 +6741,19 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
out:
return NETDEV_TX_OK;
+out_unmap:
+while (i--) {
+const skb_frag_t *frag;
+prod = PREVIOUS_TX(rp, prod);
+frag = &skb_shinfo(skb)->frags[i];
+np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping,
+skb_frag_size(frag), DMA_TO_DEVICE);
+}
+np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping,
+skb_headlen(skb), DMA_TO_DEVICE);
out_drop:
rp->tx_errors++;
kfree_skb(skb);
@@ -9636,6 +9653,11 @@ static void niu_pci_unmap_single(struct device *dev, u64 dma_address,
dma_unmap_single(dev, dma_address, size, direction);
}
+static int niu_pci_mapping_error(struct device *dev, u64 addr)
+{
+return dma_mapping_error(dev, addr);
+}
static const struct niu_ops niu_pci_ops = {
.alloc_coherent = niu_pci_alloc_coherent,
.free_coherent = niu_pci_free_coherent,
@@ -9643,6 +9665,7 @@ static const struct niu_ops niu_pci_ops = {
.unmap_page = niu_pci_unmap_page,
.map_single = niu_pci_map_single,
.unmap_single = niu_pci_unmap_single,
+.mapping_error = niu_pci_mapping_error,
};
static void niu_driver_version(void)
@@ -10009,6 +10032,11 @@ static void niu_phys_unmap_single(struct device *dev, u64 dma_address,
/* Nothing to do. */
}
+static int niu_phys_mapping_error(struct device *dev, u64 dma_address)
+{
+return false;
+}
static const struct niu_ops niu_phys_ops = {
.alloc_coherent = niu_phys_alloc_coherent,
.free_coherent = niu_phys_free_coherent,
@@ -10016,6 +10044,7 @@ static const struct niu_ops niu_phys_ops = {
.unmap_page = niu_phys_unmap_page,
.map_single = niu_phys_map_single,
.unmap_single = niu_phys_unmap_single,
+.mapping_error = niu_phys_mapping_error,
};
static int niu_of_probe(struct platform_device *op)

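The core pattern these niu changes adopt: a DMA address returned by dma_map_single()/dma_map_page() is only usable once dma_mapping_error() says so, and anything mapped earlier must be unwound when a later mapping fails. A kernel-style sketch of the check itself (demo_map_skb_head is a hypothetical helper, not driver code):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int demo_map_skb_head(struct device *dev, void *buf, size_t len,
			     dma_addr_t *mapping)
{
	*mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *mapping))
		return -ENOMEM;	/* never hand a bad address to hardware */

	return 0;
}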
View File

@@ -2879,6 +2879,9 @@ struct tx_ring_info {
#define NEXT_TX(tp, index) \
(((index) + 1) < (tp)->pending ? ((index) + 1) : 0)
+#define PREVIOUS_TX(tp, index) \
+(((index) - 1) >= 0 ? ((index) - 1) : (((tp)->pending) - 1))
static inline u32 niu_tx_avail(struct tx_ring_info *tp)
{
return (tp->pending -
@@ -3140,6 +3143,7 @@ struct niu_ops {
enum dma_data_direction direction);
void (*unmap_single)(struct device *dev, u64 dma_address,
size_t size, enum dma_data_direction direction);
+int (*mapping_error)(struct device *dev, u64 dma_address);
};
struct niu_link_config {

View File

@@ -4229,8 +4229,6 @@ static void lan78xx_disconnect(struct usb_interface *intf)
if (!dev)
return;
-netif_napi_del(&dev->napi);
udev = interface_to_usbdev(intf);
net = dev->net;

View File

@@ -154,9 +154,10 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
struct btrfs_ordered_extent *entry;
int ret;
u64 qgroup_rsv = 0;
+const bool is_nocow = (flags &
+((1U << BTRFS_ORDERED_NOCOW) | (1U << BTRFS_ORDERED_PREALLOC)));
-if (flags &
-((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
+if (is_nocow) {
/* For nocow write, we can release the qgroup rsv right now */
ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes, &qgroup_rsv);
if (ret < 0)
@@ -171,8 +172,13 @@ static struct btrfs_ordered_extent *alloc_ordered_extent(
return ERR_PTR(ret);
}
entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
-if (!entry)
+if (!entry) {
+if (!is_nocow)
+btrfs_qgroup_free_refroot(inode->root->fs_info,
+btrfs_root_id(inode->root),
+qgroup_rsv, BTRFS_QGROUP_RSV_DATA);
return ERR_PTR(-ENOMEM);
+}
entry->file_offset = file_offset;
entry->num_bytes = num_bytes;

View File

@@ -743,6 +743,7 @@ struct TCP_Server_Info {
__le32 session_key_id; /* retrieved from negotiate response and send in session setup request */
struct session_key session_key;
unsigned long lstrp; /* when we got last response from this server */
+unsigned long neg_start; /* when negotiate started (jiffies) */
struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
#define CIFS_NEGFLAVOR_UNENCAP 1 /* wct == 17, but no ext_sec */
#define CIFS_NEGFLAVOR_EXTENDED 2 /* wct == 17, ext_sec bit set */
@@ -1268,6 +1269,7 @@ struct cifs_tcon {
bool use_persistent:1; /* use persistent instead of durable handles */
bool no_lease:1; /* Do not request leases on files or directories */
bool use_witness:1; /* use witness protocol */
+bool dummy:1; /* dummy tcon used for reconnecting channels */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;

View File

@@ -677,12 +677,12 @@ server_unresponsive(struct TCP_Server_Info *server)
/*
* If we're in the process of mounting a share or reconnecting a session
* and the server abruptly shut down (e.g. socket wasn't closed, packet
-* had been ACK'ed but no SMB response), don't wait longer than 20s to
-* negotiate protocol.
+* had been ACK'ed but no SMB response), don't wait longer than 20s from
+* when negotiate actually started.
*/
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsInNegotiate &&
-time_after(jiffies, server->lstrp + 20 * HZ)) {
+time_after(jiffies, server->neg_start + 20 * HZ)) {
spin_unlock(&server->srv_lock);
cifs_reconnect(server, false);
return true;
@@ -3998,6 +3998,7 @@ retry:
server->lstrp = jiffies;
server->tcpStatus = CifsInNegotiate;
+server->neg_start = jiffies;
spin_unlock(&server->srv_lock);
rc = server->ops->negotiate(xid, ses, server);

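lstrp is bumped by every received response, so a connection that last saw traffic long before negotiation began could trip the 20s check immediately; stamping neg_start exactly when the state moves to CifsInNegotiate gives the timeout a stable origin. A hedged fragment of the wrap-safe jiffies comparison (demo_negotiate_timed_out is illustrative):

#include <linux/jiffies.h>
#include <linux/types.h>

/* True once 20 seconds of jiffies have elapsed since negotiate began. */
static bool demo_negotiate_timed_out(unsigned long neg_start)
{
	return time_after(jiffies, neg_start + 20 * HZ);
}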
View File

@@ -437,7 +437,7 @@ skip_sess_setup:
free_xid(xid);
ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
-/* regardless of rc value, setup polling */
+if (!tcon->ipc && !tcon->dummy)
queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
(SMB_INTERFACE_POLL_INTERVAL * HZ));
@@ -4228,10 +4228,8 @@ void smb2_reconnect_server(struct work_struct *work)
}
goto done;
}
-tcon->status = TID_GOOD;
-tcon->retry = false;
-tcon->need_reconnect = false;
+tcon->dummy = true;
/* now reconnect sessions for necessary channels */
list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {

View File

@@ -114,7 +114,7 @@ DECLARE_EVENT_CLASS(smb3_rw_err_class,
__entry->len = len;
__entry->rc = rc;
),
-TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
+TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
__entry->offset, __entry->len, __entry->rc)
)
@@ -247,7 +247,7 @@ DECLARE_EVENT_CLASS(smb3_fd_class,
__entry->tid = tid;
__entry->sesid = sesid;
),
-TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx",
+TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx",
__entry->xid, __entry->sesid, __entry->tid, __entry->fid)
)
@@ -286,7 +286,7 @@ DECLARE_EVENT_CLASS(smb3_fd_err_class,
__entry->sesid = sesid;
__entry->rc = rc;
),
-TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d",
+TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d",
__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
__entry->rc)
)
@@ -558,7 +558,7 @@ DECLARE_EVENT_CLASS(smb3_cmd_err_class,
__entry->status = status;
__entry->rc = rc;
),
-TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d",
+TP_printk("sid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d",
__entry->sesid, __entry->tid, __entry->cmd, __entry->mid,
__entry->status, __entry->rc)
)
@@ -593,7 +593,7 @@ DECLARE_EVENT_CLASS(smb3_cmd_done_class,
__entry->cmd = cmd;
__entry->mid = mid;
),
-TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu",
+TP_printk("sid=0x%llx tid=0x%x cmd=%u mid=%llu",
__entry->sesid, __entry->tid,
__entry->cmd, __entry->mid)
)
@@ -631,7 +631,7 @@ DECLARE_EVENT_CLASS(smb3_mid_class,
__entry->when_sent = when_sent;
__entry->when_received = when_received;
),
-TP_printk("\tcmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu",
+TP_printk("cmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu",
__entry->cmd, __entry->mid, __entry->pid, __entry->when_sent,
__entry->when_received)
)
@@ -662,7 +662,7 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class,
__assign_str(func_name, func_name);
__entry->rc = rc;
),
-TP_printk("\t%s: xid=%u rc=%d",
+TP_printk("%s: xid=%u rc=%d",
__get_str(func_name), __entry->xid, __entry->rc)
)
@@ -688,7 +688,7 @@ DECLARE_EVENT_CLASS(smb3_sync_err_class,
__entry->ino = ino;
__entry->rc = rc;
),
-TP_printk("\tino=%lu rc=%d",
+TP_printk("ino=%lu rc=%d",
__entry->ino, __entry->rc)
)
@@ -714,7 +714,7 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class,
__entry->xid = xid;
__assign_str(func_name, func_name);
),
-TP_printk("\t%s: xid=%u",
+TP_printk("%s: xid=%u",
__get_str(func_name), __entry->xid)
)

View File

@@ -29,6 +29,7 @@
#include <linux/idr.h>
#include <linux/leds.h>
#include <linux/rculist.h>
+#include <linux/srcu.h>
#include <linux/android_kabi.h>
#include <net/bluetooth/hci.h>
@@ -349,6 +350,7 @@ struct amp_assoc {
struct hci_dev {
struct list_head list;
+struct srcu_struct srcu;
struct mutex lock;
struct ida unset_handle_ida;

View File

@@ -899,8 +899,10 @@ static int check_expect_hints_stats(struct objagg_hints *objagg_hints,
int err;
stats = objagg_hints_stats_get(objagg_hints);
-if (IS_ERR(stats))
+if (IS_ERR(stats)) {
+*errmsg = "objagg_hints_stats_get() failed.";
return PTR_ERR(stats);
+}
err = __check_expect_stats(stats, expect_stats, errmsg);
objagg_stats_put(stats);
return err;

View File

@@ -65,7 +65,7 @@ static DEFINE_IDA(hci_index_ida);
/* Get HCI device by index.
* Device is held on return. */
-struct hci_dev *hci_dev_get(int index)
+static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
{
struct hci_dev *hdev = NULL, *d;
@@ -78,6 +78,8 @@ struct hci_dev *hci_dev_get(int index)
list_for_each_entry(d, &hci_dev_list, list) {
if (d->id == index) {
hdev = hci_dev_hold(d);
+if (srcu_index)
+*srcu_index = srcu_read_lock(&d->srcu);
break;
}
}
@@ -85,6 +87,22 @@ struct hci_dev *hci_dev_get(int index)
return hdev;
}
+struct hci_dev *hci_dev_get(int index)
+{
+return __hci_dev_get(index, NULL);
+}
+static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
+{
+return __hci_dev_get(index, srcu_index);
+}
+static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
+{
+srcu_read_unlock(&hdev->srcu, srcu_index);
+hci_dev_put(hdev);
+}
/* ---- Inquiry support ---- */
bool hci_discovery_active(struct hci_dev *hdev)
@@ -590,9 +608,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
int hci_dev_reset(__u16 dev)
{
struct hci_dev *hdev;
-int err;
+int err, srcu_index;
-hdev = hci_dev_get(dev);
+hdev = hci_dev_get_srcu(dev, &srcu_index);
if (!hdev)
return -ENODEV;
@@ -614,7 +632,7 @@ int hci_dev_reset(__u16 dev)
err = hci_dev_do_reset(hdev);
done:
-hci_dev_put(hdev);
+hci_dev_put_srcu(hdev, srcu_index);
return err;
}
@@ -2424,6 +2442,11 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
if (!hdev)
return NULL;
+if (init_srcu_struct(&hdev->srcu)) {
+kfree(hdev);
+return NULL;
+}
hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
hdev->esco_type = (ESCO_HV1);
hdev->link_mode = (HCI_LM_ACCEPT);
@@ -2669,6 +2692,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
+synchronize_srcu(&hdev->srcu);
+cleanup_srcu_struct(&hdev->srcu);
cancel_work_sync(&hdev->rx_work);
cancel_work_sync(&hdev->cmd_work);
cancel_work_sync(&hdev->tx_work);

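The pattern introduced here: readers take a sleepable SRCU read lock keyed to the hdev, and hci_unregister_dev() blocks in synchronize_srcu() until every such reader has dropped it, so a reset in flight can no longer race with unregistration. A simplified, hypothetical sketch of the same lifecycle (demo_dev is illustrative, not Bluetooth code):

#include <linux/srcu.h>

struct demo_dev {				/* hypothetical device */
	struct srcu_struct srcu;		/* init_srcu_struct() at alloc */
};

static int demo_reset(struct demo_dev *d)
{
	int idx = srcu_read_lock(&d->srcu);	/* sleepable read section */

	/* ... reset work: the device cannot finish unregistering here ... */

	srcu_read_unlock(&d->srcu, idx);
	return 0;
}

static void demo_unregister(struct demo_dev *d)
{
	synchronize_srcu(&d->srcu);	/* wait out all readers like the above */
	cleanup_srcu_struct(&d->srcu);
}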
View File

@@ -89,11 +89,11 @@ ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local,
lockdep_assert_held(&local->chanctx_mtx);
+if (WARN_ON(!compat))
+return NULL;
list_for_each_entry(link, &ctx->reserved_links,
reserved_chanctx_list) {
-if (!compat)
-compat = &link->reserved_chandef;
compat = cfg80211_chandef_compatible(&link->reserved_chandef,
compat);
if (!compat)

View File

@@ -1186,6 +1186,15 @@ ieee80211_vif_get_shift(struct ieee80211_vif *vif)
return shift;
}
+#define for_each_link_data(sdata, __link) \
+struct ieee80211_sub_if_data *__sdata = sdata; \
+for (int __link_id = 0; \
+__link_id < ARRAY_SIZE((__sdata)->link); __link_id++) \
+if ((!(__sdata)->vif.valid_links || \
+(__sdata)->vif.valid_links & BIT(__link_id)) && \
+((__link) = sdata_dereference((__sdata)->link[__link_id], \
+(__sdata))))
static inline int
ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems,
struct cfg80211_rnr_elems *rnr_elems,

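Usage-wise, the macro declares its own __sdata shadow and walks every link index, entering the body only for links that are valid (or for link 0 on a non-MLO vif) and dereference non-NULL. A hedged caller sketch, assuming mac80211's private header (demo_walk_links is illustrative):

#include "ieee80211_i.h"	/* private mac80211 header defining the macro */

static void demo_walk_links(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_link_data *link;

	for_each_link_data(sdata, link) {
		/* Body runs only when the link dereferenced non-NULL. */
		pr_debug("visiting link %u\n", link->link_id);
	}
}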
View File

@@ -28,8 +28,16 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
if (link_id < 0)
link_id = 0;
-rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
-rcu_assign_pointer(sdata->link[link_id], link);
+if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+struct ieee80211_sub_if_data *ap_bss;
+struct ieee80211_bss_conf *ap_bss_conf;
+ap_bss = container_of(sdata->bss,
+struct ieee80211_sub_if_data, u.ap);
+ap_bss_conf = sdata_dereference(ap_bss->vif.link_conf[link_id],
+ap_bss);
+memcpy(link_conf, ap_bss_conf, sizeof(*link_conf));
+}
link->sdata = sdata;
link->link_id = link_id;
@@ -65,6 +73,9 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
ieee80211_link_debugfs_add(link);
}
+rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
+rcu_assign_pointer(sdata->link[link_id], link);
}
void ieee80211_link_stop(struct ieee80211_link_data *link)

View File

@@ -497,22 +497,15 @@ void rose_rt_device_down(struct net_device *dev)
t = rose_node;
rose_node = rose_node->next;
-for (i = 0; i < t->count; i++) {
+for (i = t->count - 1; i >= 0; i--) {
if (t->neighbour[i] != s)
continue;
t->count--;
-switch (i) {
-case 0:
-t->neighbour[0] = t->neighbour[1];
-fallthrough;
-case 1:
-t->neighbour[1] = t->neighbour[2];
-break;
-case 2:
-break;
-}
+memmove(&t->neighbour[i], &t->neighbour[i + 1],
+sizeof(t->neighbour[0]) *
+(t->count - i));
}
if (t->count <= 0)

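The old switch could only shift a three-entry array and left a stale tail slot behind; iterating from the top and closing each hole with one memmove() handles any count. A standalone C demo of the same removal loop (toy values, not ROSE data):

#include <stdio.h>
#include <string.h>

int main(void)
{
	int neigh[5] = { 10, 20, 10, 30, 10 };
	int count = 5, victim = 10;

	/* Walk backwards so earlier entries never shift under the cursor. */
	for (int i = count - 1; i >= 0; i--) {
		if (neigh[i] != victim)
			continue;
		count--;
		memmove(&neigh[i], &neigh[i + 1],
			sizeof(neigh[0]) * (count - i));
	}

	for (int i = 0; i < count; i++)
		printf("%d ", neigh[i]);	/* prints: 20 30 */
	printf("\n");
	return 0;
}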
View File

@@ -780,15 +780,12 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
{
-bool qdisc_is_offloaded = sch->flags & TCQ_F_OFFLOADED;
const struct Qdisc_class_ops *cops;
unsigned long cl;
u32 parentid;
bool notify;
int drops;
-if (n == 0 && len == 0)
-return;
drops = max_t(int, n, 0);
rcu_read_lock();
while ((parentid = sch->parent)) {
@@ -797,17 +794,8 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
if (sch->flags & TCQ_F_NOPARENT)
break;
-/* Notify parent qdisc only if child qdisc becomes empty.
-*
-* If child was empty even before update then backlog
-* counter is screwed and we skip notification because
-* parent class is already passive.
-*
-* If the original child was offloaded then it is allowed
-* to be seem as empty, so the parent is notified anyway.
-*/
-notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
-!qdisc_is_offloaded);
+/* Notify parent qdisc only if child qdisc becomes empty. */
+notify = !sch->q.qlen;
/* TODO: perform the search on a per txq basis */
sch = qdisc_lookup_rcu(qdisc_dev(sch), TC_H_MAJ(parentid));
if (sch == NULL) {
@@ -816,6 +804,9 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
}
cops = sch->ops->cl_ops;
if (notify && cops->qlen_notify) {
+/* Note that qlen_notify must be idempotent as it may get called
+* multiple times.
+*/
cl = cops->find(sch, parentid);
cops->qlen_notify(sch, cl);
}
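Dropping the !n/offload heuristics means a parent can now be notified more than once for the same already-empty child, which is why the new comment demands idempotency. A hedged sketch of what an idempotent qlen_notify can look like (demo_class is hypothetical; several real qdiscs use this same list_empty/list_del_init guard):

#include <linux/list.h>
#include <net/sch_generic.h>

struct demo_class {
	struct list_head alist;		/* membership in the active list */
};

static void demo_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct demo_class *cl = (struct demo_class *)arg;

	/* A second call finds the node already unlinked and does nothing,
	 * so repeated notifications are harmless. */
	if (!list_empty(&cl->alist))
		list_del_init(&cl->alist);
}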