Merge 6.6.91 into android15-6.6-lts

GKI (arm64) relevant: 25 out of 114 changes, affecting 32 files (+300/-170)
  38433aa31b dm: add missing unlock on in dm_keyslot_evict() [1 file, +2/-1]
  31ff70ad39 sch_htb: make htb_deactivate() idempotent [1 file, +6/-9]
  fa1fe9f3dd gre: Fix again IPv6 link-local address generation. [1 file, +9/-6]
  370635397b netdevice: add netdev_tx_reset_subqueue() shorthand [1 file, +12/-1]
  42b7a7c962 can: gw: fix RCU/BH usage in cgw_create_job() [1 file, +90/-59]
  355b052633 bpf: Scrub packet on bpf_redirect_peer [1 file, +1/-0]
  cbc82e7db1 Input: xpad - fix Share button on Xbox One controllers [1 file, +20/-15]
  56b4e8b621 Input: xpad - add support for 8BitDo Ultimate 2 Wireless Controller [1 file, +1/-0]
  2d9d6a4cd3 Input: xpad - fix two controller table values [1 file, +2/-2]
  faa9059631 module: ensure that kobject_put() is safe for module type kobjects [1 file, +3/-1]
  dffa51cf2d usb: gadget: f_ecm: Add get_status callback [1 file, +7/-0]
  9b09b99a95 usb: gadget: Use get_status callback to set remote wakeup capability [1 file, +5/-7]
  c9d8b0932e usb: typec: tcpm: delay SNK_TRY_WAIT_DEBOUNCE to SRC_TRYWAIT transition [1 file, +1/-1]
  14f298c521 usb: typec: ucsi: displayport: Fix NULL pointer access [1 file, +2/-0]
  9519771908 types: Complement the aligned types with signed 64-bit one [2 files, +3/-1]
  eba09f4239 nvme: unblock ctrl state transition for firmware update [1 file, +2/-1]
  00f0dd1a01 do_umount(): add missing barrier before refcount checks in sync case [1 file, +2/-1]
  51f1389b5f io_uring: always arm linked timeouts prior to issue [1 file, +16/-37]
  746e7d285d io_uring: ensure deferred completions are posted for multishot [1 file, +8/-0]
  054fc98d69 arm64: insn: Add support for encoding DSB [2 files, +38/-23]
  854da0ed06 arm64: proton-pack: Expose whether the platform is mitigated by firmware [2 files, +6/-0]
  73591041a5 arm64: proton-pack: Expose whether the branchy loop k value [2 files, +6/-0]
  42a20cf510 arm64: bpf: Add BHB mitigation to the epilogue for cBPF programs [3 files, +52/-5]
  80251f6202 arm64: bpf: Only mitigate cBPF programs loaded by unprivileged users [1 file, +3/-0]
  ca8a5626ca arm64: proton-pack: Add new CPUs 'k' values for branch mitigation [2 files, +3/-0]

Changes in 6.6.91
	dm: add missing unlock on in dm_keyslot_evict()
	arm64: dts: imx8mm-verdin: Link reg_usdhc2_vqmmc to usdhc2
	can: mcan: m_can_class_unregister(): fix order of unregistration calls
	wifi: cfg80211: fix out-of-bounds access during multi-link element defragmentation
	can: mcp251xfd: mcp251xfd_remove(): fix order of unregistration calls
	ksmbd: prevent rename with empty string
	ksmbd: prevent out-of-bounds stream writes by validating *pos
	ksmbd: Fix UAF in __close_file_table_ids
	openvswitch: Fix unsafe attribute parsing in output_userspace()
	ksmbd: fix memory leak in parse_lease_state()
	sch_htb: make htb_deactivate() idempotent
	gre: Fix again IPv6 link-local address generation.
	netdevice: add netdev_tx_reset_subqueue() shorthand
	net: ethernet: mtk_eth_soc: reset all TX queues on DMA free
	can: mcp251xfd: fix TDC setting for low data bit rates
	can: gw: fix RCU/BH usage in cgw_create_job()
	ipvs: fix uninit-value for saddr in do_output_route4
	netfilter: ipset: fix region locking in hash types
	bpf: Scrub packet on bpf_redirect_peer
	net: dsa: b53: allow leaky reserved multicast
	net: dsa: b53: fix clearing PVID of a port
	net: dsa: b53: fix flushing old pvid VLAN on pvid change
	net: dsa: b53: fix VLAN ID for untagged vlan on bridge leave
	net: dsa: b53: always rejoin default untagged VLAN on bridge leave
	net: dsa: b53: fix learning on VLAN unaware bridges
	Input: cyttsp5 - ensure minimum reset pulse width
	Input: cyttsp5 - fix power control issue on wakeup
	Input: mtk-pmic-keys - fix possible null pointer dereference
	Input: xpad - fix Share button on Xbox One controllers
	Input: xpad - add support for 8BitDo Ultimate 2 Wireless Controller
	Input: xpad - fix two controller table values
	Input: synaptics - enable InterTouch on Dynabook Portege X30-D
	Input: synaptics - enable InterTouch on Dynabook Portege X30L-G
	Input: synaptics - enable InterTouch on Dell Precision M3800
	Input: synaptics - enable SMBus for HP Elitebook 850 G1
	Input: synaptics - enable InterTouch on TUXEDO InfinityBook Pro 14 v5
	staging: iio: adc: ad7816: Correct conditional logic for store mode
	staging: axis-fifo: Remove hardware resets for user errors
	staging: axis-fifo: Correct handling of tx_fifo_depth for size validation
	x86/mm: Eliminate window where TLB flushes may be inadvertently skipped
	drm/amd/display: Shift DMUB AUX reply command if necessary
	iio: adc: ad7606: fix serial register access
	iio: adc: rockchip: Fix clock initialization sequence
	iio: adis16201: Correct inclinometer channel resolution
	iio: imu: st_lsm6dsx: fix possible lockup in st_lsm6dsx_read_fifo
	iio: imu: st_lsm6dsx: fix possible lockup in st_lsm6dsx_read_tagged_fifo
	drm/v3d: Add job to pending list if the reset was skipped
	drm/amd/display: more liberal vmin/vmax update for freesync
	drm/amd/display: Fix the checking condition in dmub aux handling
	drm/amd/display: Remove incorrect checking in dmub aux handler
	drm/amd/display: Fix wrong handling for AUX_DEFER case
	drm/amd/display: Copy AUX read reply data whenever length > 0
	drm/amdgpu/hdp4: use memcfg register to post the write for HDP flush
	drm/amdgpu/hdp5.2: use memcfg register to post the write for HDP flush
	drm/amdgpu/hdp5: use memcfg register to post the write for HDP flush
	drm/amdgpu/hdp6: use memcfg register to post the write for HDP flush
	usb: uhci-platform: Make the clock really optional
	smb: client: Avoid race in open_cached_dir with lease breaks
	xen: swiotlb: Use swiotlb bouncing if kmalloc allocation demands it
	xenbus: Use kref to track req lifetime
	clocksource/i8253: Use raw_spinlock_irqsave() in clockevent_i8253_disable()
	module: ensure that kobject_put() is safe for module type kobjects
	x86/microcode: Consolidate the loader enablement checking
	ocfs2: switch osb->disable_recovery to enum
	ocfs2: implement handshaking with ocfs2 recovery thread
	ocfs2: stop quota recovery before disabling quotas
	usb: cdnsp: Fix issue with resuming from L1
	usb: cdnsp: fix L1 resume issue for RTL_REVISION_NEW_LPM version
	usb: gadget: f_ecm: Add get_status callback
	usb: gadget: tegra-xudc: ACK ST_RC after clearing CTRL_RUN
	usb: gadget: Use get_status callback to set remote wakeup capability
	usb: host: tegra: Prevent host controller crash when OTG port is used
	usb: typec: tcpm: delay SNK_TRY_WAIT_DEBOUNCE to SRC_TRYWAIT transition
	usb: typec: ucsi: displayport: Fix NULL pointer access
	USB: usbtmc: use interruptible sleep in usbtmc_read
	usb: usbtmc: Fix erroneous get_stb ioctl error returns
	usb: usbtmc: Fix erroneous wait_srq ioctl return
	usb: usbtmc: Fix erroneous generic_read ioctl return
	iio: accel: adxl367: fix setting odr for activity time update
	iio: temp: maxim-thermocouple: Fix potential lack of DMA safe buffer.
	types: Complement the aligned types with signed 64-bit one
	iio: accel: adxl355: Make timestamp 64-bit aligned using aligned_s64
	iio: adc: dln2: Use aligned_s64 for timestamp
	MIPS: Fix MAX_REG_OFFSET
	drm/panel: simple: Update timings for AUO G101EVN010
	nvme: unblock ctrl state transition for firmware update
	do_umount(): add missing barrier before refcount checks in sync case
	io_uring: always arm linked timeouts prior to issue
	io_uring: ensure deferred completions are posted for multishot
	arm64: insn: Add support for encoding DSB
	arm64: proton-pack: Expose whether the platform is mitigated by firmware
	arm64: proton-pack: Expose whether the branchy loop k value
	arm64: bpf: Add BHB mitigation to the epilogue for cBPF programs
	arm64: bpf: Only mitigate cBPF programs loaded by unprivileged users
	arm64: proton-pack: Add new CPUs 'k' values for branch mitigation
	x86/bpf: Call branch history clearing sequence on exit
	x86/bpf: Add IBHF call at end of classic BPF
	x86/bhi: Do not set BHI_DIS_S in 32-bit mode
	x86/speculation: Simplify and make CALL_NOSPEC consistent
	x86/speculation: Add a conditional CS prefix to CALL_NOSPEC
	x86/speculation: Remove the extra #ifdef around CALL_NOSPEC
	Documentation: x86/bugs/its: Add ITS documentation
	x86/its: Enumerate Indirect Target Selection (ITS) bug
	x86/its: Add support for ITS-safe indirect thunk
	x86/its: Add support for ITS-safe return thunk
	x86/its: Enable Indirect Target Selection mitigation
	x86/its: Add "vmexit" option to skip mitigation on some CPUs
	x86/its: Add support for RSB stuffing mitigation
	x86/its: Align RETs in BHB clear sequence to avoid thunking
	x86/ibt: Keep IBT disabled during alternative patching
	x86/its: Use dynamic thunks for indirect branches
	x86/its: Fix build errors when CONFIG_MODULES=n
	x86/its: FineIBT-paranoid vs ITS
	Linux 6.6.91

Change-Id: Ic8980c4efbc1720c33b4bab154ce2dfe32cf8624
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman committed 2025-05-28 16:47:40 +00:00
110 changed files with 1719 additions and 493 deletions


@@ -514,6 +514,7 @@ Description: information about CPUs heterogeneity.
What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
/sys/devices/system/cpu/vulnerabilities/indirect_target_selection
/sys/devices/system/cpu/vulnerabilities/itlb_multihit
/sys/devices/system/cpu/vulnerabilities/l1tf
/sys/devices/system/cpu/vulnerabilities/mds


@@ -22,3 +22,4 @@ are configurable at compile, boot or run time.
srso
gather_data_sampling
reg-file-data-sampling
indirect-target-selection


@@ -0,0 +1,168 @@
.. SPDX-License-Identifier: GPL-2.0
Indirect Target Selection (ITS)
===============================
ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were
released before Alder Lake. ITS may allow an attacker to control the prediction
of indirect branches and RETs located in the lower half of a cacheline.
ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium).
Scope of Impact
---------------
- **eIBRS Guest/Host Isolation**: Indirect branches in KVM/kernel may still be
predicted with an unintended target corresponding to a branch in the guest.
- **Intra-Mode BTI**: In-kernel training, such as through cBPF or other native
gadgets.
- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect
branches may still be predicted with targets corresponding to direct branches
executed prior to the IBPB. This is fixed by the IPU 2025.1 microcode, which
should be available via distro updates. Alternatively, microcode can be
obtained from Intel's github repository [#f1]_.
Affected CPUs
-------------
Below is the list of ITS-affected CPUs [#f2]_ [#f3]_:
======================== ============ ==================== ===============
Common name Family_Model eIBRS Intra-mode BTI
Guest/Host Isolation
======================== ============ ==================== ===============
SKYLAKE_X (step >= 6) 06_55H Affected Affected
ICELAKE_X 06_6AH Not affected Affected
ICELAKE_D 06_6CH Not affected Affected
ICELAKE_L 06_7EH Not affected Affected
TIGERLAKE_L 06_8CH Not affected Affected
TIGERLAKE 06_8DH Not affected Affected
KABYLAKE_L (step >= 12) 06_8EH Affected Affected
KABYLAKE (step >= 13) 06_9EH Affected Affected
COMETLAKE 06_A5H Affected Affected
COMETLAKE_L 06_A6H Affected Affected
ROCKETLAKE 06_A7H Not affected Affected
======================== ============ ==================== ===============
- All affected CPUs enumerate Enhanced IBRS feature.
- IBPB isolation is affected on all ITS-affected CPUs, and needs a microcode
update for mitigation.
- None of the affected CPUs enumerate BHI_CTRL which was introduced in Golden
Cove (Alder Lake and Sapphire Rapids). This can help guests to determine the
host's affected status.
- Intel Atom CPUs are not affected by ITS.
Mitigation
----------
As only the indirect branches and RETs whose last instruction byte lies in the
lower half of a cacheline are vulnerable to ITS, the basic idea behind the
mitigation is to not allow indirect branches in the lower half.
This is achieved by relying on the existing retpoline support in the kernel and
in compilers. ITS-vulnerable retpoline sites are runtime patched to point to
newly added ITS-safe thunks. These safe thunks consist of an indirect branch in
the second half of the cacheline. Not all retpoline sites are patched to
thunks; if a retpoline site is evaluated to be ITS-safe, it is replaced with an
inline indirect branch.
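The address test behind this is a single bit check. A minimal sketch, mirroring
the cpu_wants_indirect_its_thunk_at() and cpu_wants_rethunk_at() helpers in the
alternative.c hunk further down (the helper name here is illustrative, not from
the patch):

static bool its_addr_is_unsafe(unsigned long last_byte_of_branch)
{
	/* Bytes 0-31 of a 64-byte cacheline have bit 5 of the address clear. */
	return !(last_byte_of_branch & 0x20);
}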
Dynamic thunks
~~~~~~~~~~~~~~
From a dynamically allocated pool of safe thunks, each vulnerable site is
replaced with a new thunk, such that it gets a unique address. This could
improve branch prediction accuracy. It is also a defense-in-depth measure
against aliasing.
Note, for simplicity, indirect branches in eBPF programs are always replaced
with a jump to a static thunk in __x86_indirect_its_thunk_array. If required,
this can be changed to use dynamic thunks in the future.
All vulnerable RETs are replaced with a static thunk; they do not use dynamic
thunks. This is because RETs mostly get their prediction from the RSB, which
does not depend on the source address. RETs that underflow the RSB may benefit
from dynamic thunks, but RETs significantly outnumber indirect branches, and
any benefit from a unique source address could be outweighed by the increased
icache footprint and iTLB pressure.
Retpoline
~~~~~~~~~
The retpoline sequence also mitigates ITS-unsafe indirect branches. For this
reason, when retpoline is enabled, the ITS mitigation only relocates the RETs
to safe thunks, unless the user requested the RSB-stuffing mitigation.
RSB Stuffing
~~~~~~~~~~~~
RSB-stuffing via Call Depth Tracking is a mitigation for Retbleed RSB-underflow
attacks; it also mitigates RETs that are vulnerable to ITS.
Mitigation in guests
^^^^^^^^^^^^^^^^^^^^
All guests deploy the ITS mitigation by default, irrespective of eIBRS
enumeration and the Family/Model of the guest. This is because the eIBRS
feature could be hidden from a guest. One exception to this is when a guest
enumerates BHI_DIS_S, which indicates that the guest is running on an
unaffected host.
To prevent guests from unnecessarily deploying the mitigation on unaffected
platforms, Intel has defined ITS_NO (bit 62) in MSR IA32_ARCH_CAPABILITIES.
When a guest sees this bit set, it should not enumerate the ITS bug. Note,
this bit is not set by any hardware, but is **intended for VMMs to
synthesize** for guests as per the host's affected status.
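In guest code this boils down to a single MSR-bit test. A minimal sketch,
mirroring the check at the top of vulnerable_to_its() in the cpu/common.c hunk
further down (the function name here is illustrative, not from the patch):

static bool its_no_synthesized(void)
{
	u64 cap = x86_read_arch_cap_msr();	/* IA32_ARCH_CAPABILITIES */

	return !!(cap & ARCH_CAP_ITS_NO);	/* BIT_ULL(62), VMM-synthesized */
}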
Mitigation options
^^^^^^^^^^^^^^^^^^
The ITS mitigation can be controlled using the "indirect_target_selection"
kernel parameter. The available options are:
======== ===================================================================
on (default) Deploy the "Aligned branch/return thunks" mitigation.
If spectre_v2 mitigation enables retpoline, aligned-thunks are only
deployed for the affected RET instructions. Retpoline mitigates
indirect branches.
off Disable ITS mitigation.
vmexit Equivalent to "=on" if the CPU is affected by guest/host isolation
part of ITS. Otherwise, mitigation is not deployed. This option is
useful when host userspace is not in the threat model, and only
attacks from guest to host are considered.
stuff Deploy RSB-fill mitigation when retpoline is also deployed.
Otherwise, deploy the default mitigation. When retpoline mitigation
is enabled, RSB-stuffing via Call-Depth-Tracking also mitigates
ITS.
force Force the ITS bug and deploy the default mitigation.
======== ===================================================================
Sysfs reporting
---------------
The sysfs file showing ITS mitigation status is:
/sys/devices/system/cpu/vulnerabilities/indirect_target_selection
Note, microcode mitigation status is not reported in this file.
The possible values in this file are:
.. list-table::
* - Not affected
- The processor is not vulnerable.
* - Vulnerable
- System is vulnerable and no mitigation has been applied.
* - Vulnerable, KVM: Not affected
- System is vulnerable to intra-mode BTI, but not affected by eIBRS
guest/host isolation.
* - Mitigation: Aligned branch/return thunks
- The mitigation is enabled, affected indirect branches and RETs are
relocated to safe thunks.
* - Mitigation: Retpolines, Stuffing RSB
- The mitigation is enabled using retpoline and RSB stuffing.
References
----------
.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files
.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list


@@ -2084,6 +2084,23 @@
different crypto accelerators. This option can be used
to achieve best performance for particular HW.
indirect_target_selection= [X86,Intel] Mitigation control for Indirect
Target Selection (ITS) bug in Intel CPUs. Updated
microcode is also required for a fix in IBPB.
on: Enable mitigation (default).
off: Disable mitigation.
force: Force the ITS bug and deploy default
mitigation.
vmexit: Only deploy mitigation if CPU is affected by
guest/host isolation part of ITS.
stuff: Deploy RSB-fill mitigation when retpoline is
also deployed. Otherwise, deploy the default
mitigation.
For details see:
Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
init= [KNL]
Format: <full_path>
Run specified binary instead of /sbin/init as init
@@ -3367,6 +3384,7 @@
expose users to several CPU vulnerabilities.
Equivalent to: if nokaslr then kpti=0 [ARM64]
gather_data_sampling=off [X86]
indirect_target_selection=off [X86]
kvm.nx_huge_pages=off [X86]
l1tf=off [X86]
mds=off [X86]


@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
- SUBLEVEL = 90
+ SUBLEVEL = 91
EXTRAVERSION =
NAME = Pinguïn Aangedreven


@@ -148,6 +148,19 @@
startup-delay-us = <20000>;
};
reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
compatible = "regulator-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2_vsel>;
gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
regulator-max-microvolt = <3300000>;
regulator-min-microvolt = <1800000>;
states = <1800000 0x1>,
<3300000 0x0>;
regulator-name = "PMIC_USDHC_VSELECT";
vin-supply = <&reg_nvcc_sd>;
};
reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
@@ -266,7 +279,7 @@
"SODIMM_19",
"",
"",
"",
"PMIC_USDHC_VSELECT",
"",
"",
"",
@@ -787,6 +800,7 @@
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>;
pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>;
vmmc-supply = <&reg_usdhc2_vmmc>;
vqmmc-supply = <&reg_usdhc2_vqmmc>;
};
&wdog1 {
@@ -1209,13 +1223,17 @@
<MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5 0x6>; /* SODIMM 76 */
};
pinctrl_usdhc2_vsel: usdhc2vselgrp {
fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4 0x10>; /* PMIC_USDHC_VSELECT */
};
/*
* Note: Due to ERR050080 we use discrete external on-module resistors pulling-up to the
* on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here.
*/
pinctrl_usdhc2: usdhc2grp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90>, /* SODIMM 78 */
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x90>, /* SODIMM 74 */
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x90>, /* SODIMM 80 */
@@ -1226,7 +1244,6 @@
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x94>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x94>,
@@ -1237,7 +1254,6 @@
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x96>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x96>,
@@ -1249,7 +1265,6 @@
/* Avoid backfeeding with removed card power */
pinctrl_usdhc2_sleep: usdhc2slpgrp {
fsl,pins =
- <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x0>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x0>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x0>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0>,


@@ -81,6 +81,7 @@
#define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_A520 0xD80
#define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_A715 0xD4D
@@ -166,6 +167,7 @@
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)


@@ -687,6 +687,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
}
#endif
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type);
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);


@@ -97,6 +97,9 @@ enum mitigation_state arm64_get_meltdown_state(void);
enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
extern bool __nospectre_bhb;
u8 get_spectre_bhb_loop_value(void);
bool is_spectre_bhb_fw_mitigated(void);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);


@@ -891,6 +891,7 @@ static u8 spectre_bhb_loop_affected(void)
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
@@ -998,6 +999,11 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
return true;
}
u8 get_spectre_bhb_loop_value(void)
{
return max_bhb_k;
}
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
const char *v = arm64_get_bp_hardening_vector(slot);
@@ -1015,7 +1021,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
isb();
}
- static bool __read_mostly __nospectre_bhb;
+ bool __read_mostly __nospectre_bhb;
static int __init parse_spectre_bhb_param(char *str)
{
__nospectre_bhb = true;
@@ -1093,6 +1099,11 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
update_mitigation_state(&spectre_bhb_state, state);
}
bool is_spectre_bhb_fw_mitigated(void)
{
return test_bit(BHB_FW, &system_bhb_mitigations);
}
/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
__le32 *origptr,


@@ -5,6 +5,7 @@
*
* Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
*/
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/printk.h>
@@ -1471,43 +1472,41 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}
static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
{
switch (type) {
case AARCH64_INSN_MB_SY:
return 0xf;
case AARCH64_INSN_MB_ST:
return 0xe;
case AARCH64_INSN_MB_LD:
return 0xd;
case AARCH64_INSN_MB_ISH:
return 0xb;
case AARCH64_INSN_MB_ISHST:
return 0xa;
case AARCH64_INSN_MB_ISHLD:
return 0x9;
case AARCH64_INSN_MB_NSH:
return 0x7;
case AARCH64_INSN_MB_NSHST:
return 0x6;
case AARCH64_INSN_MB_NSHLD:
return 0x5;
default:
pr_err("%s: unknown barrier type %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
}
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
{
u32 opt;
u32 insn;
- switch (type) {
- case AARCH64_INSN_MB_SY:
- opt = 0xf;
- break;
- case AARCH64_INSN_MB_ST:
- opt = 0xe;
- break;
- case AARCH64_INSN_MB_LD:
- opt = 0xd;
- break;
- case AARCH64_INSN_MB_ISH:
- opt = 0xb;
- break;
- case AARCH64_INSN_MB_ISHST:
- opt = 0xa;
- break;
- case AARCH64_INSN_MB_ISHLD:
- opt = 0x9;
- break;
- case AARCH64_INSN_MB_NSH:
- opt = 0x7;
- break;
- case AARCH64_INSN_MB_NSHST:
- opt = 0x6;
- break;
- case AARCH64_INSN_MB_NSHLD:
- opt = 0x5;
- break;
- default:
- pr_err("%s: unknown dmb type %d\n", __func__, type);
+ opt = __get_barrier_crm_val(type);
+ if (opt == AARCH64_BREAK_FAULT)
return AARCH64_BREAK_FAULT;
- }
insn = aarch64_insn_get_dmb_value();
insn &= ~GENMASK(11, 8);
@@ -1515,3 +1514,18 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
return insn;
}
u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
{
u32 opt, insn;
opt = __get_barrier_crm_val(type);
if (opt == AARCH64_BREAK_FAULT)
return AARCH64_BREAK_FAULT;
insn = aarch64_insn_get_dsb_base_value();
insn &= ~GENMASK(11, 8);
insn |= (opt << 8);
return insn;
}


@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "bpf_jit: " fmt
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
@@ -17,6 +18,7 @@
#include <asm/asm-extable.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/patching.h>
@@ -655,7 +657,51 @@ static void build_plt(struct jit_ctx *ctx)
plt->target = (u64)&dummy_tramp;
}
- static void build_epilogue(struct jit_ctx *ctx)
/* Clobbers BPF registers 1-4, aka x0-x3 */
static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
{
const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
u8 k = get_spectre_bhb_loop_value();
if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
cpu_mitigations_off() || __nospectre_bhb ||
arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
return;
if (capable(CAP_SYS_ADMIN))
return;
if (supports_clearbhb(SCOPE_SYSTEM)) {
emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
return;
}
if (k) {
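/*
 * Emit a counted loop of k taken branches (SUBS/B.NE) that overwrites the
 * branch history; the DSB ISH + ISB emitted below ensure the loop has
 * completed before execution continues.
 */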
emit_a64_mov_i64(r1, k, ctx);
emit(A64_B(1), ctx);
emit(A64_SUBS_I(true, r1, r1, 1), ctx);
emit(A64_B_(A64_COND_NE, -2), ctx);
emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
emit(aarch64_insn_get_isb_value(), ctx);
}
if (is_spectre_bhb_fw_mitigated()) {
emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
switch (arm_smccc_1_1_get_conduit()) {
case SMCCC_CONDUIT_HVC:
emit(aarch64_insn_get_hvc_value(), ctx);
break;
case SMCCC_CONDUIT_SMC:
emit(aarch64_insn_get_smc_value(), ctx);
break;
default:
pr_err_once("Firmware mitigation enabled with unknown conduit\n");
}
}
}
+ static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
{
const u8 r0 = bpf2a64[BPF_REG_0];
const u8 r6 = bpf2a64[BPF_REG_6];
@@ -677,10 +723,13 @@ static void build_epilogue(struct jit_ctx *ctx)
emit(A64_POP(r8, r9, A64_SP), ctx);
emit(A64_POP(r6, r7, A64_SP), ctx);
if (was_classic)
build_bhb_mitigation(ctx);
/* Restore FP/LR registers */
emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
- /* Set return value */
+ /* Move the return value from bpf:r0 (aka x7) to x0 */
emit(A64_MOV(1, A64_R(0), r0), ctx);
/* Authenticate lr */
@@ -1588,7 +1637,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
}
ctx.epilogue_offset = ctx.idx;
- build_epilogue(&ctx);
+ build_epilogue(&ctx, was_classic);
build_plt(&ctx);
extable_align = __alignof__(struct exception_table_entry);
@@ -1624,7 +1673,7 @@ skip_init_ctx:
goto out_off;
}
- build_epilogue(&ctx);
+ build_epilogue(&ctx, was_classic);
build_plt(&ctx);
/* 3. Extra pass to validate JITed code. */


@@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
- #define MAX_REG_OFFSET (offsetof(struct pt_regs, __last))
+ #define MAX_REG_OFFSET \
+ (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/**
* regs_get_register() - get register value from its offset
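For context, the consumer of this macro treats MAX_REG_OFFSET as the offset of
the last valid register (the bounds check is inclusive), which is why the old
definition allowed reading one word past the end of struct pt_regs. The helper,
as in mainline:

static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;

	return *(unsigned long *)((unsigned long)regs + offset);
}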


@@ -2610,6 +2610,17 @@ config MITIGATION_SPECTRE_BHI
indirect branches.
See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
config MITIGATION_ITS
bool "Enable Indirect Target Selection mitigation"
depends on CPU_SUP_INTEL && X86_64
depends on RETPOLINE && RETHUNK
default y
help
Enable the Indirect Target Selection (ITS) mitigation. ITS is a bug in
the branch prediction unit (BPU) of some Intel CPUs that may allow
Spectre-V2-style attacks. If disabled, the mitigation cannot be enabled
via the kernel command line.
See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
endif
config ARCH_HAS_ADD_PAGES


@@ -1569,7 +1569,9 @@ SYM_CODE_END(rewind_stack_and_make_dead)
* ORC to unwind properly.
*
* The alignment is for performance and not for safety, and may be safely
- * refactored in the future if needed.
+ * refactored in the future if needed. The .skips are for safety, to ensure
+ * that all RETs are in the second half of a cacheline to mitigate Indirect
+ * Target Selection, rather than taking the slowpath via its_return_thunk.
*/
SYM_FUNC_START(clear_bhb_loop)
push %rbp
@@ -1579,10 +1581,22 @@ SYM_FUNC_START(clear_bhb_loop)
call 1f
jmp 5f
.align 64, 0xcc
/*
* Shift instructions so that the RET is in the upper half of the
* cacheline and don't take the slowpath to its_return_thunk.
*/
.skip 32 - (.Lret1 - 1f), 0xcc
ANNOTATE_INTRA_FUNCTION_CALL
1: call 2f
- RET
+ .Lret1: RET
.align 64, 0xcc
/*
* As above shift instructions for RET at .Lret2 as well.
*
* This should be ideally be: .skip 32 - (.Lret2 - 2f), 0xcc
* but some Clang versions (e.g. 18) don't like this.
*/
.skip 32 - 18, 0xcc
2: movl $5, %eax
3: jmp 4f
nop
@@ -1590,7 +1604,7 @@ SYM_FUNC_START(clear_bhb_loop)
jnz 3b
sub $1, %ecx
jnz 1b
- RET
+ .Lret2: RET
5: lfence
pop %rbp
RET


@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/stringify.h>
#include <asm/asm.h>
#include <asm/bug.h>
#define ALT_FLAGS_SHIFT 16
@@ -130,6 +131,37 @@ static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
}
#endif
#ifdef CONFIG_MITIGATION_ITS
extern void its_init_mod(struct module *mod);
extern void its_fini_mod(struct module *mod);
extern void its_free_mod(struct module *mod);
extern u8 *its_static_thunk(int reg);
#else /* CONFIG_MITIGATION_ITS */
static inline void its_init_mod(struct module *mod) { }
static inline void its_fini_mod(struct module *mod) { }
static inline void its_free_mod(struct module *mod) { }
static inline u8 *its_static_thunk(int reg)
{
WARN_ONCE(1, "ITS not compiled in");
return NULL;
}
#endif
#if defined(CONFIG_RETHUNK) && defined(CONFIG_OBJTOOL)
extern bool cpu_wants_rethunk(void);
extern bool cpu_wants_rethunk_at(void *addr);
#else
static __always_inline bool cpu_wants_rethunk(void)
{
return false;
}
static __always_inline bool cpu_wants_rethunk_at(void *addr)
{
return false;
}
#endif
#ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end,


@@ -468,6 +468,7 @@
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */
/*
* BUG word(s)
@@ -518,4 +519,6 @@
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
#define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */
#define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */
#endif /* _ASM_X86_CPUFEATURES_H */


@@ -17,10 +17,12 @@ struct ucode_cpu_info {
void load_ucode_bsp(void);
void load_ucode_ap(void);
void microcode_bsp_resume(void);
bool __init microcode_loader_disabled(void);
#else
static inline void load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void microcode_bsp_resume(void) { }
static inline bool __init microcode_loader_disabled(void) { return false; }
#endif
extern unsigned long initrd_start_early;


@@ -180,6 +180,14 @@
* VERW clears CPU Register
* File.
*/
#define ARCH_CAP_ITS_NO BIT_ULL(62) /*
* Not susceptible to
* Indirect Target Selection.
* This bit is not set by
* HW, but is synthesized by
* VMMs for guests to know
* their affected status.
*/
#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
* IA32_XAPIC_DISABLE_STATUS MSR


@@ -219,9 +219,8 @@
.endm
/*
- * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
- * to the retpoline thunk with a CS prefix when the register requires
- * a RAX prefix byte to encode. Also see apply_retpolines().
+ * Emits a conditional CS prefix that is compatible with
+ * -mindirect-branch-cs-prefix.
*/
.macro __CS_PREFIX reg:req
.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
@@ -365,10 +364,14 @@
".long 999b\n\t" \
".popsection\n\t"
#define ITS_THUNK_SIZE 64
typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
typedef u8 its_thunk_t[ITS_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
extern its_thunk_t __x86_indirect_its_thunk_array[];
#ifdef CONFIG_RETHUNK
extern void __x86_return_thunk(void);
@@ -392,6 +395,12 @@ static inline void srso_return_thunk(void) {}
static inline void srso_alias_return_thunk(void) {}
#endif
#ifdef CONFIG_MITIGATION_ITS
extern void its_return_thunk(void);
#else
static inline void its_return_thunk(void) {}
#endif
extern void retbleed_return_thunk(void);
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);
@@ -412,11 +421,6 @@ extern void (*x86_return_thunk)(void);
#ifdef CONFIG_CALL_DEPTH_TRACKING
extern void __x86_return_skl(void);
- static inline void x86_set_skl_return_thunk(void)
- {
- x86_return_thunk = &__x86_return_skl;
- }
#define CALL_DEPTH_ACCOUNT \
ALTERNATIVE("", \
__stringify(INCREMENT_CALL_DEPTH), \
@@ -429,7 +433,6 @@ DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif
#else
- static inline void x86_set_skl_return_thunk(void) {}
#define CALL_DEPTH_ACCOUNT ""
@@ -454,20 +457,23 @@ static inline void x86_set_skl_return_thunk(void) {}
#ifdef CONFIG_X86_64
/*
* Emits a conditional CS prefix that is compatible with
* -mindirect-branch-cs-prefix.
*/
#define __CS_PREFIX(reg) \
".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
".ifc \\rs," reg "\n" \
".byte 0x2e\n" \
".endif\n" \
".endr\n"
/*
* Inline asm uses the %V modifier which is only in newer GCC
* which is ensured when CONFIG_RETPOLINE is defined.
*/
- # define CALL_NOSPEC \
- ALTERNATIVE_2( \
- ANNOTATE_RETPOLINE_SAFE \
- "call *%[thunk_target]\n", \
- "call __x86_indirect_thunk_%V[thunk_target]\n", \
- X86_FEATURE_RETPOLINE, \
- "lfence;\n" \
- ANNOTATE_RETPOLINE_SAFE \
- "call *%[thunk_target]\n", \
- X86_FEATURE_RETPOLINE_LFENCE)
+ #define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \
+ "call __x86_indirect_thunk_%V[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)


@@ -18,6 +18,8 @@
#include <linux/mmu_context.h>
#include <linux/bsearch.h>
#include <linux/sync_core.h>
#include <linux/moduleloader.h>
#include <linux/cleanup.h>
#include <asm/text-patching.h>
#include <asm/alternative.h>
#include <asm/sections.h>
@@ -30,6 +32,8 @@
#include <asm/fixmap.h>
#include <asm/paravirt.h>
#include <asm/asm-prototypes.h>
#include <asm/cfi.h>
#include <asm/set_memory.h>
int __read_mostly alternatives_patched;
@@ -123,6 +127,135 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
#endif
};
#ifdef CONFIG_MITIGATION_ITS
#ifdef CONFIG_MODULES
static struct module *its_mod;
static void *its_page;
static unsigned int its_offset;
/* Initialize a thunk with the "jmp *reg; int3" instructions. */
static void *its_init_thunk(void *thunk, int reg)
{
u8 *bytes = thunk;
int i = 0;
if (reg >= 8) {
bytes[i++] = 0x41; /* REX.B prefix */
reg -= 8;
}
bytes[i++] = 0xff;
bytes[i++] = 0xe0 + reg; /* jmp *reg */
bytes[i++] = 0xcc;
return thunk;
}
void its_init_mod(struct module *mod)
{
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return;
mutex_lock(&text_mutex);
its_mod = mod;
its_page = NULL;
}
void its_fini_mod(struct module *mod)
{
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return;
WARN_ON_ONCE(its_mod != mod);
its_mod = NULL;
its_page = NULL;
mutex_unlock(&text_mutex);
for (int i = 0; i < mod->its_num_pages; i++) {
void *page = mod->its_page_array[i];
set_memory_rox((unsigned long)page, 1);
}
}
void its_free_mod(struct module *mod)
{
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return;
for (int i = 0; i < mod->its_num_pages; i++) {
void *page = mod->its_page_array[i];
module_memfree(page);
}
kfree(mod->its_page_array);
}
DEFINE_FREE(its_execmem, void *, if (_T) module_memfree(_T));
static void *its_alloc(void)
{
void *page __free(its_execmem) = module_alloc(PAGE_SIZE);
if (!page)
return NULL;
if (its_mod) {
void *tmp = krealloc(its_mod->its_page_array,
(its_mod->its_num_pages+1) * sizeof(void *),
GFP_KERNEL);
if (!tmp)
return NULL;
its_mod->its_page_array = tmp;
its_mod->its_page_array[its_mod->its_num_pages++] = page;
}
return no_free_ptr(page);
}
static void *its_allocate_thunk(int reg)
{
int size = 3 + (reg / 8);
void *thunk;
if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) {
its_page = its_alloc();
if (!its_page) {
pr_err("ITS page allocation failed\n");
return NULL;
}
memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE);
its_offset = 32;
}
/*
* If the indirect branch instruction will be in the lower half
* of a cacheline, then update the offset to reach the upper half.
*/
if ((its_offset + size - 1) % 64 < 32)
its_offset = ((its_offset - 1) | 0x3F) + 33;
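/*
 * Illustrative values (not from the original patch): an offset of 70,
 * i.e. byte 6 of a cacheline (lower half), advances to 160, i.e. byte 32,
 * the start of the upper half of the next cacheline.
 */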
thunk = its_page + its_offset;
its_offset += size;
set_memory_rw((unsigned long)its_page, 1);
thunk = its_init_thunk(thunk, reg);
set_memory_rox((unsigned long)its_page, 1);
return thunk;
}
#else /* CONFIG_MODULES */
static void *its_allocate_thunk(int reg)
{
return NULL;
}
#endif /* CONFIG_MODULES */
#endif /* CONFIG_MITIGATION_ITS */
/*
* Fill the buffer with a single effective instruction of size @len.
*
@@ -521,7 +654,8 @@ static int emit_indirect(int op, int reg, u8 *bytes)
return i;
}
- static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+ static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes,
+ void *call_dest, void *jmp_dest)
{
u8 op = insn->opcode.bytes[0];
int i = 0;
@@ -542,7 +676,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
switch (op) {
case CALL_INSN_OPCODE:
__text_gen_insn(bytes+i, op, addr+i,
- __x86_indirect_call_thunk_array[reg],
+ call_dest,
CALL_INSN_SIZE);
i += CALL_INSN_SIZE;
break;
@@ -550,7 +684,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
case JMP32_INSN_OPCODE:
clang_jcc:
__text_gen_insn(bytes+i, op, addr+i,
- __x86_indirect_jump_thunk_array[reg],
+ jmp_dest,
JMP32_INSN_SIZE);
i += JMP32_INSN_SIZE;
break;
@@ -565,6 +699,39 @@ clang_jcc:
return i;
}
static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{
return __emit_trampoline(addr, insn, bytes,
__x86_indirect_call_thunk_array[reg],
__x86_indirect_jump_thunk_array[reg]);
}
#ifdef CONFIG_MITIGATION_ITS
static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{
u8 *thunk = __x86_indirect_its_thunk_array[reg];
u8 *tmp = its_allocate_thunk(reg);
if (tmp)
thunk = tmp;
return __emit_trampoline(addr, insn, bytes, thunk, thunk);
}
/* Check if an indirect branch is at ITS-unsafe address */
static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
{
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return false;
/* Indirect branch opcode is 2 or 3 bytes depending on reg */
addr += 1 + reg / 8;
/* Lower-half of the cacheline? */
return !(addr & 0x20);
}
#endif
/*
* Rewrite the compiler generated retpoline thunk calls.
*
@@ -639,6 +806,15 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
bytes[i++] = 0xe8; /* LFENCE */
}
#ifdef CONFIG_MITIGATION_ITS
/*
* Check if the address of last byte of emitted-indirect is in
* lower-half of the cacheline. Such branches need ITS mitigation.
*/
if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg))
return emit_its_trampoline(addr, insn, reg, bytes);
#endif
ret = emit_indirect(op, reg, bytes + i);
if (ret < 0)
return ret;
@@ -710,6 +886,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
#ifdef CONFIG_RETHUNK
bool cpu_wants_rethunk(void)
{
return cpu_feature_enabled(X86_FEATURE_RETHUNK);
}
bool cpu_wants_rethunk_at(void *addr)
{
if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
return false;
if (x86_return_thunk != its_return_thunk)
return true;
return !((unsigned long)addr & 0x20);
}
/*
* Rewrite the compiler generated return thunk tail-calls.
*
@@ -726,7 +917,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
int i = 0;
/* Patch the custom return thunks... */
- if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+ if (cpu_wants_rethunk_at(addr)) {
i = JMP32_INSN_SIZE;
__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
} else {
@@ -743,7 +934,7 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
{
s32 *s;
- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+ if (cpu_wants_rethunk())
static_call_force_reinit();
for (s = start; s < end; s++) {
@@ -1258,6 +1449,13 @@ static void __apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
static void poison_cfi(void *addr) { }
#endif
u8 *its_static_thunk(int reg)
{
u8 *thunk = __x86_indirect_its_thunk_array[reg];
return thunk;
}
#endif
void apply_fineibt(s32 *start_retpoline, s32 *end_retpoline,
@@ -1575,6 +1773,8 @@ static noinline void __init alt_reloc_selftest(void)
void __init alternative_instructions(void)
{
u64 ibt;
int3_selftest();
/*
@@ -1612,6 +1812,9 @@ void __init alternative_instructions(void)
*/
paravirt_set_cap();
/* Keep CET-IBT disabled until caller/callee are patched */
ibt = ibt_save(/*disable*/ true);
/*
* First patch paravirt functions, such that we overwrite the indirect
* call with the direct call.
@@ -1645,6 +1848,8 @@ void __init alternative_instructions(void)
*/
apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
ibt_restore(ibt);
#ifdef CONFIG_SMP
/* Patch to UP if other cpus not imminent. */
if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {


@@ -49,6 +49,7 @@ static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void);
static void __init its_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
@@ -67,6 +68,14 @@ static DEFINE_MUTEX(spec_ctrl_mutex);
void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
static void __init set_return_thunk(void *thunk)
{
if (x86_return_thunk != __x86_return_thunk)
pr_warn("x86/bugs: return thunk changed\n");
x86_return_thunk = thunk;
}
/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
@@ -175,6 +184,7 @@ void __init cpu_select_mitigations(void)
*/
srso_select_mitigation();
gds_select_mitigation();
its_select_mitigation();
}
/*
@@ -1102,7 +1112,7 @@ do_cmd_auto:
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
- x86_return_thunk = retbleed_return_thunk;
+ set_return_thunk(retbleed_return_thunk);
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -1136,7 +1146,9 @@ do_cmd_auto:
case RETBLEED_MITIGATION_STUFF:
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
- x86_set_skl_return_thunk();
+ #ifdef CONFIG_CALL_DEPTH_TRACKING
+ set_return_thunk(&__x86_return_skl);
+ #endif
break;
default:
@@ -1170,6 +1182,146 @@ do_cmd_auto:
pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}
#undef pr_fmt
#define pr_fmt(fmt) "ITS: " fmt
enum its_mitigation_cmd {
ITS_CMD_OFF,
ITS_CMD_ON,
ITS_CMD_VMEXIT,
ITS_CMD_RSB_STUFF,
};
enum its_mitigation {
ITS_MITIGATION_OFF,
ITS_MITIGATION_VMEXIT_ONLY,
ITS_MITIGATION_ALIGNED_THUNKS,
ITS_MITIGATION_RETPOLINE_STUFF,
};
static const char * const its_strings[] = {
[ITS_MITIGATION_OFF] = "Vulnerable",
[ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
[ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
[ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
};
static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;
static enum its_mitigation_cmd its_cmd __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
static int __init its_parse_cmdline(char *str)
{
if (!str)
return -EINVAL;
if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
return 0;
}
if (!strcmp(str, "off")) {
its_cmd = ITS_CMD_OFF;
} else if (!strcmp(str, "on")) {
its_cmd = ITS_CMD_ON;
} else if (!strcmp(str, "force")) {
its_cmd = ITS_CMD_ON;
setup_force_cpu_bug(X86_BUG_ITS);
} else if (!strcmp(str, "vmexit")) {
its_cmd = ITS_CMD_VMEXIT;
} else if (!strcmp(str, "stuff")) {
its_cmd = ITS_CMD_RSB_STUFF;
} else {
pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
}
return 0;
}
early_param("indirect_target_selection", its_parse_cmdline);
static void __init its_select_mitigation(void)
{
enum its_mitigation_cmd cmd = its_cmd;
if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
its_mitigation = ITS_MITIGATION_OFF;
return;
}
/* Retpoline+CDT mitigates ITS, bail out */
if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
boot_cpu_has(X86_FEATURE_CALL_DEPTH)) {
its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
goto out;
}
/* Exit early to avoid irrelevant warnings */
if (cmd == ITS_CMD_OFF) {
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (spectre_v2_enabled == SPECTRE_V2_NONE) {
pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (!IS_ENABLED(CONFIG_RETPOLINE) || !IS_ENABLED(CONFIG_RETHUNK)) {
pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (cmd == ITS_CMD_RSB_STUFF &&
(!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING))) {
pr_err("RSB stuff mitigation not supported, using default\n");
cmd = ITS_CMD_ON;
}
switch (cmd) {
case ITS_CMD_OFF:
its_mitigation = ITS_MITIGATION_OFF;
break;
case ITS_CMD_VMEXIT:
if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
goto out;
}
fallthrough;
case ITS_CMD_ON:
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
set_return_thunk(its_return_thunk);
break;
case ITS_CMD_RSB_STUFF:
its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
#ifdef CONFIG_CALL_DEPTH_TRACKING
set_return_thunk(&__x86_return_skl);
#endif
if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) {
retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
pr_info("Retbleed mitigation updated to stuffing\n");
}
break;
}
out:
pr_info("%s\n", its_strings[its_mitigation]);
}
#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
@@ -1677,10 +1829,11 @@ static void __init bhi_select_mitigation(void)
return;
}
- if (spec_ctrl_bhi_dis())
+ if (!IS_ENABLED(CONFIG_X86_64))
return;
- if (!IS_ENABLED(CONFIG_X86_64))
+ /* Mitigate in hardware if supported */
+ if (spec_ctrl_bhi_dis())
return;
/* Mitigate KVM by default */
@@ -2606,10 +2759,10 @@ static void __init srso_select_mitigation(void)
if (boot_cpu_data.x86 == 0x19) {
setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
- x86_return_thunk = srso_alias_return_thunk;
+ set_return_thunk(srso_alias_return_thunk);
} else {
setup_force_cpu_cap(X86_FEATURE_SRSO);
- x86_return_thunk = srso_return_thunk;
+ set_return_thunk(srso_return_thunk);
}
if (has_microcode)
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
@@ -2793,6 +2946,11 @@ static ssize_t rfds_show_state(char *buf)
return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
}
static ssize_t its_show_state(char *buf)
{
return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
}
static char *stibp_state(void)
{
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
@@ -2975,6 +3133,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_RFDS:
return rfds_show_state(buf);
case X86_BUG_ITS:
return its_show_state(buf);
default:
break;
}
@@ -3054,4 +3215,9 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib
{
return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}
ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
}
#endif


@@ -1272,6 +1272,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define GDS BIT(6)
/* CPU is affected by Register File Data Sampling */
#define RFDS BIT(7)
/* CPU is affected by Indirect Target Selection */
#define ITS BIT(8)
/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
#define ITS_NATIVE_ONLY BIT(9)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
@@ -1283,22 +1287,25 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
- VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0x5), MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
- VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xb), MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xc), MMIO | RETBLEED | GDS | SRBDS),
+ VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS),
VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
- VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS),
- VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS),
- VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED),
- VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS),
- VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS),
- VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS),
+ VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED | ITS),
+ VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+ VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY),
+ VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
- VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS),
+ VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS),
@@ -1362,6 +1369,32 @@ static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
return cpu_matches(cpu_vuln_blacklist, RFDS);
}
static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
{
/* The "immunity" bit trumps everything else: */
if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
return false;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return false;
/* None of the affected CPUs have BHI_CTRL */
if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
return false;
/*
* If a VMM did not expose ITS_NO, assume that a guest could
* be running on a vulnerable hardware or may migrate to such
* hardware.
*/
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return true;
if (cpu_matches(cpu_vuln_blacklist, ITS))
return true;
return false;
}
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
@@ -1476,9 +1509,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (vulnerable_to_rfds(x86_arch_cap_msr))
setup_force_cpu_bug(X86_BUG_RFDS);
- /* When virtualized, eIBRS could be hidden, assume vulnerable */
- if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
- !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+ /*
+ * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with
+ * BHI_NO still need to use the BHI mitigation to prevent Intra-mode
+ * attacks. When virtualized, eIBRS could be hidden, assume vulnerable.
+ */
+ if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
(boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
boot_cpu_has(X86_FEATURE_HYPERVISOR)))
setup_force_cpu_bug(X86_BUG_BHI);
@@ -1486,6 +1522,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
if (vulnerable_to_its(x86_arch_cap_msr)) {
setup_force_cpu_bug(X86_BUG_ITS);
if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
}
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return;


@@ -1102,15 +1102,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
static int __init save_microcode_in_initrd(void)
{
- unsigned int cpuid_1_eax = native_cpuid_eax(1);
struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 };
+ unsigned int cpuid_1_eax;
enum ucode_state ret;
struct cpio_data cp;
- if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
+ if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
return 0;
+ cpuid_1_eax = native_cpuid_eax(1);
if (!find_blobs_in_containers(&cp))
return -EINVAL;


@@ -43,8 +43,8 @@
#define DRIVER_VERSION "2.2"
- static struct microcode_ops *microcode_ops;
- bool dis_ucode_ldr = true;
+ static struct microcode_ops *microcode_ops;
+ static bool dis_ucode_ldr = false;
bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
@@ -91,6 +91,9 @@ static bool amd_check_current_patch_level(void)
u32 lvl, dummy, i;
u32 *levels;
if (x86_cpuid_vendor() != X86_VENDOR_AMD)
return false;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
levels = final_levels;
@@ -102,27 +105,29 @@ static bool amd_check_current_patch_level(void)
return false;
}
- static bool __init check_loader_disabled_bsp(void)
+ bool __init microcode_loader_disabled(void)
{
static const char *__dis_opt_str = "dis_ucode_ldr";
const char *cmdline = boot_command_line;
const char *option = __dis_opt_str;
/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
* completely accurate as xen pv guests don't see that CPUID bit set but
* that's good enough as they don't land on the BSP path anyway.
*/
if (native_cpuid_ecx(1) & BIT(31))
if (dis_ucode_ldr)
return true;
if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
if (amd_check_current_patch_level())
return true;
}
if (cmdline_find_option_bool(cmdline, option) <= 0)
dis_ucode_ldr = false;
/*
* Disable when:
*
* 1) The CPU does not support CPUID.
*
* 2) Bit 31 in CPUID[1]:ECX is set
* The bit is reserved for hypervisor use. This is still not
* completely accurate as XEN PV guests don't see that CPUID bit
* set, but that's good enough as they don't land on the BSP
* path anyway.
*
* 3) Certain AMD patch levels are not allowed to be
* overwritten.
*/
if (!have_cpuid_p() ||
native_cpuid_ecx(1) & BIT(31) ||
amd_check_current_patch_level())
dis_ucode_ldr = true;
return dis_ucode_ldr;
}
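The consolidation leaves the loader with a single disable predicate; a sketch of the intended call graph after this change (function names taken from the hunks in this file):

/*
 * Sketch of the call graph after the consolidation:
 *
 *   load_ucode_bsp()
 *       parses "dis_ucode_ldr" from the command line, then calls
 *       microcode_loader_disabled(), which also latches the CPUID,
 *       hypervisor-bit and AMD patch-level checks into dis_ucode_ldr
 *
 *   load_ucode_ap()
 *       reads dis_ucode_ldr directly (init-section constraints) and
 *       relies on the BSP having parsed the command line already
 *
 *   microcode_init(), save_microcode_in_initrd(), save_builtin_microcode()
 *       all funnel through microcode_loader_disabled()
 */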
@@ -132,7 +137,10 @@ void __init load_ucode_bsp(void)
unsigned int cpuid_1_eax;
bool intel = true;
if (!have_cpuid_p())
if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
dis_ucode_ldr = true;
if (microcode_loader_disabled())
return;
cpuid_1_eax = native_cpuid_eax(1);
@@ -153,9 +161,6 @@ void __init load_ucode_bsp(void)
return;
}
if (check_loader_disabled_bsp())
return;
if (intel)
load_ucode_intel_bsp(&early_data);
else
@@ -166,6 +171,11 @@ void load_ucode_ap(void)
{
unsigned int cpuid_1_eax;
/*
* Can't use microcode_loader_disabled() here - .init section
* hell. It doesn't have to either - the BSP variant must've
* parsed cmdline already anyway.
*/
if (dis_ucode_ldr)
return;
@@ -817,7 +827,7 @@ static int __init microcode_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
int error;
if (dis_ucode_ldr)
if (microcode_loader_disabled())
return -EINVAL;
if (c->x86_vendor == X86_VENDOR_INTEL)

View File

@@ -389,7 +389,7 @@ static int __init save_builtin_microcode(void)
if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
return 0;
if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return 0;
uci.mc = get_microcode_blob(&uci, true);

View File

@@ -94,7 +94,6 @@ static inline unsigned int x86_cpuid_family(void)
return x86_family(eax);
}
extern bool dis_ucode_ldr;
extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD

View File

@@ -363,7 +363,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
goto fail;
ip = trampoline + size;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
if (cpu_wants_rethunk_at(ip))
__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
else
memcpy(ip, retq, sizeof(retq));

View File

@@ -145,10 +145,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void)
*ptr = (unsigned long)ptep + PAGE_OFFSET;
#ifdef CONFIG_MICROCODE_INITRD32
/* Running on a hypervisor? */
if (native_cpuid_ecx(1) & BIT(31))
return;
params = (struct boot_params *)__pa_nodebug(&boot_params);
if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
return;

View File

@@ -312,6 +312,9 @@ int module_finalize(const Elf_Ehdr *hdr,
void *pseg = (void *)para->sh_addr;
apply_paravirt(pseg, pseg + para->sh_size);
}
its_init_mod(me);
if (retpolines || cfi) {
void *rseg = NULL, *cseg = NULL;
unsigned int rsize = 0, csize = 0;
@@ -332,6 +335,9 @@ int module_finalize(const Elf_Ehdr *hdr,
void *rseg = (void *)retpolines->sh_addr;
apply_retpolines(rseg, rseg + retpolines->sh_size);
}
its_fini_mod(me);
if (returns) {
void *rseg = (void *)returns->sh_addr;
apply_returns(rseg, rseg + returns->sh_size);
@@ -379,4 +385,5 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod)
{
alternatives_smp_module_del(mod);
its_free_mod(mod);
}

View File

@@ -81,7 +81,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
break;
case RET:
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
if (cpu_wants_rethunk_at(insn))
code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
else
code = &retinsn;
@@ -90,7 +90,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
case JCC:
if (!func) {
func = __static_call_return;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
if (cpu_wants_rethunk())
func = x86_return_thunk;
}

View File

@@ -541,4 +541,14 @@ INIT_PER_CPU(irq_stack_backing_store);
"SRSO function pair won't alias");
#endif
#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_its_thunk_rax not in second half of cacheline");
. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
#endif
#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
#endif
#endif /* CONFIG_X86_64 */

View File

@@ -1621,7 +1621,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO)
static u64 kvm_get_arch_capabilities(void)
{
@@ -1655,6 +1655,8 @@ static u64 kvm_get_arch_capabilities(void)
data |= ARCH_CAP_MDS_NO;
if (!boot_cpu_has_bug(X86_BUG_RFDS))
data |= ARCH_CAP_RFDS_NO;
if (!boot_cpu_has_bug(X86_BUG_ITS))
data |= ARCH_CAP_ITS_NO;
if (!boot_cpu_has(X86_FEATURE_RTM)) {
/*

View File

@@ -360,6 +360,45 @@ SYM_FUNC_END(__x86_return_skl)
#endif /* CONFIG_CALL_DEPTH_TRACKING */
#ifdef CONFIG_MITIGATION_ITS
.macro ITS_THUNK reg
SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
ANNOTATE_RETPOLINE_SAFE
jmp *%\reg
int3
.align 32, 0xcc /* fill to the end of the line */
.skip 32, 0xcc /* skip to the next upper half */
.endm
/* ITS mitigation requires thunks be aligned to upper half of cacheline */
.align 64, 0xcc
.skip 32, 0xcc
SYM_CODE_START(__x86_indirect_its_thunk_array)
#define GEN(reg) ITS_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN
.align 64, 0xcc
SYM_CODE_END(__x86_indirect_its_thunk_array)
.align 64, 0xcc
.skip 32, 0xcc
SYM_CODE_START(its_return_thunk)
UNWIND_HINT_FUNC
ANNOTATE_NOENDBR
ANNOTATE_UNRET_SAFE
ret
int3
SYM_CODE_END(its_return_thunk)
EXPORT_SYMBOL(its_return_thunk)
#endif /* CONFIG_MITIGATION_ITS */
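A minimal sketch of the alignment rule the thunk layout above enforces, assuming 64-byte cachelines: bit 5 of the address selects the half of the line, which is exactly the "& 0x20" test in the vmlinux.lds.S asserts earlier.

static inline bool its_in_upper_half_of_cacheline(unsigned long addr)
{
        /*
         * The offset within a 64-byte line is addr & 0x3f; the upper
         * half is offsets 32..63, i.e. bit 5 set (addr & 0x20).
         */
        return (addr & 0x3f) >= 32;
}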
/*
* This function name is magical and is used by -mfunction-return=thunk-extern
* for the compiler to generate JMPs to it.

View File

@@ -630,7 +630,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
/* Let nmi_uaccess_okay() know that we're changing CR3. */
/*
* Indicate that CR3 is about to change. nmi_uaccess_okay()
* and others are sensitive to the window where mm_cpumask(),
* CR3 and cpu_tlbstate.loaded_mm are not all in sync.
*/
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
barrier();
}
@@ -900,8 +904,16 @@ done:
static bool should_flush_tlb(int cpu, void *data)
{
struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
struct flush_tlb_info *info = data;
/*
* Order the 'loaded_mm' and 'is_lazy' against their
* write ordering in switch_mm_irqs_off(). Ensure
* 'is_lazy' is at least as new as 'loaded_mm'.
*/
smp_rmb();
/* Lazy TLB will get flushed at the next context switch. */
if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
return false;
@@ -910,8 +922,15 @@ static bool should_flush_tlb(int cpu, void *data)
if (!info->mm)
return true;
/*
* While switching, the remote CPU could have state from
* either the prev or next mm. Assume the worst and flush.
*/
if (loaded_mm == LOADED_MM_SWITCHING)
return true;
/* The target mm is loaded, and the CPU is not lazy. */
if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm)
if (loaded_mm == info->mm)
return true;
/* In cpumask, but not the loaded mm? Periodically remove by flushing. */
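The barrier pairing above follows the standard publish/observe pattern; a minimal, hypothetical sketch (not the kernel's exact code) of the guarantee it provides:

int a, b;

void writer(void)
{
        WRITE_ONCE(a, 1);
        smp_wmb();              /* publish a before b */
        WRITE_ONCE(b, 1);
}

void reader(void)
{
        int vb = READ_ONCE(b);

        smp_rmb();              /* read a no earlier than b */
        if (vb)
                WARN_ON(!READ_ONCE(a)); /* can never fire */
}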

View File

@@ -37,6 +37,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT5(b1, b2, b3, b4, b5) \
do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)
#define EMIT1_off32(b1, off) \
do { EMIT1(b1); EMIT(off, 4); } while (0)
@@ -470,7 +472,11 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{
u8 *prog = *pprog;
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
if (IS_ENABLED(CONFIG_MITIGATION_ITS) &&
cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
OPTIMIZER_HIDE_VAR(reg);
emit_jump(&prog, its_static_thunk(reg), ip);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
EMIT_LFENCE();
EMIT2(0xFF, 0xE0 + reg);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
@@ -492,7 +498,7 @@ static void emit_return(u8 **pprog, u8 *ip)
{
u8 *prog = *pprog;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
if (cpu_wants_rethunk()) {
emit_jump(&prog, x86_return_thunk, ip);
} else {
EMIT1(0xC3); /* ret */
@@ -1072,6 +1078,48 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
#define RESTORE_TAIL_CALL_CNT(stack) \
EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
struct bpf_prog *bpf_prog)
{
u8 *prog = *pprog;
u8 *func;
if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
/* The clearing sequence clobbers eax and ecx. */
EMIT1(0x50); /* push rax */
EMIT1(0x51); /* push rcx */
ip += 2;
func = (u8 *)clear_bhb_loop;
ip += x86_call_depth_emit_accounting(&prog, func);
if (emit_call(&prog, func, ip))
return -EINVAL;
EMIT1(0x59); /* pop rcx */
EMIT1(0x58); /* pop rax */
}
/* Insert IBHF instruction */
if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
/*
* Add an Indirect Branch History Fence (IBHF). IBHF acts as a
* fence preventing branch history from before the fence from
* affecting indirect branches after the fence. This is
* specifically used in cBPF jitted code to prevent Intra-mode
* BHI attacks. The IBHF instruction is designed to be a NOP on
* hardware that doesn't need or support it. The REP and REX.W
* prefixes are required by the microcode, and they also ensure
* that the NOP is unlikely to be used in existing code.
*
* IBHF is not a valid instruction in 32-bit mode.
*/
EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
}
*pprog = prog;
return 0;
}
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding)
{
@@ -1945,6 +1993,15 @@ emit_jmp:
seen_exit = true;
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
if (bpf_prog_was_classic(bpf_prog) &&
!capable(CAP_SYS_ADMIN)) {
u8 *ip = image + addrs[i - 1];
if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
return -EINVAL;
}
pop_callee_regs(&prog, callee_regs_used);
EMIT1(0xC9); /* leave */
emit_return(&prog, image + addrs[i - 1] + (prog - temp));
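For an unprivileged classic-BPF program on an affected CPU, the emitted epilogue should then look roughly like this (a sketch assembled from the hunks above; the exact sequence depends on which feature flags are set):

        push rax                ; clear_bhb_loop clobbers rax/rcx
        push rcx
        call clear_bhb_loop     ; software-clear the branch history
        pop  rcx
        pop  rax
        ibhf                    ; f3 48 0f 1e f8, a NOP where unsupported
        ...                     ; restore callee-saved registers
        leave
        ret                     ; or jmp x86_return_thunk, see emit_return()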

View File

@@ -566,6 +566,7 @@ CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -581,6 +582,7 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
@@ -597,6 +599,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_spec_rstack_overflow.attr,
&dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr,
&dev_attr_indirect_target_selection.attr,
NULL
};

View File

@@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void)
#ifdef CONFIG_CLKEVT_I8253
void clockevent_i8253_disable(void)
{
raw_spin_lock(&i8253_lock);
guard(raw_spinlock_irqsave)(&i8253_lock);
/*
* Writing the MODE register should stop the counter, according to
@@ -132,8 +132,6 @@ void clockevent_i8253_disable(void)
outb_p(0, PIT_CH0);
outb_p(0x30, PIT_MODE);
raw_spin_unlock(&i8253_lock);
}
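This is the scope-guard idiom from <linux/cleanup.h>: the lock is acquired with IRQs saved and released automatically at every exit from the scope, which is why the explicit raw_spin_unlock() above is deleted. A minimal sketch:

static void i8253_example(void)
{
        guard(raw_spinlock_irqsave)(&i8253_lock);

        outb_p(0x30, PIT_MODE); /* critical section */
}       /* unlock + IRQ restore happen automatically here */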
static int pit_shutdown(struct clock_event_device *evt)

View File

@@ -42,7 +42,12 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
/* We just need to read back a register to post the write.
* Reading back the remapped register causes problems on
* some platforms so just read back the memory size register.
*/
if (adev->nbio.funcs->get_memsize)
adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}
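The same pattern repeats in the hdp_v5_0, hdp_v5_2 and hdp_v6_0 hunks below: an MMIO write may be posted, so a read from any harmless register on the same interface forces it to complete. A generic sketch, with hypothetical register offsets:

static void flush_and_post(void __iomem *base)
{
        writel(0, base + FLUSH_REG);            /* hypothetical offset */
        (void)readl(base + MEMSIZE_REG);        /* read-back posts the write */
}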

View File

@@ -33,7 +33,12 @@ static void hdp_v5_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
/* We just need to read back a register to post the write.
* Reading back the remapped register causes problems on
* some platforms so just read back the memory size register.
*/
if (adev->nbio.funcs->get_memsize)
adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

View File

@@ -34,7 +34,17 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
if (!ring || !ring->funcs->emit_wreg) {
WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,
0);
RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
if (amdgpu_sriov_vf(adev)) {
/* this is fine because SR_IOV doesn't remap the register */
RREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
} else {
/* We just need to read back a register to post the write.
* Reading back the remapped register causes problems on
* some platforms so just read back the memory size register.
*/
if (adev->nbio.funcs->get_memsize)
adev->nbio.funcs->get_memsize(adev);
}
} else {
amdgpu_ring_emit_wreg(ring,
(adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2,

View File

@@ -33,7 +33,12 @@ static void hdp_v6_0_flush_hdp(struct amdgpu_device *adev,
{
if (!ring || !ring->funcs->emit_wreg) {
WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
/* We just need to read back a register to post the write.
* Reading back the remapped register causes problems on
* some platforms so just read back the memory size register.
*/
if (adev->nbio.funcs->get_memsize)
adev->nbio.funcs->get_memsize(adev);
} else {
amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

View File

@@ -610,15 +610,21 @@ static void dm_crtc_high_irq(void *interrupt_params)
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
if (acrtc->dm_irq_params.stream &&
acrtc->dm_irq_params.vrr_params.supported &&
acrtc->dm_irq_params.freesync_config.state ==
VRR_STATE_ACTIVE_VARIABLE) {
acrtc->dm_irq_params.vrr_params.supported) {
bool replay_en = acrtc->dm_irq_params.stream->link->replay_settings.replay_feature_enabled;
bool psr_en = acrtc->dm_irq_params.stream->link->psr_settings.psr_feature_enabled;
bool fs_active_var_en = acrtc->dm_irq_params.freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
mod_freesync_handle_v_update(adev->dm.freesync_module,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params);
dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params.adjust);
/* Update vmin/vmax only if freesync is active, or if both PSR and replay are disabled */
if (fs_active_var_en || (!fs_active_var_en && !replay_en && !psr_en)) {
dc_stream_adjust_vmin_vmax(adev->dm.dc,
acrtc->dm_irq_params.stream,
&acrtc->dm_irq_params.vrr_params.adjust);
}
}
/*
@@ -11047,7 +11053,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
* Transient states before tunneling is enabled could
* lead to this error. We can ignore this for now.
*/
if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
if (p_notify->result == AUX_RET_ERROR_PROTOCOL_ERROR) {
DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
payload->address, payload->length,
p_notify->result);
@@ -11056,22 +11062,14 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
goto out;
}
payload->reply[0] = adev->dm.dmub_notify->aux_reply.command & 0xF;
if (adev->dm.dmub_notify->aux_reply.command & 0xF0)
/* The reply is stored in the top nibble of the command. */
payload->reply[0] = (adev->dm.dmub_notify->aux_reply.command >> 4) & 0xF;
payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
if (!payload->write && p_notify->aux_reply.length &&
(payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
if (payload->length != p_notify->aux_reply.length) {
DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
p_notify->aux_reply.length,
payload->address, payload->length);
*operation_result = AUX_RET_ERROR_INVALID_REPLY;
goto out;
}
if (!payload->write && p_notify->aux_reply.length)
memcpy(payload->data, p_notify->aux_reply.data,
p_notify->aux_reply.length);
}
/* success */
ret = p_notify->aux_reply.length;

View File

@@ -48,6 +48,9 @@
#define PEAK_FACTOR_X1000 1006
/*
* This function handles both native AUX and I2C-Over-AUX transactions.
*/
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -84,15 +87,25 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
if (adev->dm.aux_hpd_discon_quirk) {
if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
operation_result == AUX_RET_ERROR_HPD_DISCON) {
result = 0;
result = msg->size;
operation_result = AUX_RET_SUCCESS;
}
}
if (payload.write && result >= 0)
result = msg->size;
/*
* A result of 0 covers the AUX_DEFER/I2C_DEFER cases.
*/
if (payload.write && result >= 0) {
if (result) {
/* A non-zero result means only part of the payload was written; force 0 so the caller retries. */
drm_info(adev_to_drm(adev), "amdgpu: AUX partially written\n");
result = 0;
} else if (!payload.reply[0])
/* I2C_ACK | AUX_ACK */
result = msg->size;
}
if (result < 0)
if (result < 0) {
switch (operation_result) {
case AUX_RET_SUCCESS:
break;
@@ -111,6 +124,13 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
break;
}
drm_info(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result);
}
if (payload.reply[0])
drm_info(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.",
payload.reply[0]);
return result;
}

View File

@@ -979,27 +979,28 @@ static const struct panel_desc auo_g070vvn01 = {
},
};
static const struct drm_display_mode auo_g101evn010_mode = {
.clock = 68930,
.hdisplay = 1280,
.hsync_start = 1280 + 82,
.hsync_end = 1280 + 82 + 2,
.htotal = 1280 + 82 + 2 + 84,
.vdisplay = 800,
.vsync_start = 800 + 8,
.vsync_end = 800 + 8 + 2,
.vtotal = 800 + 8 + 2 + 6,
static const struct display_timing auo_g101evn010_timing = {
.pixelclock = { 64000000, 68930000, 85000000 },
.hactive = { 1280, 1280, 1280 },
.hfront_porch = { 8, 64, 256 },
.hback_porch = { 8, 64, 256 },
.hsync_len = { 40, 168, 767 },
.vactive = { 800, 800, 800 },
.vfront_porch = { 4, 8, 100 },
.vback_porch = { 4, 8, 100 },
.vsync_len = { 8, 16, 223 },
};
static const struct panel_desc auo_g101evn010 = {
.modes = &auo_g101evn010_mode,
.num_modes = 1,
.timings = &auo_g101evn010_timing,
.num_timings = 1,
.bpc = 6,
.size = {
.width = 216,
.height = 135,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
.bus_flags = DRM_BUS_FLAG_DE_HIGH,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};

View File

@@ -289,11 +289,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
return DRM_GPU_SCHED_STAT_NOMINAL;
}
/* If the current address or return address has changed, then the GPU
* has probably made progress and we should delay the reset. This
* could fail if the GPU got in an infinite loop in the CL, but that
* is pretty unlikely outside of an i-g-t testcase.
*/
static void
v3d_sched_skip_reset(struct drm_sched_job *sched_job)
{
struct drm_gpu_scheduler *sched = sched_job->sched;
spin_lock(&sched->job_list_lock);
list_add(&sched_job->list, &sched->pending_list);
spin_unlock(&sched->job_list_lock);
}
static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 *timedout_ctca, u32 *timedout_ctra)
@@ -303,9 +308,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q));
u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q));
/* If the current address or return address has changed, then the GPU
* has probably made progress and we should delay the reset. This
* could fail if the GPU got in an infinite loop in the CL, but that
* is pretty unlikely outside of an i-g-t testcase.
*/
if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
*timedout_ctca = ctca;
*timedout_ctra = ctra;
v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
@@ -345,11 +357,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
struct v3d_dev *v3d = job->base.v3d;
u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4);
/* If we've made progress, skip reset and let the timer get
* rearmed.
/* If we've made progress, skip reset, add the job to the pending
* list, and let the timer get rearmed.
*/
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
v3d_sched_skip_reset(sched_job);
return DRM_GPU_SCHED_STAT_NOMINAL;
}

View File

@@ -211,9 +211,9 @@ static const struct iio_chan_spec adis16201_channels[] = {
BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12),
ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 12),
IIO_CHAN_SOFT_TIMESTAMP(7)
};

View File

@@ -231,7 +231,7 @@ struct adxl355_data {
u8 transf_buf[3];
struct {
u8 buf[14];
s64 ts;
aligned_s64 ts;
} buffer;
} __aligned(IIO_DMA_MINALIGN);
};
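aligned_s64 is presumably the type introduced by the "types" change in this release; a sketch of its likely definition, which pins the timestamp to 8-byte alignment no matter how large the preceding sample buffer is (as iio_push_to_buffers_with_timestamp() requires):

/* uapi side (sketch) */
#define __aligned_s64 __s64 __attribute__((aligned(8)))
/* kernel side (sketch) */
#define aligned_s64 __aligned_s64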

View File

@@ -620,18 +620,14 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
if (ret)
return ret;
st->odr = odr;
/* Activity timers depend on ODR */
ret = _adxl367_set_act_time_ms(st, st->act_time_ms);
if (ret)
return ret;
ret = _adxl367_set_inact_time_ms(st, st->inact_time_ms);
if (ret)
return ret;
st->odr = odr;
return 0;
return _adxl367_set_inact_time_ms(st, st->inact_time_ms);
}
static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr)

View File

@@ -127,7 +127,7 @@ static int ad7606_spi_reg_read(struct ad7606_state *st, unsigned int addr)
{
.tx_buf = &st->d16[0],
.len = 2,
.cs_change = 0,
.cs_change = 1,
}, {
.rx_buf = &st->d16[1],
.len = 2,

View File

@@ -483,7 +483,7 @@ static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct {
__le16 values[DLN2_ADC_MAX_CHANNELS];
int64_t timestamp_space;
aligned_s64 timestamp_space;
} data;
struct dln2_adc_get_all_vals dev_data;
struct dln2_adc *dln2 = iio_priv(indio_dev);

View File

@@ -485,15 +485,6 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
if (info->reset)
rockchip_saradc_reset_controller(info->reset);
/*
* Use a default value for the converter clock.
* This may become user-configurable in the future.
*/
ret = clk_set_rate(info->clk, info->data->clk_rate);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
"failed to set adc clk rate\n");
ret = regulator_enable(info->vref);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
@@ -520,6 +511,14 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
if (IS_ERR(info->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(info->clk),
"failed to get adc clock\n");
/*
* Use a default value for the converter clock.
* This may become user-configurable in the future.
*/
ret = clk_set_rate(info->clk, info->data->clk_rate);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret,
"failed to set adc clk rate\n");
platform_set_drvdata(pdev, indio_dev);

View File

@@ -370,6 +370,9 @@ int st_lsm6dsx_read_fifo(struct st_lsm6dsx_hw *hw)
if (fifo_status & cpu_to_le16(ST_LSM6DSX_FIFO_EMPTY_MASK))
return 0;
if (!pattern_len)
pattern_len = ST_LSM6DSX_SAMPLE_SIZE;
fifo_len = (le16_to_cpu(fifo_status) & fifo_diff_mask) *
ST_LSM6DSX_CHAN_SIZE;
fifo_len = (fifo_len / pattern_len) * pattern_len;
@@ -601,6 +604,9 @@ int st_lsm6dsx_read_tagged_fifo(struct st_lsm6dsx_hw *hw)
if (!fifo_len)
return 0;
if (!pattern_len)
pattern_len = ST_LSM6DSX_TAGGED_SAMPLE_SIZE;
for (read_len = 0; read_len < fifo_len; read_len += pattern_len) {
err = st_lsm6dsx_read_block(hw,
ST_LSM6DSX_REG_FIFO_OUT_TAG_ADDR,

View File

@@ -121,9 +121,9 @@ static const struct maxim_thermocouple_chip maxim_thermocouple_chips[] = {
struct maxim_thermocouple_data {
struct spi_device *spi;
const struct maxim_thermocouple_chip *chip;
char tc_type;
u8 buffer[16] __aligned(IIO_DMA_MINALIGN);
char tc_type;
};
static int maxim_thermocouple_read(struct maxim_thermocouple_data *data,

View File

@@ -77,12 +77,13 @@
* xbox d-pads should map to buttons, as is required for DDR pads
* but we map them to axes when possible to simplify things
*/
#define MAP_DPAD_TO_BUTTONS (1 << 0)
#define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
#define MAP_STICKS_TO_NULL (1 << 2)
#define MAP_SELECT_BUTTON (1 << 3)
#define MAP_PADDLES (1 << 4)
#define MAP_PROFILE_BUTTON (1 << 5)
#define MAP_DPAD_TO_BUTTONS BIT(0)
#define MAP_TRIGGERS_TO_BUTTONS BIT(1)
#define MAP_STICKS_TO_NULL BIT(2)
#define MAP_SHARE_BUTTON BIT(3)
#define MAP_PADDLES BIT(4)
#define MAP_PROFILE_BUTTON BIT(5)
#define MAP_SHARE_OFFSET BIT(6)
#define DANCEPAD_MAP_CONFIG (MAP_DPAD_TO_BUTTONS | \
MAP_TRIGGERS_TO_BUTTONS | MAP_STICKS_TO_NULL)
@@ -135,7 +136,7 @@ static const struct xpad_device {
{ 0x03f0, 0x048D, "HyperX Clutch", 0, XTYPE_XBOX360 }, /* wireless */
{ 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
{ 0x03f0, 0x07A0, "HyperX Clutch Gladiate RGB", 0, XTYPE_XBOXONE },
{ 0x03f0, 0x08B6, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE }, /* v2 */
{ 0x03f0, 0x08B6, "HyperX Clutch Gladiate", MAP_SHARE_BUTTON, XTYPE_XBOXONE }, /* v2 */
{ 0x03f0, 0x09B4, "HyperX Clutch Tanto", 0, XTYPE_XBOXONE },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
@@ -159,7 +160,7 @@ static const struct xpad_device {
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0b00, "Microsoft X-Box One Elite 2 pad", MAP_PADDLES, XTYPE_XBOXONE },
{ 0x045e, 0x0b0a, "Microsoft X-Box Adaptive Controller", MAP_PROFILE_BUTTON, XTYPE_XBOXONE },
{ 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x045e, 0x0b12, "Microsoft Xbox Series S|X Controller", MAP_SHARE_BUTTON | MAP_SHARE_OFFSET, XTYPE_XBOXONE },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
@@ -205,13 +206,13 @@ static const struct xpad_device {
{ 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 },
{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
{ 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
{ 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
{ 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", 0, XTYPE_XBOXONE },
{ 0x0b05, 0x1a38, "ASUS ROG RAIKIRI", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x0b05, 0x1abb, "ASUS ROG RAIKIRI PRO", 0, XTYPE_XBOXONE },
{ 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
{ 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
@@ -240,7 +241,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", XTYPE_XBOXONE },
{ 0x0e6f, 0x015d, "PDP Mirror's Edge Official Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
@@ -386,10 +387,11 @@ static const struct xpad_device {
{ 0x2dc8, 0x3106, "8BitDo Ultimate Wireless / Pro 2 Wired Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x3109, "8BitDo Ultimate Wireless Bluetooth", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310a, "8BitDo Ultimate 2C Wireless Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x310b, "8BitDo Ultimate 2 Wireless Controller", 0, XTYPE_XBOX360 },
{ 0x2dc8, 0x6001, "8BitDo SN30 Pro", 0, XTYPE_XBOX360 },
{ 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x2e24, 0x1688, "Hyperkin X91 X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SELECT_BUTTON, XTYPE_XBOXONE },
{ 0x2e95, 0x0504, "SCUF Gaming Controller", MAP_SHARE_BUTTON, XTYPE_XBOXONE },
{ 0x31e3, 0x1100, "Wooting One", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1200, "Wooting Two", 0, XTYPE_XBOX360 },
{ 0x31e3, 0x1210, "Wooting Lekker", 0, XTYPE_XBOX360 },
@@ -1025,7 +1027,7 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
* The report format was gleaned from
* https://github.com/kylelemons/xbox/blob/master/xbox.go
*/
static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data, u32 len)
{
struct input_dev *dev = xpad->dev;
bool do_sync = false;
@@ -1066,8 +1068,12 @@ static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char
/* menu/view buttons */
input_report_key(dev, BTN_START, data[4] & BIT(2));
input_report_key(dev, BTN_SELECT, data[4] & BIT(3));
if (xpad->mapping & MAP_SELECT_BUTTON)
input_report_key(dev, KEY_RECORD, data[22] & BIT(0));
if (xpad->mapping & MAP_SHARE_BUTTON) {
if (xpad->mapping & MAP_SHARE_OFFSET)
input_report_key(dev, KEY_RECORD, data[len - 26] & BIT(0));
else
input_report_key(dev, KEY_RECORD, data[len - 18] & BIT(0));
}
/* buttons A,B,X,Y */
input_report_key(dev, BTN_A, data[4] & BIT(4));
@@ -1215,7 +1221,7 @@ static void xpad_irq_in(struct urb *urb)
xpad360w_process_packet(xpad, 0, xpad->idata);
break;
case XTYPE_XBOXONE:
xpadone_process_packet(xpad, 0, xpad->idata);
xpadone_process_packet(xpad, 0, xpad->idata, urb->actual_length);
break;
default:
xpad_process_packet(xpad, 0, xpad->idata);
@@ -1972,7 +1978,7 @@ static int xpad_init_input(struct usb_xpad *xpad)
xpad->xtype == XTYPE_XBOXONE) {
for (i = 0; xpad360_btn[i] >= 0; i++)
input_set_capability(input_dev, EV_KEY, xpad360_btn[i]);
if (xpad->mapping & MAP_SELECT_BUTTON)
if (xpad->mapping & MAP_SHARE_BUTTON)
input_set_capability(input_dev, EV_KEY, KEY_RECORD);
} else {
for (i = 0; xpad_btn[i] >= 0; i++)

View File

@@ -147,8 +147,8 @@ static void mtk_pmic_keys_lp_reset_setup(struct mtk_pmic_keys *keys,
u32 value, mask;
int error;
kregs_home = keys->keys[MTK_PMIC_HOMEKEY_INDEX].regs;
kregs_pwr = keys->keys[MTK_PMIC_PWRKEY_INDEX].regs;
kregs_home = &regs->keys_regs[MTK_PMIC_HOMEKEY_INDEX];
kregs_pwr = &regs->keys_regs[MTK_PMIC_PWRKEY_INDEX];
error = of_property_read_u32(keys->dev->of_node, "power-off-time-sec",
&long_press_debounce);

View File

@@ -163,6 +163,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
static const char * const smbus_pnp_ids[] = {
/* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
"DLL060d", /* Dell Precision M3800 */
"LEN0048", /* X1 Carbon 3 */
"LEN0046", /* X250 */
"LEN0049", /* Yoga 11e */
@@ -189,11 +190,15 @@ static const char * const smbus_pnp_ids[] = {
"LEN2054", /* E480 */
"LEN2055", /* E580 */
"LEN2068", /* T14 Gen 1 */
"SYN1221", /* TUXEDO InfinityBook Pro 14 v5 */
"SYN3003", /* HP EliteBook 850 G1 */
"SYN3015", /* HP EliteBook 840 G2 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
"SYN323d", /* HP Spectre X360 13-w013dx */
"SYN3257", /* HP Envy 13-ad105ng */
"TOS01f6", /* Dynabook Portege X30L-G */
"TOS0213", /* Dynabook Portege X30-D */
NULL
};

View File

@@ -580,7 +580,7 @@ static int cyttsp5_power_control(struct cyttsp5 *ts, bool on)
int rc;
SET_CMD_REPORT_TYPE(cmd[0], 0);
SET_CMD_REPORT_ID(cmd[0], HID_POWER_SLEEP);
SET_CMD_REPORT_ID(cmd[0], state);
SET_CMD_OPCODE(cmd[1], HID_CMD_SET_POWER);
rc = cyttsp5_write(ts, HID_COMMAND_REG, cmd, sizeof(cmd));
@@ -865,13 +865,16 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
ts->input->phys = ts->phys;
input_set_drvdata(ts->input, ts);
/* Reset the gpio to be in a reset state */
/* Assert gpio to be in a reset state */
ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ts->reset_gpio)) {
error = PTR_ERR(ts->reset_gpio);
dev_err(dev, "Failed to request reset gpio, error %d\n", error);
return error;
}
fsleep(10); /* Ensure long-enough reset pulse (minimum 10us). */
gpiod_set_value_cansleep(ts->reset_gpio, 0);
/* Need a delay to have device up */

View File

@@ -1246,7 +1246,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
t = dm_get_live_table(md, &srcu_idx);
if (!t)
return 0;
goto put_live_table;
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
@@ -1257,6 +1257,7 @@ static int dm_keyslot_evict(struct blk_crypto_profile *profile,
(void *)key);
}
put_live_table:
dm_put_live_table(md, srcu_idx);
return 0;
}

View File

@@ -2125,9 +2125,9 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
unregister_candev(cdev->net);
if (cdev->is_peripheral)
can_rx_offload_del(&cdev->offload);
unregister_candev(cdev->net);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);

View File

@@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
.brp_inc = 1,
};
/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of
* [-64,63] for TDCO, indicating a relative TDCO.
*
* Manual tests have shown that using a relative TDCO configuration
* results in bus off, while an absolute configuration works.
*
* For TDCO use the max value (63) from the data sheet, but 0 as the
* minimum.
*/
static const struct can_tdc_const mcp251xfd_tdc_const = {
.tdcv_min = 0,
.tdcv_max = 63,
.tdco_min = 0,
.tdco_max = 63,
.tdcf_min = 0,
.tdcf_max = 0,
};
static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
{
switch (model) {
@@ -510,8 +528,7 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
{
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
u32 val = 0;
s8 tdco;
u32 tdcmod, val = 0;
int err;
/* CAN Control Register
@@ -575,11 +592,16 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
return err;
/* Transmitter Delay Compensation */
tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
-64, 63);
val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
MCP251XFD_REG_TDC_TDCMOD_AUTO) |
FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO)
tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO;
else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL;
else
tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) |
FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco);
return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
}
@@ -2083,10 +2105,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
priv->can.bittiming_const = &mcp251xfd_bittiming_const;
priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
priv->can.tdc_const = &mcp251xfd_tdc_const;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
CAN_CTRLMODE_CC_LEN8_DLC;
CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO |
CAN_CTRLMODE_TDC_MANUAL;
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
priv->ndev = ndev;
priv->spi = spi;
@@ -2179,8 +2203,8 @@ static void mcp251xfd_remove(struct spi_device *spi)
struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
struct net_device *ndev = priv->ndev;
can_rx_offload_del(&priv->offload);
mcp251xfd_unregister(priv);
can_rx_offload_del(&priv->offload);
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
}
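With CAN_CTRLMODE_TDC_AUTO the controller measures TDCV itself and only TDCO comes from the stack. A sketch of how the CAN core typically derives an absolute TDCO (cf. can_calc_tdco()): place the secondary sample point at the data-phase sample point, measured in minimum time quanta from the start of the bit, clamped to the controller's range:

u32 tdco = dbt->brp * (1 /* sync seg */ + dbt->prop_seg + dbt->phase_seg1);

priv->can.tdc.tdco = min(tdco, priv->can.tdc_const->tdco_max);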

View File

@@ -373,15 +373,17 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
}
vc1 &= ~VC1_RX_MCST_FWD_EN;
if (enable) {
vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
vc1 |= VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
if (enable_filtering) {
vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
vc5 |= VC5_DROP_VTABLE_MISS;
} else {
vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
vc5 &= ~VC5_DROP_VTABLE_MISS;
}
@@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
} else {
vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
vc1 &= ~VC1_RX_MCST_UNTAG_EN;
vc4 &= ~VC4_ING_VID_CHECK_MASK;
vc5 &= ~VC5_DROP_VTABLE_MISS;
@@ -1519,12 +1521,21 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
struct b53_vlan *vl;
u16 old_pvid, new_pvid;
int err;
err = b53_vlan_prepare(ds, port, vlan);
if (err)
return err;
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &old_pvid);
if (pvid)
new_pvid = vlan->vid;
else if (!pvid && vlan->vid == old_pvid)
new_pvid = b53_default_pvid(dev);
else
new_pvid = old_pvid;
vl = &dev->vlans[vlan->vid];
b53_get_vlan_entry(dev, vlan->vid, vl);
@@ -1541,10 +1552,10 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
b53_set_vlan_entry(dev, vlan->vid, vl);
b53_fast_age_vlan(dev, vlan->vid);
if (pvid && !dsa_is_cpu_port(ds, port)) {
if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
vlan->vid);
b53_fast_age_vlan(dev, vlan->vid);
new_pvid);
b53_fast_age_vlan(dev, old_pvid);
}
return 0;
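The new_pvid ladder above reduces to a small decision table:

/*
 *   BRIDGE_VLAN_INFO_PVID set         -> new_pvid = vlan->vid
 *   flag clear, vid == current pvid   -> new_pvid = default pvid
 *                                        (the pvid is being removed)
 *   flag clear, vid != current pvid   -> new_pvid = old_pvid (unchanged)
 */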
@@ -1956,7 +1967,7 @@ EXPORT_SYMBOL(b53_br_join);
void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
{
struct b53_device *dev = ds->priv;
struct b53_vlan *vl = &dev->vlans[0];
struct b53_vlan *vl;
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
unsigned int i;
u16 pvlan, reg, pvid;
@@ -1982,6 +1993,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
dev->ports[port].vlan_ctl_mask = pvlan;
pvid = b53_default_pvid(dev);
vl = &dev->vlans[pvid];
/* Make this port join all VLANs without VLAN entries */
if (is58xx(dev)) {
@@ -1990,12 +2002,12 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
if (!(reg & BIT(cpu_port)))
reg |= BIT(cpu_port);
b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
} else {
b53_get_vlan_entry(dev, pvid, vl);
vl->members |= BIT(port) | BIT(cpu_port);
vl->untag |= BIT(port) | BIT(cpu_port);
b53_set_vlan_entry(dev, pvid, vl);
}
b53_get_vlan_entry(dev, pvid, vl);
vl->members |= BIT(port) | BIT(cpu_port);
vl->untag |= BIT(port) | BIT(cpu_port);
b53_set_vlan_entry(dev, pvid, vl);
}
EXPORT_SYMBOL(b53_br_leave);

View File

@@ -3117,11 +3117,19 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
int i;
int i, j, txqs = 1;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
txqs = MTK_QDMA_NUM_QUEUES;
for (i = 0; i < MTK_MAX_DEVS; i++) {
if (!eth->netdev[i])
continue;
for (j = 0; j < txqs; j++)
netdev_tx_reset_subqueue(eth->netdev[i], j);
}
for (i = 0; i < MTK_MAX_DEVS; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
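netdev_tx_reset_subqueue() is the new shorthand this hunk depends on; its likely shape is a thin wrapper that resets the BQL state of a single subqueue (a sketch):

static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
                                            u32 qid)
{
        netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
}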

View File

@@ -4156,7 +4156,8 @@ static void nvme_fw_act_work(struct work_struct *work)
msleep(100);
}
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING) ||
!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
return;
nvme_unquiesce_io_queues(ctrl);

View File

@@ -398,16 +398,14 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
bytes_available = ioread32(fifo->base_addr + XLLF_RLR_OFFSET);
if (!bytes_available) {
dev_err(fifo->dt_device, "received a packet of length 0 - fifo core will be reset\n");
reset_ip_core(fifo);
dev_err(fifo->dt_device, "received a packet of length 0\n");
ret = -EIO;
goto end_unlock;
}
if (bytes_available > len) {
dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu) - fifo core will be reset\n",
dev_err(fifo->dt_device, "user read buffer too small (available bytes=%zu user buffer bytes=%zu)\n",
bytes_available, len);
reset_ip_core(fifo);
ret = -EINVAL;
goto end_unlock;
}
@@ -416,8 +414,7 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
/* this probably can't happen unless IP
* registers were previously mishandled
*/
dev_err(fifo->dt_device, "received a packet that isn't word-aligned - fifo core will be reset\n");
reset_ip_core(fifo);
dev_err(fifo->dt_device, "received a packet that isn't word-aligned\n");
ret = -EIO;
goto end_unlock;
}
@@ -438,7 +435,6 @@ static ssize_t axis_fifo_read(struct file *f, char __user *buf,
if (copy_to_user(buf + copied * sizeof(u32), tmp_buf,
copy * sizeof(u32))) {
reset_ip_core(fifo);
ret = -EFAULT;
goto end_unlock;
}
@@ -547,7 +543,6 @@ static ssize_t axis_fifo_write(struct file *f, const char __user *buf,
if (copy_from_user(tmp_buf, buf + copied * sizeof(u32),
copy * sizeof(u32))) {
reset_ip_core(fifo);
ret = -EFAULT;
goto end_unlock;
}
@@ -780,9 +775,6 @@ static int axis_fifo_parse_dt(struct axis_fifo *fifo)
goto end;
}
/* IP sets TDFV to fifo depth - 4 so we will do the same */
fifo->tx_fifo_depth -= 4;
ret = get_dts_property(fifo, "xlnx,use-rx-data", &fifo->has_rx_fifo);
if (ret) {
dev_err(fifo->dt_device, "missing xlnx,use-rx-data property\n");

View File

@@ -136,7 +136,7 @@ static ssize_t ad7816_store_mode(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7816_chip_info *chip = iio_priv(indio_dev);
if (strcmp(buf, "full")) {
if (strcmp(buf, "full") == 0) {
gpiod_set_value(chip->rdwr_pin, 1);
chip->mode = AD7816_FULL;
} else {

View File

@@ -138,6 +138,26 @@ static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
(portsc & PORT_CHANGE_BITS), port_regs);
}
static void cdnsp_set_apb_timeout_value(struct cdnsp_device *pdev)
{
struct cdns *cdns = dev_get_drvdata(pdev->dev);
__le32 __iomem *reg;
void __iomem *base;
u32 offset = 0;
u32 val;
if (!cdns->override_apb_timeout)
return;
base = &pdev->cap_regs->hc_capbase;
offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
reg = base + offset + REG_CHICKEN_BITS_3_OFFSET;
val = le32_to_cpu(readl(reg));
val = CHICKEN_APB_TIMEOUT_SET(val, cdns->override_apb_timeout);
writel(cpu_to_le32(val), reg);
}
static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
{
__le32 __iomem *reg;
@@ -1776,6 +1796,8 @@ static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
pdev->rev_cap = reg;
pdev->rtl_revision = readl(&pdev->rev_cap->rtl_revision);
dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
readl(&pdev->rev_cap->ctrl_revision),
readl(&pdev->rev_cap->rtl_revision),
@@ -1801,6 +1823,15 @@ static int cdnsp_gen_setup(struct cdnsp_device *pdev)
pdev->hci_version = HC_VERSION(pdev->hcc_params);
pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);
/*
* Override the APB timeout value to give the controller more time to
* enable the UTMI clock and synchronize the APB and UTMI clock domains.
* This fix is platform specific and is required to fix an issue with
* reading an incorrect value from the PORTSC register after resuming
* from L1 state.
*/
cdnsp_set_apb_timeout_value(pdev);
cdnsp_get_rev_cap(pdev);
/* Make sure the Device Controller is halted. */

View File

@@ -520,6 +520,9 @@ struct cdnsp_rev_cap {
#define REG_CHICKEN_BITS_2_OFFSET 0x48
#define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28)
#define REG_CHICKEN_BITS_3_OFFSET 0x4C
#define CHICKEN_APB_TIMEOUT_SET(p, val) (((p) & ~GENMASK(21, 0)) | (val))
/* XBUF Extended Capability ID. */
#define XBUF_CAP_ID 0xCB
#define XBUF_RX_TAG_MASK_0_OFFSET 0x1C
@@ -1359,6 +1362,7 @@ struct cdnsp_port {
* @rev_cap: Controller Capabilities Registers.
* @hcs_params1: Cached register copies of read-only HCSPARAMS1
* @hcc_params: Cached register copies of read-only HCCPARAMS1
* @rtl_revision: Cached controller rtl revision.
* @setup: Temporary buffer for setup packet.
* @ep0_preq: Internal allocated request used during enumeration.
* @ep0_stage: ep0 stage during enumeration process.
@@ -1413,6 +1417,8 @@ struct cdnsp_device {
__u32 hcs_params1;
__u32 hcs_params3;
__u32 hcc_params;
#define RTL_REVISION_NEW_LPM 0x2700
__u32 rtl_revision;
/* Lock used in interrupt thread context. */
spinlock_t lock;
struct usb_ctrlrequest setup;

View File

@@ -33,6 +33,8 @@
#define CDNS_DRD_ID 0x0100
#define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80)
#define CHICKEN_APB_TIMEOUT_VALUE 0x1C20
static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
{
/*
@@ -144,6 +146,14 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
cdnsp->otg_irq = pdev->irq;
}
/*
* Cadence PCI-based platforms require a longer APB timeout to fix
* a clock-domain synchronization issue after resuming the
* controller from L1 state.
*/
cdnsp->override_apb_timeout = CHICKEN_APB_TIMEOUT_VALUE;
pci_set_drvdata(pdev, cdnsp);
if (pci_is_enabled(func)) {
cdnsp->dev = dev;
cdnsp->gadget_init = cdnsp_gadget_init;
@@ -153,8 +163,6 @@ static int cdnsp_pci_probe(struct pci_dev *pdev,
goto free_cdnsp;
}
pci_set_drvdata(pdev, cdnsp);
device_wakeup_enable(&pdev->dev);
if (pci_dev_run_wake(pdev))
pm_runtime_put_noidle(&pdev->dev);

View File

@@ -308,7 +308,8 @@ static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
writel(db_value, reg_addr);
cdnsp_force_l0_go(pdev);
if (pdev->rtl_revision < RTL_REVISION_NEW_LPM)
cdnsp_force_l0_go(pdev);
/* Doorbell was set. */
return true;

View File

@@ -79,6 +79,8 @@ struct cdns3_platform_data {
* @pdata: platform data from glue layer
* @lock: spinlock structure
* @xhci_plat_data: xhci private data structure pointer
* @override_apb_timeout: holds the APB timeout value; when 0, the default
* value in CHICKEN_BITS_3 is preserved.
* @gadget_init: pointer to gadget initialization function
*/
struct cdns {
@@ -117,6 +119,7 @@ struct cdns {
struct cdns3_platform_data *pdata;
spinlock_t lock;
struct xhci_plat_priv *xhci_plat_data;
u32 override_apb_timeout;
int (*gadget_init)(struct cdns *cdns);
};

View File

@@ -482,6 +482,7 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
u8 *buffer;
u8 tag;
int rv;
long wait_rv;
dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
data->iin_ep_present);
@@ -511,16 +512,17 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
}
if (data->iin_ep_present) {
rv = wait_event_interruptible_timeout(
wait_rv = wait_event_interruptible_timeout(
data->waitq,
atomic_read(&data->iin_data_valid) != 0,
file_data->timeout);
if (rv < 0) {
dev_dbg(dev, "wait interrupted %d\n", rv);
if (wait_rv < 0) {
dev_dbg(dev, "wait interrupted %ld\n", wait_rv);
rv = wait_rv;
goto exit;
}
if (rv == 0) {
if (wait_rv == 0) {
dev_dbg(dev, "wait timed out\n");
rv = -ETIMEDOUT;
goto exit;
@@ -539,6 +541,8 @@ static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv);
rv = 0;
exit:
/* bump interrupt bTag */
data->iin_bTag += 1;
@@ -602,9 +606,9 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
{
struct usbtmc_device_data *data = file_data->data;
struct device *dev = &data->intf->dev;
int rv;
u32 timeout;
unsigned long expire;
long wait_rv;
if (!data->iin_ep_present) {
dev_dbg(dev, "no interrupt endpoint present\n");
@@ -618,25 +622,24 @@ static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
mutex_unlock(&data->io_mutex);
rv = wait_event_interruptible_timeout(
data->waitq,
atomic_read(&file_data->srq_asserted) != 0 ||
atomic_read(&file_data->closing),
expire);
wait_rv = wait_event_interruptible_timeout(
data->waitq,
atomic_read(&file_data->srq_asserted) != 0 ||
atomic_read(&file_data->closing),
expire);
mutex_lock(&data->io_mutex);
/* Note! disconnect or close could be called in the meantime */
if (atomic_read(&file_data->closing) || data->zombie)
rv = -ENODEV;
return -ENODEV;
if (rv < 0) {
/* dev can be invalid now! */
pr_debug("%s - wait interrupted %d\n", __func__, rv);
return rv;
if (wait_rv < 0) {
dev_dbg(dev, "%s - wait interrupted %ld\n", __func__, wait_rv);
return wait_rv;
}
if (rv == 0) {
if (wait_rv == 0) {
dev_dbg(dev, "%s - wait timed out\n", __func__);
return -ETIMEDOUT;
}
@@ -830,6 +833,7 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
unsigned long expire;
int bufcount = 1;
int again = 0;
long wait_rv;
/* mutex already locked */
@@ -942,19 +946,24 @@ static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
if (!(flags & USBTMC_FLAG_ASYNC)) {
dev_dbg(dev, "%s: before wait time %lu\n",
__func__, expire);
retval = wait_event_interruptible_timeout(
wait_rv = wait_event_interruptible_timeout(
file_data->wait_bulk_in,
usbtmc_do_transfer(file_data),
expire);
dev_dbg(dev, "%s: wait returned %d\n",
__func__, retval);
dev_dbg(dev, "%s: wait returned %ld\n",
__func__, wait_rv);
if (retval <= 0) {
if (retval == 0)
retval = -ETIMEDOUT;
if (wait_rv < 0) {
retval = wait_rv;
goto error;
}
if (wait_rv == 0) {
retval = -ETIMEDOUT;
goto error;
}
}
urb = usb_get_from_anchor(&file_data->in_anchor);
@@ -1380,7 +1389,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
if (!buffer)
return -ENOMEM;
mutex_lock(&data->io_mutex);
retval = mutex_lock_interruptible(&data->io_mutex);
if (retval < 0)
goto exit_nolock;
if (data->zombie) {
retval = -ENODEV;
goto exit;
@@ -1503,6 +1515,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
exit:
mutex_unlock(&data->io_mutex);
exit_nolock:
kfree(buffer);
return retval;
}
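All three usbtmc hunks converge on the same pattern: wait_event_interruptible_timeout() returns a long, and storing it in an int can truncate it, so the result is kept in a long and mapped onto the int status explicitly. A minimal sketch (wq, condition and timeout are placeholders):

long wait_rv;
int rv = 0;

wait_rv = wait_event_interruptible_timeout(wq, condition, timeout);
if (wait_rv < 0)                /* interrupted by a signal */
        rv = wait_rv;           /* typically -ERESTARTSYS */
else if (wait_rv == 0)          /* timed out */
        rv = -ETIMEDOUT;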

View File

@@ -2017,15 +2017,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
if (f->get_status) {
status = f->get_status(f);
if (status < 0)
break;
} else {
/* Set D0 and D1 bits based on func wakeup capability */
if (f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP) {
status |= USB_INTRF_STAT_FUNC_RW_CAP;
if (f->func_wakeup_armed)
status |= USB_INTRF_STAT_FUNC_RW;
}
/* if D5 is not set, then device is not wakeup capable */
if (!(f->config->bmAttributes & USB_CONFIG_ATT_WAKEUP))
status &= ~(USB_INTRF_STAT_FUNC_RW_CAP | USB_INTRF_STAT_FUNC_RW);
}
put_unaligned_le16(status & 0x0000ffff, req->buf);

View File

@@ -892,6 +892,12 @@ static void ecm_resume(struct usb_function *f)
gether_resume(&ecm->port);
}
static int ecm_get_status(struct usb_function *f)
{
return (f->func_wakeup_armed ? USB_INTRF_STAT_FUNC_RW : 0) |
USB_INTRF_STAT_FUNC_RW_CAP;
}
static void ecm_free(struct usb_function *f)
{
struct f_ecm *ecm;
@@ -960,6 +966,7 @@ static struct usb_function *ecm_alloc(struct usb_function_instance *fi)
ecm->port.func.disable = ecm_disable;
ecm->port.func.free_func = ecm_free;
ecm->port.func.suspend = ecm_suspend;
ecm->port.func.get_status = ecm_get_status;
ecm->port.func.resume = ecm_resume;
return &ecm->port.func;

View File

@@ -1749,6 +1749,10 @@ static int __tegra_xudc_ep_disable(struct tegra_xudc_ep *ep)
val = xudc_readl(xudc, CTRL);
val &= ~CTRL_RUN;
xudc_writel(xudc, val, CTRL);
val = xudc_readl(xudc, ST);
if (val & ST_RC)
xudc_writel(xudc, ST_RC, ST);
}
dev_info(xudc->dev, "ep %u disabled\n", ep->index);

View File

@@ -121,7 +121,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
}
/* Get and enable clock if any specified */
uhci->clk = devm_clk_get(&pdev->dev, NULL);
uhci->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(uhci->clk)) {
ret = PTR_ERR(uhci->clk);
goto err_rmr;

View File

@@ -1363,6 +1363,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
tegra->otg_usb3_port = tegra_xusb_padctl_get_usb3_companion(tegra->padctl,
tegra->otg_usb2_port);
pm_runtime_get_sync(tegra->dev);
if (tegra->host_mode) {
/* switch to host mode */
if (tegra->otg_usb3_port >= 0) {
@@ -1392,6 +1393,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
}
tegra_xhci_set_port_power(tegra, true, true);
pm_runtime_mark_last_busy(tegra->dev);
} else {
if (tegra->otg_usb3_port >= 0)
@@ -1399,6 +1401,7 @@ static void tegra_xhci_id_work(struct work_struct *work)
tegra_xhci_set_port_power(tegra, true, false);
}
pm_runtime_put_autosuspend(tegra->dev);
}
#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_PM_SLEEP)

View File

@@ -5202,7 +5202,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
case SNK_TRY_WAIT_DEBOUNCE:
if (!tcpm_port_is_sink(port)) {
port->max_wait = 0;
tcpm_set_state(port, SRC_TRYWAIT, 0);
tcpm_set_state(port, SRC_TRYWAIT, PD_T_PD_DEBOUNCE);
}
break;
case SRC_TRY_WAIT:

View File

@@ -296,6 +296,8 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
if (!dp)
return;
cancel_work_sync(&dp->work);
dp->data.conf = 0;
dp->data.status = 0;
dp->initialized = false;

View File

@@ -216,6 +216,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
* buffering it.
*/
if (dma_capable(dev, dev_addr, size, true) &&
!dma_kmalloc_needs_bounce(dev, size, dir) &&
!range_straddles_page_boundary(phys, size) &&
!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
!is_swiotlb_force_bounce(dev))

View File

@@ -77,6 +77,7 @@ enum xb_req_state {
struct xb_req_data {
struct list_head list;
wait_queue_head_t wq;
struct kref kref;
struct xsd_sockmsg msg;
uint32_t caller_req_id;
enum xsd_sockmsg_type type;
@@ -103,6 +104,7 @@ int xb_init_comms(void);
void xb_deinit_comms(void);
int xs_watch_msg(struct xs_watch_event *event);
void xs_request_exit(struct xb_req_data *req);
void xs_free_req(struct kref *kref);
int xenbus_match(struct device *_dev, struct device_driver *_drv);
int xenbus_dev_probe(struct device *_dev);

View File

@@ -309,8 +309,8 @@ static int process_msg(void)
virt_wmb();
req->state = xb_req_state_got_reply;
req->cb(req);
} else
kfree(req);
}
kref_put(&req->kref, xs_free_req);
}
mutex_unlock(&xs_response_mutex);
@@ -386,14 +386,13 @@ static int process_writes(void)
state.req->msg.type = XS_ERROR;
state.req->err = err;
list_del(&state.req->list);
if (state.req->state == xb_req_state_aborted)
kfree(state.req);
else {
if (state.req->state != xb_req_state_aborted) {
/* write err, then update state */
virt_wmb();
state.req->state = xb_req_state_got_reply;
wake_up(&state.req->wq);
}
kref_put(&state.req->kref, xs_free_req);
mutex_unlock(&xb_write_mutex);

drivers/xen/xenbus/xenbus_dev_frontend.c

@@ -406,7 +406,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req)
mutex_unlock(&u->reply_mutex);
kfree(req->body);
-kfree(req);
+kref_put(&req->kref, xs_free_req);
kref_put(&u->kref, xenbus_file_free);

drivers/xen/xenbus/xenbus_xs.c

@@ -112,6 +112,12 @@ static void xs_suspend_exit(void)
wake_up_all(&xs_state_enter_wq);
}
void xs_free_req(struct kref *kref)
{
struct xb_req_data *req = container_of(kref, struct xb_req_data, kref);
kfree(req);
}
static uint32_t xs_request_enter(struct xb_req_data *req)
{
uint32_t rq_id;
@@ -237,6 +243,12 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
req->caller_req_id = req->msg.req_id;
req->msg.req_id = xs_request_enter(req);
/*
* Take 2nd ref. One for this thread, and the second for the
* xenbus_thread.
*/
kref_get(&req->kref);
mutex_lock(&xb_write_mutex);
list_add_tail(&req->list, &xb_write_list);
notify = list_is_singular(&xb_write_list);
@@ -261,8 +273,8 @@ static void *xs_wait_for_reply(struct xb_req_data *req, struct xsd_sockmsg *msg)
if (req->state == xb_req_state_queued ||
req->state == xb_req_state_wait_reply)
req->state = xb_req_state_aborted;
-else
-kfree(req);
+kref_put(&req->kref, xs_free_req);
mutex_unlock(&xb_write_mutex);
return ret;
@@ -291,6 +303,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
req->cb = xenbus_dev_queue_reply;
req->par = par;
req->user_req = true;
kref_init(&req->kref);
xs_send(req, msg);
@@ -319,6 +332,7 @@ static void *xs_talkv(struct xenbus_transaction t,
req->num_vecs = num_vecs;
req->cb = xs_wake_up;
req->user_req = false;
kref_init(&req->kref);
msg.req_id = 0;
msg.tx_id = t.id;
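
Across these four xenbus files the change is one pattern: a request that may be touched last by either the submitting thread or the xenbus response thread is freed via a reference count instead of ad-hoc kfree() on selected exit paths (which is what allowed use-after-free when both sides raced). A minimal, runnable userspace model of the same idea, using C11 atomics in place of the kernel's struct kref:

#include <stdatomic.h>
#include <stdlib.h>

struct req {
	atomic_int refs;		/* models struct kref */
	int payload;
};

static struct req *req_new(int payload)
{
	struct req *r = malloc(sizeof(*r));

	if (r) {
		atomic_init(&r->refs, 1);	/* kref_init(): creator's ref */
		r->payload = payload;
	}
	return r;
}

static void req_get(struct req *r)		/* kref_get() */
{
	atomic_fetch_add(&r->refs, 1);
}

static void req_put(struct req *r)		/* kref_put(..., xs_free_req) */
{
	if (atomic_fetch_sub(&r->refs, 1) == 1)
		free(r);			/* last owner frees */
}

int main(void)
{
	struct req *r = req_new(42);

	if (!r)
		return 1;
	req_get(r);	/* second ref handed to the 'xenbus thread' */
	req_put(r);	/* one side finishes ... */
	req_put(r);	/* ... the other side's put performs the free */
	return 0;
}

Whichever side finishes last performs the actual free, which is exactly what the kfree()-on-some-paths code could not guarantee.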

fs/namespace.c

@@ -635,7 +635,7 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
return 0;
mnt = real_mount(bastard);
mnt_add_count(mnt, 1);
-smp_mb(); // see mntput_no_expire()
+smp_mb(); // see mntput_no_expire() and do_umount()
if (likely(!read_seqretry(&mount_lock, seq)))
return 0;
if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
@@ -1788,6 +1788,7 @@ static int do_umount(struct mount *mnt, int flags)
umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
} else {
smp_mb(); // paired with __legitimize_mnt()
shrink_submounts(mnt);
retval = -EBUSY;
if (!propagate_mount_busy(mnt, 2)) {
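
The two full barriers pair up so that the lockless path and the sync-umount path cannot both miss each other: the legitimizer increments the mount count and then reads the sequence; the umounter updates the mount and then reads the count. This is the classic store-buffering shape, modeled here in runnable C11 (names are illustrative, not the kernel's):

#include <stdatomic.h>

static atomic_int mnt_count, mount_seq;

/* models __legitimize_mnt(): bump count, full fence, check seq */
int legitimize(void)
{
	atomic_fetch_add_explicit(&mnt_count, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	return atomic_load_explicit(&mount_seq, memory_order_relaxed);
}

/* models the do_umount() sync case: update mount, full fence, check count */
int umount_check(void)
{
	atomic_store_explicit(&mount_seq, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* the added smp_mb() */
	return atomic_load_explicit(&mnt_count, memory_order_relaxed);
}

/*
 * With both fences, legitimize() == 0 and umount_check() == 0 cannot
 * both occur: the umounter either sees the elevated refcount (-EBUSY)
 * or the legitimizer sees the sequence change and backs off.
 */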

fs/ocfs2/journal.c

@@ -174,7 +174,7 @@ int ocfs2_recovery_init(struct ocfs2_super *osb)
struct ocfs2_recovery_map *rm;
mutex_init(&osb->recovery_lock);
-osb->disable_recovery = 0;
+osb->recovery_state = OCFS2_REC_ENABLED;
osb->recovery_thread_task = NULL;
init_waitqueue_head(&osb->recovery_event);
@@ -190,31 +190,53 @@ int ocfs2_recovery_init(struct ocfs2_super *osb)
return 0;
}
/* we can't grab the goofy sem lock from inside wait_event, so we use
* memory barriers to make sure that we'll see the null task before
* being woken up */
static int ocfs2_recovery_thread_running(struct ocfs2_super *osb)
{
mb();
return osb->recovery_thread_task != NULL;
}
static void ocfs2_recovery_disable(struct ocfs2_super *osb,
enum ocfs2_recovery_state state)
{
mutex_lock(&osb->recovery_lock);
/*
* If recovery thread is not running, we can directly transition to
* final state.
*/
if (!ocfs2_recovery_thread_running(osb)) {
osb->recovery_state = state + 1;
goto out_lock;
}
osb->recovery_state = state;
/* Wait for recovery thread to acknowledge state transition */
wait_event_cmd(osb->recovery_event,
!ocfs2_recovery_thread_running(osb) ||
osb->recovery_state >= state + 1,
mutex_unlock(&osb->recovery_lock),
mutex_lock(&osb->recovery_lock));
out_lock:
mutex_unlock(&osb->recovery_lock);
/*
* At this point we know that no more recovery work can be queued so
* wait for any recovery completion work to complete.
*/
if (osb->ocfs2_wq)
flush_workqueue(osb->ocfs2_wq);
}
void ocfs2_recovery_disable_quota(struct ocfs2_super *osb)
{
ocfs2_recovery_disable(osb, OCFS2_REC_QUOTA_WANT_DISABLE);
}
void ocfs2_recovery_exit(struct ocfs2_super *osb)
{
struct ocfs2_recovery_map *rm;
/* disable any new recovery threads and wait for any currently
* running ones to exit. Do this before setting the vol_state. */
-mutex_lock(&osb->recovery_lock);
-osb->disable_recovery = 1;
-mutex_unlock(&osb->recovery_lock);
-wait_event(osb->recovery_event, !ocfs2_recovery_thread_running(osb));
-/* At this point, we know that no more recovery threads can be
- * launched, so wait for any recovery completion work to
- * complete. */
-if (osb->ocfs2_wq)
-flush_workqueue(osb->ocfs2_wq);
+ocfs2_recovery_disable(osb, OCFS2_REC_WANT_DISABLE);
/*
* Now that recovery is shut down, and the osb is about to be
@@ -1472,6 +1494,18 @@ static int __ocfs2_recovery_thread(void *arg)
}
}
restart:
if (quota_enabled) {
mutex_lock(&osb->recovery_lock);
/* Confirm that recovery thread will no longer recover quotas */
if (osb->recovery_state == OCFS2_REC_QUOTA_WANT_DISABLE) {
osb->recovery_state = OCFS2_REC_QUOTA_DISABLED;
wake_up(&osb->recovery_event);
}
if (osb->recovery_state >= OCFS2_REC_QUOTA_DISABLED)
quota_enabled = 0;
mutex_unlock(&osb->recovery_lock);
}
status = ocfs2_super_lock(osb, 1);
if (status < 0) {
mlog_errno(status);
@@ -1569,27 +1603,29 @@ bail:
ocfs2_free_replay_slots(osb);
osb->recovery_thread_task = NULL;
mb(); /* sync with ocfs2_recovery_thread_running */
if (osb->recovery_state == OCFS2_REC_WANT_DISABLE)
osb->recovery_state = OCFS2_REC_DISABLED;
wake_up(&osb->recovery_event);
mutex_unlock(&osb->recovery_lock);
-if (quota_enabled)
-kfree(rm_quota);
+kfree(rm_quota);
return status;
}
void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
{
int was_set = -1;
mutex_lock(&osb->recovery_lock);
if (osb->recovery_state < OCFS2_REC_WANT_DISABLE)
was_set = ocfs2_recovery_map_set(osb, node_num);
trace_ocfs2_recovery_thread(node_num, osb->node_num,
-osb->disable_recovery, osb->recovery_thread_task,
-osb->disable_recovery ?
--1 : ocfs2_recovery_map_set(osb, node_num));
+osb->recovery_state, osb->recovery_thread_task, was_set);
-if (osb->disable_recovery)
+if (osb->recovery_state >= OCFS2_REC_WANT_DISABLE)
goto out;
if (osb->recovery_thread_task)
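
The disable path is a request/acknowledge handshake: the caller publishes a WANT_DISABLE state, and the recovery thread, once past the corresponding point of no return, bumps the state to the matching DISABLED value (always WANT + 1) and wakes the waiter. A rough pthread analogue of that contract, with a condition variable standing in for osb->recovery_event:

#include <pthread.h>

enum state { ENABLED, WANT_DISABLE, DISABLED };	/* DISABLED == WANT + 1 */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  event = PTHREAD_COND_INITIALIZER;
static enum state st = ENABLED;
static int thread_running = 1;

/* models ocfs2_recovery_disable(): publish the request, wait for ack */
static void disable(void)
{
	pthread_mutex_lock(&lock);
	if (!thread_running) {
		st = DISABLED;		/* no thread: finalize directly */
	} else {
		st = WANT_DISABLE;
		while (thread_running && st < DISABLED)
			pthread_cond_wait(&event, &lock);
	}
	pthread_mutex_unlock(&lock);
}

/* models the recovery thread passing its point of no return */
static void worker_ack_and_exit(void)
{
	pthread_mutex_lock(&lock);
	if (st == WANT_DISABLE)
		st = DISABLED;		/* ack: bump to WANT + 1 */
	thread_running = 0;
	pthread_cond_broadcast(&event);
	pthread_mutex_unlock(&lock);
}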

fs/ocfs2/journal.h

@@ -148,6 +148,7 @@ void ocfs2_wait_for_recovery(struct ocfs2_super *osb);
int ocfs2_recovery_init(struct ocfs2_super *osb);
void ocfs2_recovery_exit(struct ocfs2_super *osb);
void ocfs2_recovery_disable_quota(struct ocfs2_super *osb);
int ocfs2_compute_replay_slots(struct ocfs2_super *osb);
void ocfs2_free_replay_slots(struct ocfs2_super *osb);

fs/ocfs2/ocfs2.h

@@ -308,6 +308,21 @@ enum ocfs2_journal_trigger_type {
void ocfs2_initialize_journal_triggers(struct super_block *sb,
struct ocfs2_triggers triggers[]);
enum ocfs2_recovery_state {
OCFS2_REC_ENABLED = 0,
OCFS2_REC_QUOTA_WANT_DISABLE,
/*
* Must be OCFS2_REC_QUOTA_WANT_DISABLE + 1 for
* ocfs2_recovery_disable_quota() to work.
*/
OCFS2_REC_QUOTA_DISABLED,
OCFS2_REC_WANT_DISABLE,
/*
* Must be OCFS2_REC_WANT_DISABLE + 1 for ocfs2_recovery_exit() to work
*/
OCFS2_REC_DISABLED,
};
struct ocfs2_journal;
struct ocfs2_slot_info;
struct ocfs2_recovery_map;
@@ -370,7 +385,7 @@ struct ocfs2_super
struct ocfs2_recovery_map *recovery_map;
struct ocfs2_replay_map *replay_map;
struct task_struct *recovery_thread_task;
-int disable_recovery;
+enum ocfs2_recovery_state recovery_state;
wait_queue_head_t checkpoint_event;
struct ocfs2_journal *journal;
unsigned long osb_commit_interval;

fs/ocfs2/quota_local.c

@@ -453,8 +453,7 @@ out:
/* Sync changes in local quota file into global quota file and
* reinitialize local quota file.
-* The function expects local quota file to be already locked and
-* s_umount locked in shared mode. */
+* The function expects local quota file to be already locked. */
static int ocfs2_recover_local_quota_file(struct inode *lqinode,
int type,
struct ocfs2_quota_recovery *rec)
@@ -585,7 +584,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
{
unsigned int ino[OCFS2_MAXQUOTAS] = { LOCAL_USER_QUOTA_SYSTEM_INODE,
LOCAL_GROUP_QUOTA_SYSTEM_INODE };
-struct super_block *sb = osb->sb;
struct ocfs2_local_disk_dqinfo *ldinfo;
struct buffer_head *bh;
handle_t *handle;
@@ -597,7 +595,6 @@ int ocfs2_finish_quota_recovery(struct ocfs2_super *osb,
printk(KERN_NOTICE "ocfs2: Finishing quota recovery on device (%s) for "
"slot %u\n", osb->dev_str, slot_num);
-down_read(&sb->s_umount);
for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (list_empty(&(rec->r_list[type])))
continue;
@@ -674,7 +671,6 @@ out_put:
break;
}
out:
-up_read(&sb->s_umount);
kfree(rec);
return status;
}
@@ -840,8 +836,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
ocfs2_release_local_quota_bitmaps(&oinfo->dqi_chunk);
/*
-* s_umount held in exclusive mode protects us against racing with
-* recovery thread...
+* ocfs2_dismount_volume() has already aborted quota recovery...
*/
if (oinfo->dqi_rec) {
ocfs2_free_quota_recovery(oinfo->dqi_rec);

fs/ocfs2/super.c

@@ -1872,6 +1872,9 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
/* Orphan scan should be stopped as early as possible */
ocfs2_orphan_scan_stop(osb);
/* Stop quota recovery so that we can disable quotas */
ocfs2_recovery_disable_quota(osb);
ocfs2_disable_quotas(osb);
/* All dquots should be freed by now */

fs/smb/client/cached_dir.c

@@ -29,7 +29,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
{
struct cached_fid *cfid;
-spin_lock(&cfids->cfid_list_lock);
list_for_each_entry(cfid, &cfids->entries, entry) {
if (!strcmp(cfid->path, path)) {
/*
@@ -38,25 +37,20 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
* being deleted due to a lease break.
*/
if (!cfid->time || !cfid->has_lease) {
-spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
kref_get(&cfid->refcount);
-spin_unlock(&cfids->cfid_list_lock);
return cfid;
}
}
if (lookup_only) {
-spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
if (cfids->num_entries >= max_cached_dirs) {
-spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
cfid = init_cached_dir(path);
if (cfid == NULL) {
-spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
cfid->cfids = cfids;
@@ -74,7 +68,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
*/
cfid->has_lease = true;
-spin_unlock(&cfids->cfid_list_lock);
return cfid;
}
@@ -185,8 +178,10 @@ replay_again:
if (!utf16_path)
return -ENOMEM;
+spin_lock(&cfids->cfid_list_lock);
cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
if (cfid == NULL) {
+spin_unlock(&cfids->cfid_list_lock);
kfree(utf16_path);
return -ENOENT;
}
@@ -195,7 +190,6 @@ replay_again:
* Otherwise, it is either a new entry or laundromat worker removed it
* from @cfids->entries. Caller will put last reference if the latter.
*/
-spin_lock(&cfids->cfid_list_lock);
if (cfid->has_lease && cfid->time) {
spin_unlock(&cfids->cfid_list_lock);
*ret_cfid = cfid;
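
The lookup/create helper now runs entirely under the caller's cfid_list_lock, closing the window where an entry could be invalidated between the helper's internal unlock and the caller re-taking the lock. The shape of the fix, as a generic sketch with hypothetical find()/create() helpers; only the locking contract matters:

struct entry;
struct list;

struct entry *find(struct list *l, const char *key);
struct entry *create(struct list *l, const char *key);

/*
 * Caller must hold the list lock for the whole call. Because the
 * helper never drops the lock, the entry it returns cannot be freed
 * by a concurrent lease break before the caller revalidates it.
 */
static struct entry *find_or_create(struct list *l, const char *key)
{
	struct entry *e = find(l, key);

	return e ? e : create(l, key);
}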

fs/smb/server/oplock.c

@@ -1496,7 +1496,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
sizeof(struct create_lease_v2) - 4)
-return NULL;
+goto err_out;
memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
@@ -1512,7 +1512,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
sizeof(struct create_lease))
-return NULL;
+goto err_out;
memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
@@ -1521,6 +1521,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
lreq->version = 1;
}
return lreq;
err_out:
kfree(lreq);
return NULL;
}
/**
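
The leak: lreq was allocated before the length validation, and the early returns dropped it. Funneling every failure through one labeled exit is the idiomatic fix; in miniature (struct lease and MIN_LEN are illustrative):

#include <stdlib.h>

struct lease { int state; };
#define MIN_LEN 16			/* illustrative bound */

struct lease *parse(const unsigned char *buf, unsigned long len)
{
	struct lease *l = calloc(1, sizeof(*l));

	if (!l)
		return NULL;
	if (len < MIN_LEN)
		goto err_out;		/* was: return NULL, leaking l */
	l->state = buf[0];		/* ... fill from the request ... */
	return l;

err_out:
	free(l);
	return NULL;
}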

fs/smb/server/smb2pdu.c

@@ -632,6 +632,11 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
return name;
}
if (*name == '\0') {
kfree(name);
return ERR_PTR(-EINVAL);
}
if (*name == '\\') {
pr_err("not allow directory name included leading slash\n");
kfree(name);

fs/smb/server/vfs.c

@@ -443,6 +443,13 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
goto out;
}
if (v_len <= *pos) {
pr_err("stream write position %lld is out of bounds (stream length: %zd)\n",
*pos, v_len);
err = -EINVAL;
goto out;
}
if (v_len < size) {
wbuf = kvzalloc(size, GFP_KERNEL);
if (!wbuf) {
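
The check rejects a write whose starting offset lies beyond the current stream value, which would otherwise let the later copy into the v_len-sized buffer begin out of bounds. Reduced to its essence (the real code also grows the backing buffer when the write extends the stream; this models only the new rejection):

#include <errno.h>
#include <string.h>

static int stream_write(char *stream, long v_len,
			const char *buf, unsigned long count, long pos)
{
	if (pos < 0 || pos >= v_len)
		return -EINVAL;	/* write would start out of bounds */
	if (count > (unsigned long)(v_len - pos))
		count = v_len - pos;	/* stay within the v_len buffer */
	memcpy(stream + pos, buf, count);
	return 0;
}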

fs/smb/server/vfs_cache.c

@@ -644,21 +644,40 @@ __close_file_table_ids(struct ksmbd_file_table *ft,
bool (*skip)(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp))
{
-unsigned int id;
-struct ksmbd_file *fp;
-int num = 0;
+struct ksmbd_file *fp;
+unsigned int id = 0;
+int num = 0;
-idr_for_each_entry(ft->idr, fp, id) {
-if (skip(tcon, fp))
-continue;
+while (1) {
+write_lock(&ft->lock);
+fp = idr_get_next(ft->idr, &id);
+if (!fp) {
+write_unlock(&ft->lock);
+break;
+}
+if (skip(tcon, fp) ||
+!atomic_dec_and_test(&fp->refcount)) {
+id++;
+write_unlock(&ft->lock);
+continue;
+}
set_close_state_blocked_works(fp);
+idr_remove(ft->idr, fp->volatile_id);
+fp->volatile_id = KSMBD_NO_FID;
+write_unlock(&ft->lock);
+down_write(&fp->f_ci->m_lock);
+list_del_init(&fp->node);
+up_write(&fp->f_ci->m_lock);
-if (!atomic_dec_and_test(&fp->refcount))
-continue;
__ksmbd_close_fd(ft, fp);
num++;
+id++;
}
return num;
}

include/linux/cpu.h

@@ -77,6 +77,8 @@ extern ssize_t cpu_show_gds(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,

include/linux/module.h

@@ -576,6 +576,11 @@ struct module {
atomic_t refcnt;
#endif
#ifdef CONFIG_MITIGATION_ITS
int its_num_pages;
void **its_page_array;
#endif
#ifdef CONFIG_CONSTRUCTORS
/* Constructor functions. */
ctor_fn_t *ctors;

include/linux/netdevice.h

@@ -3653,6 +3653,17 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
#endif
}
/**
* netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
* @dev: network device
* @qid: stack index of the queue to reset
*/
static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
u32 qid)
{
netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
}
/**
* netdev_reset_queue - reset the packets and bytes count of a network device
* @dev_queue: network device
@@ -3662,7 +3673,7 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
*/
static inline void netdev_reset_queue(struct net_device *dev_queue)
{
-netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
+netdev_tx_reset_subqueue(dev_queue, 0);
}
/**
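
The helper exists so multi-queue drivers can reset BQL state per ring; the companion mtk_eth_soc fix in this release calls it for every TX queue when the DMA rings are freed. A hedged sketch of that call-site shape (my_dma_free is illustrative, not the driver's real function):

static void my_dma_free(struct net_device *dev)
{
	unsigned int i;

	/* the rings are empty again, so BQL state must be reset for
	 * every stack TX queue, not only queue 0 */
	for (i = 0; i < dev->num_tx_queues; i++)
		netdev_tx_reset_subqueue(dev, i);
}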

include/linux/types.h

@@ -115,8 +115,9 @@ typedef u64 u_int64_t;
typedef s64 int64_t;
#endif
-/* this is a special 64bit data type that is 8-byte aligned */
+/* These are the special 64-bit data types that are 8-byte aligned */
#define aligned_u64 __aligned_u64
+#define aligned_s64 __aligned_s64
#define aligned_be64 __aligned_be64
#define aligned_le64 __aligned_le64

include/uapi/linux/types.h

@@ -53,6 +53,7 @@ typedef __u32 __bitwise __wsum;
* No conversions are necessary between 32-bit user-space and a 64-bit kernel.
*/
#define __aligned_u64 __u64 __attribute__((aligned(8)))
+#define __aligned_s64 __s64 __attribute__((aligned(8)))
#define __aligned_be64 __be64 __attribute__((aligned(8)))
#define __aligned_le64 __le64 __attribute__((aligned(8)))
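
The aligned typedefs matter for UAPI structs shared between 32-bit userspace and a 64-bit kernel: on i386 a plain 64-bit member is only 4-byte aligned, so struct layouts can differ across the ABI boundary unless the 8-byte-aligned type is used. A compile-time demonstration (struct sample is illustrative):

#include <stdint.h>
#include <stddef.h>

typedef int64_t __attribute__((aligned(8))) aligned_s64;

struct sample {
	int32_t flags;
	aligned_s64 value;	/* forced to offset 8 on every ABI */
};

/* without the aligned typedef, i386 would place 'value' at offset 4 */
_Static_assert(offsetof(struct sample, value) == 8,
	       "identical layout for 32-bit userspace and 64-bit kernel");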

Some files were not shown because too many files have changed in this diff.