Merge 6.6.97 into android15-6.6-lts

GKI (arm64) relevant 24 out of 131 changes, affecting 32 files +376/-159
  c4fad2460c Revert "mmc: sdhci: Disable SD card clock before changing parameters" [1 file, +2/-7]
  7e88ad41b6 Bluetooth: hci_sync: revert some mesh modifications [1 file, +4/-12]
  8af1406949 Bluetooth: MGMT: set_mesh: update LE scan interval and window [1 file, +22/-0]
  0506547f6e Bluetooth: MGMT: mesh_send: check instances prior disabling advertising [1 file, +2/-1]
  45e9444b3b usb: typec: altmodes/displayport: do not index invalid pin_assignments [2 files, +2/-1]
  529281206f Bluetooth: Prevent unintended pause by checking if advertising is active [1 file, +4/-0]
  f680a4643c net/sched: Always pass notifications when child class becomes empty [1 file, +5/-14]
  bc0819a25e Bluetooth: hci_core: Fix use-after-free in vhci_flush() [2 files, +32/-4]
  b43c3050d2 f2fs: add tracepoint for f2fs_vm_page_mkwrite() [2 files, +41/-23]
  7ac8a61e55 f2fs: prevent writing without fallocate() for pinned files [1 file, +16/-9]
  d1ccd98edd f2fs: convert f2fs_vm_page_mkwrite() to use folio [1 file, +16/-16]
  9e67044aa9 f2fs: fix to zero post-eof page [1 file, +38/-0]
  95ffe73451 scsi: ufs: core: Fix abnormal scale up after last cmd finish [1 file, +4/-13]
  847af89aa1 scsi: ufs: core: Add OPP support for scaling clocks and regulators [2 files, +115/-33]
  51ba658604 scsi: ufs: core: Fix clk scaling to be conditional in reset and restore [1 file, +2/-1]
  817662f9bd rcu: Return early if callback is not specified [1 file, +4/-0]
  e3eed01347 fs: export anon_inode_make_secure_inode() and fix secretmem LSM bypass [3 files, +21/-15]
  7609899eb6 usb: xhci: quirk for data loss in ISOC transfers [3 files, +30/-0]
  c16b75aa6f xhci: Disable stream for xHC controller with XHCI_BROKEN_STREAMS [1 file, +2/-1]
  2cd5e7c169 Input: xpad - support Acer NGR 200 Controller [1 file, +2/-0]
  d8eab407c0 dma-buf: fix timeout handling in dma_resv_wait_timeout v2 [1 file, +7/-5]
  6052862ba3 Logitech C-270 even more broken [1 file, +2/-1]
  63cff9f57e usb: typec: displayport: Fix potential deadlock [1 file, +1/-2]
  897761d165 f2fs: fix to avoid use-after-free issue in f2fs_filemap_fault [1 file, +2/-1]

Changes in 6.6.97
	rtc: pcf2127: add missing semicolon after statement
	rtc: pcf2127: fix SPI command byte for PCF2131
	rtc: cmos: use spin_lock_irqsave in cmos_interrupt
	virtio-net: ensure the received length does not exceed allocated size
	s390/pci: Do not try re-enabling load/store if device is disabled
	vsock/vmci: Clear the vmci transport packet properly when initializing it
	mmc: sdhci: Add a helper function for dump register in dynamic debug mode
	Revert "mmc: sdhci: Disable SD card clock before changing parameters"
	mmc: core: sd: Apply BROKEN_SD_DISCARD quirk earlier
	Bluetooth: hci_sync: revert some mesh modifications
	Bluetooth: MGMT: set_mesh: update LE scan interval and window
	Bluetooth: MGMT: mesh_send: check instances prior disabling advertising
	regulator: gpio: Fix the out-of-bounds access to drvdata::gpiods
	usb: typec: altmodes/displayport: do not index invalid pin_assignments
	mtk-sd: Fix a pagefault in dma_unmap_sg() for not prepared data
	mtk-sd: Prevent memory corruption from DMA map failure
	mtk-sd: reset host->mrq on prepare_data() error
	arm64: dts: apple: t8103: Fix PCIe BCM4377 nodename
	platform/mellanox: mlxbf-tmfifo: fix vring_desc.len assignment
	RDMA/mlx5: Initialize obj_event->obj_sub_list before xa_insert
	nfs: Clean up /proc/net/rpc/nfs when nfs_fs_proc_net_init() fails.
	NFSv4/pNFS: Fix a race to wake on NFS_LAYOUT_DRAIN
	scsi: qla2xxx: Fix DMA mapping test in qla24xx_get_port_database()
	scsi: qla4xxx: Fix missing DMA mapping error in qla4xxx_alloc_pdu()
	scsi: ufs: core: Fix spelling of a sysfs attribute name
	RDMA/mlx5: Fix HW counters query for non-representor devices
	RDMA/mlx5: Fix CC counters query for MPV
	RDMA/mlx5: Fix vport loopback for MPV device
	platform/mellanox: nvsw-sn2201: Fix bus number in adapter error message
	Bluetooth: Prevent unintended pause by checking if advertising is active
	btrfs: fix missing error handling when searching for inode refs during log replay
	btrfs: fix iteration of extrefs during log replay
	btrfs: rename err to ret in btrfs_rmdir()
	btrfs: propagate last_unlink_trans earlier when doing a rmdir
	btrfs: use btrfs_record_snapshot_destroy() during rmdir
	ethernet: atl1: Add missing DMA mapping error checks and count errors
	dpaa2-eth: fix xdp_rxq_info leak
	drm/exynos: fimd: Guard display clock control with runtime PM calls
	spi: spi-fsl-dspi: Clear completion counter before initiating transfer
	drm/i915/selftests: Change mock_request() to return error pointers
	platform/x86: dell-wmi-sysman: Fix WMI data block retrieval in sysfs callbacks
	arm64: dts: qcom: sm8550: add UART14 nodes
	platform/x86: make fw_attr_class constant
	platform/x86: firmware_attributes_class: Move include linux/device/class.h
	platform/x86: firmware_attributes_class: Simplify API
	platform/x86: think-lmi: Directly use firmware_attributes_class
	platform/x86: think-lmi: Fix class device unregistration
	platform/x86: dell-sysman: Directly use firmware_attributes_class
	platform/x86: dell-wmi-sysman: Fix class device unregistration
	platform/mellanox: mlxreg-lc: Fix logic error in power state check
	smb: client: fix warning when reconnecting channel
	net: usb: lan78xx: fix WARN in __netif_napi_del_locked on disconnect
	drm/i915/gt: Fix timeline left held on VMA alloc error
	drm/i915/gsc: mei interrupt top half should be in irq disabled context
	igc: disable L1.2 PCI-E link substate to avoid performance issue
	lib: test_objagg: Set error message in check_expect_hints_stats()
	amd-xgbe: align CL37 AN sequence as per databook
	enic: fix incorrect MTU comparison in enic_change_mtu()
	rose: fix dangling neighbour pointers in rose_rt_device_down()
	nui: Fix dma_mapping_error() check
	net/sched: Always pass notifications when child class becomes empty
	amd-xgbe: do not double read link status
	smb: client: fix race condition in negotiate timeout by using more precise timing
	Revert "drm/i915/gem: Allow EXEC_CAPTURE on recoverable contexts on DG1"
	btrfs: fix qgroup reservation leak on failure to allocate ordered extent
	smb: client: remove \t from TP_printk statements
	Bluetooth: hci_core: Fix use-after-free in vhci_flush()
	wifi: mac80211: chan: chandef is non-NULL for reserved
	wifi: mac80211: Add link iteration macro for link data
	wifi: mac80211: finish link init before RCU publish
	bnxt: properly flush XDP redirect lists
	x86/traps: Initialize DR6 by writing its architectural reset value
	f2fs: add tracepoint for f2fs_vm_page_mkwrite()
	f2fs: prevent writing without fallocate() for pinned files
	f2fs: convert f2fs_vm_page_mkwrite() to use folio
	f2fs: fix to zero post-eof page
	scsi: ufs: core: Fix abnormal scale up after last cmd finish
	scsi: ufs: core: Add OPP support for scaling clocks and regulators
	scsi: ufs: core: Fix clk scaling to be conditional in reset and restore
	drm/simpledrm: Do not upcast in release helpers
	drm/i915/dp_mst: Work around Thunderbolt sink disconnect after SINK_COUNT_ESI read
	drm/msm: Fix a fence leak in submit error path
	drm/msm: Fix another leak in the submit error path
	ALSA: sb: Don't allow changing the DMA mode during operations
	ALSA: sb: Force to disable DMAs once when DMA mode is changed
	ata: libata-acpi: Do not assume 40 wire cable if no devices are enabled
	ata: pata_cs5536: fix build on 32-bit UML
	ASoC: amd: yc: Add quirk for MSI Bravo 17 D7VF internal mic
	platform/x86/amd/pmc: Add PCSpecialist Lafite Pro V 14M to 8042 quirks list
	powerpc: Fix struct termio related ioctl macros
	ASoC: amd: yc: update quirk data for HP Victus
	regulator: fan53555: add enable_time support and soft-start times
	scsi: target: Fix NULL pointer dereference in core_scsi3_decode_spec_i_port()
	aoe: defer rexmit timer downdev work to workqueue
	wifi: mac80211: drop invalid source address OCB frames
	wifi: ath6kl: remove WARN on bad firmware input
	ACPICA: Refuse to evaluate a method if arguments are missing
	mtd: spinand: fix memory leak of ECC engine conf
	rcu: Return early if callback is not specified
	drm/v3d: Disable interrupts before resetting the GPU
	platform/x86: hp-bioscfg: Directly use firmware_attributes_class
	platform/x86: hp-bioscfg: Fix class device unregistration
	module: Provide EXPORT_SYMBOL_GPL_FOR_MODULES() helper
	fs: export anon_inode_make_secure_inode() and fix secretmem LSM bypass
	NFSv4/flexfiles: Fix handling of NFS level errors in I/O
	usb: xhci: quirk for data loss in ISOC transfers
	xhci: dbctty: disable ECHO flag by default
	xhci: dbc: Flush queued requests before stopping dbc
	xhci: Disable stream for xHC controller with XHCI_BROKEN_STREAMS
	Input: xpad - support Acer NGR 200 Controller
	Input: iqs7222 - explicitly define number of external channels
	usb: cdnsp: do not disable slot for disabled slot
	usb: chipidea: udc: disconnect/reconnect from host when do suspend/resume
	smb: client: fix readdir returning wrong type with POSIX extensions
	dma-buf: fix timeout handling in dma_resv_wait_timeout v2
	i2c/designware: Fix an initialization issue
	Logitech C-270 even more broken
	iommu/rockchip: prevent iommus dead loop when two masters share one IOMMU
	powercap: intel_rapl: Do not change CLAMPING bit if ENABLE bit cannot be changed
	platform/x86: think-lmi: Create ksets consecutively
	platform/x86: think-lmi: Fix kobject cleanup
	platform/x86: think-lmi: Fix sysfs group cleanup
	usb: typec: displayport: Fix potential deadlock
	powerpc/kernel: Fix ppc_save_regs inclusion in build
	x86/bugs: Rename MDS machinery to something more generic
	x86/bugs: Add a Transient Scheduler Attacks mitigation
	KVM: SVM: Advertise TSA CPUID bits to guests
	x86/microcode/AMD: Add TSA microcode SHAs
	x86/process: Move the buffer clearing before MONITOR
	f2fs: fix to avoid use-after-free issue in f2fs_filemap_fault
	Linux 6.6.97

Change-Id: I7b7248a01422d5e01f689fbd71d6d5e33674ec3f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-07-13 12:52:10 +00:00
76 changed files with 975 additions and 280 deletions

View File

@@ -526,6 +526,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/spectre_v1 /sys/devices/system/cpu/vulnerabilities/spectre_v1
/sys/devices/system/cpu/vulnerabilities/spectre_v2 /sys/devices/system/cpu/vulnerabilities/spectre_v2
/sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/srbds
/sys/devices/system/cpu/vulnerabilities/tsa
/sys/devices/system/cpu/vulnerabilities/tsx_async_abort /sys/devices/system/cpu/vulnerabilities/tsx_async_abort
Date: January 2018 Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org> Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>

View File

@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
combination with a microcode update. The microcode clears the affected CPU combination with a microcode update. The microcode clears the affected CPU
buffers when the VERW instruction is executed. buffers when the VERW instruction is executed.
Kernel reuses the MDS function to invoke the buffer clearing: Kernel does the buffer clearing with x86_clear_cpu_buffers().
mds_clear_cpu_buffers()
On MDS affected CPUs, the kernel already invokes CPU buffer clear on On MDS affected CPUs, the kernel already invokes CPU buffer clear on
kernel/userspace, hypervisor/guest and C-state (idle) transitions. No kernel/userspace, hypervisor/guest and C-state (idle) transitions. No

View File

@@ -6720,6 +6720,19 @@
If not specified, "default" is used. In this case, If not specified, "default" is used. In this case,
the RNG's choice is left to each individual trust source. the RNG's choice is left to each individual trust source.
tsa= [X86] Control mitigation for Transient Scheduler
Attacks on AMD CPUs. Search the following in your
favourite search engine for more details:
"Technical guidance for mitigating transient scheduler
attacks".
off - disable the mitigation
on - enable the mitigation (default)
user - mitigate only user/kernel transitions
vm - mitigate only guest/host transitions
tsc= Disable clocksource stability checks for TSC. tsc= Disable clocksource stability checks for TSC.
Format: <string> Format: <string>
[x86] reliable: mark tsc clocksource as reliable, this [x86] reliable: mark tsc clocksource as reliable, this

View File

@@ -93,7 +93,7 @@ enters a C-state.
The kernel provides a function to invoke the buffer clearing: The kernel provides a function to invoke the buffer clearing:
mds_clear_cpu_buffers() x86_clear_cpu_buffers()
Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path. Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
Other than CFLAGS.ZF, this macro doesn't clobber any registers. Other than CFLAGS.ZF, this macro doesn't clobber any registers.
@@ -185,9 +185,9 @@ Mitigation points
idle clearing would be a window dressing exercise and is therefore not idle clearing would be a window dressing exercise and is therefore not
activated. activated.
The invocation is controlled by the static key mds_idle_clear which is The invocation is controlled by the static key cpu_buf_idle_clear which is
switched depending on the chosen mitigation mode and the SMT state of switched depending on the chosen mitigation mode and the SMT state of the
the system. system.
The buffer clear is only invoked before entering the C-State to prevent The buffer clear is only invoked before entering the C-State to prevent
that stale data from the idling CPU from spilling to the Hyper-Thread that stale data from the idling CPU from spilling to the Hyper-Thread

View File

@@ -28,6 +28,9 @@ kernel. As of today, modules that make use of symbols exported into namespaces,
are required to import the namespace. Otherwise the kernel will, depending on are required to import the namespace. Otherwise the kernel will, depending on
its configuration, reject loading the module or warn about a missing import. its configuration, reject loading the module or warn about a missing import.
Additionally, it is possible to put symbols into a module namespace, strictly
limiting which modules are allowed to use these symbols.
2. How to define Symbol Namespaces 2. How to define Symbol Namespaces
================================== ==================================
@@ -84,6 +87,22 @@ unit as preprocessor statement. The above example would then read::
within the corresponding compilation unit before any EXPORT_SYMBOL macro is within the corresponding compilation unit before any EXPORT_SYMBOL macro is
used. used.
2.3 Using the EXPORT_SYMBOL_GPL_FOR_MODULES() macro
===================================================
Symbols exported using this macro are put into a module namespace. This
namespace cannot be imported.
The macro takes a comma separated list of module names, allowing only those
modules to access this symbol. Simple tail-globs are supported.
For example:
EXPORT_SYMBOL_GPL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*")
will limit usage of this symbol to modules whoes name matches the given
patterns.
3. How to use Symbols exported in Namespaces 3. How to use Symbols exported in Namespaces
============================================ ============================================
@@ -155,3 +174,6 @@ in-tree modules::
You can also run nsdeps for external module builds. A typical usage is:: You can also run nsdeps for external module builds. A typical usage is::
$ make -C <path_to_kernel_src> M=$PWD nsdeps $ make -C <path_to_kernel_src> M=$PWD nsdeps
Note: it will happily generate an import statement for the module namespace;
which will not work and generates build and runtime failures.

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
VERSION = 6 VERSION = 6
PATCHLEVEL = 6 PATCHLEVEL = 6
SUBLEVEL = 96 SUBLEVEL = 97
EXTRAVERSION = EXTRAVERSION =
NAME = Pinguïn Aangedreven NAME = Pinguïn Aangedreven

View File

@@ -23,10 +23,10 @@
#define TCSETSW _IOW('t', 21, struct termios) #define TCSETSW _IOW('t', 21, struct termios)
#define TCSETSF _IOW('t', 22, struct termios) #define TCSETSF _IOW('t', 22, struct termios)
#define TCGETA _IOR('t', 23, struct termio) #define TCGETA 0x40147417 /* _IOR('t', 23, struct termio) */
#define TCSETA _IOW('t', 24, struct termio) #define TCSETA 0x80147418 /* _IOW('t', 24, struct termio) */
#define TCSETAW _IOW('t', 25, struct termio) #define TCSETAW 0x80147419 /* _IOW('t', 25, struct termio) */
#define TCSETAF _IOW('t', 28, struct termio) #define TCSETAF 0x8014741c /* _IOW('t', 28, struct termio) */
#define TCSBRK _IO('t', 29) #define TCSBRK _IO('t', 29)
#define TCXONC _IO('t', 30) #define TCXONC _IO('t', 30)

View File

@@ -165,9 +165,7 @@ endif
obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o obj64-$(CONFIG_PPC_TRANSACTIONAL_MEM) += tm.o
ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC_CORE)$(CONFIG_PPC_BOOK3S),)
obj-y += ppc_save_regs.o obj-y += ppc_save_regs.o
endif
obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o
obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o

View File

@@ -2621,6 +2621,15 @@ config MITIGATION_ITS
disabled, mitigation cannot be enabled via cmdline. disabled, mitigation cannot be enabled via cmdline.
See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst> See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
config MITIGATION_TSA
bool "Mitigate Transient Scheduler Attacks"
depends on CPU_SUP_AMD
default y
help
Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
security vulnerability on AMD CPUs which can lead to forwarding of
invalid info to subsequent instructions and thus can affect their
timing and thereby cause a leakage.
endif endif
config ARCH_HAS_ADD_PAGES config ARCH_HAS_ADD_PAGES

View File

@@ -31,20 +31,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb);
/* /*
* Define the VERW operand that is disguised as entry code so that * Define the VERW operand that is disguised as entry code so that
* it can be referenced with KPTI enabled. This ensure VERW can be * it can be referenced with KPTI enabled. This ensures VERW can be
* used late in exit-to-user path after page tables are switched. * used late in exit-to-user path after page tables are switched.
*/ */
.pushsection .entry.text, "ax" .pushsection .entry.text, "ax"
.align L1_CACHE_BYTES, 0xcc .align L1_CACHE_BYTES, 0xcc
SYM_CODE_START_NOALIGN(mds_verw_sel) SYM_CODE_START_NOALIGN(x86_verw_sel)
UNWIND_HINT_UNDEFINED UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR ANNOTATE_NOENDBR
.word __KERNEL_DS .word __KERNEL_DS
.align L1_CACHE_BYTES, 0xcc .align L1_CACHE_BYTES, 0xcc
SYM_CODE_END(mds_verw_sel); SYM_CODE_END(x86_verw_sel);
/* For KVM */ /* For KVM */
EXPORT_SYMBOL_GPL(mds_verw_sel); EXPORT_SYMBOL_GPL(x86_verw_sel);
.popsection .popsection

View File

@@ -81,4 +81,16 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type);
extern struct cpumask cpus_stop_mask; extern struct cpumask cpus_stop_mask;
union zen_patch_rev {
struct {
__u32 rev : 8,
stepping : 4,
model : 4,
__reserved : 4,
ext_model : 4,
ext_fam : 8;
};
__u32 ucode_rev;
};
#endif /* _ASM_X86_CPU_H */ #endif /* _ASM_X86_CPU_H */

View File

@@ -449,6 +449,7 @@
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */ /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */ #define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
#define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* "" The memory form of VERW mitigates TSA */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */ #define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */ #define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */ #define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */
@@ -470,6 +471,10 @@
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */ #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */ #define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */
#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* "" AMD CPU not vulnerable to TSA-SQ */
#define X86_FEATURE_TSA_L1_NO (21*32+12) /* "" AMD CPU not vulnerable to TSA-L1 */
#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* "" Clear CPU buffers using VERW before VMRUN */
/* /*
* BUG word(s) * BUG word(s)
*/ */
@@ -521,4 +526,5 @@
#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
#define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */ #define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */
#define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */ #define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */
#define X86_BUG_TSA X86_BUG(1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
#endif /* _ASM_X86_CPUFEATURES_H */ #endif /* _ASM_X86_CPUFEATURES_H */

View File

@@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void)
static __always_inline void native_safe_halt(void) static __always_inline void native_safe_halt(void)
{ {
mds_idle_clear_cpu_buffers(); x86_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory"); asm volatile("sti; hlt": : :"memory");
} }
static __always_inline void native_halt(void) static __always_inline void native_halt(void)
{ {
mds_idle_clear_cpu_buffers(); x86_idle_clear_cpu_buffers();
asm volatile("hlt": : :"memory"); asm volatile("hlt": : :"memory");
} }

View File

@@ -44,8 +44,6 @@ static __always_inline void __monitorx(const void *eax, unsigned long ecx,
static __always_inline void __mwait(unsigned long eax, unsigned long ecx) static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
{ {
mds_idle_clear_cpu_buffers();
/* "mwait %eax, %ecx;" */ /* "mwait %eax, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xc9;" asm volatile(".byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx)); :: "a" (eax), "c" (ecx));
@@ -80,7 +78,7 @@ static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx, static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
unsigned long ecx) unsigned long ecx)
{ {
/* No MDS buffer clear as this is AMD/HYGON only */ /* No need for TSA buffer clearing on AMD */
/* "mwaitx %eax, %ebx, %ecx;" */ /* "mwaitx %eax, %ebx, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xfb;" asm volatile(".byte 0x0f, 0x01, 0xfb;"
@@ -89,7 +87,7 @@ static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx) static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{ {
mds_idle_clear_cpu_buffers();
/* "mwait %eax, %ecx;" */ /* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;" asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx)); :: "a" (eax), "c" (ecx));
@@ -107,13 +105,20 @@ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
*/ */
static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{ {
if (need_resched())
return;
x86_idle_clear_cpu_buffers();
if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) { if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
const void *addr = &current_thread_info()->flags; const void *addr = &current_thread_info()->flags;
alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
__monitor(addr, 0, 0); __monitor(addr, 0, 0);
if (!need_resched()) { if (need_resched())
goto out;
if (ecx & 1) { if (ecx & 1) {
__mwait(eax, ecx); __mwait(eax, ecx);
} else { } else {
@@ -121,7 +126,8 @@ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned lo
raw_local_irq_disable(); raw_local_irq_disable();
} }
} }
}
out:
current_clr_polling(); current_clr_polling();
} }

View File

@@ -324,25 +324,31 @@
.endm .endm
/* /*
* Macro to execute VERW instruction that mitigate transient data sampling * Macro to execute VERW insns that mitigate transient data sampling
* attacks such as MDS. On affected systems a microcode update overloaded VERW * attacks such as MDS or TSA. On affected systems a microcode update
* instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF. * overloaded VERW insns to also clear the CPU buffers. VERW clobbers
* * CFLAGS.ZF.
* Note: Only the memory operand variant of VERW clears the CPU buffers. * Note: Only the memory operand variant of VERW clears the CPU buffers.
*/ */
.macro CLEAR_CPU_BUFFERS .macro __CLEAR_CPU_BUFFERS feature
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
ALTERNATIVE "", "verw mds_verw_sel(%rip)", X86_FEATURE_CLEAR_CPU_BUF ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
#else #else
/* /*
* In 32bit mode, the memory operand must be a %cs reference. The data * In 32bit mode, the memory operand must be a %cs reference. The data
* segments may not be usable (vm86 mode), and the stack segment may not * segments may not be usable (vm86 mode), and the stack segment may not
* be flat (ESPFIX32). * be flat (ESPFIX32).
*/ */
ALTERNATIVE "", "verw %cs:mds_verw_sel", X86_FEATURE_CLEAR_CPU_BUF ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
#endif #endif
.endm .endm
#define CLEAR_CPU_BUFFERS \
__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF
#define VM_CLEAR_CPU_BUFFERS \
__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
.macro CLEAR_BRANCH_HISTORY .macro CLEAR_BRANCH_HISTORY
ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
@@ -592,24 +598,24 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear); DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear); DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
extern u16 mds_verw_sel; extern u16 x86_verw_sel;
#include <asm/segment.h> #include <asm/segment.h>
/** /**
* mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability * x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
* *
* This uses the otherwise unused and obsolete VERW instruction in * This uses the otherwise unused and obsolete VERW instruction in
* combination with microcode which triggers a CPU buffer flush when the * combination with microcode which triggers a CPU buffer flush when the
* instruction is executed. * instruction is executed.
*/ */
static __always_inline void mds_clear_cpu_buffers(void) static __always_inline void x86_clear_cpu_buffers(void)
{ {
static const u16 ds = __KERNEL_DS; static const u16 ds = __KERNEL_DS;
@@ -626,14 +632,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
} }
/** /**
* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability * x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
* and TSA vulnerabilities.
* *
* Clear CPU buffers if the corresponding static key is enabled * Clear CPU buffers if the corresponding static key is enabled
*/ */
static __always_inline void mds_idle_clear_cpu_buffers(void) static __always_inline void x86_idle_clear_cpu_buffers(void)
{ {
if (static_branch_likely(&mds_idle_clear)) if (static_branch_likely(&cpu_buf_idle_clear))
mds_clear_cpu_buffers(); x86_clear_cpu_buffers();
} }
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */

View File

@@ -539,6 +539,63 @@ static void early_init_amd_mc(struct cpuinfo_x86 *c)
#endif #endif
} }
static bool amd_check_tsa_microcode(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
union zen_patch_rev p;
u32 min_rev = 0;
p.ext_fam = c->x86 - 0xf;
p.model = c->x86_model;
p.stepping = c->x86_stepping;
if (cpu_has(c, X86_FEATURE_ZEN3) ||
cpu_has(c, X86_FEATURE_ZEN4)) {
switch (p.ucode_rev >> 8) {
case 0xa0011: min_rev = 0x0a0011d7; break;
case 0xa0012: min_rev = 0x0a00123b; break;
case 0xa0082: min_rev = 0x0a00820d; break;
case 0xa1011: min_rev = 0x0a10114c; break;
case 0xa1012: min_rev = 0x0a10124c; break;
case 0xa1081: min_rev = 0x0a108109; break;
case 0xa2010: min_rev = 0x0a20102e; break;
case 0xa2012: min_rev = 0x0a201211; break;
case 0xa4041: min_rev = 0x0a404108; break;
case 0xa5000: min_rev = 0x0a500012; break;
case 0xa6012: min_rev = 0x0a60120a; break;
case 0xa7041: min_rev = 0x0a704108; break;
case 0xa7052: min_rev = 0x0a705208; break;
case 0xa7080: min_rev = 0x0a708008; break;
case 0xa70c0: min_rev = 0x0a70c008; break;
case 0xaa002: min_rev = 0x0aa00216; break;
default:
pr_debug("%s: ucode_rev: 0x%x, current revision: 0x%x\n",
__func__, p.ucode_rev, c->microcode);
return false;
}
}
if (!min_rev)
return false;
return c->microcode >= min_rev;
}
static void tsa_init(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return;
if (cpu_has(c, X86_FEATURE_ZEN3) ||
cpu_has(c, X86_FEATURE_ZEN4)) {
if (amd_check_tsa_microcode())
setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
} else {
setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
}
}
static void bsp_init_amd(struct cpuinfo_x86 *c) static void bsp_init_amd(struct cpuinfo_x86 *c)
{ {
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@@ -645,6 +702,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
break; break;
} }
tsa_init(c);
return; return;
warn: warn:

View File

@@ -50,6 +50,7 @@ static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void); static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void); static void __init gds_select_mitigation(void);
static void __init its_select_mitigation(void); static void __init its_select_mitigation(void);
static void __init tsa_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base; u64 x86_spec_ctrl_base;
@@ -122,9 +123,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */ /* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
/* Control MDS CPU buffer clear before idling (halt, mwait) */ /* Control CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear); DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear); EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
/* /*
* Controls whether l1d flush based mitigations are enabled, * Controls whether l1d flush based mitigations are enabled,
@@ -185,6 +186,7 @@ void __init cpu_select_mitigations(void)
srso_select_mitigation(); srso_select_mitigation();
gds_select_mitigation(); gds_select_mitigation();
its_select_mitigation(); its_select_mitigation();
tsa_select_mitigation();
} }
/* /*
@@ -445,7 +447,7 @@ static void __init mmio_select_mitigation(void)
* is required irrespective of SMT state. * is required irrespective of SMT state.
*/ */
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear); static_branch_enable(&cpu_buf_idle_clear);
/* /*
* Check if the system has the right microcode. * Check if the system has the right microcode.
@@ -2082,10 +2084,10 @@ static void update_mds_branch_idle(void)
return; return;
if (sched_smt_active()) { if (sched_smt_active()) {
static_branch_enable(&mds_idle_clear); static_branch_enable(&cpu_buf_idle_clear);
} else if (mmio_mitigation == MMIO_MITIGATION_OFF || } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
static_branch_disable(&mds_idle_clear); static_branch_disable(&cpu_buf_idle_clear);
} }
} }
@@ -2093,6 +2095,94 @@ static void update_mds_branch_idle(void)
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
#undef pr_fmt
#define pr_fmt(fmt)	"Transient Scheduler Attacks: " fmt

/* Possible TSA mitigation modes, selectable via the tsa= command line. */
enum tsa_mitigations {
	TSA_MITIGATION_NONE,
	TSA_MITIGATION_UCODE_NEEDED,	/* affected CPU, fixed microcode missing */
	TSA_MITIGATION_USER_KERNEL,	/* clear CPU buffers at user/kernel boundary */
	TSA_MITIGATION_VM,		/* clear CPU buffers on VM entry */
	TSA_MITIGATION_FULL,		/* clear CPU buffers in both cases */
};

/* sysfs/report strings, indexed by enum tsa_mitigations. */
static const char * const tsa_strings[] = {
	[TSA_MITIGATION_NONE]		= "Vulnerable",
	[TSA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TSA_MITIGATION_USER_KERNEL]	= "Mitigation: Clear CPU buffers: user/kernel boundary",
	[TSA_MITIGATION_VM]		= "Mitigation: Clear CPU buffers: VM",
	[TSA_MITIGATION_FULL]		= "Mitigation: Clear CPU buffers",
};

/* Default: full mitigation when CONFIG_MITIGATION_TSA=y, otherwise off. */
static enum tsa_mitigations tsa_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_FULL : TSA_MITIGATION_NONE;
/*
 * Parse the "tsa=" kernel command-line option and record the requested
 * mitigation mode. Unknown values are reported and ignored (the current
 * default is kept). Always returns 0 except for a missing argument.
 */
static int __init tsa_parse_cmdline(char *str)
{
	static const struct {
		const char		*opt;
		enum tsa_mitigations	mode;
	} tsa_opts[] = {
		{ "off",	TSA_MITIGATION_NONE },
		{ "on",		TSA_MITIGATION_FULL },
		{ "user",	TSA_MITIGATION_USER_KERNEL },
		{ "vm",		TSA_MITIGATION_VM },
	};
	unsigned int i;

	if (!str)
		return -EINVAL;

	for (i = 0; i < sizeof(tsa_opts) / sizeof(tsa_opts[0]); i++) {
		if (!strcmp(str, tsa_opts[i].opt)) {
			tsa_mitigation = tsa_opts[i].mode;
			return 0;
		}
	}

	pr_err("Ignoring unknown tsa=%s option.\n", str);

	return 0;
}
early_param("tsa", tsa_parse_cmdline);
/*
 * Choose and apply the TSA mitigation for this boot.
 *
 * Downgrades the requested mode to NONE when mitigations are globally off
 * or the CPU is not affected, and to UCODE_NEEDED when the VERW-clear
 * microcode capability is absent. Then forces the matching CPU buffer
 * clearing capability bits and logs the final state.
 */
static void __init tsa_select_mitigation(void)
{
	if (tsa_mitigation == TSA_MITIGATION_NONE)
		return;

	/* Not affected, or mitigations=off: nothing to do. */
	if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
		tsa_mitigation = TSA_MITIGATION_NONE;
		return;
	}

	/* Without the fixed microcode, VERW cannot clear the buffers. */
	if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR))
		tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;

	switch (tsa_mitigation) {
	case TSA_MITIGATION_USER_KERNEL:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		break;

	case TSA_MITIGATION_VM:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
		break;

	case TSA_MITIGATION_UCODE_NEEDED:
		/* On bare metal there is nothing more to do: stay Vulnerable. */
		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
			goto out;

		pr_notice("Forcing mitigation on in a VM\n");

		/*
		 * On the off-chance that microcode has been updated
		 * on the host, enable the mitigation in the guest just
		 * in case.
		 */
		fallthrough;
	case TSA_MITIGATION_FULL:
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
		break;
	default:
		break;
	}

out:
	pr_info("%s\n", tsa_strings[tsa_mitigation]);
}
void cpu_bugs_smt_update(void) void cpu_bugs_smt_update(void)
{ {
mutex_lock(&spec_ctrl_mutex); mutex_lock(&spec_ctrl_mutex);
@@ -2146,6 +2236,24 @@ void cpu_bugs_smt_update(void)
break; break;
} }
switch (tsa_mitigation) {
case TSA_MITIGATION_USER_KERNEL:
case TSA_MITIGATION_VM:
case TSA_MITIGATION_FULL:
case TSA_MITIGATION_UCODE_NEEDED:
/*
* TSA-SQ can potentially lead to info leakage between
* SMT threads.
*/
if (sched_smt_active())
static_branch_enable(&cpu_buf_idle_clear);
else
static_branch_disable(&cpu_buf_idle_clear);
break;
case TSA_MITIGATION_NONE:
break;
}
mutex_unlock(&spec_ctrl_mutex); mutex_unlock(&spec_ctrl_mutex);
} }
@@ -3075,6 +3183,11 @@ static ssize_t gds_show_state(char *buf)
return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
} }
/* Emit the current TSA mitigation status string for sysfs. */
static ssize_t tsa_show_state(char *buf)
{
	const char *state = tsa_strings[tsa_mitigation];

	return sysfs_emit(buf, "%s\n", state);
}
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug) char *buf, unsigned int bug)
{ {
@@ -3136,6 +3249,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_ITS: case X86_BUG_ITS:
return its_show_state(buf); return its_show_state(buf);
case X86_BUG_TSA:
return tsa_show_state(buf);
default: default:
break; break;
} }
@@ -3220,4 +3336,9 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att
{ {
return cpu_show_common(dev, attr, buf, X86_BUG_ITS); return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
} }
/*
 * sysfs handler for /sys/devices/system/cpu/vulnerabilities/tsa;
 * delegates to the common bug-state formatter for X86_BUG_TSA.
 */
ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
}
#endif #endif

View File

@@ -1277,6 +1277,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define ITS BIT(8) #define ITS BIT(8)
/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */ /* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
#define ITS_NATIVE_ONLY BIT(9) #define ITS_NATIVE_ONLY BIT(9)
/* CPU is affected by Transient Scheduler Attacks */
#define TSA BIT(10)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
@@ -1324,7 +1326,7 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_AMD(0x16, RETBLEED), VULNBL_AMD(0x16, RETBLEED),
VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO), VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
VULNBL_AMD(0x19, SRSO), VULNBL_AMD(0x19, SRSO | TSA),
{} {}
}; };
@@ -1529,6 +1531,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY); setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
} }
if (c->x86_vendor == X86_VENDOR_AMD) {
if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
!cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
if (cpu_matches(cpu_vuln_blacklist, TSA) ||
/* Enable bug on Zen guests to allow for live migration. */
(cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
setup_force_cpu_bug(X86_BUG_TSA);
}
}
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return; return;
@@ -2215,9 +2227,6 @@ EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */
/*
* Clear all 6 debug registers:
*/
static void initialize_debug_regs(void) static void initialize_debug_regs(void)
{ {
/* Control register first -- to make sure everything is disabled. */ /* Control register first -- to make sure everything is disabled. */

View File

@@ -96,18 +96,6 @@ static struct equiv_cpu_table {
struct equiv_cpu_entry *entry; struct equiv_cpu_entry *entry;
} equiv_table; } equiv_table;
union zen_patch_rev {
struct {
__u32 rev : 8,
stepping : 4,
model : 4,
__reserved : 4,
ext_model : 4,
ext_fam : 8;
};
__u32 ucode_rev;
};
union cpuid_1_eax { union cpuid_1_eax {
struct { struct {
__u32 stepping : 4, __u32 stepping : 4,

View File

@@ -231,6 +231,13 @@ static const struct patch_digest phashes[] = {
0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21, 0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
} }
}, },
{ 0xa0011d7, {
0x35,0x07,0xcd,0x40,0x94,0xbc,0x81,0x6b,
0xfc,0x61,0x56,0x1a,0xe2,0xdb,0x96,0x12,
0x1c,0x1c,0x31,0xb1,0x02,0x6f,0xe5,0xd2,
0xfe,0x1b,0x04,0x03,0x2c,0x8f,0x4c,0x36,
}
},
{ 0xa001223, { { 0xa001223, {
0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8, 0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4, 0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
@@ -294,6 +301,13 @@ static const struct patch_digest phashes[] = {
0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59, 0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
} }
}, },
{ 0xa00123b, {
0xef,0xa1,0x1e,0x71,0xf1,0xc3,0x2c,0xe2,
0xc3,0xef,0x69,0x41,0x7a,0x54,0xca,0xc3,
0x8f,0x62,0x84,0xee,0xc2,0x39,0xd9,0x28,
0x95,0xa7,0x12,0x49,0x1e,0x30,0x71,0x72,
}
},
{ 0xa00820c, { { 0xa00820c, {
0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3, 0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63, 0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
@@ -301,6 +315,13 @@ static const struct patch_digest phashes[] = {
0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2, 0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
} }
}, },
{ 0xa00820d, {
0xf9,0x2a,0xc0,0xf4,0x9e,0xa4,0x87,0xa4,
0x7d,0x87,0x00,0xfd,0xab,0xda,0x19,0xca,
0x26,0x51,0x32,0xc1,0x57,0x91,0xdf,0xc1,
0x05,0xeb,0x01,0x7c,0x5a,0x95,0x21,0xb7,
}
},
{ 0xa10113e, { { 0xa10113e, {
0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10, 0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0, 0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
@@ -322,6 +343,13 @@ static const struct patch_digest phashes[] = {
0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4, 0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
} }
}, },
{ 0xa10114c, {
0x9e,0xb6,0xa2,0xd9,0x87,0x38,0xc5,0x64,
0xd8,0x88,0xfa,0x78,0x98,0xf9,0x6f,0x74,
0x39,0x90,0x1b,0xa5,0xcf,0x5e,0xb4,0x2a,
0x02,0xff,0xd4,0x8c,0x71,0x8b,0xe2,0xc0,
}
},
{ 0xa10123e, { { 0xa10123e, {
0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18, 0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d, 0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
@@ -343,6 +371,13 @@ static const struct patch_digest phashes[] = {
0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75, 0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
} }
}, },
{ 0xa10124c, {
0x29,0xea,0xf1,0x2c,0xb2,0xe4,0xef,0x90,
0xa4,0xcd,0x1d,0x86,0x97,0x17,0x61,0x46,
0xfc,0x22,0xcb,0x57,0x75,0x19,0xc8,0xcc,
0x0c,0xf5,0xbc,0xac,0x81,0x9d,0x9a,0xd2,
}
},
{ 0xa108108, { { 0xa108108, {
0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9, 0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6, 0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
@@ -350,6 +385,13 @@ static const struct patch_digest phashes[] = {
0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16, 0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
} }
}, },
{ 0xa108109, {
0x85,0xb4,0xbd,0x7c,0x49,0xa7,0xbd,0xfa,
0x49,0x36,0x80,0x81,0xc5,0xb7,0x39,0x1b,
0x9a,0xaa,0x50,0xde,0x9b,0xe9,0x32,0x35,
0x42,0x7e,0x51,0x4f,0x52,0x2c,0x28,0x59,
}
},
{ 0xa20102d, { { 0xa20102d, {
0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11, 0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89, 0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
@@ -357,6 +399,13 @@ static const struct patch_digest phashes[] = {
0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4, 0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
} }
}, },
{ 0xa20102e, {
0xbe,0x1f,0x32,0x04,0x0d,0x3c,0x9c,0xdd,
0xe1,0xa4,0xbf,0x76,0x3a,0xec,0xc2,0xf6,
0x11,0x00,0xa7,0xaf,0x0f,0xe5,0x02,0xc5,
0x54,0x3a,0x1f,0x8c,0x16,0xb5,0xff,0xbe,
}
},
{ 0xa201210, { { 0xa201210, {
0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe, 0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9, 0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
@@ -364,6 +413,13 @@ static const struct patch_digest phashes[] = {
0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41, 0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
} }
}, },
{ 0xa201211, {
0x69,0xa1,0x17,0xec,0xd0,0xf6,0x6c,0x95,
0xe2,0x1e,0xc5,0x59,0x1a,0x52,0x0a,0x27,
0xc4,0xed,0xd5,0x59,0x1f,0xbf,0x00,0xff,
0x08,0x88,0xb5,0xe1,0x12,0xb6,0xcc,0x27,
}
},
{ 0xa404107, { { 0xa404107, {
0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45, 0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0, 0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
@@ -371,6 +427,13 @@ static const struct patch_digest phashes[] = {
0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99, 0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
} }
}, },
{ 0xa404108, {
0x69,0x67,0x43,0x06,0xf8,0x0c,0x62,0xdc,
0xa4,0x21,0x30,0x4f,0x0f,0x21,0x2c,0xcb,
0xcc,0x37,0xf1,0x1c,0xc3,0xf8,0x2f,0x19,
0xdf,0x53,0x53,0x46,0xb1,0x15,0xea,0x00,
}
},
{ 0xa500011, { { 0xa500011, {
0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4, 0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1, 0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
@@ -378,6 +441,13 @@ static const struct patch_digest phashes[] = {
0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74, 0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
} }
}, },
{ 0xa500012, {
0xeb,0x74,0x0d,0x47,0xa1,0x8e,0x09,0xe4,
0x93,0x4c,0xad,0x03,0x32,0x4c,0x38,0x16,
0x10,0x39,0xdd,0x06,0xaa,0xce,0xd6,0x0f,
0x62,0x83,0x9d,0x8e,0x64,0x55,0xbe,0x63,
}
},
{ 0xa601209, { { 0xa601209, {
0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32, 0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30, 0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
@@ -385,6 +455,13 @@ static const struct patch_digest phashes[] = {
0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d, 0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
} }
}, },
{ 0xa60120a, {
0x0c,0x8b,0x3d,0xfd,0x52,0x52,0x85,0x7d,
0x20,0x3a,0xe1,0x7e,0xa4,0x21,0x3b,0x7b,
0x17,0x86,0xae,0xac,0x13,0xb8,0x63,0x9d,
0x06,0x01,0xd0,0xa0,0x51,0x9a,0x91,0x2c,
}
},
{ 0xa704107, { { 0xa704107, {
0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6, 0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93, 0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
@@ -392,6 +469,13 @@ static const struct patch_digest phashes[] = {
0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39, 0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
} }
}, },
{ 0xa704108, {
0xd7,0x55,0x15,0x2b,0xfe,0xc4,0xbc,0x93,
0xec,0x91,0xa0,0xae,0x45,0xb7,0xc3,0x98,
0x4e,0xff,0x61,0x77,0x88,0xc2,0x70,0x49,
0xe0,0x3a,0x1d,0x84,0x38,0x52,0xbf,0x5a,
}
},
{ 0xa705206, { { 0xa705206, {
0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4, 0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7, 0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
@@ -399,6 +483,13 @@ static const struct patch_digest phashes[] = {
0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc, 0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
} }
}, },
{ 0xa705208, {
0x30,0x1d,0x55,0x24,0xbc,0x6b,0x5a,0x19,
0x0c,0x7d,0x1d,0x74,0xaa,0xd1,0xeb,0xd2,
0x16,0x62,0xf7,0x5b,0xe1,0x1f,0x18,0x11,
0x5c,0xf0,0x94,0x90,0x26,0xec,0x69,0xff,
}
},
{ 0xa708007, { { 0xa708007, {
0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3, 0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2, 0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
@@ -406,6 +497,13 @@ static const struct patch_digest phashes[] = {
0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93, 0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
} }
}, },
{ 0xa708008, {
0x08,0x6e,0xf0,0x22,0x4b,0x8e,0xc4,0x46,
0x58,0x34,0xe6,0x47,0xa2,0x28,0xfd,0xab,
0x22,0x3d,0xdd,0xd8,0x52,0x9e,0x1d,0x16,
0xfa,0x01,0x68,0x14,0x79,0x3e,0xe8,0x6b,
}
},
{ 0xa70c005, { { 0xa70c005, {
0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b, 0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f, 0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
@@ -413,6 +511,13 @@ static const struct patch_digest phashes[] = {
0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13, 0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
} }
}, },
{ 0xa70c008, {
0x0f,0xdb,0x37,0xa1,0x10,0xaf,0xd4,0x21,
0x94,0x0d,0xa4,0xa2,0xe9,0x86,0x6c,0x0e,
0x85,0x7c,0x36,0x30,0xa3,0x3a,0x78,0x66,
0x18,0x10,0x60,0x0d,0x78,0x3d,0x44,0xd0,
}
},
{ 0xaa00116, { { 0xaa00116, {
0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63, 0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5, 0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
@@ -441,4 +546,11 @@ static const struct patch_digest phashes[] = {
0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef, 0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
} }
}, },
{ 0xaa00216, {
0x79,0xfb,0x5b,0x9f,0xb6,0xe6,0xa8,0xf5,
0x4e,0x7c,0x4f,0x8e,0x1d,0xad,0xd0,0x08,
0xc2,0x43,0x7c,0x8b,0xe6,0xdb,0xd0,0xd2,
0xe8,0x39,0x26,0xc1,0xe5,0x5a,0x48,0xf1,
}
},
}; };

View File

@@ -48,6 +48,8 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 }, { X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 }, { X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 }, { X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
{ X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },
{ X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 },
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 }, { X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 }, { X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 }, { X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 },

View File

@@ -929,16 +929,24 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
*/ */
static __cpuidle void mwait_idle(void) static __cpuidle void mwait_idle(void)
{ {
if (need_resched())
return;
x86_idle_clear_cpu_buffers();
if (!current_set_polling_and_test()) { if (!current_set_polling_and_test()) {
const void *addr = &current_thread_info()->flags; const void *addr = &current_thread_info()->flags;
alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr)); alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
__monitor(addr, 0, 0); __monitor(addr, 0, 0);
if (!need_resched()) { if (need_resched())
goto out;
__sti_mwait(0, 0); __sti_mwait(0, 0);
raw_local_irq_disable(); raw_local_irq_disable();
} }
}
out:
__current_clr_polling(); __current_clr_polling();
} }

View File

@@ -780,6 +780,7 @@ void kvm_set_cpu_caps(void)
kvm_cpu_cap_mask(CPUID_8000_0021_EAX, kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ | F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
F(VERW_CLEAR) |
F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */
); );
@@ -790,6 +791,10 @@ void kvm_set_cpu_caps(void)
F(PERFMON_V2) F(PERFMON_V2)
); );
kvm_cpu_cap_init_kvm_defined(CPUID_8000_0021_ECX,
F(TSA_SQ_NO) | F(TSA_L1_NO)
);
/* /*
* Synthesize "LFENCE is serializing" into the AMD-defined entry in * Synthesize "LFENCE is serializing" into the AMD-defined entry in
* KVM's supported CPUID if the feature is reported as supported by the * KVM's supported CPUID if the feature is reported as supported by the
@@ -1296,8 +1301,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->eax = entry->ebx = entry->ecx = entry->edx = 0; entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break; break;
case 0x80000021: case 0x80000021:
entry->ebx = entry->ecx = entry->edx = 0; entry->ebx = entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0021_EAX); cpuid_entry_override(entry, CPUID_8000_0021_EAX);
cpuid_entry_override(entry, CPUID_8000_0021_ECX);
break; break;
/* AMD Extended Performance Monitoring and Debug */ /* AMD Extended Performance Monitoring and Debug */
case 0x80000022: { case 0x80000022: {

View File

@@ -17,6 +17,7 @@ enum kvm_only_cpuid_leafs {
CPUID_8000_0007_EDX, CPUID_8000_0007_EDX,
CPUID_8000_0022_EAX, CPUID_8000_0022_EAX,
CPUID_7_2_EDX, CPUID_7_2_EDX,
CPUID_8000_0021_ECX,
NR_KVM_CPU_CAPS, NR_KVM_CPU_CAPS,
NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS, NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,
@@ -61,6 +62,10 @@ enum kvm_only_cpuid_leafs {
/* CPUID level 0x80000022 (EAX) */ /* CPUID level 0x80000022 (EAX) */
#define KVM_X86_FEATURE_PERFMON_V2 KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0) #define KVM_X86_FEATURE_PERFMON_V2 KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0)
/* CPUID level 0x80000021 (ECX) */
#define KVM_X86_FEATURE_TSA_SQ_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1)
#define KVM_X86_FEATURE_TSA_L1_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2)
struct cpuid_reg { struct cpuid_reg {
u32 function; u32 function;
u32 index; u32 index;
@@ -90,6 +95,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX},
[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
[CPUID_7_2_EDX] = { 7, 2, CPUID_EDX}, [CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
[CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
}; };
/* /*
@@ -129,6 +135,8 @@ static __always_inline u32 __feature_translate(int x86_feature)
KVM_X86_TRANSLATE_FEATURE(PERFMON_V2); KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL); KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
KVM_X86_TRANSLATE_FEATURE(BHI_CTRL); KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO);
KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO);
default: default:
return x86_feature; return x86_feature;
} }

View File

@@ -167,6 +167,9 @@ SYM_FUNC_START(__svm_vcpu_run)
#endif #endif
mov VCPU_RDI(%_ASM_DI), %_ASM_DI mov VCPU_RDI(%_ASM_DI), %_ASM_DI
/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS
/* Enter guest mode */ /* Enter guest mode */
sti sti
@@ -334,6 +337,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS
/* Enter guest mode */ /* Enter guest mode */
sti sti

View File

@@ -7263,7 +7263,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
vmx_l1d_flush(vcpu); vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&mmio_stale_data_clear) && else if (static_branch_unlikely(&mmio_stale_data_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm)) kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers(); x86_clear_cpu_buffers();
vmx_disable_fb_clear(vmx); vmx_disable_fb_clear(vmx);

View File

@@ -483,6 +483,13 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
return_ACPI_STATUS(AE_NULL_OBJECT); return_ACPI_STATUS(AE_NULL_OBJECT);
} }
if (this_walk_state->num_operands < obj_desc->method.param_count) {
ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]",
acpi_ut_get_node_name(method_node)));
return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
}
/* Init for new method, possibly wait on method mutex */ /* Init for new method, possibly wait on method mutex */
status = status =

View File

@@ -514,15 +514,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask); EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask);
/** /**
* ata_acpi_cbl_80wire - Check for 80 wire cable * ata_acpi_cbl_pata_type - Return PATA cable type
* @ap: Port to check * @ap: Port to check
* @gtm: GTM data to use
* *
* Return 1 if the @gtm indicates the BIOS selected an 80wire mode. * Return ATA_CBL_PATA* according to the transfer mode selected by BIOS
*/ */
int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) int ata_acpi_cbl_pata_type(struct ata_port *ap)
{ {
struct ata_device *dev; struct ata_device *dev;
int ret = ATA_CBL_PATA_UNK;
const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
if (!gtm)
return ATA_CBL_PATA40;
ata_for_each_dev(dev, &ap->link, ENABLED) { ata_for_each_dev(dev, &ap->link, ENABLED) {
unsigned int xfer_mask, udma_mask; unsigned int xfer_mask, udma_mask;
@@ -530,13 +534,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm)
xfer_mask = ata_acpi_gtm_xfermask(dev, gtm); xfer_mask = ata_acpi_gtm_xfermask(dev, gtm);
ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask); ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask);
if (udma_mask & ~ATA_UDMA_MASK_40C) ret = ATA_CBL_PATA40;
return 1;
if (udma_mask & ~ATA_UDMA_MASK_40C) {
ret = ATA_CBL_PATA80;
break;
}
} }
return 0; return ret;
} }
EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire); EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type);
static void ata_acpi_gtf_to_tf(struct ata_device *dev, static void ata_acpi_gtf_to_tf(struct ata_device *dev,
const struct ata_acpi_gtf *gtf, const struct ata_acpi_gtf *gtf,

View File

@@ -27,7 +27,7 @@
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <linux/dmi.h> #include <linux/dmi.h>
#ifdef CONFIG_X86_32 #if defined(CONFIG_X86) && defined(CONFIG_X86_32)
#include <asm/msr.h> #include <asm/msr.h>
static int use_msr; static int use_msr;
module_param_named(msr, use_msr, int, 0644); module_param_named(msr, use_msr, int, 0644);

View File

@@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) {
two drives */ two drives */
if (ata66 & (0x10100000 >> (16 * ap->port_no))) if (ata66 & (0x10100000 >> (16 * ap->port_no)))
return ATA_CBL_PATA80; return ATA_CBL_PATA80;
/* Check with ACPI so we can spot BIOS reported SATA bridges */ /* Check with ACPI so we can spot BIOS reported SATA bridges */
if (ata_acpi_init_gtm(ap) && return ata_acpi_cbl_pata_type(ap);
ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap)))
return ATA_CBL_PATA80;
return ATA_CBL_PATA40;
} }
static int via_pre_reset(struct ata_link *link, unsigned long deadline) static int via_pre_reset(struct ata_link *link, unsigned long deadline)

View File

@@ -567,6 +567,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds); CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
CPU_SHOW_VULN_FALLBACK(indirect_target_selection); CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
CPU_SHOW_VULN_FALLBACK(tsa);
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -583,6 +584,7 @@ static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NU
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = { static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr, &dev_attr_meltdown.attr,
@@ -600,6 +602,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_gather_data_sampling.attr, &dev_attr_gather_data_sampling.attr,
&dev_attr_reg_file_data_sampling.attr, &dev_attr_reg_file_data_sampling.attr,
&dev_attr_indirect_target_selection.attr, &dev_attr_indirect_target_selection.attr,
&dev_attr_tsa.attr,
NULL NULL
}; };

View File

@@ -80,6 +80,7 @@ enum {
DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */ DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */
DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */ DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */
DEVFL_FREED = (1<<8), /* device has been cleaned up */ DEVFL_FREED = (1<<8), /* device has been cleaned up */
DEVFL_DEAD = (1<<9), /* device has timed out of aoe_deadsecs */
}; };
enum { enum {

View File

@@ -754,7 +754,7 @@ rexmit_timer(struct timer_list *timer)
utgts = count_targets(d, NULL); utgts = count_targets(d, NULL);
if (d->flags & DEVFL_TKILL) { if (d->flags & (DEVFL_TKILL | DEVFL_DEAD)) {
spin_unlock_irqrestore(&d->lock, flags); spin_unlock_irqrestore(&d->lock, flags);
return; return;
} }
@@ -786,7 +786,8 @@ rexmit_timer(struct timer_list *timer)
* to clean up. * to clean up.
*/ */
list_splice(&flist, &d->factive[0]); list_splice(&flist, &d->factive[0]);
aoedev_downdev(d); d->flags |= DEVFL_DEAD;
queue_work(aoe_wq, &d->work);
goto out; goto out;
} }
@@ -898,6 +899,9 @@ aoecmd_sleepwork(struct work_struct *work)
{ {
struct aoedev *d = container_of(work, struct aoedev, work); struct aoedev *d = container_of(work, struct aoedev, work);
if (d->flags & DEVFL_DEAD)
aoedev_downdev(d);
if (d->flags & DEVFL_GDALLOC) if (d->flags & DEVFL_GDALLOC)
aoeblk_gdalloc(d); aoeblk_gdalloc(d);

View File

@@ -200,8 +200,11 @@ aoedev_downdev(struct aoedev *d)
struct list_head *head, *pos, *nx; struct list_head *head, *pos, *nx;
struct request *rq, *rqnext; struct request *rq, *rqnext;
int i; int i;
unsigned long flags;
d->flags &= ~DEVFL_UP; spin_lock_irqsave(&d->lock, flags);
d->flags &= ~(DEVFL_UP | DEVFL_DEAD);
spin_unlock_irqrestore(&d->lock, flags);
/* clean out active and to-be-retransmitted buffers */ /* clean out active and to-be-retransmitted buffers */
for (i = 0; i < NFACTIVE; i++) { for (i = 0; i < NFACTIVE; i++) {

View File

@@ -678,11 +678,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
dma_resv_iter_begin(&cursor, obj, usage); dma_resv_iter_begin(&cursor, obj, usage);
dma_resv_for_each_fence_unlocked(&cursor, fence) { dma_resv_for_each_fence_unlocked(&cursor, fence) {
ret = dma_fence_wait_timeout(fence, intr, ret); ret = dma_fence_wait_timeout(fence, intr, timeout);
if (ret <= 0) { if (ret <= 0)
dma_resv_iter_end(&cursor); break;
return ret;
} /* Even for zero timeout the return value is 1 */
if (timeout)
timeout = ret;
} }
dma_resv_iter_end(&cursor); dma_resv_iter_end(&cursor);

View File

@@ -85,6 +85,15 @@ void __msm_gem_submit_destroy(struct kref *kref)
container_of(kref, struct msm_gem_submit, ref); container_of(kref, struct msm_gem_submit, ref);
unsigned i; unsigned i;
/*
* In error paths, we could unref the submit without calling
* drm_sched_entity_push_job(), so msm_job_free() will never
* get called. Since drm_sched_job_cleanup() will NULL out
* s_fence, we can use that to detect this case.
*/
if (submit->base.s_fence)
drm_sched_job_cleanup(&submit->base);
if (submit->fence_id) { if (submit->fence_id) {
spin_lock(&submit->queue->idr_lock); spin_lock(&submit->queue->idr_lock);
idr_remove(&submit->queue->fence_idr, submit->fence_id); idr_remove(&submit->queue->fence_idr, submit->fence_id);
@@ -754,6 +763,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct msm_ringbuffer *ring; struct msm_ringbuffer *ring;
struct msm_submit_post_dep *post_deps = NULL; struct msm_submit_post_dep *post_deps = NULL;
struct drm_syncobj **syncobjs_to_reset = NULL; struct drm_syncobj **syncobjs_to_reset = NULL;
struct sync_file *sync_file = NULL;
int out_fence_fd = -1; int out_fence_fd = -1;
bool has_ww_ticket = false; bool has_ww_ticket = false;
unsigned i; unsigned i;
@@ -970,7 +980,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
} }
if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) { if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
struct sync_file *sync_file = sync_file_create(submit->user_fence); sync_file = sync_file_create(submit->user_fence);
if (!sync_file) { if (!sync_file) {
ret = -ENOMEM; ret = -ENOMEM;
} else { } else {
@@ -1003,8 +1013,11 @@ out:
out_unlock: out_unlock:
mutex_unlock(&queue->lock); mutex_unlock(&queue->lock);
out_post_unlock: out_post_unlock:
if (ret && (out_fence_fd >= 0)) if (ret && (out_fence_fd >= 0)) {
put_unused_fd(out_fence_fd); put_unused_fd(out_fence_fd);
if (sync_file)
fput(sync_file->file);
}
if (!IS_ERR_OR_NULL(submit)) { if (!IS_ERR_OR_NULL(submit)) {
msm_gem_submit_put(submit); msm_gem_submit_put(submit);

View File

@@ -276,7 +276,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
static void simpledrm_device_release_clocks(void *res) static void simpledrm_device_release_clocks(void *res)
{ {
struct simpledrm_device *sdev = simpledrm_device_of_dev(res); struct simpledrm_device *sdev = res;
unsigned int i; unsigned int i;
for (i = 0; i < sdev->clk_count; ++i) { for (i = 0; i < sdev->clk_count; ++i) {
@@ -374,7 +374,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
static void simpledrm_device_release_regulators(void *res) static void simpledrm_device_release_regulators(void *res)
{ {
struct simpledrm_device *sdev = simpledrm_device_of_dev(res); struct simpledrm_device *sdev = res;
unsigned int i; unsigned int i;
for (i = 0; i < sdev->regulator_count; ++i) { for (i = 0; i < sdev->regulator_count; ++i) {

View File

@@ -62,6 +62,12 @@ struct v3d_perfmon {
u64 values[]; u64 values[];
}; };
enum v3d_irq {
V3D_CORE_IRQ,
V3D_HUB_IRQ,
V3D_MAX_IRQS,
};
struct v3d_dev { struct v3d_dev {
struct drm_device drm; struct drm_device drm;
@@ -71,6 +77,8 @@ struct v3d_dev {
int ver; int ver;
bool single_irq_line; bool single_irq_line;
int irq[V3D_MAX_IRQS];
void __iomem *hub_regs; void __iomem *hub_regs;
void __iomem *core_regs[3]; void __iomem *core_regs[3];
void __iomem *bridge_regs; void __iomem *bridge_regs;

View File

@@ -120,6 +120,8 @@ v3d_reset(struct v3d_dev *v3d)
if (false) if (false)
v3d_idle_axi(v3d, 0); v3d_idle_axi(v3d, 0);
v3d_irq_disable(v3d);
v3d_idle_gca(v3d); v3d_idle_gca(v3d);
v3d_reset_v3d(v3d); v3d_reset_v3d(v3d);

View File

@@ -215,7 +215,7 @@ v3d_hub_irq(int irq, void *arg)
int int
v3d_irq_init(struct v3d_dev *v3d) v3d_irq_init(struct v3d_dev *v3d)
{ {
int irq1, ret, core; int irq, ret, core;
INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
@@ -226,17 +226,24 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1); irq = platform_get_irq_optional(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER) if (irq == -EPROBE_DEFER)
return irq1; return irq;
if (irq1 > 0) { if (irq > 0) {
ret = devm_request_irq(v3d->drm.dev, irq1, v3d->irq[V3D_CORE_IRQ] = irq;
ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ],
v3d_irq, IRQF_SHARED, v3d_irq, IRQF_SHARED,
"v3d_core0", v3d); "v3d_core0", v3d);
if (ret) if (ret)
goto fail; goto fail;
ret = devm_request_irq(v3d->drm.dev,
platform_get_irq(v3d_to_pdev(v3d), 0), irq = platform_get_irq(v3d_to_pdev(v3d), 0);
if (irq < 0)
return irq;
v3d->irq[V3D_HUB_IRQ] = irq;
ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_HUB_IRQ],
v3d_hub_irq, IRQF_SHARED, v3d_hub_irq, IRQF_SHARED,
"v3d_hub", v3d); "v3d_hub", v3d);
if (ret) if (ret)
@@ -244,8 +251,12 @@ v3d_irq_init(struct v3d_dev *v3d)
} else { } else {
v3d->single_irq_line = true; v3d->single_irq_line = true;
ret = devm_request_irq(v3d->drm.dev, irq = platform_get_irq(v3d_to_pdev(v3d), 0);
platform_get_irq(v3d_to_pdev(v3d), 0), if (irq < 0)
return irq;
v3d->irq[V3D_CORE_IRQ] = irq;
ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ],
v3d_irq, IRQF_SHARED, v3d_irq, IRQF_SHARED,
"v3d", v3d); "v3d", v3d);
if (ret) if (ret)
@@ -286,6 +297,12 @@ v3d_irq_disable(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0); V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0); V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);
/* Finish any interrupt handler still in flight. */
for (int i = 0; i < V3D_MAX_IRQS; i++) {
if (v3d->irq[i])
synchronize_irq(v3d->irq[i]);
}
/* Clear any pending interrupts we might have left. */ /* Clear any pending interrupts we might have left. */
for (core = 0; core < v3d->cores; core++) for (core = 0; core < v3d->cores; core++)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);

View File

@@ -327,6 +327,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
dev->msgs = msgs; dev->msgs = msgs;
dev->msgs_num = num_msgs; dev->msgs_num = num_msgs;
dev->msg_write_idx = 0;
i2c_dw_xfer_init(dev); i2c_dw_xfer_init(dev);
regmap_write(dev->map, DW_IC_INTR_MASK, 0); regmap_write(dev->map, DW_IC_INTR_MASK, 0);

View File

@@ -174,6 +174,7 @@ static const struct xpad_device {
{ 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX }, { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
{ 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX }, { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX },
{ 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX }, { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX },
{ 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX },
{ 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX }, { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX },
{ 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX }, { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX },
{ 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX }, { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
@@ -514,6 +515,7 @@ static const struct usb_device_id xpad_table[] = {
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */ XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */ XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */
XPAD_XBOX360_VENDOR(0x0502), /* Acer Inc. Xbox 360 style controllers */
XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */ XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */
XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */ XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz Xbox 360 controllers */ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz Xbox 360 controllers */

View File

@@ -301,6 +301,7 @@ struct iqs7222_dev_desc {
int allow_offset; int allow_offset;
int event_offset; int event_offset;
int comms_offset; int comms_offset;
int ext_chan;
bool legacy_gesture; bool legacy_gesture;
struct iqs7222_reg_grp_desc reg_grps[IQS7222_NUM_REG_GRPS]; struct iqs7222_reg_grp_desc reg_grps[IQS7222_NUM_REG_GRPS];
}; };
@@ -315,6 +316,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
.allow_offset = 9, .allow_offset = 9,
.event_offset = 10, .event_offset = 10,
.comms_offset = 12, .comms_offset = 12,
.ext_chan = 10,
.reg_grps = { .reg_grps = {
[IQS7222_REG_GRP_STAT] = { [IQS7222_REG_GRP_STAT] = {
.base = IQS7222_SYS_STATUS, .base = IQS7222_SYS_STATUS,
@@ -373,6 +375,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = {
.allow_offset = 9, .allow_offset = 9,
.event_offset = 10, .event_offset = 10,
.comms_offset = 12, .comms_offset = 12,
.ext_chan = 10,
.legacy_gesture = true, .legacy_gesture = true,
.reg_grps = { .reg_grps = {
[IQS7222_REG_GRP_STAT] = { [IQS7222_REG_GRP_STAT] = {
@@ -2244,7 +2247,7 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222,
const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
struct i2c_client *client = iqs7222->client; struct i2c_client *client = iqs7222->client;
int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
int ext_chan = rounddown(num_chan, 10); int ext_chan = dev_desc->ext_chan ? : num_chan;
int error, i; int error, i;
u16 *chan_setup = iqs7222->chan_setup[chan_index]; u16 *chan_setup = iqs7222->chan_setup[chan_index];
u16 *sys_setup = iqs7222->sys_setup; u16 *sys_setup = iqs7222->sys_setup;
@@ -2448,7 +2451,7 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222,
const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc;
struct i2c_client *client = iqs7222->client; struct i2c_client *client = iqs7222->client;
int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row;
int ext_chan = rounddown(num_chan, 10); int ext_chan = dev_desc->ext_chan ? : num_chan;
int count, error, reg_offset, i; int count, error, reg_offset, i;
u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset]; u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset];
u16 *sldr_setup = iqs7222->sldr_setup[sldr_index]; u16 *sldr_setup = iqs7222->sldr_setup[sldr_index];

View File

@@ -1177,7 +1177,6 @@ static int rk_iommu_of_xlate(struct device *dev,
iommu_dev = of_find_device_by_node(args->np); iommu_dev = of_find_device_by_node(args->np);
data->iommu = platform_get_drvdata(iommu_dev); data->iommu = platform_get_drvdata(iommu_dev);
data->iommu->domain = &rk_identity_domain;
dev_iommu_priv_set(dev, data); dev_iommu_priv_set(dev, data);
platform_device_put(iommu_dev); platform_device_put(iommu_dev);
@@ -1217,6 +1216,8 @@ static int rk_iommu_probe(struct platform_device *pdev)
if (!iommu) if (!iommu)
return -ENOMEM; return -ENOMEM;
iommu->domain = &rk_identity_domain;
platform_set_drvdata(pdev, iommu); platform_set_drvdata(pdev, iommu);
iommu->dev = dev; iommu->dev = dev;
iommu->num_mmu = 0; iommu->num_mmu = 0;

View File

@@ -1316,6 +1316,7 @@ static void spinand_cleanup(struct spinand_device *spinand)
{ {
struct nand_device *nand = spinand_to_nand(spinand); struct nand_device *nand = spinand_to_nand(spinand);
nanddev_ecc_engine_cleanup(nand);
nanddev_cleanup(nand); nanddev_cleanup(nand);
spinand_manufacturer_cleanup(spinand); spinand_manufacturer_cleanup(spinand);
kfree(spinand->databuf); kfree(spinand->databuf);

View File

@@ -87,7 +87,9 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
* We need to do some backwards compatibility to make this work. * We need to do some backwards compatibility to make this work.
*/ */
if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) { if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) {
WARN_ON(1); ath6kl_err("mismatched byte count %d vs. expected %zd\n",
le32_to_cpu(targ_info->byte_count),
sizeof(*targ_info));
return -EINVAL; return -EINVAL;
} }

View File

@@ -224,6 +224,15 @@ static const struct dmi_system_id fwbug_list[] = {
DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"), DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"),
} }
}, },
/* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */
{
.ident = "PCSpecialist Lafite Pro V 14M",
.driver_data = &quirk_spurious_8042,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"),
}
},
{} {}
}; };

View File

@@ -24,8 +24,6 @@ struct bioscfg_priv bioscfg_drv = {
.mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex), .mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex),
}; };
static const struct class *fw_attr_class;
ssize_t display_name_language_code_show(struct kobject *kobj, ssize_t display_name_language_code_show(struct kobject *kobj,
struct kobj_attribute *attr, struct kobj_attribute *attr,
char *buf) char *buf)
@@ -974,11 +972,7 @@ static int __init hp_init(void)
if (ret) if (ret)
return ret; return ret;
ret = fw_attributes_class_get(&fw_attr_class); bioscfg_drv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
if (ret)
goto err_unregister_class;
bioscfg_drv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0),
NULL, "%s", DRIVER_NAME); NULL, "%s", DRIVER_NAME);
if (IS_ERR(bioscfg_drv.class_dev)) { if (IS_ERR(bioscfg_drv.class_dev)) {
ret = PTR_ERR(bioscfg_drv.class_dev); ret = PTR_ERR(bioscfg_drv.class_dev);
@@ -1045,10 +1039,9 @@ err_release_attributes_data:
release_attributes_data(); release_attributes_data();
err_destroy_classdev: err_destroy_classdev:
device_destroy(fw_attr_class, MKDEV(0, 0)); device_unregister(bioscfg_drv.class_dev);
err_unregister_class: err_unregister_class:
fw_attributes_class_put();
hp_exit_attr_set_interface(); hp_exit_attr_set_interface();
return ret; return ret;
@@ -1057,9 +1050,8 @@ err_unregister_class:
static void __exit hp_exit(void) static void __exit hp_exit(void)
{ {
release_attributes_data(); release_attributes_data();
device_destroy(fw_attr_class, MKDEV(0, 0)); device_unregister(bioscfg_drv.class_dev);
fw_attributes_class_put();
hp_exit_attr_set_interface(); hp_exit_attr_set_interface();
} }

View File

@@ -916,6 +916,7 @@ static const struct attribute_group auth_attr_group = {
.is_visible = auth_attr_is_visible, .is_visible = auth_attr_is_visible,
.attrs = auth_attrs, .attrs = auth_attrs,
}; };
__ATTRIBUTE_GROUPS(auth_attr);
/* ---- Attributes sysfs --------------------------------------------------------- */ /* ---- Attributes sysfs --------------------------------------------------------- */
static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *attr, static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -1119,6 +1120,7 @@ static const struct attribute_group tlmi_attr_group = {
.is_visible = attr_is_visible, .is_visible = attr_is_visible,
.attrs = tlmi_attrs, .attrs = tlmi_attrs,
}; };
__ATTRIBUTE_GROUPS(tlmi_attr);
static void tlmi_attr_setting_release(struct kobject *kobj) static void tlmi_attr_setting_release(struct kobject *kobj)
{ {
@@ -1138,11 +1140,13 @@ static void tlmi_pwd_setting_release(struct kobject *kobj)
static const struct kobj_type tlmi_attr_setting_ktype = { static const struct kobj_type tlmi_attr_setting_ktype = {
.release = &tlmi_attr_setting_release, .release = &tlmi_attr_setting_release,
.sysfs_ops = &kobj_sysfs_ops, .sysfs_ops = &kobj_sysfs_ops,
.default_groups = tlmi_attr_groups,
}; };
static const struct kobj_type tlmi_pwd_setting_ktype = { static const struct kobj_type tlmi_pwd_setting_ktype = {
.release = &tlmi_pwd_setting_release, .release = &tlmi_pwd_setting_release,
.sysfs_ops = &kobj_sysfs_ops, .sysfs_ops = &kobj_sysfs_ops,
.default_groups = auth_attr_groups,
}; };
static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr, static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -1212,19 +1216,16 @@ static struct kobj_attribute debug_cmd = __ATTR_WO(debug_cmd);
/* ---- Initialisation --------------------------------------------------------- */ /* ---- Initialisation --------------------------------------------------------- */
static void tlmi_release_attr(void) static void tlmi_release_attr(void)
{ {
int i; struct kobject *pos, *n;
/* Attribute structures */ /* Attribute structures */
for (i = 0; i < TLMI_SETTINGS_COUNT; i++) {
if (tlmi_priv.setting[i]) {
sysfs_remove_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group);
kobject_put(&tlmi_priv.setting[i]->kobj);
}
}
sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr); sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr);
if (tlmi_priv.can_debug_cmd && debug_support) if (tlmi_priv.can_debug_cmd && debug_support)
sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &debug_cmd.attr); sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &debug_cmd.attr);
list_for_each_entry_safe(pos, n, &tlmi_priv.attribute_kset->list, entry)
kobject_put(pos);
kset_unregister(tlmi_priv.attribute_kset); kset_unregister(tlmi_priv.attribute_kset);
/* Free up any saved signatures */ /* Free up any saved signatures */
@@ -1232,19 +1233,8 @@ static void tlmi_release_attr(void)
kfree(tlmi_priv.pwd_admin->save_signature); kfree(tlmi_priv.pwd_admin->save_signature);
/* Authentication structures */ /* Authentication structures */
sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group); list_for_each_entry_safe(pos, n, &tlmi_priv.authentication_kset->list, entry)
kobject_put(&tlmi_priv.pwd_admin->kobj); kobject_put(pos);
sysfs_remove_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group);
kobject_put(&tlmi_priv.pwd_power->kobj);
if (tlmi_priv.opcode_support) {
sysfs_remove_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group);
kobject_put(&tlmi_priv.pwd_system->kobj);
sysfs_remove_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group);
kobject_put(&tlmi_priv.pwd_hdd->kobj);
sysfs_remove_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group);
kobject_put(&tlmi_priv.pwd_nvme->kobj);
}
kset_unregister(tlmi_priv.authentication_kset); kset_unregister(tlmi_priv.authentication_kset);
} }
@@ -1285,6 +1275,14 @@ static int tlmi_sysfs_init(void)
goto fail_device_created; goto fail_device_created;
} }
tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
&tlmi_priv.class_dev->kobj);
if (!tlmi_priv.authentication_kset) {
kset_unregister(tlmi_priv.attribute_kset);
ret = -ENOMEM;
goto fail_device_created;
}
for (i = 0; i < TLMI_SETTINGS_COUNT; i++) { for (i = 0; i < TLMI_SETTINGS_COUNT; i++) {
/* Check if index is a valid setting - skip if it isn't */ /* Check if index is a valid setting - skip if it isn't */
if (!tlmi_priv.setting[i]) if (!tlmi_priv.setting[i])
@@ -1301,12 +1299,8 @@ static int tlmi_sysfs_init(void)
/* Build attribute */ /* Build attribute */
tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset; tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset;
ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL, ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype,
"%s", tlmi_priv.setting[i]->display_name); NULL, "%s", tlmi_priv.setting[i]->display_name);
if (ret)
goto fail_create_attr;
ret = sysfs_create_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group);
if (ret) if (ret)
goto fail_create_attr; goto fail_create_attr;
} }
@@ -1322,55 +1316,34 @@ static int tlmi_sysfs_init(void)
} }
/* Create authentication entries */ /* Create authentication entries */
tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL,
&tlmi_priv.class_dev->kobj);
if (!tlmi_priv.authentication_kset) {
ret = -ENOMEM;
goto fail_create_attr;
}
tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset; tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset;
ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin"); ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype,
if (ret) NULL, "%s", "Admin");
goto fail_create_attr;
ret = sysfs_create_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group);
if (ret) if (ret)
goto fail_create_attr; goto fail_create_attr;
tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset; tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset;
ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "Power-on"); ret = kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype,
if (ret) NULL, "%s", "Power-on");
goto fail_create_attr;
ret = sysfs_create_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group);
if (ret) if (ret)
goto fail_create_attr; goto fail_create_attr;
if (tlmi_priv.opcode_support) { if (tlmi_priv.opcode_support) {
tlmi_priv.pwd_system->kobj.kset = tlmi_priv.authentication_kset; tlmi_priv.pwd_system->kobj.kset = tlmi_priv.authentication_kset;
ret = kobject_add(&tlmi_priv.pwd_system->kobj, NULL, "%s", "System"); ret = kobject_init_and_add(&tlmi_priv.pwd_system->kobj, &tlmi_pwd_setting_ktype,
if (ret) NULL, "%s", "System");
goto fail_create_attr;
ret = sysfs_create_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group);
if (ret) if (ret)
goto fail_create_attr; goto fail_create_attr;
tlmi_priv.pwd_hdd->kobj.kset = tlmi_priv.authentication_kset; tlmi_priv.pwd_hdd->kobj.kset = tlmi_priv.authentication_kset;
ret = kobject_add(&tlmi_priv.pwd_hdd->kobj, NULL, "%s", "HDD"); ret = kobject_init_and_add(&tlmi_priv.pwd_hdd->kobj, &tlmi_pwd_setting_ktype,
if (ret) NULL, "%s", "HDD");
goto fail_create_attr;
ret = sysfs_create_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group);
if (ret) if (ret)
goto fail_create_attr; goto fail_create_attr;
tlmi_priv.pwd_nvme->kobj.kset = tlmi_priv.authentication_kset; tlmi_priv.pwd_nvme->kobj.kset = tlmi_priv.authentication_kset;
ret = kobject_add(&tlmi_priv.pwd_nvme->kobj, NULL, "%s", "NVMe"); ret = kobject_init_and_add(&tlmi_priv.pwd_nvme->kobj, &tlmi_pwd_setting_ktype,
if (ret) NULL, "%s", "NVMe");
goto fail_create_attr;
ret = sysfs_create_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group);
if (ret) if (ret)
goto fail_create_attr; goto fail_create_attr;
} }
@@ -1403,8 +1376,6 @@ static struct tlmi_pwd_setting *tlmi_create_auth(const char *pwd_type,
new_pwd->maxlen = tlmi_priv.pwdcfg.core.max_length; new_pwd->maxlen = tlmi_priv.pwdcfg.core.max_length;
new_pwd->index = 0; new_pwd->index = 0;
kobject_init(&new_pwd->kobj, &tlmi_pwd_setting_ktype);
return new_pwd; return new_pwd;
} }
@@ -1508,7 +1479,6 @@ static int tlmi_analyze(void)
if (setting->possible_values) if (setting->possible_values)
strreplace(setting->possible_values, ',', ';'); strreplace(setting->possible_values, ',', ';');
kobject_init(&setting->kobj, &tlmi_attr_setting_ktype);
tlmi_priv.setting[i] = setting; tlmi_priv.setting[i] = setting;
kfree(item); kfree(item);
} }

View File

@@ -338,12 +338,28 @@ static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
{ {
struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone); struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
struct rapl_defaults *defaults = get_defaults(rd->rp); struct rapl_defaults *defaults = get_defaults(rd->rp);
u64 val;
int ret; int ret;
cpus_read_lock(); cpus_read_lock();
ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode); ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode);
if (!ret && defaults->set_floor_freq) if (ret)
goto end;
ret = rapl_read_pl_data(rd, POWER_LIMIT1, PL_ENABLE, false, &val);
if (ret)
goto end;
if (mode != val) {
pr_debug("%s cannot be %s\n", power_zone->name,
str_enabled_disabled(mode));
goto end;
}
if (defaults->set_floor_freq)
defaults->set_floor_freq(rd, mode); defaults->set_floor_freq(rd, mode);
end:
cpus_read_unlock(); cpus_read_unlock();
return ret; return ret;

View File

@@ -147,6 +147,7 @@ struct fan53555_device_info {
unsigned int slew_mask; unsigned int slew_mask;
const unsigned int *ramp_delay_table; const unsigned int *ramp_delay_table;
unsigned int n_ramp_values; unsigned int n_ramp_values;
unsigned int enable_time;
unsigned int slew_rate; unsigned int slew_rate;
}; };
@@ -282,6 +283,7 @@ static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK; di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates; di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates); di->n_ramp_values = ARRAY_SIZE(slew_rates);
di->enable_time = 250;
di->vsel_count = FAN53526_NVOLTAGES; di->vsel_count = FAN53526_NVOLTAGES;
return 0; return 0;
@@ -296,10 +298,12 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
case FAN53555_CHIP_REV_00: case FAN53555_CHIP_REV_00:
di->vsel_min = 600000; di->vsel_min = 600000;
di->vsel_step = 10000; di->vsel_step = 10000;
di->enable_time = 400;
break; break;
case FAN53555_CHIP_REV_13: case FAN53555_CHIP_REV_13:
di->vsel_min = 800000; di->vsel_min = 800000;
di->vsel_step = 10000; di->vsel_step = 10000;
di->enable_time = 400;
break; break;
default: default:
dev_err(di->dev, dev_err(di->dev,
@@ -311,13 +315,19 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
case FAN53555_CHIP_ID_01: case FAN53555_CHIP_ID_01:
case FAN53555_CHIP_ID_03: case FAN53555_CHIP_ID_03:
case FAN53555_CHIP_ID_05: case FAN53555_CHIP_ID_05:
di->vsel_min = 600000;
di->vsel_step = 10000;
di->enable_time = 400;
break;
case FAN53555_CHIP_ID_08: case FAN53555_CHIP_ID_08:
di->vsel_min = 600000; di->vsel_min = 600000;
di->vsel_step = 10000; di->vsel_step = 10000;
di->enable_time = 175;
break; break;
case FAN53555_CHIP_ID_04: case FAN53555_CHIP_ID_04:
di->vsel_min = 603000; di->vsel_min = 603000;
di->vsel_step = 12826; di->vsel_step = 12826;
di->enable_time = 400;
break; break;
default: default:
dev_err(di->dev, dev_err(di->dev,
@@ -350,6 +360,7 @@ static int fan53555_voltages_setup_rockchip(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK; di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates; di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates); di->n_ramp_values = ARRAY_SIZE(slew_rates);
di->enable_time = 360;
di->vsel_count = FAN53555_NVOLTAGES; di->vsel_count = FAN53555_NVOLTAGES;
return 0; return 0;
@@ -372,6 +383,7 @@ static int rk8602_voltages_setup_rockchip(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK; di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates; di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates); di->n_ramp_values = ARRAY_SIZE(slew_rates);
di->enable_time = 360;
di->vsel_count = RK8602_NVOLTAGES; di->vsel_count = RK8602_NVOLTAGES;
return 0; return 0;
@@ -395,6 +407,7 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di)
di->slew_mask = CTL_SLEW_MASK; di->slew_mask = CTL_SLEW_MASK;
di->ramp_delay_table = slew_rates; di->ramp_delay_table = slew_rates;
di->n_ramp_values = ARRAY_SIZE(slew_rates); di->n_ramp_values = ARRAY_SIZE(slew_rates);
di->enable_time = 400;
di->vsel_count = FAN53555_NVOLTAGES; di->vsel_count = FAN53555_NVOLTAGES;
return 0; return 0;
@@ -594,6 +607,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->ramp_mask = di->slew_mask; rdesc->ramp_mask = di->slew_mask;
rdesc->ramp_delay_table = di->ramp_delay_table; rdesc->ramp_delay_table = di->ramp_delay_table;
rdesc->n_ramp_values = di->n_ramp_values; rdesc->n_ramp_values = di->n_ramp_values;
rdesc->enable_time = di->enable_time;
rdesc->owner = THIS_MODULE; rdesc->owner = THIS_MODULE;
rdev = devm_regulator_register(di->dev, &di->desc, config); rdev = devm_regulator_register(di->dev, &di->desc, config);

View File

@@ -1841,6 +1841,8 @@ out:
} }
kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
if (dest_se_deve)
core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_lunacl_undepend_item(dest_se_deve);
if (is_local) if (is_local)

View File

@@ -20,6 +20,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h> #include <linux/regulator/consumer.h>
#include <linux/sched/clock.h> #include <linux/sched/clock.h>
#include <linux/iopoll.h> #include <linux/iopoll.h>
@@ -323,7 +324,8 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba); static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba); static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up); static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba); static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba, static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode); struct ufs_pa_layer_attr *pwr_mode);
@@ -1114,14 +1116,32 @@ out:
return ret; return ret;
} }
static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
{
struct dev_pm_opp *opp;
int ret;
opp = dev_pm_opp_find_freq_floor_indexed(hba->dev,
&freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
ret = dev_pm_opp_set_opp(hba->dev, opp);
dev_pm_opp_put(opp);
return ret;
}
/** /**
* ufshcd_scale_clks - scale up or scale down UFS controller clocks * ufshcd_scale_clks - scale up or scale down UFS controller clocks
* @hba: per adapter instance * @hba: per adapter instance
* @freq: frequency to scale
* @scale_up: True if scaling up and false if scaling down * @scale_up: True if scaling up and false if scaling down
* *
* Return: 0 if successful; < 0 upon failure. * Return: 0 if successful; < 0 upon failure.
*/ */
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up) static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
bool scale_up)
{ {
int ret = 0; int ret = 0;
ktime_t start = ktime_get(); ktime_t start = ktime_get();
@@ -1130,13 +1150,21 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
if (ret) if (ret)
goto out; goto out;
if (hba->use_pm_opp)
ret = ufshcd_opp_set_rate(hba, freq);
else
ret = ufshcd_set_clk_freq(hba, scale_up); ret = ufshcd_set_clk_freq(hba, scale_up);
if (ret) if (ret)
goto out; goto out;
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE); ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
if (ret) if (ret) {
if (hba->use_pm_opp)
ufshcd_opp_set_rate(hba,
hba->devfreq->previous_freq);
else
ufshcd_set_clk_freq(hba, !scale_up); ufshcd_set_clk_freq(hba, !scale_up);
}
out: out:
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
@@ -1148,12 +1176,13 @@ out:
/** /**
* ufshcd_is_devfreq_scaling_required - check if scaling is required or not * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
* @hba: per adapter instance * @hba: per adapter instance
* @freq: frequency to scale
* @scale_up: True if scaling up and false if scaling down * @scale_up: True if scaling up and false if scaling down
* *
* Return: true if scaling is required, false otherwise. * Return: true if scaling is required, false otherwise.
*/ */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba, static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
bool scale_up) unsigned long freq, bool scale_up)
{ {
struct ufs_clk_info *clki; struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head; struct list_head *head = &hba->clk_list_head;
@@ -1161,6 +1190,9 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
if (list_empty(head)) if (list_empty(head))
return false; return false;
if (hba->use_pm_opp)
return freq != hba->clk_scaling.target_freq;
list_for_each_entry(clki, head, list) { list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) { if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) { if (scale_up && clki->max_freq) {
@@ -1359,12 +1391,14 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool sc
/** /**
* ufshcd_devfreq_scale - scale up/down UFS clocks and gear * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
* @hba: per adapter instance * @hba: per adapter instance
* @freq: frequency to scale
* @scale_up: True for scaling up and false for scalin down * @scale_up: True for scaling up and false for scalin down
* *
* Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
* for any other errors. * for any other errors.
*/ */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
bool scale_up)
{ {
int ret = 0; int ret = 0;
@@ -1379,7 +1413,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
goto out_unprepare; goto out_unprepare;
} }
ret = ufshcd_scale_clks(hba, scale_up); ret = ufshcd_scale_clks(hba, freq, scale_up);
if (ret) { if (ret) {
if (!scale_up) if (!scale_up)
ufshcd_scale_gear(hba, true); ufshcd_scale_gear(hba, true);
@@ -1390,7 +1424,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
if (scale_up) { if (scale_up) {
ret = ufshcd_scale_gear(hba, true); ret = ufshcd_scale_gear(hba, true);
if (ret) { if (ret) {
ufshcd_scale_clks(hba, false); ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
false);
goto out_unprepare; goto out_unprepare;
} }
} }
@@ -1449,9 +1484,22 @@ static int ufshcd_devfreq_target(struct device *dev,
if (!ufshcd_is_clkscaling_supported(hba)) if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL; return -EINVAL;
clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list); if (hba->use_pm_opp) {
struct dev_pm_opp *opp;
/* Get the recommended frequency from OPP framework */
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
dev_pm_opp_put(opp);
} else {
/* Override with the closest supported frequency */ /* Override with the closest supported frequency */
clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info,
list);
*freq = (unsigned long) clk_round_rate(clki->clk, *freq); *freq = (unsigned long) clk_round_rate(clki->clk, *freq);
}
spin_lock_irqsave(hba->host->host_lock, irq_flags); spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (ufshcd_eh_in_progress(hba)) { if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags); spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
@@ -1473,12 +1521,17 @@ static int ufshcd_devfreq_target(struct device *dev,
goto out; goto out;
} }
/* Decide based on the rounded-off frequency and update */ /* Decide based on the target or rounded-off frequency and update */
if (hba->use_pm_opp)
scale_up = *freq > hba->clk_scaling.target_freq;
else
scale_up = *freq == clki->max_freq; scale_up = *freq == clki->max_freq;
if (!scale_up)
if (!hba->use_pm_opp && !scale_up)
*freq = clki->min_freq; *freq = clki->min_freq;
/* Update the frequency */ /* Update the frequency */
if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) { if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags); spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
ret = 0; ret = 0;
goto out; /* no state change required */ goto out; /* no state change required */
@@ -1486,7 +1539,9 @@ static int ufshcd_devfreq_target(struct device *dev,
spin_unlock_irqrestore(hba->host->host_lock, irq_flags); spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
start = ktime_get(); start = ktime_get();
ret = ufshcd_devfreq_scale(hba, scale_up); ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
if (!ret)
hba->clk_scaling.target_freq = *freq;
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev), trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"), (scale_up ? "up" : "down"),
@@ -1507,8 +1562,6 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct ufs_hba *hba = dev_get_drvdata(dev); struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling; struct ufs_clk_scaling *scaling = &hba->clk_scaling;
unsigned long flags; unsigned long flags;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
ktime_t curr_t; ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba)) if (!ufshcd_is_clkscaling_supported(hba))
@@ -1521,17 +1574,24 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
if (!scaling->window_start_t) if (!scaling->window_start_t)
goto start_window; goto start_window;
clki = list_first_entry(clk_list, struct ufs_clk_info, list);
/* /*
* If current frequency is 0, then the ondemand governor considers * If current frequency is 0, then the ondemand governor considers
* there's no initial frequency set. And it always requests to set * there's no initial frequency set. And it always requests to set
* to max. frequency. * to max. frequency.
*/ */
if (hba->use_pm_opp) {
stat->current_frequency = hba->clk_scaling.target_freq;
} else {
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
clki = list_first_entry(clk_list, struct ufs_clk_info, list);
stat->current_frequency = clki->curr_freq; stat->current_frequency = clki->curr_freq;
}
if (scaling->is_busy_started) if (scaling->is_busy_started)
scaling->tot_busy_t += ktime_us_delta(curr_t, scaling->tot_busy_t += ktime_us_delta(curr_t,
scaling->busy_start_t); scaling->busy_start_t);
stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t); stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
stat->busy_time = scaling->tot_busy_t; stat->busy_time = scaling->tot_busy_t;
start_window: start_window:
@@ -1560,9 +1620,11 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
if (list_empty(clk_list)) if (list_empty(clk_list))
return 0; return 0;
if (!hba->use_pm_opp) {
clki = list_first_entry(clk_list, struct ufs_clk_info, list); clki = list_first_entry(clk_list, struct ufs_clk_info, list);
dev_pm_opp_add(hba->dev, clki->min_freq, 0); dev_pm_opp_add(hba->dev, clki->min_freq, 0);
dev_pm_opp_add(hba->dev, clki->max_freq, 0); dev_pm_opp_add(hba->dev, clki->max_freq, 0);
}
ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile, ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
&hba->vps->ondemand_data); &hba->vps->ondemand_data);
@@ -1574,8 +1636,10 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
ret = PTR_ERR(devfreq); ret = PTR_ERR(devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n", ret); dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
if (!hba->use_pm_opp) {
dev_pm_opp_remove(hba->dev, clki->min_freq); dev_pm_opp_remove(hba->dev, clki->min_freq);
dev_pm_opp_remove(hba->dev, clki->max_freq); dev_pm_opp_remove(hba->dev, clki->max_freq);
}
return ret; return ret;
} }
@@ -1587,7 +1651,6 @@ static int ufshcd_devfreq_init(struct ufs_hba *hba)
static void ufshcd_devfreq_remove(struct ufs_hba *hba) static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{ {
struct list_head *clk_list = &hba->clk_list_head; struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
if (!hba->devfreq) if (!hba->devfreq)
return; return;
@@ -1595,10 +1658,14 @@ static void ufshcd_devfreq_remove(struct ufs_hba *hba)
devfreq_remove_device(hba->devfreq); devfreq_remove_device(hba->devfreq);
hba->devfreq = NULL; hba->devfreq = NULL;
if (!hba->use_pm_opp) {
struct ufs_clk_info *clki;
clki = list_first_entry(clk_list, struct ufs_clk_info, list); clki = list_first_entry(clk_list, struct ufs_clk_info, list);
dev_pm_opp_remove(hba->dev, clki->min_freq); dev_pm_opp_remove(hba->dev, clki->min_freq);
dev_pm_opp_remove(hba->dev, clki->max_freq); dev_pm_opp_remove(hba->dev, clki->max_freq);
} }
}
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba) static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{ {
@@ -1673,7 +1740,7 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
ufshcd_resume_clkscaling(hba); ufshcd_resume_clkscaling(hba);
} else { } else {
ufshcd_suspend_clkscaling(hba); ufshcd_suspend_clkscaling(hba);
err = ufshcd_devfreq_scale(hba, true); err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
if (err) if (err)
dev_err(hba->dev, "%s: failed to scale clocks up %d\n", dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
__func__, err); __func__, err);
@@ -7772,7 +7839,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
hba->silence_err_logs = false; hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */ /* scale up clocks to max frequency before full reinitialization */
ufshcd_scale_clks(hba, true); if (ufshcd_is_clkscaling_supported(hba))
ufshcd_scale_clks(hba, ULONG_MAX, true);
err = ufshcd_hba_enable(hba); err = ufshcd_hba_enable(hba);
@@ -9382,6 +9450,17 @@ static int ufshcd_init_clocks(struct ufs_hba *hba)
dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__, dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
clki->name, clk_get_rate(clki->clk)); clki->name, clk_get_rate(clki->clk));
} }
/* Set Max. frequency for all clocks */
if (hba->use_pm_opp) {
ret = ufshcd_opp_set_rate(hba, ULONG_MAX);
if (ret) {
dev_err(hba->dev, "%s: failed to set OPP: %d", __func__,
ret);
goto out;
}
}
out: out:
return ret; return ret;
} }

View File

@@ -772,7 +772,9 @@ static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
} }
if (port_id != old_port) { if (port_id != old_port) {
if (pdev->slot_id)
cdnsp_disable_slot(pdev); cdnsp_disable_slot(pdev);
pdev->active_port = port; pdev->active_port = port;
cdnsp_enable_slot(pdev); cdnsp_enable_slot(pdev);
} }

View File

@@ -2213,6 +2213,10 @@ static void udc_suspend(struct ci_hdrc *ci)
*/ */
if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0) if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0)
hw_write(ci, OP_ENDPTLISTADDR, ~0, ~0); hw_write(ci, OP_ENDPTLISTADDR, ~0, ~0);
if (ci->gadget.connected &&
(!ci->suspended || !device_may_wakeup(ci->dev)))
usb_gadget_disconnect(&ci->gadget);
} }
static void udc_resume(struct ci_hdrc *ci, bool power_lost) static void udc_resume(struct ci_hdrc *ci, bool power_lost)
@@ -2223,6 +2227,9 @@ static void udc_resume(struct ci_hdrc *ci, bool power_lost)
OTGSC_BSVIS | OTGSC_BSVIE); OTGSC_BSVIS | OTGSC_BSVIE);
if (ci->vbus_active) if (ci->vbus_active)
usb_gadget_vbus_disconnect(&ci->gadget); usb_gadget_vbus_disconnect(&ci->gadget);
} else if (ci->vbus_active && ci->driver &&
!ci->gadget.connected) {
usb_gadget_connect(&ci->gadget);
} }
/* Restore value 0 if it was set for power lost check */ /* Restore value 0 if it was set for power lost check */

View File

@@ -227,7 +227,8 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME }, { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech HD Webcam C270 */ /* Logitech HD Webcam C270 */
{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME |
USB_QUIRK_NO_LPM},
/* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },

View File

@@ -639,6 +639,10 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
case DS_DISABLED: case DS_DISABLED:
return; return;
case DS_CONFIGURED: case DS_CONFIGURED:
spin_lock(&dbc->lock);
xhci_dbc_flush_requests(dbc);
spin_unlock(&dbc->lock);
if (dbc->driver->disconnect) if (dbc->driver->disconnect)
dbc->driver->disconnect(dbc); dbc->driver->disconnect(dbc);
break; break;

View File

@@ -585,6 +585,7 @@ int dbc_tty_init(void)
dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL; dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
dbc_tty_driver->init_termios = tty_std_termios; dbc_tty_driver->init_termios = tty_std_termios;
dbc_tty_driver->init_termios.c_lflag &= ~ECHO;
dbc_tty_driver->init_termios.c_cflag = dbc_tty_driver->init_termios.c_cflag =
B9600 | CS8 | CREAD | HUPCL | CLOCAL; B9600 | CS8 | CREAD | HUPCL | CLOCAL;
dbc_tty_driver->init_termios.c_ispeed = 9600; dbc_tty_driver->init_termios.c_ispeed = 9600;

View File

@@ -1442,6 +1442,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Periodic endpoint bInterval limit quirk */ /* Periodic endpoint bInterval limit quirk */
if (usb_endpoint_xfer_int(&ep->desc) || if (usb_endpoint_xfer_int(&ep->desc) ||
usb_endpoint_xfer_isoc(&ep->desc)) { usb_endpoint_xfer_isoc(&ep->desc)) {
if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_9) &&
interval >= 9) {
interval = 8;
}
if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) && if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
udev->speed >= USB_SPEED_HIGH && udev->speed >= USB_SPEED_HIGH &&
interval >= 7) { interval >= 7) {

View File

@@ -65,12 +65,22 @@
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_PCH_XHCI 0x54ed
#define PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI 0x13ed
#define PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI 0x13ee
#define PCI_DEVICE_ID_AMD_STARSHIP_XHCI 0x148c
#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI 0x15d4
#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI 0x15d5
#define PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI 0x15e0
#define PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI 0x15e1
#define PCI_DEVICE_ID_AMD_RAVEN2_XHCI 0x15e5
#define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
#define PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI 0x7316
#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242 #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242
@@ -348,6 +358,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_NEC) if (pdev->vendor == PCI_VENDOR_ID_NEC)
xhci->quirks |= XHCI_NEC_HOST; xhci->quirks |= XHCI_NEC_HOST;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
(pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI ||
pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI ||
pdev->device == PCI_DEVICE_ID_AMD_STARSHIP_XHCI ||
pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI ||
pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI ||
pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI ||
pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI ||
pdev->device == PCI_DEVICE_ID_AMD_RAVEN2_XHCI))
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9;
if (pdev->vendor == PCI_VENDOR_ID_ATI &&
pdev->device == PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI)
xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9;
if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96) if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
xhci->quirks |= XHCI_AMD_0x96_HOST; xhci->quirks |= XHCI_AMD_0x96_HOST;

View File

@@ -321,7 +321,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
} }
usb3_hcd = xhci_get_usb3_hcd(xhci); usb3_hcd = xhci_get_usb3_hcd(xhci);
if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4) if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
!(xhci->quirks & XHCI_BROKEN_STREAMS))
usb3_hcd->can_do_streams = 1; usb3_hcd->can_do_streams = 1;
if (xhci->shared_hcd) { if (xhci->shared_hcd) {

View File

@@ -1681,6 +1681,7 @@ struct xhci_hcd {
#define XHCI_WRITE_64_HI_LO BIT_ULL(47) #define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48) #define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
#define XHCI_ETRON_HOST BIT_ULL(49) #define XHCI_ETRON_HOST BIT_ULL(49)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_9 BIT_ULL(50)
unsigned int num_active_eps; unsigned int num_active_eps;
unsigned int limit_active_eps; unsigned int limit_active_eps;

View File

@@ -324,8 +324,7 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
case CMDT_RSP_NAK: case CMDT_RSP_NAK:
switch (cmd) { switch (cmd) {
case DP_CMD_STATUS_UPDATE: case DP_CMD_STATUS_UPDATE:
if (typec_altmode_exit(alt)) dp->state = DP_STATE_EXIT;
dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
break; break;
case DP_CMD_CONFIGURE: case DP_CMD_CONFIGURE:
dp->data.conf = 0; dp->data.conf = 0;

View File

@@ -55,15 +55,26 @@ static struct file_system_type anon_inode_fs_type = {
.kill_sb = kill_anon_super, .kill_sb = kill_anon_super,
}; };
static struct inode *anon_inode_make_secure_inode( /**
const char *name, * anon_inode_make_secure_inode - allocate an anonymous inode with security context
* @sb: [in] Superblock to allocate from
* @name: [in] Name of the class of the newfile (e.g., "secretmem")
* @context_inode:
* [in] Optional parent inode for security inheritance
*
* The function ensures proper security initialization through the LSM hook
* security_inode_init_security_anon().
*
* Return: Pointer to new inode on success, ERR_PTR on failure.
*/
struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
const struct inode *context_inode) const struct inode *context_inode)
{ {
struct inode *inode; struct inode *inode;
const struct qstr qname = QSTR_INIT(name, strlen(name)); const struct qstr qname = QSTR_INIT(name, strlen(name));
int error; int error;
inode = alloc_anon_inode(anon_inode_mnt->mnt_sb); inode = alloc_anon_inode(sb);
if (IS_ERR(inode)) if (IS_ERR(inode))
return inode; return inode;
inode->i_flags &= ~S_PRIVATE; inode->i_flags &= ~S_PRIVATE;
@@ -74,6 +85,7 @@ static struct inode *anon_inode_make_secure_inode(
} }
return inode; return inode;
} }
EXPORT_SYMBOL_GPL_FOR_MODULES(anon_inode_make_secure_inode, "kvm");
static struct file *__anon_inode_getfile(const char *name, static struct file *__anon_inode_getfile(const char *name,
const struct file_operations *fops, const struct file_operations *fops,
@@ -88,7 +100,8 @@ static struct file *__anon_inode_getfile(const char *name,
return ERR_PTR(-ENOENT); return ERR_PTR(-ENOENT);
if (secure) { if (secure) {
inode = anon_inode_make_secure_inode(name, context_inode); inode = anon_inode_make_secure_inode(anon_inode_mnt->mnt_sb,
name, context_inode);
if (IS_ERR(inode)) { if (IS_ERR(inode)) {
file = ERR_CAST(inode); file = ERR_CAST(inode);
goto err; goto err;

View File

@@ -1096,6 +1096,7 @@ static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
} }
static int ff_layout_async_handle_error_v4(struct rpc_task *task, static int ff_layout_async_handle_error_v4(struct rpc_task *task,
u32 op_status,
struct nfs4_state *state, struct nfs4_state *state,
struct nfs_client *clp, struct nfs_client *clp,
struct pnfs_layout_segment *lseg, struct pnfs_layout_segment *lseg,
@@ -1106,32 +1107,42 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table; struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
switch (task->tk_status) { switch (op_status) {
case -NFS4ERR_BADSESSION: case NFS4_OK:
case -NFS4ERR_BADSLOT: case NFS4ERR_NXIO:
case -NFS4ERR_BAD_HIGH_SLOT: break;
case -NFS4ERR_DEADSESSION: case NFSERR_PERM:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: if (!task->tk_xprt)
case -NFS4ERR_SEQ_FALSE_RETRY: break;
case -NFS4ERR_SEQ_MISORDERED: xprt_force_disconnect(task->tk_xprt);
goto out_retry;
case NFS4ERR_BADSESSION:
case NFS4ERR_BADSLOT:
case NFS4ERR_BAD_HIGH_SLOT:
case NFS4ERR_DEADSESSION:
case NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case NFS4ERR_SEQ_FALSE_RETRY:
case NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session. Exchangeid " dprintk("%s ERROR %d, Reset session. Exchangeid "
"flags 0x%x\n", __func__, task->tk_status, "flags 0x%x\n", __func__, task->tk_status,
clp->cl_exchange_flags); clp->cl_exchange_flags);
nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
break; goto out_retry;
case -NFS4ERR_DELAY: case NFS4ERR_DELAY:
case -NFS4ERR_GRACE: nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
fallthrough;
case NFS4ERR_GRACE:
rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX); rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
break; goto out_retry;
case -NFS4ERR_RETRY_UNCACHED_REP: case NFS4ERR_RETRY_UNCACHED_REP:
break; goto out_retry;
/* Invalidate Layout errors */ /* Invalidate Layout errors */
case -NFS4ERR_PNFS_NO_LAYOUT: case NFS4ERR_PNFS_NO_LAYOUT:
case -ESTALE: /* mapped NFS4ERR_STALE */ case NFS4ERR_STALE:
case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */ case NFS4ERR_BADHANDLE:
case -EISDIR: /* mapped NFS4ERR_ISDIR */ case NFS4ERR_ISDIR:
case -NFS4ERR_FHEXPIRED: case NFS4ERR_FHEXPIRED:
case -NFS4ERR_WRONG_TYPE: case NFS4ERR_WRONG_TYPE:
dprintk("%s Invalid layout error %d\n", __func__, dprintk("%s Invalid layout error %d\n", __func__,
task->tk_status); task->tk_status);
/* /*
@@ -1144,6 +1155,11 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
pnfs_destroy_layout(NFS_I(inode)); pnfs_destroy_layout(NFS_I(inode));
rpc_wake_up(&tbl->slot_tbl_waitq); rpc_wake_up(&tbl->slot_tbl_waitq);
goto reset; goto reset;
default:
break;
}
switch (task->tk_status) {
/* RPC connection errors */ /* RPC connection errors */
case -ECONNREFUSED: case -ECONNREFUSED:
case -EHOSTDOWN: case -EHOSTDOWN:
@@ -1159,26 +1175,56 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
nfs4_delete_deviceid(devid->ld, devid->nfs_client, nfs4_delete_deviceid(devid->ld, devid->nfs_client,
&devid->deviceid); &devid->deviceid);
rpc_wake_up(&tbl->slot_tbl_waitq); rpc_wake_up(&tbl->slot_tbl_waitq);
fallthrough; break;
default: default:
break;
}
if (ff_layout_avoid_mds_available_ds(lseg)) if (ff_layout_avoid_mds_available_ds(lseg))
return -NFS4ERR_RESET_TO_PNFS; return -NFS4ERR_RESET_TO_PNFS;
reset: reset:
dprintk("%s Retry through MDS. Error %d\n", __func__, dprintk("%s Retry through MDS. Error %d\n", __func__,
task->tk_status); task->tk_status);
return -NFS4ERR_RESET_TO_MDS; return -NFS4ERR_RESET_TO_MDS;
}
out_retry:
task->tk_status = 0; task->tk_status = 0;
return -EAGAIN; return -EAGAIN;
} }
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task, static int ff_layout_async_handle_error_v3(struct rpc_task *task,
u32 op_status,
struct nfs_client *clp,
struct pnfs_layout_segment *lseg, struct pnfs_layout_segment *lseg,
u32 idx) u32 idx)
{ {
struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
switch (op_status) {
case NFS_OK:
case NFSERR_NXIO:
break;
case NFSERR_PERM:
if (!task->tk_xprt)
break;
xprt_force_disconnect(task->tk_xprt);
goto out_retry;
case NFSERR_ACCES:
case NFSERR_BADHANDLE:
case NFSERR_FBIG:
case NFSERR_IO:
case NFSERR_NOSPC:
case NFSERR_ROFS:
case NFSERR_STALE:
goto out_reset_to_pnfs;
case NFSERR_JUKEBOX:
nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
goto out_retry;
default:
break;
}
switch (task->tk_status) { switch (task->tk_status) {
/* File access problems. Don't mark the device as unavailable */ /* File access problems. Don't mark the device as unavailable */
case -EACCES: case -EACCES:
@@ -1197,6 +1243,7 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
nfs4_delete_deviceid(devid->ld, devid->nfs_client, nfs4_delete_deviceid(devid->ld, devid->nfs_client,
&devid->deviceid); &devid->deviceid);
} }
out_reset_to_pnfs:
/* FIXME: Need to prevent infinite looping here. */ /* FIXME: Need to prevent infinite looping here. */
return -NFS4ERR_RESET_TO_PNFS; return -NFS4ERR_RESET_TO_PNFS;
out_retry: out_retry:
@@ -1207,6 +1254,7 @@ out_retry:
} }
static int ff_layout_async_handle_error(struct rpc_task *task, static int ff_layout_async_handle_error(struct rpc_task *task,
u32 op_status,
struct nfs4_state *state, struct nfs4_state *state,
struct nfs_client *clp, struct nfs_client *clp,
struct pnfs_layout_segment *lseg, struct pnfs_layout_segment *lseg,
@@ -1225,10 +1273,11 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
switch (vers) { switch (vers) {
case 3: case 3:
return ff_layout_async_handle_error_v3(task, lseg, idx); return ff_layout_async_handle_error_v3(task, op_status, clp,
case 4:
return ff_layout_async_handle_error_v4(task, state, clp,
lseg, idx); lseg, idx);
case 4:
return ff_layout_async_handle_error_v4(task, op_status, state,
clp, lseg, idx);
default: default:
/* should never happen */ /* should never happen */
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
@@ -1281,6 +1330,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
switch (status) { switch (status) {
case NFS4ERR_DELAY: case NFS4ERR_DELAY:
case NFS4ERR_GRACE: case NFS4ERR_GRACE:
case NFS4ERR_PERM:
break; break;
case NFS4ERR_NXIO: case NFS4ERR_NXIO:
ff_layout_mark_ds_unreachable(lseg, idx); ff_layout_mark_ds_unreachable(lseg, idx);
@@ -1313,7 +1363,8 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
trace_ff_layout_read_error(hdr); trace_ff_layout_read_error(hdr);
} }
err = ff_layout_async_handle_error(task, hdr->args.context->state, err = ff_layout_async_handle_error(task, hdr->res.op_status,
hdr->args.context->state,
hdr->ds_clp, hdr->lseg, hdr->ds_clp, hdr->lseg,
hdr->pgio_mirror_idx); hdr->pgio_mirror_idx);
@@ -1483,7 +1534,8 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
trace_ff_layout_write_error(hdr); trace_ff_layout_write_error(hdr);
} }
err = ff_layout_async_handle_error(task, hdr->args.context->state, err = ff_layout_async_handle_error(task, hdr->res.op_status,
hdr->args.context->state,
hdr->ds_clp, hdr->lseg, hdr->ds_clp, hdr->lseg,
hdr->pgio_mirror_idx); hdr->pgio_mirror_idx);
@@ -1529,8 +1581,9 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
trace_ff_layout_commit_error(data); trace_ff_layout_commit_error(data);
} }
err = ff_layout_async_handle_error(task, NULL, data->ds_clp, err = ff_layout_async_handle_error(task, data->res.op_status,
data->lseg, data->ds_commit_index); NULL, data->ds_clp, data->lseg,
data->ds_commit_index);
trace_nfs4_pnfs_commit_ds(data, err); trace_nfs4_pnfs_commit_ds(data, err);
switch (err) { switch (err) {

View File

@@ -263,7 +263,7 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
/* The Mode field in the response can now include the file type as well */ /* The Mode field in the response can now include the file type as well */
fattr->cf_mode = wire_mode_to_posix(le32_to_cpu(info->Mode), fattr->cf_mode = wire_mode_to_posix(le32_to_cpu(info->Mode),
fattr->cf_cifsattrs & ATTR_DIRECTORY); fattr->cf_cifsattrs & ATTR_DIRECTORY);
fattr->cf_dtype = S_DT(le32_to_cpu(info->Mode)); fattr->cf_dtype = S_DT(fattr->cf_mode);
switch (fattr->cf_mode & S_IFMT) { switch (fattr->cf_mode & S_IFMT) {
case S_IFLNK: case S_IFLNK:

View File

@@ -79,6 +79,7 @@ extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
struct device_attribute *attr, char *buf); struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_indirect_target_selection(struct device *dev, extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
struct device_attribute *attr, char *buf); struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf);
extern __printf(4, 5) extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata, struct device *cpu_device_create(struct device *parent, void *drvdata,

View File

@@ -42,11 +42,17 @@ extern struct module __this_module;
.long sym .long sym
#endif #endif
#define ___EXPORT_SYMBOL(sym, license, ns) \ /*
* LLVM integrated assembler cam merge adjacent string literals (like
* C and GNU-as) passed to '.ascii', but not to '.asciz' and chokes on:
*
* .asciz "MODULE_" "kvm" ;
*/
#define ___EXPORT_SYMBOL(sym, license, ns...) \
.section ".export_symbol","a" ASM_NL \ .section ".export_symbol","a" ASM_NL \
__export_symbol_##sym: ASM_NL \ __export_symbol_##sym: ASM_NL \
.asciz license ASM_NL \ .asciz license ASM_NL \
.asciz ns ASM_NL \ .ascii ns "\0" ASM_NL \
__EXPORT_SYMBOL_REF(sym) ASM_NL \ __EXPORT_SYMBOL_REF(sym) ASM_NL \
.previous .previous
@@ -88,4 +94,6 @@ extern struct module __this_module;
#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns)) #define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", __stringify(ns))
#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns)) #define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "GPL", __stringify(ns))
#define EXPORT_SYMBOL_GPL_FOR_MODULES(sym, mods) __EXPORT_SYMBOL(sym, "GPL", "module:" mods)
#endif /* _LINUX_EXPORT_H */ #endif /* _LINUX_EXPORT_H */

View File

@@ -3174,6 +3174,8 @@ extern int simple_write_begin(struct file *file, struct address_space *mapping,
extern const struct address_space_operations ram_aops; extern const struct address_space_operations ram_aops;
extern int always_delete_dentry(const struct dentry *); extern int always_delete_dentry(const struct dentry *);
extern struct inode *alloc_anon_inode(struct super_block *); extern struct inode *alloc_anon_inode(struct super_block *);
struct inode *anon_inode_make_secure_inode(struct super_block *sb, const char *name,
const struct inode *context_inode);
extern int simple_nosetlease(struct file *, int, struct file_lock **, void **); extern int simple_nosetlease(struct file *, int, struct file_lock **, void **);
extern const struct dentry_operations simple_dentry_operations; extern const struct dentry_operations simple_dentry_operations;

View File

@@ -1305,7 +1305,7 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm); int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
const struct ata_acpi_gtm *gtm); const struct ata_acpi_gtm *gtm);
int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm); int ata_acpi_cbl_pata_type(struct ata_port *ap);
#else #else
static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap) static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
{ {
@@ -1330,10 +1330,9 @@ static inline unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev,
return 0; return 0;
} }
static inline int ata_acpi_cbl_80wire(struct ata_port *ap, static inline int ata_acpi_cbl_pata_type(struct ata_port *ap)
const struct ata_acpi_gtm *gtm)
{ {
return 0; return ATA_CBL_PATA40;
} }
#endif #endif

View File

@@ -445,6 +445,7 @@ struct ufs_clk_gating {
* @workq: workqueue to schedule devfreq suspend/resume work * @workq: workqueue to schedule devfreq suspend/resume work
* @suspend_work: worker to suspend devfreq * @suspend_work: worker to suspend devfreq
* @resume_work: worker to resume devfreq * @resume_work: worker to resume devfreq
* @target_freq: frequency requested by devfreq framework
* @min_gear: lowest HS gear to scale down to * @min_gear: lowest HS gear to scale down to
* @is_enabled: tracks if scaling is currently enabled or not, controlled by * @is_enabled: tracks if scaling is currently enabled or not, controlled by
* clkscale_enable sysfs node * clkscale_enable sysfs node
@@ -465,6 +466,7 @@ struct ufs_clk_scaling {
struct workqueue_struct *workq; struct workqueue_struct *workq;
struct work_struct suspend_work; struct work_struct suspend_work;
struct work_struct resume_work; struct work_struct resume_work;
unsigned long target_freq;
u32 min_gear; u32 min_gear;
bool is_enabled; bool is_enabled;
bool is_allowed; bool is_allowed;
@@ -926,6 +928,7 @@ enum ufshcd_mcq_opr {
* @auto_bkops_enabled: to track whether bkops is enabled in device * @auto_bkops_enabled: to track whether bkops is enabled in device
* @vreg_info: UFS device voltage regulator information * @vreg_info: UFS device voltage regulator information
* @clk_list_head: UFS host controller clocks list node head * @clk_list_head: UFS host controller clocks list node head
* @use_pm_opp: Indicates whether OPP based scaling is used or not
* @req_abort_count: number of times ufshcd_abort() has been called * @req_abort_count: number of times ufshcd_abort() has been called
* @lanes_per_direction: number of lanes per data direction between the UFS * @lanes_per_direction: number of lanes per data direction between the UFS
* controller and the UFS device. * controller and the UFS device.
@@ -1078,6 +1081,7 @@ struct ufs_hba {
bool auto_bkops_enabled; bool auto_bkops_enabled;
struct ufs_vreg_info vreg_info; struct ufs_vreg_info vreg_info;
struct list_head clk_list_head; struct list_head clk_list_head;
bool use_pm_opp;
/* Number of requests aborts */ /* Number of requests aborts */
int req_abort_count; int req_abort_count;

View File

@@ -2699,6 +2699,10 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in)
/* Misaligned rcu_head! */ /* Misaligned rcu_head! */
WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1)); WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
/* Avoid NULL dereference if callback is NULL. */
if (WARN_ON_ONCE(!func))
return;
if (debug_rcu_head_queue(head)) { if (debug_rcu_head_queue(head)) {
/* /*
* Probable double call_rcu(), so leak the callback. * Probable double call_rcu(), so leak the callback.

View File

@@ -195,19 +195,10 @@ static struct file *secretmem_file_create(unsigned long flags)
struct file *file; struct file *file;
struct inode *inode; struct inode *inode;
const char *anon_name = "[secretmem]"; const char *anon_name = "[secretmem]";
const struct qstr qname = QSTR_INIT(anon_name, strlen(anon_name));
int err;
inode = alloc_anon_inode(secretmem_mnt->mnt_sb); inode = anon_inode_make_secure_inode(secretmem_mnt->mnt_sb, anon_name, NULL);
if (IS_ERR(inode)) if (IS_ERR(inode))
return ERR_CAST(inode); return ERR_CAST(inode);
err = security_inode_init_security_anon(inode, &qname, NULL);
if (err) {
file = ERR_PTR(err);
goto err_free_inode;
}
file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem", file = alloc_file_pseudo(inode, secretmem_mnt, "secretmem",
O_RDWR, &secretmem_fops); O_RDWR, &secretmem_fops);
if (IS_ERR(file)) if (IS_ERR(file))

View File

@@ -4419,6 +4419,10 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
if (!multicast && if (!multicast &&
!ether_addr_equal(sdata->dev->dev_addr, hdr->addr1)) !ether_addr_equal(sdata->dev->dev_addr, hdr->addr1))
return false; return false;
/* reject invalid/our STA address */
if (!is_valid_ether_addr(hdr->addr2) ||
ether_addr_equal(sdata->dev->dev_addr, hdr->addr2))
return false;
if (!rx->sta) { if (!rx->sta) {
int rate_idx; int rate_idx;
if (status->encoding != RX_ENC_LEGACY) if (status->encoding != RX_ENC_LEGACY)

View File

@@ -703,6 +703,9 @@ static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ct
unsigned char nval, oval; unsigned char nval, oval;
int change; int change;
if (chip->mode & (SB_MODE_PLAYBACK | SB_MODE_CAPTURE))
return -EBUSY;
nval = ucontrol->value.enumerated.item[0]; nval = ucontrol->value.enumerated.item[0];
if (nval > 2) if (nval > 2)
return -EINVAL; return -EINVAL;
@@ -711,6 +714,10 @@ static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ct
change = nval != oval; change = nval != oval;
snd_sb16_set_dma_mode(chip, nval); snd_sb16_set_dma_mode(chip, nval);
spin_unlock_irqrestore(&chip->reg_lock, flags); spin_unlock_irqrestore(&chip->reg_lock, flags);
if (change) {
snd_dma_disable(chip->dma8);
snd_dma_disable(chip->dma16);
}
return change; return change;
} }

View File

@@ -451,6 +451,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VEK"), DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VEK"),
} }
}, },
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."),
DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 17 D7VF"),
}
},
{ {
.driver_data = &acp6x_card, .driver_data = &acp6x_card,
.matches = { .matches = {
@@ -514,6 +521,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"), DMI_MATCH(DMI_PRODUCT_NAME, "OMEN by HP Gaming Laptop 16z-n000"),
} }
}, },
{
.driver_data = &acp6x_card,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "Victus by HP Gaming Laptop 15-fb2xxx"),
}
},
{ {
.driver_data = &acp6x_card, .driver_data = &acp6x_card,
.matches = { .matches = {