Merge tag 'android15-6.6.92_r00' into android15-6.6

This merges the android15-6.6.92_r00 tag into the android15-6.6 branch,
catching it up with the latest LTS releases.

It contains the following commits:

* fc57b3829f ANDROID: GKI: db845c: add devm_register_sys_off_handler to symbol list
* fa2415a1dc ANDROID: GKI: fix up crc issue with struct drm_atomic_state
* c6d284ab16 Revert "nvmem: qfprom: switch to 4-byte aligned reads"
* 2cb8fe26d3 Revert "perf: Avoid the read if the count is already updated"
* ae998420ae Revert "serial: mctrl_gpio: split disable_ms into sync and no_sync APIs"
* 8fcd71ea95 Revert "espintcp: remove encap socket caching to avoid reference leak"
* e811b7a0cf Revert "crypto: ahash - Set default reqsize from ahash_alg"
* 07e706a8d0 Revert "genirq/msi: Store the IOMMU IOVA directly in msi_desc instead of iommu_cookie"
* e6f7f48517 Revert "ipv6: save dontfrag in cork"
*   5b8d7202bb Merge 328840c93b ("af_unix: Run GC on only one CPU.") into android15-6.6-lts
|\
| * 328840c93b af_unix: Run GC on only one CPU.
| * 4be073d590 af_unix: Return struct unix_sock from unix_get_socket().
| * c7b1bd52a0 btrfs: check folio mapping after unlock in relocate_one_folio()
| * 63815bef47 hrtimers: Force migrate away hrtimers queued after CPUHP_AP_HRTIMERS_DYING
| * 5a9c0d5cbd can: kvaser_pciefd: Force IRQ edge in case of nested IRQ
| * f968f28cd1 drm/gem: Internally test import_attach for imported objects
| * 10aecdc1c3 x86/mm/init: Handle the special case of device private pages in add_pages(), to not increase max_pfn and trigger dma_addressing_limited() bounce buffers
| * 367b8b91de i3c: master: svc: Fix implicit fallthrough in svc_i3c_master_ibi_work()
| * 1f91707374 pinctrl: tegra: Fix off by one in tegra_pinctrl_get_group()
| * 4a5e6e798e watchdog: aspeed: fix 64-bit division
| * a9e4ee7f12 serial: sh-sci: Save and restore more registers
| * 517f928cc0 pds_core: Prevent possible adminq overflow/stuck condition
| * 77192e9cfe highmem: add folio_test_partial_kmap()
| * bc133e43cb x86/boot: Compile boot code with -std=gnu11 too
| * 5cdce62dd9 spi: spi-fsl-dspi: Reset SR flags before sending a new message
| * 7cf42e5f40 spi: spi-fsl-dspi: Halt the module after a new message transfer
| * 9df00bd476 spi: spi-fsl-dspi: restrict register range for regmap access
| * 0e7efc9acb ksmbd: fix stream write failure
| * fa68d5c14a Revert "arm64: dts: allwinner: h6: Use RSB for AXP805 PMIC connection"
| * f391043332 mm/page_alloc.c: avoid infinite retries caused by cpuset race
| * 363fd868d7 memcg: always call cond_resched() after fn()
| * f33001a7c1 Input: xpad - add more controllers
| * ed7d24b0c3 Revert "drm/amd: Keep display off while going into S4"
| * bee465c0c4 smb: client: Reset all search buffer pointers when releasing buffer
| * 17d096c485 arm64: dts: marvell: uDPU: define pinctrl state for alarm LEDs
| * 73cadde98f smb: client: Fix use-after-free in cifs_fill_dirent
| * 7227fc0692 drm/edid: fixed the bug that hdr metadata was not reset
| * 7093887a11 thermal: intel: x86_pkg_temp_thermal: Fix bogus trip temperature
| * f864656269 platform/x86: dell-wmi-sysman: Avoid buffer overflow in current_password_store()
| * dde5400dad can: kvaser_pciefd: Continue parsing DMA buf after dropped RX
| * 4cfe30f681 llc: fix data loss when reading from a socket in llc_ui_recvmsg()
| * 6764329675 ALSA: hda/realtek: Add quirk for Lenovo Yoga Pro 7 14ASP10
| * 74d90875f3 ALSA: pcm: Fix race of buffer access at PCM OSS layer
| * b5bada85c1 ASoC: SOF: ipc4-pcm: Delay reporting is only supported for playback direction
| * 1beb8c26b1 ASoc: SOF: topology: connect DAI to a single DAI link
| * e8be784d30 ASoC: SOF: ipc4-control: Use SOF_CTRL_CMD_BINARY as numid for bytes_ext
| * 1f912f8484 can: bcm: add missing rcu read protection for procfs content
| * 76c84c3728 can: bcm: add locking for bcm_op runtime updates
| * e80f4f9c64 can: slcan: allow reception of short error messages
| * 584a729615 padata: do not leak refcount in reorder_work
| * 134daaba93 crypto: algif_hash - fix double free in hash_accept
| * 0c605de7ed clk: s2mps11: initialise clk_hw_onecell_data::num before accessing ::hws[] in probe()
| * 35016086ae octeontx2-af: Fix APR entry mapping based on APR_LMT_CFG
| * cc797adde6 octeontx2-af: Set LMT_ENA bit for APR table entries
| * b19fc1d0be net/tipc: fix slab-use-after-free Read in tipc_aead_encrypt_done
| * a3d24e4a83 octeontx2-pf: Add AF_XDP non-zero copy support
| * 93c276942e sch_hfsc: Fix qlen accounting bug when using peek in hfsc_enqueue()
| * c844ace5b8 io_uring: fix overflow resched cqe reordering
| * dbcd0909a1 net: lan743x: Restore SGMII CTRL register on resume
| * 0b4cde7284 net: dwmac-sun8i: Use parsed internal PHY address instead of 1
| * 52b2e55792 pinctrl: qcom: switch to devm_register_sys_off_handler()
| * 78b70388ca pinctrl: qcom/msm: Convert to platform remove callback returning void
| * df941e1177 ice: Fix LACP bonds without SRIOV environment
| * c8a1a805e6 ice: fix vf->num_mac count with port representors
| * fa942824b9 bridge: netfilter: Fix forwarding of fragmented packets
| * feb6bde1a3 Bluetooth: btusb: use skb_pull to avoid unsafe access in QCA dump handling
| * c331a616a0 Bluetooth: L2CAP: Fix not checking l2cap_chan security level
| * 2b82d8e483 dmaengine: fsl-edma: Fix return code for unhandled interrupts
| * 55f3c97fcb dmaengine: idxd: Fix ->poll() return value
| * dcdc1aca68 xfrm: Sanitize marks before insert
| * a1fad2da94 clk: sunxi-ng: d1: Add missing divider for MMC mod clocks
| * c2b52d947c remoteproc: qcom_wcnss: Fix on platforms without fallback regulators
| * 5ff1a234fa kernel/fork: only call untrack_pfn_clear() on VMAs duplicated for fork()
| * 1b388afe88 dmaengine: idxd: Fix allowing write() from different address spaces
| * 05298c30b7 dmaengine: idxd: add wq driver name support for accel-config user tool
| * b58a295d10 espintcp: remove encap socket caching to avoid reference leak
| * 3cf3d4f112 soundwire: bus: Fix race on the creation of the IRQ domain
| * d8ece4ced3 __legitimize_mnt(): check for MNT_SYNC_UMOUNT should be under mount_lock
| * e6d703b693 xenbus: Allow PVH dom0 a non-local xenstore
| * 85d12487d9 wifi: iwlwifi: add support for Killer on MTL
| * 4e3401aa6e tools: ynl-gen: validate 0 len strings from kernel
| * 50d0de59f6 btrfs: avoid NULL pointer dereference if no valid csum tree
| * 7a97f961a5 btrfs: correct the order of prelim_ref arguments in btrfs__prelim_ref
| * 225cc549ed cifs: Fix changing times and read-only attr over SMB1 smb_set_file_info() function
| * fc460c12cd cifs: Fix and improve cifs_query_path_info() and cifs_query_file_info()
| * b72952c8c3 io_uring/fdinfo: annotate racy sq/cq head/tail reads
| * 3a982ada41 nvmet-tcp: don't restore null sk_state_change
| * a7a2315d8d nvme-pci: add quirks for WDC Blue SN550 15b7:5009
| * 0a3f5efd1c nvme-pci: add quirks for device 126f:1001
| * 1c0d7792cf ALSA: hda/realtek: Add quirk for HP Spectre x360 15-df1xxx
| * dbb47cbdbe ASoC: Intel: bytcr_rt5640: Add DMI quirk for Acer Aspire SW3-013
| * 4c017ff3d7 ASoC: cs42l43: Disable headphone clamps during type detection
| * 0ab50f622f pinctrl: meson: define the pull up/down resistor value as 60 kOhm
| * 534794c719 book3s64/radix: Fix compile errors when CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=n
| * 442a247361 ASoC: imx-card: Adjust over allocation of memory in imx_card_parse_of()
| * fe4a7145f0 drm: Add valid clones check
| * 1c58b332bc drm/panel-edp: Add Starry 116KHD024006
| * e585f4f44a drm/atomic: clarify the rules around drm_atomic_state->allow_modeset
| * 5d4951bad7 drm: bridge: adv7511: fill stream capabilities
| * a7e7cf5389 wifi: ath12k: Fix end offset bit definition in monitor ring descriptor
| * 2dbcaddcf8 wifi: ath9k: return by of_get_mac_address
| * a21971b4be accel/qaic: Mask out SR-IOV PCI resources
| * fbdf410d3b wifi: ath12k: fix ath12k_hal_tx_cmd_ext_desc_setup() info1 override
| * b658e144a0 regulator: ad5398: Add device tree support
| * df76df11fd spi: zynqmp-gqspi: Always acknowledge interrupts
| * ccf4a818d8 wifi: rtw89: add wiphy_lock() to work that isn't held wiphy_lock() yet
| * 5e479af499 wifi: rtw88: Don't use static local variable in rtw8822b_set_tx_power_index_by_rate
| * ee71c34d4f wifi: rtl8xxxu: retry firmware download on error
| * 419988f4d0 perf/amd/ibs: Fix ->config to sample period calculation for OP PMU
| * 0cb6a1292a perf/amd/ibs: Fix perf_ibs_op.cnt_mask for CurCnt
| * 9610a67963 firmware: arm_scmi: Relax duplicate name constraint across protocol ids
| * 38d906f1c5 bpftool: Fix readlink usage in get_fd_type
| * 0e8eb91a49 drm/ast: Find VBIOS mode from regular display size
| * e190ed2c12 ASoC: rt722-sdca: Add some missing readable registers
| * 48c78cf634 ASoC: codecs: pcm3168a: Allow for 24-bit in provider mode
| * 123ac614e5 arm64: zynqmp: add clock-output-names property in clock nodes
| * 23b0b86ad1 HID: usbkbd: Fix the bit shift number for LED_KANA
| * c317b0694a wifi: ath12k: Avoid napi_sync() before napi_enable()
| * 32bcf54138 scsi: st: Restore some drive settings after reset
| * 1960bb56a9 scsi: lpfc: Free phba irq in lpfc_sli4_enable_msi() when pci_irq_vector() fails
| * 3dfeee957a scsi: lpfc: Handle duplicate D_IDs in ndlp search-by D_ID routine
| * c34ab75add net/mana: fix warning in the writer of client oob
| * 1d10624a42 ice: count combined queues using Rx/Tx count
| * 948664b92e perf: Avoid the read if the count is already updated
| * f47d605c5e rcu: fix header guard for rcu_all_qs()
| * 9abec90c85 rcu: handle unstable rdp in rcu_read_unlock_strict()
| * 4a36d93a01 rcu: handle quiescent states for PREEMPT_RCU=n, PREEMPT_COUNT=y
| * cae5699816 ice: treat dyn_allowed only as suggestion
| * 9ed3c1b11d bridge: mdb: Allow replace of a host-joined group
| * 961ee13210 r8169: don't scan PHY addresses > 0
| * 2cb57a887a ipv4: ip_gre: Fix set but not used warning in ipgre_err() if IPv4-only
| * 13cba3f837 vxlan: Annotate FDB data races
| * 090c0ba179 net/mlx5e: Avoid WARN_ON when configuring MQPRIO with HTB offload enabled
| * f312bd5cf2 media: qcom: camss: csid: Only add TPG v4l2 ctrl if TPG hardware is available
| * 6fed5e23d2 f2fs: introduce f2fs_base_attr for global sysfs entries
| * 9af429febf hwmon: (xgene-hwmon) use appropriate type for the latency value
| * 70e7df6f69 clk: qcom: camcc-sm8250: Use clk_rcg2_shared_ops for some RCGs
| * ba02bb3a01 wifi: rtw88: Fix __rtw_download_firmware() for RTL8814AU
| * 0b660a7f29 wifi: rtw88: Fix download_firmware_validate() for RTL8814AU
| * 23fe8aa8cc ext4: remove writable userspace mappings before truncating page cache
| * 73e7c65b21 ext4: don't write back data before punch hole in nojournal mode
| * 3648ddabcd leds: trigger: netdev: Configure LED blink interval for HW offload
| * 7ef54a11c1 pstore: Change kmsg_bytes storage size to u32
| * 09c3a82664 r8152: add vendor/device ID pair for Dell Alienware AW1022z
| * cc1d408e7c ip: fib_rules: Fetch net from fib_rule in fib[46]_rule_configure().
| * 22ee8dabec arch/powerpc/perf: Check the instruction type before creating sample with perf_mem_data_src
| * fa5d374f6b powerpc/pseries/iommu: memory notifier incorrectly adds TCEs for pmemory
| * 6758d09763 net: fec: Refactor MAC reset to function
| * 2caf52c072 wifi: mac80211: remove misplaced drv_mgd_complete_tx() call
| * bb1f9d6312 wifi: mac80211: don't unconditionally call drv_mgd_complete_tx()
| * 9ef9ecabc6 drm/v3d: Add clock handling
| * 76016797e7 net/mlx5e: reduce the max log mpwrq sz for ECPF and reps
| * dce57841d9 net/mlx5e: reduce rep rxq depth to 256 for ECPF
| * 9411c965e4 net/mlx5e: set the tx_queue_len for pfifo_fast
| * c31e839649 net/mlx5: Extend Ethtool loopback selftest to support non-linear SKB
| * 353cd6804e scsi: target: spc: Fix loop traversal in spc_rsoc_get_descr()
| * 991970293f drm/amd/display/dm: drop hw_support check in amdgpu_dm_i2c_xfer()
| * 26b6548dc7 drm/amdgpu: enlarge the VBIOS binary size limit
| * de67e80ab4 drm/amd/display: Increase block_sequence array size
| * 16c26a6404 drm/amd/display: Initial psr_version with correct setting
| * c59c96b850 drm/amd/display: Update CR AUX RD interval interpretation
| * af2d36316a drm/amdgpu: reset psp->cmd to NULL after releasing the buffer
| * 6881a3a588 drm/amd/display: Don't try AUX transactions on disconnected link
| * ab83ed96f7 drm/amdgpu: Set snoop bit for SDMA for MI series
| * 929cda8e4b soundwire: amd: change the soundwire wake enable/disable sequence
| * 6a7b7e98fe phy: core: don't require set_mode() callback for phy_get_mode() to work
| * 3bf3eae909 serial: sh-sci: Update the suspend/resume support
| * e9bed533ec sched: Reduce the default slice to avoid tasks getting an extra tick
| * 5024d8e58a x86/traps: Cleanup and robustify decode_bug()
| * cc50c7c1a9 clk: qcom: clk-alpha-pll: Do not use random stack value for recalc rate
| * fba6e5d44f clk: qcom: ipq5018: allow it to be bulid on arm32
| * d655dbd82b net/mlx4_core: Avoid impossible mlx4_db_alloc() order value
| * f8a9f45f75 media: v4l: Memset argument to 0 before calling get_mbus_config pad op
| * 0b9d2468c3 media: i2c: imx219: Correct the minimum vblanking value
| * 868c3d8cfc kunit: tool: Use qboot on QEMU x86_64
| * 3d6d13540f smack: Revert "smackfs: Added check catlen"
| * e30f31434e smack: recognize ipv4 CIPSO w/o categories
| * de4332d7a5 pinctrl: devicetree: do not goto err when probing hogs in pinctrl_dt_to_map
| * b3d2a8809e ASoC: soc-dai: check return value at snd_soc_dai_set_tdm_slot()
| * 35160eda61 ASoC: tas2764: Power up/down amp on mute ops
| * c1045e7702 ASoC: tas2764: Mark SW_RESET as volatile
| * 7ac84ee5ee ASoC: tas2764: Add reg defaults for TAS2764_INT_CLK_CFG
| * 9dcce3f40d ASoC: ops: Enforce platform maximum on initial value
| * 3da8088afd firmware: arm_ffa: Reject higher major version as incompatible
| * 3ec539f1e9 net/mlx5: Apply rate-limiting to high temperature warning
| * 69f453ccb9 net/mlx5: Modify LSB bitmask in temperature event to include only the first bit
| * a9b2bb8a4f media: test-drivers: vivid: don't call schedule in loop
| * ddddd806ba vxlan: Join / leave MC group after remote changes
| * d97c38275d ACPI: HED: Always initialize before evged
| * 0d398ed850 PCI: Fix old_size lower bound in calculate_iosize() too
| * d8afc25580 eth: mlx4: don't try to complete XDP frames in netpoll
| * 242272c953 bpf: don't do clean_live_states when state->loop_entry->branches > 0
| * 00e59d1495 can: c_can: Use of_property_present() to test existence of DT property
| * ef7ee9b815 pmdomain: imx: gpcv2: use proper helper for property detection
| * 7c5e736083 RDMA/core: Fix best page size finding when it can cross SG entries
| * c504c11b94 serial: mctrl_gpio: split disable_ms into sync and no_sync APIs
| * df8970a270 drm/amd/display: Add support for disconnected eDP streams
| * 671fea645a i3c: master: svc: Flush FIFO before sending Dynamic Address Assignment(DAA)
| * 398351dcdb EDAC/ie31200: work around false positive build warning
| * c81c2ee1c3 net: pktgen: fix access outside of user given buffer in pktgen_thread_write()
| * 35434d1c93 wifi: rtw89: fw: propagate error code from rtw89_h2c_tx()
| * 5ce1f780a3 wifi: rtw88: Fix rtw_desc_to_mcsrate() to handle MCS16-31
| * 1b1daafd2e wifi: rtw88: Fix rtw_init_ht_cap() for RTL8814AU
| * a95813193a wifi: rtw88: Fix rtw_init_vht_cap() for RTL8814AU
| * f38a1b35c8 scsi: mpt3sas: Send a diag reset if target reset fails
| * 06abee685c clocksource: mips-gic-timer: Enable counter when CPUs start
| * 2e1b3650f5 MIPS: pm-cps: Use per-CPU variables as per-CPU, not per-core
| * ba41e4e627 genirq/msi: Store the IOMMU IOVA directly in msi_desc instead of iommu_cookie
| * 2ed497f369 MIPS: Use arch specific syscall name match function
| * b2ea189a95 crypto: skcipher - Zap type in crypto_alloc_sync_skcipher
| * 3a0c8429ab crypto: ahash - Set default reqsize from ahash_alg
| * b1d1b09698 x86/kaslr: Reduce KASLR entropy on most x86 systems
| * b0556ba85a net/mlx5: Change POOL_NEXT_SIZE define value and make it global
| * 52aa28f7b1 dm: fix unconditional IO throttle caused by REQ_PREFLUSH
| * ecd205a524 libbpf: Fix out-of-bound read
| * e17a6ba079 media: tc358746: improve calculation of the D-PHY timing registers
| * ca9f3ab612 media: adv7180: Disable test-pattern control on adv7180
| * 3cd2aa9367 cpuidle: menu: Avoid discarding useful information
| * 48fd80f7d7 vdpa/mlx5: Fix mlx5_vdpa_get_config() endianness on big-endian machines
| * f5121d5ba7 vhost-scsi: Return queue full for page alloc failures during copy
| * de4469a900 x86/nmi: Add an emergency handler in nmi_desc & use it in nmi_shootdown_cpus()
| * 01768d1564 ASoC: mediatek: mt8188: Add reference for dmic clocks
| * 6dadc66f6c ASoC: mediatek: mt8188: Treat DMIC_GAINx_CUR as non-volatile
| * ec6f764ab7 drm/amd/display: handle max_downscale_src_width fail check
| * 7472e0da47 x86/build: Fix broken copy command in genimage.sh when making isoimage
| * f32344bf95 Octeontx2-af: RPM: Register driver with PCI subsys IDs
| * 3314310b3f soc: ti: k3-socinfo: Do not use syscon helper to build regmap
| * f4e35b2c5f wifi: ath12k: Improve BSS discovery with hidden SSID in 6 GHz band
| * fed3038a80 bonding: report duplicate MAC address in all situations
| * 7819a7dcf0 net: xgene-v2: remove incorrect ACPI_PTR annotation
| * 7b8fe48cc8 net: ethernet: mtk_ppe_offload: Allow QinQ, double ETH_P_8021Q only
| * ff01e0d0a6 leds: pwm-multicolor: Add check for fwnode_property_read_u32
| * 2059e6ea11 drm/amdkfd: KFD release_work possible circular locking
| * 0759d15465 selftests/net: have `gro.sh -t` return a correct exit code
| * b3c8449484 net/mlx5: Avoid report two health errors on same syndrome
| * e2de76c34a firmware: arm_ffa: Set dma_mask for ffa devices
| * a1a5e783d7 PCI: brcmstb: Add a softdep to MIP MSI-X driver
| * 1f27e708e3 PCI: brcmstb: Expand inbound window size up to 64GB
| * f6dfaf5905 wifi: ath12k: Report proper tx completion status to mac80211
| * c7c729d7a8 soc: apple: rtkit: Implement OSLog buffers properly
| * 78b6b2fe17 soc: apple: rtkit: Use high prio work queue
| * 1ae981c91d perf: arm_pmuv3: Call kvm_vcpu_pmu_resync_el0() before enabling counters
| * 372ed21710 fpga: altera-cvp: Increase credit timeout
| * 7b851d6a02 drm/mediatek: mtk_dpi: Add checks for reg_h_fre_con existence
| * d8e57904dc ARM: at91: pm: fix at91_suspend_finish for ZQ calibration
| * a9dd5d748c hwmon: (gpio-fan) Add missing mutex locks
| * 7adb96687c x86/bugs: Make spectre user default depend on MITIGATION_SPECTRE_V2
| * ca16d144d1 clk: imx8mp: inform CCF of maximum frequency of clocks
| * 032f3bf647 media: uvcvideo: Handle uvc menu translation inside uvc_get_le_value
| * faa24692f7 media: uvcvideo: Add sanity check to uvc_ioctl_xu_ctrl_map
| * 9459abd770 drm/rockchip: vop2: Add uv swap for cluster window
| * 9778a91827 ipv4: fib: Move fib_valid_key_len() to rtm_to_fib_config().
| * 2be4a7d532 ALSA: hda/realtek: Enable PC beep passthrough for HP EliteBook 855 G7
| * 30ae895966 perf/hw_breakpoint: Return EOPNOTSUPP for unsupported breakpoint type
| * 2a9a7d2f19 net: pktgen: fix mpls maximum labels list parsing
| * cf81dae86b net: ethernet: ti: cpsw_new: populate netdev of_node
| * 2cbe6d551b pinctrl: bcm281xx: Use "unsigned int" instead of bare "unsigned"
| * 5c9eca180a media: cx231xx: set device_caps for 417
| * a7b8f0addf drm/amd/display: Skip checking FRL_MODE bit for PCON BW determination
| * 3af9db7842 drm/amdgpu: Do not program AGP BAR regs under SRIOV in gfxhub_v1_0.c
| * fef1e1487d remoteproc: qcom_wcnss: Handle platforms with only single power domain
| * c5b23df98a net: phylink: use pl->link_interface in phylink_expects_phy()
| * b27be76f35 drm/gem: Test for imported GEM buffers with helper
| * 121f0335d9 orangefs: Do not truncate file size
| * 3986ef4a9b dm cache: prevent BUG_ON by blocking retries on failed device resumes
| * d5f8c8ec1e media: c8sectpfe: Call of_node_put(i2c_bus) only once in c8sectpfe_probe()
| * 74eea50b7a ARM: tegra: Switch DSI-B clock parent to PLLD on Tegra114
| * da74e11ced ieee802154: ca8210: Use proper setters and getters for bitwise types
| * c0acbeac2b rtc: ds1307: stop disabling alarms on probe
| * fb69189023 tcp: bring back NUMA dispersion in inet_ehash_locks_alloc()
| * 8c1a16d612 ALSA: seq: Improve data consistency at polling
| * 7b1bb4d40d powerpc/prom_init: Fixup missing #size-cells on PowerBook6,7
| * d0e3e19564 arm64: tegra: Resize aperture for the IGX PCIe C5 slot
| * 988770bf31 arm64: tegra: p2597: Fix gpio for vdd-1v8-dis regulator
| * ab13c8a5f7 drm/amdkfd: Set per-process flags only once cik/vi
| * 0acdc4d6e6 crypto: lzo - Fix compression buffer overrun
| * ab4545a254 watchdog: aspeed: Update bootstatus handling
| * ac64f0e893 cpufreq: tegra186: Share policy per cluster
| * 3123b3d445 iommu/amd/pgtbl_v2: Improve error handling
| * 8c305588ba ASoC: qcom: sm8250: explicitly set format in sm8250_be_hw_params_fixup()
| * 5b518c452f auxdisplay: charlcd: Partially revert "Move hwidth and bwidth to struct hd44780_common"
| * 769d1bccd1 gfs2: Check for empty queue in run_queue
| * 2e37d331c0 drm/amd/display: Fix incorrect DPCD configs while Replay/PSR switch
| * 1da3dc7372 drm/amd/display: calculate the remain segments for all pipes
| * 4c2a348875 drm/amd/display: remove minimum Dispclk and apply oem panel timing.
| * 8ebf2709fe ipv6: save dontfrag in cork
| * 2b790fe67e wifi: iwlwifi: fix debug actions order
| * 386a1a0d55 printk: Check CON_SUSPEND when unblanking a console
| * 8a7f2e84f8 hwmon: (dell-smm) Increment the number of fans
| * def5f5bc83 usb: xhci: Don't change the status of stalled TDs on failed Stop EP
| * 903c4a0c2a mmc: sdhci: Disable SD card clock before changing parameters
| * 5ae7524593 mmc: dw_mmc: add exynos7870 DW MMC support
| * 110f701e60 arm64/mm: Check PUD_TYPE_TABLE in pud_bad()
| * 89947eea8f netfilter: conntrack: Bound nf_conntrack sysctl writes
| * da36c3ad7c timer_list: Don't use %pK through printk()
| * 6df3855868 posix-timers: Add cond_resched() to posix_timer_add() search loop
| * 1bef181165 RDMA/uverbs: Propagate errors from rdma_lookup_get_uobject()
| * ba84162730 ext4: do not convert the unwritten extents if data writeback fails
| * d7f3c874ea ext4: reject the 'data_err=abort' option in nojournal mode
| * baf667f23f ASoC: sun4i-codec: support hp-det-gpios property
| * f8d9460347 drm/amdgpu: Update SRIOV video codec caps
| * 2e290e9cf6 mfd: tps65219: Remove TPS65219_REG_TI_DEV_ID check
| * aa97ea576a pinctrl-tegra: Restore SFSEL bit when freeing pins
| * 0fb6c439d2 xen: Add support for XenServer 6.1 platform device
| * 3ecfb92c01 net/smc: use the correct ndev to find pnetid by pnetid table
| * ba8354a613 dm: restrict dm device size to 2^63-512 bytes
| * 44a82d2495 crypto: octeontx2 - suppress auth failure screaming due to negative tests
| * 3edb08b6c2 kbuild: fix argument parsing in scripts/config
| * bc8023ef3b bpf: Allow pre-ordering for bpf cgroup progs
| * aed5bd3a84 ASoC: mediatek: mt6359: Add stub for mt6359_accdet_enable_jack_detect
| * bf9cfc7fbe thunderbolt: Do not add non-active NVM if NVM upgrade is disabled for retimer
| * 4beb1e55f6 objtool: Fix error handling inconsistencies in check()
| * 72d9ccdcfc rtc: rv3032: fix EERD location
| * a7b7bc510e tcp: reorganize tcp_in_ack_event() and tcp_count_delivered()
| * ed88717950 jbd2: do not try to recover wiped journal
| * 5741b9d7bb bpf: Return prog btf_id without capable check
| * 743bb75325 vfio/pci: Handle INTx IRQ_NOTCONNECTED
| * 9f51fa1971 scsi: st: ERASE does not change tape location
| * 01195aa1d6 scsi: st: Tighten the page format heuristics with MODE SELECT
| * 3011cdc5cb hypfs_create_cpu_files(): add missing check for hypfs_mkdir() failure
| * 4eda8a85d4 ext4: reorder capability check last
| * 57a2882cd4 um: Update min_low_pfn to match changes in uml_reserved
| * 7638182277 um: Store full CSGSFS and SS register from mcontext
| * 61225b3395 dlm: make tcp still work in multi-link env
| * 0541822045 i3c: master: svc: Fix missing STOP for master request
| * 398c541ed0 drm/amd/display: Guard against setting dispclk low for dcn31x
| * 3192d59fb7 btrfs: send: return -ENAMETOOLONG when attempting a path that is too long
| * a7f1c5fb7d btrfs: get zone unusable bytes while holding lock at btrfs_reclaim_bgs_work()
| * 4dc7dcb919 btrfs: fix non-empty delayed iputs list on unmount due to async workers
| * de635f9bba btrfs: run btrfs_error_commit_super() early
| * 1a012fd4eb btrfs: avoid linker error in btrfs_find_create_tree_block()
| * 4e74f91a6f btrfs: make btrfs_discard_workfn() block_group ref explicit
| * a4666a8127 i2c: pxa: fix call balance of i2c->clk handling routines
| * 4fc8e3fb8e i2c: qup: Vote for interconnect bandwidth to DRAM
| * bffd5f2815 x86/mm: Check return value from memblock_phys_alloc_range()
| * 7124a9b682 x86/stackprotector/64: Only export __ref_stack_chk_guard on CONFIG_SMP
| * 1752ceaa72 wifi: mt76: mt7996: revise TXS size
| * d9776ce173 wifi: mt76: only mark tx-status-failed frames as ACKed on mt76x0/2
| * 17e3ab067d mmc: host: Wait for Vdd to settle on card power off
| * ea3d95e05e libnvdimm/labels: Fix divide error in nd_label_data_init()
| * f29eb4c63b ext4: on a remount, only log the ro or r/w state when it has changed
| * 44acbc14ea PCI: vmd: Disable MSI remapping bypass under Xen
| * aa59ccacf8 pNFS/flexfiles: Report ENETDOWN as a connection error
| * ec59dfbc1b tools/build: Don't pass test log files to linker
| * e4510552c2 PCI: dwc: ep: Ensure proper iteration over outbound map windows
| * a74286d17e objtool: Properly disable uaccess validation
| * 454a770aea lockdep: Fix wait context check on softirq for PREEMPT_RT
| * 0c2aa72f4f dql: Fix dql->limit value when reset.
| * 3854f4e98e thermal/drivers/qoriq: Power down TMU on system suspend
| * 4a12022166 spi-rockchip: Fix register out of bounds access
| * 4a8ebc45f2 SUNRPC: rpcbind should never reset the port to the value '0'
| * 3ef02a05c8 SUNRPC: rpc_clnt_set_transport() must not change the autobind setting
| * 6b8beb8104 NFSv4: Treat ENETUNREACH errors as fatal for state recovery
| * ee68e068cf cifs: Fix establishing NetBIOS session for SMB2+ connection
| * 89bcd83f67 cifs: add validation check for the fields in smb_aces
| * 88bf6295f0 fbdev: core: tileblit: Implement missing margin clearing for tileblit
| * 1714afc14d fbcon: Use correct erase colour for clearing in fbcon
| * 7f370b7e6f fbdev: fsl-diu-fb: add missing device_remove_file()
| * bbf3bb6dde riscv: Allow NOMMU kernels to access all of RAM
| * 09096ead92 mailbox: use error ret code of of_parse_phandle_with_args()
| * 5641f6b3a4 mailbox: pcc: Use acpi_os_ioremap() instead of ioremap()
| * 7d5b227875 ACPI: PNP: Add Intel OC Watchdog IDs to non-PNP device list
| * c1ab9f008e tracing: Mark binary printing functions with __printf() attribute
| * 50702e7b47 arm64: Add support for HIP09 Spectre-BHB mitigation
| * e0b05cd9ec SUNRPC: Don't allow waiting for exiting tasks
| * c72826efbb NFS: Don't allow waiting for exiting tasks
| * a81dd69f85 NFSv4: Check for delegation validity in nfs_start_delegation_return_locked()
| * 2371143e41 fuse: Return EPERM rather than ENOSYS from link()
| * 209a4da04a smb: client: Store original IO parameters and prevent zero IO sizes
| * 5194597b9c cifs: Fix negotiate retry functionality
| * d50f7ce2fc cifs: Fix querying and creating MF symlinks over SMB1
| * 18066188eb cifs: Add fallback for SMB2 CREATE without FILE_READ_ATTRIBUTES
| * f08641cd21 s390/vfio-ap: Fix no AP queue sharing allowed message written to kernel log
| * a6f24a41ef kconfig: merge_config: use an empty file as initfile
| * adbb39eca3 samples/bpf: Fix compilation failure for samples/bpf on LoongArch Fedora
| * 38aa3cf154 bpf: fix possible endless loop in BPF map iteration
| * f5f169cd90 selftests/bpf: Mitigate sockmap_ktls disconnect_after_delete failure
| * f7292fbb41 drm/amdgpu: Allow P2P access through XGMI
| * 722a6972de ima: process_measurement() needlessly takes inode_lock() on MAY_READ
| * 91ba964a75 net: enetc: refactor bulk flipping of RX buffers to separate function
| * c36f5f659a scsi: mpi3mr: Add level check to control event logging
| * ca85c2d0db vhost-scsi: protect vq->log_used with vq->mutex
| * 20fb292ab5 cgroup: Fix compilation issue due to cgroup_mutex not being exported
| * c15dc980ff dma-mapping: avoid potential unused data compilation warning
| * 2cab5ea2f5 intel_th: avoid using deprecated page->mapping, index fields
| * b49b5132e4 virtio_ring: Fix data race by tagging event_triggered as racy for KCSAN
| * 42d15918da scsi: ufs: Introduce quirk to extend PA_HIBERN8TIME for UFS devices
| * fe8421e853 scsi: target: iscsi: Fix timeout on deleted connection
| * beb6382add nvmem: qfprom: switch to 4-byte aligned reads
| * 35d77c8d88 nvmem: core: update raw_len if the bit reading is required
| * 497f19cacb nvmem: core: verify cell's raw_len
| * 04c81ac33a nvmem: rockchip-otp: add rk3576 variant data
| * 266e5f4813 nvmem: rockchip-otp: Move read-offset into variant-data
| * b3145041e9 cpufreq: Add SM8650 to cpufreq-dt-platdev blocklist
| * 1efbe2c7a0 phy: renesas: rcar-gen3-usb2: Assert PLL reset on PHY power off
| * cf60d19721 phy: renesas: rcar-gen3-usb2: Lock around hardware registers and driver data
| * e992f2581b phy: renesas: rcar-gen3-usb2: Move IRQ request in probe
| * eb4fdee1d6 phy: renesas: rcar-gen3-usb2: Add support to initialize the bus
| * e668cbeb95 i2c: designware: Fix an error handling path in i2c_dw_pci_probe()
| * ad40588df1 i2c: designware: Use temporary variable for struct device
| * 0ef9396a7d i2c: designware: Remove ->disable() callback
| * 33378973d8 i2c: designware: Uniform initialization flow for polling mode
| * d1954e7aa1 gpio: pca953x: fix IRQ storm on system wake up
| * 58e3459843 gpio: pca953x: Simplify code with cleanup helpers
| * 475d6ebc91 gpio: pca953x: Split pca953x_restore_context() and pca953x_save_context()
* | 9163e678c1 Merge 6.6.92 into android15-6.6-lts
|\|
| * ffaf617813 Linux 6.6.92
| * 35559ebcdc drm/amdgpu: fix pm notifier handling
| * c1d42a239b phy: tegra: xusb: remove a stray unlock
| * 3cfec712a4 btrfs: don't BUG_ON() when 0 reference count at btrfs_lookup_extent_info()
| * b3598f5321 sctp: add mutual exclusion in proc_sctp_do_udp_port()
| * 93df6da64b hwpoison, memory_hotplug: lock folio before unmap hwpoisoned folio
| * 7bcd29181b memblock: Accept allocated memory before use in memblock_double_array()
| * 6133444518 LoongArch: Explicitly specify code model in Makefile
| * f521c2a0c0 bpf, arm64: Fix address emission with tag-based KASAN enabled
| * d9664e6ff0 bpf, arm64: Fix trampoline for BPF_TRAMP_F_CALL_ORIG
| * bfc26aa3ab mm/migrate: correct nr_failed in migrate_pages_sync()
| * 62798e3291 selftests/mm: compaction_test: support platform with huge mount of memory
| * 962ce9028c usb: typec: ucsi: displayport: Fix deadlock
| * a0a736d985 Bluetooth: btnxpuart: Fix kernel panic during FW release
| * 98fdd2f612 mm/page_alloc: fix race condition in unaccepted memory handling
| * e24073cd8f x86/its: Fix build error for its_static_thunk()
| * d530dd65f6 dmaengine: idxd: Refactor remove call with idxd_cleanup() helper
| * a6cce31632 dmaengine: idxd: fix memory leak in error handling path of idxd_pci_probe
| * 6e94a2c3e4 dmaengine: idxd: fix memory leak in error handling path of idxd_alloc
| * d2d05fd0fc dmaengine: idxd: Add missing idxd cleanup to fix memory leak in remove call
| * 32ec46adf1 dmaengine: idxd: Add missing cleanups in cleanup internals
| * 404aad9b0c dmaengine: idxd: Add missing cleanup for early error out in idxd_setup_internals
| * 6ab9526e32 dmaengine: idxd: fix memory leak in error handling path of idxd_setup_groups
| * 9ba964b17f dmaengine: idxd: fix memory leak in error handling path of idxd_setup_engines
| * 4784621199 dmaengine: idxd: fix memory leak in error handling path of idxd_setup_wqs
| * 1c9e4ed011 dmaengine: ti: k3-udma: Use cap_mask directly from dma_device structure instead of a local copy
| * d87f1cddc5 dmaengine: ti: k3-udma: Add missing locking
| * 5340d0e84d net: qede: Initialize qede_ll_ops with designated initializer
| * 5e700b06b9 wifi: mt76: disable napi on driver removal
| * 55018ca1cc spi: tegra114: Use value to check for invalid delays
| * d8843fb1a2 smb: client: fix memory leak during error handling for POSIX mkdir
| * c682a19344 scsi: sd_zbc: block: Respect bio vector limits for REPORT ZONES buffer
| * a1546ec7e0 phy: renesas: rcar-gen3-usb2: Set timing registers only once
| * 99fc6f1c37 phy: renesas: rcar-gen3-usb2: Fix role detection on unbind/bind
| * 2b169aa46f phy: Fix error handling in tegra_xusb_port_init
| * ba25131b3c phy: tegra: xusb: Use a bitmask for UTMI pad power state tracking
| * b1627af84e tracing: samples: Initialize trace_array_printk() with the correct function
| * 9d1216bf49 ftrace: Fix preemption accounting for stacktrace filter command
| * fa75d941b1 ftrace: Fix preemption accounting for stacktrace trigger command
| * 6d2d375205 Drivers: hv: vmbus: Remove vmbus_sendpacket_pagebuffer()
| * c0f3f0c88f Drivers: hv: Allow vmbus_sendpacket_mpb_desc() to create multiple ranges
| * 115c789047 hv_netvsc: Remove rmsg_pgcnt
| * 307963a3ab hv_netvsc: Preserve contiguous PFN grouping in the page buffer array
| * da5d5bc3ad hv_netvsc: Use vmbus_sendpacket_mpb_desc() to send VMBus messages
| * c9d2b9a80d dma-buf: insert memory barrier before updating num_fences
| * fc8fa09d40 ALSA: usb-audio: Add sample rate quirk for Microdia JP001 USB Camera
| * 46b33b9725 ALSA: usb-audio: Add sample rate quirk for Audioengine D1
| * 2759938c4a ALSA: es1968: Add error handling for snd_pcm_hw_constraint_pow2()
| * 124522cc6b ACPI: PPTT: Fix processor subtable walk
| * 6456c818dc drm/amd/display: Avoid flooding unnecessary info messages
| * 43b35d404f drm/amd/display: Correct the reply value when AUX write incomplete
| * 34439d470b LoongArch: uprobes: Remove redundant code about resume_era
| * 840663825e LoongArch: uprobes: Remove user_{en,dis}able_single_step()
| * ad1e441016 LoongArch: Fix MAX_REG_OFFSET calculation
| * e89bd983f3 LoongArch: Save and restore CSR.CNTC for hibernation
| * f4c0a7999f LoongArch: Prevent cond_resched() occurring within kernel-fpu
| * 7722fba9f3 udf: Make sure i_lenExtents is uptodate on inode eviction
| * a0302cb139 dmaengine: Revert "dmaengine: dmatest: Fix dmatest waiting less when interrupted"
| * e1b755c0d8 NFSv4/pnfs: Reset the layout state after a layoutreturn
| * c1dd9ccc0f tsnep: fix timestamping with a stacked DSA driver
| * 0980e62ab8 tsnep: Inline small fragments within TX descriptor
| * 406d05da26 net/tls: fix kernel panic when alloc_page failed
| * f1ecccb5cd mlxsw: spectrum_router: Fix use-after-free when deleting GRE net devices
| * fde33ab3c0 wifi: mac80211: Set n_channels after allocating struct cfg80211_scan_request
| * 72ee7af615 octeontx2-af: Fix CGX Receive counters
| * 398989c780 net: ethernet: mtk_eth_soc: fix typo for declaration MT7988 ESW capability
| * 03c42d35c3 octeontx2-pf: macsec: Fix incorrect max transmit size in TX secy
| * 7bddac8603 regulator: max20086: fix invalid memory access
| * 9c2d0899c6 qlcnic: fix memory leak in qlcnic_sriov_channel_cfg_cmd()
| * b48a47e137 net/mlx5e: Disable MACsec offload for uplink representor profile
| * 4a7d4031fa ALSA: sh: SND_AICA should depend on SH_DMA_API
| * c70c021be0 nvme-pci: acquire cq_poll_lock in nvme_poll_irqdisable
| * 64638f3103 nvme-pci: make nvme_pci_npages_prp() __always_inline
| * cbe3f04451 net: dsa: sja1105: discard incoming frames in BR_STATE_LISTENING
| * 1d60c0781c net: cadence: macb: Fix a possible deadlock in macb_halt_tx.
| * 4e132f56bc ALSA: ump: Fix a typo of snd_ump_stream_msg_device_info
| * 8a8dc7fd1e ALSA: seq: Fix delivery of UMP events to group ports
| * e4d8a51732 net: mctp: Ensure keys maintain only one ref to corresponding dev
| * acab78ae12 net: mctp: Don't access ifa_index when missing
| * 2d45eeb7d5 mctp: no longer rely on net->dev_index_head[]
| * 7777ca11a4 tools/net/ynl: ethtool: fix crash when Hardware Clock info is missing
| * 95a9e08ea5 tools: ynl: ethtool.py: Output timestamping statistics from tsinfo-get operation
| * a7d6e0ac0a net_sched: Flush gso_skb list too during ->change()
| * 31492b8386 Bluetooth: MGMT: Fix MGMT_OP_ADD_DEVICE invalid device flags
| * b8084e8135 spi: loopback-test: Do not split 1024-byte hexdumps
| * a6879a076b nfs: handle failure of nfs_get_lock_context in unlock path
| * ad6caaf29b HID: uclogic: Add NULL check in uclogic_input_configured()
| * 4715f16b1e HID: thrustmaster: fix memory leak in thrustmaster_interrupts()
| * ee4c5a2a38 RDMA/rxe: Fix slab-use-after-free Read in rxe_queue_cleanup bug
| * 99173e6f28 iio: chemical: sps30: use aligned_s64 for timestamp
| * 02023c2899 iio: adc: ad7768-1: Fix insufficient alignment of timestamp.
| * 41ade94ad4 Revert "drm/amd: Stop evicting resources on APUs in suspend"
| * c1a4d21a15 drm/amd: Add Suspend/Hibernate notification callback support
| * d59f455951 drm/amdgpu: trigger flr_work if reading pf2vf data failed
| * 9a6d2e1944 drm/amdgpu: Fix the runtime resume failure issue
| * 95db39dbaa drm/amd: Stop evicting resources on APUs in suspend
| * 220192d514 iio: adc: ad7266: Fix potential timestamp alignment issue.
| * ec24e62a1d KVM: SVM: Forcibly leave SMM mode on SHUTDOWN interception
| * 635b3cc639 KVM: SVM: Update SEV-ES shutdown intercepts with more metadata
| * 2402a3ef9e firmware: arm_scmi: Fix timeout checks on polling path
| * b388129425 firmware: arm_scmi: Track basic SCMI communication debug metrics
| * 0b175f7c4f firmware: arm_scmi: Add support for debug metrics at the interface
| * 844b899f78 firmware: arm_scmi: Add message dump traces for bad and unexpected replies
| * 6bddbdbeb7 firmware: arm_scmi: Add helper to trace bad messages
| * 7ea1913daa tpm: tis: Double the timeout B to 4s
| * 70417bada4 tracing: probes: Fix a possible race in trace_probe_log APIs
| * 4e63b6907d cgroup/cpuset: Extend kthread_is_per_cpu() check to all PF_NO_SETAFFINITY tasks
| * 44a4c0dba8 platform/x86: asus-wmi: Fix wlan_ctrl_by_user detection
| * 6b92eee6b9 platform/x86/amd/pmc: Declare quirk_spurious_8042 for MECHREVO Wujie 14XA (GX4HRXL)
| * bfe0dd3b73 binfmt_elf: Move brk for static PIE even if ASLR disabled
| * 622b7267d7 binfmt_elf: Honor PT_LOAD alignment for static PIE
| * d9c1758756 binfmt_elf: Calculate total_size earlier
| * d3642f29f5 selftests/exec: Build both static and non-static load_address tests
| * 7435fa1c53 binfmt_elf: Leave a gap between .bss and brk
| * 0d6a2bada5 selftests/exec: load_address: conform test to TAP format output
| * fe7eb1f32a binfmt_elf: elf_bss no longer used by load_elf_binary()
| * c6df4c71ae binfmt_elf: Support segments with 0 filesz and misaligned starts
| * 11689bb0f7 fs/xattr.c: fix simple_xattr_list to always include security.* xattrs
* | 70cce264ed UPSTREAM: x86/its: Fix build error for its_static_thunk()
* | 195969c110 Merge 6.6.91 into android15-6.6-lts
|\|
| * 615b9e10e3 Linux 6.6.91
| * 772934d906 x86/its: FineIBT-paranoid vs ITS
| * 9f69fe3888 x86/its: Fix build errors when CONFIG_MODULES=n
| * 3b2234cd50 x86/its: Use dynamic thunks for indirect branches
| * 6699bf27a4 x86/ibt: Keep IBT disabled during alternative patching
| * 9e7364c32c x86/its: Align RETs in BHB clear sequence to avoid thunking
| * ba1d703626 x86/its: Add support for RSB stuffing mitigation
| * 61bed1ddb2 x86/its: Add "vmexit" option to skip mitigation on some CPUs
| * f7ef7f6ccf x86/its: Enable Indirect Target Selection mitigation
| * 4754e29f43 x86/its: Add support for ITS-safe return thunk
| * c5a5d80752 x86/its: Add support for ITS-safe indirect thunk
| * 195579752c x86/its: Enumerate Indirect Target Selection (ITS) bug
| * c6c1319d19 Documentation: x86/bugs/its: Add ITS documentation
| * a42e916231 x86/speculation: Remove the extra #ifdef around CALL_NOSPEC
| * 0a90b50ebf x86/speculation: Add a conditional CS prefix to CALL_NOSPEC
| * 2eecf5cf21 x86/speculation: Simplify and make CALL_NOSPEC consistent
| * a8a8826bf6 x86/bhi: Do not set BHI_DIS_S in 32-bit mode
| * a0ff7f679b x86/bpf: Add IBHF call at end of classic BPF
| * 1afebfeaf1 x86/bpf: Call branch history clearing sequence on exit
| * ca8a5626ca arm64: proton-pack: Add new CPUs 'k' values for branch mitigation
| * 80251f6202 arm64: bpf: Only mitigate cBPF programs loaded by unprivileged users
| * 42a20cf510 arm64: bpf: Add BHB mitigation to the epilogue for cBPF programs
| * 73591041a5 arm64: proton-pack: Expose whether the branchy loop k value
| * 854da0ed06 arm64: proton-pack: Expose whether the platform is mitigated by firmware
| * 054fc98d69 arm64: insn: Add support for encoding DSB
| * 746e7d285d io_uring: ensure deferred completions are posted for multishot
| * 51f1389b5f io_uring: always arm linked timeouts prior to issue
| * 00f0dd1a01 do_umount(): add missing barrier before refcount checks in sync case
| * eba09f4239 nvme: unblock ctrl state transition for firmware update
| * f5a7d616a5 drm/panel: simple: Update timings for AUO G101EVN010
| * 1c0620213f MIPS: Fix MAX_REG_OFFSET
| * 66c4ec15e3 iio: adc: dln2: Use aligned_s64 for timestamp
| * 8b5273051b iio: accel: adxl355: Make timestamp 64-bit aligned using aligned_s64
| * 9519771908 types: Complement the aligned types with signed 64-bit one
| * 85d430aef4 iio: temp: maxim-thermocouple: Fix potential lack of DMA safe buffer.
| * 2b58e7c1e4 iio: accel: adxl367: fix setting odr for activity time update
| * 37a55b692d usb: usbtmc: Fix erroneous generic_read ioctl return
| * 1991ed796d usb: usbtmc: Fix erroneous wait_srq ioctl return
| * e96be8bd53 usb: usbtmc: Fix erroneous get_stb ioctl error returns
| * e2fef620e5 USB: usbtmc: use interruptible sleep in usbtmc_read
| * 14f298c521 usb: typec: ucsi: displayport: Fix NULL pointer access
| * c9d8b0932e usb: typec: tcpm: delay SNK_TRY_WAIT_DEBOUNCE to SRC_TRYWAIT transition
| * 2d44ee69e6 usb: host: tegra: Prevent host controller crash when OTG port is used
| * 9b09b99a95 usb: gadget: Use get_status callback to set remote wakeup capability
| * 0b32d03e79 usb: gadget: tegra-xudc: ACK ST_RC after clearing CTRL_RUN
| * dffa51cf2d usb: gadget: f_ecm: Add get_status callback
| * 1981926eb3 usb: cdnsp: fix L1 resume issue for RTL_REVISION_NEW_LPM version
| * d846031225 usb: cdnsp: Fix issue with resuming from L1
| * 8c133a0852 ocfs2: stop quota recovery before disabling quotas
| * cc335d4f4e ocfs2: implement handshaking with ocfs2 recovery thread
| * c7441aa8d0 ocfs2: switch osb->disable_recovery to enum
| * cbd5108119 x86/microcode: Consolidate the loader enablement checking
| * faa9059631 module: ensure that kobject_put() is safe for module type kobjects
| * 7e1c774840 clocksource/i8253: Use raw_spinlock_irqsave() in clockevent_i8253_disable()
| * cbfaf46b88 xenbus: Use kref to track req lifetime
| * ffa14d4dc7 xen: swiotlb: Use swiotlb bouncing if kmalloc allocation demands it
| * 2ed98e89eb smb: client: Avoid race in open_cached_dir with lease breaks
| * 671c05434d usb: uhci-platform: Make the clock really optional
| * 03f108a718 drm/amdgpu/hdp6: use memcfg register to post the write for HDP flush
| * 1824e914e2 drm/amdgpu/hdp5: use memcfg register to post the write for HDP flush
| * 7133ff3bed drm/amdgpu/hdp5.2: use memcfg register to post the write for HDP flush
| * 05340a423c drm/amdgpu/hdp4: use memcfg register to post the write for HDP flush
| * f647ce6d9b drm/amd/display: Copy AUX read reply data whenever length > 0
| * 79d982ae2f drm/amd/display: Fix wrong handling for AUX_DEFER case
| * 89850f11bb drm/amd/display: Remove incorrect checking in dmub aux handler
| * cee6856fbf drm/amd/display: Fix the checking condition in dmub aux handling
| * c8a91debb0 drm/amd/display: more liberal vmin/vmax update for freesync
| * 12125f7d9c drm/v3d: Add job to pending list if the reset was skipped
| * 9ce6628513 iio: imu: st_lsm6dsx: fix possible lockup in st_lsm6dsx_read_tagged_fifo
| * da33c4167b iio: imu: st_lsm6dsx: fix possible lockup in st_lsm6dsx_read_fifo
| * 3413b87a9e iio: adis16201: Correct inclinometer channel resolution
| * 7748b1b27f iio: adc: rockchip: Fix clock initialization sequence
| * badda05d6b iio: adc: ad7606: fix serial register access
| * b229fa0d09 drm/amd/display: Shift DMUB AUX reply command if necessary
| * d41072906a x86/mm: Eliminate window where TLB flushes may be inadvertently skipped
| * 0f2b18c076 staging: axis-fifo: Correct handling of tx_fifo_depth for size validation
| * f4d6b9f413 staging: axis-fifo: Remove hardware resets for user errors
| * 90db122533 staging: iio: adc: ad7816: Correct conditional logic for store mode
| * f7adc49438 Input: synaptics - enable InterTouch on TUXEDO InfinityBook Pro 14 v5
| * 85961bb7e4 Input: synaptics - enable SMBus for HP Elitebook 850 G1
| * cbd085c424 Input: synaptics - enable InterTouch on Dell Precision M3800
| * 82b02402ee Input: synaptics - enable InterTouch on Dynabook Portege X30L-G
| * c321045f08 Input: synaptics - enable InterTouch on Dynabook Portege X30-D
| * 2d9d6a4cd3 Input: xpad - fix two controller table values
| * 56b4e8b621 Input: xpad - add support for 8BitDo Ultimate 2 Wireless Controller
| * cbc82e7db1 Input: xpad - fix Share button on Xbox One controllers
| * 90fa6015ff Input: mtk-pmic-keys - fix possible null pointer dereference
| * f72f017207 Input: cyttsp5 - fix power control issue on wakeup
| * dadbe33fee Input: cyttsp5 - ensure minimum reset pulse width
| * ce4e826dbf net: dsa: b53: fix learning on VLAN unaware bridges
| * b1c9c58d09 net: dsa: b53: always rejoin default untagged VLAN on bridge leave
| * ca071649e2 net: dsa: b53: fix VLAN ID for untagged vlan on bridge leave
| * a143c39add net: dsa: b53: fix flushing old pvid VLAN on pvid change
| * aa00a30a28 net: dsa: b53: fix clearing PVID of a port
| * 0121c19ebd net: dsa: b53: allow leaky reserved multicast
| * 355b052633 bpf: Scrub packet on bpf_redirect_peer
| * a3dfec4854 netfilter: ipset: fix region locking in hash types
| * adbc8cc116 ipvs: fix uninit-value for saddr in do_output_route4
| * 42b7a7c962 can: gw: fix RCU/BH usage in cgw_create_job()
| * 3455e6394f can: mcp251xfd: fix TDC setting for low data bit rates
| * 68f29bb97a net: ethernet: mtk_eth_soc: reset all TX queues on DMA free
| * 370635397b netdevice: add netdev_tx_reset_subqueue() shorthand
| * fa1fe9f3dd gre: Fix again IPv6 link-local address generation.
| * 31ff70ad39 sch_htb: make htb_deactivate() idempotent
| * af9e2d4732 ksmbd: fix memory leak in parse_lease_state()
| * 0236742bd9 openvswitch: Fix unsafe attribute parsing in output_userspace()
| * fec1f9e9a6 ksmbd: Fix UAF in __close_file_table_ids
| * 04c8a38c60 ksmbd: prevent out-of-bounds stream writes by validating *pos
| * 6ee551672c ksmbd: prevent rename with empty string
| * 3482037d0f can: mcp251xfd: mcp251xfd_remove(): fix order of unregistration calls
| * 73dde269a1 wifi: cfg80211: fix out-of-bounds access during multi-link element defragmentation
| * d2520dc79c can: mcan: m_can_class_unregister(): fix order of unregistration calls
| * 0efe996568 arm64: dts: imx8mm-verdin: Link reg_usdhc2_vqmmc to usdhc2
| * 38433aa31b dm: add missing unlock on in dm_keyslot_evict()
* | 91c0e1c874 Merge 6.6.90 into android15-6.6-lts
|\|
| * 9c2dd8954d Linux 6.6.90
* | 19ad69ff3a Merge ed3248a403 ("dm: fix copying after src array boundaries") into android15-6.6-lts
|\|
| * ed3248a403 dm: fix copying after src array boundaries
* | 32c69e085e Reapply "dm: always update the array size in realloc_argv on success"
* | 75ac935d8a Merge fe465003bb ("xhci: fix possible null pointer dereference at secondary interrupter removal") into android15-6.6-lts
|\|
| * fe465003bb xhci: fix possible null pointer dereference at secondary interrupter removal
* | 1654cc3350 Merge 3efb29f6a7 ("usb: xhci: Check for xhci->interrupters being allocated in xhci_mem_clearup()") into android15-6.6-lts
|\|
| * 3efb29f6a7 usb: xhci: Check for xhci->interrupters being allocated in xhci_mem_clearup()
* | f9fc1f58f4 Merge android15-6.6 into android15-6.6-lts
* | a9e29a7fa1 Merge bbc66abcd2 ("drm/amd/display: Fix slab-use-after-free in hdcp") into android15-6.6-lts
|\|
| * bbc66abcd2 drm/amd/display: Fix slab-use-after-free in hdcp
| * 97a918755a drm/amd/display: Add scoped mutexes for amdgpu_dm_dhcp
| * 4306dbd767 iommu/arm-smmu-v3: Fix iommu_device_probe bug due to duplicated stream ids
| * 3dc33f145a iommu/arm-smmu-v3: Use the new rb tree helpers
| * 8f2451ebaf drivers: base: handle module_kobject creation
| * ace531f2fe kernel: globalize lookup_or_create_module_kobject()
| * 0ecbb45e30 kernel: param: rename locate_module_kobject
* | b60621691f Merge baa3ebbad8 ("xhci: Limit time spent with xHC interrupts disabled during bus resume") into android15-6.6-lts
|\|
| * baa3ebbad8 xhci: Limit time spent with xHC interrupts disabled during bus resume
| * ebb7195dac xhci: support setting interrupt moderation IMOD for secondary interrupters
| * cc64775afb usb: xhci: check if 'requested segments' exceeds ERST capacity
| * 3733697cb7 xhci: Add helper to set an interrupters interrupt moderation interval
| * bcd191d7ba xhci: add support to allocate several interrupters
| * ff0a51339a xhci: split free interrupter into separate remove and free parts
| * 3f6e8ad65b xhci: Clean up stale comment on ERST_SIZE macro
| * f377434945 xhci: Use more than one Event Ring segment
| * 6299fb6190 xhci: Set DESI bits in ERDP register correctly
* | 0604a6126c Revert "cpufreq: Fix setting policy limits when frequency tables are used"
* | 3d14104593 Revert "net: Rename mono_delivery_time to tstamp_type for scalabilty"
* | cf8861e19d Revert "Bluetooth: L2CAP: copy RX timestamp to new fragments"
* | 8d05667716 Revert "bpf: add find_containing_subprog() utility function"
* | 6eac5086a6 Revert "bpf: refactor bpf_helper_changes_pkt_data to use helper number"
* | 7b53e4c6fe Revert "bpf: track changes_pkt_data property for global functions"
* | df17196324 Revert "selftests/bpf: test for changing packet data from global functions"
* | 4fe4dffa13 Revert "bpf: check changes_pkt_data property for extension programs"
* | bb9c273562 Revert "selftests/bpf: freplace tests for tracking of changes_packet_data"
* | 3414f3064d Revert "bpf: consider that tail calls invalidate packet pointers"
* | 16ae98c704 Revert "selftests/bpf: validate that tail call invalidates packet pointers"
* | b51ffb61ee Revert "bpf: fix null dereference when computing changes_pkt_data of prog w/o subprogs"
* | 1e513665cb Revert "selftests/bpf: extend changes_pkt_data with cases w/o subprograms"
* | 58a59d0ee6 Merge 5e8c311318 ("Revert "drm/meson: vclk: fix calculation of 59.94 fractional rates"") into android15-6.6-lts
|\|
| * 5e8c311318 Revert "drm/meson: vclk: fix calculation of 59.94 fractional rates"
| * f129d8aea8 arm64: dts: st: Use 128kB size for aliased GIC400 register access on stm32mp25 SoCs
| * ce2fa5bc01 arm64: dts: st: Adjust interrupt-controller for stm32mp25 SoCs
| * fabe730323 ARM: dts: opos6ul: add ksz8081 phy properties
| * f68f93c9d0 firmware: arm_ffa: Skip Rx buffer ownership release if not acquired
| * 2fbf6c9695 firmware: arm_scmi: Balance device refcount when destroying devices
| * e762ec4c7b sch_ets: make est_qlen_notify() idempotent
| * 3626915603 sch_qfq: make qfq_qlen_notify() idempotent
| * 9030a91235 sch_hfsc: make hfsc_qlen_notify() idempotent
| * 077e355dae sch_drr: make drr_qlen_notify() idempotent
| * bbbf5e0f87 sch_htb: make htb_qlen_notify() idempotent
| * b9ded1fb29 riscv: Pass patch_text() the length in bytes
| * 85484bc611 ASoC: soc-core: Stop using of_property_read_bool() for non-boolean properties
| * 54583776e4 ASoC: Use of_property_read_bool()
| * 29ea1abc7d net: vertexcom: mse102x: Fix RX error handling
| * 6ac8ec92f5 net: vertexcom: mse102x: Add range check for CMD_RTS
| * 04d9c34079 net: vertexcom: mse102x: Fix LEN_MASK
| * e1301580bf net: vertexcom: mse102x: Fix possible stuck of SPI interrupt
| * cb5922f74b net: hns3: defer calling ptp_clock_register()
| * accb5a8ef1 net: hns3: fixed debugfs tm_qset size
| * c1b1d3b0d8 net: hns3: fix an interrupt residual problem
| * 67d587bfa3 net: hns3: store rx VLAN tag offload state for VF
| * 7e1ca1bed3 octeon_ep: Fix host hang issue during device reboot
| * 0e32cbcbc4 net: fec: ERR007885 Workaround for conventional TX
| * dae1ce27ce net: lan743x: Fix memleak issue when GSO enabled
| * ec00ea5645 nvme-tcp: fix premature queue removal and I/O failover
| * 252a5a67ad bnxt_en: Fix ethtool -d byte order for 32-bit values
| * 4d69864915 bnxt_en: Fix out-of-bound memcpy() during ethtool -w
| * 3facfd7200 bnxt_en: Fix coredump logic to free allocated buffer
| * e027284499 net: ipv6: fix UDPv6 GSO segmentation with NAT
| * 8acf08b1e4 net: dsa: felix: fix broken taprio gate states after clock jump
| * cb625f783f net: ethernet: mtk_eth_soc: fix SER panic with 4GB+ RAM
| * 683ad6d7f4 igc: fix lock order in igc_ptp_reset
| * 2399d1a750 net: mdio: mux-meson-gxl: set reversed bit when using internal phy
| * 1bbbdfc247 net: dlink: Correct endianness handling of led_mode
| * 012a413b28 nvme-pci: fix queue unquiesce check on slot_reset
| * ce4f77bef2 ALSA: ump: Fix buffer overflow at UMP SysEx message conversion
| * eae60cfe25 ice: Check VF VSI Pointer Value in ice_vc_add_fdir_fltr()
| * a43783119e net_sched: qfq: Fix double list add in class with netem as child qdisc
| * 72c3da7e6c net_sched: ets: Fix double list add in class with netem as child qdisc
| * 6082a87af4 net_sched: hfsc: Fix a UAF vulnerability in class with netem as child qdisc
| * db205b92df net_sched: drr: Fix double list add in class with netem as child qdisc
| * 9b467c5bcd pds_core: remove write-after-free of client_id
| * 0861fccd43 pds_core: specify auxiliary_device to be created
| * da23d7edb2 pds_core: make pdsc_auxbus_dev_del() void
| * 452fa190dd pds_core: delete VF dev on reset
| * 5e51c0b51b pds_core: check health in devcmd wait
| * 5ba9380f79 net: ethernet: mtk-star-emac: rearm interrupts in rx_poll only when advised
| * 7cb10f17bd net: ethernet: mtk-star-emac: fix spinlock recursion issues on rx/tx poll
| * a74777bb18 net: mscc: ocelot: delete PVID VLAN when readding it as non-PVID
| * dc7ffe02ad net: mscc: ocelot: treat 802.1ad tagged traffic as 802.1Q-untagged
| * 8c7bfb6198 Bluetooth: L2CAP: copy RX timestamp to new fragments
| * 8dde02229b net: Rename mono_delivery_time to tstamp_type for scalabilty
| * 2e8d44ebaa Bluetooth: btusb: avoid NULL pointer dereference in skb_dequeue()
| * dd20a33a0d net/mlx5: E-switch, Fix error handling for enabling roce
| * bab395c891 net/mlx5: E-Switch, Initialize MAC Address for Default GID
| * 3576e9a80b vxlan: vnifilter: Fix unlocked deletion of default FDB entry
| * 029f11349e powerpc/boot: Fix dash warning
| * 36a9a26478 wifi: plfxlc: Remove erroneous assert in plfxlc_mac_release
| * c5a5de348c drm/i915/pxp: fix undefined reference to `intel_pxp_gsccs_is_ready_for_sessions'
| * 27a1efe21f powerpc/boot: Check for ld-option support
| * 9a8d4d7072 book3s64/radix : Align section vmemmap start address to PAGE_SIZE
| * 8890eeae5c ASoC: soc-pcm: Fix hw_params() and DAPM widget sequence
| * a6dbcf69d4 iommu: Handle race with default domain setup
| * a1723e9c53 KVM: x86: Load DR6 with guest value only before entering .vcpu_run() loop
| * 46acbfbcc0 PCI: imx6: Skip controller_id generation logic for i.MX7D
| * 28cfd94980 Revert "PCI: imx6: Skip controller_id generation logic for i.MX7D"
| * 47e24c86cb selftests/bpf: extend changes_pkt_data with cases w/o subprograms
| * 3a467d938d bpf: fix null dereference when computing changes_pkt_data of prog w/o subprogs
| * b7c0d2d4ef selftests/bpf: validate that tail call invalidates packet pointers
| * f1692ee23d bpf: consider that tail calls invalidate packet pointers
| * 70234fbfa2 selftests/bpf: freplace tests for tracking of changes_packet_data
| * 7197fc4acd bpf: check changes_pkt_data property for extension programs
| * 85a30a4639 selftests/bpf: test for changing packet data from global functions
| * 79751e9227 bpf: track changes_pkt_data property for global functions
| * 3d496a1eaa bpf: refactor bpf_helper_changes_pkt_data to use helper number
| * f0b56f1d2f bpf: add find_containing_subprog() utility function
| * 056ebbddb8 tracing: Fix oob write in trace_seq_to_buffer()
| * 25687fd196 cpufreq: Fix setting policy limits when frequency tables are used
| * cbd54987db cpufreq: Avoid using inconsistent policy->min and policy->max
* | a10c89734c Revert "dm: always update the array size in realloc_argv on success"
* | 800d65c876 Merge e76948644e ("smb: client: fix zero length for mkdir POSIX create context") into android-mainline
|\|
| * e76948644e smb: client: fix zero length for mkdir POSIX create context
| * b447463562 ksmbd: fix use-after-free in kerberos authentication
| * a45445b609 platform/x86/intel-uncore-freq: Fix missing uncore sysfs during CPU hotplug
| * 3a868a7f8d platform/x86/amd: pmc: Require at least 2.5 seconds between HW sleep cycles
| * f39493cf70 iommu/vt-d: Apply quirk_iommu_igfx for 8086:0044 (QM57/QS57)
| * 13d67528e1 iommu/amd: Fix potential buffer overflow in parse_ivrs_acpihid
| * 64e95bb379 dm: always update the array size in realloc_argv on success
* | 802865c2f2 Merge ecc7f159d1 ("dm-integrity: fix a warning on invalid table line") into android-mainline
|\|
| * ecc7f159d1 dm-integrity: fix a warning on invalid table line
* | 9bcea087a2 Merge c8c8305228 ("dm-bufio: don't schedule in atomic context") into android-mainline
|\|
| * c8c8305228 dm-bufio: don't schedule in atomic context
* | 2b2f0d5a3a Merge 08424a0922 ("wifi: brcm80211: fmac: Add error handling for brcmf_usb_dl_writeimage()") into android-mainline
|\|
| * 08424a0922 wifi: brcm80211: fmac: Add error handling for brcmf_usb_dl_writeimage()
* | 57af78f45e Merge 8daa71c694 ("tracing: Do not take trace_event_sem in print_event_fields()") into android-mainline
|\|
| * 8daa71c694 tracing: Do not take trace_event_sem in print_event_fields()
* | f6c8c492b2 Merge 75fda0e591 ("spi: tegra114: Don't fail set_cs_timing when delays are zero") into android-mainline
|\|
| * 75fda0e591 spi: tegra114: Don't fail set_cs_timing when delays are zero
* | 8f486cc103 Merge 3aa4aff158 ("mmc: renesas_sdhi: Fix error handling in renesas_sdhi_probe") into android-mainline
|\|
| * 3aa4aff158 mmc: renesas_sdhi: Fix error handling in renesas_sdhi_probe
* | 4f41bfdbc0 Merge 4d1a2d1363 ("mm/memblock: repeat setting reserved region nid if array is doubled") into android-mainline
|\|
| * 4d1a2d1363 mm/memblock: repeat setting reserved region nid if array is doubled
* | bf30bf594d Merge c0fabecd65 ("mm/memblock: pass size instead of end to memblock_set_node()") into android-mainline
|\|
| * c0fabecd65 mm/memblock: pass size instead of end to memblock_set_node()
* | b917e32d2b Merge dfbaecf7e3 ("irqchip/qcom-mpm: Prevent crash when trying to handle non-wake GPIOs") into android-mainline
|\|
| * dfbaecf7e3 irqchip/qcom-mpm: Prevent crash when trying to handle non-wake GPIOs
* | 624451d032 Merge e51dd71266 ("amd-xgbe: Fix to ensure dependent features are toggled with RX checksum offload") into android-mainline
|\|
| * e51dd71266 amd-xgbe: Fix to ensure dependent features are toggled with RX checksum offload
* | 6b5926ac43 Merge 34b6fa1143 ("perf/x86/intel: KVM: Mask PEBS_ENABLE loaded for guest with vCPU's value.") into android-mainline
|\|
| * 34b6fa1143 perf/x86/intel: KVM: Mask PEBS_ENABLE loaded for guest with vCPU's value.
* | 3dfac7bc40 Merge 6a098c51d1 ("parisc: Fix double SIGFPE crash") into android-mainline
|\|
| * 6a098c51d1 parisc: Fix double SIGFPE crash
* | 7adf4c4597 Merge 3821cae9bd ("arm64: errata: Add missing sentinels to Spectre-BHB MIDR arrays") into android-mainline
|\|
| * 3821cae9bd arm64: errata: Add missing sentinels to Spectre-BHB MIDR arrays
* | 183e0245fd Reapply "arm64: errata: Add newer ARM cores to the spectre_bhb_loop_affected() lists"
* | eb6f76b70c Merge ddf4e7f0f5 ("i2c: imx-lpi2c: Fix clock count when probe defers") into android-mainline
|/
* ddf4e7f0f5 i2c: imx-lpi2c: Fix clock count when probe defers
* 78cf306f72 EDAC/altera: Set DDR and SDMMC interrupt mask before registration
* 7266416334 EDAC/altera: Test the correct error reg offset
* 126f5c6e0c drm/nouveau: Fix WARN_ON in nouveau_fence_context_kill()
* 94808275aa drm/fdinfo: Protect against driver unbind
* b364ee98ac btrfs: fix COW handling in run_delalloc_nocow()
* 3ac18e0a94 ALSA: usb-audio: Add second USB ID for Jabra Evolve 65 headset
* f73ecbfa78 ALSA: usb-audio: Add retry on -EPROTO from usb_set_interface()
* fd1aeef3e9 Revert "rndis_host: Flag RNDIS modems as WWAN devices"

Change-Id: I9176e1db2f5a199ce0ba84842488ecdcb508f324
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman
2025-06-16 06:45:20 +00:00
committed by Treehugger Robot
778 changed files with 8635 additions and 3587 deletions

View File

@@ -270,6 +270,12 @@ Description: Shows the operation capability bits displayed in bitmap format
correlates to the operations allowed. It's visible only
on platforms that support the capability.
What: /sys/bus/dsa/devices/wq<m>.<n>/driver_name
Date: Sept 8, 2023
KernelVersion: 6.7.0
Contact: dmaengine@vger.kernel.org
Description: Name of driver to be bound to the wq.
What: /sys/bus/dsa/devices/engine<m>.<n>/group_id
Date: Oct 25, 2019
KernelVersion: 5.6.0

View File

@@ -514,6 +514,7 @@ Description: information about CPUs heterogeneity.
What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
/sys/devices/system/cpu/vulnerabilities/indirect_target_selection
/sys/devices/system/cpu/vulnerabilities/itlb_multihit
/sys/devices/system/cpu/vulnerabilities/l1tf
/sys/devices/system/cpu/vulnerabilities/mds

View File

@@ -22,3 +22,4 @@ are configurable at compile, boot or run time.
srso
gather_data_sampling
reg-file-data-sampling
indirect-target-selection

View File

@@ -0,0 +1,168 @@
.. SPDX-License-Identifier: GPL-2.0
Indirect Target Selection (ITS)
===============================
ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were
released before Alder Lake. ITS may allow an attacker to control the prediction
of indirect branches and RETs located in the lower half of a cacheline.
ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium).
Scope of Impact
---------------
- **eIBRS Guest/Host Isolation**: Indirect branches in KVM/kernel may still be
predicted with unintended target corresponding to a branch in the guest.
- **Intra-Mode BTI**: In-kernel training such as through cBPF or other native
gadgets.
- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect
branches may still be predicted with targets corresponding to direct branches
executed prior to the IBPB. This is fixed by the IPU 2025.1 microcode, which
should be available via distro updates. Alternatively microcode can be
obtained from Intel's github repository [#f1]_.
Affected CPUs
-------------
Below is the list of ITS affected CPUs [#f2]_ [#f3]_:
======================== ============ ==================== ===============
Common name Family_Model eIBRS Intra-mode BTI
Guest/Host Isolation
======================== ============ ==================== ===============
SKYLAKE_X (step >= 6) 06_55H Affected Affected
ICELAKE_X 06_6AH Not affected Affected
ICELAKE_D 06_6CH Not affected Affected
ICELAKE_L 06_7EH Not affected Affected
TIGERLAKE_L 06_8CH Not affected Affected
TIGERLAKE 06_8DH Not affected Affected
KABYLAKE_L (step >= 12) 06_8EH Affected Affected
KABYLAKE (step >= 13) 06_9EH Affected Affected
COMETLAKE 06_A5H Affected Affected
COMETLAKE_L 06_A6H Affected Affected
ROCKETLAKE 06_A7H Not affected Affected
======================== ============ ==================== ===============
- All affected CPUs enumerate Enhanced IBRS feature.
- IBPB isolation is affected on all ITS affected CPUs, and needs a microcode
update for mitigation.
- None of the affected CPUs enumerate BHI_CTRL which was introduced in Golden
Cove (Alder Lake and Sapphire Rapids). This can help guests to determine the
host's affected status.
- Intel Atom CPUs are not affected by ITS.
Mitigation
----------
As only the indirect branches and RETs that have their last byte of instruction
in the lower half of the cacheline are vulnerable to ITS, the basic idea behind
the mitigation is to not allow indirect branches in the lower half.
This is achieved by relying on existing retpoline support in the kernel, and in
compilers. ITS-vulnerable retpoline sites are runtime patched to point to newly
added ITS-safe thunks. These safe thunks consist of an indirect branch in the
second half of the cacheline. Not all retpoline sites are patched to thunks, if
a retpoline site is evaluated to be ITS-safe, it is replaced with an inline
indirect branch.
Dynamic thunks
~~~~~~~~~~~~~~
From a dynamically allocated pool of safe-thunks, each vulnerable site is
replaced with a new thunk, such that they get a unique address. This could
improve the branch prediction accuracy. Also, it is a defense-in-depth measure
against aliasing.
Note, for simplicity, indirect branches in eBPF programs are always replaced
with a jump to a static thunk in __x86_indirect_its_thunk_array. If required,
in future this can be changed to use dynamic thunks.
All vulnerable RETs are replaced with a static thunk, they do not use dynamic
thunks. This is because RETs get their prediction from RSB mostly that does not
depend on source address. RETs that underflow RSB may benefit from dynamic
thunks. But, RETs significantly outnumber indirect branches, and any benefit
from a unique source address could be outweighed by the increased icache
footprint and iTLB pressure.
Retpoline
~~~~~~~~~
Retpoline sequence also mitigates ITS-unsafe indirect branches. For this
reason, when retpoline is enabled, ITS mitigation only relocates the RETs to
safe thunks, unless the user requested the RSB-stuffing mitigation.
RSB Stuffing
~~~~~~~~~~~~
RSB-stuffing via Call Depth Tracking is a mitigation for Retbleed RSB-underflow
attacks. And it also mitigates RETs that are vulnerable to ITS.
Mitigation in guests
^^^^^^^^^^^^^^^^^^^^
All guests deploy ITS mitigation by default, irrespective of eIBRS enumeration
and Family/Model of the guest. This is because eIBRS feature could be hidden
from a guest. One exception to this is when a guest enumerates BHI_DIS_S, which
indicates that the guest is running on an unaffected host.
To prevent guests from unnecessarily deploying the mitigation on unaffected
platforms, Intel has defined ITS_NO bit(62) in MSR IA32_ARCH_CAPABILITIES. When
a guest sees this bit set, it should not enumerate the ITS bug. Note, this bit
is not set by any hardware, but is **intended for VMMs to synthesize** it for
guests as per the host's affected status.
Mitigation options
^^^^^^^^^^^^^^^^^^
The ITS mitigation can be controlled using the "indirect_target_selection"
kernel parameter. The available options are:
======== ===================================================================
on (default) Deploy the "Aligned branch/return thunks" mitigation.
If spectre_v2 mitigation enables retpoline, aligned-thunks are only
deployed for the affected RET instructions. Retpoline mitigates
indirect branches.
off Disable ITS mitigation.
vmexit Equivalent to "=on" if the CPU is affected by guest/host isolation
part of ITS. Otherwise, mitigation is not deployed. This option is
useful when host userspace is not in the threat model, and only
attacks from guest to host are considered.
stuff Deploy RSB-fill mitigation when retpoline is also deployed.
Otherwise, deploy the default mitigation. When retpoline mitigation
is enabled, RSB-stuffing via Call-Depth-Tracking also mitigates
ITS.
force Force the ITS bug and deploy the default mitigation.
======== ===================================================================
Sysfs reporting
---------------
The sysfs file showing ITS mitigation status is:
/sys/devices/system/cpu/vulnerabilities/indirect_target_selection
Note, microcode mitigation status is not reported in this file.
The possible values in this file are:
.. list-table::
* - Not affected
- The processor is not vulnerable.
* - Vulnerable
- System is vulnerable and no mitigation has been applied.
* - Vulnerable, KVM: Not affected
- System is vulnerable to intra-mode BTI, but not affected by eIBRS
guest/host isolation.
* - Mitigation: Aligned branch/return thunks
- The mitigation is enabled, affected indirect branches and RETs are
relocated to safe thunks.
* - Mitigation: Retpolines, Stuffing RSB
- The mitigation is enabled using retpoline and RSB stuffing.
References
----------
.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files
.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html
.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list

View File

@@ -2084,6 +2084,23 @@
different crypto accelerators. This option can be used different crypto accelerators. This option can be used
to achieve best performance for particular HW. to achieve best performance for particular HW.
indirect_target_selection= [X86,Intel] Mitigation control for Indirect
Target Selection(ITS) bug in Intel CPUs. Updated
microcode is also required for a fix in IBPB.
on: Enable mitigation (default).
off: Disable mitigation.
force: Force the ITS bug and deploy default
mitigation.
vmexit: Only deploy mitigation if CPU is affected by
guest/host isolation part of ITS.
stuff: Deploy RSB-fill mitigation when retpoline is
also deployed. Otherwise, deploy the default
mitigation.
For details see:
Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
init= [KNL] init= [KNL]
Format: <full_path> Format: <full_path>
Run specified binary instead of /sbin/init as init Run specified binary instead of /sbin/init as init
@@ -3367,6 +3384,7 @@
expose users to several CPU vulnerabilities. expose users to several CPU vulnerabilities.
Equivalent to: if nokaslr then kpti=0 [ARM64] Equivalent to: if nokaslr then kpti=0 [ARM64]
gather_data_sampling=off [X86] gather_data_sampling=off [X86]
indirect_target_selection=off [X86]
kvm.nx_huge_pages=off [X86] kvm.nx_huge_pages=off [X86]
l1tf=off [X86] l1tf=off [X86]
mds=off [X86] mds=off [X86]
@@ -6026,6 +6044,8 @@
Selecting 'on' will also enable the mitigation Selecting 'on' will also enable the mitigation
against user space to user space task attacks. against user space to user space task attacks.
Selecting specific mitigation does not force enable
user mitigations.
Selecting 'off' will disable both the kernel and Selecting 'off' will disable both the kernel and
the user space protections. the user space protections.

View File

@@ -32,12 +32,12 @@ Temperature sensors and fans can be queried and set via the standard
=============================== ======= ======================================= =============================== ======= =======================================
Name Perm Description Name Perm Description
=============================== ======= ======================================= =============================== ======= =======================================
fan[1-3]_input RO Fan speed in RPM. fan[1-4]_input RO Fan speed in RPM.
fan[1-3]_label RO Fan label. fan[1-4]_label RO Fan label.
fan[1-3]_min RO Minimal Fan speed in RPM fan[1-4]_min RO Minimal Fan speed in RPM
fan[1-3]_max RO Maximal Fan speed in RPM fan[1-4]_max RO Maximal Fan speed in RPM
fan[1-3]_target RO Expected Fan speed in RPM fan[1-4]_target RO Expected Fan speed in RPM
pwm[1-3] RW Control the fan PWM duty-cycle. pwm[1-4] RW Control the fan PWM duty-cycle.
pwm1_enable WO Enable or disable automatic BIOS fan pwm1_enable WO Enable or disable automatic BIOS fan
control (not supported on all laptops, control (not supported on all laptops,
see below for details). see below for details).
@@ -93,7 +93,7 @@ Again, when you find new codes, we'd be happy to have your patches!
--------------------------- ---------------------------
The driver also exports the fans as thermal cooling devices with The driver also exports the fans as thermal cooling devices with
``type`` set to ``dell-smm-fan[1-3]``. This allows for easy fan control ``type`` set to ``dell-smm-fan[1-4]``. This allows for easy fan control
using one of the thermal governors. using one of the thermal governors.
Module parameters Module parameters

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
VERSION = 6 VERSION = 6
PATCHLEVEL = 6 PATCHLEVEL = 6
SUBLEVEL = 89 SUBLEVEL = 92
EXTRAVERSION = EXTRAVERSION =
NAME = Pinguïn Aangedreven NAME = Pinguïn Aangedreven

View File

@@ -1710,6 +1710,7 @@
# required by pinctrl-msm.ko # required by pinctrl-msm.ko
device_property_read_u16_array device_property_read_u16_array
devm_register_sys_off_handler
gpiochip_line_is_valid gpiochip_line_is_valid
gpiochip_lock_as_irq gpiochip_lock_as_irq
gpiochip_unlock_as_irq gpiochip_unlock_as_irq

View File

@@ -139,7 +139,7 @@
reg = <0x54400000 0x00040000>; reg = <0x54400000 0x00040000>;
clocks = <&tegra_car TEGRA114_CLK_DSIB>, clocks = <&tegra_car TEGRA114_CLK_DSIB>,
<&tegra_car TEGRA114_CLK_DSIBLP>, <&tegra_car TEGRA114_CLK_DSIBLP>,
<&tegra_car TEGRA114_CLK_PLL_D2_OUT0>; <&tegra_car TEGRA114_CLK_PLL_D_OUT0>;
clock-names = "dsi", "lp", "parent"; clock-names = "dsi", "lp", "parent";
resets = <&tegra_car 82>; resets = <&tegra_car 82>;
reset-names = "dsi"; reset-names = "dsi";

View File

@@ -40,6 +40,9 @@
reg = <1>; reg = <1>;
interrupt-parent = <&gpio4>; interrupt-parent = <&gpio4>;
interrupts = <16 IRQ_TYPE_LEVEL_LOW>; interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
micrel,led-mode = <1>;
clocks = <&clks IMX6UL_CLK_ENET_REF>;
clock-names = "rmii-ref";
status = "okay"; status = "okay";
}; };
}; };

View File

@@ -538,11 +538,12 @@ extern u32 at91_pm_suspend_in_sram_sz;
static int at91_suspend_finish(unsigned long val) static int at91_suspend_finish(unsigned long val)
{ {
unsigned char modified_gray_code[] = { /* SYNOPSYS workaround to fix a bug in the calibration logic */
0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d, unsigned char modified_fix_code[] = {
0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b, 0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18,
0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13, 0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13,
0x10, 0x11, 0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14,
0x1e, 0x1f,
}; };
unsigned int tmp, index; unsigned int tmp, index;
int i; int i;
@@ -553,25 +554,25 @@ static int at91_suspend_finish(unsigned long val)
* restore the ZQ0SR0 with the value saved here. But the * restore the ZQ0SR0 with the value saved here. But the
* calibration is buggy and restoring some values from ZQ0SR0 * calibration is buggy and restoring some values from ZQ0SR0
* is forbidden and risky thus we need to provide processed * is forbidden and risky thus we need to provide processed
* values for these (modified gray code values). * values for these.
*/ */
tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0); tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
/* Store pull-down output impedance select. */ /* Store pull-down output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f; index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index]; soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;
/* Store pull-up output impedance select. */ /* Store pull-up output impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f; index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;
/* Store pull-down on-die termination impedance select. */ /* Store pull-down on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f; index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;
/* Store pull-up on-die termination impedance select. */ /* Store pull-up on-die termination impedance select. */
index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f; index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index]; soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;
/* /*
* The 1st 8 words of memory might get corrupted in the process * The 1st 8 words of memory might get corrupted in the process

View File

@@ -151,28 +151,12 @@
vcc-pg-supply = <&reg_aldo1>; vcc-pg-supply = <&reg_aldo1>;
}; };
&r_ir { &r_i2c {
linux,rc-map-name = "rc-beelink-gs1";
status = "okay";
};
&r_pio {
/*
* FIXME: We can't add that supply for now since it would
* create a circular dependency between pinctrl, the regulator
* and the RSB Bus.
*
* vcc-pl-supply = <&reg_aldo1>;
*/
vcc-pm-supply = <&reg_aldo1>;
};
&r_rsb {
status = "okay"; status = "okay";
axp805: pmic@745 { axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806"; compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>; reg = <0x36>;
interrupt-parent = <&r_intc>; interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller; interrupt-controller;
@@ -290,6 +274,22 @@
}; };
}; };
&r_ir {
linux,rc-map-name = "rc-beelink-gs1";
status = "okay";
};
&r_pio {
/*
* PL0 and PL1 are used for PMIC I2C
* don't enable the pl-supply else
* it will fail at boot
*
* vcc-pl-supply = <&reg_aldo1>;
*/
vcc-pm-supply = <&reg_aldo1>;
};
&spdif { &spdif {
pinctrl-names = "default"; pinctrl-names = "default";
pinctrl-0 = <&spdif_tx_pin>; pinctrl-0 = <&spdif_tx_pin>;

View File

@@ -175,16 +175,12 @@
vcc-pg-supply = <&reg_vcc_wifi_io>; vcc-pg-supply = <&reg_vcc_wifi_io>;
}; };
&r_ir { &r_i2c {
status = "okay";
};
&r_rsb {
status = "okay"; status = "okay";
axp805: pmic@745 { axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806"; compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>; reg = <0x36>;
interrupt-parent = <&r_intc>; interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller; interrupt-controller;
@@ -295,6 +291,10 @@
}; };
}; };
&r_ir {
status = "okay";
};
&rtc { &rtc {
clocks = <&ext_osc32k>; clocks = <&ext_osc32k>;
}; };

View File

@@ -112,20 +112,12 @@
vcc-pg-supply = <&reg_aldo1>; vcc-pg-supply = <&reg_aldo1>;
}; };
&r_ir { &r_i2c {
status = "okay";
};
&r_pio {
vcc-pm-supply = <&reg_bldo3>;
};
&r_rsb {
status = "okay"; status = "okay";
axp805: pmic@745 { axp805: pmic@36 {
compatible = "x-powers,axp805", "x-powers,axp806"; compatible = "x-powers,axp805", "x-powers,axp806";
reg = <0x745>; reg = <0x36>;
interrupt-parent = <&r_intc>; interrupt-parent = <&r_intc>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller; interrupt-controller;
@@ -240,6 +232,14 @@
}; };
}; };
&r_ir {
status = "okay";
};
&r_pio {
vcc-pm-supply = <&reg_bldo3>;
};
&rtc { &rtc {
clocks = <&ext_osc32k>; clocks = <&ext_osc32k>;
}; };

View File

@@ -148,6 +148,19 @@
startup-delay-us = <20000>; startup-delay-us = <20000>;
}; };
reg_usdhc2_vqmmc: regulator-usdhc2-vqmmc {
compatible = "regulator-gpio";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc2_vsel>;
gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
regulator-max-microvolt = <3300000>;
regulator-min-microvolt = <1800000>;
states = <1800000 0x1>,
<3300000 0x0>;
regulator-name = "PMIC_USDHC_VSELECT";
vin-supply = <&reg_nvcc_sd>;
};
reserved-memory { reserved-memory {
#address-cells = <2>; #address-cells = <2>;
#size-cells = <2>; #size-cells = <2>;
@@ -266,7 +279,7 @@
"SODIMM_19", "SODIMM_19",
"", "",
"", "",
"", "PMIC_USDHC_VSELECT",
"", "",
"", "",
"", "",
@@ -787,6 +800,7 @@
pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>; pinctrl-2 = <&pinctrl_usdhc2_200mhz>, <&pinctrl_usdhc2_cd>;
pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>; pinctrl-3 = <&pinctrl_usdhc2_sleep>, <&pinctrl_usdhc2_cd_sleep>;
vmmc-supply = <&reg_usdhc2_vmmc>; vmmc-supply = <&reg_usdhc2_vmmc>;
vqmmc-supply = <&reg_usdhc2_vqmmc>;
}; };
&wdog1 { &wdog1 {
@@ -1209,13 +1223,17 @@
<MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5 0x6>; /* SODIMM 76 */ <MX8MM_IOMUXC_NAND_CLE_GPIO3_IO5 0x6>; /* SODIMM 76 */
}; };
pinctrl_usdhc2_vsel: usdhc2vselgrp {
fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_GPIO1_IO4 0x10>; /* PMIC_USDHC_VSELECT */
};
/* /*
* Note: Due to ERR050080 we use discrete external on-module resistors pulling-up to the * Note: Due to ERR050080 we use discrete external on-module resistors pulling-up to the
* on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here. * on-module +V3.3_1.8_SD (LDO5) rail and explicitly disable the internal pull-ups here.
*/ */
pinctrl_usdhc2: usdhc2grp { pinctrl_usdhc2: usdhc2grp {
fsl,pins = fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90>, /* SODIMM 78 */ <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x90>, /* SODIMM 78 */
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x90>, /* SODIMM 74 */ <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x90>, /* SODIMM 74 */
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x90>, /* SODIMM 80 */ <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x90>, /* SODIMM 80 */
@@ -1226,7 +1244,6 @@
pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp {
fsl,pins = fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94>, <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x94>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x94>, <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x94>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x94>, <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x94>,
@@ -1237,7 +1254,6 @@
pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp {
fsl,pins = fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x10>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96>, <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x96>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x96>, <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x96>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x96>, <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x96>,
@@ -1249,7 +1265,6 @@
/* Avoid backfeeding with removed card power */ /* Avoid backfeeding with removed card power */
pinctrl_usdhc2_sleep: usdhc2slpgrp { pinctrl_usdhc2_sleep: usdhc2slpgrp {
fsl,pins = fsl,pins =
<MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x0>,
<MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x0>, <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x0>,
<MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x0>, <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x0>,
<MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0>, <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x0>,

View File

@@ -26,6 +26,8 @@
leds { leds {
compatible = "gpio-leds"; compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&spi_quad_pins>;
led-power1 { led-power1 {
label = "udpu:green:power"; label = "udpu:green:power";
@@ -82,8 +84,6 @@
&spi0 { &spi0 {
status = "okay"; status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&spi_quad_pins>;
flash@0 { flash@0 {
compatible = "jedec,spi-nor"; compatible = "jedec,spi-nor";
@@ -108,6 +108,10 @@
}; };
}; };
&spi_quad_pins {
function = "gpio";
};
&pinctrl_nb { &pinctrl_nb {
i2c2_recovery_pins: i2c2-recovery-pins { i2c2_recovery_pins: i2c2-recovery-pins {
groups = "i2c2"; groups = "i2c2";

View File

@@ -1635,7 +1635,7 @@
regulator-min-microvolt = <1800000>; regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>; regulator-max-microvolt = <1800000>;
regulator-always-on; regulator-always-on;
gpio = <&exp1 14 GPIO_ACTIVE_HIGH>; gpio = <&exp1 9 GPIO_ACTIVE_HIGH>;
enable-active-high; enable-active-high;
vin-supply = <&vdd_1v8>; vin-supply = <&vdd_1v8>;
}; };

View File

@@ -102,6 +102,16 @@
}; };
pcie@141a0000 { pcie@141a0000 {
reg = <0x00 0x141a0000 0x0 0x00020000 /* appl registers (128K) */
0x00 0x3a000000 0x0 0x00040000 /* configuration space (256K) */
0x00 0x3a040000 0x0 0x00040000 /* iATU_DMA reg space (256K) */
0x00 0x3a080000 0x0 0x00040000 /* DBI reg space (256K) */
0x2e 0x20000000 0x0 0x10000000>; /* ECAM (256MB) */
ranges = <0x81000000 0x00 0x3a100000 0x00 0x3a100000 0x0 0x00100000 /* downstream I/O (1MB) */
0x82000000 0x00 0x40000000 0x2e 0x30000000 0x0 0x08000000 /* non-prefetchable memory (128MB) */
0xc3000000 0x28 0x00000000 0x28 0x00000000 0x6 0x20000000>; /* prefetchable memory (25088MB) */
status = "okay"; status = "okay";
vddio-pex-ctl-supply = <&vdd_1v8_ls>; vddio-pex-ctl-supply = <&vdd_1v8_ls>;
phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>, phys = <&p2u_nvhs_0>, <&p2u_nvhs_1>, <&p2u_nvhs_2>,

View File

@@ -73,14 +73,13 @@
}; };
intc: interrupt-controller@4ac00000 { intc: interrupt-controller@4ac00000 {
compatible = "arm,cortex-a7-gic"; compatible = "arm,gic-400";
#interrupt-cells = <3>; #interrupt-cells = <3>;
#address-cells = <1>;
interrupt-controller; interrupt-controller;
reg = <0x0 0x4ac10000 0x0 0x1000>, reg = <0x0 0x4ac10000 0x0 0x1000>,
<0x0 0x4ac20000 0x0 0x2000>, <0x0 0x4ac20000 0x0 0x20000>,
<0x0 0x4ac40000 0x0 0x2000>, <0x0 0x4ac40000 0x0 0x20000>,
<0x0 0x4ac60000 0x0 0x2000>; <0x0 0x4ac60000 0x0 0x20000>;
}; };
psci { psci {

View File

@@ -10,39 +10,44 @@
#include <dt-bindings/clock/xlnx-zynqmp-clk.h> #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
/ { / {
pss_ref_clk: pss_ref_clk { pss_ref_clk: pss-ref-clk {
bootph-all; bootph-all;
compatible = "fixed-clock"; compatible = "fixed-clock";
#clock-cells = <0>; #clock-cells = <0>;
clock-frequency = <33333333>; clock-frequency = <33333333>;
clock-output-names = "pss_ref_clk";
}; };
video_clk: video_clk { video_clk: video-clk {
bootph-all; bootph-all;
compatible = "fixed-clock"; compatible = "fixed-clock";
#clock-cells = <0>; #clock-cells = <0>;
clock-frequency = <27000000>; clock-frequency = <27000000>;
clock-output-names = "video_clk";
}; };
pss_alt_ref_clk: pss_alt_ref_clk { pss_alt_ref_clk: pss-alt-ref-clk {
bootph-all; bootph-all;
compatible = "fixed-clock"; compatible = "fixed-clock";
#clock-cells = <0>; #clock-cells = <0>;
clock-frequency = <0>; clock-frequency = <0>;
clock-output-names = "pss_alt_ref_clk";
}; };
gt_crx_ref_clk: gt_crx_ref_clk { gt_crx_ref_clk: gt-crx-ref-clk {
bootph-all; bootph-all;
compatible = "fixed-clock"; compatible = "fixed-clock";
#clock-cells = <0>; #clock-cells = <0>;
clock-frequency = <108000000>; clock-frequency = <108000000>;
clock-output-names = "gt_crx_ref_clk";
}; };
aux_ref_clk: aux_ref_clk { aux_ref_clk: aux-ref-clk {
bootph-all; bootph-all;
compatible = "fixed-clock"; compatible = "fixed-clock";
#clock-cells = <0>; #clock-cells = <0>;
clock-frequency = <27000000>; clock-frequency = <27000000>;
clock-output-names = "aux_ref_clk";
}; };
}; };

View File

@@ -81,6 +81,7 @@
#define ARM_CPU_PART_CORTEX_A78AE 0xD42 #define ARM_CPU_PART_CORTEX_A78AE 0xD42
#define ARM_CPU_PART_CORTEX_X1 0xD44 #define ARM_CPU_PART_CORTEX_X1 0xD44
#define ARM_CPU_PART_CORTEX_A510 0xD46 #define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_X1C 0xD4C
#define ARM_CPU_PART_CORTEX_A520 0xD80 #define ARM_CPU_PART_CORTEX_A520 0xD80
#define ARM_CPU_PART_CORTEX_A710 0xD47 #define ARM_CPU_PART_CORTEX_A710 0xD47
#define ARM_CPU_PART_CORTEX_A715 0xD4D #define ARM_CPU_PART_CORTEX_A715 0xD4D
@@ -131,6 +132,7 @@
#define FUJITSU_CPU_PART_A64FX 0x001 #define FUJITSU_CPU_PART_A64FX 0x001
#define HISI_CPU_PART_TSV110 0xD01 #define HISI_CPU_PART_TSV110 0xD01
#define HISI_CPU_PART_HIP09 0xD02
#define APPLE_CPU_PART_M1_ICESTORM 0x022 #define APPLE_CPU_PART_M1_ICESTORM 0x022
#define APPLE_CPU_PART_M1_FIRESTORM 0x023 #define APPLE_CPU_PART_M1_FIRESTORM 0x023
@@ -166,6 +168,7 @@
#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE) #define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520) #define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715) #define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
@@ -206,6 +209,7 @@
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL) #define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX) #define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110) #define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
#define MIDR_HISI_HIP09 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_HIP09)
#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM) #define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM) #define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO) #define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)

View File

@@ -687,6 +687,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
} }
#endif #endif
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type); u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type);
s32 aarch64_get_branch_offset(u32 insn); s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset); u32 aarch64_set_branch_offset(u32 insn, s32 offset);

View File

@@ -696,7 +696,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e)) pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_none(pud) (!pud_val(pud)) #define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!pud_table(pud)) #define pud_bad(pud) ((pud_val(pud) & PUD_TYPE_MASK) != \
PUD_TYPE_TABLE)
#define pud_present(pud) pte_present(pud_pte(pud)) #define pud_present(pud) pte_present(pud_pte(pud))
#define pud_leaf(pud) (pud_present(pud) && !pud_table(pud)) #define pud_leaf(pud) (pud_present(pud) && !pud_table(pud))
#define pud_valid(pud) pte_valid(pud_pte(pud)) #define pud_valid(pud) pte_valid(pud_pte(pud))

View File

@@ -97,6 +97,9 @@ enum mitigation_state arm64_get_meltdown_state(void);
enum mitigation_state arm64_get_spectre_bhb_state(void); enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope); bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
extern bool __nospectre_bhb;
u8 get_spectre_bhb_loop_value(void);
bool is_spectre_bhb_fw_mitigated(void);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused); void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr); bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);

View File

@@ -876,11 +876,22 @@ static u8 spectre_bhb_loop_affected(void)
{ {
u8 k = 0; u8 k = 0;
static const struct midr_range spectre_bhb_k132_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_X3),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V2),
{},
};
static const struct midr_range spectre_bhb_k38_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A715),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A720),
{},
};
static const struct midr_range spectre_bhb_k32_list[] = { static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE), MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
@@ -889,9 +900,11 @@ static u8 spectre_bhb_loop_affected(void)
}; };
static const struct midr_range spectre_bhb_k24_list[] = { static const struct midr_range spectre_bhb_k24_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76AE),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD), MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
MIDR_ALL_VERSIONS(MIDR_HISI_HIP09),
{}, {},
}; };
static const struct midr_range spectre_bhb_k11_list[] = { static const struct midr_range spectre_bhb_k11_list[] = {
@@ -904,7 +917,11 @@ static u8 spectre_bhb_loop_affected(void)
{}, {},
}; };
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list)) if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k132_list))
k = 132;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k38_list))
k = 38;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
k = 32; k = 32;
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list)) else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
k = 24; k = 24;
@@ -983,6 +1000,11 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
return true; return true;
} }
u8 get_spectre_bhb_loop_value(void)
{
return max_bhb_k;
}
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot) static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{ {
const char *v = arm64_get_bp_hardening_vector(slot); const char *v = arm64_get_bp_hardening_vector(slot);
@@ -1000,7 +1022,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
isb(); isb();
} }
static bool __read_mostly __nospectre_bhb; bool __read_mostly __nospectre_bhb;
static int __init parse_spectre_bhb_param(char *str) static int __init parse_spectre_bhb_param(char *str)
{ {
__nospectre_bhb = true; __nospectre_bhb = true;
@@ -1078,6 +1100,11 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
update_mitigation_state(&spectre_bhb_state, state); update_mitigation_state(&spectre_bhb_state, state);
} }
bool is_spectre_bhb_fw_mitigated(void)
{
return test_bit(BHB_FW, &system_bhb_mitigations);
}
/* Patched to NOP when enabled */ /* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt, void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
__le32 *origptr, __le32 *origptr,

View File

@@ -5,6 +5,7 @@
* *
* Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com> * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
*/ */
#include <linux/bitfield.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/printk.h> #include <linux/printk.h>
@@ -1471,43 +1472,41 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm); return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
} }
static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
{
switch (type) {
case AARCH64_INSN_MB_SY:
return 0xf;
case AARCH64_INSN_MB_ST:
return 0xe;
case AARCH64_INSN_MB_LD:
return 0xd;
case AARCH64_INSN_MB_ISH:
return 0xb;
case AARCH64_INSN_MB_ISHST:
return 0xa;
case AARCH64_INSN_MB_ISHLD:
return 0x9;
case AARCH64_INSN_MB_NSH:
return 0x7;
case AARCH64_INSN_MB_NSHST:
return 0x6;
case AARCH64_INSN_MB_NSHLD:
return 0x5;
default:
pr_err("%s: unknown barrier type %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
}
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type) u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
{ {
u32 opt; u32 opt;
u32 insn; u32 insn;
switch (type) { opt = __get_barrier_crm_val(type);
case AARCH64_INSN_MB_SY: if (opt == AARCH64_BREAK_FAULT)
opt = 0xf;
break;
case AARCH64_INSN_MB_ST:
opt = 0xe;
break;
case AARCH64_INSN_MB_LD:
opt = 0xd;
break;
case AARCH64_INSN_MB_ISH:
opt = 0xb;
break;
case AARCH64_INSN_MB_ISHST:
opt = 0xa;
break;
case AARCH64_INSN_MB_ISHLD:
opt = 0x9;
break;
case AARCH64_INSN_MB_NSH:
opt = 0x7;
break;
case AARCH64_INSN_MB_NSHST:
opt = 0x6;
break;
case AARCH64_INSN_MB_NSHLD:
opt = 0x5;
break;
default:
pr_err("%s: unknown dmb type %d\n", __func__, type);
return AARCH64_BREAK_FAULT; return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_get_dmb_value(); insn = aarch64_insn_get_dmb_value();
insn &= ~GENMASK(11, 8); insn &= ~GENMASK(11, 8);
@@ -1515,3 +1514,18 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
return insn; return insn;
} }
u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
{
u32 opt, insn;
opt = __get_barrier_crm_val(type);
if (opt == AARCH64_BREAK_FAULT)
return AARCH64_BREAK_FAULT;
insn = aarch64_insn_get_dsb_base_value();
insn &= ~GENMASK(11, 8);
insn |= (opt << 8);
return insn;
}

View File

@@ -7,6 +7,7 @@
#define pr_fmt(fmt) "bpf_jit: " fmt #define pr_fmt(fmt) "bpf_jit: " fmt
#include <linux/arm-smccc.h>
#include <linux/bitfield.h> #include <linux/bitfield.h>
#include <linux/bpf.h> #include <linux/bpf.h>
#include <linux/filter.h> #include <linux/filter.h>
@@ -17,6 +18,7 @@
#include <asm/asm-extable.h> #include <asm/asm-extable.h>
#include <asm/byteorder.h> #include <asm/byteorder.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/insn.h> #include <asm/insn.h>
#include <asm/patching.h> #include <asm/patching.h>
@@ -655,7 +657,51 @@ static void build_plt(struct jit_ctx *ctx)
plt->target = (u64)&dummy_tramp; plt->target = (u64)&dummy_tramp;
} }
static void build_epilogue(struct jit_ctx *ctx) /* Clobbers BPF registers 1-4, aka x0-x3 */
static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
{
const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
u8 k = get_spectre_bhb_loop_value();
if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
cpu_mitigations_off() || __nospectre_bhb ||
arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
return;
if (capable(CAP_SYS_ADMIN))
return;
if (supports_clearbhb(SCOPE_SYSTEM)) {
emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
return;
}
if (k) {
emit_a64_mov_i64(r1, k, ctx);
emit(A64_B(1), ctx);
emit(A64_SUBS_I(true, r1, r1, 1), ctx);
emit(A64_B_(A64_COND_NE, -2), ctx);
emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
emit(aarch64_insn_get_isb_value(), ctx);
}
if (is_spectre_bhb_fw_mitigated()) {
emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
switch (arm_smccc_1_1_get_conduit()) {
case SMCCC_CONDUIT_HVC:
emit(aarch64_insn_get_hvc_value(), ctx);
break;
case SMCCC_CONDUIT_SMC:
emit(aarch64_insn_get_smc_value(), ctx);
break;
default:
pr_err_once("Firmware mitigation enabled with unknown conduit\n");
}
}
}
static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
{ {
const u8 r0 = bpf2a64[BPF_REG_0]; const u8 r0 = bpf2a64[BPF_REG_0];
const u8 r6 = bpf2a64[BPF_REG_6]; const u8 r6 = bpf2a64[BPF_REG_6];
@@ -677,10 +723,13 @@ static void build_epilogue(struct jit_ctx *ctx)
emit(A64_POP(r8, r9, A64_SP), ctx); emit(A64_POP(r8, r9, A64_SP), ctx);
emit(A64_POP(r6, r7, A64_SP), ctx); emit(A64_POP(r6, r7, A64_SP), ctx);
if (was_classic)
build_bhb_mitigation(ctx);
/* Restore FP/LR registers */ /* Restore FP/LR registers */
emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
/* Set return value */ /* Move the return value from bpf:r0 (aka x7) to x0 */
emit(A64_MOV(1, A64_R(0), r0), ctx); emit(A64_MOV(1, A64_R(0), r0), ctx);
/* Authenticate lr */ /* Authenticate lr */
@@ -1588,7 +1637,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
} }
ctx.epilogue_offset = ctx.idx; ctx.epilogue_offset = ctx.idx;
build_epilogue(&ctx); build_epilogue(&ctx, was_classic);
build_plt(&ctx); build_plt(&ctx);
extable_align = __alignof__(struct exception_table_entry); extable_align = __alignof__(struct exception_table_entry);
@@ -1624,7 +1673,7 @@ skip_init_ctx:
goto out_off; goto out_off;
} }
build_epilogue(&ctx); build_epilogue(&ctx, was_classic);
build_plt(&ctx); build_plt(&ctx);
/* 3. Extra pass to validate JITed code. */ /* 3. Extra pass to validate JITed code. */
@@ -1956,7 +2005,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx); emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
if (flags & BPF_TRAMP_F_CALL_ORIG) { if (flags & BPF_TRAMP_F_CALL_ORIG) {
emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); /* for the first pass, assume the worst case */
if (!ctx->image)
ctx->idx += 4;
else
emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
emit_call((const u64)__bpf_tramp_enter, ctx); emit_call((const u64)__bpf_tramp_enter, ctx);
} }
@@ -2000,7 +2053,11 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
if (flags & BPF_TRAMP_F_CALL_ORIG) { if (flags & BPF_TRAMP_F_CALL_ORIG) {
im->ip_epilogue = ctx->image + ctx->idx; im->ip_epilogue = ctx->image + ctx->idx;
emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); /* for the first pass, assume the worst case */
if (!ctx->image)
ctx->idx += 4;
else
emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
emit_call((const u64)__bpf_tramp_exit, ctx); emit_call((const u64)__bpf_tramp_exit, ctx);
} }

View File

@@ -43,7 +43,7 @@ endif
ifdef CONFIG_64BIT ifdef CONFIG_64BIT
ld-emul = $(64bit-emul) ld-emul = $(64bit-emul)
cflags-y += -mabi=lp64s cflags-y += -mabi=lp64s -mcmodel=normal
endif endif
cflags-y += -pipe -msoft-float cflags-y += -pipe -msoft-float

View File

@@ -55,7 +55,7 @@ static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long v
/* Query offset/name of register from its name/offset */ /* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name); extern int regs_query_register_offset(const char *name);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) #define MAX_REG_OFFSET (offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/** /**
* regs_get_register() - get register value from its offset * regs_get_register() - get register value from its offset

View File

@@ -15,7 +15,6 @@ typedef u32 uprobe_opcode_t;
#define UPROBE_XOLBP_INSN larch_insn_gen_break(BRK_UPROBE_XOLBP) #define UPROBE_XOLBP_INSN larch_insn_gen_break(BRK_UPROBE_XOLBP)
struct arch_uprobe { struct arch_uprobe {
unsigned long resume_era;
u32 insn[2]; u32 insn[2];
u32 ixol[2]; u32 ixol[2];
bool simulate; bool simulate;

View File

@@ -18,11 +18,28 @@ static unsigned int euen_mask = CSR_EUEN_FPEN;
static DEFINE_PER_CPU(bool, in_kernel_fpu); static DEFINE_PER_CPU(bool, in_kernel_fpu);
static DEFINE_PER_CPU(unsigned int, euen_current); static DEFINE_PER_CPU(unsigned int, euen_current);
static inline void fpregs_lock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_disable();
else
local_bh_disable();
}
static inline void fpregs_unlock(void)
{
if (IS_ENABLED(CONFIG_PREEMPT_RT))
preempt_enable();
else
local_bh_enable();
}
void kernel_fpu_begin(void) void kernel_fpu_begin(void)
{ {
unsigned int *euen_curr; unsigned int *euen_curr;
preempt_disable(); if (!irqs_disabled())
fpregs_lock();
WARN_ON(this_cpu_read(in_kernel_fpu)); WARN_ON(this_cpu_read(in_kernel_fpu));
@@ -73,7 +90,8 @@ void kernel_fpu_end(void)
this_cpu_write(in_kernel_fpu, false); this_cpu_write(in_kernel_fpu, false);
preempt_enable(); if (!irqs_disabled())
fpregs_unlock();
} }
EXPORT_SYMBOL_GPL(kernel_fpu_end); EXPORT_SYMBOL_GPL(kernel_fpu_end);

View File

@@ -110,7 +110,7 @@ static unsigned long __init get_loops_per_jiffy(void)
return lpj; return lpj;
} }
static long init_offset __nosavedata; static long init_offset;
void save_counter(void) void save_counter(void)
{ {

View File

@@ -42,7 +42,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
utask->autask.saved_trap_nr = current->thread.trap_nr; utask->autask.saved_trap_nr = current->thread.trap_nr;
current->thread.trap_nr = UPROBE_TRAP_NR; current->thread.trap_nr = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr); instruction_pointer_set(regs, utask->xol_vaddr);
user_enable_single_step(current);
return 0; return 0;
} }
@@ -53,13 +52,7 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
current->thread.trap_nr = utask->autask.saved_trap_nr; current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
if (auprobe->simulate)
instruction_pointer_set(regs, auprobe->resume_era);
else
instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
user_disable_single_step(current);
return 0; return 0;
} }
@@ -70,7 +63,6 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
current->thread.trap_nr = utask->autask.saved_trap_nr; current->thread.trap_nr = utask->autask.saved_trap_nr;
instruction_pointer_set(regs, utask->vaddr); instruction_pointer_set(regs, utask->vaddr);
user_disable_single_step(current);
} }
bool arch_uprobe_xol_was_trapped(struct task_struct *t) bool arch_uprobe_xol_was_trapped(struct task_struct *t)
@@ -90,7 +82,6 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
insn.word = auprobe->insn[0]; insn.word = auprobe->insn[0];
arch_simulate_insn(insn, regs); arch_simulate_insn(insn, regs);
auprobe->resume_era = regs->csr_era;
return true; return true;
} }

View File

@@ -2,6 +2,7 @@
#include <asm/fpu.h> #include <asm/fpu.h>
#include <asm/loongson.h> #include <asm/loongson.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/time.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <linux/suspend.h> #include <linux/suspend.h>
@@ -14,6 +15,7 @@ struct pt_regs saved_regs;
void save_processor_state(void) void save_processor_state(void)
{ {
save_counter();
saved_crmd = csr_read32(LOONGARCH_CSR_CRMD); saved_crmd = csr_read32(LOONGARCH_CSR_CRMD);
saved_prmd = csr_read32(LOONGARCH_CSR_PRMD); saved_prmd = csr_read32(LOONGARCH_CSR_PRMD);
saved_euen = csr_read32(LOONGARCH_CSR_EUEN); saved_euen = csr_read32(LOONGARCH_CSR_EUEN);
@@ -26,6 +28,7 @@ void save_processor_state(void)
void restore_processor_state(void) void restore_processor_state(void)
{ {
sync_counter();
csr_write32(saved_crmd, LOONGARCH_CSR_CRMD); csr_write32(saved_crmd, LOONGARCH_CSR_CRMD);
csr_write32(saved_prmd, LOONGARCH_CSR_PRMD); csr_write32(saved_prmd, LOONGARCH_CSR_PRMD);
csr_write32(saved_euen, LOONGARCH_CSR_EUEN); csr_write32(saved_euen, LOONGARCH_CSR_EUEN);

View File

@@ -87,4 +87,20 @@ struct dyn_arch_ftrace {
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FTRACE_SYSCALLS
#ifndef __ASSEMBLY__
/*
* Some syscall entry functions on mips start with "__sys_" (fork and clone,
* for instance). We should also match the sys_ variant with those.
*/
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym,
const char *name)
{
return !strcmp(sym, name) ||
(!strncmp(sym, "__sys_", 6) && !strcmp(sym + 6, name + 4));
}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FTRACE_SYSCALLS */
#endif /* _ASM_MIPS_FTRACE_H */ #endif /* _ASM_MIPS_FTRACE_H */

View File

@@ -65,7 +65,8 @@ static inline void instruction_pointer_set(struct pt_regs *regs,
/* Query offset/name of register from its name/offset */ /* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name); extern int regs_query_register_offset(const char *name);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) #define MAX_REG_OFFSET \
(offsetof(struct pt_regs, __last) - sizeof(unsigned long))
/** /**
* regs_get_register() - get register value from its offset * regs_get_register() - get register value from its offset

View File

@@ -56,10 +56,7 @@ static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
/* Indicates online CPUs coupled with the current CPU */ /* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
/* /* Used to synchronize entry to deep idle states */
* Used to synchronize entry to deep idle states. Actually per-core rather
* than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
/* Saved CPU state across the CPS_PM_POWER_GATED state */ /* Saved CPU state across the CPS_PM_POWER_GATED state */
@@ -118,9 +115,10 @@ int cps_pm_enter_state(enum cps_pm_state state)
cps_nc_entry_fn entry; cps_nc_entry_fn entry;
struct core_boot_config *core_cfg; struct core_boot_config *core_cfg;
struct vpe_boot_config *vpe_cfg; struct vpe_boot_config *vpe_cfg;
atomic_t *barrier;
/* Check that there is an entry function for this state */ /* Check that there is an entry function for this state */
entry = per_cpu(nc_asm_enter, core)[state]; entry = per_cpu(nc_asm_enter, cpu)[state];
if (!entry) if (!entry)
return -EINVAL; return -EINVAL;
@@ -156,7 +154,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
smp_mb__after_atomic(); smp_mb__after_atomic();
/* Create a non-coherent mapping of the core ready_count */ /* Create a non-coherent mapping of the core ready_count */
core_ready_count = per_cpu(ready_count, core); core_ready_count = per_cpu(ready_count, cpu);
nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
(unsigned long)core_ready_count); (unsigned long)core_ready_count);
nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
@@ -164,7 +162,8 @@ int cps_pm_enter_state(enum cps_pm_state state)
/* Ensure ready_count is zero-initialised before the assembly runs */ /* Ensure ready_count is zero-initialised before the assembly runs */
WRITE_ONCE(*nc_core_ready_count, 0); WRITE_ONCE(*nc_core_ready_count, 0);
coupled_barrier(&per_cpu(pm_barrier, core), online); barrier = &per_cpu(pm_barrier, cpumask_first(&cpu_sibling_map[cpu]));
coupled_barrier(barrier, online);
/* Run the generated entry code */ /* Run the generated entry code */
left = entry(online, nc_core_ready_count); left = entry(online, nc_core_ready_count);
@@ -635,12 +634,14 @@ out_err:
static int cps_pm_online_cpu(unsigned int cpu) static int cps_pm_online_cpu(unsigned int cpu)
{ {
enum cps_pm_state state; unsigned int sibling, core;
unsigned core = cpu_core(&cpu_data[cpu]);
void *entry_fn, *core_rc; void *entry_fn, *core_rc;
enum cps_pm_state state;
core = cpu_core(&cpu_data[cpu]);
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
if (per_cpu(nc_asm_enter, core)[state]) if (per_cpu(nc_asm_enter, cpu)[state])
continue; continue;
if (!test_bit(state, state_support)) if (!test_bit(state, state_support))
continue; continue;
@@ -652,16 +653,19 @@ static int cps_pm_online_cpu(unsigned int cpu)
clear_bit(state, state_support); clear_bit(state, state_support);
} }
per_cpu(nc_asm_enter, core)[state] = entry_fn; for_each_cpu(sibling, &cpu_sibling_map[cpu])
per_cpu(nc_asm_enter, sibling)[state] = entry_fn;
} }
if (!per_cpu(ready_count, core)) { if (!per_cpu(ready_count, cpu)) {
core_rc = kmalloc(sizeof(u32), GFP_KERNEL); core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) { if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core); pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM; return -ENOMEM;
} }
per_cpu(ready_count, core) = core_rc;
for_each_cpu(sibling, &cpu_sibling_map[cpu])
per_cpu(ready_count, sibling) = core_rc;
} }
return 0; return 0;

View File

@@ -103,9 +103,19 @@ handle_fpe(struct pt_regs *regs)
memcpy(regs->fr, frcopy, sizeof regs->fr); memcpy(regs->fr, frcopy, sizeof regs->fr);
if (signalcode != 0) { if (signalcode != 0) {
force_sig_fault(signalcode >> 24, signalcode & 0xffffff, int sig = signalcode >> 24;
(void __user *) regs->iaoq[0]);
return -1; if (sig == SIGFPE) {
/*
* Clear floating point trap bit to avoid trapping
* again on the first floating-point instruction in
* the userspace signal handler.
*/
regs->fr[0] &= ~(1ULL << 38);
}
force_sig_fault(sig, signalcode & 0xffffff,
(void __user *) regs->iaoq[0]);
return -1;
} }
return signalcode ? -1 : 0; return signalcode ? -1 : 0;

View File

@@ -234,10 +234,8 @@ fi
# suppress some warnings in recent ld versions # suppress some warnings in recent ld versions
nowarn="-z noexecstack" nowarn="-z noexecstack"
if ! ld_is_lld; then if "${CROSS}ld" -v --no-warn-rwx-segments >/dev/null 2>&1; then
if [ "$LD_VERSION" -ge "$(echo 2.39 | ld_version)" ]; then nowarn="$nowarn --no-warn-rwx-segments"
nowarn="$nowarn --no-warn-rwx-segments"
fi
fi fi
platformo=$object/"$platform".o platformo=$object/"$platform".o

View File

@@ -35,6 +35,7 @@ extern cpumask_var_t node_to_cpumask_map[];
#ifdef CONFIG_MEMORY_HOTPLUG #ifdef CONFIG_MEMORY_HOTPLUG
extern unsigned long max_pfn; extern unsigned long max_pfn;
u64 memory_hotplug_max(void); u64 memory_hotplug_max(void);
u64 hot_add_drconf_memory_max(void);
#else #else
#define memory_hotplug_max() memblock_end_of_DRAM() #define memory_hotplug_max() memblock_end_of_DRAM()
#endif #endif

View File

@@ -2974,11 +2974,11 @@ static void __init fixup_device_tree_pmac(void)
char type[8]; char type[8];
phandle node; phandle node;
// Some pmacs are missing #size-cells on escc nodes // Some pmacs are missing #size-cells on escc or i2s nodes
for (node = 0; prom_next_node(&node); ) { for (node = 0; prom_next_node(&node); ) {
type[0] = '\0'; type[0] = '\0';
prom_getprop(node, "device_type", type, sizeof(type)); prom_getprop(node, "device_type", type, sizeof(type));
if (prom_strcmp(type, "escc")) if (prom_strcmp(type, "escc") && prom_strcmp(type, "i2s"))
continue; continue;
if (prom_getproplen(node, "#size-cells") != PROM_ERROR) if (prom_getproplen(node, "#size-cells") != PROM_ERROR)

View File

@@ -912,7 +912,7 @@ int __meminit radix__vmemmap_create_mapping(unsigned long start,
return 0; return 0;
} }
#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap) bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
{ {
if (radix_enabled()) if (radix_enabled())
@@ -920,6 +920,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
return false; return false;
} }
#endif
int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
unsigned long addr, unsigned long next) unsigned long addr, unsigned long next)
@@ -1056,6 +1057,19 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
pmd_t *pmd; pmd_t *pmd;
pte_t *pte; pte_t *pte;
/*
* Make sure we align the start vmemmap addr so that we calculate
* the correct start_pfn in altmap boundary check to decided whether
* we should use altmap or RAM based backing memory allocation. Also
* the address need to be aligned for set_pte operation.
* If the start addr is already PMD_SIZE aligned we will try to use
* a pmd mapping. We don't want to be too aggressive here beacause
* that will cause more allocations in RAM. So only if the namespace
* vmemmap start addr is PMD_SIZE aligned we will use PMD mapping.
*/
start = ALIGN_DOWN(start, PAGE_SIZE);
for (addr = start; addr < end; addr = next) { for (addr = start; addr < end; addr = next) {
next = pmd_addr_end(addr, end); next = pmd_addr_end(addr, end);
@@ -1081,8 +1095,8 @@ int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, in
* in altmap block allocation failures, in which case * in altmap block allocation failures, in which case
* we fallback to RAM for vmemmap allocation. * we fallback to RAM for vmemmap allocation.
*/ */
if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) || if (!IS_ALIGNED(addr, PMD_SIZE) || (altmap &&
altmap_cross_boundary(altmap, addr, PMD_SIZE))) { altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
/* /*
* make sure we don't create altmap mappings * make sure we don't create altmap mappings
* covering things outside the device. * covering things outside the device.

View File

@@ -1342,7 +1342,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
return nid; return nid;
} }
static u64 hot_add_drconf_memory_max(void) u64 hot_add_drconf_memory_max(void)
{ {
struct device_node *memory = NULL; struct device_node *memory = NULL;
struct device_node *dn = NULL; struct device_node *dn = NULL;

View File

@@ -2229,6 +2229,10 @@ static struct pmu power_pmu = {
#define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \ #define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
PERF_SAMPLE_PHYS_ADDR | \ PERF_SAMPLE_PHYS_ADDR | \
PERF_SAMPLE_DATA_PAGE_SIZE) PERF_SAMPLE_DATA_PAGE_SIZE)
#define SIER_TYPE_SHIFT 15
#define SIER_TYPE_MASK (0x7ull << SIER_TYPE_SHIFT)
/* /*
* A counter has overflowed; update its count and record * A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled * things if requested. Note that interrupts are hard-disabled
@@ -2297,6 +2301,22 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
is_kernel_addr(mfspr(SPRN_SIAR))) is_kernel_addr(mfspr(SPRN_SIAR)))
record = 0; record = 0;
/*
* SIER[46-48] presents instruction type of the sampled instruction.
* In ISA v3.0 and before values "0" and "7" are considered reserved.
* In ISA v3.1, value "7" has been used to indicate "larx/stcx".
* Drop the sample if "type" has reserved values for this field with a
* ISA version check.
*/
if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC &&
ppmu->get_mem_data_src) {
val = (regs->dar & SIER_TYPE_MASK) >> SIER_TYPE_SHIFT;
if (val == 0 || (val == 7 && !cpu_has_feature(CPU_FTR_ARCH_31))) {
record = 0;
atomic64_inc(&event->lost_samples);
}
}
/* /*
* Finally record data if requested. * Finally record data if requested.
*/ */

View File

@@ -321,8 +321,10 @@ void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
sier = mfspr(SPRN_SIER); sier = mfspr(SPRN_SIER);
val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT; val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) if (val != 1 && val != 2 && !(val == 7 && cpu_has_feature(CPU_FTR_ARCH_31))) {
dsrc->val = 0;
return; return;
}
idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT; idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT; sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;

View File

@@ -1183,17 +1183,13 @@ static LIST_HEAD(failed_ddw_pdn_list);
static phys_addr_t ddw_memory_hotplug_max(void) static phys_addr_t ddw_memory_hotplug_max(void)
{ {
resource_size_t max_addr = memory_hotplug_max(); resource_size_t max_addr;
struct device_node *memory;
for_each_node_by_type(memory, "memory") { #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
struct resource res; max_addr = hot_add_drconf_memory_max();
#else
if (of_address_to_resource(memory, 0, &res)) max_addr = memblock_end_of_DRAM();
continue; #endif
max_addr = max_t(resource_size_t, max_addr, res.end + 1);
}
return max_addr; return max_addr;
} }
@@ -1471,7 +1467,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
window->direct = true; window->direct = true;
/* DDW maps the whole partition, so enable direct DMA mapping */ /* DDW maps the whole partition, so enable direct DMA mapping */
ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT,
win64->value, tce_setrange_multi_pSeriesLP_walk); win64->value, tce_setrange_multi_pSeriesLP_walk);
if (ret) { if (ret) {
dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n", dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
@@ -1658,11 +1654,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
struct memory_notify *arg = data; struct memory_notify *arg = data;
int ret = 0; int ret = 0;
/* This notifier can get called when onlining persistent memory as well.
* TCEs are not pre-mapped for persistent memory. Persistent memory will
* always be above ddw_memory_hotplug_max()
*/
switch (action) { switch (action) {
case MEM_GOING_ONLINE: case MEM_GOING_ONLINE:
spin_lock(&dma_win_list_lock); spin_lock(&dma_win_list_lock);
list_for_each_entry(window, &dma_win_list, list) { list_for_each_entry(window, &dma_win_list, list) {
if (window->direct) { if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
ddw_memory_hotplug_max()) {
ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
arg->nr_pages, window->prop); arg->nr_pages, window->prop);
} }
@@ -1674,7 +1676,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
case MEM_OFFLINE: case MEM_OFFLINE:
spin_lock(&dma_win_list_lock); spin_lock(&dma_win_list_lock);
list_for_each_entry(window, &dma_win_list, list) { list_for_each_entry(window, &dma_win_list, list) {
if (window->direct) { if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
ddw_memory_hotplug_max()) {
ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
arg->nr_pages, window->prop); arg->nr_pages, window->prop);
} }

View File

@@ -26,12 +26,9 @@
* When not using MMU this corresponds to the first free page in * When not using MMU this corresponds to the first free page in
* physical memory (aligned on a page boundary). * physical memory (aligned on a page boundary).
*/ */
#ifdef CONFIG_64BIT
#ifdef CONFIG_MMU #ifdef CONFIG_MMU
#ifdef CONFIG_64BIT
#define PAGE_OFFSET kernel_map.page_offset #define PAGE_OFFSET kernel_map.page_offset
#else
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#endif
/* /*
* By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
* define the PAGE_OFFSET value for SV48 and SV39. * define the PAGE_OFFSET value for SV48 and SV39.
@@ -41,6 +38,9 @@
#else #else
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) #define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
#else
#define PAGE_OFFSET ((unsigned long)phys_ram_base)
#endif /* CONFIG_MMU */
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
@@ -97,11 +97,7 @@ typedef struct page *pgtable_t;
#define MIN_MEMBLOCK_ADDR 0 #define MIN_MEMBLOCK_ADDR 0
#endif #endif
#ifdef CONFIG_MMU
#define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base)) #define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base))
#else
#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
#endif /* CONFIG_MMU */
struct kernel_mapping { struct kernel_mapping {
unsigned long page_offset; unsigned long page_offset;

View File

@@ -9,7 +9,7 @@
int patch_insn_write(void *addr, const void *insn, size_t len); int patch_insn_write(void *addr, const void *insn, size_t len);
int patch_text_nosync(void *addr, const void *insns, size_t len); int patch_text_nosync(void *addr, const void *insns, size_t len);
int patch_text_set_nosync(void *addr, u8 c, size_t len); int patch_text_set_nosync(void *addr, u8 c, size_t len);
int patch_text(void *addr, u32 *insns, int ninsns); int patch_text(void *addr, u32 *insns, size_t len);
extern int riscv_patch_in_stop_machine; extern int riscv_patch_in_stop_machine;

View File

@@ -12,7 +12,7 @@
#include <asm/pgtable-bits.h> #include <asm/pgtable-bits.h>
#ifndef CONFIG_MMU #ifndef CONFIG_MMU
#define KERNEL_LINK_ADDR PAGE_OFFSET #define KERNEL_LINK_ADDR _AC(CONFIG_PAGE_OFFSET, UL)
#define KERN_VIRT_SIZE (UL(-1)) #define KERN_VIRT_SIZE (UL(-1))
#else #else

View File

@@ -19,7 +19,7 @@
struct patch_insn { struct patch_insn {
void *addr; void *addr;
u32 *insns; u32 *insns;
int ninsns; size_t len;
atomic_t cpu_count; atomic_t cpu_count;
}; };
@@ -234,14 +234,10 @@ NOKPROBE_SYMBOL(patch_text_nosync);
static int patch_text_cb(void *data) static int patch_text_cb(void *data)
{ {
struct patch_insn *patch = data; struct patch_insn *patch = data;
unsigned long len; int ret = 0;
int i, ret = 0;
if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) { if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
for (i = 0; ret == 0 && i < patch->ninsns; i++) { ret = patch_insn_write(patch->addr, patch->insns, patch->len);
len = GET_INSN_LENGTH(patch->insns[i]);
ret = patch_insn_write(patch->addr + i * len, &patch->insns[i], len);
}
/* /*
* Make sure the patching store is effective *before* we * Make sure the patching store is effective *before* we
* increment the counter which releases all waiting CPUs * increment the counter which releases all waiting CPUs
@@ -262,13 +258,13 @@ static int patch_text_cb(void *data)
} }
NOKPROBE_SYMBOL(patch_text_cb); NOKPROBE_SYMBOL(patch_text_cb);
int patch_text(void *addr, u32 *insns, int ninsns) int patch_text(void *addr, u32 *insns, size_t len)
{ {
int ret; int ret;
struct patch_insn patch = { struct patch_insn patch = {
.addr = addr, .addr = addr,
.insns = insns, .insns = insns,
.ninsns = ninsns, .len = len,
.cpu_count = ATOMIC_INIT(0), .cpu_count = ATOMIC_INIT(0),
}; };

View File

@@ -23,13 +23,13 @@ post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);
static void __kprobes arch_prepare_ss_slot(struct kprobe *p) static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{ {
size_t len = GET_INSN_LENGTH(p->opcode);
u32 insn = __BUG_INSN_32; u32 insn = __BUG_INSN_32;
unsigned long offset = GET_INSN_LENGTH(p->opcode);
p->ainsn.api.restore = (unsigned long)p->addr + offset; p->ainsn.api.restore = (unsigned long)p->addr + len;
patch_text_nosync(p->ainsn.api.insn, &p->opcode, 1); patch_text_nosync(p->ainsn.api.insn, &p->opcode, len);
patch_text_nosync((void *)p->ainsn.api.insn + offset, &insn, 1); patch_text_nosync((void *)p->ainsn.api.insn + len, &insn, GET_INSN_LENGTH(insn));
} }
static void __kprobes arch_prepare_simulate(struct kprobe *p) static void __kprobes arch_prepare_simulate(struct kprobe *p)
@@ -116,16 +116,18 @@ void *alloc_insn_page(void)
/* install breakpoint in text */ /* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p) void __kprobes arch_arm_kprobe(struct kprobe *p)
{ {
u32 insn = (p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32 ? size_t len = GET_INSN_LENGTH(p->opcode);
__BUG_INSN_32 : __BUG_INSN_16; u32 insn = len == 4 ? __BUG_INSN_32 : __BUG_INSN_16;
patch_text(p->addr, &insn, 1); patch_text(p->addr, &insn, len);
} }
/* remove breakpoint from text */ /* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_disarm_kprobe(struct kprobe *p)
{ {
patch_text(p->addr, &p->opcode, 1); size_t len = GET_INSN_LENGTH(p->opcode);
patch_text(p->addr, &p->opcode, len);
} }
void __kprobes arch_remove_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p)

View File

@@ -14,6 +14,7 @@
#include "bpf_jit.h" #include "bpf_jit.h"
#define RV_FENTRY_NINSNS 2 #define RV_FENTRY_NINSNS 2
#define RV_FENTRY_NBYTES (RV_FENTRY_NINSNS * 4)
#define RV_REG_TCC RV_REG_A6 #define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */ #define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */
@@ -681,7 +682,7 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
if (ret) if (ret)
return ret; return ret;
if (memcmp(ip, old_insns, RV_FENTRY_NINSNS * 4)) if (memcmp(ip, old_insns, RV_FENTRY_NBYTES))
return -EFAULT; return -EFAULT;
ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call); ret = gen_jump_or_nops(new_addr, ip, new_insns, is_call);
@@ -690,8 +691,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
cpus_read_lock(); cpus_read_lock();
mutex_lock(&text_mutex); mutex_lock(&text_mutex);
if (memcmp(ip, new_insns, RV_FENTRY_NINSNS * 4)) if (memcmp(ip, new_insns, RV_FENTRY_NBYTES))
ret = patch_text(ip, new_insns, RV_FENTRY_NINSNS); ret = patch_text(ip, new_insns, RV_FENTRY_NBYTES);
mutex_unlock(&text_mutex); mutex_unlock(&text_mutex);
cpus_read_unlock(); cpus_read_unlock();

View File

@@ -208,6 +208,8 @@ static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(), snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_get_info_type(),
cpu_info)); cpu_info));
cpu_dir = hypfs_mkdir(cpus_dir, buffer); cpu_dir = hypfs_mkdir(cpus_dir, buffer);
if (IS_ERR(cpu_dir))
return PTR_ERR(cpu_dir);
rc = hypfs_create_u64(cpu_dir, "mgmtime", rc = hypfs_create_u64(cpu_dir, "mgmtime",
cpu_info__acc_time(diag204_get_info_type(), cpu_info) - cpu_info__acc_time(diag204_get_info_type(), cpu_info) -
cpu_info__lp_time(diag204_get_info_type(), cpu_info)); cpu_info__lp_time(diag204_get_info_type(), cpu_info));

View File

@@ -68,6 +68,7 @@ void __init mem_init(void)
map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0); map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
memblock_free((void *)brk_end, uml_reserved - brk_end); memblock_free((void *)brk_end, uml_reserved - brk_end);
uml_reserved = brk_end; uml_reserved = brk_end;
min_low_pfn = PFN_UP(__pa(uml_reserved));
/* this will put all low memory onto the freelists */ /* this will put all low memory onto the freelists */
memblock_free_all(); memblock_free_all();

View File

@@ -2610,6 +2610,17 @@ config MITIGATION_SPECTRE_BHI
indirect branches. indirect branches.
See <file:Documentation/admin-guide/hw-vuln/spectre.rst> See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
config MITIGATION_ITS
bool "Enable Indirect Target Selection mitigation"
depends on CPU_SUP_INTEL && X86_64
depends on RETPOLINE && RETHUNK
default y
help
Enable Indirect Target Selection (ITS) mitigation. ITS is a bug in
BPU on some Intel CPUs that may allow Spectre V2 style attacks. If
disabled, mitigation cannot be enabled via cmdline.
See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
endif endif
config ARCH_HAS_ADD_PAGES config ARCH_HAS_ADD_PAGES

View File

@@ -43,7 +43,7 @@ endif
# How to compile the 16-bit code. Note we always compile for -march=i386; # How to compile the 16-bit code. Note we always compile for -march=i386;
# that way we can complain to the user if the CPU is insufficient. # that way we can complain to the user if the CPU is insufficient.
REALMODE_CFLAGS := -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \ REALMODE_CFLAGS := -std=gnu11 -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
-mno-mmx -mno-sse $(call cc-option,-fcf-protection=none) -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)

View File

@@ -22,6 +22,7 @@
# This script requires: # This script requires:
# bash # bash
# syslinux # syslinux
# genisoimage
# mtools (for fdimage* and hdimage) # mtools (for fdimage* and hdimage)
# edk2/OVMF (for hdimage) # edk2/OVMF (for hdimage)
# #
@@ -251,7 +252,9 @@ geniso() {
cp "$isolinux" "$ldlinux" "$tmp_dir" cp "$isolinux" "$ldlinux" "$tmp_dir"
cp "$FBZIMAGE" "$tmp_dir"/linux cp "$FBZIMAGE" "$tmp_dir"/linux
echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg echo default linux "$KCMDLINE" > "$tmp_dir"/isolinux.cfg
cp "${FDINITRDS[@]}" "$tmp_dir"/ if [ ${#FDINITRDS[@]} -gt 0 ]; then
cp "${FDINITRDS[@]}" "$tmp_dir"/
fi
genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \ genisoimage -J -r -appid 'LINUX_BOOT' -input-charset=utf-8 \
-quiet -o "$FIMAGE" -b isolinux.bin \ -quiet -o "$FIMAGE" -b isolinux.bin \
-c boot.cat -no-emul-boot -boot-load-size 4 \ -c boot.cat -no-emul-boot -boot-load-size 4 \

View File

@@ -59,7 +59,7 @@ EXPORT_SYMBOL_GPL(mds_verw_sel);
* entirely in the C code, and use an alias emitted by the linker script * entirely in the C code, and use an alias emitted by the linker script
* instead. * instead.
*/ */
#ifdef CONFIG_STACKPROTECTOR #if defined(CONFIG_STACKPROTECTOR) && defined(CONFIG_SMP)
EXPORT_SYMBOL(__ref_stack_chk_guard); EXPORT_SYMBOL(__ref_stack_chk_guard);
#endif #endif
#endif #endif

View File

@@ -1569,7 +1569,9 @@ SYM_CODE_END(rewind_stack_and_make_dead)
* ORC to unwind properly. * ORC to unwind properly.
* *
* The alignment is for performance and not for safety, and may be safely * The alignment is for performance and not for safety, and may be safely
* refactored in the future if needed. * refactored in the future if needed. The .skips are for safety, to ensure
* that all RETs are in the second half of a cacheline to mitigate Indirect
* Target Selection, rather than taking the slowpath via its_return_thunk.
*/ */
SYM_FUNC_START(clear_bhb_loop) SYM_FUNC_START(clear_bhb_loop)
push %rbp push %rbp
@@ -1579,10 +1581,22 @@ SYM_FUNC_START(clear_bhb_loop)
call 1f call 1f
jmp 5f jmp 5f
.align 64, 0xcc .align 64, 0xcc
/*
* Shift instructions so that the RET is in the upper half of the
* cacheline and don't take the slowpath to its_return_thunk.
*/
.skip 32 - (.Lret1 - 1f), 0xcc
ANNOTATE_INTRA_FUNCTION_CALL ANNOTATE_INTRA_FUNCTION_CALL
1: call 2f 1: call 2f
RET .Lret1: RET
.align 64, 0xcc .align 64, 0xcc
/*
* As above shift instructions for RET at .Lret2 as well.
*
* This should be ideally be: .skip 32 - (.Lret2 - 2f), 0xcc
* but some Clang versions (e.g. 18) don't like this.
*/
.skip 32 - 18, 0xcc
2: movl $5, %eax 2: movl $5, %eax
3: jmp 4f 3: jmp 4f
nop nop
@@ -1590,7 +1604,7 @@ SYM_FUNC_START(clear_bhb_loop)
jnz 3b jnz 3b
sub $1, %ecx sub $1, %ecx
jnz 1b jnz 1b
RET .Lret2: RET
5: lfence 5: lfence
pop %rbp pop %rbp
RET RET

View File

@@ -272,7 +272,7 @@ static int perf_ibs_init(struct perf_event *event)
{ {
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc = &event->hw;
struct perf_ibs *perf_ibs; struct perf_ibs *perf_ibs;
u64 max_cnt, config; u64 config;
int ret; int ret;
perf_ibs = get_ibs_pmu(event->attr.type); perf_ibs = get_ibs_pmu(event->attr.type);
@@ -306,10 +306,19 @@ static int perf_ibs_init(struct perf_event *event)
if (!hwc->sample_period) if (!hwc->sample_period)
hwc->sample_period = 0x10; hwc->sample_period = 0x10;
} else { } else {
max_cnt = config & perf_ibs->cnt_mask; u64 period = 0;
if (perf_ibs == &perf_ibs_op) {
period = (config & IBS_OP_MAX_CNT) << 4;
if (ibs_caps & IBS_CAPS_OPCNTEXT)
period |= config & IBS_OP_MAX_CNT_EXT_MASK;
} else {
period = (config & IBS_FETCH_MAX_CNT) << 4;
}
config &= ~perf_ibs->cnt_mask; config &= ~perf_ibs->cnt_mask;
event->attr.sample_period = max_cnt << 4; event->attr.sample_period = period;
hwc->sample_period = event->attr.sample_period; hwc->sample_period = period;
} }
if (!hwc->sample_period) if (!hwc->sample_period)
@@ -1219,7 +1228,8 @@ static __init int perf_ibs_op_init(void)
if (ibs_caps & IBS_CAPS_OPCNTEXT) { if (ibs_caps & IBS_CAPS_OPCNTEXT) {
perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK; perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK;
perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK; perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK;
perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK; perf_ibs_op.cnt_mask |= (IBS_OP_MAX_CNT_EXT_MASK |
IBS_OP_CUR_CNT_EXT_MASK);
} }
if (ibs_caps & IBS_CAPS_ZEN4) if (ibs_caps & IBS_CAPS_ZEN4)

View File

@@ -4206,7 +4206,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
arr[pebs_enable] = (struct perf_guest_switch_msr){ arr[pebs_enable] = (struct perf_guest_switch_msr){
.msr = MSR_IA32_PEBS_ENABLE, .msr = MSR_IA32_PEBS_ENABLE,
.host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,
.guest = pebs_mask & ~cpuc->intel_ctrl_host_mask, .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask & kvm_pmu->pebs_enable,
}; };
if (arr[pebs_enable].host) { if (arr[pebs_enable].host) {

View File

@@ -5,6 +5,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/stringify.h> #include <linux/stringify.h>
#include <asm/asm.h> #include <asm/asm.h>
#include <asm/bug.h>
#define ALT_FLAGS_SHIFT 16 #define ALT_FLAGS_SHIFT 16
@@ -130,6 +131,37 @@ static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
} }
#endif #endif
#ifdef CONFIG_MITIGATION_ITS
extern void its_init_mod(struct module *mod);
extern void its_fini_mod(struct module *mod);
extern void its_free_mod(struct module *mod);
extern u8 *its_static_thunk(int reg);
#else /* CONFIG_MITIGATION_ITS */
static inline void its_init_mod(struct module *mod) { }
static inline void its_fini_mod(struct module *mod) { }
static inline void its_free_mod(struct module *mod) { }
static inline u8 *its_static_thunk(int reg)
{
WARN_ONCE(1, "ITS not compiled in");
return NULL;
}
#endif
#if defined(CONFIG_RETHUNK) && defined(CONFIG_OBJTOOL)
extern bool cpu_wants_rethunk(void);
extern bool cpu_wants_rethunk_at(void *addr);
#else
static __always_inline bool cpu_wants_rethunk(void)
{
return false;
}
static __always_inline bool cpu_wants_rethunk_at(void *addr)
{
return false;
}
#endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
extern void alternatives_smp_module_add(struct module *mod, char *name, extern void alternatives_smp_module_add(struct module *mod, char *name,
void *locks, void *locks_end, void *locks, void *locks_end,

View File

@@ -22,8 +22,9 @@
#define SECOND_BYTE_OPCODE_UD2 0x0b #define SECOND_BYTE_OPCODE_UD2 0x0b
#define BUG_NONE 0xffff #define BUG_NONE 0xffff
#define BUG_UD1 0xfffe #define BUG_UD2 0xfffe
#define BUG_UD2 0xfffd #define BUG_UD1 0xfffd
#define BUG_UD1_UBSAN 0xfffc
#ifdef CONFIG_GENERIC_BUG #ifdef CONFIG_GENERIC_BUG

View File

@@ -468,6 +468,7 @@
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */ #define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */ #define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */ #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 5) /* "" Use thunk for indirect branches in lower half of cacheline */
/* /*
* BUG word(s) * BUG word(s)
@@ -518,4 +519,6 @@
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */ #define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */ #define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */ #define X86_BUG_IBPB_NO_RET X86_BUG(1*32 + 4) /* "ibpb_no_ret" IBPB omits return target predictions */
#define X86_BUG_ITS X86_BUG(1*32 + 5) /* CPU is affected by Indirect Target Selection */
#define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 6) /* CPU is affected by ITS, VMX is not affected */
#endif /* _ASM_X86_CPUFEATURES_H */ #endif /* _ASM_X86_CPUFEATURES_H */

View File

@@ -41,7 +41,7 @@
_ASM_PTR fname "\n\t" \ _ASM_PTR fname "\n\t" \
".popsection\n\t" ".popsection\n\t"
static inline __attribute_const__ u32 gen_endbr(void) static __always_inline __attribute_const__ u32 gen_endbr(void)
{ {
u32 endbr; u32 endbr;
@@ -56,7 +56,7 @@ static inline __attribute_const__ u32 gen_endbr(void)
return endbr; return endbr;
} }
static inline __attribute_const__ u32 gen_endbr_poison(void) static __always_inline __attribute_const__ u32 gen_endbr_poison(void)
{ {
/* /*
* 4 byte NOP that isn't NOP4 (in fact it is OSP NOP3), such that it * 4 byte NOP that isn't NOP4 (in fact it is OSP NOP3), such that it

View File

@@ -48,6 +48,7 @@ KVM_X86_OP(set_idt)
KVM_X86_OP(get_gdt) KVM_X86_OP(get_gdt)
KVM_X86_OP(set_gdt) KVM_X86_OP(set_gdt)
KVM_X86_OP(sync_dirty_debug_regs) KVM_X86_OP(sync_dirty_debug_regs)
KVM_X86_OP(set_dr6)
KVM_X86_OP(set_dr7) KVM_X86_OP(set_dr7)
KVM_X86_OP(cache_reg) KVM_X86_OP(cache_reg)
KVM_X86_OP(get_rflags) KVM_X86_OP(get_rflags)

View File

@@ -1595,6 +1595,7 @@ struct kvm_x86_ops {
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu); void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value); void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);

View File

@@ -17,10 +17,12 @@ struct ucode_cpu_info {
void load_ucode_bsp(void); void load_ucode_bsp(void);
void load_ucode_ap(void); void load_ucode_ap(void);
void microcode_bsp_resume(void); void microcode_bsp_resume(void);
bool __init microcode_loader_disabled(void);
#else #else
static inline void load_ucode_bsp(void) { } static inline void load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { } static inline void load_ucode_ap(void) { }
static inline void microcode_bsp_resume(void) { } static inline void microcode_bsp_resume(void) { }
static inline bool __init microcode_loader_disabled(void) { return false; }
#endif #endif
extern unsigned long initrd_start_early; extern unsigned long initrd_start_early;

View File

@@ -180,6 +180,14 @@
* VERW clears CPU Register * VERW clears CPU Register
* File. * File.
*/ */
#define ARCH_CAP_ITS_NO BIT_ULL(62) /*
* Not susceptible to
* Indirect Target Selection.
* This bit is not set by
* HW, but is synthesized by
* VMMs for guests to know
* their affected status.
*/
#define ARCH_CAP_XAPIC_DISABLE BIT(21) /* #define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
* IA32_XAPIC_DISABLE_STATUS MSR * IA32_XAPIC_DISABLE_STATUS MSR

View File

@@ -59,6 +59,8 @@ int __register_nmi_handler(unsigned int, struct nmiaction *);
void unregister_nmi_handler(unsigned int, const char *); void unregister_nmi_handler(unsigned int, const char *);
void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler);
void stop_nmi(void); void stop_nmi(void);
void restart_nmi(void); void restart_nmi(void);
void local_touch_nmi(void); void local_touch_nmi(void);

View File

@@ -219,9 +219,8 @@
.endm .endm
/* /*
* Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call * Emits a conditional CS prefix that is compatible with
* to the retpoline thunk with a CS prefix when the register requires * -mindirect-branch-cs-prefix.
* a RAX prefix byte to encode. Also see apply_retpolines().
*/ */
.macro __CS_PREFIX reg:req .macro __CS_PREFIX reg:req
.irp rs,r8,r9,r10,r11,r12,r13,r14,r15 .irp rs,r8,r9,r10,r11,r12,r13,r14,r15
@@ -365,10 +364,14 @@
".long 999b\n\t" \ ".long 999b\n\t" \
".popsection\n\t" ".popsection\n\t"
#define ITS_THUNK_SIZE 64
typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE]; typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
typedef u8 its_thunk_t[ITS_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[]; extern retpoline_thunk_t __x86_indirect_thunk_array[];
extern retpoline_thunk_t __x86_indirect_call_thunk_array[]; extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
extern retpoline_thunk_t __x86_indirect_jump_thunk_array[]; extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
extern its_thunk_t __x86_indirect_its_thunk_array[];
#ifdef CONFIG_RETHUNK #ifdef CONFIG_RETHUNK
extern void __x86_return_thunk(void); extern void __x86_return_thunk(void);
@@ -392,6 +395,12 @@ static inline void srso_return_thunk(void) {}
static inline void srso_alias_return_thunk(void) {} static inline void srso_alias_return_thunk(void) {}
#endif #endif
#ifdef CONFIG_MITIGATION_ITS
extern void its_return_thunk(void);
#else
static inline void its_return_thunk(void) {}
#endif
extern void retbleed_return_thunk(void); extern void retbleed_return_thunk(void);
extern void srso_return_thunk(void); extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void); extern void srso_alias_return_thunk(void);
@@ -412,11 +421,6 @@ extern void (*x86_return_thunk)(void);
#ifdef CONFIG_CALL_DEPTH_TRACKING #ifdef CONFIG_CALL_DEPTH_TRACKING
extern void __x86_return_skl(void); extern void __x86_return_skl(void);
static inline void x86_set_skl_return_thunk(void)
{
x86_return_thunk = &__x86_return_skl;
}
#define CALL_DEPTH_ACCOUNT \ #define CALL_DEPTH_ACCOUNT \
ALTERNATIVE("", \ ALTERNATIVE("", \
__stringify(INCREMENT_CALL_DEPTH), \ __stringify(INCREMENT_CALL_DEPTH), \
@@ -429,7 +433,6 @@ DECLARE_PER_CPU(u64, __x86_stuffs_count);
DECLARE_PER_CPU(u64, __x86_ctxsw_count); DECLARE_PER_CPU(u64, __x86_ctxsw_count);
#endif #endif
#else #else
static inline void x86_set_skl_return_thunk(void) {}
#define CALL_DEPTH_ACCOUNT "" #define CALL_DEPTH_ACCOUNT ""
@@ -454,20 +457,23 @@ static inline void x86_set_skl_return_thunk(void) {}
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
/*
* Emits a conditional CS prefix that is compatible with
* -mindirect-branch-cs-prefix.
*/
#define __CS_PREFIX(reg) \
".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
".ifc \\rs," reg "\n" \
".byte 0x2e\n" \
".endif\n" \
".endr\n"
/* /*
* Inline asm uses the %V modifier which is only in newer GCC * Inline asm uses the %V modifier which is only in newer GCC
* which is ensured when CONFIG_RETPOLINE is defined. * which is ensured when CONFIG_RETPOLINE is defined.
*/ */
# define CALL_NOSPEC \ #define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \
ALTERNATIVE_2( \ "call __x86_indirect_thunk_%V[thunk_target]\n"
ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
"call __x86_indirect_thunk_%V[thunk_target]\n", \
X86_FEATURE_RETPOLINE, \
"lfence;\n" \
ANNOTATE_RETPOLINE_SAFE \
"call *%[thunk_target]\n", \
X86_FEATURE_RETPOLINE_LFENCE)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr) # define THUNK_TARGET(addr) [thunk_target] "r" (addr)

View File

@@ -501,6 +501,7 @@ struct pebs_xmm {
*/ */
#define IBS_OP_CUR_CNT (0xFFF80ULL<<32) #define IBS_OP_CUR_CNT (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32) #define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32)
#define IBS_OP_CUR_CNT_EXT_MASK (0x7FULL<<52)
#define IBS_OP_CNT_CTL (1ULL<<19) #define IBS_OP_CNT_CTL (1ULL<<19)
#define IBS_OP_VAL (1ULL<<18) #define IBS_OP_VAL (1ULL<<18)
#define IBS_OP_ENABLE (1ULL<<17) #define IBS_OP_ENABLE (1ULL<<17)

View File

@@ -18,6 +18,8 @@
#include <linux/mmu_context.h> #include <linux/mmu_context.h>
#include <linux/bsearch.h> #include <linux/bsearch.h>
#include <linux/sync_core.h> #include <linux/sync_core.h>
#include <linux/moduleloader.h>
#include <linux/cleanup.h>
#include <asm/text-patching.h> #include <asm/text-patching.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/sections.h> #include <asm/sections.h>
@@ -30,6 +32,8 @@
#include <asm/fixmap.h> #include <asm/fixmap.h>
#include <asm/paravirt.h> #include <asm/paravirt.h>
#include <asm/asm-prototypes.h> #include <asm/asm-prototypes.h>
#include <asm/cfi.h>
#include <asm/set_memory.h>
int __read_mostly alternatives_patched; int __read_mostly alternatives_patched;
@@ -123,6 +127,135 @@ const unsigned char * const x86_nops[ASM_NOP_MAX+1] =
#endif #endif
}; };
#ifdef CONFIG_MITIGATION_ITS
#ifdef CONFIG_MODULES
static struct module *its_mod;
static void *its_page;
static unsigned int its_offset;
/* Initialize a thunk with the "jmp *reg; int3" instructions. */
static void *its_init_thunk(void *thunk, int reg)
{
u8 *bytes = thunk;
int i = 0;
if (reg >= 8) {
bytes[i++] = 0x41; /* REX.B prefix */
reg -= 8;
}
bytes[i++] = 0xff;
bytes[i++] = 0xe0 + reg; /* jmp *reg */
bytes[i++] = 0xcc;
return thunk;
}
void its_init_mod(struct module *mod)
{
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return;
mutex_lock(&text_mutex);
its_mod = mod;
its_page = NULL;
}
void its_fini_mod(struct module *mod)
{
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return;
WARN_ON_ONCE(its_mod != mod);
its_mod = NULL;
its_page = NULL;
mutex_unlock(&text_mutex);
for (int i = 0; i < mod->its_num_pages; i++) {
void *page = mod->its_page_array[i];
set_memory_rox((unsigned long)page, 1);
}
}
void its_free_mod(struct module *mod)
{
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return;
for (int i = 0; i < mod->its_num_pages; i++) {
void *page = mod->its_page_array[i];
module_memfree(page);
}
kfree(mod->its_page_array);
}
DEFINE_FREE(its_execmem, void *, if (_T) module_memfree(_T));
static void *its_alloc(void)
{
void *page __free(its_execmem) = module_alloc(PAGE_SIZE);
if (!page)
return NULL;
if (its_mod) {
void *tmp = krealloc(its_mod->its_page_array,
(its_mod->its_num_pages+1) * sizeof(void *),
GFP_KERNEL);
if (!tmp)
return NULL;
its_mod->its_page_array = tmp;
its_mod->its_page_array[its_mod->its_num_pages++] = page;
}
return no_free_ptr(page);
}
static void *its_allocate_thunk(int reg)
{
int size = 3 + (reg / 8);
void *thunk;
if (!its_page || (its_offset + size - 1) >= PAGE_SIZE) {
its_page = its_alloc();
if (!its_page) {
pr_err("ITS page allocation failed\n");
return NULL;
}
memset(its_page, INT3_INSN_OPCODE, PAGE_SIZE);
its_offset = 32;
}
/*
* If the indirect branch instruction will be in the lower half
* of a cacheline, then update the offset to reach the upper half.
*/
if ((its_offset + size - 1) % 64 < 32)
its_offset = ((its_offset - 1) | 0x3F) + 33;
thunk = its_page + its_offset;
its_offset += size;
set_memory_rw((unsigned long)its_page, 1);
thunk = its_init_thunk(thunk, reg);
set_memory_rox((unsigned long)its_page, 1);
return thunk;
}
#else /* CONFIG_MODULES */
static void *its_allocate_thunk(int reg)
{
return NULL;
}
#endif /* CONFIG_MODULES */
#endif /* CONFIG_MITIGATION_ITS */
/* /*
* Fill the buffer with a single effective instruction of size @len. * Fill the buffer with a single effective instruction of size @len.
* *
@@ -521,7 +654,8 @@ static int emit_indirect(int op, int reg, u8 *bytes)
return i; return i;
} }
static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes) static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes,
void *call_dest, void *jmp_dest)
{ {
u8 op = insn->opcode.bytes[0]; u8 op = insn->opcode.bytes[0];
int i = 0; int i = 0;
@@ -542,7 +676,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
switch (op) { switch (op) {
case CALL_INSN_OPCODE: case CALL_INSN_OPCODE:
__text_gen_insn(bytes+i, op, addr+i, __text_gen_insn(bytes+i, op, addr+i,
__x86_indirect_call_thunk_array[reg], call_dest,
CALL_INSN_SIZE); CALL_INSN_SIZE);
i += CALL_INSN_SIZE; i += CALL_INSN_SIZE;
break; break;
@@ -550,7 +684,7 @@ static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8
case JMP32_INSN_OPCODE: case JMP32_INSN_OPCODE:
clang_jcc: clang_jcc:
__text_gen_insn(bytes+i, op, addr+i, __text_gen_insn(bytes+i, op, addr+i,
__x86_indirect_jump_thunk_array[reg], jmp_dest,
JMP32_INSN_SIZE); JMP32_INSN_SIZE);
i += JMP32_INSN_SIZE; i += JMP32_INSN_SIZE;
break; break;
@@ -565,6 +699,47 @@ clang_jcc:
return i; return i;
} }
static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{
return __emit_trampoline(addr, insn, bytes,
__x86_indirect_call_thunk_array[reg],
__x86_indirect_jump_thunk_array[reg]);
}
#ifdef CONFIG_MITIGATION_ITS
static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
{
u8 *thunk = __x86_indirect_its_thunk_array[reg];
u8 *tmp = its_allocate_thunk(reg);
if (tmp)
thunk = tmp;
return __emit_trampoline(addr, insn, bytes, thunk, thunk);
}
/* Check if an indirect branch is at ITS-unsafe address */
static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
{
if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
return false;
/* Indirect branch opcode is 2 or 3 bytes depending on reg */
addr += 1 + reg / 8;
/* Lower-half of the cacheline? */
return !(addr & 0x20);
}
u8 *its_static_thunk(int reg)
{
u8 *thunk = __x86_indirect_its_thunk_array[reg];
return thunk;
}
#endif /* CONFIG_MITIGATION_ITS */
/* /*
* Rewrite the compiler generated retpoline thunk calls. * Rewrite the compiler generated retpoline thunk calls.
* *
@@ -639,6 +814,15 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
bytes[i++] = 0xe8; /* LFENCE */ bytes[i++] = 0xe8; /* LFENCE */
} }
#ifdef CONFIG_MITIGATION_ITS
/*
* Check if the address of last byte of emitted-indirect is in
* lower-half of the cacheline. Such branches need ITS mitigation.
*/
if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg))
return emit_its_trampoline(addr, insn, reg, bytes);
#endif
ret = emit_indirect(op, reg, bytes + i); ret = emit_indirect(op, reg, bytes + i);
if (ret < 0) if (ret < 0)
return ret; return ret;
@@ -710,6 +894,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
#ifdef CONFIG_RETHUNK #ifdef CONFIG_RETHUNK
bool cpu_wants_rethunk(void)
{
return cpu_feature_enabled(X86_FEATURE_RETHUNK);
}
bool cpu_wants_rethunk_at(void *addr)
{
if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
return false;
if (x86_return_thunk != its_return_thunk)
return true;
return !((unsigned long)addr & 0x20);
}
/* /*
* Rewrite the compiler generated return thunk tail-calls. * Rewrite the compiler generated return thunk tail-calls.
* *
@@ -726,7 +925,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
int i = 0; int i = 0;
/* Patch the custom return thunks... */ /* Patch the custom return thunks... */
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { if (cpu_wants_rethunk_at(addr)) {
i = JMP32_INSN_SIZE; i = JMP32_INSN_SIZE;
__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i); __text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
} else { } else {
@@ -743,7 +942,7 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
{ {
s32 *s; s32 *s;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) if (cpu_wants_rethunk())
static_call_force_reinit(); static_call_force_reinit();
for (s = start; s < end; s++) { for (s = start; s < end; s++) {
@@ -1575,6 +1774,8 @@ static noinline void __init alt_reloc_selftest(void)
void __init alternative_instructions(void) void __init alternative_instructions(void)
{ {
u64 ibt;
int3_selftest(); int3_selftest();
/* /*
@@ -1612,6 +1813,9 @@ void __init alternative_instructions(void)
*/ */
paravirt_set_cap(); paravirt_set_cap();
/* Keep CET-IBT disabled until caller/callee are patched */
ibt = ibt_save(/*disable*/ true);
/* /*
* First patch paravirt functions, such that we overwrite the indirect * First patch paravirt functions, such that we overwrite the indirect
* call with the direct call. * call with the direct call.
@@ -1645,6 +1849,8 @@ void __init alternative_instructions(void)
*/ */
apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end); apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
ibt_restore(ibt);
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* Patch to UP if other cpus not imminent. */ /* Patch to UP if other cpus not imminent. */
if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) { if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {

View File

@@ -49,6 +49,7 @@ static void __init srbds_select_mitigation(void);
static void __init l1d_flush_select_mitigation(void); static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void); static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void); static void __init gds_select_mitigation(void);
static void __init its_select_mitigation(void);
/* The base value of the SPEC_CTRL MSR without task-specific bits set */ /* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base; u64 x86_spec_ctrl_base;
@@ -67,6 +68,14 @@ static DEFINE_MUTEX(spec_ctrl_mutex);
void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk; void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
static void __init set_return_thunk(void *thunk)
{
if (x86_return_thunk != __x86_return_thunk)
pr_warn("x86/bugs: return thunk changed\n");
x86_return_thunk = thunk;
}
/* Update SPEC_CTRL MSR and its cached copy unconditionally */ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val) static void update_spec_ctrl(u64 val)
{ {
@@ -175,6 +184,7 @@ void __init cpu_select_mitigations(void)
*/ */
srso_select_mitigation(); srso_select_mitigation();
gds_select_mitigation(); gds_select_mitigation();
its_select_mitigation();
} }
/* /*
@@ -1102,7 +1112,7 @@ do_cmd_auto:
setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET); setup_force_cpu_cap(X86_FEATURE_UNRET);
x86_return_thunk = retbleed_return_thunk; set_return_thunk(retbleed_return_thunk);
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -1136,7 +1146,9 @@ do_cmd_auto:
case RETBLEED_MITIGATION_STUFF: case RETBLEED_MITIGATION_STUFF:
setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH); setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
x86_set_skl_return_thunk(); #ifdef CONFIG_CALL_DEPTH_TRACKING
set_return_thunk(&__x86_return_skl);
#endif
break; break;
default: default:
@@ -1170,6 +1182,146 @@ do_cmd_auto:
pr_info("%s\n", retbleed_strings[retbleed_mitigation]); pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
} }
#undef pr_fmt
#define pr_fmt(fmt) "ITS: " fmt
enum its_mitigation_cmd {
ITS_CMD_OFF,
ITS_CMD_ON,
ITS_CMD_VMEXIT,
ITS_CMD_RSB_STUFF,
};
enum its_mitigation {
ITS_MITIGATION_OFF,
ITS_MITIGATION_VMEXIT_ONLY,
ITS_MITIGATION_ALIGNED_THUNKS,
ITS_MITIGATION_RETPOLINE_STUFF,
};
static const char * const its_strings[] = {
[ITS_MITIGATION_OFF] = "Vulnerable",
[ITS_MITIGATION_VMEXIT_ONLY] = "Mitigation: Vulnerable, KVM: Not affected",
[ITS_MITIGATION_ALIGNED_THUNKS] = "Mitigation: Aligned branch/return thunks",
[ITS_MITIGATION_RETPOLINE_STUFF] = "Mitigation: Retpolines, Stuffing RSB",
};
static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;
static enum its_mitigation_cmd its_cmd __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
static int __init its_parse_cmdline(char *str)
{
if (!str)
return -EINVAL;
if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
return 0;
}
if (!strcmp(str, "off")) {
its_cmd = ITS_CMD_OFF;
} else if (!strcmp(str, "on")) {
its_cmd = ITS_CMD_ON;
} else if (!strcmp(str, "force")) {
its_cmd = ITS_CMD_ON;
setup_force_cpu_bug(X86_BUG_ITS);
} else if (!strcmp(str, "vmexit")) {
its_cmd = ITS_CMD_VMEXIT;
} else if (!strcmp(str, "stuff")) {
its_cmd = ITS_CMD_RSB_STUFF;
} else {
pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
}
return 0;
}
early_param("indirect_target_selection", its_parse_cmdline);
static void __init its_select_mitigation(void)
{
enum its_mitigation_cmd cmd = its_cmd;
if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
its_mitigation = ITS_MITIGATION_OFF;
return;
}
/* Retpoline+CDT mitigates ITS, bail out */
if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
boot_cpu_has(X86_FEATURE_CALL_DEPTH)) {
its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
goto out;
}
/* Exit early to avoid irrelevant warnings */
if (cmd == ITS_CMD_OFF) {
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (spectre_v2_enabled == SPECTRE_V2_NONE) {
pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (!IS_ENABLED(CONFIG_RETPOLINE) || !IS_ENABLED(CONFIG_RETHUNK)) {
pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
its_mitigation = ITS_MITIGATION_OFF;
goto out;
}
if (cmd == ITS_CMD_RSB_STUFF &&
(!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_CALL_DEPTH_TRACKING))) {
pr_err("RSB stuff mitigation not supported, using default\n");
cmd = ITS_CMD_ON;
}
switch (cmd) {
case ITS_CMD_OFF:
its_mitigation = ITS_MITIGATION_OFF;
break;
case ITS_CMD_VMEXIT:
if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
goto out;
}
fallthrough;
case ITS_CMD_ON:
its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
set_return_thunk(its_return_thunk);
break;
case ITS_CMD_RSB_STUFF:
its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
#ifdef CONFIG_CALL_DEPTH_TRACKING
set_return_thunk(&__x86_return_skl);
#endif
if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) {
retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
pr_info("Retbleed mitigation updated to stuffing\n");
}
break;
}
out:
pr_info("%s\n", its_strings[its_mitigation]);
}
#undef pr_fmt #undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt #define pr_fmt(fmt) "Spectre V2 : " fmt
@@ -1290,9 +1442,13 @@ static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
static enum spectre_v2_user_cmd __init static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(void) spectre_v2_parse_user_cmdline(void)
{ {
enum spectre_v2_user_cmd mode;
char arg[20]; char arg[20];
int ret, i; int ret, i;
mode = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ?
SPECTRE_V2_USER_CMD_AUTO : SPECTRE_V2_USER_CMD_NONE;
switch (spectre_v2_cmd) { switch (spectre_v2_cmd) {
case SPECTRE_V2_CMD_NONE: case SPECTRE_V2_CMD_NONE:
return SPECTRE_V2_USER_CMD_NONE; return SPECTRE_V2_USER_CMD_NONE;
@@ -1305,7 +1461,7 @@ spectre_v2_parse_user_cmdline(void)
ret = cmdline_find_option(boot_command_line, "spectre_v2_user", ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
arg, sizeof(arg)); arg, sizeof(arg));
if (ret < 0) if (ret < 0)
return SPECTRE_V2_USER_CMD_AUTO; return mode;
for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
if (match_option(arg, ret, v2_user_options[i].option)) { if (match_option(arg, ret, v2_user_options[i].option)) {
@@ -1315,8 +1471,8 @@ spectre_v2_parse_user_cmdline(void)
} }
} }
pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); pr_err("Unknown user space protection option (%s). Switching to default\n", arg);
return SPECTRE_V2_USER_CMD_AUTO; return mode;
} }
static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
@@ -1677,10 +1833,11 @@ static void __init bhi_select_mitigation(void)
return; return;
} }
if (spec_ctrl_bhi_dis()) if (!IS_ENABLED(CONFIG_X86_64))
return; return;
if (!IS_ENABLED(CONFIG_X86_64)) /* Mitigate in hardware if supported */
if (spec_ctrl_bhi_dis())
return; return;
/* Mitigate KVM by default */ /* Mitigate KVM by default */
@@ -2606,10 +2763,10 @@ static void __init srso_select_mitigation(void)
if (boot_cpu_data.x86 == 0x19) { if (boot_cpu_data.x86 == 0x19) {
setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
x86_return_thunk = srso_alias_return_thunk; set_return_thunk(srso_alias_return_thunk);
} else { } else {
setup_force_cpu_cap(X86_FEATURE_SRSO); setup_force_cpu_cap(X86_FEATURE_SRSO);
x86_return_thunk = srso_return_thunk; set_return_thunk(srso_return_thunk);
} }
if (has_microcode) if (has_microcode)
srso_mitigation = SRSO_MITIGATION_SAFE_RET; srso_mitigation = SRSO_MITIGATION_SAFE_RET;
@@ -2793,6 +2950,11 @@ static ssize_t rfds_show_state(char *buf)
return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]); return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
} }
static ssize_t its_show_state(char *buf)
{
return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
}
static char *stibp_state(void) static char *stibp_state(void)
{ {
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
@@ -2975,6 +3137,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_RFDS: case X86_BUG_RFDS:
return rfds_show_state(buf); return rfds_show_state(buf);
case X86_BUG_ITS:
return its_show_state(buf);
default: default:
break; break;
} }
@@ -3054,4 +3219,9 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib
{ {
return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
} }
ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
{
return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
}
#endif #endif

View File

@@ -1272,6 +1272,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define GDS BIT(6) #define GDS BIT(6)
/* CPU is affected by Register File Data Sampling */ /* CPU is affected by Register File Data Sampling */
#define RFDS BIT(7) #define RFDS BIT(7)
/* CPU is affected by Indirect Target Selection */
#define ITS BIT(8)
/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
#define ITS_NATIVE_ONLY BIT(9)
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS),
@@ -1283,22 +1287,25 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(BROADWELL_G, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO),
VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0x5), MMIO | RETBLEED | GDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS),
VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPINGS(0x0, 0xb), MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS), VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS),
VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPINGS(0x0, 0xc), MMIO | RETBLEED | GDS | SRBDS),
VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | SRBDS | ITS),
VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED),
VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS), VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED | ITS),
VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED),
VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS), VULNBL_INTEL_STEPPINGS(ALDERLAKE, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS), VULNBL_INTEL_STEPPINGS(ALDERLAKE_L, X86_STEPPING_ANY, RFDS),
VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS), VULNBL_INTEL_STEPPINGS(RAPTORLAKE, X86_STEPPING_ANY, RFDS),
@@ -1362,6 +1369,32 @@ static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
return cpu_matches(cpu_vuln_blacklist, RFDS); return cpu_matches(cpu_vuln_blacklist, RFDS);
} }
static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
{
/* The "immunity" bit trumps everything else: */
if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
return false;
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return false;
/* None of the affected CPUs have BHI_CTRL */
if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
return false;
/*
* If a VMM did not expose ITS_NO, assume that a guest could
* be running on a vulnerable hardware or may migrate to such
* hardware.
*/
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return true;
if (cpu_matches(cpu_vuln_blacklist, ITS))
return true;
return false;
}
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{ {
u64 x86_arch_cap_msr = x86_read_arch_cap_msr(); u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
@@ -1476,9 +1509,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (vulnerable_to_rfds(x86_arch_cap_msr)) if (vulnerable_to_rfds(x86_arch_cap_msr))
setup_force_cpu_bug(X86_BUG_RFDS); setup_force_cpu_bug(X86_BUG_RFDS);
/* When virtualized, eIBRS could be hidden, assume vulnerable */ /*
if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) && * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with
!cpu_matches(cpu_vuln_whitelist, NO_BHI) && * BHI_NO still need to use the BHI mitigation to prevent Intra-mode
* attacks. When virtualized, eIBRS could be hidden, assume vulnerable.
*/
if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
(boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) || (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
boot_cpu_has(X86_FEATURE_HYPERVISOR))) boot_cpu_has(X86_FEATURE_HYPERVISOR)))
setup_force_cpu_bug(X86_BUG_BHI); setup_force_cpu_bug(X86_BUG_BHI);
@@ -1486,6 +1522,12 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET)) if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
setup_force_cpu_bug(X86_BUG_IBPB_NO_RET); setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);
if (vulnerable_to_its(x86_arch_cap_msr)) {
setup_force_cpu_bug(X86_BUG_ITS);
if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
}
if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
return; return;

View File

@@ -1102,15 +1102,17 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
static int __init save_microcode_in_initrd(void) static int __init save_microcode_in_initrd(void)
{ {
unsigned int cpuid_1_eax = native_cpuid_eax(1);
struct cpuinfo_x86 *c = &boot_cpu_data; struct cpuinfo_x86 *c = &boot_cpu_data;
struct cont_desc desc = { 0 }; struct cont_desc desc = { 0 };
unsigned int cpuid_1_eax;
enum ucode_state ret; enum ucode_state ret;
struct cpio_data cp; struct cpio_data cp;
if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
return 0; return 0;
cpuid_1_eax = native_cpuid_eax(1);
if (!find_blobs_in_containers(&cp)) if (!find_blobs_in_containers(&cp))
return -EINVAL; return -EINVAL;

View File

@@ -43,8 +43,8 @@
#define DRIVER_VERSION "2.2" #define DRIVER_VERSION "2.2"
static struct microcode_ops *microcode_ops; static struct microcode_ops *microcode_ops;
bool dis_ucode_ldr = true; static bool dis_ucode_ldr = false;
bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV); bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR); module_param(force_minrev, bool, S_IRUSR | S_IWUSR);
@@ -91,6 +91,9 @@ static bool amd_check_current_patch_level(void)
u32 lvl, dummy, i; u32 lvl, dummy, i;
u32 *levels; u32 *levels;
if (x86_cpuid_vendor() != X86_VENDOR_AMD)
return false;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy); native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
levels = final_levels; levels = final_levels;
@@ -102,27 +105,29 @@ static bool amd_check_current_patch_level(void)
return false; return false;
} }
static bool __init check_loader_disabled_bsp(void) bool __init microcode_loader_disabled(void)
{ {
static const char *__dis_opt_str = "dis_ucode_ldr"; if (dis_ucode_ldr)
const char *cmdline = boot_command_line;
const char *option = __dis_opt_str;
/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
* completely accurate as xen pv guests don't see that CPUID bit set but
* that's good enough as they don't land on the BSP path anyway.
*/
if (native_cpuid_ecx(1) & BIT(31))
return true; return true;
if (x86_cpuid_vendor() == X86_VENDOR_AMD) { /*
if (amd_check_current_patch_level()) * Disable when:
return true; *
} * 1) The CPU does not support CPUID.
*
if (cmdline_find_option_bool(cmdline, option) <= 0) * 2) Bit 31 in CPUID[1]:ECX is clear
dis_ucode_ldr = false; * The bit is reserved for hypervisor use. This is still not
* completely accurate as XEN PV guests don't see that CPUID bit
* set, but that's good enough as they don't land on the BSP
* path anyway.
*
* 3) Certain AMD patch levels are not allowed to be
* overwritten.
*/
if (!have_cpuid_p() ||
native_cpuid_ecx(1) & BIT(31) ||
amd_check_current_patch_level())
dis_ucode_ldr = true;
return dis_ucode_ldr; return dis_ucode_ldr;
} }
@@ -132,7 +137,10 @@ void __init load_ucode_bsp(void)
unsigned int cpuid_1_eax; unsigned int cpuid_1_eax;
bool intel = true; bool intel = true;
if (!have_cpuid_p()) if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
dis_ucode_ldr = true;
if (microcode_loader_disabled())
return; return;
cpuid_1_eax = native_cpuid_eax(1); cpuid_1_eax = native_cpuid_eax(1);
@@ -153,9 +161,6 @@ void __init load_ucode_bsp(void)
return; return;
} }
if (check_loader_disabled_bsp())
return;
if (intel) if (intel)
load_ucode_intel_bsp(&early_data); load_ucode_intel_bsp(&early_data);
else else
@@ -166,6 +171,11 @@ void load_ucode_ap(void)
{ {
unsigned int cpuid_1_eax; unsigned int cpuid_1_eax;
/*
* Can't use microcode_loader_disabled() here - .init section
* hell. It doesn't have to either - the BSP variant must've
* parsed cmdline already anyway.
*/
if (dis_ucode_ldr) if (dis_ucode_ldr)
return; return;
@@ -817,7 +827,7 @@ static int __init microcode_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data; struct cpuinfo_x86 *c = &boot_cpu_data;
int error; int error;
if (dis_ucode_ldr) if (microcode_loader_disabled())
return -EINVAL; return -EINVAL;
if (c->x86_vendor == X86_VENDOR_INTEL) if (c->x86_vendor == X86_VENDOR_INTEL)

View File

@@ -389,7 +389,7 @@ static int __init save_builtin_microcode(void)
if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED) if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
return 0; return 0;
if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return 0; return 0;
uci.mc = get_microcode_blob(&uci, true); uci.mc = get_microcode_blob(&uci, true);

View File

@@ -94,7 +94,6 @@ static inline unsigned int x86_cpuid_family(void)
return x86_family(eax); return x86_family(eax);
} }
extern bool dis_ucode_ldr;
extern bool force_minrev; extern bool force_minrev;
#ifdef CONFIG_CPU_SUP_AMD #ifdef CONFIG_CPU_SUP_AMD

View File

@@ -363,7 +363,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
goto fail; goto fail;
ip = trampoline + size; ip = trampoline + size;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) if (cpu_wants_rethunk_at(ip))
__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE); __text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
else else
memcpy(ip, retq, sizeof(retq)); memcpy(ip, retq, sizeof(retq));

View File

@@ -145,10 +145,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void)
*ptr = (unsigned long)ptep + PAGE_OFFSET; *ptr = (unsigned long)ptep + PAGE_OFFSET;
#ifdef CONFIG_MICROCODE_INITRD32 #ifdef CONFIG_MICROCODE_INITRD32
/* Running on a hypervisor? */
if (native_cpuid_ecx(1) & BIT(31))
return;
params = (struct boot_params *)__pa_nodebug(&boot_params); params = (struct boot_params *)__pa_nodebug(&boot_params);
if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image) if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
return; return;

View File

@@ -312,6 +312,9 @@ int module_finalize(const Elf_Ehdr *hdr,
void *pseg = (void *)para->sh_addr; void *pseg = (void *)para->sh_addr;
apply_paravirt(pseg, pseg + para->sh_size); apply_paravirt(pseg, pseg + para->sh_size);
} }
its_init_mod(me);
if (retpolines || cfi) { if (retpolines || cfi) {
void *rseg = NULL, *cseg = NULL; void *rseg = NULL, *cseg = NULL;
unsigned int rsize = 0, csize = 0; unsigned int rsize = 0, csize = 0;
@@ -332,6 +335,9 @@ int module_finalize(const Elf_Ehdr *hdr,
void *rseg = (void *)retpolines->sh_addr; void *rseg = (void *)retpolines->sh_addr;
apply_retpolines(rseg, rseg + retpolines->sh_size); apply_retpolines(rseg, rseg + retpolines->sh_size);
} }
its_fini_mod(me);
if (returns) { if (returns) {
void *rseg = (void *)returns->sh_addr; void *rseg = (void *)returns->sh_addr;
apply_returns(rseg, rseg + returns->sh_size); apply_returns(rseg, rseg + returns->sh_size);
@@ -379,4 +385,5 @@ int module_finalize(const Elf_Ehdr *hdr,
void module_arch_cleanup(struct module *mod) void module_arch_cleanup(struct module *mod)
{ {
alternatives_smp_module_del(mod); alternatives_smp_module_del(mod);
its_free_mod(mod);
} }

View File

@@ -39,8 +39,12 @@
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/nmi.h> #include <trace/events/nmi.h>
/*
* An emergency handler can be set in any context including NMI
*/
struct nmi_desc { struct nmi_desc {
raw_spinlock_t lock; raw_spinlock_t lock;
nmi_handler_t emerg_handler;
struct list_head head; struct list_head head;
}; };
@@ -131,9 +135,22 @@ static void nmi_check_duration(struct nmiaction *action, u64 duration)
static int nmi_handle(unsigned int type, struct pt_regs *regs) static int nmi_handle(unsigned int type, struct pt_regs *regs)
{ {
struct nmi_desc *desc = nmi_to_desc(type); struct nmi_desc *desc = nmi_to_desc(type);
nmi_handler_t ehandler;
struct nmiaction *a; struct nmiaction *a;
int handled=0; int handled=0;
/*
* Call the emergency handler, if set
*
* In the case of crash_nmi_callback() emergency handler, it will
* return in the case of the crashing CPU to enable it to complete
* other necessary crashing actions ASAP. Other handlers in the
* linked list won't need to be run.
*/
ehandler = desc->emerg_handler;
if (ehandler)
return ehandler(type, regs);
rcu_read_lock(); rcu_read_lock();
/* /*
@@ -223,6 +240,31 @@ void unregister_nmi_handler(unsigned int type, const char *name)
} }
EXPORT_SYMBOL_GPL(unregister_nmi_handler); EXPORT_SYMBOL_GPL(unregister_nmi_handler);
/**
* set_emergency_nmi_handler - Set emergency handler
* @type: NMI type
* @handler: the emergency handler to be stored
*
* Set an emergency NMI handler which, if set, will preempt all the other
* handlers in the linked list. If a NULL handler is passed in, it will clear
* it. It is expected that concurrent calls to this function will not happen
* or the system is screwed beyond repair.
*/
void set_emergency_nmi_handler(unsigned int type, nmi_handler_t handler)
{
struct nmi_desc *desc = nmi_to_desc(type);
if (WARN_ON_ONCE(desc->emerg_handler == handler))
return;
desc->emerg_handler = handler;
/*
* Ensure the emergency handler is visible to other CPUs before
* function return
*/
smp_wmb();
}
static void static void
pci_serr_error(unsigned char reason, struct pt_regs *regs) pci_serr_error(unsigned char reason, struct pt_regs *regs)
{ {

View File

@@ -908,15 +908,11 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback)
shootdown_callback = callback; shootdown_callback = callback;
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
/* Would it be better to replace the trap vector here? */
if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback,
NMI_FLAG_FIRST, "crash"))
return; /* Return what? */
/* /*
* Ensure the new callback function is set before sending * Set emergency handler to preempt other handlers.
* out the NMI
*/ */
wmb(); set_emergency_nmi_handler(NMI_LOCAL, crash_nmi_callback);
apic_send_IPI_allbutself(NMI_VECTOR); apic_send_IPI_allbutself(NMI_VECTOR);

View File

@@ -81,7 +81,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
break; break;
case RET: case RET:
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) if (cpu_wants_rethunk_at(insn))
code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk); code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
else else
code = &retinsn; code = &retinsn;
@@ -90,7 +90,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
case JCC: case JCC:
if (!func) { if (!func) {
func = __static_call_return; func = __static_call_return;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) if (cpu_wants_rethunk())
func = x86_return_thunk; func = x86_return_thunk;
} }

View File

@@ -92,10 +92,17 @@ __always_inline int is_valid_bugaddr(unsigned long addr)
/* /*
* Check for UD1 or UD2, accounting for Address Size Override Prefixes. * Check for UD1 or UD2, accounting for Address Size Override Prefixes.
* If it's a UD1, get the ModRM byte to pass along to UBSan. * If it's a UD1, further decode to determine its use:
*
* UBSan{0}: 67 0f b9 00 ud1 (%eax),%eax
* UBSan{10}: 67 0f b9 40 10 ud1 0x10(%eax),%eax
* static_call: 0f b9 cc ud1 %esp,%ecx
*
* Notably UBSAN uses EAX, static_call uses ECX.
*/ */
__always_inline int decode_bug(unsigned long addr, u32 *imm) __always_inline int decode_bug(unsigned long addr, s32 *imm, int *len)
{ {
unsigned long start = addr;
u8 v; u8 v;
if (addr < TASK_SIZE_MAX) if (addr < TASK_SIZE_MAX)
@@ -108,24 +115,42 @@ __always_inline int decode_bug(unsigned long addr, u32 *imm)
return BUG_NONE; return BUG_NONE;
v = *(u8 *)(addr++); v = *(u8 *)(addr++);
if (v == SECOND_BYTE_OPCODE_UD2) if (v == SECOND_BYTE_OPCODE_UD2) {
*len = addr - start;
return BUG_UD2; return BUG_UD2;
}
if (!IS_ENABLED(CONFIG_UBSAN_TRAP) || v != SECOND_BYTE_OPCODE_UD1) if (v != SECOND_BYTE_OPCODE_UD1)
return BUG_NONE; return BUG_NONE;
/* Retrieve the immediate (type value) for the UBSAN UD1 */
v = *(u8 *)(addr++);
if (X86_MODRM_RM(v) == 4)
addr++;
*imm = 0; *imm = 0;
if (X86_MODRM_MOD(v) == 1) v = *(u8 *)(addr++); /* ModRM */
*imm = *(u8 *)addr;
else if (X86_MODRM_MOD(v) == 2) if (X86_MODRM_MOD(v) != 3 && X86_MODRM_RM(v) == 4)
*imm = *(u32 *)addr; addr++; /* SIB */
else
WARN_ONCE(1, "Unexpected MODRM_MOD: %u\n", X86_MODRM_MOD(v)); /* Decode immediate, if present */
switch (X86_MODRM_MOD(v)) {
case 0: if (X86_MODRM_RM(v) == 5)
addr += 4; /* RIP + disp32 */
break;
case 1: *imm = *(s8 *)addr;
addr += 1;
break;
case 2: *imm = *(s32 *)addr;
addr += 4;
break;
case 3: break;
}
/* record instruction length */
*len = addr - start;
if (X86_MODRM_REG(v) == 0) /* EAX */
return BUG_UD1_UBSAN;
return BUG_UD1; return BUG_UD1;
} }
@@ -256,10 +281,10 @@ static inline void handle_invalid_op(struct pt_regs *regs)
static noinstr bool handle_bug(struct pt_regs *regs) static noinstr bool handle_bug(struct pt_regs *regs)
{ {
bool handled = false; bool handled = false;
int ud_type; int ud_type, ud_len;
u32 imm; s32 ud_imm;
ud_type = decode_bug(regs->ip, &imm); ud_type = decode_bug(regs->ip, &ud_imm, &ud_len);
if (ud_type == BUG_NONE) if (ud_type == BUG_NONE)
return handled; return handled;
@@ -279,15 +304,28 @@ static noinstr bool handle_bug(struct pt_regs *regs)
*/ */
if (regs->flags & X86_EFLAGS_IF) if (regs->flags & X86_EFLAGS_IF)
raw_local_irq_enable(); raw_local_irq_enable();
if (ud_type == BUG_UD2) {
switch (ud_type) {
case BUG_UD2:
if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN || if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN ||
handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) { handle_cfi_failure(regs) == BUG_TRAP_TYPE_WARN) {
regs->ip += LEN_UD2; regs->ip += ud_len;
handled = true; handled = true;
} }
} else if (IS_ENABLED(CONFIG_UBSAN_TRAP)) { break;
pr_crit("%s at %pS\n", report_ubsan_failure(regs, imm), (void *)regs->ip);
case BUG_UD1_UBSAN:
if (IS_ENABLED(CONFIG_UBSAN_TRAP)) {
pr_crit("%s at %pS\n",
report_ubsan_failure(regs, ud_imm),
(void *)regs->ip);
}
break;
default:
break;
} }
if (regs->flags & X86_EFLAGS_IF) if (regs->flags & X86_EFLAGS_IF)
raw_local_irq_disable(); raw_local_irq_disable();
instrumentation_end(); instrumentation_end();

View File

@@ -541,4 +541,14 @@ INIT_PER_CPU(irq_stack_backing_store);
"SRSO function pair won't alias"); "SRSO function pair won't alias");
#endif #endif
#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline");
. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
#endif
#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
#endif
#endif /* CONFIG_X86_64 */ #endif /* CONFIG_X86_64 */

View File

@@ -131,6 +131,7 @@ void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
kvm_mmu_reset_context(vcpu); kvm_mmu_reset_context(vcpu);
} }
EXPORT_SYMBOL_GPL(kvm_smm_changed);
void process_smi(struct kvm_vcpu *vcpu) void process_smi(struct kvm_vcpu *vcpu)
{ {

View File

@@ -2014,11 +2014,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
svm->asid = sd->next_asid++; svm->asid = sd->next_asid++;
} }
static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{ {
struct vmcb *vmcb = svm->vmcb; struct vmcb *vmcb = to_svm(vcpu)->vmcb;
if (svm->vcpu.arch.guest_state_protected) if (vcpu->arch.guest_state_protected)
return; return;
if (unlikely(value != vmcb->save.dr6)) { if (unlikely(value != vmcb->save.dr6)) {
@@ -2211,12 +2211,6 @@ static int shutdown_interception(struct kvm_vcpu *vcpu)
struct kvm_run *kvm_run = vcpu->run; struct kvm_run *kvm_run = vcpu->run;
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
/*
* The VM save area has already been encrypted so it
* cannot be reinitialized - just terminate.
*/
if (sev_es_guest(vcpu->kvm))
return -EINVAL;
/* /*
* VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put * VMCB is undefined after a SHUTDOWN intercept. INIT the vCPU to put
@@ -2225,9 +2219,18 @@ static int shutdown_interception(struct kvm_vcpu *vcpu)
* userspace. At a platform view, INIT is acceptable behavior as * userspace. At a platform view, INIT is acceptable behavior as
* there exist bare metal platforms that automatically INIT the CPU * there exist bare metal platforms that automatically INIT the CPU
* in response to shutdown. * in response to shutdown.
*
* The VM save area for SEV-ES guests has already been encrypted so it
* cannot be reinitialized, i.e. synthesizing INIT is futile.
*/ */
clear_page(svm->vmcb); if (!sev_es_guest(vcpu->kvm)) {
kvm_vcpu_reset(vcpu, true); clear_page(svm->vmcb);
#ifdef CONFIG_KVM_SMM
if (is_smm(vcpu))
kvm_smm_changed(vcpu, false);
#endif
kvm_vcpu_reset(vcpu, true);
}
kvm_run->exit_reason = KVM_EXIT_SHUTDOWN; kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
return 0; return 0;
@@ -4220,10 +4223,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
* Run with all-zero DR6 unless needed, so that we can get the exact cause * Run with all-zero DR6 unless needed, so that we can get the exact cause
* of a #DB. * of a #DB.
*/ */
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
svm_set_dr6(svm, vcpu->arch.dr6); svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
else
svm_set_dr6(svm, DR6_ACTIVE_LOW);
clgi(); clgi();
kvm_load_guest_xsave_state(vcpu); kvm_load_guest_xsave_state(vcpu);
@@ -5002,6 +5003,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.set_idt = svm_set_idt, .set_idt = svm_set_idt,
.get_gdt = svm_get_gdt, .get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt, .set_gdt = svm_set_gdt,
.set_dr6 = svm_set_dr6,
.set_dr7 = svm_set_dr7, .set_dr7 = svm_set_dr7,
.sync_dirty_debug_regs = svm_sync_dirty_debug_regs, .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
.cache_reg = svm_cache_reg, .cache_reg = svm_cache_reg,

View File

@@ -5617,6 +5617,12 @@ static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
set_debugreg(DR6_RESERVED, 6); set_debugreg(DR6_RESERVED, 6);
} }
static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
{
lockdep_assert_irqs_disabled();
set_debugreg(vcpu->arch.dr6, 6);
}
static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{ {
vmcs_writel(GUEST_DR7, val); vmcs_writel(GUEST_DR7, val);
@@ -7356,10 +7362,6 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->host_state.cr4 = cr4; vmx->loaded_vmcs->host_state.cr4 = cr4;
} }
/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
set_debugreg(vcpu->arch.dr6, 6);
/* When single-stepping over STI and MOV SS, we must clear the /* When single-stepping over STI and MOV SS, we must clear the
* corresponding interruptibility bits in the guest state. Otherwise * corresponding interruptibility bits in the guest state. Otherwise
* vmentry fails as it then expects bit 14 (BS) in pending debug * vmentry fails as it then expects bit 14 (BS) in pending debug
@@ -8292,6 +8294,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.set_idt = vmx_set_idt, .set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt, .get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt, .set_gdt = vmx_set_gdt,
.set_dr6 = vmx_set_dr6,
.set_dr7 = vmx_set_dr7, .set_dr7 = vmx_set_dr7,
.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs, .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
.cache_reg = vmx_cache_reg, .cache_reg = vmx_cache_reg,

View File

@@ -1621,7 +1621,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \ ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO) ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO)
static u64 kvm_get_arch_capabilities(void) static u64 kvm_get_arch_capabilities(void)
{ {
@@ -1655,6 +1655,8 @@ static u64 kvm_get_arch_capabilities(void)
data |= ARCH_CAP_MDS_NO; data |= ARCH_CAP_MDS_NO;
if (!boot_cpu_has_bug(X86_BUG_RFDS)) if (!boot_cpu_has_bug(X86_BUG_RFDS))
data |= ARCH_CAP_RFDS_NO; data |= ARCH_CAP_RFDS_NO;
if (!boot_cpu_has_bug(X86_BUG_ITS))
data |= ARCH_CAP_ITS_NO;
if (!boot_cpu_has(X86_FEATURE_RTM)) { if (!boot_cpu_has(X86_FEATURE_RTM)) {
/* /*
@@ -10772,6 +10774,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(vcpu->arch.eff_db[1], 1); set_debugreg(vcpu->arch.eff_db[1], 1);
set_debugreg(vcpu->arch.eff_db[2], 2); set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3); set_debugreg(vcpu->arch.eff_db[3], 3);
/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
static_call(kvm_x86_set_dr6)(vcpu, vcpu->arch.dr6);
} else if (unlikely(hw_breakpoint_active())) { } else if (unlikely(hw_breakpoint_active())) {
set_debugreg(0, 7); set_debugreg(0, 7);
} }

View File

@@ -360,6 +360,45 @@ SYM_FUNC_END(__x86_return_skl)
#endif /* CONFIG_CALL_DEPTH_TRACKING */ #endif /* CONFIG_CALL_DEPTH_TRACKING */
#ifdef CONFIG_MITIGATION_ITS
.macro ITS_THUNK reg
SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
UNWIND_HINT_UNDEFINED
ANNOTATE_NOENDBR
ANNOTATE_RETPOLINE_SAFE
jmp *%\reg
int3
.align 32, 0xcc /* fill to the end of the line */
.skip 32, 0xcc /* skip to the next upper half */
.endm
/* ITS mitigation requires thunks be aligned to upper half of cacheline */
.align 64, 0xcc
.skip 32, 0xcc
SYM_CODE_START(__x86_indirect_its_thunk_array)
#define GEN(reg) ITS_THUNK reg
#include <asm/GEN-for-each-reg.h>
#undef GEN
.align 64, 0xcc
SYM_CODE_END(__x86_indirect_its_thunk_array)
.align 64, 0xcc
.skip 32, 0xcc
SYM_CODE_START(its_return_thunk)
UNWIND_HINT_FUNC
ANNOTATE_NOENDBR
ANNOTATE_UNRET_SAFE
ret
int3
SYM_CODE_END(its_return_thunk)
EXPORT_SYMBOL(its_return_thunk)
#endif /* CONFIG_MITIGATION_ITS */
/* /*
* This function name is magical and is used by -mfunction-return=thunk-extern * This function name is magical and is used by -mfunction-return=thunk-extern
* for the compiler to generate JMPs to it. * for the compiler to generate JMPs to it.

View File

@@ -650,8 +650,13 @@ static void __init memory_map_top_down(unsigned long map_start,
*/ */
addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start, addr = memblock_phys_alloc_range(PMD_SIZE, PMD_SIZE, map_start,
map_end); map_end);
memblock_phys_free(addr, PMD_SIZE); if (!addr) {
real_end = addr + PMD_SIZE; pr_warn("Failed to release memory for alloc_low_pages()");
real_end = max(map_start, ALIGN_DOWN(map_end, PMD_SIZE));
} else {
memblock_phys_free(addr, PMD_SIZE);
real_end = addr + PMD_SIZE;
}
/* step_size need to be small so pgt_buf from BRK could cover it */ /* step_size need to be small so pgt_buf from BRK could cover it */
step_size = PMD_SIZE; step_size = PMD_SIZE;

View File

@@ -959,9 +959,18 @@ int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
ret = __add_pages(nid, start_pfn, nr_pages, params); ret = __add_pages(nid, start_pfn, nr_pages, params);
WARN_ON_ONCE(ret); WARN_ON_ONCE(ret);
/* update max_pfn, max_low_pfn and high_memory */ /*
update_end_of_memory_vars(start_pfn << PAGE_SHIFT, * Special case: add_pages() is called by memremap_pages() for adding device
nr_pages << PAGE_SHIFT); * private pages. Do not bump up max_pfn in the device private path,
* because max_pfn changes affect dma_addressing_limited().
*
* dma_addressing_limited() returning true when max_pfn is the device's
* addressable memory can force device drivers to use bounce buffers
* and impact their performance negatively:
*/
if (!params->pgmap)
/* update max_pfn, max_low_pfn and high_memory */
update_end_of_memory_vars(start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
return ret; return ret;
} }

View File

@@ -109,8 +109,14 @@ void __init kernel_randomize_memory(void)
memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) + memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING; CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
/* Adapt physical memory region size based on available memory */ /*
if (memory_tb < kaslr_regions[0].size_tb) * Adapt physical memory region size based on available memory,
* except when CONFIG_PCI_P2PDMA is enabled. P2PDMA exposes the
* device BAR space assuming the direct map space is large enough
* for creating a ZONE_DEVICE mapping in the direct map corresponding
* to the physical BAR address.
*/
if (!IS_ENABLED(CONFIG_PCI_P2PDMA) && (memory_tb < kaslr_regions[0].size_tb))
kaslr_regions[0].size_tb = memory_tb; kaslr_regions[0].size_tb = memory_tb;
/* /*

View File

@@ -630,7 +630,11 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush); choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
/* Let nmi_uaccess_okay() know that we're changing CR3. */ /*
* Indicate that CR3 is about to change. nmi_uaccess_okay()
* and others are sensitive to the window where mm_cpumask(),
* CR3 and cpu_tlbstate.loaded_mm are not all in sync.
*/
this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING); this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
barrier(); barrier();
} }
@@ -900,8 +904,16 @@ done:
static bool should_flush_tlb(int cpu, void *data) static bool should_flush_tlb(int cpu, void *data)
{ {
struct mm_struct *loaded_mm = per_cpu(cpu_tlbstate.loaded_mm, cpu);
struct flush_tlb_info *info = data; struct flush_tlb_info *info = data;
/*
* Order the 'loaded_mm' and 'is_lazy' against their
* write ordering in switch_mm_irqs_off(). Ensure
* 'is_lazy' is at least as new as 'loaded_mm'.
*/
smp_rmb();
/* Lazy TLB will get flushed at the next context switch. */ /* Lazy TLB will get flushed at the next context switch. */
if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu)) if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
return false; return false;
@@ -910,8 +922,15 @@ static bool should_flush_tlb(int cpu, void *data)
if (!info->mm) if (!info->mm)
return true; return true;
/*
* While switching, the remote CPU could have state from
* either the prev or next mm. Assume the worst and flush.
*/
if (loaded_mm == LOADED_MM_SWITCHING)
return true;
/* The target mm is loaded, and the CPU is not lazy. */ /* The target mm is loaded, and the CPU is not lazy. */
if (per_cpu(cpu_tlbstate.loaded_mm, cpu) == info->mm) if (loaded_mm == info->mm)
return true; return true;
/* In cpumask, but not the loaded mm? Periodically remove by flushing. */ /* In cpumask, but not the loaded mm? Periodically remove by flushing. */

View File

@@ -37,6 +37,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
#define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT5(b1, b2, b3, b4, b5) \
do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)
#define EMIT1_off32(b1, off) \ #define EMIT1_off32(b1, off) \
do { EMIT1(b1); EMIT(off, 4); } while (0) do { EMIT1(b1); EMIT(off, 4); } while (0)
@@ -470,7 +472,11 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
{ {
u8 *prog = *pprog; u8 *prog = *pprog;
if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) { if (IS_ENABLED(CONFIG_MITIGATION_ITS) &&
cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
OPTIMIZER_HIDE_VAR(reg);
emit_jump(&prog, its_static_thunk(reg), ip);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
EMIT_LFENCE(); EMIT_LFENCE();
EMIT2(0xFF, 0xE0 + reg); EMIT2(0xFF, 0xE0 + reg);
} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) { } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
@@ -492,7 +498,7 @@ static void emit_return(u8 **pprog, u8 *ip)
{ {
u8 *prog = *pprog; u8 *prog = *pprog;
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) { if (cpu_wants_rethunk()) {
emit_jump(&prog, x86_return_thunk, ip); emit_jump(&prog, x86_return_thunk, ip);
} else { } else {
EMIT1(0xC3); /* ret */ EMIT1(0xC3); /* ret */
@@ -1072,6 +1078,48 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
#define RESTORE_TAIL_CALL_CNT(stack) \ #define RESTORE_TAIL_CALL_CNT(stack) \
EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8) EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
struct bpf_prog *bpf_prog)
{
u8 *prog = *pprog;
u8 *func;
if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
/* The clearing sequence clobbers eax and ecx. */
EMIT1(0x50); /* push rax */
EMIT1(0x51); /* push rcx */
ip += 2;
func = (u8 *)clear_bhb_loop;
ip += x86_call_depth_emit_accounting(&prog, func);
if (emit_call(&prog, func, ip))
return -EINVAL;
EMIT1(0x59); /* pop rcx */
EMIT1(0x58); /* pop rax */
}
/* Insert IBHF instruction */
if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
/*
* Add an Indirect Branch History Fence (IBHF). IBHF acts as a
* fence preventing branch history from before the fence from
* affecting indirect branches after the fence. This is
* specifically used in cBPF jitted code to prevent Intra-mode
* BHI attacks. The IBHF instruction is designed to be a NOP on
* hardware that doesn't need or support it. The REP and REX.W
* prefixes are required by the microcode, and they also ensure
* that the NOP is unlikely to be used in existing code.
*
* IBHF is not a valid instruction in 32-bit mode.
*/
EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
}
*pprog = prog;
return 0;
}
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image, static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
int oldproglen, struct jit_context *ctx, bool jmp_padding) int oldproglen, struct jit_context *ctx, bool jmp_padding)
{ {
@@ -1945,6 +1993,15 @@ emit_jmp:
seen_exit = true; seen_exit = true;
/* Update cleanup_addr */ /* Update cleanup_addr */
ctx->cleanup_addr = proglen; ctx->cleanup_addr = proglen;
if (bpf_prog_was_classic(bpf_prog) &&
!capable(CAP_SYS_ADMIN)) {
u8 *ip = image + addrs[i - 1];
if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
return -EINVAL;
}
pop_callee_regs(&prog, callee_regs_used); pop_callee_regs(&prog, callee_regs_used);
EMIT1(0xC9); /* leave */ EMIT1(0xC9); /* leave */
emit_return(&prog, image + addrs[i - 1] + (prog - temp)); emit_return(&prog, image + addrs[i - 1] + (prog - temp));

View File

@@ -26,7 +26,6 @@ void get_regs_from_mc(struct uml_pt_regs *regs, mcontext_t *mc)
COPY(RIP); COPY(RIP);
COPY2(EFLAGS, EFL); COPY2(EFLAGS, EFL);
COPY2(CS, CSGSFS); COPY2(CS, CSGSFS);
regs->gp[CS / sizeof(unsigned long)] &= 0xffff; regs->gp[SS / sizeof(unsigned long)] = mc->gregs[REG_CSGSFS] >> 48;
regs->gp[CS / sizeof(unsigned long)] |= 3;
#endif #endif
} }

View File

@@ -604,7 +604,7 @@ struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
{ {
struct bio *bio; struct bio *bio;
if (nr_vecs > UIO_MAXIOV) if (nr_vecs > BIO_MAX_INLINE_VECS)
return NULL; return NULL;
return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask); return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask);
} }

View File

@@ -265,10 +265,6 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
goto out_free_state; goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state); err = crypto_ahash_import(&ctx2->req, state);
if (err) {
sock_orphan(sk2);
sock_put(sk2);
}
out_free_state: out_free_state:
kfree_sensitive(state); kfree_sensitive(state);

Some files were not shown because too many files have changed in this diff Show More