Merge tag 'android15-6.6.89_r00' into android15-6.6

This merges the android15-6.6.89_r00 tag into the android15-6.6 branch,
catching it up with the latest LTS releases.

It contains the following commits:

* 65999c7d05 BACKPORT: KVM: arm64: Eagerly switch ZCR_EL{1,2}
* 2bdae59ccf BACKPORT: KVM: arm64: Mark some header functions as inline
* 2179040e7b BACKPORT: KVM: arm64: Refactor exit handlers
* 2af7cb2232 BACKPORT: KVM: arm64: Remove VHE host restore of CPACR_EL1.SMEN
* 0af026605a BACKPORT: KVM: arm64: Remove VHE host restore of CPACR_EL1.ZEN
* 1e88a51e5d BACKPORT: KVM: arm64: Remove host FPSIMD saving for non-protected KVM
* 1775aac0bd BACKPORT: KVM: arm64: Unconditionally save+flush host FPSIMD/SVE/SME state
* db6c8d3397 BACKPORT: KVM: arm64: Calculate cptr_el2 traps on activating traps
* a8408e37ce ANDROID: KVM: arm64: Eagerly restore host FPSIMD/SVE state in pKVM
* 051c5c53b0 ANDROID: KVM: arm64: Move __deactivate_fpsimd_traps() to switch.h
* 290f5d0002 ANDROID: KVM: arm64: Move kvm_hyp_handle_fpsimd_host() to switch.h
* 6fc7e69888 ANDROID: GKI: fix up crc issue in crypto_get_default_null_skcipher()
*   c3e7212c79 Merge 6.6.89 into android15-6.6-lts
|\
| * 71e4ec9b2a Linux 6.6.89
| * 20bab4eb1c objtool: Silence more KCOV warnings, part 2
| * 014a761be0 objtool: Ignore end-of-section jumps for KCOV/GCOV
| * 26084aa1a1 nvme: fixup scan failure for non-ANA multipath controllers
| * 17d41d0f3a MIPS: cm: Fix warning if MIPS_CM is disabled
| * eee19a8037 net: dsa: mv88e6xxx: enable STU methods for 6320 family
| * cd17c8638d net: dsa: mv88e6xxx: enable .port_set_policy() for 6320 family
| * 1368548407 net: dsa: mv88e6xxx: enable PVT for 6321 switch
| * bfbd730e10 net: dsa: mv88e6xxx: fix atu_move_port_mask for 6341 family
| * 551667f99b ext4: goto right label 'out_mmap_sem' in ext4_setattr()
| * ffe79cc6c7 comedi: jr3_pci: Fix synchronous deletion of timer
| * c4312c4d24 vmxnet3: Fix malformed packet sizing in vmxnet3_process_xdp
| * abe56be73e driver core: fix potential NULL pointer dereference in dev_uevent()
| * a551110662 driver core: introduce device_set_driver() helper
| * d29c2d5335 Revert "drivers: core: synchronize really_probe() and dev_uevent()"
| * 0fd13033d4 x86/pvh: Call C code via the kernel virtual mapping
| * 2b4479eb46 spi: spi-imx: Add check for spi_imx_setupxfer()
| * 459098685e md/raid1: Add check for missing source disk in process_checks()
| * 41866aa93d x86/cpu: Add CPU model number for Bartlett Lake CPUs with Raptor Cove cores
| * aa0cf04b69 ubsan: Fix panic from test_ubsan_out_of_bounds
| * 96ae4c19ed spi: tegra210-quad: add rate limiting and simplify timeout error message
| * 0134e326ab spi: tegra210-quad: use WARN_ON_ONCE instead of WARN_ON for timeouts
| * 6c2e136ffd loop: aio inherit the ioprio of original request
| * f0209397ed riscv: Provide all alternative macros all the time
| * ce7e04d26a iomap: skip unnecessary ifs_block_is_uptodate check
| * bb7878cca9 x86/i8253: Call clockevent_i8253_disable() with interrupts disabled
| * a862d24e1f scsi: pm80xx: Set phy_attached to zero when device is gone
| * 4cdb02e9f9 scsi: ufs: exynos: Ensure pre_link() executes before exynos_ufs_phy_init()
| * fa99f1886e scsi: hisi_sas: Fix I/O errors caused by hardware port ID changes
| * a8550ac19d ext4: make block validity check resistent to sb bh corruption
| * ea92c93887 cifs: Fix querying of WSL CHR and BLK reparse points over SMB1
| * dc18c5b130 timekeeping: Add a lockdep override in tick_freeze()
| * e5208da739 cifs: Fix encoding of SMB1 Session Setup Kerberos Request in non-UNICODE mode
| * f444c139e8 nvmet-fc: put ref when assoc->del_work is already scheduled
| * e45e8f0dd3 nvmet-fc: take tgtport reference only once
| * 0cad1849e9 x86/bugs: Don't fill RSB on context switch with eIBRS
| * b73c62b1ca x86/bugs: Don't fill RSB on VMEXIT with eIBRS+retpoline
| * 19160ed5e5 x86/bugs: Use SBPB in write_ibpb() if applicable
| * 84b52a6cd0 selftests/mincore: Allow read-ahead pages to reach the end of the file
| * 4978a798a3 gpiolib: of: Move Atmel HSMCI quirk up out of the regulator comment
| * 18082da6ce objtool: Stop UNRET validation on UD2
| * 0cf5fd8024 nvme: multipath: fix return value of nvme_available_path
| * 60ed102378 nvme: re-read ANA log page after ns scan completes
| * 4c97ba1b0b ACPI PPTT: Fix coding mistakes in a couple of sizeof() calls
| * fcf524eaff ACPI: EC: Set ec_no_wakeup for Lenovo Go S
| * fe063491e9 nvme: requeue namespace scan on missed AENs
| * 892fcde994 xen: Change xen-acpi-processor dom0 dependency
| * 1fe9b92eed perf/core: Fix WARN_ON(!ctx) in __free_event() for partial init
| * eee189ccd4 selftests: ublk: fix test_stripe_04
| * b2ff4e9c59 udmabuf: fix a buf size overflow issue during udmabuf creation
| * 6234a3c727 KVM: s390: Don't use %pK through debug printing
| * 1b2c45697f KVM: s390: Don't use %pK through tracepoints
| * b205d02726 sched/isolation: Make CONFIG_CPU_ISOLATION depend on CONFIG_SMP
| * c5d4d10300 io_uring: always do atomic put from iowq
| * 7e9b836182 rtc: pcf85063: do a SW reset if POR failed
| * a68768e280 9p/net: fix improper handling of bogus negative read/write replies
| * 04ebabdbe9 ntb_hw_amd: Add NTB PCI ID for new gen CPU
| * 27f6ee168c ntb: reduce stack usage in idt_scan_mws
| * 5d53e88d83 qibfs: fix _another_ leak
| * db437e9854 objtool, lkdtm: Obfuscate the do_nothing() pointer
| * 11c16b6449 objtool, regulator: rk808: Remove potential undefined behavior in rk806_set_mode_dcdc()
| * 547695db50 objtool, ASoC: codecs: wcd934x: Remove potential undefined behavior in wcd934x_slim_irq_handler()
| * 4ab980b5c3 objtool, panic: Disable SMAP in __stack_chk_fail()
| * 97f70d27be objtool: Silence more KCOV warnings
| * d5ada7bf3d thunderbolt: Scan retimers after device router has been enumerated
| * e59fc484d4 usb: host: xhci-plat: mvebu: use ->quirks instead of ->init_quirk() func
| * cfa7984f69 usb: gadget: aspeed: Add NULL pointer check in ast_vhub_init_dev()
| * 8b26eb1d8b usb: xhci: Avoid Stop Endpoint retry loop if the endpoint seems Running
| * 2c6a11eaec dmaengine: dmatest: Fix dmatest waiting less when interrupted
| * 5be9407b41 sound/virtio: Fix cancel_sync warnings on uninitialized work_structs
| * a7f9991e4e usb: dwc3: gadget: Avoid using reserved endpoints on Intel Merrifield
| * e5aabc76d1 usb: dwc3: gadget: Refactor loop to avoid NULL endpoints
| * db56636beb fs/ntfs3: Fix WARNING in ntfs_extend_initialized_size
| * 46a150788e usb: host: max3421-hcd: Add missing spi_device_id table
| * 1a0a2d8c6d mailbox: pcc: Always clear the platform ack interrupt first
| * a6e3026e0a mailbox: pcc: Fix the possible race in updation of chan_in_use flag
| * 10cf3135c6 bpf: Reject attaching fexit/fmod_ret to __noreturn functions
| * 4ed42d1a4a bpf: Only fails the busy counter check in bpf_cgrp_storage_get if it creates storage
| * c487fcc632 bpf: bpftool: Setting error code in do_loader()
| * 7f30987294 s390/tty: Fix a potential memory leak bug
| * 28e5a867aa s390/sclp: Add check for get_zeroed_page()
| * 96eab3c96a parisc: PDT: Fix missing prototype warning
| * 51ae5b7aa9 clk: check for disabled clock-provider in of_clk_get_hw_from_clkspec()
| * b5a528a34e bpf: Fix deadlock between rcu_tasks_trace and event_mutex.
| * 1b66a5920b crypto: null - Use spin lock instead of mutex
| * d999b11302 crypto: ccp - Add support for PCI device 0x1134
| * 02f53b8f2f MIPS: cm: Detect CM quirks from device tree
| * 979ba0d694 pinctrl: renesas: rza2: Fix potential NULL pointer dereference
| * 9c97886f80 USB: wdm: add annotation
| * 4b0369c530 USB: wdm: wdm_wwan_port_tx_complete mutex in atomic context
| * 54f7f8978a USB: wdm: close race between wdm_open and wdm_wwan_port_stop
| * 8312053acc USB: wdm: handle IO errors in wdm_wwan_port_start
| * e455bf315e USB: VLI disk crashes if LPM is used
| * 097d3c27f8 usb: quirks: Add delay init quirk for SanDisk 3.2Gen1 Flash Drive
| * 57beab8ea4 usb: quirks: add DELAY_INIT quirk for Silicon Motion Flash Drive
| * 127b6aba32 usb: dwc3: xilinx: Prevent spike in reset signal
| * c4d80e41cb usb: dwc3: gadget: check that event count does not exceed event buffer length
| * 6a91a198cd USB: OHCI: Add quirk for LS7A OHCI controller (rev 0x02)
| * 23d4bb3b06 usb: chipidea: ci_hdrc_imx: implement usb_phy_init() error handling
| * 15120673da usb: chipidea: ci_hdrc_imx: fix call balance of regulator routines
| * 121e9f80ea usb: chipidea: ci_hdrc_imx: fix usbmisc handling
| * 48a62deb85 usb: cdns3: Fix deadlock when using NCM gadget
| * 142273a49f usb: xhci: Fix invalid pointer dereference in Etron workaround
| * 01eeddf93a USB: serial: simple: add OWON HDS200 series oscilloscope support
| * 51d4b23ae5 USB: serial: option: add Sierra Wireless EM9291
| * c6dc3b71bd USB: serial: ftdi_sio: add support for Abacus Electrics Optical Probe
| * 3b377f805e serial: sifive: lock port in startup()/shutdown() callbacks
| * c2af265bea serial: msm: Configure correct working mode before starting earlycon
| * 5d5e8a880c misc: microchip: pci1xxxx: Fix incorrect IRQ status handling during ack
| * 62957f58ab misc: microchip: pci1xxxx: Fix Kernel panic during IRQ handler registration
| * 5f253cc40e char: misc: register chrdev region with all possible minors
| * 3481fd96d8 KVM: x86: Reset IRTE to host control if *new* route isn't postable
| * 9514202882 KVM: x86: Explicitly treat routing entry type changes as changes
| * ac0fe095ac mei: me: add panther lake H DID
| * 164bc7e26d scsi: Improve CDL control
| * 4c2de359b1 USB: storage: quirk for ADATA Portable HDD CH94
| * ff733a91a0 ata: libata-scsi: Fix ata_msense_control_ata_feature()
| * f0a1b05487 ata: libata-scsi: Fix ata_mselect_control_ata_feature() return type
| * f92d5d7134 ata: libata-scsi: Improve CDL control
| * 96838eb183 mcb: fix a double free bug in chameleon_parse_gdd()
| * 406ca74ade cxl/core/regs.c: Skip Memory Space Enable check for RCD and RCH Ports
| * ff826d60d9 KVM: SVM: Allocate IR data using atomic allocation
| * b675b4c863 io_uring: fix 'sync' handling of io_fallback_tw()
| * 20a3f73dd4 LoongArch: Remove a bogus reference to ZONE_DMA
| * 2ca9380b12 LoongArch: Return NULL from huge_pte_offset() for invalid PMD
| * 2f2803e4b5 irqchip/gic-v2m: Prevent use after free of gicv2m_get_fwnode()
| * 3d36fae383 drm/amd/display: Force full update in gpu reset
| * 253827297c drm/amd/display: Fix gpu reset in multidisplay config
| * 689d5be94f net: phy: microchip: force IRQ polling mode for lan88xx
| * a7b8358eb4 net: selftests: initialize TCP header and skb payload with zero
| * cefd8a2e2d xen-netfront: handle NULL returned by xdp_convert_buff_to_frame()
| * 284db2f35b crypto: atmel-sha204a - Set hwrng quality to lowest possible
| * ad787442af virtio_console: fix missing byte order handling for cols and rows
| * b74fb07153 LoongArch: Make do_xyz() exception handlers more robust
| * eaa0849818 LoongArch: Make regs_irqs_disabled() more clear
| * 5767f4ac64 LoongArch: Select ARCH_USE_MEMTEST
| * cd5970e001 perf/x86: Fix non-sampling (counting) events on certain x86 platforms
| * 9e0d94a292 splice: remove duplicate noinline from pipe_clear_nowait
| * 415f3481ca iommu/amd: Return an error if vCPU affinity is set for non-vCPU IRTE
| * 1d7c4b2b0b pds_core: make wait_context part of q_info
| * c918ce100d pds_core: Remove unnecessary check in pds_client_adminq_cmd()
| * cdd784c96f pds_core: handle unsupported PDS_CORE_CMD_FW_CONTROL result
| * 49083dc3cb net: dsa: mt7530: sync driver-specific behavior of MT7531 variants
| * 76c4c22c24 net_sched: hfsc: Fix a potential UAF in hfsc_dequeue() too
| * 3aa852e360 net_sched: hfsc: Fix a UAF vulnerability in class handling
| * 4f435c1f4c fix a couple of races in MNT_TREE_BENEATH handling by do_move_mount()
| * e9f67da077 net: ethernet: mtk_eth_soc: net: revise NETSYSv3 hardware configuration
| * dd6cb0a857 tipc: fix NULL pointer dereference in tipc_mon_reinit_self()
| * 618541a6cc net: phy: leds: fix memory leak
| * d5093d6486 net: lwtunnel: disable BHs when required
| * 4d6919dd7c scsi: core: Clear flags for scsi_cmnd that did not complete
| * 4a1b61f910 btrfs: avoid page_lockend underflow in btrfs_punch_hole_lock_range()
| * 1845e03ea7 cpufreq: cppc: Fix invalid return value in .get() callback
| * d6979fabe8 scsi: ufs: mcq: Add NULL check in ufshcd_mcq_abort()
| * 19e0eaa62e cpufreq: scpi: Fix null-ptr-deref in scpi_cpufreq_get_rate()
| * ea834c90aa cpufreq: scmi: Fix null-ptr-deref in scmi_cpufreq_get_rate()
| * 1053dcf8a5 cpufreq: apple-soc: Fix null-ptr-deref in apple_soc_cpufreq_get_rate()
| * 92d55d7051 dma/contiguous: avoid warning about unused size_bytes
| * f55e7f8abb ceph: Fix incorrect flush end position calculation
* | 33f2dfa9f6 Merge d70c078c26 ("cpufreq/sched: Explicitly synchronize limits_changed flag handling") into android15-6.6-lts
|\|
| * d70c078c26 cpufreq/sched: Explicitly synchronize limits_changed flag handling
| * ada8d7fa0a sched/cpufreq: Rework schedutil governor performance estimation
* | e7d23a6113 Revert "sched/topology: Consolidate and clean up access to a CPU's max compute capacity"
* | a29fd1101f Merge 7fc781ca93 ("sched/topology: Consolidate and clean up access to a CPU's max compute capacity") into android15-6.6-lts
|\|
| * 7fc781ca93 sched/topology: Consolidate and clean up access to a CPU's max compute capacity
* | 00201c6c4e Revert "media: subdev: Fix use of sd->enabled_streams in call_s_stream()"
* | c06018316a Revert "media: subdev: Improve v4l2_subdev_enable/disable_streams_fallback"
* | 7c63c3455a Revert "media: subdev: Add v4l2_subdev_is_streaming()"
* | 81a2d01c7e Revert "media: vimc: skip .s_stream() for stopped entities"
* | 112728ca64 Revert "module: sign with sha512 instead of sha1 by default"
* | c92ac69873 Merge 485104cd63 ("scsi: ufs: qcom: fix dev reference leaked through of_qcom_ice_get") into android15-6.6-lts
|\|
| * 485104cd63 scsi: ufs: qcom: fix dev reference leaked through of_qcom_ice_get
| * bd2a352a0d PCI: Fix reference leak in pci_register_host_bridge()
| * 81435b85b2 of: resolver: Fix device node refcount leakage in of_resolve_phandles()
| * f549a4d2e0 of: resolver: Simplify of_resolve_phandles() using __free()
| * 9549391bb6 clk: renesas: r9a07g043: Fix HP clock source for RZ/Five
| * 3b6f9b555d clk: renesas: r9a07g04[34]: Fix typo for sel_shdi variable
| * d56be18579 clk: renesas: r9a07g04[34]: Use SEL_SDHI1_STS status configuration for SD1 mux
| * a15ff92d5f clk: renesas: rzg2l: Refactor SD mux driver
| * 1b9daac238 clk: renesas: rzg2l: Remove CPG_SDHI_DSEL from generic header
| * 2e1162149b clk: renesas: rzg2l: Add struct clk_hw_data
| * aa487374d7 clk: renesas: rzg2l: Use u32 for flag and mux_flags
| * e56acd53a9 arm64: tegra: Remove the Orin NX/Nano suspend key
| * 6f21dfc4fa iio: adc: ad7768-1: Fix conversion result sign
| * 28aadcb759 iio: adc: ad7768-1: Move setting of val a bit later to avoid unnecessary return value check
| * 8df02691d0 ASoC: q6apm-dai: make use of q6apm_get_hw_pointer
| * 175cb0c28a ASoC: qcom: Fix trivial code style issues
| * 9f5d3022d8 ASoC: qcom: lpass: Make asoc_qcom_lpass_cpu_platform_remove() return void
| * 0addfbfd60 ASoC: q6apm-dai: schedule all available frames to avoid dsp under-runs
| * 860c028415 ASoC: qcom: q6apm-dai: drop unused 'q6apm_dai_rtd' fields
| * 9973c8192e net: dsa: mv88e6xxx: fix VTU methods for 6320 family
| * 70d2b16cdd net: dsa: mv88e6xxx: fix internal PHYs for 6320 family
| * 83c12c052c auxdisplay: hd44780: Fix an API misuse in hd44780.c
| * ab239a12fd auxdisplay: hd44780: Convert to platform remove callback returning void
| * 880b1c98d1 mmc: sdhci-msm: fix dev reference leaked through of_qcom_ice_get
| * d4f5f29c26 soc: qcom: ice: introduce devm_of_qcom_ice_get
| * a505075730 media: vimc: skip .s_stream() for stopped entities
| * a64a102e01 media: subdev: Add v4l2_subdev_is_streaming()
| * 2b3dc697a4 media: subdev: Improve v4l2_subdev_enable/disable_streams_fallback
| * beeeea11ee media: subdev: Fix use of sd->enabled_streams in call_s_stream()
| * 4d11fac941 tracing: Verify event formats that have "%*p.."
| * e13358c488 tracing: Add __print_dynamic_array() helper
| * 896e30f768 tracing: Add __string_len() example
| * 5724654a08 x86/mce: use is_copy_from_user() to determine copy-from-user context
| * 4156203620 x86/extable: Remove unused fixup type EX_TYPE_COPY
| * 247395cbec module: sign with sha512 instead of sha1 by default
* | 40fa39aa9f Merge 6.6.88 into android15-6.6-lts
|\|
| * 23ec0b4057 Linux 6.6.88
| * e7144dae6b ALSA: hda/realtek: Fix built-in mic on another ASUS VivoBook model
| * 03af77aa9c drm/tests: Build KMS helpers when DRM_KUNIT_TEST_HELPERS is enabled
| * b6b77ca4c9 btrfs: fix the length of reserved qgroup to free
| * 81faa5bfba MIPS: ds1287: Match ds1287_set_base_clock() function types
| * 1dab036557 MIPS: cevt-ds1287: Add missing ds1287.h include
| * 9cac3ed9ca MIPS: dec: Declare which_prom() as static
* | 30f955d0bc Merge 2eb70f54ad ("sign-file,extract-cert: use pkcs11 provider for OPENSSL MAJOR >= 3") into android15-6.6-lts
|\|
| * 2eb70f54ad sign-file,extract-cert: use pkcs11 provider for OPENSSL MAJOR >= 3
| * f8dafdafdd sign-file,extract-cert: avoid using deprecated ERR_get_error_line()
| * 1e2d849efc sign-file,extract-cert: move common SSL helper functions to a header
* | f8bfb03c95 ANDROID: GKI: Add bpf_redirect_info to virtual_device symbol list.
* | 96e027296c Revert "arm64: errata: Add newer ARM cores to the spectre_bhb_loop_affected() lists"
* | 9537aa8d72 Merge 202bca49b7 ("xdp: Reset bpf_redirect_info before running a xdp's BPF prog.") into android15-6.6-lts
|\|
| * 202bca49b7 xdp: Reset bpf_redirect_info before running a xdp's BPF prog.
| * 0a721f240a nvmet-fc: Remove unused functions
| * efde4462b3 landlock: Add the errata interface
| * c846320967 drm/amd/display: Stop amdgpu_dm initialize when link nums greater than max_links
| * 31ff06b4a8 wifi: rtw89: pci: disable PCIE wake bit when PCIE deinit
| * 48128f54f5 wifi: rtw89: pci: add pre_deinit to be called after probe complete
| * 84464db2ec btrfs: fix qgroup reserve leaks in cow_file_range
| * e8336d3c9a usb: typec: fix pm usage counter imbalance in ucsi_ccg_sync_control()
| * 0e66fd8e5a usb: typec: fix potential array underflow in ucsi_ccg_sync_control()
| * 37491e1dbb LoongArch: Eliminate superfluous get_numa_distances_cnt()
| * 1f1feff02e powerpc/rtas: Prevent Spectre v1 gadget construction in sys_rtas()
| * 427036030f nvme-rdma: unquiesce admin_q before destroy it
| * 6f8d51051d x86/split_lock: Fix the delayed detection logic
| * 29f040d4ef x86/tdx: Fix arch_safe_halt() execution for TDX VMs
| * e5f0581ecb x86/xen: fix memblock_reserve() usage on PVH
| * fa1103f21b x86/xen: move xen_reserve_extra_memory()
| * dafbcfb8ff efi/libstub: Bump up EFI_MMAP_NR_SLACK_SLOTS to 32
| * 4f687721a9 Fix mmu notifiers for range-based invalidates
| * da210d4f88 misc: pci_endpoint_test: Fix 'irq_type' to convey the correct type
| * 9d8d2899c5 misc: pci_endpoint_test: Fix displaying 'irq_type' after 'request_irq' error
| * 5a4b718121 misc: pci_endpoint_test: Avoid issue of interrupts remaining after request_irq error
| * 11a2f91f18 selftests: mptcp: add mptcp_lib_wait_local_port_listen
| * 392dfed4af mptcp: sockopt: fix getting freebind & transparent
| * d69a23d8e9 md: fix mddev uaf while iterating all_mddevs list
| * 9b1f50da60 kbuild: Add '-fno-builtin-wcslen'
| * 8c8d0e8000 cpufreq: Reference count policy in cpufreq_update_limits()
| * 7e2449ee66 io_uring/net: fix accept multishot handling
| * 3184297d6f drm/i915/gvt: fix unterminated-string-initialization warning
| * 3309feab2b drm/sti: remove duplicate object names
| * 31e94c7989 drm/nouveau: prime: fix ttm_bo_delayed_delete oops
| * ae73db71a2 drm/amdgpu/dma_buf: fix page_link check
| * 068091b796 drm/amd/pm/powerplay/hwmgr/vega20_thermal: Prevent division by zero
| * c3ff73e3bd drm/amd/pm/swsmu/smu13/smu_v13_0: Prevent division by zero
| * b0742a709b drm/amd/pm/powerplay/hwmgr/smu7_thermal: Prevent division by zero
| * de2cba068c drm/amd/pm/smu11: Prevent division by zero
| * 587de3ca78 drm/amd/pm/powerplay: Prevent division by zero
| * 5096174074 drm/amd/pm: Prevent division by zero
| * d189b32f9d drm/amd: Handle being compiled without SI or CIK support better
| * 42c2525bc0 drm/msm/a6xx: Fix stale rpmh votes from GPU
| * 31330248ab drm/repaper: fix integer overflows in repeat functions
| * 0df68b5860 perf/x86/intel/uncore: Fix the scale of IIO free running counters on SPR
| * 9686a16c35 perf/x86/intel/uncore: Fix the scale of IIO free running counters on ICX
| * aea923afea perf/x86/intel/uncore: Fix the scale of IIO free running counters on SNR
| * 196a4eecb8 perf/x86/intel: Allow to update user space GPRs from PEBS records
| * c2b169fc7a RDMA/cma: Fix workqueue crash in cma_netevent_work_handler
| * 064dc7a70c scsi: ufs: exynos: Ensure consistent phy reference counts
| * 92d8a4e621 scsi: megaraid_sas: Block zero-length ATA VPD inquiry
| * bbb6b149c3 x86/boot/sev: Avoid shared GHCB page for early memory acceptance
| * 5e036349bb x86/cpu/amd: Fix workaround for erratum 1054
| * 971ba6a64c x86/microcode/AMD: Extend the SHA check to Zen5, block loading of any unreleased standalone Zen5 microcode patches
| * f6ec52710d virtiofs: add filesystem context source name check
| * 89baf6bbe6 tracing: Fix filter string testing
| * 596cbe6320 string: Add load_unaligned_zeropad() code path to sized_strscpy()
| * d5421baa0e smb3 client: fix open hardlink on deferred close file error
| * 607723b13a selftests/mm: generate a temporary mountpoint for cgroup filesystem
| * e0e1b00208 riscv: Avoid fortify warning in syscall_get_arguments()
| * 8dbf060480 Revert "smb: client: fix TCP timers deadlock after rmmod"
| * fd8973b625 Revert "smb: client: Fix netns refcount imbalance causing leaks and use-after-free"
| * b7ce8db490 ksmbd: fix the warning from __kernel_write_iter
| * 160935d81f ksmbd: Prevent integer overflow in calculation of deadtime
| * 296cb5457c ksmbd: fix use-after-free in smb_break_all_levII_oplock()
| * 1db2451de2 ksmbd: Fix dangling pointer in krb_authenticate
| * 0874b629f6 ovl: don't allow datadir only
| * 23385f567b mm: fix apply_to_existing_page_range()
| * c3b3987bae mm: fix filemap_get_folios_contig returning batches of identical folios
| * 006b67ac61 mm/gup: fix wrongly calculated returned value in fault_in_safe_writeable()
| * 2912683a10 loop: LOOP_SET_FD: send uevents for partitions
| * f8530400f2 loop: properly send KOBJ_CHANGED uevent for disk device
| * 952e7a7e31 isofs: Prevent the use of too small fid
| * e89bf1311d i2c: cros-ec-tunnel: defer probe if parent EC is not present
| * 9f77aa584a hfs/hfsplus: fix slab-out-of-bounds in hfs_bnode_read_key
| * 8a53d36ba1 crypto: caam/qi - Fix drv_ctx refcount bug
| * 78b1126ca4 btrfs: correctly escape subvol in btrfs_show_options()
| * ab46314a4e Bluetooth: vhci: Avoid needless snprintf() calls
| * 600a099bec Bluetooth: l2cap: Process valid commands in too long frame
| * 0a6c0fc1f8 ftrace: fix incorrect hash size in register_ftrace_direct()
| * 42203e004d i2c: atr: Fix wrong include
| * 7d192e27a4 nfsd: decrease sc_count directly if fail to queue dl_recall
| * b699aeb91b nfs: add missing selections of CONFIG_CRC32
| * e446b60a2b asus-laptop: Fix an uninitialized variable
| * a12c145778 ASoC: qcom: Fix sc7280 lpass potential buffer overflow
| * bf39a185a6 ASoC: codecs:lpass-wsa-macro: Fix logic of enabling vi channels
| * 444139d6c4 ASoC: codecs:lpass-wsa-macro: Fix vi feedback rate
| * 3d6102ae33 Revert "PCI: Avoid reset when disabled via sysfs"
| * 411b7005f4 writeback: fix false warning in inode_to_wb()
| * 404faab1dd cpufreq/sched: Fix the usage of CPUFREQ_NEED_UPDATE_LIMITS
| * d217f7a822 riscv: KGDB: Remove ".option norvc/.option rvc" for kgdb_compiled_break
| * ba433e6c10 riscv: KGDB: Do not inline arch_kgdb_breakpoint()
| * ec95d0dfc6 kunit: qemu_configs: SH: Respect kunit cmdline
| * d7c65ecad9 riscv: Properly export reserved regions in /proc/iomem
| * a2874f0dff net: ethernet: mtk_eth_soc: revise QDMA packet scheduler settings
| * d853feb79d net: ethernet: mtk_eth_soc: correct the max weight of the queue limit for 100Mbps
| * 7891619d21 net: ti: icss-iep: Fix possible NULL pointer dereference for perout request
| * 8b9808b1f6 net: ti: icss-iep: Add phase offset configuration for perout signal
| * d64a7166fd net: ti: icss-iep: Add pwidth configuration for perout signal
| * 06ec3c1881 ptp: ocp: fix start time alignment in ptp_ocp_signal_set
| * c7ee467f29 net: dsa: avoid refcount warnings when ds->ops->tag_8021q_vlan_del() fails
| * fb12b460ec net: dsa: free routing table on probe failure
| * 86c6613a69 net: dsa: clean up FDB, MDB, VLAN entries on unbind
| * afae908730 net: dsa: mv88e6xxx: fix -ENOENT when deleting VLANs and MST is unsupported
| * bbb80f004f net: dsa: mv88e6xxx: avoid unregistering devlink regions which were never registered
| * 0f0284285a net: bridge: switchdev: do not notify new brentries as changed
| * c9dbc7ec44 net: b53: enable BPDU reception for management port
| * 2315cfaf47 netlink: specs: rt-link: adjust mctp attribute naming
| * 2f6e52cab1 netlink: specs: rt-link: add an attr layer around alt-ifname
| * 08aa59c0be cxgb4: fix memory leak in cxgb4_init_ethtool_filters() error path
| * e729dd9602 ata: libata-sata: Save all fields from sense data descriptor
| * 94d5809c7f net: ethernet: ti: am65-cpsw: fix port_np reference counting
| * 422b7b9d13 net: ethernet: ti: am65-cpsw-nuss: rename phy_node -> port_np
| * 7c2b66a31c net: ngbe: fix memory leak in ngbe_probe() error path
| * 824a7c2df5 net: openvswitch: fix nested key length validation in the set() action
| * 549cbbd14b block: fix resource leak in blk_register_queue() error path
| * a8a3b61ce1 net: mctp: Set SOCK_RCU_FREE
| * 9027e07356 pds_core: fix memory leak in pdsc_debugfs_add_qcq()
| * 86dc3a05fa test suite: use %zu to print size_t
| * b45b7a0bb8 igc: add lock preventing multiple simultaneous PTM transactions
| * d8dde48a1c igc: cleanup PTP module if probe fails
| * bcb2d6bfa5 igc: handle the IGC_PTP_ENABLED flag correctly
| * 5262d34f5e igc: move ktime snapshot into PTM retry loop
| * b1f3e69dc9 igc: increase wait time before retrying PTM
| * 16194ca3f3 igc: fix PTM cycle trigger logic
| * 6f1ac9b5a9 Revert "wifi: mac80211: Update skb's control block key in ieee80211_tx_dequeue()"
| * 9e3114958d Bluetooth: l2cap: Check encryption key size on incoming connection
| * 3db6605043 Bluetooth: btrtl: Prevent potential NULL dereference
| * fd1621f38f Bluetooth: hci_event: Fix sending MGMT_EV_DEVICE_FOUND for invalid address
| * ae470d0632 RDMA/core: Silence oversized kvmalloc() warning
| * d1d6fa08b1 ASoC: cs42l43: Reset clamp override on jack removal
| * efeddd8ace RDMA/hns: Fix wrong maximum DMA segment size
| * 344baf8c1f RDMA/usnic: Fix passing zero to PTR_ERR in usnic_ib_pci_probe()
| * 4f7b6029ae ovl: remove unused forward declaration
| * aaa93b8846 ASoC: Intel: avs: Fix null-ptr-deref in avs_component_probe()
| * 4a655ba283 ASoC: dwc: always enable/disable i2s irqs
| * 0b5390aeaa md/md-bitmap: fix stats collection for external bitmaps
| * f10aa9bc94 md/raid10: fix missing discard IO accounting
| * b0348f3394 scsi: iscsi: Fix missing scsi_host_put() in error path
| * 4a43fd3671 wifi: wl1251: fix memory leak in wl1251_tx_work
| * c74b84544d wifi: mac80211: Purge vif txq in ieee80211_do_stop()
| * 7fa75affe2 wifi: mac80211: Update skb's control block key in ieee80211_tx_dequeue()
| * 7ca513631f wifi: at76c50x: fix use after free access in at76_disconnect
| * a70ea92964 scsi: hisi_sas: Enable force phy when SATA disk directly connected
| * 58eb29dba7 HSI: ssi_protocol: Fix use after free vulnerability in ssi_protocol Driver Due to Race Condition
| * 02e1bcdfdf Bluetooth: hci_uart: Fix another race during initialization
| * fa43166f2e media: mediatek: vcodec: mark vdec_vp9_slice_map_counts_eob_coef noinline
| * 5029c8f2d7 x86/e820: Fix handling of subpage regions when calculating nosave ranges in e820__register_nosave_regions()
| * bf56f45b16 iommufd: Fail replace if device has not been attached
| * e5eadb6520 ACPI: platform-profile: Fix CFI violation when accessing sysfs files
| * 8d25a8e275 x86/paravirt: Move halt paravirt calls under CONFIG_PARAVIRT
| * 4117975672 arm64: errata: Add newer ARM cores to the spectre_bhb_loop_affected() lists
| * d784cb1890 selftests: mptcp: fix incorrect fd checks in main_loop
| * 970688e64d selftests: mptcp: close fd_in before returning in main_loop
| * 0d527afa76 pinctrl: qcom: Clear latched interrupt status when changing IRQ type
| * d118391566 phy: freescale: imx8m-pcie: assert phy reset and perst in power off
| * 6176fa57dd PCI: Fix reference leak in pci_alloc_child_bus()
| * 1fea772627 PCI: brcmstb: Fix missing of_node_put() in brcm_pcie_probe()
| * 468773325e of/irq: Fix device node refcount leakages in of_irq_init()
| * b48732849d of/irq: Fix device node refcount leakage in API irq_of_parse_and_map()
| * eac3d34101 of/irq: Fix device node refcount leakages in of_irq_count()
| * 37ff581ddf of/irq: Fix device node refcount leakage in API of_irq_parse_raw()
| * 77e3fd8803 of/irq: Fix device node refcount leakage in API of_irq_parse_one()
| * 6ea67933af ntb: use 64-bit arithmetic for the MSI doorbell mask
| * 8a3df0aa10 KVM: x86: Acquire SRCU in KVM_GET_MP_STATE to protect guest memory accesses
| * 8cb249d56e KVM: x86: Explicitly zero-initialize on-stack CPUID unions
| * 504464fd01 gve: handle overflow when reporting TX consumed descriptors
| * bbc702d73f gpio: zynq: Fix wakeup source leaks on device unbind
| * 4009ad6c76 gpio: tegra186: fix resource handling in ACPI probe path
| * 5d336ac215 ftrace: Add cond_resched() to ftrace_graph_set_hash()
| * 2eec9e2eb6 dt-bindings: coresight: qcom,coresight-tpdm: Fix too many 'reg'
| * 39cf18f8ec dt-bindings: coresight: qcom,coresight-tpda: Fix too many 'reg'
| * f46260c3dd dm-verity: fix prefetch-vs-suspend race
| * 7958b71d8b dm-integrity: set ti->error on memory allocation failure
| * 841ca9e922 dm-ebs: fix prefetch-vs-suspend race
| * 5baddc0806 crypto: ccp - Fix check for the primary ASP device
| * ecc44297ac clk: qcom: gdsc: Set retain_ff before moving to HW CTRL
| * fe25487f2d clk: qcom: gdsc: Capture pm_genpd_add_subdomain result code
| * 1be6fe9527 clk: qcom: gdsc: Release pm subdomains in reverse add order
| * 541c1a5554 clk: qcom: clk-branch: Fix invert halt status bit check for votable clocks
| * 5569a2c557 cifs: Ensure that all non-client-specific reparse points are processed by the server
| * b350dab6d4 cifs: fix integer overflow in match_server()
| * 864ba5c651 cifs: avoid NULL pointer dereference in dbg call
| * f75eb9acf9 thermal/drivers/rockchip: Add missing rk3328 mapping entry
| * 233b9cef91 tracing: Do not add length to print format in synthetic events
| * 8d4750f063 x86/xen: fix balloon target initialization for PVH dom0
| * c6fefcb71d sctp: detect and prevent references to a freed transport in sendmsg
| * b5681a8b99 mm/hugetlb: move hugetlb_sysctl_init() to the __init section
| * 94b3a19ced mm/hwpoison: do not send SIGBUS to processes with recovered clean pages
| * 0b83b941d3 mm/userfaultfd: fix release hang over concurrent GUP
| * 376183178f mm: add missing release barrier on PGDAT_RECLAIM_LOCKED unlock
| * e351ffc48b mm/mremap: correctly handle partial mremap() of VMA starting at 0
| * f4bc11b3c5 mm: fix lazy mmu docs and usage
| * 402769cde5 mm: make page_mapped_in_vma() hugetlb walk aware
| * e09661ac0b mm/rmap: reject hugetlb folios in folio_make_device_exclusive()
| * 70ec7d13bb sparc/mm: avoid calling arch_enter/leave_lazy_mmu() in set_ptes
| * b266dd4d22 sparc/mm: disable preemption in lazy mmu mode
| * e02c44b6ec iommufd: Fix uninitialized rc in iommufd_access_rw()
| * e921fce3a2 btrfs: zoned: fix zone finishing with missing devices
| * fa55f2a31b btrfs: zoned: fix zone activation with missing devices
| * 897ad7f70d btrfs: fix non-empty delayed iputs list on unmount due to compressed write workers
| * 1c82f5a393 backlight: led_bl: Hold led_access lock when calling led_sysfs_disable()
| * c8fa7ffc1c arm64: dts: mediatek: mt8173: Fix disp-pwm compatible string
| * 8db1206d83 arm64: mm: Correct the update of max_pfn
| * bfbbef7f1d mtd: rawnand: Add status chack in r852_ready()
| * 7772621041 mtd: inftlcore: Add error check for inftl_read_oob()
| * 29b2114572 mptcp: only inc MPJoinAckHMacFailure for HMAC failures
| * 7f9ae060ed mptcp: fix NULL pointer in can_accept_new_subflow
| * 8ddd124f8a lib: scatterlist: fix sg_split_phys to preserve original scatterlist offsets
| * 8385532d4d locking/lockdep: Decrease nr_unused_locks if lock unused in zap_class()
| * 388ba87816 mailbox: tegra-hsp: Define dimensioning masks in SoC data
| * ea07760676 mfd: ene-kb3930: Fix a potential NULL pointer dereference
| * b20ec02fcb leds: rgb: leds-qcom-lpg: Fix calculation of best period Hi-Res PWMs
| * 5d97ee4d8d leds: rgb: leds-qcom-lpg: Fix pwm resolution max for Hi-Res PWMs
| * c88f7328bb jbd2: remove wrong sb->s_sequence check
| * 6871a676aa i3c: Add NULL pointer check in i3c_master_queue_ibi()
| * c3250bdf5d i3c: master: svc: Use readsb helper for reading MDB
| * b00b040abf smb311 client: fix missing tcon check when mounting with linux/posix extensions
| * 5f80fd2ff8 soc: samsung: exynos-chipid: Add NULL pointer check in exynos_chipid_probe()
| * 1404dff1e1 tpm: do not start chip while suspended
| * a64d8972f0 udf: Fix inode_getblk() return value
| * a5434db74b vdpa/mlx5: Fix oversized null mkey longer than 32bit
| * a5464ac3ee f2fs: fix to avoid atomicity corruption of atomic file
| * 2eeb1085bf ext4: fix off-by-one error in do_split
| * 5f084993c9 bus: mhi: host: Fix race between unprepare and queue_buf
| * 7e439ff5ef ASoC: qdsp6: q6asm-dai: fix q6asm_dai_compr_set_params error path
| * 7ed8f978a8 ASoC: qdsp6: q6apm-dai: fix capture pipeline overruns.
| * b860a99800 ASoC: qdsp6: q6apm-dai: set 10 ms period and buffer alignment.
| * 176e7c9a0f ASoC: q6apm: add q6apm_get_hw_pointer helper
| * 35c4a652d8 io_uring/kbuf: reject zero sized provided buffers
| * 21b0c54546 wifi: mac80211: fix integer overflow in hwmp_route_info_get()
| * f86e2d0837 wifi: mt76: Add check for devm_kstrdup()
| * f304da6928 clocksource/drivers/stm32-lptimer: Use wakeup capable instead of init wakeup
| * ce6cabc080 mtd: Replace kcalloc() with devm_kcalloc()
| * 64baf64684 net: dsa: mv88e6xxx: workaround RGMII transmit delay erratum for 6320 family
| * 7a083ad29f mtd: Add check for devm_kcalloc()
| * 92b6844279 mptcp: sockopt: fix getting IPV6_V6ONLY
| * 8a19d34f1e media: i2c: imx219: Rectify runtime PM handling in probe and remove
| * 05b07e52a0 media: venus: hfi_parser: refactor hfi packet parsing logic
| * d4d88ece4b media: venus: hfi_parser: add check to avoid out of bound access
| * 28bdf7b914 media: i2c: ov7251: Introduce 1 ms delay between regulators and en GPIO
| * f249c05416 media: i2c: ov7251: Set enable GPIO low in probe
| * 86c20c4ebc media: i2c: ccs: Set the device's runtime PM status correctly in probe
| * 2f282e88c2 media: i2c: ccs: Set the device's runtime PM status correctly in remove
| * c3d68e38aa media: v4l2-dv-timings: prevent possible overflow in v4l2_detect_gtf()
| * 571a1873da media: platform: stm32: Add check for clk_enable()
| * bedb5a7b97 media: visl: Fix ERANGE error when setting enum controls
| * 3eaf580cba media: streamzap: prevent processing IR data on URB failure
| * 3fcff11317 tpm, tpm_tis: Fix timeout handling when waiting for TPM status
| * 9bd51723ab mtd: rawnand: brcmnand: fix PM resume warning
| * b8e772e2d8 spi: cadence-qspi: Fix probe on AM62A LP SK
| * 5085e02362 KVM: arm64: Tear down vGIC on failed vCPU creation
| * db8a657980 arm64: errata: Add KRYO 2XX/3XX/4XX silver cores to Spectre BHB safe list
| * 3ca6b0c917 arm64: errata: Assume that unknown CPUs _are_ vulnerable to Spectre BHB
| * 1577657f5e arm64: errata: Add QCOM_KRYO_4XX_GOLD to the spectre_bhb_k24_list
| * 892d24d7e3 arm64: cputype: Add MIDR_CORTEX_A76AE
| * 9da005f884 xenfs/xensyms: respect hypervisor's "next" indication
| * 192b87608c media: siano: Fix error handling in smsdvb_module_init()
| * 3e7fc0606e media: vim2m: print device name after registering device
| * 40084302f6 media: venus: hfi: add check to handle incorrect queue size
| * 5af611c70f media: venus: hfi: add a check to handle OOB in sfr region
| * 259dafed18 media: i2c: adv748x: Fix test pattern selection mask
| * 8895a7af84 media: mtk-vcodec: venc: avoid -Wenum-compare-conditional warning
| * fd7bb97ede media: mediatek: vcodec: Fix a resource leak related to the scp device in FW initialization
| * 39cb5a29cd dt-bindings: media: st,stmipid02: correct lane-polarities maxItems
| * 480faed292 ext4: don't treat fhandle lookup of ea_inode as FS corruption
| * d5cba7730d bpf: support SKF_NET_OFF and SKF_LL_OFF on skb frags
| * 0e1816d8c2 pwm: fsl-ftm: Handle clk_get_rate() returning 0
| * d837f37cd4 pwm: rcar: Improve register calculation
| * 8ddbec73ea pwm: mediatek: Prevent divide-by-zero in pwm_mediatek_config()
| * a0171967ab tpm, tpm_tis: Workaround failed command reception on Infineon devices
| * 16fae241f4 ktest: Fix Test Failures Due to Missing LOG_FILE Directories
| * 14345d5d7d tracing: probe-events: Add comments about entry data storing code
| * fda15c5b96 fbdev: omapfb: Add 'plane' value check
| * 3ab9cd2d2e drm/amdgpu: grab an additional reference on the gang fence v2
| * 13e5148f70 PCI: vmd: Make vmd_dev::cfg_lock a raw_spinlock_t type
| * f8693e1bae drm/amdgpu: handle amdgpu_cgs_create_device() errors in amd_powerplay_create()
| * 988705e558 drm/mediatek: mtk_dpi: Explicitly manage TVD clock in power on/off
| * 50967df6e4 drm/mediatek: mtk_dpi: Move the input_2p_en bit to platform data
| * a36f8d5445 drm/amdkfd: debugfs hang_hws skip GPU with MES
| * 864d739fbc drm/amdkfd: Fix pqm_destroy_queue race with GPU reset
| * 89af6b39f0 drm/amdkfd: Fix mode1 reset crash issue
| * 06a1db4cb7 drm/amdkfd: clamp queue size to minimum
| * 61d1a8296e drivers: base: devres: Allow to release group on device release
| * 100cf2fa01 drm/bridge: panel: forbid initializing a panel with unknown connector type
| * 1e27e6050c drm: panel-orientation-quirks: Add quirk for OneXPlayer Mini (Intel)
| * 49ef9e47e7 drm: panel-orientation-quirks: Add new quirk for GPD Win 2
| * 118f95e3ff drm: panel-orientation-quirks: Add quirk for AYA NEO Slide
| * 14dc152795 drm: panel-orientation-quirks: Add quirks for AYA NEO Flip DS and KB
| * bfea2e6f03 drm: panel-orientation-quirks: Add support for AYANEO 2S
| * d74554965c drm/amd/display: add workaround flag to link to force FFE preset
| * 2528ccb0f2 drm/amd/display: Update Cursor request mode to the beginning prefetch always
| * f5b168c309 drm: allow encoder mode_set even when connectors change for crtc
| * fb95ab35d0 Bluetooth: qca: simplify WCN399x NVM loading
| * 806464634e Bluetooth: hci_uart: fix race during initialization
| * e711501970 tracing: fix return value in __ftrace_event_enable_disable for TRACE_REG_UNREGISTER
| * d537859e56 net: vlan: don't propagate flags on open
| * efd75512e3 wifi: mt76: mt76x2u: add TP-Link TL-WDN6200 ID to device table
| * e4d1ca0a84 scsi: st: Fix array overflow in st_setup()
| * 06f20cd706 cdc_ether|r8152: ThinkPad Hybrid USB-C/A Dock quirk
| * 362a90cecd ext4: ignore xattrs past end
| * b7cc9e3adf Revert "f2fs: rebuild nat_bits during umount"
| * 1dbece9c2f ext4: protect ext4_release_dquot against freezing
| * 1263713795 ahci: add PCI ID for Marvell 88SE9215 SATA Controller
| * 8b5e5aac44 f2fs: fix to avoid out-of-bounds access in f2fs_truncate_inode_blocks()
| * 7f1d986da5 wifi: ath12k: Fix invalid data access in ath12k_dp_rx_h_undecap_nwifi
| * da3ba4a44e net: sfp: add quirk for 2.5G OEM BX SFP
| * 7568e5e448 ata: libata-eh: Do not use ATAPI DMA for a device limited to PIO mode
| * cc0bc4cb62 jfs: add sanity check for agwidth in dbMount
| * b3c4884b98 jfs: Prevent copying of nlink with value 0 from disk inode
| * 7ccf3b3527 fs/jfs: Prevent integer overflow in AG size calculation
| * a14b2592a7 fs/jfs: cast inactags to s64 to prevent potential overflow
| * 067347e00a jfs: Fix uninit-value access of imap allocated in the diMount() function
| * 95f17738b8 page_pool: avoid infinite loop to schedule delayed worker
| * 809bf3a7d0 net: usb: asix_devices: add FiberGecko DeviceID
| * 4ae2e89155 scsi: target: spc: Fix RSOC parameter data header size
| * 4fc5c17538 f2fs: don't retry IO for corrupted data scenario
| * 2c512f2ead wifi: ath12k: Fix invalid entry fetch in ath12k_dp_mon_srng_process
| * b3e04472a8 ASoC: amd: yc: update quirk data for new Lenovo model
| * 01529e60ae ASoC: amd: Add DMI quirk for ACP6X mic support
| * 63f5160888 ALSA: usb-audio: Fix CME quirk for UF series keyboards
| * 0932950d3a mmc: dw_mmc: add a quirk for accessing 64-bit FIFOs in two halves
| * 035c6e6007 media: uvcvideo: Add quirk for Actions UVC05
| * bd095d3a9d ASoC: fsl_audmix: register card device depends on 'dais' property
| * 1d91458b12 ALSA: hda: intel: Add Lenovo IdeaPad Z570 to probe denylist
| * fb44392bd5 ALSA: hda: intel: Fix Optimus when GPU has no sound
| * be706a48bb HID: pidff: Fix null pointer dereference in pidff_find_fields
| * 4d5bcca217 HID: pidff: Add PERIODIC_SINE_ONLY quirk
| * c1fde337b3 HID: Add hid-universal-pidff driver and supported device ids
| * fd6055c1a1 HID: pidff: Add FIX_WHEEL_DIRECTION quirk
| * 123e94f66d HID: pidff: Add hid_pidff_init_with_quirks and export as GPL symbol
| * b2f378891c HID: pidff: Add PERMISSIVE_CONTROL quirk
| * 1830b4166f HID: pidff: Add MISSING_PBO quirk and its detection
| * bf28476037 HID: pidff: Add MISSING_DELAY quirk and its detection
| * a08ee3c7ee HID: pidff: Do not send effect envelope if it's empty
| * 520e0371d1 HID: pidff: Convert infinite length from Linux API to PID standard
| * 4a29465fbe ASoC: SOF: topology: Use krealloc_array() to replace krealloc()
| * 1e5b04f08e zstd: Increase DYNAMIC_BMI2 GCC version cutoff from 4.8 to 11.0 to work around compiler segfault
| * af71db7de0 xen/mcelog: Add __nonstring annotations for unterminated strings
| * 150d1cbdf7 arm64: cputype: Add QCOM_CPU_PART_KRYO_3XX_GOLD
| * f14d29b891 perf: arm_pmu: Don't disable counter in armpmu_add()
| * c7dec34612 x86/cpu: Don't clear X86_FEATURE_LAHF_LM flag in init_amd_k8() on AMD when running in a virtual machine
| * 8449fd2a14 x86/ia32: Leave NULL selector values 0~3 unchanged
| * d0f4b75a8f x86/mm: Clear _PAGE_DIRTY for kernel mappings when we clear _PAGE_RW
| * 942a4b97fc pm: cpupower: bench: Prevent NULL dereference on malloc failure
| * 4ec80a6873 umount: Allow superblock owners to force umount
| * 323960a187 fs: consistently deref the files table with rcu_dereference_raw()
| * 6abd09bed4 iommu/mediatek: Fix NULL pointer deference in mtk_iommu_device_group
| * 90cdd7e5a4 nft_set_pipapo: fix incorrect avx2 match of 5th field octet
| * fbaffe8bcc net: ppp: Add bound checking for skb data on ppp_sync_txmung
| * 6509e2e17d ipv6: Align behavior across nexthops during path selection
| * d2718324f9 net_sched: sch_sfq: move the limit validation
| * 00d44fe29e net_sched: sch_sfq: use a temporary work area for validating configuration
| * a8008b9758 nvmet-fcloop: swap list_add_tail arguments
| * 9f5ef4a5ea drm/i915/huc: Fix fence not released on early probe errors
| * 44984339d3 ata: sata_sx4: Add error handling in pdc20621_i2c_read()
| * c17ef974bf net: libwx: handle page_pool_dev_alloc_pages error
| * 9649d08010 drm/tests: probe-helper: Fix drm_display_mode memory leak
| * 718a892e2d drm/tests: modes: Fix drm_display_mode memory leak
| * cea2c8ace7 drm/tests: cmdline: Fix drm_display_mode memory leak
| * 742a084c6a drm/tests: helpers: Create kunit helper to destroy a drm_display_mode
| * 0c6550ab45 drm/tests: helpers: Fix compiler warning
| * 85926abafa drm/tests: helpers: Add helper for drm_display_mode_from_cea_vic()
| * 60f481fdf1 drm/tests: Add helper to create mock crtc
| * af1dccab38 drm/tests: Add helper to create mock plane
| * a31fd0eaf7 drm/tests: helpers: Add atomic helpers
| * eceb15bbf6 drm/tests: modeset: Fix drm_display_mode memory leak
| * c5ed0eaddc net: ethtool: Don't call .cleanup_data when prepare_data fails
| * 8b158a0d19 tc: Ensure we have enough buffer space when sending filter netlink notifications
| * 28f2cd143b net/sched: cls_api: conditional notification of events
| * aa258dbf45 rtnl: add helper to check if a notification is needed
| * abd07987f7 rtnl: add helper to check if rtnl group has listeners
| * af4e364fb1 octeontx2-pf: qos: fix VF root node parent queue index
| * 2bcad8fefc net: tls: explicitly disallow disconnect
| * 2f9761a94b codel: remove sch->q.qlen check before qdisc_tree_reduce_backlog()
| * 09c2dcda2c tipc: fix memory leak in tipc_link_xmit
| * ae7e9d29a3 objtool: Fix INSN_CONTEXT_SWITCH handling in validate_unret()
| * 5b09bf6243 ata: pata_pxa: Fix potential NULL pointer dereference in pxa_ata_probe()
| * 4d4bf6d6b1 drm/i915: Disable RPG during live selftest
| * 303d760475 drm/i915/dg2: wait for HuC load completion before running selftests
| * 8b2148daa7 drm/i915/xelpg: Extend driver code of Xe_LPG to Xe_LPG+
| * 18520ecc3c drm/i915/mocs: use to_gt() instead of direct &i915->gt
| * 58c1e8c569 selftests/futex: futex_waitv wouldblock test should fail
* a573fb085d Merge branch 'android15-6.6' into android15-6.6-lts

Change-Id: I794f1b2aa6407cb0caadf30b675fb35a4cd3181c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Author: Greg Kroah-Hartman <gregkh@google.com>
Commit: Treehugger Robot
Date:   2025-05-19 10:40:27 +0000

548 files changed, 5812 insertions(+), 2787 deletions(-)

@@ -55,8 +55,7 @@ properties:
       - const: arm,primecell
 
   reg:
-    minItems: 1
-    maxItems: 2
+    maxItems: 1
 
   clocks:
     maxItems: 1

@@ -41,8 +41,7 @@ properties:
       - const: arm,primecell
 
   reg:
-    minItems: 1
-    maxItems: 2
+    maxItems: 1
 
   clocks:
     maxItems: 1

@@ -71,7 +71,7 @@ properties:
           description:
             Any lane can be inverted or not.
           minItems: 1
-          maxItems: 2
+          maxItems: 3
 
         required:
           - data-lanes

@@ -892,11 +892,10 @@ attribute-sets:
       -
         name: prop-list
        type: nest
-        nested-attributes: link-attrs
+        nested-attributes: prop-list-link-attrs
       -
         name: alt-ifname
         type: string
-        multi-attr: true
       -
         name: perm-address
         type: binary
@@ -931,6 +930,13 @@ attribute-sets:
       -
         name: gro-ipv4-max-size
         type: u32
+  -
+    name: prop-list-link-attrs
+    subset-of: link-attrs
+    attributes:
+      -
+        name: alt-ifname
+        multi-attr: true
   -
     name: af-spec-attrs
     attributes:
@@ -1193,9 +1199,10 @@
         type: u32
   -
     name: mctp-attrs
+    name-prefix: ifla-mctp-
     attributes:
       -
-        name: mctp-net
+        name: net
         type: u32
   -
     name: stats-attrs
@@ -1362,7 +1369,6 @@ operations:
             - min-mtu
             - max-mtu
             - prop-list
-            - alt-ifname
             - perm-address
             - proto-down-reason
             - parent-dev-name

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 87
+SUBLEVEL = 89
 EXTRAVERSION =
 NAME = Pinguïn Aangedreven
 
@@ -1033,6 +1033,9 @@ ifdef CONFIG_CC_IS_GCC
 KBUILD_CFLAGS += -fconserve-stack
 endif
 
+# Ensure compilers do not transform certain loops into calls to wcslen()
+KBUILD_CFLAGS += -fno-builtin-wcslen
+
 # change __FILE__ to the relative path from the srctree
 KBUILD_CPPFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
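
The new -fno-builtin-wcslen guard exists because an optimizing GCC may
pattern-match a wide-character counting loop into a call to wcslen(), a
libcall the kernel image cannot satisfy. A minimal sketch of the failure
mode (illustrative only, not code from this tree; wide_len() is a
hypothetical name):

    /* With builtin transformations enabled, GCC is allowed to replace
     * this whole loop with a call to wcslen(). The kernel links against
     * no libc, so no wcslen symbol exists and the link would fail. */
    #include <stddef.h>

    size_t wide_len(const __WCHAR_TYPE__ *s)
    {
            const __WCHAR_TYPE__ *p = s;

            while (*p)
                    p++;
            return p - s;
    }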

@@ -43671,6 +43671,11 @@ member {
   type_id: 0x69fa9768
   offset: 256
 }
+member {
+  id: 0x39d6d358
+  type_id: 0x6875099c
+  offset: 32
+}
 member {
   id: 0x39e9b87f
   type_id: 0x6888b6e5
@@ -116525,6 +116530,11 @@ member {
   type_id: 0x34544a3f
   offset: 1216
 }
+member {
+  id: 0xfa946861
+  name: "ipv4_nh"
+  type_id: 0xc9082b19
+}
 member {
   id: 0x3b9e102b
   name: "ipv6"
@@ -116594,6 +116604,11 @@ member {
   type_id: 0x12e0cbae
   offset: 704
 }
+member {
+  id: 0xab39140b
+  name: "ipv6_nh"
+  type_id: 0x6d25e07f
+}
 member {
   id: 0x9cf559e0
   name: "ipv6_pinfo_offset"
@@ -119164,6 +119179,12 @@ member {
   type_id: 0x340dea21
   offset: 384
 }
+member {
+  id: 0x632a482d
+  name: "kern_flags"
+  type_id: 0xc9082b19
+  offset: 224
+}
 member {
   id: 0x3afd0e3e
   name: "kern_hyp_va"
@@ -127557,6 +127578,12 @@ member {
   name: "map"
   type_id: 0x04b193cc
 }
+member {
+  id: 0x8df2c9e6
+  name: "map"
+  type_id: 0x04b193cc
+  offset: 128
+}
 member {
   id: 0x8df87907
   name: "map"
@@ -127792,6 +127819,12 @@ member {
   name: "map_id"
   type_id: 0xe62ebf07
 }
+member {
+  id: 0x86e8b50e
+  name: "map_id"
+  type_id: 0xc9082b19
+  offset: 256
+}
 member {
   id: 0xa32be5db
   name: "map_ifindex"
@@ -128088,6 +128121,12 @@ member {
   type_id: 0x6e73208e
   offset: 192
 }
+member {
+  id: 0x2a09386d
+  name: "map_type"
+  type_id: 0x6e73208e
+  offset: 288
+}
 member {
   id: 0x2a261544
   name: "map_type"
@@ -141594,6 +141633,12 @@ member {
   name: "nh"
   type_id: 0x1f9da9a8
 }
+member {
+  id: 0x713b4511
+  name: "nh"
+  type_id: 0x1e9c55da
+  offset: 320
+}
 member {
   id: 0xdb1c1db1
   name: "nh_all"
@@ -141622,6 +141667,11 @@ member {
   name: "nh_entry"
   type_id: 0x17b2105b
 }
+member {
+  id: 0xd51ec347
+  name: "nh_family"
+  type_id: 0xc9082b19
+}
 member {
   id: 0x9fb787a3
   name: "nh_flags"
@@ -199512,6 +199562,11 @@ member {
   name: "tgt"
   type_id: 0x3df0a7d3
 }
+member {
+  id: 0x073a3a88
+  name: "tgt_index"
+  type_id: 0x92233392
+}
 member {
   id: 0xe9c20711
   name: "tgt_qps"
@@ -199524,6 +199579,12 @@ member {
   type_id: 0x28b9ec9a
   offset: 192
 }
+member {
+  id: 0xc9d6516c
+  name: "tgt_value"
+  type_id: 0x18bd6530
+  offset: 64
+}
 member {
   id: 0x141d2755
   name: "thaw"
@@ -226777,6 +226838,15 @@ struct_union {
     member_id: 0x12f2249e
   }
 }
+struct_union {
+  id: 0x6875099c
+  kind: UNION
+  definition {
+    bytesize: 16
+    member_id: 0xfa946861
+    member_id: 0xab39140b
+  }
+}
 struct_union {
   id: 0x6888b6e5
   kind: UNION
@@ -231444,6 +231514,16 @@ struct_union {
     member_id: 0x982b4630
   }
 }
+struct_union {
+  id: 0x1e9c55da
+  kind: STRUCT
+  name: "bpf_nh_params"
+  definition {
+    bytesize: 20
+    member_id: 0xd51ec347
+    member_id: 0x39d6d358
+  }
+}
 struct_union {
   id: 0x3b3bbe3f
   kind: STRUCT
@@ -231640,6 +231720,22 @@ struct_union {
     member_id: 0x0082372e
   }
 }
+struct_union {
+  id: 0x212d37a1
+  kind: STRUCT
+  name: "bpf_redirect_info"
+  definition {
+    bytesize: 64
+    member_id: 0x073a3a88
+    member_id: 0xc9d6516c
+    member_id: 0x8df2c9e6
+    member_id: 0x2da184bc
+    member_id: 0x632a482d
+    member_id: 0x86e8b50e
+    member_id: 0x2a09386d
+    member_id: 0x713b4511
+  }
+}
 struct_union {
   id: 0x3a354a67
   kind: STRUCT
@@ -376343,6 +376439,15 @@ elf_symbol {
   type_id: 0x1de47b51
   full_name: "bpf_prog_sub"
 }
+elf_symbol {
+  id: 0x56b332ad
+  name: "bpf_redirect_info"
+  is_defined: true
+  symbol_type: OBJECT
+  crc: 0x678abaea
+  type_id: 0x212d37a1
+  full_name: "bpf_redirect_info"
+}
 elf_symbol {
   id: 0x6a712ee1
   name: "bpf_stats_enabled_key"
@@ -437891,6 +437996,7 @@ interface {
   symbol_id: 0x6ad917a1
   symbol_id: 0xdeb1861d
   symbol_id: 0x7661d150
+  symbol_id: 0x56b332ad
   symbol_id: 0x6a712ee1
   symbol_id: 0xe594a242
   symbol_id: 0x3afeb397

@@ -12,6 +12,7 @@
   __arch_copy_to_user
   arm64_use_ng_mappings
   bcmp
+  bpf_redirect_info
   bpf_trace_run1
   bpf_trace_run2
   bpf_trace_run3
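
For context, bpf_redirect_info is the per-CPU scratch object that
net/core/filter.c defines and exports for the XDP redirect path; listing
it here keeps the export visible to GKI virtual-device modules. A hedged
sketch of how such a per-CPU export is read (hypothetical module code;
the field layout matches the ABI dump above and the 6.6 definition in
<linux/filter.h>):

    #include <linux/filter.h>   /* declares the per-CPU bpf_redirect_info */

    static u64 sketch_read_tgt_index(void)
    {
            /* The symbol is a DEFINE_PER_CPU object, so it is reached
             * through this_cpu_ptr() rather than as a plain global. */
            struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

            return ri->tgt_index;
    }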

@@ -1246,8 +1246,7 @@
                 };
 
                 pwm0: pwm@1401e000 {
-                        compatible = "mediatek,mt8173-disp-pwm",
-                                     "mediatek,mt6595-disp-pwm";
+                        compatible = "mediatek,mt8173-disp-pwm";
                         reg = <0 0x1401e000 0 0x1000>;
                         #pwm-cells = <2>;
                         clocks = <&mmsys CLK_MM_DISP_PWM026M>,
@@ -1257,8 +1256,7 @@
                 };
 
                 pwm1: pwm@1401f000 {
-                        compatible = "mediatek,mt8173-disp-pwm",
-                                     "mediatek,mt6595-disp-pwm";
+                        compatible = "mediatek,mt8173-disp-pwm";
                         reg = <0 0x1401f000 0 0x1000>;
                         #pwm-cells = <2>;
                         clocks = <&mmsys CLK_MM_DISP_PWM126M>,

@@ -196,13 +196,6 @@
                         wakeup-event-action = <EV_ACT_ASSERTED>;
                         wakeup-source;
                 };
-
-                key-suspend {
-                        label = "Suspend";
-                        gpios = <&gpio TEGRA234_MAIN_GPIO(G, 2) GPIO_ACTIVE_LOW>;
-                        linux,input-type = <EV_KEY>;
-                        linux,code = <KEY_SLEEP>;
-                };
         };
 
         fan: pwm-fan {

@@ -75,6 +75,7 @@
 #define ARM_CPU_PART_CORTEX_A76 0xD0B
 #define ARM_CPU_PART_NEOVERSE_N1 0xD0C
 #define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_CORTEX_A76AE 0xD0E
 #define ARM_CPU_PART_NEOVERSE_V1 0xD40
 #define ARM_CPU_PART_CORTEX_A78 0xD41
 #define ARM_CPU_PART_CORTEX_A78AE 0xD42
@@ -119,6 +120,7 @@
 #define QCOM_CPU_PART_KRYO 0x200
 #define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
 #define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
+#define QCOM_CPU_PART_KRYO_3XX_GOLD 0x802
 #define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
 #define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
 #define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
@@ -158,6 +160,7 @@
 #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
 #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
 #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A76AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76AE)
 #define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
 #define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
 #define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
@@ -195,6 +198,7 @@
 #define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
 #define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
 #define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
+#define MIDR_QCOM_KRYO_3XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_GOLD)
 #define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
 #define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
 #define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)

@@ -587,6 +587,8 @@ struct kvm_vcpu_arch {
         /* Values of trap registers for the guest. */
         u64 hcr_el2;
         u64 mdcr_el2;
+
+        /* DO NOT USE: Removed upstream. Kept to not break the KMI. */
         u64 cptr_el2;
 
         /* Values of trap registers for the host before guest entry. */
@@ -637,7 +639,8 @@ struct kvm_vcpu_arch {
         struct kvm_guest_debug_arch vcpu_debug_state;
         struct kvm_guest_debug_arch external_debug_state;
 
-        struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
+        /* DO NOT USE: Removed upstream. Kept to not break the KMI. */
+        struct user_fpsimd_state *host_fpsimd_state;
 
         struct {
                 /* {Break,watch}point registers */
@@ -846,10 +849,6 @@ struct kvm_vcpu_arch {
 /* pKVM host vcpu state is dirty, needs resync (nVHE-only) */
 #define PKVM_HOST_STATE_DIRTY __vcpu_single_flag(iflags, BIT(7))
 
-/* SVE enabled for host EL0 */
-#define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0))
-/* SME enabled for EL0 */
-#define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1))
 /* Physical CPU not in supported_cpus */
 #define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2))
 /* WFIT instruction trapped */

@@ -142,6 +142,9 @@ void __hyp_exit(void);
 #ifdef __KVM_NVHE_HYPERVISOR__
 struct user_fpsimd_state *get_host_fpsimd_state(struct kvm_vcpu *vcpu);
 struct kvm_host_sve_state *get_host_sve_state(struct kvm_vcpu *vcpu);
+#else
+#define get_host_fpsimd_state(vcpu) NULL
+#define get_host_sve_state(vcpu) NULL
 #endif
 
 extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);

@@ -97,7 +97,6 @@ enum mitigation_state arm64_get_meltdown_state(void);
 enum mitigation_state arm64_get_spectre_bhb_state(void);
 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
-u8 spectre_bhb_loop_affected(int scope);
 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
 
 bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);

@@ -362,31 +362,33 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 #define __flush_tlb_range_op(op, start, pages, stride, \
                                 asid, tlb_level, tlbi_user) \
 do { \
+        typeof(start) __flush_start = start; \
+        typeof(pages) __flush_pages = pages; \
         int num = 0; \
         int scale = 3; \
         unsigned long addr; \
 \
-        while (pages > 0) { \
+        while (__flush_pages > 0) { \
                 if (!system_supports_tlb_range() || \
-                    pages == 1) { \
-                        addr = __TLBI_VADDR(start, asid); \
+                    __flush_pages == 1) { \
+                        addr = __TLBI_VADDR(__flush_start, asid); \
                         __tlbi_level(op, addr, tlb_level); \
                         if (tlbi_user) \
                                 __tlbi_user_level(op, addr, tlb_level); \
-                        start += stride; \
-                        pages -= stride >> PAGE_SHIFT; \
+                        __flush_start += stride; \
+                        __flush_pages -= stride >> PAGE_SHIFT; \
                         continue; \
                 } \
 \
-                num = __TLBI_RANGE_NUM(pages, scale); \
+                num = __TLBI_RANGE_NUM(__flush_pages, scale); \
                 if (num >= 0) { \
-                        addr = __TLBI_VADDR_RANGE(start, asid, scale, \
-                                                  num, tlb_level); \
+                        addr = __TLBI_VADDR_RANGE(__flush_start, asid, \
+                                                  scale, num, tlb_level); \
                         __tlbi(r##op, addr); \
                         if (tlbi_user) \
                                 __tlbi_user(r##op, addr); \
-                        start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
-                        pages -= __TLBI_RANGE_PAGES(num, scale); \
+                        __flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+                        __flush_pages -= __TLBI_RANGE_PAGES(num, scale); \
                 } \
                 scale--; \
         } \
@@ -1707,31 +1707,6 @@ void fpsimd_signal_preserve_current_state(void)
         sve_to_fpsimd(current);
 }
 
-/*
- * Called by KVM when entering the guest.
- */
-void fpsimd_kvm_prepare(void)
-{
-        if (!system_supports_sve())
-                return;
-
-        /*
-         * KVM does not save host SVE state since we can only enter
-         * the guest from a syscall so the ABI means that only the
-         * non-saved SVE state needs to be saved. If we have left
-         * SVE enabled for performance reasons then update the task
-         * state to be FPSIMD only.
-         */
-        get_cpu_fpsimd_context();
-
-        if (test_and_clear_thread_flag(TIF_SVE)) {
-                sve_to_fpsimd(current);
-                current->thread.fp_type = FP_STATE_FPSIMD;
-        }
-
-        put_cpu_fpsimd_context();
-}
-
 /*
  * Associate current's FPSIMD context with this cpu
  * The caller must have ownership of the cpu FPSIMD context before calling


@@ -845,12 +845,37 @@ static unsigned long system_bhb_mitigations;
* This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
* SCOPE_SYSTEM call will give the right answer.
*/
u8 spectre_bhb_loop_affected(int scope)
static bool is_spectre_bhb_safe(int scope)
{
static const struct midr_range spectre_bhb_safe_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{},
};
static bool all_safe = true;
if (scope != SCOPE_LOCAL_CPU)
return all_safe;
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
return true;
all_safe = false;
return false;
}
static u8 spectre_bhb_loop_affected(void)
{
u8 k = 0;
static u8 max_bhb_k;
if (scope == SCOPE_LOCAL_CPU) {
static const struct midr_range spectre_bhb_k32_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
@@ -866,6 +891,7 @@ u8 spectre_bhb_loop_affected(int scope)
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
{},
};
static const struct midr_range spectre_bhb_k11_list[] = {
@@ -887,11 +913,6 @@ u8 spectre_bhb_loop_affected(int scope)
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
k = 8;
max_bhb_k = max(max_bhb_k, k);
} else {
k = max_bhb_k;
}
return k;
}
@@ -916,29 +937,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
}
}
static bool is_spectre_bhb_fw_affected(int scope)
static bool has_spectre_bhb_fw_mitigation(void)
{
static bool system_affected;
enum mitigation_state fw_state;
bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
{},
};
bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
spectre_bhb_firmware_mitigated_list);
if (scope != SCOPE_LOCAL_CPU)
return system_affected;
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
system_affected = true;
return true;
}
return false;
return has_smccc && fw_state == SPECTRE_MITIGATED;
}
static bool supports_ecbhb(int scope)
@@ -954,6 +959,8 @@ static bool supports_ecbhb(int scope)
ID_AA64MMFR1_EL1_ECBHB_SHIFT);
}
static u8 max_bhb_k;
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
int scope)
{
@@ -962,16 +969,18 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
if (supports_csv2p3(scope))
return false;
if (supports_clearbhb(scope))
return true;
if (spectre_bhb_loop_affected(scope))
return true;
if (is_spectre_bhb_fw_affected(scope))
return true;
if (is_spectre_bhb_safe(scope))
return false;
/*
* At this point the core isn't known to be "safe" so we're going to
* assume it's vulnerable. We still need to update `max_bhb_k` though,
* but only if we aren't mitigating with clearbhb.
*/
if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());
return true;
}
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
@@ -1002,7 +1011,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
bp_hardening_cb_t cpu_cb;
enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
enum mitigation_state state = SPECTRE_VULNERABLE;
struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
@@ -1028,7 +1037,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
state = SPECTRE_MITIGATED;
set_bit(BHB_INSN, &system_bhb_mitigations);
} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
} else if (spectre_bhb_loop_affected()) {
/*
* Ensure KVM uses the indirect vector which will have the
* branchy-loop added. A57/A72-r0 will already have selected
@@ -1041,9 +1050,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
state = SPECTRE_MITIGATED;
set_bit(BHB_LOOP, &system_bhb_mitigations);
} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
if (fw_state == SPECTRE_MITIGATED) {
} else if (has_spectre_bhb_fw_mitigation()) {
/*
* Ensure KVM uses one of the spectre bp_hardening
* vectors. The indirect vector doesn't include the EL3
@@ -1067,7 +1074,6 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
state = SPECTRE_MITIGATED;
set_bit(BHB_FW, &system_bhb_mitigations);
}
}
update_mitigation_state(&spectre_bhb_state, state);
}
@@ -1100,7 +1106,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
{
u8 rd;
u32 insn;
u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
BUG_ON(nr_inst != 1); /* MOV -> MOV */
@@ -1109,7 +1114,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
insn = le32_to_cpu(*origptr);
rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
AARCH64_INSN_VARIANT_64BIT,
AARCH64_INSN_MOVEWIDE_ZERO);
*updptr++ = cpu_to_le32(insn);
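The callback above patches a single placeholder MOV so the mitigation loop executes max_bhb_k iterations. As a conceptual C-level model only (the real loop is assembly in the EL1 vectors and its exact branch sequence differs), each iteration's job is simply to retire a taken branch and push attacker-controlled history out of the branch history buffer:

/* Conceptual model, not the kernel's vector code. */
static inline void bhb_clear_loop(unsigned int max_bhb_k)
{
        for (unsigned int i = 0; i < max_bhb_k; i++)
                asm volatile("b 1f\n1:" ::: "memory");  /* one taken branch */
}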


@@ -1433,7 +1433,6 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
}
vcpu_reset_hcr(vcpu);
vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu);
/*
* Handle the "start in power-off" case.


@@ -37,8 +37,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
if (ret)
return ret;
vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
return 0;
}
@@ -56,48 +54,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
if (!system_supports_fpsimd())
return;
fpsimd_kvm_prepare();
/*
* We will check TIF_FOREIGN_FPSTATE just before entering the
* guest in kvm_arch_vcpu_ctxflush_fp() and override this to
* FP_STATE_FREE if the flag is set.
*/
vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
if (system_supports_sme()) {
vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
vcpu_set_flag(vcpu, HOST_SME_ENABLED);
/*
* If PSTATE.SM is enabled then save any pending FP
* state and disable PSTATE.SM. If we leave PSTATE.SM
* enabled and the guest does not enable SME via
* CPACR_EL1.SMEN then operations that should be valid
* may generate SME traps from EL1 to EL1 which we
* can't intercept and which would confuse the guest.
*
* Do the same for PSTATE.ZA in the case where there
* is state in the registers which has not already
* been saved, this is very unlikely to happen.
*/
if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
vcpu->arch.fp_state = FP_STATE_FREE;
fpsimd_save_and_flush_cpu_state();
}
}
/*
* Ensure that any host FPSIMD/SVE/SME state is saved and unbound such
* that the host kernel is responsible for restoring this state upon
* return to userspace, and the hyp code doesn't need to save anything.
*
* When the host may use SME, fpsimd_save_and_flush_cpu_state() ensures
* that PSTATE.{SM,ZA} == {0,0}.
*/
fpsimd_save_and_flush_cpu_state();
/*
* If normal guests gain this support, maintain this behavior
* for pKVM guests, which don't support SME.
*/
BUG_ON(is_protected_kvm_enabled() && system_supports_sme() &&
read_sysreg_s(SYS_SVCR));
vcpu->arch.fp_state = FP_STATE_FREE;
}
/*
@@ -162,57 +128,18 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
local_irq_save(flags);
/*
* If we have VHE then the Hyp code will reset CPACR_EL1 to
* the default value and we need to reenable SME.
*/
if (has_vhe() && system_supports_sme()) {
/* Also restore EL0 state seen on entry */
if (vcpu_get_flag(vcpu, HOST_SME_ENABLED))
sysreg_clear_set(CPACR_EL1, 0,
CPACR_EL1_SMEN_EL0EN |
CPACR_EL1_SMEN_EL1EN);
else
sysreg_clear_set(CPACR_EL1,
CPACR_EL1_SMEN_EL0EN,
CPACR_EL1_SMEN_EL1EN);
isb();
}
if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
if (vcpu_has_sve(vcpu)) {
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
/*
* Restore the VL that was saved when bound to the CPU,
* which is the maximum VL for the guest. Because
* the layout of the data when saving the sve state
* depends on the VL, we need to use a consistent VL.
* Note that this means that at guest exit ZCR_EL1 is
* not necessarily the same as on guest entry.
*
* Flushing the cpu state sets the TIF_FOREIGN_FPSTATE
* bit for the context, which lets the kernel restore
* the sve state, including ZCR_EL1 later.
*/
if (!has_vhe())
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1,
SYS_ZCR_EL1);
}
fpsimd_save_and_flush_cpu_state();
/*
* Flush (save and invalidate) the fpsimd/sve state so that if
* the host tries to use fpsimd/sve, it's not using stale data
* from the guest.
*
* Flushing the state sets the TIF_FOREIGN_FPSTATE bit for the
* context unconditionally, in both nVHE and VHE. This allows
* the kernel to restore the fpsimd/sve state, including ZCR_EL1
* when needed.
*/
fpsimd_save_and_flush_cpu_state();
} else if (has_vhe() && system_supports_sve()) {
/*
* The FPSIMD/SVE state in the CPU has not been touched, and we
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
* reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE
* for EL0. To avoid spurious traps, restore the trap state
* seen by kvm_arch_vcpu_load_fp():
*/
if (vcpu_get_flag(vcpu, HOST_SVE_ENABLED))
sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
else
sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
}
local_irq_restore(flags);


@@ -44,6 +44,11 @@ alternative_if ARM64_HAS_RAS_EXTN
alternative_else_nop_endif
mrs x1, isr_el1
cbz x1, 1f
// Ensure that __guest_enter() always provides a context
// synchronization event so that callers don't need ISBs for anything
// that would usually be synchronized by the ERET.
isb
mov x0, #ARM_EXCEPTION_IRQ
ret


@@ -26,6 +26,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
@@ -303,9 +304,106 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1ULL, SYS_ZCR_EL2);
}
static void kvm_hyp_handle_fpsimd_host(struct kvm_vcpu *vcpu);
static void __deactivate_fpsimd_traps(struct kvm_vcpu *vcpu);
static void kvm_hyp_handle_fpsimd_host(struct kvm_vcpu *vcpu)
{
/*
* Non-protected kvm relies on the host restoring its sve state.
* Protected kvm restores the host's sve state so as not to reveal that
* fpsimd was used by a guest nor leak upper sve bits.
*/
if (system_supports_sve()) {
struct kvm_host_sve_state *sve_state = get_host_sve_state(vcpu);
u64 zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
sve_cond_update_zcr_vq(zcr_el2, SYS_ZCR_EL2);
__sve_save_state(sve_state->sve_regs +
sve_ffr_offset(kvm_host_sve_max_vl),
&sve_state->fpsr);
/* Still trap SVE since it's handled by hyp in pKVM. */
if (!vcpu_has_sve(vcpu))
sysreg_clear_set(cptr_el2, 0, CPTR_EL2_TZ);
} else {
__fpsimd_save_state(get_host_fpsimd_state(vcpu));
}
}
static void __deactivate_fpsimd_traps(struct kvm_vcpu *vcpu)
{
u64 reg;
bool trap_sve = vcpu_has_sve(vcpu) ||
(is_protected_kvm_enabled() && system_supports_sve());
if (has_vhe() || has_hvhe()) {
reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
if (trap_sve)
reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
sysreg_clear_set(cpacr_el1, 0, reg);
} else {
reg = CPTR_EL2_TFP;
if (trap_sve)
reg |= CPTR_EL2_TZ;
sysreg_clear_set(cptr_el2, reg, 0);
}
}
static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
{
u64 zcr_el1, zcr_el2;
if (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED)
return;
if (vcpu_has_sve(vcpu)) {
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
zcr_el1 = __vcpu_sys_reg(vcpu, ZCR_EL1);
write_sysreg_el1(zcr_el1, SYS_ZCR);
}
}
static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
{
u64 zcr_el1, zcr_el2;
if (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED)
return;
/*
* When the guest owns the FP regs, we know that guest+hyp traps for
* any FPSIMD/SVE/SME features exposed to the guest have been disabled
* by either fpsimd_lazy_switch_to_guest() or kvm_hyp_handle_fpsimd()
* prior to __guest_entry(). As __guest_entry() guarantees a context
* synchronization event, we don't need an ISB here to avoid taking
* traps for anything that was exposed to the guest.
*/
if (vcpu_has_sve(vcpu)) {
zcr_el1 = read_sysreg_el1(SYS_ZCR);
__vcpu_sys_reg(vcpu, ZCR_EL1) = zcr_el1;
/*
* The guest's state is always saved using the guest's max VL.
* Ensure that the host has the guest's max VL active such that
* the host can save the guest's state lazily, but don't
* artificially restrict the host to the guest's max VL.
*/
if (has_vhe()) {
zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
} else {
zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
write_sysreg_el2(zcr_el2, SYS_ZCR);
zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
write_sysreg_el1(zcr_el1, SYS_ZCR);
}
}
}
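Both lazy-switch helpers write a vector-quadword count minus one into ZCR: the architecture encodes SVE vector lengths in 128-bit granules, with the LEN field holding one less than the granule count. A small worked example, where sve_vq_from_vl() is a stand-in assumed to match the kernel helper's byte-length-divided-by-16 behaviour:

#include <stdio.h>

/* Assumed equivalent of the kernel's sve_vq_from_vl(): one vector
 * quadword (VQ) is 128 bits, i.e. 16 bytes. */
static unsigned int sve_vq_from_vl(unsigned int vl_bytes)
{
        return vl_bytes / 16;
}

int main(void)
{
        unsigned int vl = 256;                  /* example max VL in bytes */
        unsigned int vq = sve_vq_from_vl(vl);   /* 16 quadwords */

        printf("VL=%u bytes -> VQ=%u -> ZCR LEN field=%u\n", vl, vq, vq - 1);
        return 0;
}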
/*
* We trap the first access to the FP/SIMD to save the host context and
@@ -313,7 +411,7 @@ static void __deactivate_fpsimd_traps(struct kvm_vcpu *vcpu);
* If FP/SIMD is not implemented, handle the trap and inject an undefined
* instruction exception to the guest. Similarly for trapped SVE accesses.
*/
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
bool sve_guest;
u8 esr_ec;
@@ -343,7 +441,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb();
/* Write out the host state if it's in the registers */
if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
if (is_protected_kvm_enabled() && vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
kvm_hyp_handle_fpsimd_host(vcpu);
/* Restore the guest state */
@@ -543,7 +641,7 @@ static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
return true;
}
static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
handle_tx2_tvm(vcpu))
@@ -566,7 +664,7 @@ static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
__vgic_v3_perform_cpuif_access(vcpu) == 1)
@@ -575,19 +673,18 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu,
u64 *exit_code)
{
if (!__populate_fault_info(vcpu))
return true;
return false;
}
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
__alias(kvm_hyp_handle_memory_fault);
#define kvm_hyp_handle_iabt_low kvm_hyp_handle_memory_fault
#define kvm_hyp_handle_watchpt_low kvm_hyp_handle_memory_fault
static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
return true;
@@ -617,23 +714,16 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
/*
* Allow the hypervisor to handle the exit with an exit handler if it has one.
*
* Returns true if the hypervisor handled the exit, and control should go back
* to the guest, or false if it hasn't.
*/
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
const exit_handler_fn *handlers)
{
const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
exit_handler_fn fn;
fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
exit_handler_fn fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
if (fn)
return fn(vcpu, exit_code);
@@ -663,20 +753,9 @@ static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code
* the guest, false when we should restore the host state and return to the
* main run loop.
*/
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool __fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code,
const exit_handler_fn *handlers)
{
/*
* Save PSTATE early so that we can evaluate the vcpu mode
* early on.
*/
synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Check whether we want to repaint the state one way or
* another.
*/
early_exit_filter(vcpu, exit_code);
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
@@ -706,7 +785,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
goto exit;
/* Check if there's an exit handler and allow it to handle the exit. */
if (kvm_hyp_handle_exit(vcpu, exit_code))
if (kvm_hyp_handle_exit(vcpu, exit_code, handlers))
goto guest;
exit:
/* Return to the host kernel and handle the exit */


@@ -7,6 +7,7 @@
#include <kvm/arm_hypercalls.h>
#include <hyp/adjust_pc.h>
#include <hyp/switch.h>
#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
@@ -65,6 +66,8 @@ static inline void hyp_reqs_smccc_encode(unsigned long ret, struct kvm_cpu_conte
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
static void fpsimd_host_restore(struct kvm_vcpu *vcpu);
static bool (*default_host_smc_handler)(struct user_pt_regs *regs);
static bool (*default_trap_handler)(struct user_pt_regs *regs);
static bool (*unmask_serror)(void);
@@ -671,6 +674,8 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
hyp_entry_exit_handler_fn ec_handler;
u8 esr_ec;
hyp_vcpu->vcpu.arch.fp_state = FP_STATE_HOST_OWNED;
/*
* If we deal with a non-protected guest and the state is potentially
* dirty (from a host perspective), copy the state back into the hyp
@@ -761,6 +766,9 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, u32 exit_reason)
else
host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
if (hyp_vcpu->vcpu.arch.fp_state != FP_STATE_HOST_OWNED)
fpsimd_host_restore(&hyp_vcpu->vcpu);
hyp_vcpu->exit_code = exit_reason;
}
@@ -788,20 +796,8 @@ static void __hyp_sve_restore_host(struct kvm_vcpu *vcpu)
sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
}
static void fpsimd_host_restore(void)
static void fpsimd_host_restore(struct kvm_vcpu *vcpu)
{
if (has_hvhe())
sysreg_clear_set(cpacr_el1, 0,
(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN |
CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN));
else
sysreg_clear_set(cptr_el2, CPTR_EL2_TZ | CPTR_EL2_TFP, 0);
isb();
if (unlikely(is_protected_kvm_enabled())) {
struct pkvm_hyp_vcpu *hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
struct kvm_vcpu *vcpu = &hyp_vcpu->vcpu;
if (vcpu_has_sve(vcpu))
__hyp_sve_save_guest(vcpu);
else
@@ -812,8 +808,7 @@ static void fpsimd_host_restore(void)
else
__fpsimd_restore_state(get_host_fpsimd_state(vcpu));
hyp_vcpu->vcpu.arch.fp_state = FP_STATE_HOST_OWNED;
}
vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
}
static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
@@ -844,8 +839,6 @@ static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
*last_ran = hyp_vcpu->vcpu.vcpu_id;
}
hyp_vcpu->vcpu.arch.fp_state = FP_STATE_HOST_OWNED;
if (pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {
/* Propagate WFx trapping flags, trap ptrauth */
hyp_vcpu->vcpu.arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI |
@@ -865,9 +858,6 @@ static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
if (hyp_vcpu) {
struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
if (hyp_vcpu->vcpu.arch.fp_state == FP_STATE_GUEST_OWNED)
fpsimd_host_restore();
if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu) &&
!vcpu_get_flag(host_vcpu, PKVM_HOST_STATE_DIRTY)) {
__sync_hyp_vcpu(hyp_vcpu);
@@ -888,9 +878,6 @@ static void handle___pkvm_vcpu_sync_state(struct kvm_cpu_context *host_ctxt)
if (!hyp_vcpu || pkvm_hyp_vcpu_is_protected(hyp_vcpu))
return;
if (hyp_vcpu->vcpu.arch.fp_state == FP_STATE_GUEST_OWNED)
fpsimd_host_restore();
__sync_hyp_vcpu(hyp_vcpu);
}
@@ -961,34 +948,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
goto out;
flush_hyp_vcpu(hyp_vcpu);
ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
sync_hyp_vcpu(hyp_vcpu, ret);
if (hyp_vcpu->vcpu.arch.fp_state == FP_STATE_GUEST_OWNED) {
/*
* The guest has used the FP, trap all accesses
* from the host (both FP and SVE).
*/
u64 reg;
if (has_hvhe()) {
reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
if (system_supports_sve())
reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
sysreg_clear_set(cpacr_el1, reg, 0);
} else {
reg = CPTR_EL2_TFP;
if (system_supports_sve())
reg |= CPTR_EL2_TZ;
sysreg_clear_set(cptr_el2, 0, reg);
}
}
} else {
/* The host is fully trusted, run its vCPU directly. */
fpsimd_lazy_switch_to_guest(host_vcpu);
ret = __kvm_vcpu_run(host_vcpu);
fpsimd_lazy_switch_to_host(host_vcpu);
}
out:
cpu_reg(host_ctxt, 1) = ret;
@@ -1730,13 +1696,8 @@ inval:
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, func_id, host_ctxt, 0);
struct pkvm_hyp_vcpu *hyp_vcpu;
bool handled;
hyp_vcpu = pkvm_get_loaded_hyp_vcpu();
if (hyp_vcpu && hyp_vcpu->vcpu.arch.fp_state == FP_STATE_GUEST_OWNED)
fpsimd_host_restore();
func_id &= ~ARM_SMCCC_CALL_HINTS;
handled = kvm_host_psci_handler(host_ctxt, func_id);
@@ -1769,11 +1730,6 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
case ESR_ELx_EC_SMC64:
handle_host_smc(host_ctxt);
break;
case ESR_ELx_EC_FP_ASIMD:
case ESR_ELx_EC_SVE:
case ESR_ELx_EC_SME:
fpsimd_host_restore();
break;
case ESR_ELx_EC_IABT_LOW:
case ESR_ELx_EC_DABT_LOW:
handle_host_mem_abort(host_ctxt);


@@ -60,9 +60,6 @@ static void *__get_host_fpsimd_bytes(void)
struct user_fpsimd_state *get_host_fpsimd_state(struct kvm_vcpu *vcpu)
{
if (likely(!is_protected_kvm_enabled()))
return vcpu->arch.host_fpsimd_state;
WARN_ON(system_supports_sve());
return __get_host_fpsimd_bytes();
}
@@ -82,8 +79,6 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
u64 hcr_set = HCR_RW;
u64 hcr_clear = 0;
u64 cptr_set = 0;
u64 cptr_clear = 0;
/* Protected KVM does not support AArch32 guests. */
BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
@@ -111,23 +106,11 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
}
/* Trap AMU */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids))
hcr_clear |= HCR_AMVOFFEN;
cptr_set |= CPTR_EL2_TAM;
}
/* Trap SVE */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
if (has_hvhe())
cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
else
cptr_set |= CPTR_EL2_TZ;
}
vcpu->arch.hcr_el2 |= hcr_set;
vcpu->arch.hcr_el2 &= ~hcr_clear;
vcpu->arch.cptr_el2 |= cptr_set;
vcpu->arch.cptr_el2 &= ~cptr_clear;
}
/*
@@ -138,8 +121,6 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
u64 hcr_set = 0;
u64 hcr_clear = 0;
u64 cptr_set = 0;
u64 cptr_clear = 0;
/* Memory Tagging: Trap and Treat as Untagged if not supported. */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
@@ -147,17 +128,8 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
hcr_clear |= HCR_DCT | HCR_ATA;
}
/* No SME support in KVM. */
BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME), feature_ids));
if (has_hvhe())
cptr_clear |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
else
cptr_set |= CPTR_EL2_TSM;
vcpu->arch.hcr_el2 |= hcr_set;
vcpu->arch.hcr_el2 &= ~hcr_clear;
vcpu->arch.cptr_el2 |= cptr_set;
vcpu->arch.cptr_el2 &= ~cptr_clear;
}
/*
@@ -168,7 +140,6 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
u64 mdcr_set = 0;
u64 mdcr_clear = 0;
u64 cptr_set = 0;
/* Trap/constrain PMU */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
@@ -195,21 +166,12 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
mdcr_set |= MDCR_EL2_TTRF;
/* Trap Trace */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
if (has_hvhe())
cptr_set |= CPACR_EL1_TTA;
else
cptr_set |= CPTR_EL2_TTA;
}
/* Trap External Trace */
if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
vcpu->arch.mdcr_el2 |= mdcr_set;
vcpu->arch.mdcr_el2 &= ~mdcr_clear;
vcpu->arch.cptr_el2 |= cptr_set;
}
/*
@@ -267,11 +229,6 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE))
vcpu->arch.hcr_el2 |= HCR_TID2;
if (!has_hvhe()) {
vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
}
}
/*
@@ -279,7 +236,6 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
*/
static void pkvm_vcpu_init_traps(struct pkvm_hyp_vcpu *hyp_vcpu)
{
hyp_vcpu->vcpu.arch.cptr_el2 = kvm_get_reset_cptr_el2(&hyp_vcpu->vcpu);
hyp_vcpu->vcpu.arch.mdcr_el2 = 0;
if (!pkvm_hyp_vcpu_is_protected(hyp_vcpu)) {


@@ -83,12 +83,70 @@ static void __deactivate_pvm_traps_hfgxtr(struct kvm_vcpu *vcpu)
write_sysreg_s(ctxt_sys_reg(hctxt, HAFGRTR_EL2), SYS_HAFGRTR_EL2);
}
static void __activate_cptr_traps(struct kvm_vcpu *vcpu)
{
u64 val = CPTR_EL2_TAM; /* Same bit irrespective of E2H */
if (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED)
__activate_traps_fpsimd32(vcpu);
if (has_hvhe()) {
val |= CPACR_ELx_TTA;
if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
val |= CPACR_ELx_FPEN;
if (vcpu_has_sve(vcpu))
val |= CPACR_ELx_ZEN;
}
write_sysreg(val, cpacr_el1);
} else {
val |= CPTR_EL2_TTA | CPTR_NVHE_EL2_RES1;
/*
* Always trap SME since it's not supported in KVM.
* TSM is RES1 if SME isn't implemented.
*/
val |= CPTR_EL2_TSM;
if (!vcpu_has_sve(vcpu) || vcpu->arch.fp_state != FP_STATE_GUEST_OWNED)
val |= CPTR_EL2_TZ;
if (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED)
val |= CPTR_EL2_TFP;
write_sysreg(val, cptr_el2);
}
}
static void __deactivate_cptr_traps(struct kvm_vcpu *vcpu)
{
if (has_hvhe()) {
u64 val = CPACR_ELx_FPEN;
if (cpus_have_final_cap(ARM64_SVE))
val |= CPACR_ELx_ZEN;
if (cpus_have_final_cap(ARM64_SME))
val |= CPACR_ELx_SMEN;
write_sysreg(val, cpacr_el1);
} else {
u64 val = CPTR_NVHE_EL2_RES1;
if (!cpus_have_final_cap(ARM64_SVE))
val |= CPTR_EL2_TZ;
if (!cpus_have_final_cap(ARM64_SME))
val |= CPTR_EL2_TSM;
write_sysreg(val, cptr_el2);
}
}
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
___activate_traps(vcpu);
__activate_traps_common(vcpu);
__activate_cptr_traps(vcpu);
if (unlikely(vcpu_is_protected(vcpu))) {
__activate_pvm_fine_grain_traps(vcpu);
@@ -97,27 +155,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
__activate_traps_hfgxtr(vcpu);
}
val = vcpu->arch.cptr_el2;
val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */
val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
if (cpus_have_final_cap(ARM64_SME)) {
if (has_hvhe())
val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
else
val |= CPTR_EL2_TSM;
}
if (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED) {
if (has_hvhe())
val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
else
val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
__activate_traps_fpsimd32(vcpu);
}
kvm_write_cptr_el2(val);
write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
@@ -167,31 +204,10 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
kvm_reset_cptr_el2(vcpu);
__deactivate_cptr_traps(vcpu);
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}
static void __deactivate_fpsimd_traps(struct kvm_vcpu *vcpu)
{
u64 reg;
bool trap_sve = vcpu_has_sve(vcpu) ||
(is_protected_kvm_enabled() && system_supports_sve());
if (has_hvhe()) {
reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
if (trap_sve)
reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
sysreg_clear_set(cpacr_el1, 0, reg);
} else {
reg = CPTR_EL2_TFP;
if (trap_sve)
reg |= CPTR_EL2_TZ;
sysreg_clear_set(cptr_el2, reg, 0);
}
}
/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
@@ -262,31 +278,6 @@ static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
kvm_handle_pvm_sysreg(vcpu, exit_code));
}
static void kvm_hyp_handle_fpsimd_host(struct kvm_vcpu *vcpu)
{
/*
* Non-protected kvm relies on the host restoring its sve state.
* Protected kvm restores the host's sve state so as not to reveal that
* fpsimd was used by a guest nor leak upper sve bits.
*/
if (unlikely(is_protected_kvm_enabled() && system_supports_sve())) {
struct kvm_host_sve_state *sve_state = get_host_sve_state(vcpu);
u64 zcr_el2 = sve_vq_from_vl(kvm_host_sve_max_vl) - 1;
sve_state->zcr_el1 = read_sysreg_el1(SYS_ZCR);
sve_cond_update_zcr_vq(zcr_el2, SYS_ZCR_EL2);
__sve_save_state(sve_state->sve_regs +
sve_ffr_offset(kvm_host_sve_max_vl),
&sve_state->fpsr);
/* Still trap SVE since it's handled by hyp in pKVM. */
if (!vcpu_has_sve(vcpu))
sysreg_clear_set(cptr_el2, 0, CPTR_EL2_TZ);
} else {
__fpsimd_save_state(get_host_fpsimd_state(vcpu));
}
}
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
@@ -322,20 +313,23 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
return hyp_exit_handlers;
}
/*
* Some guests (e.g., protected VMs) are not allowed to run in AArch32.
* The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
* guest from dropping to AArch32 EL0 if implemented by the CPU. If the
* hypervisor spots a guest in such a state ensure it is handled, and don't
* trust the host to spot or fix it. The check below is based on the one in
* kvm_arch_vcpu_ioctl_run().
*
* Returns false if the guest ran in AArch32 when it shouldn't have, and
* thus should exit to the host, or true if the guest run loop can continue.
*/
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
if (unlikely(vcpu_is_protected(vcpu) && vcpu_mode_is_32bit(vcpu))) {
const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Some guests (e.g., protected VMs) are not allowed to run in
* AArch32. The ARMv8 architecture does not give the hypervisor a
* mechanism to prevent a guest from dropping to AArch32 EL0 if
* implemented by the CPU. If the hypervisor spots a guest in such a
* state ensure it is handled, and don't trust the host to spot or fix
* it. The check below is based on the one in
* kvm_arch_vcpu_ioctl_run().
*/
if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
/*
* As we have caught the guest red-handed, decide that it isn't
* fit for purpose anymore by making the vcpu invalid. The VMM
@@ -347,6 +341,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
*exit_code |= ARM_EXCEPTION_IL;
}
return __fixup_guest_exit(vcpu, exit_code, handlers);
}
/* Switch to the guest for legacy non-VHE systems */


@@ -163,21 +163,6 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
local_irq_restore(flags);
}
static void __deactivate_fpsimd_traps(struct kvm_vcpu *vcpu)
{
u64 reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
if (vcpu_has_sve(vcpu))
reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
sysreg_clear_set(cpacr_el1, 0, reg);
}
static void kvm_hyp_handle_fpsimd_host(struct kvm_vcpu *vcpu)
{
__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
}
static const exit_handler_fn hyp_exit_handlers[] = {
[0 ... ESR_ELx_EC_MAX] = NULL,
[ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
@@ -190,13 +175,10 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
};
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
return hyp_exit_handlers;
}
synchronize_vcpu_pstate(vcpu, exit_code);
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
/*
* If we were in HYP context on entry, adjust the PSTATE view
* so that the usual helpers work correctly.
@@ -216,6 +198,8 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
*vcpu_cpsr(vcpu) &= ~(PSR_MODE_MASK | PSR_MODE32_BIT);
*vcpu_cpsr(vcpu) |= mode;
}
return __fixup_guest_exit(vcpu, exit_code, hyp_exit_handlers);
}
/* Switch to the guest for VHE systems running in EL2 */
@@ -231,6 +215,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
sysreg_save_host_state_vhe(host_ctxt);
fpsimd_lazy_switch_to_guest(vcpu);
/*
* ARM erratum 1165522 requires us to configure both stage 1 and
* stage 2 translation for the guest context before we clear
@@ -266,6 +252,8 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
__deactivate_traps(vcpu);
fpsimd_lazy_switch_to_host(vcpu);
sysreg_restore_host_state_vhe(host_ctxt);
if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)


@@ -41,6 +41,7 @@ int __init kvm_arm_init_sve(void)
if (system_supports_sve()) {
kvm_sve_max_vl = sve_max_virtualisable_vl();
kvm_host_sve_max_vl = sve_max_vl();
kvm_nvhe_sym(kvm_host_sve_max_vl) = kvm_host_sve_max_vl;
/*
* The get_sve_reg()/set_sve_reg() ioctl interface will need


@@ -1328,7 +1328,8 @@ int arch_add_memory(int nid, u64 start, u64 size,
__remove_pgd_mapping(swapper_pg_dir,
__phys_to_virt(start), size);
else {
max_pfn = PFN_UP(start + size);
/* Address of hotplugged memory can be smaller */
max_pfn = max(max_pfn, PFN_UP(start + size));
max_low_pfn = max_pfn;
}


@@ -59,6 +59,7 @@ config LOONGARCH
select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT


@@ -33,9 +33,9 @@ struct pt_regs {
unsigned long __last[];
} __aligned(8);
static inline int regs_irqs_disabled(struct pt_regs *regs)
static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
{
return arch_irqs_disabled_flags(regs->csr_prmd);
return !(regs->csr_prmd & CSR_PRMD_PIE);
}
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
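The handlers reworked in the traps.c hunks further below all adopt the same shape around this helper: snapshot the interrupted context's interrupt state once, enable interrupts for the slow work only if they were enabled at entry, and restore that state on the way out. A condensed, hypothetical handler showing the pattern (do_example() is illustrative, not kernel code):

asmlinkage void noinstr do_example(struct pt_regs *regs)
{
        bool pie = regs_irqs_disabled(regs);    /* state at exception entry */
        irqentry_state_t state = irqentry_enter(regs);

        if (!pie)
                local_irq_enable();     /* interrupts were on: re-enable them */

        /* ... exception-specific handling ... */

        if (!pie)
                local_irq_disable();    /* leave with the entry state intact */

        irqentry_exit(regs, state);
}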


@@ -216,18 +216,6 @@ static __init int setup_node(int pxm)
return acpi_map_pxm_to_node(pxm);
}
/*
* Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
* I/O localities since SRAT does not list them. I/O localities are
* not supported at this point.
*/
unsigned int numa_distance_cnt;
static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
return slit->locality_count;
}
void __init numa_set_distance(int from, int to, int distance)
{
if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {


@@ -527,9 +527,10 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
#else
bool pie = regs_irqs_disabled(regs);
unsigned int *pc;
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_enable();
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
@@ -556,7 +557,7 @@ sigbus:
die_if_kernel("Kernel ale access", regs);
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_disable();
#endif
irqentry_exit(regs, state);
@@ -588,12 +589,13 @@ static void bug_handler(struct pt_regs *regs)
asmlinkage void noinstr do_bce(struct pt_regs *regs)
{
bool user = user_mode(regs);
bool pie = regs_irqs_disabled(regs);
unsigned long era = exception_era(regs);
u64 badv = 0, lower = 0, upper = ULONG_MAX;
union loongarch_instruction insn;
irqentry_state_t state = irqentry_enter(regs);
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_enable();
current->thread.trap_nr = read_csr_excode();
@@ -659,7 +661,7 @@ asmlinkage void noinstr do_bce(struct pt_regs *regs)
force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -677,11 +679,12 @@ bad_era:
asmlinkage void noinstr do_bp(struct pt_regs *regs)
{
bool user = user_mode(regs);
bool pie = regs_irqs_disabled(regs);
unsigned int opcode, bcode;
unsigned long era = exception_era(regs);
irqentry_state_t state = irqentry_enter(regs);
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_enable();
if (__get_inst(&opcode, (u32 *)era, user))
@@ -747,7 +750,7 @@ asmlinkage void noinstr do_bp(struct pt_regs *regs)
}
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_disable();
irqentry_exit(regs, state);
@@ -982,6 +985,7 @@ static void init_restore_lbt(void)
asmlinkage void noinstr do_lbt(struct pt_regs *regs)
{
bool pie = regs_irqs_disabled(regs);
irqentry_state_t state = irqentry_enter(regs);
/*
@@ -991,7 +995,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
* (including the user using 'MOVGR2GCSR' to turn on TM, which
* will not trigger the BTE), we need to check PRMD first.
*/
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_enable();
if (!cpu_has_lbt) {
@@ -1005,7 +1009,7 @@ asmlinkage void noinstr do_lbt(struct pt_regs *regs)
preempt_enable();
out:
if (regs->csr_prmd & CSR_PRMD_PIE)
if (!pie)
local_irq_disable();
irqentry_exit(regs, state);


@@ -47,7 +47,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
pmd = pmd_offset(pud, addr);
}
}
return (pte_t *) pmd;
return pmd_none(pmdp_get(pmd)) ? NULL : (pte_t *) pmd;
}
int pmd_huge(pmd_t pmd)


@@ -64,9 +64,6 @@ void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif


@@ -42,7 +42,7 @@ int (*__pmax_close)(int);
* Detect which PROM the DECSTATION has, and set the callback vectors
* appropriately.
*/
void __init which_prom(s32 magic, s32 *prom_vec)
static void __init which_prom(s32 magic, s32 *prom_vec)
{
/*
* No sign of the REX PROM's magic number means we assume a non-REX


@@ -8,7 +8,7 @@
#define __ASM_DS1287_H
extern int ds1287_timer_state(void);
extern void ds1287_set_base_clock(unsigned int clock);
extern int ds1287_set_base_clock(unsigned int hz);
extern int ds1287_clockevent_init(int irq);
#endif


@@ -47,6 +47,16 @@ extern phys_addr_t __mips_cm_phys_base(void);
*/
extern int mips_cm_is64;
/*
* mips_cm_is_l2_hci_broken - determine if HCI is broken
*
* Some CM reports show that Hardware Cache Initialization is
* complete, but in reality it's not the case. They also incorrectly
* indicate that Hardware Cache Initialization is supported. This
* flag allows warning about this broken feature.
*/
extern bool mips_cm_is_l2_hci_broken;
/**
* mips_cm_error_report - Report CM cache errors
*/
@@ -85,6 +95,18 @@ static inline bool mips_cm_present(void)
#endif
}
/**
* mips_cm_update_property - update property from the device tree
*
* Retrieve the properties from the device tree if a CM node exist and
* update the internal variable based on this.
*/
#ifdef CONFIG_MIPS_CM
extern void mips_cm_update_property(void);
#else
static inline void mips_cm_update_property(void) {}
#endif
/**
* mips_cm_has_l2sync - determine whether an L2-only sync region is present
*


@@ -10,6 +10,7 @@
#include <linux/mc146818rtc.h>
#include <linux/irq.h>
#include <asm/ds1287.h>
#include <asm/time.h>
int ds1287_timer_state(void)


@@ -5,6 +5,7 @@
*/
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
@@ -14,6 +15,7 @@
void __iomem *mips_gcr_base;
void __iomem *mips_cm_l2sync_base;
int mips_cm_is64;
bool mips_cm_is_l2_hci_broken;
static char *cm2_tr[8] = {
"mem", "gcr", "gic", "mmio",
@@ -243,6 +245,18 @@ static void mips_cm_probe_l2sync(void)
mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
}
void mips_cm_update_property(void)
{
struct device_node *cm_node;
cm_node = of_find_compatible_node(of_root, NULL, "mobileye,eyeq6-cm");
if (!cm_node)
return;
pr_info("HCI (Hardware Cache Init for the L2 cache) in GCR_L2_RAM_CONFIG from the CM3 is broken");
mips_cm_is_l2_hci_broken = true;
of_node_put(cm_node);
}
int mips_cm_probe(void)
{
phys_addr_t addr;


@@ -63,6 +63,7 @@ static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
#define PDT_ADDR_PERM_ERR (pdt_type != PDT_PDC ? 2UL : 0UL)
#define PDT_ADDR_SINGLE_ERR 1UL
#ifdef CONFIG_PROC_FS
/* report PDT entries via /proc/meminfo */
void arch_report_meminfo(struct seq_file *m)
{
@@ -74,6 +75,7 @@ void arch_report_meminfo(struct seq_file *m)
seq_printf(m, "PDT_cur_entries: %7lu\n",
pdt_status.pdt_entries);
}
#endif
static int get_info_pat_new(void)
{


@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/memblock.h>
#include <linux/nospec.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/reboot.h>
@@ -1839,6 +1840,9 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
|| nargs + nret > ARRAY_SIZE(args.args))
return -EINVAL;
nargs = array_index_nospec(nargs, ARRAY_SIZE(args.args));
nret = array_index_nospec(nret, ARRAY_SIZE(args.args) - nargs);
/* Copy in args. */
if (copy_from_user(args.args, uargs->args,
nargs * sizeof(rtas_arg_t)) != 0)


@@ -115,24 +115,19 @@
\old_c
.endm
#define _ALTERNATIVE_CFG(old_c, ...) \
ALTERNATIVE_CFG old_c
#define _ALTERNATIVE_CFG_2(old_c, ...) \
ALTERNATIVE_CFG old_c
#define __ALTERNATIVE_CFG(old_c, ...) ALTERNATIVE_CFG old_c
#define __ALTERNATIVE_CFG_2(old_c, ...) ALTERNATIVE_CFG old_c
#else /* !__ASSEMBLY__ */
#define __ALTERNATIVE_CFG(old_c) \
old_c "\n"
#define _ALTERNATIVE_CFG(old_c, ...) \
__ALTERNATIVE_CFG(old_c)
#define _ALTERNATIVE_CFG_2(old_c, ...) \
__ALTERNATIVE_CFG(old_c)
#define __ALTERNATIVE_CFG(old_c, ...) old_c "\n"
#define __ALTERNATIVE_CFG_2(old_c, ...) old_c "\n"
#endif /* __ASSEMBLY__ */
#define _ALTERNATIVE_CFG(old_c, ...) __ALTERNATIVE_CFG(old_c)
#define _ALTERNATIVE_CFG_2(old_c, ...) __ALTERNATIVE_CFG_2(old_c)
#endif /* CONFIG_RISCV_ALTERNATIVE */
/*


@@ -19,16 +19,9 @@
#ifndef __ASSEMBLY__
void arch_kgdb_breakpoint(void);
extern unsigned long kgdb_compiled_break;
static inline void arch_kgdb_breakpoint(void)
{
asm(".global kgdb_compiled_break\n"
".option norvc\n"
"kgdb_compiled_break: ebreak\n"
".option rvc\n");
}
#endif /* !__ASSEMBLY__ */
#define DBG_REG_ZERO "zero"


@@ -62,8 +62,11 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long *args)
{
args[0] = regs->orig_a0;
args++;
memcpy(args, &regs->a1, 5 * sizeof(args[0]));
args[1] = regs->a1;
args[2] = regs->a2;
args[3] = regs->a3;
args[4] = regs->a4;
args[5] = regs->a5;
}
static inline int syscall_get_arch(struct task_struct *task)


@@ -254,6 +254,12 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
regs->epc = pc;
}
noinline void arch_kgdb_breakpoint(void)
{
asm(".global kgdb_compiled_break\n"
"kgdb_compiled_break: ebreak\n");
}
void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
char *remcom_out_buffer)
{


@@ -73,6 +73,9 @@ static struct resource bss_res = { .name = "Kernel bss", };
static struct resource elfcorehdr_res = { .name = "ELF Core hdr", };
#endif
static int num_standard_resources;
static struct resource *standard_resources;
static int __init add_resource(struct resource *parent,
struct resource *res)
{
@@ -146,7 +149,7 @@ static void __init init_resources(void)
struct resource *res = NULL;
struct resource *mem_res = NULL;
size_t mem_res_sz = 0;
int num_resources = 0, res_idx = 0;
int num_resources = 0, res_idx = 0, non_resv_res = 0;
int ret = 0;
/* + 1 as memblock_alloc() might increase memblock.reserved.cnt */
@@ -215,6 +218,7 @@ static void __init init_resources(void)
/* Add /memory regions to the resource tree */
for_each_mem_region(region) {
res = &mem_res[res_idx--];
non_resv_res++;
if (unlikely(memblock_is_nomap(region))) {
res->name = "Reserved";
@@ -232,6 +236,9 @@ static void __init init_resources(void)
goto error;
}
num_standard_resources = non_resv_res;
standard_resources = &mem_res[res_idx + 1];
/* Clean-up any unused pre-allocated resources */
if (res_idx >= 0)
memblock_free(mem_res, (res_idx + 1) * sizeof(*mem_res));
@@ -243,6 +250,33 @@ static void __init init_resources(void)
memblock_free(mem_res, mem_res_sz);
}
static int __init reserve_memblock_reserved_regions(void)
{
u64 i, j;
for (i = 0; i < num_standard_resources; i++) {
struct resource *mem = &standard_resources[i];
phys_addr_t r_start, r_end, mem_size = resource_size(mem);
if (!memblock_is_region_reserved(mem->start, mem_size))
continue;
for_each_reserved_mem_range(j, &r_start, &r_end) {
resource_size_t start, end;
start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
if (start > mem->end || end < mem->start)
continue;
reserve_region_with_split(mem, start, end, "Reserved");
}
}
return 0;
}
arch_initcall(reserve_memblock_reserved_regions);
static void __init parse_dtb(void)
{


@@ -94,7 +94,7 @@ static int handle_validity(struct kvm_vcpu *vcpu)
vcpu->stat.exit_validity++;
trace_kvm_s390_intercept_validity(vcpu, viwhy);
KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%pK)", viwhy,
KVM_EVENT(3, "validity intercept 0x%x for pid %u (kvm 0x%p)", viwhy,
current->pid, vcpu->kvm);
/* do not warn on invalid runtime instrumentation mode */


@@ -3161,7 +3161,7 @@ void kvm_s390_gisa_clear(struct kvm *kvm)
if (!gi->origin)
return;
gisa_clear_ipm(gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%p cleared", gi->origin);
}
void kvm_s390_gisa_init(struct kvm *kvm)
@@ -3178,7 +3178,7 @@ void kvm_s390_gisa_init(struct kvm *kvm)
gi->timer.function = gisa_vcpu_kicker;
memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
gi->origin->next_alert = (u32)virt_to_phys(gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
VM_EVENT(kvm, 3, "gisa 0x%p initialized", gi->origin);
}
void kvm_s390_gisa_enable(struct kvm *kvm)
@@ -3219,7 +3219,7 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
process_gib_alert_list();
hrtimer_cancel(&gi->timer);
gi->origin = NULL;
VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
VM_EVENT(kvm, 3, "gisa 0x%p destroyed", gisa);
}
void kvm_s390_gisa_disable(struct kvm *kvm)
@@ -3468,7 +3468,7 @@ int __init kvm_s390_gib_init(u8 nisc)
}
}
KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
KVM_EVENT(3, "gib 0x%p (nisc=%d) initialized", gib, gib->nisc);
goto out;
out_unreg_gal:


@@ -990,7 +990,7 @@ static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *att
}
mutex_unlock(&kvm->lock);
VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
VM_EVENT(kvm, 3, "New guest asce: 0x%p",
(void *) kvm->arch.gmap->asce);
break;
}
@@ -3418,7 +3418,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_s390_gisa_init(kvm);
INIT_LIST_HEAD(&kvm->arch.pv.need_cleanup);
kvm->arch.pv.set_aside = NULL;
KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
return 0;
out_err:
@@ -3481,7 +3481,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_s390_destroy_adapters(kvm);
kvm_s390_clear_float_irqs(kvm);
kvm_s390_vsie_destroy(kvm);
KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
KVM_EVENT(3, "vm 0x%p destroyed", kvm);
}
/* Section: vcpu related */
@@ -3602,7 +3602,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
free_page((unsigned long)old_sca);
VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
VM_EVENT(kvm, 2, "Switched to ESCA (0x%p -> 0x%p)",
old_sca, kvm->arch.sca);
return 0;
}
@@ -3974,7 +3974,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
goto out_free_sie_block;
}
VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sie block at 0x%p",
vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);


@@ -56,7 +56,7 @@ TRACE_EVENT(kvm_s390_create_vcpu,
__entry->sie_block = sie_block;
),
TP_printk("create cpu %d at 0x%pK, sie block at 0x%pK",
TP_printk("create cpu %d at 0x%p, sie block at 0x%p",
__entry->id, __entry->vcpu, __entry->sie_block)
);
@@ -255,7 +255,7 @@ TRACE_EVENT(kvm_s390_enable_css,
__entry->kvm = kvm;
),
TP_printk("enabling channel I/O support (kvm @ %pK)\n",
TP_printk("enabling channel I/O support (kvm @ %p)\n",
__entry->kvm)
);


@@ -933,7 +933,6 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned int nr)
{
arch_enter_lazy_mmu_mode();
for (;;) {
__set_pte_at(mm, addr, ptep, pte, 0);
if (--nr == 0)
@@ -942,7 +941,6 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_val(pte) += PAGE_SIZE;
addr += PAGE_SIZE;
}
arch_leave_lazy_mmu_mode();
}
#define set_ptes set_ptes


@@ -52,8 +52,10 @@ out:
void arch_enter_lazy_mmu_mode(void)
{
struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
struct tlb_batch *tb;
preempt_disable();
tb = this_cpu_ptr(&tlb_batch);
tb->active = 1;
}
@@ -64,6 +66,7 @@ void arch_leave_lazy_mmu_mode(void)
if (tb->tlb_nr)
flush_tlb_pending();
tb->active = 0;
preempt_enable();
}
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
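The added preempt_disable()/preempt_enable() pair matters because a this_cpu_ptr() result is only stable while the task cannot migrate. A sketch of the hazard in the old code, as an illustration rather than an observed trace:

/*
 * Old code, annotated:
 *
 *      struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);  // CPU0's batch
 *      // <-- preemption here can migrate the task to CPU1
 *      tb->active = 1;                                   // still CPU0's batch
 *
 * Holding preemption off from arch_enter_lazy_mmu_mode() until
 * arch_leave_lazy_mmu_mode() guarantees the batch that is activated,
 * filled and flushed is the one belonging to the CPU we ran on.
 */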


@@ -881,6 +881,7 @@ config INTEL_TDX_GUEST
depends on X86_64 && CPU_SUP_INTEL
depends on X86_X2APIC
depends on EFI_STUB
depends on PARAVIRT
select ARCH_HAS_CC_PLATFORM
select X86_MEM_ENCRYPT
select X86_MCE


@@ -34,11 +34,14 @@ static bool early_is_tdx_guest(void)
void arch_accept_memory(phys_addr_t start, phys_addr_t end)
{
static bool sevsnp;
/* Platform-specific memory-acceptance call goes here */
if (early_is_tdx_guest()) {
if (!tdx_accept_memory(start, end))
panic("TDX: Failed to accept memory\n");
} else if (sev_snp_enabled()) {
} else if (sevsnp || (sev_get_status() & MSR_AMD64_SEV_SNP_ENABLED)) {
sevsnp = true;
snp_accept_memory(start, end);
} else {
error("Cannot accept memory: unknown platform\n");


@@ -135,10 +135,7 @@ bool sev_snp_enabled(void)
static void __page_state_change(unsigned long paddr, enum psc_op op)
{
u64 val;
if (!sev_snp_enabled())
return;
u64 val, msr;
/*
* If private -> shared then invalidate the page before requesting the
@@ -147,6 +144,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
if (op == SNP_PAGE_STATE_SHARED && pvalidate(paddr, RMP_PG_SIZE_4K, 0))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
/* Save the current GHCB MSR value */
msr = sev_es_rd_ghcb_msr();
/* Issue VMGEXIT to change the page state in RMP table. */
sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
VMGEXIT();
@@ -156,6 +156,9 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
if ((GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP) || GHCB_MSR_PSC_RESP_VAL(val))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
/* Restore the GHCB MSR value */
sev_es_wr_ghcb_msr(msr);
/*
* Now that page state is changed in the RMP table, validate it so that it is
* consistent with the RMP entry.
@@ -166,11 +169,17 @@ static void __page_state_change(unsigned long paddr, enum psc_op op)
void snp_set_page_private(unsigned long paddr)
{
if (!sev_snp_enabled())
return;
__page_state_change(paddr, SNP_PAGE_STATE_PRIVATE);
}
void snp_set_page_shared(unsigned long paddr)
{
if (!sev_snp_enabled())
return;
__page_state_change(paddr, SNP_PAGE_STATE_SHARED);
}
@@ -194,56 +203,10 @@ static bool early_setup_ghcb(void)
return true;
}
static phys_addr_t __snp_accept_memory(struct snp_psc_desc *desc,
phys_addr_t pa, phys_addr_t pa_end)
{
struct psc_hdr *hdr;
struct psc_entry *e;
unsigned int i;
hdr = &desc->hdr;
memset(hdr, 0, sizeof(*hdr));
e = desc->entries;
i = 0;
while (pa < pa_end && i < VMGEXIT_PSC_MAX_ENTRY) {
hdr->end_entry = i;
e->gfn = pa >> PAGE_SHIFT;
e->operation = SNP_PAGE_STATE_PRIVATE;
if (IS_ALIGNED(pa, PMD_SIZE) && (pa_end - pa) >= PMD_SIZE) {
e->pagesize = RMP_PG_SIZE_2M;
pa += PMD_SIZE;
} else {
e->pagesize = RMP_PG_SIZE_4K;
pa += PAGE_SIZE;
}
e++;
i++;
}
if (vmgexit_psc(boot_ghcb, desc))
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
pvalidate_pages(desc);
return pa;
}
void snp_accept_memory(phys_addr_t start, phys_addr_t end)
{
struct snp_psc_desc desc = {};
unsigned int i;
phys_addr_t pa;
if (!boot_ghcb && !early_setup_ghcb())
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
pa = start;
while (pa < end)
pa = __snp_accept_memory(&desc, pa, end);
for (phys_addr_t pa = start; pa < end; pa += PAGE_SIZE)
__page_state_change(pa, SNP_PAGE_STATE_PRIVATE);
}
void sev_es_shutdown_ghcb(void)


@@ -12,11 +12,13 @@
bool sev_snp_enabled(void);
void snp_accept_memory(phys_addr_t start, phys_addr_t end);
u64 sev_get_status(void);
#else
static inline bool sev_snp_enabled(void) { return false; }
static inline void snp_accept_memory(phys_addr_t start, phys_addr_t end) { }
static inline u64 sev_get_status(void) { return 0; }
#endif


@@ -13,6 +13,7 @@
#include <asm/ia32.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/paravirt_types.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
@@ -334,7 +335,7 @@ static int handle_halt(struct ve_info *ve)
return ve_instr_len(ve);
}
void __cpuidle tdx_safe_halt(void)
void __cpuidle tdx_halt(void)
{
const bool irq_disabled = false;
@@ -345,6 +346,16 @@ void __cpuidle tdx_safe_halt(void)
WARN_ONCE(1, "HLT instruction emulation failed\n");
}
static void __cpuidle tdx_safe_halt(void)
{
tdx_halt();
/*
* "__cpuidle" section doesn't support instrumentation, so stick
* with raw_* variant that avoids tracing hooks.
*/
raw_local_irq_enable();
}
static int read_msr(struct pt_regs *regs, struct ve_info *ve)
{
struct tdx_hypercall_args args = {
@@ -888,6 +899,19 @@ void __init tdx_early_init(void)
x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;
/*
* Avoid "sti;hlt" execution in TDX guests as HLT induces a #VE that
* will enable interrupts before HLT TDCALL invocation if executed
* in STI-shadow, possibly resulting in missed wakeup events.
*
* Modify all possible HLT execution paths to use TDX specific routines
* that directly execute TDCALL and toggle the interrupt state as
* needed after TDCALL completion. This also reduces HLT related #VEs
* in addition to having a reliable halt logic execution.
*/
pv_ops.irq.safe_halt = tdx_safe_halt;
pv_ops.irq.halt = tdx_halt;
/*
* TDX intercepts the RDMSR to read the X2APIC ID in the parallel
* bringup low level code. That raises #VE which cannot be handled
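With both pv_ops entries overridden above, any generic halt in the guest now reaches the TDCALL-based routines instead of a raw "sti;hlt". A minimal, hypothetical caller to show the dispatch, assuming the standard x86 paravirt plumbing:

/* Illustrative only: after tdx_early_init(), this path cannot execute
 * HLT inside an STI shadow, so no wakeup can be consumed by the #VE
 * handler before the vCPU actually halts. */
static void idle_example(void)
{
        raw_local_irq_disable();
        arch_safe_halt();       /* paravirt: dispatches to tdx_safe_halt() */
}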


@@ -16,7 +16,7 @@
SYM_FUNC_START(entry_ibpb)
movl $MSR_IA32_PRED_CMD, %ecx
movl $PRED_CMD_IBPB, %eax
movl _ASM_RIP(x86_pred_cmd), %eax
xorl %edx, %edx
wrmsr


@@ -621,7 +621,7 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.type == event->pmu->type)
event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
if (!event->attr.freq && x86_pmu.limit_period) {
if (is_sampling_event(event) && !event->attr.freq && x86_pmu.limit_period) {
s64 left = event->attr.sample_period;
x86_pmu.limit_period(event, &left);
if (left > event->attr.sample_period)

View File

@@ -1203,8 +1203,10 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
* + precise_ip < 2 for the non event IP
* + For RTM TSX weight we need GPRs for the abort code.
*/
gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
(attr->sample_regs_intr & PEBS_GP_REGS);
gprs = ((sample_type & PERF_SAMPLE_REGS_INTR) &&
(attr->sample_regs_intr & PEBS_GP_REGS)) ||
((sample_type & PERF_SAMPLE_REGS_USER) &&
(attr->sample_regs_user & PEBS_GP_REGS));
tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
((attr->config & INTEL_ARCH_EVENT_MASK) ==
@@ -1856,7 +1858,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
regs->flags &= ~PERF_EFLAGS_EXACT;
}
if (sample_type & PERF_SAMPLE_REGS_INTR)
if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))
adaptive_pebs_save_regs(regs, gprs);
}
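The fix widens the GPR condition so that user-register sampling also requests GPRs from PEBS. A small truth-check of the new predicate, with illustrative flag values standing in for the real uapi constants:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bits; the real values live in uapi/linux/perf_event.h. */
#define PERF_SAMPLE_REGS_INTR (1u << 0)
#define PERF_SAMPLE_REGS_USER (1u << 1)
#define PEBS_GP_REGS          0xffffu    /* assumed general-purpose reg mask */

static bool need_gprs(unsigned sample_type, unsigned regs_intr, unsigned regs_user)
{
	/* New logic: either intr or user register sampling pulls in GPRs. */
	return ((sample_type & PERF_SAMPLE_REGS_INTR) && (regs_intr & PEBS_GP_REGS)) ||
	       ((sample_type & PERF_SAMPLE_REGS_USER) && (regs_user & PEBS_GP_REGS));
}

int main(void)
{
	/* User-regs-only sampling: 0 under the old logic, 1 under the new. */
	printf("%d\n", need_gprs(PERF_SAMPLE_REGS_USER, 0, 0x1));
	return 0;
}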

View File

@@ -4882,28 +4882,28 @@ static struct uncore_event_desc snr_uncore_iio_freerunning_events[] = {
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.0517578125e-5"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};
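For reference, the old and new scale strings correspond to 4-byte and 32-byte counter granularity respectively: 4 / 2^20 MiB ~= 3.814697266e-6 and 32 / 2^20 MiB = 3.0517578125e-5, suggesting these free-running counters tick once per 32 bytes rather than once per 4. A two-line arithmetic check:

#include <stdio.h>

int main(void)
{
	/* Old scale assumed 4-byte units; the fix assumes 32-byte units. */
	printf("%.9e\n",  4.0 / (1 << 20));    /* 3.814697266e-06 */
	printf("%.10e\n", 32.0 / (1 << 20));   /* 3.0517578125e-05 */
	return 0;
}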
@@ -5476,37 +5476,6 @@ static struct freerunning_counters icx_iio_freerunning[] = {
[ICX_IIO_MSR_BW_IN] = { 0xaa0, 0x1, 0x10, 8, 48, icx_iio_bw_freerunning_box_offsets },
};
static struct uncore_event_desc icx_uncore_iio_freerunning_events[] = {
/* Free-Running IIO CLOCKS Counter */
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};
static struct intel_uncore_type icx_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 9,
@@ -5514,7 +5483,7 @@ static struct intel_uncore_type icx_uncore_iio_free_running = {
.num_freerunning_types = ICX_IIO_FREERUNNING_TYPE_MAX,
.freerunning = icx_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
.event_descs = icx_uncore_iio_freerunning_events,
.event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};
@@ -6241,69 +6210,13 @@ static struct freerunning_counters spr_iio_freerunning[] = {
[SPR_IIO_MSR_BW_OUT] = { 0x3808, 0x1, 0x10, 8, 48 },
};
static struct uncore_event_desc spr_uncore_iio_freerunning_events[] = {
/* Free-Running IIO CLOCKS Counter */
INTEL_UNCORE_EVENT_DESC(ioclk, "event=0xff,umask=0x10"),
/* Free-Running IIO BANDWIDTH IN Counters */
INTEL_UNCORE_EVENT_DESC(bw_in_port0, "event=0xff,umask=0x20"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1, "event=0xff,umask=0x21"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2, "event=0xff,umask=0x22"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3, "event=0xff,umask=0x23"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4, "event=0xff,umask=0x24"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5, "event=0xff,umask=0x25"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6, "event=0xff,umask=0x26"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7, "event=0xff,umask=0x27"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_in_port7.unit, "MiB"),
/* Free-Running IIO BANDWIDTH OUT Counters */
INTEL_UNCORE_EVENT_DESC(bw_out_port0, "event=0xff,umask=0x30"),
INTEL_UNCORE_EVENT_DESC(bw_out_port0.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port0.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port1, "event=0xff,umask=0x31"),
INTEL_UNCORE_EVENT_DESC(bw_out_port1.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port1.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port2, "event=0xff,umask=0x32"),
INTEL_UNCORE_EVENT_DESC(bw_out_port2.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port2.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port3, "event=0xff,umask=0x33"),
INTEL_UNCORE_EVENT_DESC(bw_out_port3.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port3.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port4, "event=0xff,umask=0x34"),
INTEL_UNCORE_EVENT_DESC(bw_out_port4.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port4.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port5, "event=0xff,umask=0x35"),
INTEL_UNCORE_EVENT_DESC(bw_out_port5.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port5.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port6, "event=0xff,umask=0x36"),
INTEL_UNCORE_EVENT_DESC(bw_out_port6.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port6.unit, "MiB"),
INTEL_UNCORE_EVENT_DESC(bw_out_port7, "event=0xff,umask=0x37"),
INTEL_UNCORE_EVENT_DESC(bw_out_port7.scale, "3.814697266e-6"),
INTEL_UNCORE_EVENT_DESC(bw_out_port7.unit, "MiB"),
{ /* end: all zeroes */ },
};
static struct intel_uncore_type spr_uncore_iio_free_running = {
.name = "iio_free_running",
.num_counters = 17,
.num_freerunning_types = SPR_IIO_FREERUNNING_TYPE_MAX,
.freerunning = spr_iio_freerunning,
.ops = &skx_uncore_iio_freerunning_ops,
.event_descs = spr_uncore_iio_freerunning_events,
.event_descs = snr_uncore_iio_freerunning_events,
.format_group = &skx_uncore_iio_freerunning_format_group,
};

View File

@@ -229,9 +229,6 @@ register unsigned long current_stack_pointer asm(_ASM_SP);
#define _ASM_EXTABLE_UA(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_UACCESS)
#define _ASM_EXTABLE_CPY(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_COPY)
#define _ASM_EXTABLE_FAULT(from, to) \
_ASM_EXTABLE_TYPE(from, to, EX_TYPE_FAULT)

View File

@@ -36,7 +36,7 @@
#define EX_TYPE_DEFAULT 1
#define EX_TYPE_FAULT 2
#define EX_TYPE_UACCESS 3
#define EX_TYPE_COPY 4
/* unused, was: #define EX_TYPE_COPY 4 */
#define EX_TYPE_CLEAR_FS 5
#define EX_TYPE_FPU_RESTORE 6
#define EX_TYPE_BPF 7

View File

@@ -159,6 +159,8 @@
#define INTEL_FAM6_GRANITERAPIDS_D 0xAE
#define INTEL_GRANITERAPIDS_D IFM(6, 0xAE)
#define INTEL_BARTLETTLAKE IFM(6, 0xD7) /* Raptor Cove */
/* "Hybrid" Processors (P-Core/E-Core) */
#define INTEL_FAM6_LAKEFIELD 0x8A /* Sunny Cove / Tremont */

View File

@@ -56,6 +56,28 @@ static __always_inline void native_halt(void)
#endif
#ifndef CONFIG_PARAVIRT
#ifndef __ASSEMBLY__
/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
static __always_inline void arch_safe_halt(void)
{
native_safe_halt();
}
/*
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
static __always_inline void halt(void)
{
native_halt();
}
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
@@ -77,24 +99,6 @@ static __always_inline void arch_local_irq_enable(void)
native_irq_enable();
}
/*
* Used in the idle loop; sti takes one instruction cycle
* to complete:
*/
static __always_inline void arch_safe_halt(void)
{
native_safe_halt();
}
/*
* Used when interrupts are already enabled or to
* shutdown the processor:
*/
static __always_inline void halt(void)
{
native_halt();
}
/*
* For spinlocks, etc:
*/

View File

@@ -103,6 +103,16 @@ static inline void notify_page_enc_status_changed(unsigned long pfn,
PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
}
static __always_inline void arch_safe_halt(void)
{
PVOP_VCALL0(irq.safe_halt);
}
static inline void halt(void)
{
PVOP_VCALL0(irq.halt);
}
#ifdef CONFIG_PARAVIRT_XXL
static inline void load_sp0(unsigned long sp0)
{
@@ -168,16 +178,6 @@ static inline void __write_cr4(unsigned long x)
PVOP_VCALL1(cpu.write_cr4, x);
}
static __always_inline void arch_safe_halt(void)
{
PVOP_VCALL0(irq.safe_halt);
}
static inline void halt(void)
{
PVOP_VCALL0(irq.halt);
}
extern noinstr void pv_native_wbinvd(void);
static __always_inline void wbinvd(void)

View File

@@ -130,10 +130,9 @@ struct pv_irq_ops {
struct paravirt_callee_save save_fl;
struct paravirt_callee_save irq_disable;
struct paravirt_callee_save irq_enable;
#endif
void (*safe_halt)(void);
void (*halt)(void);
#endif
} __no_randomize_layout;
struct pv_mmu_ops {

View File

@@ -46,7 +46,7 @@ void tdx_get_ve_info(struct ve_info *ve);
bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve);
void tdx_safe_halt(void);
void tdx_halt(void);
bool tdx_early_handle_ve(struct pt_regs *regs);
@@ -55,7 +55,7 @@ int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);
#else
static inline void tdx_early_init(void) { };
static inline void tdx_safe_halt(void) { };
static inline void tdx_halt(void) { };
static inline bool tdx_early_handle_ve(struct pt_regs *regs) { return false; }

View File

@@ -62,11 +62,6 @@ void xen_arch_unregister_cpu(int num);
#ifdef CONFIG_PVH
void __init xen_pvh_init(struct boot_params *boot_params);
void __init mem_map_via_hcall(struct boot_params *boot_params_p);
#ifdef CONFIG_XEN_PVH
void __init xen_reserve_extra_memory(struct boot_params *bootp);
#else
static inline void xen_reserve_extra_memory(struct boot_params *bootp) { }
#endif
#endif
/* Lazy mode for batching updates / context switch */

View File

@@ -825,7 +825,7 @@ static void init_amd_k8(struct cpuinfo_x86 *c)
* (model = 0x14) and later actually support it.
* (AMD Erratum #110, docId: 25759).
*/
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) {
if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM) && !cpu_has(c, X86_FEATURE_HYPERVISOR)) {
clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
if (!rdmsrl_amd_safe(0xc001100d, &value)) {
value &= ~BIT_64(32);
@@ -1039,6 +1039,16 @@ static void init_amd_zen1(struct cpuinfo_x86 *c)
pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
setup_force_cpu_bug(X86_BUG_DIV0);
/*
* Turn off the Instructions Retired free counter on machines that are
* susceptible to erratum #1054 "Instructions Retired Performance
* Counter May Be Inaccurate".
*/
if (c->x86_model < 0x30) {
msr_clear_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
clear_cpu_cap(c, X86_FEATURE_IRPERF);
}
}
static bool cpu_has_zenbleed_microcode(void)
@@ -1185,13 +1195,8 @@ static void init_amd(struct cpuinfo_x86 *c)
if (!cpu_feature_enabled(X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
/*
* Turn on the Instructions Retired free counter on machines not
* susceptible to erratum #1054 "Instructions Retired Performance
* Counter May Be Inaccurate".
*/
if (cpu_has(c, X86_FEATURE_IRPERF) &&
(boot_cpu_has(X86_FEATURE_ZEN1) && c->x86_model > 0x2f))
/* Enable the Instructions Retired free counter */
if (cpu_has(c, X86_FEATURE_IRPERF))
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
check_null_seg_clears_base(c);

View File

@@ -1574,7 +1574,7 @@ static void __init spec_ctrl_disable_kernel_rrsba(void)
rrsba_disabled = true;
}
static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode)
{
/*
* Similar to context switches, there are two types of RSB attacks
@@ -1598,27 +1598,30 @@ static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_
*/
switch (mode) {
case SPECTRE_V2_NONE:
return;
break;
case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
}
return;
case SPECTRE_V2_EIBRS_LFENCE:
case SPECTRE_V2_EIBRS_RETPOLINE:
if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
}
break;
case SPECTRE_V2_RETPOLINE:
case SPECTRE_V2_LFENCE:
case SPECTRE_V2_IBRS:
pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n");
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
return;
}
break;
pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
default:
pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n");
dump_stack();
break;
}
}
/*
@@ -1844,10 +1847,7 @@ static void __init spectre_v2_select_mitigation(void)
*
* FIXME: Is this pointless for retbleed-affected AMD?
*/
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
spectre_v2_select_rsb_mitigation(mode);
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS

View File

@@ -1168,7 +1168,13 @@ static void __split_lock_reenable(struct work_struct *work)
{
sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);
/*
* In order for each CPU to schedule its delayed work independently of the
* others, delayed work struct must be per-CPU. This is not required when
* sysctl_sld_mitigate is enabled because of the semaphore that limits
* the number of simultaneously scheduled delayed works to 1.
*/
static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
/*
* If a CPU goes offline with pending delayed work to re-enable split lock
@@ -1189,7 +1195,7 @@ static int splitlock_cpu_offline(unsigned int cpu)
static void split_lock_warn(unsigned long ip)
{
struct delayed_work *work;
struct delayed_work *work = NULL;
int cpu;
if (!current->reported_split_lock)
@@ -1211,11 +1217,17 @@ static void split_lock_warn(unsigned long ip)
if (down_interruptible(&buslock_sem) == -EINTR)
return;
work = &sl_reenable_unlock;
} else {
work = &sl_reenable;
}
cpu = get_cpu();
if (!work) {
work = this_cpu_ptr(&sl_reenable);
/* Deferred initialization of per-CPU struct */
if (!work->work.func)
INIT_DELAYED_WORK(work, __split_lock_reenable);
}
schedule_delayed_work_on(cpu, work, 2);
/* Disable split lock detection on this CPU to make progress */
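The per-CPU work struct above is initialized lazily, keyed off a NULL work function. A userspace analogue of that deferred-init check (a plain array instead of per-CPU data, purely illustrative):

#include <stdio.h>

#define NR_CPUS 4

struct toy_delayed_work {
	void (*func)(void);   /* NULL until first use, as in the patch above */
};

static struct toy_delayed_work sl_reenable[NR_CPUS];   /* per-CPU instances */

static void reenable_split_lock(void) { puts("re-enable split lock detection"); }

static struct toy_delayed_work *get_cpu_work(int cpu)
{
	struct toy_delayed_work *work = &sl_reenable[cpu];

	/* Deferred initialization: only the first caller on each CPU pays. */
	if (!work->func)
		work->func = reenable_split_lock;
	return work;
}

int main(void)
{
	get_cpu_work(2)->func();   /* initialized on first access, then run */
	return 0;
}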

View File

@@ -288,14 +288,12 @@ static noinstr int error_context(struct mce *m, struct pt_regs *regs)
copy_user = is_copy_from_user(regs);
instrumentation_end();
switch (fixup_type) {
case EX_TYPE_UACCESS:
case EX_TYPE_COPY:
if (!copy_user)
return IN_KERNEL;
m->kflags |= MCE_IN_KERNEL_COPYIN;
fallthrough;
if (copy_user) {
m->kflags |= MCE_IN_KERNEL_COPYIN | MCE_IN_KERNEL_RECOV;
return IN_KERNEL_RECOV;
}
switch (fixup_type) {
case EX_TYPE_FAULT_MCE_SAFE:
case EX_TYPE_DEFAULT_MCE_SAFE:
m->kflags |= MCE_IN_KERNEL_RECOV;

View File

@@ -201,6 +201,12 @@ static bool need_sha_check(u32 cur_rev)
case 0xa70c0: return cur_rev <= 0xa70C009; break;
case 0xaa001: return cur_rev <= 0xaa00116; break;
case 0xaa002: return cur_rev <= 0xaa00218; break;
case 0xb0021: return cur_rev <= 0xb002146; break;
case 0xb1010: return cur_rev <= 0xb101046; break;
case 0xb2040: return cur_rev <= 0xb204031; break;
case 0xb4040: return cur_rev <= 0xb404031; break;
case 0xb6000: return cur_rev <= 0xb600031; break;
case 0xb7000: return cur_rev <= 0xb700031; break;
default: break;
}
@@ -216,8 +222,7 @@ static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsi
struct sha256_state s;
int i;
if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
x86_family(bsp_cpuid_1_eax) > 0x19)
if (x86_family(bsp_cpuid_1_eax) < 0x17)
return true;
if (!need_sha_check(cur_rev))

View File

@@ -753,22 +753,21 @@ void __init e820__memory_setup_extended(u64 phys_addr, u32 data_len)
void __init e820__register_nosave_regions(unsigned long limit_pfn)
{
int i;
unsigned long pfn = 0;
u64 last_addr = 0;
for (i = 0; i < e820_table->nr_entries; i++) {
struct e820_entry *entry = &e820_table->entries[i];
if (pfn < PFN_UP(entry->addr))
register_nosave_region(pfn, PFN_UP(entry->addr));
pfn = PFN_DOWN(entry->addr + entry->size);
if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
register_nosave_region(PFN_UP(entry->addr), pfn);
continue;
if (pfn >= limit_pfn)
break;
if (last_addr < entry->addr)
register_nosave_region(PFN_DOWN(last_addr), PFN_UP(entry->addr));
last_addr = entry->addr + entry->size;
}
register_nosave_region(PFN_DOWN(last_addr), limit_pfn);
}
#ifdef CONFIG_ACPI
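The rewritten walk tracks the end of the previous RAM entry in last_addr and marks every hole before a RAM entry (plus the tail up to limit_pfn) as nosave, so non-RAM entries and gaps are handled by the same path. A toy reimplementation over a hand-made memory map, assuming 4 KiB pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT  12
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

struct toy_entry { uint64_t addr, size; int ram; };

static void register_nosave(uint64_t spfn, uint64_t epfn)
{
	if (spfn < epfn)
		printf("nosave pfns %#llx-%#llx\n",
		       (unsigned long long)spfn, (unsigned long long)epfn);
}

int main(void)
{
	/* Toy map: RAM, hole, non-RAM, RAM -- mirrors the walk above. */
	struct toy_entry map[] = {
		{ 0x0,      0x9f000,  1 },
		{ 0x100000, 0x100000, 0 },   /* skipped: stays nosave */
		{ 0x300000, 0x100000, 1 },
	};
	uint64_t last_addr = 0, limit_pfn = 0x1000;

	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
		if (!map[i].ram)
			continue;            /* non-RAM never advances last_addr */
		if (last_addr < map[i].addr)
			register_nosave(PFN_DOWN(last_addr), PFN_UP(map[i].addr));
		last_addr = map[i].addr + map[i].size;
	}
	register_nosave(PFN_DOWN(last_addr), limit_pfn);
	return 0;
}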

View File

@@ -46,6 +46,7 @@ bool __init pit_timer_init(void)
* VMMs otherwise steal CPU time just to pointlessly waggle
* the (masked) IRQ.
*/
scoped_guard(irq)
clockevent_i8253_disable();
return false;
}

View File

@@ -142,6 +142,11 @@ int paravirt_disable_iospace(void)
return request_resource(&ioport_resource, &reserve_ioports);
}
static noinstr void pv_native_safe_halt(void)
{
native_safe_halt();
}
#ifdef CONFIG_PARAVIRT_XXL
static noinstr void pv_native_write_cr2(unsigned long val)
{
@@ -162,11 +167,6 @@ noinstr void pv_native_wbinvd(void)
{
native_wbinvd();
}
static noinstr void pv_native_safe_halt(void)
{
native_safe_halt();
}
#endif
struct pv_info pv_info = {
@@ -224,9 +224,11 @@ struct paravirt_patch_template pv_ops = {
.irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl),
.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
#endif /* CONFIG_PARAVIRT_XXL */
/* Irq HLT ops. */
.irq.safe_halt = pv_native_safe_halt,
.irq.halt = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */
/* Mmu ops. */
.mmu.flush_tlb_user = native_flush_tlb_local,

View File

@@ -956,7 +956,7 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
static_call_update(x86_idle, mwait_idle);
} else if (cpu_feature_enabled(X86_FEATURE_TDX_GUEST)) {
pr_info("using TDX aware idle routine\n");
static_call_update(x86_idle, tdx_safe_halt);
static_call_update(x86_idle, tdx_halt);
} else
static_call_update(x86_idle, default_idle);
}

View File

@@ -33,25 +33,55 @@
#include <asm/smap.h>
#include <asm/gsseg.h>
/*
* The first GDT descriptor is reserved as 'NULL descriptor'. As bits 0
* and 1 of a segment selector, i.e., the RPL bits, are NOT used to index
* GDT, selector values 0~3 all point to the NULL descriptor, thus values
* 0, 1, 2 and 3 are all valid NULL selector values.
*
* However IRET zeros ES, FS, GS, and DS segment registers if any of them
* is found to have any nonzero NULL selector value, which can be used by
* userspace in pre-FRED systems to spot any interrupt/exception by loading
* a nonzero NULL selector and waiting for it to become zero. Before FRED
* there was nothing software could do to prevent such an information leak.
*
* ERETU, the only legit instruction to return to userspace from kernel
* under FRED, by design does NOT zero any segment register to avoid this
* problem behavior.
*
* As such, leave NULL selector values 0~3 unchanged.
*/
static inline u16 fixup_rpl(u16 sel)
{
return sel <= 3 ? sel : sel | 3;
}
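fixup_rpl() passes the four NULL selector values through untouched and forces RPL 3 on anything else. A quick table of its outputs (the same helper, compiled standalone):

#include <stdio.h>
#include <stdint.h>

static uint16_t fixup_rpl(uint16_t sel)
{
	/* Selectors 0-3 all index the NULL descriptor: pass them through. */
	return sel <= 3 ? sel : sel | 3;
}

int main(void)
{
	/* 0-3 stay NULL; a real selector like 0x28 gets RPL forced to 3. */
	for (uint16_t sel = 0; sel <= 4; sel++)
		printf("%#x -> %#x\n", sel, fixup_rpl(sel));
	printf("%#x -> %#x\n", 0x28, fixup_rpl(0x28));   /* 0x28 -> 0x2b */
	return 0;
}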
#ifdef CONFIG_IA32_EMULATION
#include <asm/ia32_unistd.h>
static inline void reload_segments(struct sigcontext_32 *sc)
{
unsigned int cur;
u16 cur;
/*
* Reload fs and gs if they have changed in the signal
* handler. This does not handle long fs/gs base changes in
* the handler, but does not clobber them at least in the
* normal case.
*/
savesegment(gs, cur);
if ((sc->gs | 0x03) != cur)
load_gs_index(sc->gs | 0x03);
if (fixup_rpl(sc->gs) != cur)
load_gs_index(fixup_rpl(sc->gs));
savesegment(fs, cur);
if ((sc->fs | 0x03) != cur)
loadsegment(fs, sc->fs | 0x03);
if (fixup_rpl(sc->fs) != cur)
loadsegment(fs, fixup_rpl(sc->fs));
savesegment(ds, cur);
if ((sc->ds | 0x03) != cur)
loadsegment(ds, sc->ds | 0x03);
if (fixup_rpl(sc->ds) != cur)
loadsegment(ds, fixup_rpl(sc->ds));
savesegment(es, cur);
if ((sc->es | 0x03) != cur)
loadsegment(es, sc->es | 0x03);
if (fixup_rpl(sc->es) != cur)
loadsegment(es, fixup_rpl(sc->es));
}
#define sigset32_t compat_sigset_t
@@ -105,18 +135,12 @@ static bool ia32_restore_sigcontext(struct pt_regs *regs,
regs->orig_ax = -1;
#ifdef CONFIG_IA32_EMULATION
/*
* Reload fs and gs if they have changed in the signal
* handler. This does not handle long fs/gs base changes in
* the handler, but does not clobber them at least in the
* normal case.
*/
reload_segments(&sc);
#else
loadsegment(gs, sc.gs);
regs->fs = sc.fs;
regs->es = sc.es;
regs->ds = sc.ds;
loadsegment(gs, fixup_rpl(sc.gs));
regs->fs = fixup_rpl(sc.fs);
regs->es = fixup_rpl(sc.es);
regs->ds = fixup_rpl(sc.ds);
#endif
return fpu__restore_sig(compat_ptr(sc.fpstate), 1);

View File

@@ -1011,8 +1011,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
}
break;
case 0xa: { /* Architectural Performance Monitoring */
union cpuid10_eax eax;
union cpuid10_edx edx;
union cpuid10_eax eax = { };
union cpuid10_edx edx = { };
if (!enable_pmu || !static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
@@ -1028,8 +1028,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
if (kvm_pmu_cap.version)
edx.split.anythread_deprecated = 1;
edx.split.reserved1 = 0;
edx.split.reserved2 = 0;
entry->eax = eax.full;
entry->ebx = kvm_pmu_cap.events_mask;
@@ -1303,7 +1301,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
break;
/* AMD Extended Performance Monitoring and Debug */
case 0x80000022: {
union cpuid_0x80000022_ebx ebx;
union cpuid_0x80000022_ebx ebx = { };
entry->ecx = entry->edx = 0;
if (!enable_pmu || !kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2)) {
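Declaring the unions with = { } zero-initializes every member, which is why the explicit reserved2 clear can be dropped. A toy union demonstrating the effect (illustrative bitfield layout, not KVM's exact type):

#include <stdio.h>

/* Toy stand-in for cpuid10_edx: bitfields overlaid on a full register. */
union toy_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};

int main(void)
{
	union toy_edx edx = { };          /* all bits, reserved included, are 0 */

	edx.split.anythread_deprecated = 1;
	printf("full=%#x\n", edx.full);   /* only the deprecated bit is set */
	return 0;
}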

View File

@@ -820,7 +820,7 @@ static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
* Allocating new amd_iommu_pi_data, which will get
* add to the per-vcpu ir_list.
*/
ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_ATOMIC | __GFP_ACCOUNT);
if (!ir) {
ret = -ENOMEM;
goto out;
@@ -896,6 +896,7 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_irq_routing_table *irq_rt;
bool enable_remapped_mode = true;
int idx, ret = 0;
if (!kvm_arch_has_assigned_device(kvm) ||
@@ -933,6 +934,8 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
kvm_vcpu_apicv_active(&svm->vcpu)) {
struct amd_iommu_pi_data pi;
enable_remapped_mode = false;
/* Try to enable guest_mode in IRTE */
pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
AVIC_HPA_MASK);
@@ -951,7 +954,22 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
*/
if (!ret && pi.is_guest_mode)
svm_ir_list_add(svm, &pi);
} else {
}
if (!ret && svm) {
trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
e->gsi, vcpu_info.vector,
vcpu_info.pi_desc_addr, set);
}
if (ret < 0) {
pr_err("%s: failed to update PI IRTE\n", __func__);
goto out;
}
}
ret = 0;
if (enable_remapped_mode) {
/* Use legacy mode in IRTE */
struct amd_iommu_pi_data pi;
@@ -979,20 +997,6 @@ int avic_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
svm_ir_list_del(to_svm(vcpu), &pi);
}
}
if (!ret && svm) {
trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
e->gsi, vcpu_info.vector,
vcpu_info.pi_desc_addr, set);
}
if (ret < 0) {
pr_err("%s: failed to update PI IRTE\n", __func__);
goto out;
}
}
ret = 0;
out:
srcu_read_unlock(&kvm->irq_srcu, idx);
return ret;

View File

@@ -274,6 +274,7 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_irq_routing_table *irq_rt;
bool enable_remapped_mode = true;
struct kvm_lapic_irq irq;
struct kvm_vcpu *vcpu;
struct vcpu_data vcpu_info;
@@ -312,21 +313,8 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
kvm_set_msi_irq(kvm, e, &irq);
if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
!kvm_irq_is_postable(&irq)) {
/*
* Make sure the IRTE is in remapped mode if
* we don't handle it in posted mode.
*/
ret = irq_set_vcpu_affinity(host_irq, NULL);
if (ret < 0) {
printk(KERN_INFO
"failed to back to remapped mode, irq: %u\n",
host_irq);
goto out;
}
!kvm_irq_is_postable(&irq))
continue;
}
vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
vcpu_info.vector = irq.vector;
@@ -334,11 +322,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
vcpu_info.vector, vcpu_info.pi_desc_addr, set);
if (set)
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
else
ret = irq_set_vcpu_affinity(host_irq, NULL);
if (!set)
continue;
enable_remapped_mode = false;
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
if (ret < 0) {
printk(KERN_INFO "%s: failed to update PI IRTE\n",
__func__);
@@ -346,6 +335,9 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
}
}
if (enable_remapped_mode)
ret = irq_set_vcpu_affinity(host_irq, NULL);
ret = 0;
out:
srcu_read_unlock(&kvm->irq_srcu, idx);

View File

@@ -11396,6 +11396,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
if (kvm_mpx_supported())
kvm_load_guest_fpu(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
r = kvm_apic_accept_events(vcpu);
if (r < 0)
goto out;
@@ -11409,6 +11411,8 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
mp_state->mp_state = vcpu->arch.mp_state;
out:
kvm_vcpu_srcu_read_unlock(vcpu);
if (kvm_mpx_supported())
kvm_put_guest_fpu(vcpu);
vcpu_put(vcpu);
@@ -13293,7 +13297,8 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
struct kvm_kernel_irq_routing_entry *new)
{
if (new->type != KVM_IRQ_ROUTING_MSI)
if (old->type != KVM_IRQ_ROUTING_MSI ||
new->type != KVM_IRQ_ROUTING_MSI)
return true;
return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));

View File

@@ -163,13 +163,6 @@ static bool ex_handler_uaccess(const struct exception_table_entry *fixup,
return ex_handler_default(fixup, regs);
}
static bool ex_handler_copy(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
return ex_handler_fault(fixup, regs, trapnr);
}
static bool ex_handler_msr(const struct exception_table_entry *fixup,
struct pt_regs *regs, bool wrmsr, bool safe, int reg)
{
@@ -267,8 +260,6 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
return ex_handler_fault(e, regs, trapnr);
case EX_TYPE_UACCESS:
return ex_handler_uaccess(e, regs, trapnr, fault_addr);
case EX_TYPE_COPY:
return ex_handler_copy(e, regs, trapnr);
case EX_TYPE_CLEAR_FS:
return ex_handler_clear_fs(e, regs);
case EX_TYPE_FPU_RESTORE:

View File

@@ -2374,7 +2374,7 @@ static int __set_pages_np(struct page *page, int numpages)
.pgd = NULL,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
.flags = CPA_NO_CHECK_ALIAS };
/*
@@ -2453,7 +2453,7 @@ int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
.pgd = pgd,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW)),
.mask_clr = __pgprot(~page_flags & (_PAGE_NX|_PAGE_RW|_PAGE_DIRTY)),
.flags = CPA_NO_CHECK_ALIAS,
};
@@ -2496,7 +2496,7 @@ int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
.pgd = pgd,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY),
.flags = CPA_NO_CHECK_ALIAS,
};
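mask_clr names the protection bits to strip; adding _PAGE_DIRTY avoids leaving Write=0,Dirty=1 entries behind, a combination that shadow-stack-capable CPUs treat as a shadow-stack mapping. A sketch of the mask composition used in the kernel_map_pages_in_pgd() hunk (bit positions here are assumptions mirroring pgtable_types.h):

#include <stdio.h>
#include <stdint.h>

#define _PAGE_RW    (1ULL << 1)
#define _PAGE_DIRTY (1ULL << 6)
#define _PAGE_NX    (1ULL << 63)

int main(void)
{
	/* Caller asks for a read-only, executable mapping: no RW in page_flags. */
	uint64_t page_flags = 0;
	uint64_t mask_clr = ~page_flags & (_PAGE_NX | _PAGE_RW | _PAGE_DIRTY);

	/* RW, NX and now also DIRTY end up cleared together. */
	printf("mask_clr=%#llx\n", (unsigned long long)mask_clr);
	return 0;
}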

View File

@@ -392,9 +392,9 @@ static void cond_mitigation(struct task_struct *next)
prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec);
/*
* Avoid user/user BTB poisoning by flushing the branch predictor
* when switching between processes. This stops one process from
* doing Spectre-v2 attacks on another.
* Avoid user->user BTB/RSB poisoning by flushing them when switching
* between processes. This stops one process from doing Spectre-v2
* attacks on another.
*
* Both, the conditional and the always IBPB mode use the mm
* pointer to avoid the IBPB when switching between tasks of the

View File

@@ -74,9 +74,6 @@ static void __init init_pvh_bootparams(bool xen_guest)
} else
xen_raw_printk("Warning: Can fit ISA range into e820\n");
if (xen_guest)
xen_reserve_extra_memory(&pvh_bootparams);
pvh_bootparams.hdr.cmd_line_ptr =
pvh_start_info.cmdline_paddr;

View File

@@ -100,7 +100,12 @@ SYM_CODE_START_LOCAL(pvh_start_xen)
xor %edx, %edx
wrmsr
call xen_prepare_pvh
/* Call xen_prepare_pvh() via the kernel virtual mapping */
leaq xen_prepare_pvh(%rip), %rax
subq phys_base(%rip), %rax
addq $__START_KERNEL_map, %rax
ANNOTATE_RETPOLINE_SAFE
call *%rax
/* startup_64 expects boot_params in %rsi. */
mov $_pa(pvh_bootparams), %rsi

View File

@@ -75,6 +75,9 @@ EXPORT_SYMBOL(xen_start_flags);
*/
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
static __ref void xen_get_vendor(void)
{
init_cpu_devs();
@@ -471,6 +474,13 @@ int __init arch_xen_unpopulated_init(struct resource **res)
xen_free_unpopulated_pages(1, &pg);
}
/*
* Account for the region being in the physmap but unpopulated.
* The value in xen_released_pages is used by the balloon
* driver to know how much of the physmap is unpopulated and
* set an accurate initial memory target.
*/
xen_released_pages += xen_extra_mem[i].n_pfns;
/* Zero so region is not also added to the balloon driver. */
xen_extra_mem[i].n_pfns = 0;
}

View File

@@ -8,6 +8,7 @@
#include <asm/io_apic.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <xen/xen.h>
#include <asm/xen/interface.h>
@@ -26,47 +27,6 @@
bool __ro_after_init xen_pvh;
EXPORT_SYMBOL_GPL(xen_pvh);
void __init xen_pvh_init(struct boot_params *boot_params)
{
xen_pvh = 1;
xen_domain_type = XEN_HVM_DOMAIN;
xen_start_flags = pvh_start_info.flags;
if (xen_initial_domain())
x86_init.oem.arch_setup = xen_add_preferred_consoles;
x86_init.oem.banner = xen_banner;
xen_efi_init(boot_params);
if (xen_initial_domain()) {
struct xen_platform_op op = {
.cmd = XENPF_get_dom0_console,
};
int ret = HYPERVISOR_platform_op(&op);
if (ret > 0)
xen_init_vga(&op.u.dom0_console,
min(ret * sizeof(char),
sizeof(op.u.dom0_console)),
&boot_params->screen_info);
}
}
void __init mem_map_via_hcall(struct boot_params *boot_params_p)
{
struct xen_memory_map memmap;
int rc;
memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
if (rc) {
xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
BUG();
}
boot_params_p->e820_entries = memmap.nr_entries;
}
/*
* Reserve e820 UNUSABLE regions to inflate the memory balloon.
*
@@ -81,8 +41,9 @@ void __init mem_map_via_hcall(struct boot_params *boot_params_p)
* hypervisor should notify us which memory ranges are suitable for creating
* foreign mappings, but that's not yet implemented.
*/
void __init xen_reserve_extra_memory(struct boot_params *bootp)
static void __init pvh_reserve_extra_memory(void)
{
struct boot_params *bootp = &boot_params;
unsigned int i, ram_pages = 0, extra_pages;
for (i = 0; i < bootp->e820_entries; i++) {
@@ -133,3 +94,51 @@ void __init xen_reserve_extra_memory(struct boot_params *bootp)
xen_add_extra_mem(PFN_UP(e->addr), pages);
}
}
static void __init pvh_arch_setup(void)
{
pvh_reserve_extra_memory();
if (xen_initial_domain())
xen_add_preferred_consoles();
}
void __init xen_pvh_init(struct boot_params *boot_params)
{
xen_pvh = 1;
xen_domain_type = XEN_HVM_DOMAIN;
xen_start_flags = pvh_start_info.flags;
x86_init.oem.arch_setup = pvh_arch_setup;
x86_init.oem.banner = xen_banner;
xen_efi_init(boot_params);
if (xen_initial_domain()) {
struct xen_platform_op op = {
.cmd = XENPF_get_dom0_console,
};
int ret = HYPERVISOR_platform_op(&op);
if (ret > 0)
xen_init_vga(&op.u.dom0_console,
min(ret * sizeof(char),
sizeof(op.u.dom0_console)),
&boot_params->screen_info);
}
}
void __init mem_map_via_hcall(struct boot_params *boot_params_p)
{
struct xen_memory_map memmap;
int rc;
memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table);
set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table);
rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
if (rc) {
xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
BUG();
}
boot_params_p->e820_entries = memmap.nr_entries;
}

View File

@@ -38,9 +38,6 @@
#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;
/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;

View File

@@ -854,6 +854,8 @@ out_unregister_ia_ranges:
out_debugfs_remove:
blk_debugfs_remove(disk);
mutex_unlock(&q->sysfs_lock);
if (queue_is_mq(q))
blk_mq_sysfs_unregister(disk);
out_put_queue_kobj:
kobject_put(&disk->queue_kobj);
mutex_unlock(&q->sysfs_dir_lock);

View File

@@ -17,10 +17,14 @@
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
#include <linux/module.h>
#ifdef __GENKSYMS__ // CRC fix for e307c54ac819 ("crypto: null - Use spin lock instead of mutex")
#include <linux/mm.h>
#else
#include <linux/spinlock.h>
#endif
#include <linux/string.h>
static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
static DEFINE_SPINLOCK(crypto_default_null_skcipher_lock);
static struct crypto_sync_skcipher *crypto_default_null_skcipher;
static int crypto_default_null_skcipher_refcnt;
@@ -152,23 +156,32 @@ MODULE_ALIAS_CRYPTO("cipher_null");
struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
{
struct crypto_sync_skcipher *ntfm = NULL;
struct crypto_sync_skcipher *tfm;
mutex_lock(&crypto_default_null_skcipher_lock);
spin_lock_bh(&crypto_default_null_skcipher_lock);
tfm = crypto_default_null_skcipher;
if (!tfm) {
tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
if (IS_ERR(tfm))
goto unlock;
spin_unlock_bh(&crypto_default_null_skcipher_lock);
ntfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
if (IS_ERR(ntfm))
return ntfm;
spin_lock_bh(&crypto_default_null_skcipher_lock);
tfm = crypto_default_null_skcipher;
if (!tfm) {
tfm = ntfm;
ntfm = NULL;
crypto_default_null_skcipher = tfm;
}
}
crypto_default_null_skcipher_refcnt++;
spin_unlock_bh(&crypto_default_null_skcipher_lock);
unlock:
mutex_unlock(&crypto_default_null_skcipher_lock);
crypto_free_sync_skcipher(ntfm);
return tfm;
}
@@ -176,12 +189,16 @@ EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
void crypto_put_default_null_skcipher(void)
{
mutex_lock(&crypto_default_null_skcipher_lock);
struct crypto_sync_skcipher *tfm = NULL;
spin_lock_bh(&crypto_default_null_skcipher_lock);
if (!--crypto_default_null_skcipher_refcnt) {
crypto_free_sync_skcipher(crypto_default_null_skcipher);
tfm = crypto_default_null_skcipher;
crypto_default_null_skcipher = NULL;
}
mutex_unlock(&crypto_default_null_skcipher_lock);
spin_unlock_bh(&crypto_default_null_skcipher_lock);
crypto_free_sync_skcipher(tfm);
}
EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
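Because a spinlock cannot be held across a sleeping allocation, the getter now allocates optimistically outside the lock, re-checks under it, and frees the losing instance afterwards; the put side likewise defers the free until after unlock. A pthread sketch of the same shape (a toy object in place of the skcipher, mutex in place of the spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *default_obj;          /* shared singleton, guarded by lock */
static int refcnt;

static int *get_default(void)
{
	int *obj, *extra = NULL;

	pthread_mutex_lock(&lock);
	obj = default_obj;
	if (!obj) {
		/* Can't allocate under a spinlock in the kernel, so drop
		 * the lock, allocate, then re-check under the lock. */
		pthread_mutex_unlock(&lock);
		extra = malloc(sizeof(*extra));
		if (!extra)
			return NULL;
		pthread_mutex_lock(&lock);
		obj = default_obj;
		if (!obj) {
			obj = extra;
			extra = NULL;         /* we won the race */
			default_obj = obj;
		}
	}
	refcnt++;
	pthread_mutex_unlock(&lock);
	free(extra);                          /* loser's copy, freed outside lock */
	return obj;
}

static void put_default(void)
{
	int *victim = NULL;

	pthread_mutex_lock(&lock);
	if (!--refcnt) {
		victim = default_obj;
		default_obj = NULL;
	}
	pthread_mutex_unlock(&lock);
	free(victim);                         /* never free under the lock */
}

int main(void)
{
	if (get_default())
		put_default();
	return 0;
}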

View File

@@ -2301,6 +2301,34 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
DMI_MATCH(DMI_PRODUCT_FAMILY, "103C_5336AN HP ZHAN 66 Pro"),
},
},
/*
* Lenovo Legion Go S; touchscreen blocks HW sleep when woken up from EC
* https://gitlab.freedesktop.org/drm/amd/-/issues/3929
*/
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83L3"),
}
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83N6"),
}
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83Q2"),
}
},
{
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "83Q3"),
}
},
{ },
};

View File

@@ -22,8 +22,8 @@ static const char * const profile_names[] = {
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
static ssize_t platform_profile_choices_show(struct device *dev,
struct device_attribute *attr,
static ssize_t platform_profile_choices_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
int len = 0;
@@ -49,8 +49,8 @@ static ssize_t platform_profile_choices_show(struct device *dev,
return len;
}
static ssize_t platform_profile_show(struct device *dev,
struct device_attribute *attr,
static ssize_t platform_profile_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
enum platform_profile_option profile = PLATFORM_PROFILE_BALANCED;
@@ -77,8 +77,8 @@ static ssize_t platform_profile_show(struct device *dev,
return sysfs_emit(buf, "%s\n", profile_names[profile]);
}
static ssize_t platform_profile_store(struct device *dev,
struct device_attribute *attr,
static ssize_t platform_profile_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int err, i;
@@ -115,12 +115,12 @@ static ssize_t platform_profile_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RO(platform_profile_choices);
static DEVICE_ATTR_RW(platform_profile);
static struct kobj_attribute attr_platform_profile_choices = __ATTR_RO(platform_profile_choices);
static struct kobj_attribute attr_platform_profile = __ATTR_RW(platform_profile);
static struct attribute *platform_profile_attrs[] = {
&dev_attr_platform_profile_choices.attr,
&dev_attr_platform_profile.attr,
&attr_platform_profile_choices.attr,
&attr_platform_profile.attr,
NULL
};

View File

@@ -229,7 +229,7 @@ static int acpi_pptt_leaf_node(struct acpi_table_header *table_hdr,
node_entry = ACPI_PTR_DIFF(node, table_hdr);
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
proc_sz = sizeof(struct acpi_pptt_processor *);
proc_sz = sizeof(struct acpi_pptt_processor);
while ((unsigned long)entry + proc_sz < table_end) {
cpu_node = (struct acpi_pptt_processor *)entry;
@@ -270,7 +270,7 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
table_end = (unsigned long)table_hdr + table_hdr->length;
entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr,
sizeof(struct acpi_table_pptt));
proc_sz = sizeof(struct acpi_pptt_processor *);
proc_sz = sizeof(struct acpi_pptt_processor);
/* find the processor structure associated with this cpuid */
while ((unsigned long)entry + proc_sz < table_end) {
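The one-character bug: sizeof(struct acpi_pptt_processor *) is the size of a pointer (8 bytes on 64-bit), not of the roughly 20-byte structure, so the bounds check could step past a truncated trailing entry. A standalone illustration with a stand-in struct:

#include <stdio.h>

struct pptt_processor_like {   /* stand-in, sized like the real entry */
	unsigned char data[20];
};

int main(void)
{
	printf("sizeof(ptr)    = %zu\n", sizeof(struct pptt_processor_like *));
	printf("sizeof(struct) = %zu\n", sizeof(struct pptt_processor_like));
	return 0;
}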

View File

@@ -591,6 +591,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9215),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
.driver_data = board_ahci_yes_fbs },
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9235),

View File

@@ -1496,8 +1496,15 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.command = ATA_CMD_PACKET;
/* is it pointless to prefer PIO for "safety reasons"? */
if (ap->flags & ATA_FLAG_PIO_DMA) {
/*
* Do not use DMA if the connected device only supports PIO, even if the
* port prefers PIO commands via DMA.
*
* Ideally, we should call atapi_check_dma() to check if it is safe for
* the LLD to use DMA for REQUEST_SENSE, but we don't have a qc.
* Since we can't check the command, perhaps we should only use pio?
*/
if ((ap->flags & ATA_FLAG_PIO_DMA) && !(dev->flags & ATA_DFLAG_PIO)) {
tf.protocol = ATAPI_PROT_DMA;
tf.feature |= ATAPI_PKT_DMA;
} else {

View File

@@ -1365,6 +1365,8 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
unsigned int err_mask, tag;
u8 *sense, sk = 0, asc = 0, ascq = 0;
u64 sense_valid, val;
u16 extended_sense;
bool aux_icc_valid;
int ret = 0;
err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
@@ -1384,6 +1386,8 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
((u64)buf[10] << 16) | ((u64)buf[11] << 24);
extended_sense = get_unaligned_le16(&buf[14]);
aux_icc_valid = extended_sense & BIT(15);
ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_EH) ||
@@ -1411,6 +1415,17 @@ int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
continue;
}
qc->result_tf.nsect = sense[6];
qc->result_tf.hob_nsect = sense[7];
qc->result_tf.lbal = sense[8];
qc->result_tf.lbam = sense[9];
qc->result_tf.lbah = sense[10];
qc->result_tf.hob_lbal = sense[11];
qc->result_tf.hob_lbam = sense[12];
qc->result_tf.hob_lbah = sense[13];
if (aux_icc_valid)
qc->result_tf.auxiliary = get_unaligned_le32(&sense[16]);
/* Set sense without also setting scsicmd->result */
scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
qc->scsicmd->sense_buffer, sk,

View File

@@ -2354,8 +2354,8 @@ static unsigned int ata_msense_control_ata_feature(struct ata_device *dev,
*/
put_unaligned_be16(ATA_FEATURE_SUB_MPAGE_LEN - 4, &buf[2]);
if (dev->flags & ATA_DFLAG_CDL)
buf[4] = 0x02; /* Support T2A and T2B pages */
if (dev->flags & ATA_DFLAG_CDL_ENABLED)
buf[4] = 0x02; /* T2A and T2B pages enabled */
else
buf[4] = 0;
@@ -3764,12 +3764,11 @@ static int ata_mselect_control_spg0(struct ata_queued_cmd *qc,
}
/*
* Translate MODE SELECT control mode page, sub-pages f2h (ATA feature mode
* Translate MODE SELECT control mode page, sub-page f2h (ATA feature mode
* page) into a SET FEATURES command.
*/
static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
const u8 *buf, int len,
u16 *fp)
static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
const u8 *buf, int len, u16 *fp)
{
struct ata_device *dev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
@@ -3787,17 +3786,27 @@ static unsigned int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
/* Check cdl_ctrl */
switch (buf[0] & 0x03) {
case 0:
/* Disable CDL */
/* Disable CDL if it is enabled */
if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
return 0;
ata_dev_dbg(dev, "Disabling CDL\n");
cdl_action = 0;
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
break;
case 0x02:
/* Enable CDL T2A/T2B: NCQ priority must be disabled */
/*
* Enable CDL if not already enabled. Since this is mutually
* exclusive with NCQ priority, allow this only if NCQ priority
* is disabled.
*/
if (dev->flags & ATA_DFLAG_CDL_ENABLED)
return 0;
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
ata_dev_err(dev,
"NCQ priority must be disabled to enable CDL\n");
return -EINVAL;
}
ata_dev_dbg(dev, "Enabling CDL\n");
cdl_action = 1;
dev->flags |= ATA_DFLAG_CDL_ENABLED;
break;
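The enable path above makes CDL and NCQ priority mutually exclusive and turns repeat enables/disables into no-ops. The same state machine in miniature (flag bits are illustrative stand-ins for ATA_DFLAG_*):

#include <stdio.h>

#define FLAG_CDL_ENABLED      (1u << 0)
#define FLAG_NCQ_PRIO_ENABLED (1u << 1)

/* Mirror of the enable path: refuse CDL while NCQ priority is on. */
static int enable_cdl(unsigned int *flags)
{
	if (*flags & FLAG_CDL_ENABLED)
		return 0;                      /* already enabled: nothing to do */
	if (*flags & FLAG_NCQ_PRIO_ENABLED)
		return -1;                     /* mutually exclusive with NCQ prio */
	*flags |= FLAG_CDL_ENABLED;
	return 1;                              /* caller must issue SET FEATURES */
}

int main(void)
{
	unsigned int flags = FLAG_NCQ_PRIO_ENABLED;
	printf("%d\n", enable_cdl(&flags));    /* -1: rejected */
	flags = 0;
	printf("%d\n", enable_cdl(&flags));    /* 1: newly enabled */
	return 0;
}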

View File

@@ -223,10 +223,16 @@ static int pxa_ata_probe(struct platform_device *pdev)
ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start,
resource_size(cmd_res));
if (!ap->ioaddr.cmd_addr)
return -ENOMEM;
ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
resource_size(ctl_res));
if (!ap->ioaddr.ctl_addr)
return -ENOMEM;
ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start,
resource_size(dma_res));
if (!ap->ioaddr.bmdma_addr)
return -ENOMEM;
/*
* Adjust register offsets

View File

@@ -1117,9 +1117,14 @@ static int pdc20621_prog_dimm0(struct ata_host *host)
mmio += PDC_CHIP0_OFS;
for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
pdc_i2c_read_data[i].reg,
&spd0[pdc_i2c_read_data[i].ofs]);
&spd0[pdc_i2c_read_data[i].ofs])) {
dev_err(host->dev,
"Failed in i2c read at index %d: device=%#x, reg=%#x\n",
i, PDC_DIMM0_SPD_DEV_ADDRESS, pdc_i2c_read_data[i].reg);
return -EIO;
}
data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
@@ -1284,6 +1289,8 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
if (size < 0)
return size;
dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
/* Programming DIMM Module Global Control Register (index_CID0:88h) */

Some files were not shown because too many files have changed in this diff.