Merge android15-6.6 into android15-6.6-lts
This merges the android15-6.6 branch into the -lts branch, catching it up with the latest changes in there. It contains the following commits:

*2dabc476cf FROMGIT: pinmux: fix race causing mux_owner NULL with active mux_usecount
*fe630a0415 FROMGIT: f2fs: introduce reserved_pin_section sysfs entry
*615449fbac ANDROID: GKI: Update symbol list for xiaomi
*7f4572a697 ANDROID: export folio_deactivate() for GKI purpose.
*46e269016e FROMGIT: exfat: fdatasync flag should be same like generic_write_sync()
*789dd354a8 ANDROID: KVM: arm64: Don't update IOMMU under memory pressure
*f99b0f6dd2 ANDROID: KVM: arm64: Increase the pkvm reclaim buffer size
*9e89b97c13 Revert "ANDROID: Track per-process dmabuf RSS"
*b1eeaed7fb Revert "ANDROID: Track per-process dmabuf RSS HWM"
*30cf816a50 Revert "ANDROID: Track per-process dmabuf PSS"
*ad0b76e69f Revert "ANDROID: fixup task_struct to avoid ABI breakage"
*b26826e8ff Revert "ANDROID: fixup dma_buf struct to avoid ABI breakage"
*1f02134847 Revert "ANDROID: Add dmabuf RSS trace event"
*a9597c7b32 ANDROID: GKI: Update symbol list for Amlogic
*c8fdc081cf ANDROID: Add dmabuf RSS trace event
*e9f7ac1c25 ANDROID: fixup dma_buf struct to avoid ABI breakage
*59af12872d ANDROID: fixup task_struct to avoid ABI breakage
*0bf76c5311 ANDROID: Track per-process dmabuf PSS
*bddab7cf5d ANDROID: Track per-process dmabuf RSS HWM
*f44d593749 ANDROID: Track per-process dmabuf RSS
*250bbe1cbf ANDROID: GKI: Update symbol list for Pixel Watch
*96c29dad8f UPSTREAM: blk-cgroup: check for pd_(alloc|free)_fn in blkcg_activate_policy()
*be36ded303 ANDROID: Revert "cpufreq: Avoid using inconsistent policy->min and policy->max"
*5f592a6260 ANDROID: ABI: Update symbol list for sunxi
*573a6732fc ANDROID: GKI: Update symbols list file for honor White list the vm_normal_folio_pmd
*949ed5baba ANDROID: mm: export vm_normal_folio_pmd to allow vendors to implement simplified smaps
*5b1c4cc086 ANDROID: Add the dma header to aarch64 allowlist
*84bb4ef623 UPSTREAM: usb: gadget: u_serial: Fix race condition in TTY wakeup
*925ea90047 ANDROID: kvm: arm64: add per_cpu/cpuX/trace file
*65f295739c ANDROID: kvm: arm64: start hypervisor event IDs from 1
*6d27de405a ANDROID: KVM: arm64: use hyp_trace_raw_fops for trace_pipe_raw
*326b0bd632 BACKPORT: FROMGIT: sched/core: Fix migrate_swap() vs. hotplug
*cc8b083f6f ANDROID: ABI: Update pixel symbol list
*6d61bc2d2d ANDROID: restricted vendor_hook: add swap_readpage_bdev_sync
*2bc327484e BACKPORT: mm: page_alloc: tighten up find_suitable_fallback()
*b5b61c9e57 BACKPORT: mm: page_alloc: speed up fallbacks in rmqueue_bulk()
*ae27d6c79c UPSTREAM: mm/page_alloc: clarify should_claim_block() commentary
*59eb95395c BACKPORT: mm/page_alloc: clarify terminology in migratetype fallback code
*c746bc1949 BACKPORT: mm: page_alloc: group fallback functions together
*707dfe67d6 UPSTREAM: mm: page_alloc: remove remnants of unlocked migratetype updates
*65b7c505d9 BACKPORT: mm: page_alloc: don't steal single pages from biggest buddy
*4e131ac87c UPSTREAM: mm/page_alloc: add some detailed comments in can_steal_fallback
*bf5861fc36 UPSTREAM: mm: page_alloc: simpify page del and expand
*f45ef0a06f UPSTREAM: mm: page_alloc: change move_freepages() to __move_freepages_block()
*e0a00524db ANDROID: gki_defconfig: Enable CONFIG_UDMABUF
*cb35713803 ANDROID: scsi: ufs: add UFSHCD_ANDROID_QUIRK_NO_IS_READ_ON_H8
*279274c126 ANDROID: virt: gunyah: Replace arm_smccc_1_1_smc with arm_smccc_1_1_invoke
*3b5bd5416e UPSTREAM: net: fix udp gso skb_segment after pull from frag_list
*08115cdf70 UPSTREAM: posix-cpu-timers: fix race between handle_posix_cpu_timers() and posix_cpu_timer_del()

Change-Id: Ic6caff4e9d1624f1cb4d752a683f612ba3e40f4c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -1025,6 +1025,7 @@ ddk_headers(
     name = "all_headers_allowlist_aarch64",
     hdrs = [
         "drivers/dma-buf/heaps/deferred-free-helper.h",
+        "drivers/dma/dmaengine.h",
         "drivers/extcon/extcon.h",
         "drivers/pci/controller/dwc/pcie-designware.h",
         "drivers/thermal/thermal_core.h",
@@ -1046,6 +1047,7 @@ ddk_headers(
         "arch/arm64/include",
         "arch/arm64/include/uapi",
         "drivers/dma-buf",
+        "drivers/dma",
         "drivers/extcon",
         "drivers/pci/controller/dwc",
         "drivers/thermal",
@@ -858,3 +858,12 @@ Description: This is a read-only entry to show the value of sb.s_encoding_flags,
                             SB_ENC_STRICT_MODE_FL          0x00000001
                             SB_ENC_NO_COMPAT_FALLBACK_FL   0x00000002
                             ============================   ==========
+
+What:          /sys/fs/f2fs/<disk>/reserved_pin_section
+Date:          June 2025
+Contact:       "Chao Yu" <chao@kernel.org>
+Description:   This threshold is used to control triggering garbage collection while
+               fallocating on pinned file, so, it can guarantee there is enough free
+               reserved section before preallocating on pinned file.
+               By default, the value is ovp_sections, especially, for zoned ufs, the
+               value is 1.
@@ -8228,6 +8228,11 @@ pointer_reference {
   kind: POINTER
   pointee_type_id: 0x15e4d187
 }
+pointer_reference {
+  id: 0x0fe9f911
+  kind: POINTER
+  pointee_type_id: 0x15e702d9
+}
 pointer_reference {
   id: 0x0fe9ffda
   kind: POINTER
@@ -17573,6 +17578,11 @@ pointer_reference {
   kind: POINTER
   pointee_type_id: 0x9e7aaf3f
 }
+pointer_reference {
+  id: 0x2d0e9efd
+  kind: POINTER
+  pointee_type_id: 0x9e7a9d6b
+}
 pointer_reference {
   id: 0x2d0fdd7c
   kind: POINTER
@@ -27928,6 +27938,11 @@ pointer_reference {
   kind: POINTER
   pointee_type_id: 0xca7029d8
 }
+pointer_reference {
+  id: 0x380eb497
+  kind: POINTER
+  pointee_type_id: 0xca7a34c0
+}
 pointer_reference {
   id: 0x381020ff
   kind: POINTER
@@ -35043,6 +35058,11 @@ qualified {
   qualifier: CONST
   qualified_type_id: 0x592e728c
 }
+qualified {
+  id: 0xca7a34c0
+  qualifier: CONST
+  qualified_type_id: 0x59af6589
+}
 qualified {
   id: 0xca8285c3
   qualifier: CONST
@@ -99904,6 +99924,11 @@ member {
   type_id: 0x37e7a473
   offset: 768
 }
+member {
+  id: 0x36181e96
+  name: "funcs"
+  type_id: 0x380eb497
+}
 member {
   id: 0x36184afd
   name: "funcs"
@@ -152610,6 +152635,12 @@ member {
   type_id: 0x9bd401b6
   offset: 16
 }
+member {
+  id: 0xd3327091
+  name: "panel"
+  type_id: 0x10617cac
+  offset: 192
+}
 member {
   id: 0xd3a8d2cb
   name: "panel"
@@ -152633,6 +152664,17 @@ member {
   type_id: 0x2a670b41
   offset: 9024
 }
+member {
+  id: 0xf2e51365
+  name: "panel_prepared"
+  type_id: 0x2d0e9efd
+}
+member {
+  id: 0x289370ad
+  name: "panel_unpreparing"
+  type_id: 0x2d0e9efd
+  offset: 64
+}
 member {
   id: 0x616a797d
   name: "panic"
@@ -239344,6 +239386,27 @@ struct_union {
     member_id: 0x3a2d3750
   }
 }
+struct_union {
+  id: 0x15e702d9
+  kind: STRUCT
+  name: "drm_panel_follower"
+  definition {
+    bytesize: 32
+    member_id: 0x36181e96
+    member_id: 0x7c00ebb3
+    member_id: 0xd3327091
+  }
+}
+struct_union {
+  id: 0x59af6589
+  kind: STRUCT
+  name: "drm_panel_follower_funcs"
+  definition {
+    bytesize: 16
+    member_id: 0xf2e51365
+    member_id: 0x289370ad
+  }
+}
 struct_union {
   id: 0x5c75f1b8
   kind: STRUCT
@@ -308489,6 +308552,11 @@ function {
   parameter_id: 0x0258f96e
   parameter_id: 0xd41e888f
 }
+function {
+  id: 0x13622fd7
+  return_type_id: 0x48b5725f
+  parameter_id: 0x0fe9f911
+}
 function {
   id: 0x1362a71c
   return_type_id: 0x48b5725f
@@ -321953,6 +322021,13 @@ function {
   parameter_id: 0x27a7c613
   parameter_id: 0x4585663f
 }
+function {
+  id: 0x5e21336c
+  return_type_id: 0x2170d06d
+  parameter_id: 0x0a134144
+  parameter_id: 0x33756485
+  parameter_id: 0xae60496e
+}
 function {
   id: 0x5e29431a
   return_type_id: 0x295c7202
@@ -328930,6 +329005,12 @@ function {
   parameter_id: 0x391f15ea
   parameter_id: 0xf435685e
 }
+function {
+  id: 0x9294d8c1
+  return_type_id: 0x6720d32f
+  parameter_id: 0x3c01aef6
+  parameter_id: 0x051414e1
+}
 function {
   id: 0x92956fd0
   return_type_id: 0x6720d32f
@@ -345546,6 +345627,12 @@ function {
   parameter_id: 0x0258f96e
   parameter_id: 0x0fa01494
 }
+function {
+  id: 0x9d297a90
+  return_type_id: 0x6720d32f
+  parameter_id: 0x0258f96e
+  parameter_id: 0x0fe9f911
+}
 function {
   id: 0x9d2c14da
   return_type_id: 0x6720d32f
@@ -348123,6 +348210,11 @@ function {
   parameter_id: 0x0c2e195c
   parameter_id: 0x3ca4f8de
 }
+function {
+  id: 0x9e7a9d6b
+  return_type_id: 0x6720d32f
+  parameter_id: 0x0fe9f911
+}
 function {
   id: 0x9e7aaf3f
   return_type_id: 0x6720d32f
@@ -360789,6 +360881,15 @@ elf_symbol {
   type_id: 0x9baf3eaf
   full_name: "__traceiter_android_rvh_show_max_freq"
 }
+elf_symbol {
+  id: 0xb80ecc98
+  name: "__traceiter_android_rvh_swap_readpage_bdev_sync"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0xecf99d88
+  type_id: 0x9bab3090
+  full_name: "__traceiter_android_rvh_swap_readpage_bdev_sync"
+}
 elf_symbol {
   id: 0x3b650ee3
   name: "__traceiter_android_rvh_tcp_rcv_spurious_retrans"
@@ -367899,6 +368000,15 @@ elf_symbol {
   type_id: 0x18ccbd2c
   full_name: "__tracepoint_android_rvh_show_max_freq"
 }
+elf_symbol {
+  id: 0x64ce7cd6
+  name: "__tracepoint_android_rvh_swap_readpage_bdev_sync"
+  is_defined: true
+  symbol_type: OBJECT
+  crc: 0x72fbf2a6
+  type_id: 0x18ccbd2c
+  full_name: "__tracepoint_android_rvh_swap_readpage_bdev_sync"
+}
 elf_symbol {
   id: 0x5380a8d5
   name: "__tracepoint_android_rvh_tcp_rcv_spurious_retrans"
@@ -389888,6 +389998,15 @@ elf_symbol {
   type_id: 0xfa1de4ef
   full_name: "drm_is_current_master"
 }
+elf_symbol {
+  id: 0xa3983618
+  name: "drm_is_panel_follower"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0xcfdfa487
+  type_id: 0xfe32655f
+  full_name: "drm_is_panel_follower"
+}
 elf_symbol {
   id: 0xc8af6225
   name: "drm_kms_helper_connector_hotplug_event"
@@ -390536,6 +390655,15 @@ elf_symbol {
   type_id: 0x14800eb8
   full_name: "drm_panel_add"
 }
+elf_symbol {
+  id: 0x2b742694
+  name: "drm_panel_add_follower"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x2db618bd
+  type_id: 0x9d297a90
+  full_name: "drm_panel_add_follower"
+}
 elf_symbol {
   id: 0xd67ad69f
   name: "drm_panel_bridge_add_typed"
@@ -390626,6 +390754,15 @@ elf_symbol {
   type_id: 0x14800eb8
   full_name: "drm_panel_remove"
 }
+elf_symbol {
+  id: 0x6016204a
+  name: "drm_panel_remove_follower"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x397cfaf5
+  type_id: 0x13622fd7
+  full_name: "drm_panel_remove_follower"
+}
 elf_symbol {
   id: 0x046720ab
   name: "drm_panel_unprepare"
@@ -393051,6 +393188,15 @@ elf_symbol {
   type_id: 0xf6f86f1f
   full_name: "folio_clear_dirty_for_io"
 }
+elf_symbol {
+  id: 0x1ac8aa52
+  name: "folio_deactivate"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x7abc9b3a
+  type_id: 0x18c46588
+  full_name: "folio_deactivate"
+}
 elf_symbol {
   id: 0xf83588d6
   name: "folio_end_private_2"
@@ -393078,6 +393224,15 @@ elf_symbol {
   type_id: 0x637004ab
   full_name: "folio_mapping"
 }
+elf_symbol {
+  id: 0xd2e101fd
+  name: "folio_mark_accessed"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x74311ee4
+  type_id: 0x18c46588
+  full_name: "folio_mark_accessed"
+}
 elf_symbol {
   id: 0xcef0ca54
   name: "folio_mark_dirty"
@@ -396727,6 +396882,24 @@ elf_symbol {
   type_id: 0x13e1603f
   full_name: "hid_destroy_device"
 }
+elf_symbol {
+  id: 0x1706be22
+  name: "hid_driver_reset_resume"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x371549c9
+  type_id: 0x9ef9d283
+  full_name: "hid_driver_reset_resume"
+}
+elf_symbol {
+  id: 0x4c3911f0
+  name: "hid_driver_suspend"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0xe6a4222b
+  type_id: 0x9d398c85
+  full_name: "hid_driver_suspend"
+}
 elf_symbol {
   id: 0x8717f26f
   name: "hid_hw_close"
@@ -422588,6 +422761,15 @@ elf_symbol {
   type_id: 0x909c23c2
   full_name: "snd_soc_get_dai_id"
 }
+elf_symbol {
+  id: 0x4086fab0
+  name: "snd_soc_get_dai_name"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x347721f4
+  type_id: 0x9294d8c1
+  full_name: "snd_soc_get_dai_name"
+}
 elf_symbol {
   id: 0xa64c7fe5
   name: "snd_soc_get_dai_via_args"
@@ -434533,6 +434715,15 @@ elf_symbol {
   type_id: 0xfc37fa4b
   full_name: "vm_node_stat"
 }
+elf_symbol {
+  id: 0x4e194253
+  name: "vm_normal_folio_pmd"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0xa737dbaa
+  type_id: 0x5e21336c
+  full_name: "vm_normal_folio_pmd"
+}
 elf_symbol {
   id: 0x2570ceae
   name: "vm_normal_page"
@@ -436958,6 +437149,7 @@ interface {
   symbol_id: 0x1228e7e9
   symbol_id: 0x73c83ef4
   symbol_id: 0x46515de8
+  symbol_id: 0xb80ecc98
   symbol_id: 0x3b650ee3
   symbol_id: 0xcf016f05
   symbol_id: 0x79480d0a
@@ -437748,6 +437940,7 @@ interface {
   symbol_id: 0x8a4070f7
   symbol_id: 0x00b7ed82
   symbol_id: 0xe8cacf26
+  symbol_id: 0x64ce7cd6
   symbol_id: 0x5380a8d5
   symbol_id: 0x1f12a317
   symbol_id: 0x454d16cc
@@ -440188,6 +440381,7 @@ interface {
   symbol_id: 0x3a6e27e9
   symbol_id: 0xc9aa2ffd
   symbol_id: 0xec79cf1c
+  symbol_id: 0xa3983618
   symbol_id: 0xc8af6225
   symbol_id: 0x8a043efe
   symbol_id: 0x3c6b600d
@@ -440260,6 +440454,7 @@ interface {
   symbol_id: 0xc73568f4
   symbol_id: 0x124ae77d
   symbol_id: 0xdc6725cf
+  symbol_id: 0x2b742694
   symbol_id: 0xd67ad69f
   symbol_id: 0x48cde8a9
   symbol_id: 0x633d0644
@@ -440270,6 +440465,7 @@ interface {
   symbol_id: 0xad1d778f
   symbol_id: 0xcf81b673
   symbol_id: 0x864914fa
+  symbol_id: 0x6016204a
   symbol_id: 0x046720ab
   symbol_id: 0x3c07bbff
   symbol_id: 0xbdb562b1
@@ -440539,9 +440735,11 @@ interface {
   symbol_id: 0x3c7c2553
   symbol_id: 0x06c58be7
   symbol_id: 0xab55569c
+  symbol_id: 0x1ac8aa52
   symbol_id: 0xf83588d6
   symbol_id: 0xa1c5bd8d
   symbol_id: 0x159a69a3
+  symbol_id: 0xd2e101fd
   symbol_id: 0xcef0ca54
   symbol_id: 0x39840ab2
   symbol_id: 0xc05a6c7d
@@ -440946,6 +441144,8 @@ interface {
   symbol_id: 0xccc593d6
   symbol_id: 0x97a02af0
   symbol_id: 0x2ffc7c7e
+  symbol_id: 0x1706be22
+  symbol_id: 0x4c3911f0
   symbol_id: 0x8717f26f
   symbol_id: 0x361004c8
   symbol_id: 0xcf5ea9a2
@@ -443815,6 +444015,7 @@ interface {
   symbol_id: 0x7918ef41
   symbol_id: 0x97843792
   symbol_id: 0x54622a57
+  symbol_id: 0x4086fab0
   symbol_id: 0xa64c7fe5
   symbol_id: 0x5eb2e502
   symbol_id: 0x33a917a0
@@ -445141,6 +445342,7 @@ interface {
   symbol_id: 0xdc09fb10
   symbol_id: 0x5849ff8e
   symbol_id: 0xaf85c216
+  symbol_id: 0x4e194253
   symbol_id: 0x2570ceae
   symbol_id: 0xacc76406
   symbol_id: 0xef2c49d1
@@ -1,3 +1,5 @@
+
+
 [abi_symbol_list]
   add_cpu
   add_device_randomness
@@ -209,10 +211,12 @@
   consume_skb
   contig_page_data
   __contpte_try_unfold
+  _copy_from_iter
   copy_from_kernel_nofault
   __copy_overflow
   copy_page_from_iter_atomic
   copy_splice_read
+  _copy_to_iter
   cpu_all_bits
   cpu_bit_bitmap
   cpufreq_boost_enabled
@@ -245,10 +249,13 @@
   crypto_aead_setauthsize
   crypto_aead_setkey
   crypto_ahash_digest
+  crypto_ahash_final
+  crypto_ahash_finup
   crypto_ahash_setkey
   crypto_alloc_aead
   crypto_alloc_ahash
   crypto_alloc_base
+  crypto_alloc_rng
   crypto_alloc_shash
   crypto_alloc_skcipher
   crypto_cipher_encrypt_one
@@ -258,13 +265,17 @@
   crypto_dequeue_request
   crypto_destroy_tfm
   crypto_enqueue_request
+  crypto_get_default_null_skcipher
   crypto_has_alg
   crypto_init_queue
   __crypto_memneq
+  crypto_put_default_null_skcipher
   crypto_register_ahash
   crypto_register_alg
   crypto_register_shash
   crypto_register_skcipher
+  crypto_req_done
+  crypto_rng_reset
   crypto_sha1_finup
   crypto_sha1_update
   crypto_shash_digest
@@ -623,6 +634,7 @@
   drm_atomic_set_mode_prop_for_crtc
   drm_atomic_state_alloc
   drm_atomic_state_clear
+  drm_atomic_state_default_release
   __drm_atomic_state_free
   drm_compat_ioctl
   drm_connector_attach_content_type_property
@@ -793,6 +805,7 @@
   extcon_set_state
   extcon_set_state_sync
   extcon_unregister_notifier
+  extract_iter_to_sg
   fasync_helper
   fault_in_iov_iter_readable
   __fdget
@@ -1102,8 +1115,10 @@
   ioremap_prot
   io_schedule
   iounmap
+  iov_iter_advance
   iov_iter_alignment
   iov_iter_init
+  iov_iter_npages
   iov_iter_revert
   iov_iter_zero
   iput
@@ -1269,12 +1284,14 @@
   __local_bh_enable_ip
   __lock_buffer
   lockref_get
+  lock_sock_nested
   logfc
   log_post_read_mmio
   log_post_write_mmio
   log_read_mmio
   log_write_mmio
   lookup_bdev
+  lookup_user_key
   loops_per_jiffy
   LZ4_decompress_safe
   LZ4_decompress_safe_partial
@@ -1726,6 +1743,8 @@
   proc_mkdir
   proc_mkdir_data
   proc_remove
+  proto_register
+  proto_unregister
   __pskb_copy_fclone
   pskb_expand_head
   __pskb_pull_tail
@@ -1845,6 +1864,8 @@
   release_firmware
   __release_region
   release_resource
+  release_sock
+  release_sock
   remap_pfn_range
   remap_vmalloc_range
   remove_cpu
@@ -1940,6 +1961,8 @@
   sdio_writel
   sdio_writesb
   sdio_writew
+  security_sk_clone
+  security_sock_graft
   send_sig
   seq_list_next
   seq_list_start
@@ -2000,6 +2023,7 @@
   single_open_size
   single_release
   si_swapinfo
+  sk_alloc
   skb_add_rx_frag
   skb_checksum_help
   skb_clone
@@ -2026,6 +2050,7 @@
   skb_scrub_packet
   skb_trim
   skb_tstamp_tx
+  sk_free
   skip_spaces
   smpboot_register_percpu_thread
   smp_call_function
@@ -2046,6 +2071,7 @@
   snd_pcm_lib_preallocate_pages
   snd_pcm_period_elapsed
   snd_pcm_rate_to_rate_bit
+  snd_pcm_set_managed_buffer_all
   snd_pcm_stop
   snd_pcm_stop_xrun
   _snd_pcm_stream_lock_irqsave
@@ -2068,6 +2094,7 @@
   snd_soc_dai_set_tdm_slot
   snd_soc_dapm_get_enum_double
   snd_soc_dapm_put_enum_double
+  snd_soc_get_dai_name
   snd_soc_get_volsw
   snd_soc_get_volsw_range
   snd_soc_info_enum_double
@@ -2082,6 +2109,7 @@
   snd_soc_of_parse_audio_simple_widgets
   snd_soc_of_parse_card_name
   snd_soc_of_parse_tdm_slot
+  snd_soc_of_put_dai_link_codecs
   snd_soc_pm_ops
   snd_soc_put_volsw
   snd_soc_put_volsw_range
@@ -2090,7 +2118,25 @@
   snd_soc_unregister_component
   snprintf
   __sock_create
+  sock_init_data
+  sock_kfree_s
+  sock_kmalloc
+  sock_kzfree_s
+  sock_no_accept
+  sock_no_bind
+  sock_no_connect
+  sock_no_getname
+  sock_no_ioctl
+  sock_no_listen
+  sock_no_mmap
+  sock_no_recvmsg
+  sock_no_sendmsg
+  sock_no_shutdown
+  sock_no_socketpair
+  sock_register
   sock_release
+  sock_unregister
+  sock_wake_async
   sock_wfree
   sort
   spi_add_device
@@ -2172,6 +2218,7 @@
   sysfs_create_file_ns
   sysfs_create_files
   sysfs_create_group
+  sysfs_create_groups
   sysfs_create_link
   sysfs_emit
   __sysfs_match_string
@@ -2574,10 +2621,12 @@
   wakeup_source_register
   wakeup_source_unregister
   __wake_up_sync
+  __wake_up_sync_key
   __warn_flushing_systemwide_wq
   __warn_printk
   wireless_nlevent_flush
   wireless_send_event
+  woken_wake_function
   work_busy
   write_cache_pages
   write_inode_now
@@ -94,6 +94,7 @@
   bio_crypt_set_ctx
   zero_fill_bio_iter
   percpu_ref_is_zero
+  vm_normal_folio_pmd
   __trace_bputs
   __traceiter_android_vh_proactive_compact_wmark_high
   __tracepoint_android_vh_proactive_compact_wmark_high
@@ -2669,6 +2669,7 @@
   __traceiter_android_rvh_setscheduler_prio
   __traceiter_android_rvh_set_task_cpu
   __traceiter_android_rvh_set_user_nice_locked
+  __traceiter_android_rvh_swap_readpage_bdev_sync
   __traceiter_android_rvh_tick_entry
   __traceiter_android_rvh_try_to_wake_up_success
   __traceiter_android_rvh_uclamp_eff_get
@@ -2808,6 +2809,7 @@
   __tracepoint_android_rvh_setscheduler_prio
   __tracepoint_android_rvh_set_task_cpu
   __tracepoint_android_rvh_set_user_nice_locked
+  __tracepoint_android_rvh_swap_readpage_bdev_sync
   __tracepoint_android_rvh_tick_entry
   __tracepoint_android_rvh_try_to_wake_up_success
   __tracepoint_android_rvh_uclamp_eff_get
@@ -288,6 +288,7 @@
   delayed_work_timer_fn
   destroy_workqueue
   dev_addr_mod
+  _dev_alert
   dev_alloc_name
   __dev_change_net_namespace
   dev_close
@@ -869,6 +870,7 @@
   gpiod_get_raw_value
   gpiod_get_raw_value_cansleep
   gpiod_get_value
+  gpiod_is_active_low
   gpiod_set_raw_value
   gpiod_set_value
   gpiod_set_value_cansleep
@@ -2091,6 +2093,7 @@
   tick_nohz_get_sleep_length
   timer_delete
   timer_delete_sync
+  timer_shutdown_sync
   topology_clear_scale_freq_source
   topology_update_done
   topology_update_thermal_pressure
@@ -2171,6 +2174,10 @@
   __traceiter_mmap_lock_acquire_returned
   __traceiter_mmap_lock_released
   __traceiter_mmap_lock_start_locking
+  __traceiter_rwmmio_post_read
+  __traceiter_rwmmio_post_write
+  __traceiter_rwmmio_read
+  __traceiter_rwmmio_write
   __traceiter_sched_overutilized_tp
   __traceiter_sched_switch
   __traceiter_sk_data_ready
@@ -2246,6 +2253,10 @@
   tracepoint_probe_register
   tracepoint_probe_register_prio
   tracepoint_probe_unregister
+  __tracepoint_rwmmio_post_read
+  __tracepoint_rwmmio_post_write
+  __tracepoint_rwmmio_read
+  __tracepoint_rwmmio_write
   __tracepoint_sched_overutilized_tp
   __tracepoint_sched_switch
   __tracepoint_sk_data_ready
@@ -91,3 +91,8 @@
   __tracepoint_dwc3_readl
   __tracepoint_dwc3_writel
   pinctrl_gpio_set_config
+  drm_is_panel_follower
+  drm_panel_add_follower
+  drm_panel_remove_follower
+  hid_driver_reset_resume
+  hid_driver_suspend
@@ -197,6 +197,10 @@
   __tracepoint_android_rvh_dequeue_task_fair
   __tracepoint_android_rvh_entity_tick
+
+# required by mi_damon.ko
+  folio_deactivate
+  folio_mark_accessed
 
 #required by cpq.ko
   elv_rb_former_request
   elv_rb_latter_request
@@ -581,6 +581,7 @@ CONFIG_RTC_CLASS=y
 CONFIG_RTC_LIB_KUNIT_TEST=m
 CONFIG_RTC_DRV_PL030=y
 CONFIG_RTC_DRV_PL031=y
+CONFIG_UDMABUF=y
 CONFIG_DMABUF_HEAPS=y
 CONFIG_DMABUF_SYSFS_STATS=y
 CONFIG_DMABUF_HEAPS_DEFERRED_FREE=y
@@ -593,7 +593,7 @@ static inline unsigned long host_s2_pgtable_pages(void)
  * Maximum number of consitutents allowed in a descriptor. This number is
  * arbitrary, see comment below on SG_MAX_SEGMENTS in hyp_ffa_proxy_pages().
  */
-#define KVM_FFA_MAX_NR_CONSTITUENTS 4096
+#define KVM_FFA_MAX_NR_CONSTITUENTS 12288
 
 static inline unsigned long hyp_ffa_proxy_pages(void)
 {
@@ -491,17 +491,9 @@ int __pkvm_prot_finalize(void)
 
 int host_stage2_unmap_reg_locked(phys_addr_t start, u64 size)
 {
-        int ret;
-
         hyp_assert_lock_held(&host_mmu.lock);
 
-        ret = kvm_pgtable_stage2_reclaim_leaves(&host_mmu.pgt, start, size);
-        if (ret)
-                return ret;
-
-        kvm_iommu_host_stage2_idmap(start, start + size, 0);
-
-        return 0;
+        return kvm_pgtable_stage2_reclaim_leaves(&host_mmu.pgt, start, size);
 }
 
 static int host_stage2_unmap_unmoveable_regs(void)
@@ -250,7 +250,10 @@ bool hyp_trace_init_event_early(void)
 }
 
 static struct dentry *event_tracefs;
-static unsigned int last_event_id;
+// Event IDs should be positive integers, hence starting from 1 here.
+// NOTE: this introduces ID clash between hypervisor events and kernel events.
+// For now this doesn't seem to cause problems, but we should fix it...
+static unsigned int last_event_id = 1;
 
 struct hyp_event_table {
         struct hyp_event *start;
@@ -861,7 +861,9 @@ int hyp_trace_init_tracefs(void)
                 tracefs_create_file("trace_pipe", TRACEFS_MODE_READ, per_cpu_dir,
                                     (void *)cpu, &hyp_trace_pipe_fops);
                 tracefs_create_file("trace_pipe_raw", TRACEFS_MODE_READ, per_cpu_dir,
-                                    (void *)cpu, &hyp_trace_pipe_fops);
+                                    (void *)cpu, &hyp_trace_raw_fops);
+                tracefs_create_file("trace", TRACEFS_MODE_WRITE, per_cpu_dir,
+                                    (void *)cpu, &hyp_trace_fops);
         }
 
         hyp_trace_init_event_tracefs(root);
@@ -535,6 +535,7 @@ CONFIG_LEDS_TRIGGER_TRANSIENT=y
 CONFIG_EDAC=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_LIB_KUNIT_TEST=m
+CONFIG_UDMABUF=y
 CONFIG_DMABUF_HEAPS=y
 CONFIG_DMABUF_SYSFS_STATS=y
 CONFIG_DMABUF_HEAPS_DEFERRED_FREE=y
@@ -1566,6 +1566,14 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
         if (blkcg_policy_enabled(q, pol))
                 return 0;
 
+        /*
+         * Policy is allowed to be registered without pd_alloc_fn/pd_free_fn,
+         * for example, ioprio. Such policy will work on blkcg level, not disk
+         * level, and don't need to be activated.
+         */
+        if (WARN_ON_ONCE(!pol->pd_alloc_fn || !pol->pd_free_fn))
+                return -EINVAL;
+
         if (queue_is_mq(q))
                 blk_mq_freeze_queue(q);
 retry:
@@ -1745,9 +1753,12 @@ int blkcg_policy_register(struct blkcg_policy *pol)
                 goto err_unlock;
         }
 
-        /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
+        /*
+         * Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs, and policy
+         * without pd_alloc_fn/pd_free_fn can't be activated.
+         */
         if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
             (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
                 goto err_unlock;
 
         /* register @pol */
@@ -625,6 +625,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_migration_target_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swap_writepage);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swap_readpage_bdev_sync);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_swap_readpage_bdev_sync);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dpm_wait_start);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dpm_wait_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sync_irq_wait_start);
@@ -543,6 +543,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
         unsigned int idx;
         unsigned int old_target_freq = target_freq;
 
+        target_freq = clamp_val(target_freq, policy->min, policy->max);
         trace_android_vh_cpufreq_resolve_freq(policy, &target_freq, old_target_freq);
 
         if (!policy->freq_table)
@@ -568,22 +569,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
                                          unsigned int target_freq)
 {
-        unsigned int min = READ_ONCE(policy->min);
-        unsigned int max = READ_ONCE(policy->max);
-
-        /*
-         * If this function runs in parallel with cpufreq_set_policy(), it may
-         * read policy->min before the update and policy->max after the update
-         * or the other way around, so there is no ordering guarantee.
-         *
-         * Resolve this by always honoring the max (in case it comes from
-         * thermal throttling or similar).
-         */
-        if (unlikely(min > max))
-                min = max;
-
-        return __resolve_freq(policy, clamp_val(target_freq, min, max),
-                              CPUFREQ_RELATION_LE);
+        return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
 
@@ -2369,7 +2355,6 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
         if (cpufreq_disabled())
                 return -ENODEV;
 
-        target_freq = clamp_val(target_freq, policy->min, policy->max);
         target_freq = __resolve_freq(policy, target_freq, relation);
 
         trace_android_vh_cpufreq_target(policy, &target_freq, old_target_freq);
@@ -2662,15 +2647,11 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
          * Resolve policy min/max to available frequencies. It ensures
          * no frequency resolution will neither overshoot the requested maximum
          * nor undershoot the requested minimum.
-         *
-         * Avoid storing intermediate values in policy->max or policy->min and
-         * compiler optimizations around them because they may be accessed
-         * concurrently by cpufreq_driver_resolve_freq() during the update.
          */
-        WRITE_ONCE(policy->max, __resolve_freq(policy, new_data.max, CPUFREQ_RELATION_H));
-        new_data.min = __resolve_freq(policy, new_data.min, CPUFREQ_RELATION_L);
-        WRITE_ONCE(policy->min, new_data.min > policy->max ? policy->max : new_data.min);
+        policy->min = new_data.min;
+        policy->max = new_data.max;
+        policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
+        policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
 
         trace_cpu_frequency_limits(policy);
 
         policy->cached_target_freq = UINT_MAX;
@@ -238,6 +238,15 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
                         if (desc->mux_usecount)
                                 return NULL;
                 }
+
+                if (gpio_range) {
+                        owner = desc->gpio_owner;
+                        desc->gpio_owner = NULL;
+                } else {
+                        owner = desc->mux_owner;
+                        desc->mux_owner = NULL;
+                        desc->mux_setting = NULL;
+                }
         }
 
         /*
@@ -249,17 +258,6 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
         else if (ops->free)
                 ops->free(pctldev, pin);
 
-        scoped_guard(mutex, &desc->mux_lock) {
-                if (gpio_range) {
-                        owner = desc->gpio_owner;
-                        desc->gpio_owner = NULL;
-                } else {
-                        owner = desc->mux_owner;
-                        desc->mux_owner = NULL;
-                        desc->mux_setting = NULL;
-                }
-        }
-
         module_put(pctldev->owner);
 
         return owner;
@@ -7018,6 +7018,11 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
                 if (enabled_intr_status)
                         retval |= ufshcd_sl_intr(hba, enabled_intr_status);
 
+                if (hba->android_quirks &
+                    UFSHCD_ANDROID_QUIRK_NO_IS_READ_ON_H8 &&
+                    intr_status & UIC_HIBERNATE_ENTER)
+                        break;
+
                 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
         }
 
@@ -291,8 +291,8 @@ __acquires(&port->port_lock)
                         break;
         }
 
-        if (do_tty_wake && port->port.tty)
-                tty_wakeup(port->port.tty);
+        if (do_tty_wake)
+                tty_port_tty_wakeup(&port->port);
         return status;
 }
 
@@ -573,7 +573,7 @@ static int gs_start_io(struct gs_port *port)
                 gs_start_tx(port);
                 /* Unblock any pending writes into our circular buffer, in case
                  * we didn't in gs_start_tx() */
-                tty_wakeup(port->port.tty);
+                tty_port_tty_wakeup(&port->port);
         } else {
                 /* Free reqs only if we are still connected */
                 if (port->port_usb) {
@@ -187,7 +187,7 @@ static bool gunyah_has_qcom_extensions(void)
         uuid_t uuid;
         u32 *up;
 
-        arm_smccc_1_1_smc(GUNYAH_QCOM_EXT_CALL_UUID_ID, &res);
+        arm_smccc_1_1_invoke(GUNYAH_QCOM_EXT_CALL_UUID_ID, &res);
 
         up = (u32 *)&uuid.b[0];
         up[0] = lower_32_bits(res.a0);
@@ -610,9 +610,8 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
         if (pos > valid_size)
                 pos = valid_size;
 
-        if (iocb_is_dsync(iocb) && iocb->ki_pos > pos) {
-                ssize_t err = vfs_fsync_range(file, pos, iocb->ki_pos - 1,
-                                iocb->ki_flags & IOCB_SYNC);
+        if (iocb->ki_pos > pos) {
+                ssize_t err = generic_write_sync(iocb, iocb->ki_pos - pos);
                 if (err < 0)
                         return err;
         }
@@ -1703,6 +1703,9 @@ struct f2fs_sb_info {
         /* for skip statistic */
         unsigned long long skipped_gc_rwsem;    /* FG_GC only */
 
+        /* free sections reserved for pinned file */
+        unsigned int reserved_pin_section;
+
         /* threshold for gc trials on pinned files */
         unsigned short gc_pin_file_threshold;
         struct f2fs_rwsem pin_sem;
@@ -1859,9 +1859,8 @@ next_alloc:
                         }
                 }
 
-                if (has_not_enough_free_secs(sbi, 0, f2fs_sb_has_blkzoned(sbi) ?
-                        ZONED_PIN_SEC_REQUIRED_COUNT :
-                        GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
+                if (has_not_enough_free_secs(sbi, 0,
+                                sbi->reserved_pin_section)) {
                         f2fs_down_write(&sbi->gc_lock);
                         stat_inc_gc_call_count(sbi, FOREGROUND);
                         err = f2fs_gc(sbi, &gc_control);
@@ -4668,6 +4668,10 @@ try_onemore:
         /* get segno of first zoned block device */
         sbi->first_zoned_segno = get_first_zoned_segno(sbi);
 
+        sbi->reserved_pin_section = f2fs_sb_has_blkzoned(sbi) ?
+                        ZONED_PIN_SEC_REQUIRED_COUNT :
+                        GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi));
+
         /* Read accumulated write IO statistics if exists */
         seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
         if (__exist_node_summaries(sbi))
@@ -824,6 +824,13 @@ out:
                 return count;
         }
 
+        if (!strcmp(a->attr.name, "reserved_pin_section")) {
+                if (t > GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))
+                        return -EINVAL;
+                *ui = (unsigned int)t;
+                return count;
+        }
+
         *ui = (unsigned int)t;
 
         return count;
@@ -1130,6 +1137,7 @@ F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
 F2FS_SBI_GENERAL_RW_ATTR(blkzone_alloc_policy);
 #endif
 F2FS_SBI_GENERAL_RW_ATTR(carve_out);
+F2FS_SBI_GENERAL_RW_ATTR(reserved_pin_section);
 
 /* STAT_INFO ATTR */
 #ifdef CONFIG_F2FS_STAT_FS
@@ -1323,6 +1331,7 @@ static struct attribute *f2fs_attrs[] = {
         ATTR_LIST(last_age_weight),
         ATTR_LIST(max_read_extent_count),
         ATTR_LIST(carve_out),
+        ATTR_LIST(reserved_pin_section),
         NULL,
 };
 ATTRIBUTE_GROUPS(f2fs);
@@ -549,6 +549,10 @@ DECLARE_HOOK(android_vh_swap_readpage_bdev_sync,
         TP_PROTO(struct block_device *bdev, sector_t sector,
                  struct page *page, bool *read),
         TP_ARGS(bdev, sector, page, read));
+DECLARE_RESTRICTED_HOOK(android_rvh_swap_readpage_bdev_sync,
+        TP_PROTO(struct block_device *bdev, sector_t sector,
+                 struct page *page, bool *read),
+        TP_ARGS(bdev, sector, page, read), 4);
 DECLARE_HOOK(android_vh_alloc_flags_cma_adjust,
         TP_PROTO(gfp_t gfp_mask, unsigned int *alloc_flags),
         TP_ARGS(gfp_mask, alloc_flags));
@@ -704,6 +704,9 @@ enum ufshcd_android_quirks {
 
         /* Set IID to one. */
         UFSHCD_ANDROID_QUIRK_SET_IID_TO_ONE = 1 << 30,
+
+        /* Do not read IS after H8 enter */
+        UFSHCD_ANDROID_QUIRK_NO_IS_READ_ON_H8 = 1 << 31,
 };
 
 enum ufshcd_caps {
@@ -4073,6 +4073,11 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 
 static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
 {
+#ifdef CONFIG_SMP
+        if (p->sched_class == &stop_sched_class)
+                return false;
+#endif
+
         /*
          * Do not complicate things with the async wake_list while the CPU is
          * in hotplug state.
@@ -82,18 +82,15 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
 }
 
 static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
-					struct cpu_stop_work *work,
-					struct wake_q_head *wakeq)
+					struct cpu_stop_work *work)
 {
 	list_add_tail(&work->list, &stopper->works);
-	wake_q_add(wakeq, stopper->thread);
 }
 
 /* queue @work to @stopper. if offline, @work is completed immediately */
 static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
 	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-	DEFINE_WAKE_Q(wakeq);
 	unsigned long flags;
 	bool enabled;
 
@@ -101,12 +98,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 	raw_spin_lock_irqsave(&stopper->lock, flags);
 	enabled = stopper->enabled;
 	if (enabled)
-		__cpu_stop_queue_work(stopper, work, &wakeq);
+		__cpu_stop_queue_work(stopper, work);
 	else if (work->done)
 		cpu_stop_signal_done(work->done);
 	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
-	wake_up_q(&wakeq);
+	if (enabled)
+		wake_up_process(stopper->thread);
 	preempt_enable();
 
 	return enabled;
@@ -264,7 +262,6 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 {
 	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
 	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
-	DEFINE_WAKE_Q(wakeq);
 	int err;
 
 retry:
@@ -300,8 +297,8 @@ retry:
 	}
 
 	err = 0;
-	__cpu_stop_queue_work(stopper1, work1, &wakeq);
-	__cpu_stop_queue_work(stopper2, work2, &wakeq);
+	__cpu_stop_queue_work(stopper1, work1);
+	__cpu_stop_queue_work(stopper2, work2);
 
 unlock:
 	raw_spin_unlock(&stopper2->lock);
@@ -316,7 +313,10 @@ unlock:
 		goto retry;
 	}
 
-	wake_up_q(&wakeq);
+	if (!err) {
+		wake_up_process(stopper1->thread);
+		wake_up_process(stopper2->thread);
+	}
 	preempt_enable();
 
 	return err;
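The stop_machine hunks above replace the wake_q plumbing with a direct wake_up_process() call made after the stopper lock is dropped, and only when the work was actually queued. Below is a minimal userspace sketch of that ordering; the struct, the pthread mutex and the wake_stopper_thread() stub are stand-ins of mine, not names from the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct stopper {
	pthread_mutex_t lock;
	bool enabled;
	int queued;		/* stand-in for the works list */
};

/* Stand-in for wake_up_process(): just record the wakeup. */
static void wake_stopper_thread(struct stopper *s)
{
	printf("wake stopper (queued=%d)\n", s->queued);
}

/*
 * Mirrors the new cpu_stop_queue_work() flow: queue under the lock,
 * then wake outside the lock, and only if the work was actually queued.
 */
static bool queue_stop_work(struct stopper *s)
{
	bool enabled;

	pthread_mutex_lock(&s->lock);
	enabled = s->enabled;
	if (enabled)
		s->queued++;
	pthread_mutex_unlock(&s->lock);

	if (enabled)
		wake_stopper_thread(s);

	return enabled;
}

int main(void)
{
	struct stopper s = { PTHREAD_MUTEX_INITIALIZER, true, 0 };

	queue_stop_work(&s);	/* queued and woken */
	s.enabled = false;
	queue_stop_work(&s);	/* neither queued nor woken */
	return 0;
}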
@@ -2279,7 +2279,6 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 	ret = COMPACT_NO_SUITABLE_PAGE;
 	for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
 		struct free_area *area = &cc->zone->free_area[order];
-		bool can_steal;
 
 		/* Job done if page is free of the right migratetype */
 		if (!free_area_empty(area, migratetype))
@@ -2295,8 +2294,7 @@ static enum compact_result __compact_finished(struct compact_control *cc)
 		 * Job done if allocation would steal freepages from
 		 * other migratetype buddy lists.
 		 */
-		if (find_suitable_fallback(area, order, migratetype,
-						true, &can_steal) != -1)
+		if (find_suitable_fallback(area, order, migratetype, true) >= 0)
 			/*
 			 * Movable pages are OK in any pageblock. If we are
 			 * stealing for a non-movable allocation, make sure
@@ -815,7 +815,7 @@ void init_cma_reserved_pageblock(struct page *page);
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 
 int find_suitable_fallback(struct free_area *area, unsigned int order,
-			int migratetype, bool only_stealable, bool *can_steal);
+			int migratetype, bool claimable);
 
 static inline bool free_area_empty(struct free_area *area, int migratetype)
 {
@@ -713,6 +713,7 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
 		return page_folio(page);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(vm_normal_folio_pmd);
 #endif
 
 static void restore_exclusive_pte(struct vm_area_struct *vma,
mm/page_alloc.c (699 changed lines)
@@ -1518,11 +1518,11 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
  *
  * -- nyc
  */
-static inline void expand(struct zone *zone, struct page *page,
-	int low, int high, int migratetype)
+static inline unsigned int expand(struct zone *zone, struct page *page, int low,
+	int high, int migratetype)
 {
-	unsigned long size = 1 << high;
-	unsigned long nr_added = 0;
+	unsigned int size = 1 << high;
+	unsigned int nr_added = 0;
 
 	while (high > low) {
 		high--;
@@ -1542,7 +1542,19 @@ static inline void expand(struct zone *zone, struct page *page,
 		set_buddy_order(&page[size], high);
 		nr_added += size;
 	}
-	account_freepages(zone, nr_added, migratetype);
+
+	return nr_added;
+}
+
+static __always_inline void page_del_and_expand(struct zone *zone,
+						struct page *page, int low,
+						int high, int migratetype)
+{
+	int nr_pages = 1 << high;
+
+	__del_page_from_free_list(page, zone, high, migratetype);
+	nr_pages -= expand(zone, page, low, high, migratetype);
+	account_freepages(zone, -nr_pages, migratetype);
 }
 
 static void check_new_page_bad(struct page *page)
@@ -1727,8 +1739,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 		page = get_page_from_free_area(area, migratetype);
 		if (!page)
 			continue;
-		del_page_from_free_list(page, zone, current_order, migratetype);
-		expand(zone, page, order, current_order, migratetype);
+
+		page_del_and_expand(zone, page, order, current_order,
+				    migratetype);
 		trace_mm_page_alloc_zone_locked(page, order, migratetype,
 				pcp_allowed_order(order) &&
 				migratetype < MIGRATE_PCPTYPES);
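The expand()/page_del_and_expand() hunks above move the freepage accounting out of expand(): it now only reports how many pages it returned to the freelists, and the caller charges the net difference in a single call. A self-contained sketch of that arithmetic, with a plain counter standing in for account_freepages() and simplified helpers that are not the kernel's:

#include <stdio.h>

static long freepages;	/* stand-in for per-migratetype freepage accounting */

static void account(long delta)
{
	freepages += delta;
}

/* Split a high-order block down to 'low', returning the pages re-added. */
static unsigned int expand_block(int low, int high)
{
	unsigned int size = 1u << high;
	unsigned int nr_added = 0;

	while (high > low) {
		high--;
		size >>= 1;
		nr_added += size;	/* the buddy of this order stays free */
	}
	return nr_added;
}

/* Delete a 'high'-order page, keep an order-'low' chunk, account once. */
static void page_del_and_expand(int low, int high)
{
	int nr_pages = 1 << high;

	nr_pages -= expand_block(low, high);
	account(-nr_pages);	/* only the allocated remainder leaves the freelists */
}

int main(void)
{
	freepages = 1 << 4;		/* one order-4 block is free */
	page_del_and_expand(1, 4);	/* allocate an order-1 chunk out of it */
	printf("freepages now %ld\n", freepages);	/* 16 - 2 = 14 */
	return 0;
}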
@@ -1766,18 +1779,18 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  * Change the type of a block and move all its free pages to that
  * type's freelist.
  */
-static int move_freepages(struct zone *zone, unsigned long start_pfn,
-			  unsigned long end_pfn, int old_mt, int new_mt)
+static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
+				  int old_mt, int new_mt)
 {
 	struct page *page;
-	unsigned long pfn;
+	unsigned long pfn, end_pfn;
 	unsigned int order;
 	int pages_moved = 0;
 
 	VM_WARN_ON(start_pfn & (pageblock_nr_pages - 1));
-	VM_WARN_ON(start_pfn + pageblock_nr_pages - 1 != end_pfn);
+	end_pfn = pageblock_end_pfn(start_pfn);
 
-	for (pfn = start_pfn; pfn <= end_pfn;) {
+	for (pfn = start_pfn; pfn < end_pfn;) {
 		page = pfn_to_page(pfn);
 		if (!PageBuddy(page)) {
 			pfn++;
@@ -1803,14 +1816,13 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn,
 
 static bool prep_move_freepages_block(struct zone *zone, struct page *page,
 				       unsigned long *start_pfn,
-				       unsigned long *end_pfn,
 				       int *num_free, int *num_movable)
 {
 	unsigned long pfn, start, end;
 
 	pfn = page_to_pfn(page);
 	start = pageblock_start_pfn(pfn);
-	end = pageblock_end_pfn(pfn) - 1;
+	end = pageblock_end_pfn(pfn);
 
 	/*
 	 * The caller only has the lock for @zone, don't touch ranges
@@ -1821,16 +1833,15 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
 	 */
 	if (!zone_spans_pfn(zone, start))
 		return false;
-	if (!zone_spans_pfn(zone, end))
+	if (!zone_spans_pfn(zone, end - 1))
 		return false;
 
 	*start_pfn = start;
-	*end_pfn = end;
 
 	if (num_free) {
 		*num_free = 0;
 		*num_movable = 0;
-		for (pfn = start; pfn <= end;) {
+		for (pfn = start; pfn < end;) {
 			page = pfn_to_page(pfn);
 			if (PageBuddy(page)) {
 				int nr = 1 << buddy_order(page);
@@ -1856,13 +1867,12 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page,
 static int move_freepages_block(struct zone *zone, struct page *page,
 				int old_mt, int new_mt)
 {
-	unsigned long start_pfn, end_pfn;
+	unsigned long start_pfn;
 
-	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
-				       NULL, NULL))
+	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
 		return -1;
 
-	return move_freepages(zone, start_pfn, end_pfn, old_mt, new_mt);
+	return __move_freepages_block(zone, start_pfn, old_mt, new_mt);
 }
 
 #ifdef CONFIG_MEMORY_ISOLATION
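With __move_freepages_block() deriving the block end itself, callers pass only the aligned start pfn and the iteration becomes half-open (pfn < end_pfn). A tiny sketch of that bound computation, assuming a 512-page (order-9) pageblock and a locally reimplemented pageblock_end_pfn(); both assumptions are mine, not taken from the patch:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumed order-9 pageblock */

/* First pfn past the block containing 'pfn' (exclusive bound). */
static unsigned long pageblock_end_pfn(unsigned long pfn)
{
	return (pfn & ~(PAGEBLOCK_NR_PAGES - 1)) + PAGEBLOCK_NR_PAGES;
}

int main(void)
{
	unsigned long start_pfn = 4096;	/* block-aligned, as the caller guarantees */
	unsigned long end_pfn = pageblock_end_pfn(start_pfn);
	unsigned long pfn, visited = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn++)	/* half-open: end_pfn excluded */
		visited++;

	printf("visited %lu pfns in [%lu, %lu)\n", visited, start_pfn, end_pfn);
	return 0;
}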
@@ -1933,10 +1943,9 @@ static void split_large_buddy(struct zone *zone, struct page *page,
 bool move_freepages_block_isolate(struct zone *zone, struct page *page,
 				  int migratetype)
 {
-	unsigned long start_pfn, end_pfn, pfn;
+	unsigned long start_pfn, pfn;
 
-	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
-				       NULL, NULL))
+	if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
 		return false;
 
 	/* No splits needed if buddies can't span multiple blocks */
@@ -1967,8 +1976,9 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
 		return true;
 	}
 move:
-	move_freepages(zone, start_pfn, end_pfn,
-		       get_pfnblock_migratetype(page, start_pfn), migratetype);
+	__move_freepages_block(zone, start_pfn,
+			       get_pfnblock_migratetype(page, start_pfn),
+			       migratetype);
 	return true;
 }
 #endif /* CONFIG_MEMORY_ISOLATION */
@@ -1984,39 +1994,6 @@ static void change_pageblock_range(struct page *pageblock_page,
 	}
 }
 
-/*
- * When we are falling back to another migratetype during allocation, try to
- * steal extra free pages from the same pageblocks to satisfy further
- * allocations, instead of polluting multiple pageblocks.
- *
- * If we are stealing a relatively large buddy page, it is likely there will
- * be more free pages in the pageblock, so try to steal them all. For
- * reclaimable and unmovable allocations, we steal regardless of page size,
- * as fragmentation caused by those allocations polluting movable pageblocks
- * is worse than movable allocations stealing from unmovable and reclaimable
- * pageblocks.
- */
-static bool can_steal_fallback(unsigned int order, int start_mt)
-{
-	/*
-	 * Leaving this order check is intended, although there is
-	 * relaxed order check in next check. The reason is that
-	 * we can actually steal whole pageblock if this condition met,
-	 * but, below check doesn't guarantee it and that is just heuristic
-	 * so could be changed anytime.
-	 */
-	if (order >= pageblock_order)
-		return true;
-
-	if (order >= pageblock_order / 2 ||
-		start_mt == MIGRATE_RECLAIMABLE ||
-		start_mt == MIGRATE_UNMOVABLE ||
-		page_group_by_mobility_disabled)
-		return true;
-
-	return false;
-}
-
 static inline bool boost_watermark(struct zone *zone)
 {
 	unsigned long max_boost;
@@ -2055,36 +2032,102 @@
 }
 
 /*
- * This function implements actual steal behaviour. If order is large enough, we
- * can claim the whole pageblock for the requested migratetype. If not, we check
- * the pageblock for constituent pages; if at least half of the pages are free
- * or compatible, we can still claim the whole block, so pages freed in the
- * future will be put on the correct free list. Otherwise, we isolate exactly
- * the order we need from the fallback block and leave its migratetype alone.
+ * When we are falling back to another migratetype during allocation, should we
+ * try to claim an entire block to satisfy further allocations, instead of
+ * polluting multiple pageblocks?
  */
-static struct page *
-steal_suitable_fallback(struct zone *zone, struct page *page,
-			int current_order, int order, int start_type,
-			unsigned int alloc_flags, bool whole_block)
+static bool should_try_claim_block(unsigned int order, int start_mt)
 {
-	int free_pages, movable_pages, alike_pages;
-	unsigned long start_pfn, end_pfn;
-	int block_type;
-
-	block_type = get_pageblock_migratetype(page);
+	/*
+	 * Leaving this order check is intended, although there is
+	 * relaxed order check in next check. The reason is that
+	 * we can actually claim the whole pageblock if this condition met,
+	 * but, below check doesn't guarantee it and that is just heuristic
+	 * so could be changed anytime.
+	 */
+	if (order >= pageblock_order)
+		return true;
 
 	/*
-	 * This can happen due to races and we want to prevent broken
-	 * highatomic accounting.
+	 * Above a certain threshold, always try to claim, as it's likely there
+	 * will be more free pages in the pageblock.
 	 */
-	if (is_migrate_highatomic(block_type))
-		goto single_page;
+	if (order >= pageblock_order / 2)
+		return true;
+
+	/*
+	 * Unmovable/reclaimable allocations would cause permanent
+	 * fragmentations if they fell back to allocating from a movable block
+	 * (polluting it), so we try to claim the whole block regardless of the
+	 * allocation size. Later movable allocations can always steal from this
+	 * block, which is less problematic.
+	 */
+	if (start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE)
+		return true;
+
+	if (page_group_by_mobility_disabled)
+		return true;
+
+	/*
+	 * Movable pages won't cause permanent fragmentation, so when you alloc
+	 * small pages, we just need to temporarily steal unmovable or
+	 * reclaimable pages that are closest to the request size. After a
+	 * while, memory compaction may occur to form large contiguous pages,
+	 * and the next movable allocation may not need to steal.
+	 */
+	return false;
+}
+
+/*
+ * Check whether there is a suitable fallback freepage with requested order.
+ * If claimable is true, this function returns fallback_mt only if
+ * we would do this whole-block claiming. This would help to reduce
+ * fragmentation due to mixed migratetype pages in one pageblock.
+ */
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+			   int migratetype, bool claimable)
+{
+	int i;
+
+	if (claimable && !should_try_claim_block(order, migratetype))
+		return -2;
+
+	if (area->nr_free == 0)
+		return -1;
+
+	for (i = 0; i < MIGRATE_FALLBACKS - 1 ; i++) {
+		int fallback_mt = fallbacks[migratetype][i];
+
+		if (!free_area_empty(area, fallback_mt))
+			return fallback_mt;
+	}
+
+	return -1;
+}
+
+/*
+ * This function implements actual block claiming behaviour. If order is large
+ * enough, we can claim the whole pageblock for the requested migratetype. If
+ * not, we check the pageblock for constituent pages; if at least half of the
+ * pages are free or compatible, we can still claim the whole block, so pages
+ * freed in the future will be put on the correct free list.
+ */
+static struct page *
+try_to_claim_block(struct zone *zone, struct page *page,
+		   int current_order, int order, int start_type,
+		   int block_type, unsigned int alloc_flags)
+{
+	int free_pages, movable_pages, alike_pages;
+	unsigned long start_pfn;
 
 	/* Take ownership for orders >= pageblock_order */
 	if (current_order >= pageblock_order) {
+		unsigned int nr_added;
+
 		del_page_from_free_list(page, zone, current_order, block_type);
 		change_pageblock_range(page, current_order, start_type);
-		expand(zone, page, order, current_order, start_type);
+		nr_added = expand(zone, page, order, current_order, start_type);
+		account_freepages(zone, nr_added, start_type);
 		return page;
 	}
 
@@ -2096,14 +2139,10 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
 		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
 
-	/* We are not allowed to try stealing from the whole block */
-	if (!whole_block)
-		goto single_page;
-
 	/* moving whole block can fail due to zone boundary conditions */
-	if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn,
-				       &free_pages, &movable_pages))
-		goto single_page;
+	if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
+				       &movable_pages))
+		return NULL;
 
 	/*
 	 * Determine how many pages are compatible with our allocation.
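The claim-versus-steal split above gives find_suitable_fallback() three outcomes: a fallback migratetype (>= 0), nothing free at this order (-1), or an order too small to justify claiming a whole block (-2). A compact userspace model of that decision follows; the migratetype constants, the pageblock order and the simplified free-count parameter are illustrative assumptions, and the kernel's extra page_group_by_mobility_disabled case is left out:

#include <stdbool.h>
#include <stdio.h>

enum { MT_UNMOVABLE, MT_MOVABLE, MT_RECLAIMABLE };	/* illustrative only */
#define PAGEBLOCK_ORDER 9

static bool should_try_claim_block(unsigned int order, int start_mt)
{
	if (order >= PAGEBLOCK_ORDER)		/* whole block anyway */
		return true;
	if (order >= PAGEBLOCK_ORDER / 2)	/* big enough: likely more free pages */
		return true;
	/* block-polluting allocation types always try to claim a whole block */
	if (start_mt == MT_RECLAIMABLE || start_mt == MT_UNMOVABLE)
		return true;
	return false;	/* small movable request: steal a single page later */
}

/* -2: below the claim threshold, -1: nothing free, >= 0: fallback type */
static int find_suitable_fallback(unsigned int nr_free, unsigned int order,
				  int start_mt, bool claimable, int fallback_mt)
{
	if (claimable && !should_try_claim_block(order, start_mt))
		return -2;
	if (nr_free == 0)
		return -1;
	return fallback_mt;
}

int main(void)
{
	printf("%d\n", find_suitable_fallback(8, 2, MT_MOVABLE, true, MT_UNMOVABLE));	/* -2 */
	printf("%d\n", find_suitable_fallback(0, 6, MT_MOVABLE, true, MT_UNMOVABLE));	/* -1 */
	printf("%d\n", find_suitable_fallback(8, 2, MT_UNMOVABLE, true, MT_MOVABLE));	/* 1  */
	return 0;
}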
@@ -2132,216 +2171,23 @@ steal_suitable_fallback(struct zone *zone, struct page *page,
 	 */
 	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
 			page_group_by_mobility_disabled) {
-		move_freepages(zone, start_pfn, end_pfn, block_type, start_type);
+		__move_freepages_block(zone, start_pfn, block_type, start_type);
 		return __rmqueue_smallest(zone, order, start_type);
 	}
 
-single_page:
-	del_page_from_free_list(page, zone, current_order, block_type);
-	expand(zone, page, order, current_order, block_type);
-	return page;
+	return NULL;
 }
 
 /*
- * Check whether there is a suitable fallback freepage with requested order.
- * If only_stealable is true, this function returns fallback_mt only if
- * we can steal other freepages all together. This would help to reduce
- * fragmentation due to mixed migratetype pages in one pageblock.
- */
-int find_suitable_fallback(struct free_area *area, unsigned int order,
-			int migratetype, bool only_stealable, bool *can_steal)
-{
-	int i;
-	int fallback_mt;
-
-	if (area->nr_free == 0)
-		return -1;
-
-	*can_steal = false;
-	for (i = 0; i < MIGRATE_FALLBACKS - 1 ; i++) {
-		fallback_mt = fallbacks[migratetype][i];
-		if (free_area_empty(area, fallback_mt))
-			continue;
-
-		if (can_steal_fallback(order, migratetype))
-			*can_steal = true;
-
-		if (!only_stealable)
-			return fallback_mt;
-
-		if (*can_steal)
-			return fallback_mt;
-	}
-
-	return -1;
-}
-
-/*
- * Reserve the pageblock(s) surrounding an allocation request for
- * exclusive use of high-order atomic allocations if there are no
- * empty page blocks that contain a page with a suitable order
- */
-static void reserve_highatomic_pageblock(struct page *page, int order,
-					 struct zone *zone)
-{
-	int mt;
-	unsigned long max_managed, flags;
-	bool bypass = false;
-
-	/*
-	 * The number reserved as: minimum is 1 pageblock, maximum is
-	 * roughly 1% of a zone. But if 1% of a zone falls below a
-	 * pageblock size, then don't reserve any pageblocks.
-	 * Check is race-prone but harmless.
-	 */
-	if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
-		return;
-	max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
-	if (zone->nr_reserved_highatomic >= max_managed)
-		return;
-	trace_android_vh_reserve_highatomic_bypass(page, &bypass);
-	if (bypass)
-		return;
-
-	spin_lock_irqsave(&zone->lock, flags);
-
-	/* Recheck the nr_reserved_highatomic limit under the lock */
-	if (zone->nr_reserved_highatomic >= max_managed)
-		goto out_unlock;
-
-	/* Yoink! */
-	mt = get_pageblock_migratetype(page);
-	/* Only reserve normal pageblocks (i.e., they can merge with others) */
-	if (!migratetype_is_mergeable(mt))
-		goto out_unlock;
-
-	if (order < pageblock_order) {
-		if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
-			goto out_unlock;
-		zone->nr_reserved_highatomic += pageblock_nr_pages;
-	} else {
-		change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
-		zone->nr_reserved_highatomic += 1 << order;
-	}
-
-out_unlock:
-	spin_unlock_irqrestore(&zone->lock, flags);
-}
-
-/*
- * Used when an allocation is about to fail under memory pressure. This
- * potentially hurts the reliability of high-order allocations when under
- * intense memory pressure but failed atomic allocations should be easier
- * to recover from than an OOM.
- *
- * If @force is true, try to unreserve pageblocks even though highatomic
- * pageblock is exhausted.
- */
-static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
-						bool force)
-{
-	struct zonelist *zonelist = ac->zonelist;
-	unsigned long flags;
-	struct zoneref *z;
-	struct zone *zone;
-	struct page *page;
-	int order;
-	bool skip_unreserve_highatomic = false;
-	int ret;
-
-	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
-								ac->nodemask) {
-		/*
-		 * Preserve at least one pageblock unless memory pressure
-		 * is really high.
-		 */
-		if (!force && zone->nr_reserved_highatomic <=
-					pageblock_nr_pages)
-			continue;
-
-		trace_android_vh_unreserve_highatomic_bypass(force, zone,
-				&skip_unreserve_highatomic);
-		if (skip_unreserve_highatomic)
-			continue;
-
-		spin_lock_irqsave(&zone->lock, flags);
-		for (order = 0; order < NR_PAGE_ORDERS; order++) {
-			struct free_area *area = &(zone->free_area[order]);
-			int mt;
-
-			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
-			if (!page)
-				continue;
-
-			mt = get_pageblock_migratetype(page);
-			/*
-			 * In page freeing path, migratetype change is racy so
-			 * we can counter several free pages in a pageblock
-			 * in this loop although we changed the pageblock type
-			 * from highatomic to ac->migratetype. So we should
-			 * adjust the count once.
-			 */
-			if (is_migrate_highatomic(mt)) {
-				unsigned long size;
-				/*
-				 * It should never happen but changes to
-				 * locking could inadvertently allow a per-cpu
-				 * drain to add pages to MIGRATE_HIGHATOMIC
-				 * while unreserving so be safe and watch for
-				 * underflows.
-				 */
-				size = max(pageblock_nr_pages, 1UL << order);
-				size = min(size, zone->nr_reserved_highatomic);
-				zone->nr_reserved_highatomic -= size;
-			}
-
-			/*
-			 * Convert to ac->migratetype and avoid the normal
-			 * pageblock stealing heuristics. Minimally, the caller
-			 * is doing the work and needs the pages. More
-			 * importantly, if the block was always converted to
-			 * MIGRATE_UNMOVABLE or another type then the number
-			 * of pageblocks that cannot be completely freed
-			 * may increase.
-			 */
-			if (order < pageblock_order)
-				ret = move_freepages_block(zone, page, mt,
-							   ac->migratetype);
-			else {
-				move_to_free_list(page, zone, order, mt,
-						  ac->migratetype);
-				change_pageblock_range(page, order,
-						       ac->migratetype);
-				ret = 1;
-			}
-			/*
-			 * Reserving the block(s) already succeeded,
-			 * so this should not fail on zone boundaries.
-			 */
-			WARN_ON_ONCE(ret == -1);
-			if (ret > 0) {
-				spin_unlock_irqrestore(&zone->lock, flags);
-				return ret;
-			}
-		}
-		spin_unlock_irqrestore(&zone->lock, flags);
-	}
-
-	return false;
-}
-
-/*
- * Try finding a free buddy page on the fallback list and put it on the free
- * list of requested migratetype, possibly along with other pages from the same
- * block, depending on fragmentation avoidance heuristics. Returns true if
- * fallback was found so that __rmqueue_smallest() can grab it.
+ * Try to allocate from some fallback migratetype by claiming the entire block,
+ * i.e. converting it to the allocation's start migratetype.
  *
  * The use of signed ints for order and current_order is a deliberate
  * deviation from the rest of this file, to make the for loop
  * condition simpler.
  */
 static __always_inline struct page *
-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
+__rmqueue_claim(struct zone *zone, int order, int start_migratetype,
 		   unsigned int alloc_flags)
 {
 	struct free_area *area;
@@ -2349,7 +2195,6 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 	int min_order = order;
 	struct page *page;
 	int fallback_mt;
-	bool can_steal;
 
 	/*
 	 * Do not steal pages from freelists belonging to other pageblocks
@@ -2368,62 +2213,73 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
 			--current_order) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
-				start_migratetype, false, &can_steal);
+				start_migratetype, true);
+
+		/* No block in that order */
 		if (fallback_mt == -1)
 			continue;
 
-		/*
-		 * We cannot steal all free pages from the pageblock and the
-		 * requested migratetype is movable. In that case it's better to
-		 * steal and split the smallest available page instead of the
-		 * largest available page, because even if the next movable
-		 * allocation falls back into a different pageblock than this
-		 * one, it won't cause permanent fragmentation.
-		 */
-		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
-					&& current_order > order)
-			goto find_smallest;
+		/* Advanced into orders too low to claim, abort */
+		if (fallback_mt == -2)
+			break;
 
-		goto do_steal;
+		page = get_page_from_free_area(area, fallback_mt);
+		page = try_to_claim_block(zone, page, current_order, order,
+					  start_migratetype, fallback_mt,
+					  alloc_flags);
+		if (page) {
+			trace_mm_page_alloc_extfrag(page, order, current_order,
+						    start_migratetype, fallback_mt);
+			return page;
+		}
 	}
 
 	return NULL;
+}
+
+/*
+ * Try to steal a single page from some fallback migratetype. Leave the rest of
+ * the block as its current migratetype, potentially causing fragmentation.
+ */
+static __always_inline struct page *
+__rmqueue_steal(struct zone *zone, int order, int start_migratetype)
+{
+	struct free_area *area;
+	int current_order;
+	struct page *page;
+	int fallback_mt;
 
-find_smallest:
 	for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
-				start_migratetype, false, &can_steal);
-		if (fallback_mt != -1)
-			break;
+				start_migratetype, false);
+		if (fallback_mt == -1)
+			continue;
+
+		page = get_page_from_free_area(area, fallback_mt);
+		page_del_and_expand(zone, page, order, current_order, fallback_mt);
+		trace_mm_page_alloc_extfrag(page, order, current_order,
+					    start_migratetype, fallback_mt);
+		return page;
 	}
 
-	/*
-	 * This should not happen - we already found a suitable fallback
-	 * when looking for the largest page.
-	 */
-	VM_BUG_ON(current_order > MAX_ORDER);
-
-do_steal:
-	page = get_page_from_free_area(area, fallback_mt);
-
-	/* take off list, maybe claim block, expand remainder */
-	page = steal_suitable_fallback(zone, page, current_order, order,
-				       start_migratetype, alloc_flags, can_steal);
-
-	trace_mm_page_alloc_extfrag(page, order, current_order,
-				    start_migratetype, fallback_mt);
-
-	return page;
+	return NULL;
 }
 
+enum rmqueue_mode {
+	RMQUEUE_NORMAL,
+	RMQUEUE_CMA,
+	RMQUEUE_CLAIM,
+	RMQUEUE_STEAL,
+};
+
 /*
  * Do the hard work of removing an element from the buddy allocator.
  * Call me with the zone->lock already held.
 */
 static __always_inline struct page *
 __rmqueue(struct zone *zone, unsigned int order, int migratetype,
-	  unsigned int alloc_flags)
+	  unsigned int alloc_flags, enum rmqueue_mode *mode)
 {
 	struct page *page = NULL;
 
@@ -2446,16 +2302,48 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 		}
 	}
 
-	page = __rmqueue_smallest(zone, order, migratetype);
-	if (unlikely(!page)) {
-		if (!cma_redirect_restricted() && alloc_flags & ALLOC_CMA)
+	/*
+	 * First try the freelists of the requested migratetype, then try
+	 * fallbacks modes with increasing levels of fragmentation risk.
+	 *
+	 * The fallback logic is expensive and rmqueue_bulk() calls in
+	 * a loop with the zone->lock held, meaning the freelists are
+	 * not subject to any outside changes. Remember in *mode where
+	 * we found pay dirt, to save us the search on the next call.
+	 */
+	switch (*mode) {
+	case RMQUEUE_NORMAL:
+		page = __rmqueue_smallest(zone, order, migratetype);
+		if (page)
+			return page;
+		fallthrough;
+	case RMQUEUE_CMA:
+		if (!cma_redirect_restricted() && alloc_flags & ALLOC_CMA) {
 			page = __rmqueue_cma_fallback(zone, order);
-
-		if (!page)
-			page = __rmqueue_fallback(zone, order, migratetype,
-						  alloc_flags);
+			if (page) {
+				*mode = RMQUEUE_CMA;
+				return page;
+			}
+		}
+		fallthrough;
+	case RMQUEUE_CLAIM:
+		page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
+		if (page) {
+			/* Replenished preferred freelist, back to normal mode. */
+			*mode = RMQUEUE_NORMAL;
+			return page;
+		}
+		fallthrough;
+	case RMQUEUE_STEAL:
+		if (!(alloc_flags & ALLOC_NOFRAGMENT)) {
+			page = __rmqueue_steal(zone, order, migratetype);
+			if (page) {
+				*mode = RMQUEUE_STEAL;
+				return page;
+			}
+		}
 	}
-	return page;
+	return NULL;
 }
 
 /*
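The switch added to __rmqueue() above walks the fallback levels in order of increasing fragmentation risk and caches the first level that produced a page, so the next call from the same zone->lock-held rmqueue_bulk() loop can start there. A sketch of that mode-caching idea; the enum mirrors the patch, but the per-level stub allocator and its replenish-on-claim behaviour are simplifications of mine:

#include <stdio.h>

enum rmqueue_mode { RMQUEUE_NORMAL, RMQUEUE_CMA, RMQUEUE_CLAIM, RMQUEUE_STEAL };

/* Stub freelists: how many pages each fallback level can still hand out. */
static int avail[4] = { 0, 0, 3, 5 };

static int take(enum rmqueue_mode level)
{
	if (avail[level] > 0) {
		avail[level]--;
		return 1;	/* "a page" */
	}
	return 0;
}

/* Walk the levels from *mode onward; remember where we found pay dirt. */
static int rmqueue(enum rmqueue_mode *mode)
{
	switch (*mode) {
	case RMQUEUE_NORMAL:
		if (take(RMQUEUE_NORMAL))
			return 1;
		/* fall through */
	case RMQUEUE_CMA:
		if (take(RMQUEUE_CMA)) {
			*mode = RMQUEUE_CMA;
			return 1;
		}
		/* fall through */
	case RMQUEUE_CLAIM:
		if (take(RMQUEUE_CLAIM)) {
			/* model a claimed block replenishing the preferred freelist */
			avail[RMQUEUE_NORMAL] += 2;
			*mode = RMQUEUE_NORMAL;
			return 1;
		}
		/* fall through */
	case RMQUEUE_STEAL:
		if (take(RMQUEUE_STEAL)) {
			*mode = RMQUEUE_STEAL;
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	enum rmqueue_mode mode = RMQUEUE_NORMAL;
	int i, got = 0;

	for (i = 0; i < 10; i++)	/* models one rmqueue_bulk() batch */
		got += rmqueue(&mode);
	printf("allocated %d pages, final mode %d\n", got, mode);
	return 0;
}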
@@ -2467,6 +2355,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, unsigned int alloc_flags)
 {
+	enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
 	unsigned long flags;
 	int i;
 
@@ -2482,7 +2371,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 		if (cma_redirect_restricted() && is_migrate_cma(migratetype))
 			page = __rmqueue_cma_fallback(zone, order);
 		else
-			page = __rmqueue(zone, order, migratetype, alloc_flags);
+			page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
 		if (unlikely(page == NULL))
 			break;
@@ -3038,9 +2927,12 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 			    alloc_flags & ALLOC_CMA)
 				page = __rmqueue_cma_fallback(zone, order);
 
-			if (!page)
+			if (!page) {
+				enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
+
 				page = __rmqueue(zone, order, migratetype,
-						 alloc_flags);
+						 alloc_flags, &rmqm);
+			}
 
 			/*
 			 * If the allocation fails, allow OOM handling and
 			 * order-0 (atomic) allocs access to HIGHATOMIC
@@ -3225,6 +3117,151 @@ noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 }
 ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
 
+/*
+ * Reserve the pageblock(s) surrounding an allocation request for
+ * exclusive use of high-order atomic allocations if there are no
+ * empty page blocks that contain a page with a suitable order
+ */
+static void reserve_highatomic_pageblock(struct page *page, int order,
+					 struct zone *zone)
+{
+	int mt;
+	unsigned long max_managed, flags;
+	bool bypass = false;
+
+	/*
+	 * The number reserved as: minimum is 1 pageblock, maximum is
+	 * roughly 1% of a zone. But if 1% of a zone falls below a
+	 * pageblock size, then don't reserve any pageblocks.
+	 * Check is race-prone but harmless.
+	 */
+	if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
+		return;
+	max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
+	if (zone->nr_reserved_highatomic >= max_managed)
+		return;
+	trace_android_vh_reserve_highatomic_bypass(page, &bypass);
+	if (bypass)
+		return;
+
+	spin_lock_irqsave(&zone->lock, flags);
+
+	/* Recheck the nr_reserved_highatomic limit under the lock */
+	if (zone->nr_reserved_highatomic >= max_managed)
+		goto out_unlock;
+
+	/* Yoink! */
+	mt = get_pageblock_migratetype(page);
+	/* Only reserve normal pageblocks (i.e., they can merge with others) */
+	if (!migratetype_is_mergeable(mt))
+		goto out_unlock;
+
+	if (order < pageblock_order) {
+		if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
+			goto out_unlock;
+		zone->nr_reserved_highatomic += pageblock_nr_pages;
+	} else {
+		change_pageblock_range(page, order, MIGRATE_HIGHATOMIC);
+		zone->nr_reserved_highatomic += 1 << order;
+	}
+
+out_unlock:
+	spin_unlock_irqrestore(&zone->lock, flags);
+}
+
+/*
+ * Used when an allocation is about to fail under memory pressure. This
+ * potentially hurts the reliability of high-order allocations when under
+ * intense memory pressure but failed atomic allocations should be easier
+ * to recover from than an OOM.
+ *
+ * If @force is true, try to unreserve pageblocks even though highatomic
+ * pageblock is exhausted.
+ */
+static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
+					   bool force)
+{
+	struct zonelist *zonelist = ac->zonelist;
+	unsigned long flags;
+	struct zoneref *z;
+	struct zone *zone;
+	struct page *page;
+	int order;
+	bool skip_unreserve_highatomic = false;
+	int ret;
+
+	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
+								ac->nodemask) {
+		/*
+		 * Preserve at least one pageblock unless memory pressure
+		 * is really high.
+		 */
+		if (!force && zone->nr_reserved_highatomic <=
+					pageblock_nr_pages)
+			continue;
+
+		trace_android_vh_unreserve_highatomic_bypass(force, zone,
+				&skip_unreserve_highatomic);
+		if (skip_unreserve_highatomic)
+			continue;
+
+		spin_lock_irqsave(&zone->lock, flags);
+		for (order = 0; order < NR_PAGE_ORDERS; order++) {
+			struct free_area *area = &(zone->free_area[order]);
+			unsigned long size;
+
+			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
+			if (!page)
+				continue;
+
+			/*
+			 * It should never happen but changes to
+			 * locking could inadvertently allow a per-cpu
+			 * drain to add pages to MIGRATE_HIGHATOMIC
+			 * while unreserving so be safe and watch for
+			 * underflows.
+			 */
+			size = max(pageblock_nr_pages, 1UL << order);
+			size = min(size, zone->nr_reserved_highatomic);
+			zone->nr_reserved_highatomic -= size;
+
+			/*
+			 * Convert to ac->migratetype and avoid the normal
+			 * pageblock stealing heuristics. Minimally, the caller
+			 * is doing the work and needs the pages. More
+			 * importantly, if the block was always converted to
+			 * MIGRATE_UNMOVABLE or another type then the number
+			 * of pageblocks that cannot be completely freed
+			 * may increase.
+			 */
+			if (order < pageblock_order)
+				ret = move_freepages_block(zone, page,
+							   MIGRATE_HIGHATOMIC,
+							   ac->migratetype);
+			else {
+				move_to_free_list(page, zone, order,
+						  MIGRATE_HIGHATOMIC,
+						  ac->migratetype);
+				change_pageblock_range(page, order,
						       ac->migratetype);
+				ret = 1;
+			}
+			/*
+			 * Reserving the block(s) already succeeded,
+			 * so this should not fail on zone boundaries.
+			 */
+			WARN_ON_ONCE(ret == -1);
+			if (ret > 0) {
+				spin_unlock_irqrestore(&zone->lock, flags);
+				return ret;
+			}
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
+	}
+
+	return false;
+}
+
 static inline long __zone_watermark_unusable_free(struct zone *z,
 				unsigned int order, unsigned int alloc_flags)
 {
mm/page_io.c (13 changed lines)
@@ -471,6 +471,19 @@ static void swap_readpage_bdev_sync(struct folio *folio,
 	struct bio bio;
 	bool read = false;
 
+	trace_android_rvh_swap_readpage_bdev_sync(sis->bdev,
+		swap_page_sector(&folio->page) + get_start_sect(sis->bdev),
+		&folio->page, &read);
+	if (read) {
+		count_vm_events(PSWPIN, folio_nr_pages(folio));
+		return;
+	}
+
+	/*
+	 * trace_android_vh_swap_readpage_bdev_sync is deprecated, and
+	 * should not be carried over into later kernels.
+	 * Use trace_android_rvh_swap_readpage_bdev_sync instead.
+	 */
 	trace_android_vh_swap_readpage_bdev_sync(sis->bdev,
 		swap_page_sector(&folio->page) + get_start_sect(sis->bdev),
 		&folio->page, &read);
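The restricted hook added above lets a vendor handler claim the synchronous swap read: when it sets *read, the generic bio path is skipped and only the PSWPIN event is counted. A rough sketch of that out-parameter contract using an ordinary function pointer in place of the Android tracehook registration machinery (all names below are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a registered android_rvh_swap_readpage_bdev_sync handler. */
typedef void (*swap_read_hook_t)(unsigned long sector, void *page, bool *read);

static swap_read_hook_t vendor_hook;	/* NULL when no module registered */

static void vendor_reads_page(unsigned long sector, void *page, bool *read)
{
	/* A vendor implementation would issue its own I/O here. */
	printf("vendor handled sector %lu\n", sector);
	*read = true;
}

static void swap_readpage_sync(unsigned long sector, void *page)
{
	bool read = false;

	if (vendor_hook)
		vendor_hook(sector, page, &read);
	if (read) {
		printf("count PSWPIN, skip generic path\n");
		return;
	}
	printf("fall back to the generic bio read\n");
}

int main(void)
{
	char page[4096];

	swap_readpage_sync(128, page);	/* no handler: generic path */
	vendor_hook = vendor_reads_page;
	swap_readpage_sync(128, page);	/* handler claims the read */
	return 0;
}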
@@ -736,6 +736,7 @@ void folio_deactivate(struct folio *folio)
 		local_unlock(&cpu_fbatches.lock);
 	}
 }
+EXPORT_SYMBOL_GPL(folio_deactivate);
 
 /**
  * folio_mark_lazyfree - make an anon folio lazyfree