Merge android15-6.6 into android15-6.6-lts
This merges the android15-6.6 branch into the -lts branch, catching it up
with the latest changes in there.  It contains the following commits:

* 9f62a273a8 Merge tag 'android15-6.6.89_r00' into android15-6.6
* 74885fb49d ANDROID: GKI: Update oplus symbol list
* b7dfd1f443 ANDROID: mm: Rename tracepoint symbol for brevity
* bf4c7e4789 ANDROID: gki_defconfig: Enable Tegra Host1x
* a28d0a49e3 ANDROID: db845c: publish the tests.zip package
* e992e004d7 ANDROID: GKI: update symbol list file for xiaomi
* 80750a978d ANDROID: vendor_hooks: Add hooks for pcp related optimization.
* 3075bdc56b ANDROID: ABI: Update pixel symbol list
* ba26a9d32c ANDROID: vendor_hook: add trace_android_rvh_setscheduler_prio
* 9ba0b26f62 ANDROID: binder: fix minimum node priority comparison
* ed212f01bf ANDROID: 16K: x86_64: Remove /dev/userfaultfd
* 0dac95bbe5 UPSTREAM: mm/vmscan: wake up flushers conditionally to avoid cgroup OOM
* a15483adeb ANDROID: 16K: Remove ELF padding entry from map_file ranges
* f8de2aa994 UPSTREAM: mm: userfaultfd: correct dirty flags set for both present and swap pte
* 81e9737fdd FROMGIT: f2fs: handle error cases of memory donation
* a516066c86 ANDROID: GKI: Update oplus symbol list
* 75fcaabc13 ANDROID: mm: add vendor hook to activate/unactivate folios
* ecf4b9d960 UPSTREAM: net/tls: fix kernel panic when alloc_page failed
* 51c7181ce8 UPSTREAM: net_sched: sch_sfq: move the limit validation
* 408ba9d165 UPSTREAM: net_sched: sch_sfq: use a temporary work area for validating configuration
* 1d830abe10 FROMGIT: genirq: Retain depth for managed IRQs across CPU hotplug
* f57486f69d FROMGIT: ufs: core: support updating device command timeout
* f9fc2b36bc FROMGIT: f2fs: sysfs: export linear_lookup in features directory
* 0dc2db6d2c FROMGIT: f2fs: sysfs: add encoding_flags entry
* 1bae181e45 FROMGIT: f2fs: support to disable linear lookup fallback
* 77a6a96651 FROMGIT: perf/aux: Allocate non-contiguous AUX pages by default
* 33d0bb4218 FROMGIT: dma-buf: insert memory barrier before updating num_fences
* 733da5e0a1 ANDROID: GKI: Update symbol list for vivo
* 64c1ca9367 ANDROID: GKI: Update symbol list file for xiaomi
* c5defcb638 ANDROID: mm: export __pte_offset_map/unuse_swap_pte/read_swap_cache_async
* f0e8c43c00 ANDROID: GKI: Update symbol list file for xiaomi
* 380bf96cc0 ANDROID: vendor_hooks: Add vendor hook for mmput
* 91941f94dd ANDROID: ABI: Update pixel symbol list
* f8b3046f5a ANDROID: vendor_hook: add swap_readpage_bdev_sync
* c2041f7858 ANDROID: GKI: explicit include of stringify.h
* 0ea9105b4a UPSTREAM: codel: remove sch->q.qlen check before qdisc_tree_reduce_backlog()
* 783e329e85 FROMLIST: scsi: ufs: core: Increase the UIC command timeout further
* e4a087feec FROMLIST: BACKPORT: mm: add nr_free_highatomic in show_free_areas
* 4f512a4a91 UPSTREAM: f2fs: optimize f2fs DIO overwrites
* e484e2c6dc ANDROID: ABI: Update pixel symbol list
* 70bc15d432 ANDROID: ABI: Update pixel symbol list
* 8f5494d90c ANDROID: adjust check_flags parameter
* 1133098a07 ANDROID: Update the pixel symbol list
* 69f386369b ANDROID: GKI: Enable CONFIG_MEMFD_ASHMEM_SHIM
* 60334ce1a7 ANDROID: mm: shmem: Use memfd-ashmem-shim ioctl handler
* cdceb5104f ANDROID: mm/memfd-ashmem-shim: Introduce shim layer
* 272d4285dd BACKPORT: FROMGIT: coresight: core: Disable helpers for devices that fail to enable
* 2956315ed5 BACKPORT: FROMGIT: coresight: catu: Introduce refcount and spinlock for enabling/disabling
* 335e1cd00d ANDROID: ABI: Update pixel symbol list
* 4f3001cf60 ANDROID: add microdroid_minimal target
* de7ce42a5b UPSTREAM: exfat: fix just enough dentries but allocate a new cluster to dir
* d99ee12da2 UPSTREAM: firmware: arm_ffa: Skip the first/partition ID when parsing vCPU list
* 5b1e6a4126 ANDROID: db845c: Update symbol list
* ad83b9bbd1 ANDROID: virtual_device: Update symbol list
* c28e15d88c FROMLIST: asm-generic/io.h: Skip trace helpers if rwmmio events are disabled
* 421c7da597 UPSTREAM: firmware: arm_ffa: Explicitly cast return value from NOTIFICATION_INFO_GET
* 9126bec1d3 UPSTREAM: selftests/mm: fix condition in uffd_move_test_common()
* e4e26ed369 UPSTREAM: fork: avoid inappropriate uprobe access to invalid mm
* 30f20c253e UPSTREAM: mm/damon: fix order of arguments in damos_before_apply tracepoint
* fef0f4bb0f UPSTREAM: mm, swap: fix allocation and scanning race with swapoff
* 25b5438c35 UPSTREAM: mm: fix docs for the kernel parameter ``thp_anon=``
* 0733a21734 UPSTREAM: fork: do not invoke uffd on fork if error occurs
* 77b59b4e6f UPSTREAM: PM: domains: Fix alloc/free in dev_pm_domain_attach|detach_list()
* adb161976c UPSTREAM: mm: migrate: annotate data-race in migrate_folio_unmap()
* ed9919b89d UPSTREAM: nios2: Only use built-in devicetree blob if configured to do so
* 18bdd14e45 UPSTREAM: mm/damon/sysfs-schemes: fix wrong DAMOS tried regions update timeout setup
* 958df9c52f UPSTREAM: mm/damon/core: avoid divide-by-zero from pseudo-moving window length calculation
* 7bf8c86a5c ANDROID: modifies the order of log commits after calling the hook

Change-Id: I922ccda9cdfe2cab123cebdf8bde4ee18e48fa98
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -241,6 +241,24 @@ kernel_build(
     page_size = "16k",
 )
 
+kernel_build(
+    name = "kernel_aarch64_microdroid_minimal",
+    srcs = ["//common:kernel_aarch64_sources"],
+    outs = [
+        "Image",
+        "System.map",
+        "modules.builtin",
+        "modules.builtin.modinfo",
+        "vmlinux",
+        "vmlinux.symvers",
+    ],
+    build_config = "build.config.microdroid.aarch64",
+    defconfig_fragments = ["arch/arm64/configs/microdroid_minimal.fragment"],
+    make_goals = [
+        "Image",
+    ],
+)
+
 copy_to_dist_dir(
     name = "kernel_aarch64_microdroid_16k_dist",
     data = [
@@ -261,6 +279,16 @@ copy_to_dist_dir(
     log = "info",
 )
 
+copy_to_dist_dir(
+    name = "kernel_aarch64_microdroid_minimal_dist",
+    data = [
+        ":kernel_aarch64_microdroid_minimal",
+    ],
+    dist_dir = "out/kernel_aarch64_microdroid_minimal/dist",
+    flat = True,
+    log = "info",
+)
+
 # Microdroid is not a real device. The kernel image is built with special
 # configs to reduce the size. Hence, not using mixed build.
 kernel_build(
@@ -605,6 +633,7 @@ copy_to_dist_dir(
         ":kernel_aarch64",
         ":kernel_aarch64_modules",
         ":kernel_aarch64_additional_artifacts",
+        ":tests_zip_arm64",
     ],
     dist_dir = "out/db845/dist",
     flat = True,
@@ -270,7 +270,7 @@ Description:	Shows all enabled kernel features.
 		inode_checksum, flexible_inline_xattr, quota_ino,
 		inode_crtime, lost_found, verity, sb_checksum,
 		casefold, readonly, compression, test_dummy_encryption_v2,
-		atomic_write, pin_file, encrypted_casefold.
+		atomic_write, pin_file, encrypted_casefold, linear_lookup.
 
 What:		/sys/fs/f2fs/<disk>/inject_rate
 Date:		May 2016
@@ -845,3 +845,16 @@ Description:	For several zoned storage devices, vendors will provide extra space
 		reserved_blocks. However, it is not enough, since this extra space should
 		not be shown to users. So, with this new sysfs node, we can hide the space
 		by substracting reserved_blocks from total bytes.
+
+What:		/sys/fs/f2fs/<disk>/encoding_flags
+Date:		April 2025
+Contact:	"Chao Yu" <chao@kernel.org>
+Description:	This is a read-only entry to show the value of sb.s_encoding_flags, the
+		value is hexadecimal.
+
+		============================ ==========
+		Flag_Name                    Flag_Value
+		============================ ==========
+		SB_ENC_STRICT_MODE_FL        0x00000001
+		SB_ENC_NO_COMPAT_FALLBACK_FL 0x00000002
+		============================ ==========
@@ -6478,7 +6478,7 @@
 			0: no polling (default)
 
 	thp_anon=	[KNL]
-			Format: <size>,<size>[KMG]:<state>;<size>-<size>[KMG]:<state>
+			Format: <size>[KMG],<size>[KMG]:<state>;<size>[KMG]-<size>[KMG]:<state>
 			state is one of "always", "madvise", "never" or "inherit".
 			Control the default behavior of the system with respect
 			to anonymous transparent hugepages.
@@ -293,7 +293,7 @@ control by passing the parameter ``transparent_hugepage=always`` or
 kernel command line.
 
 Alternatively, each supported anonymous THP size can be controlled by
-passing ``thp_anon=<size>,<size>[KMG]:<state>;<size>-<size>[KMG]:<state>``,
+passing ``thp_anon=<size>[KMG],<size>[KMG]:<state>;<size>[KMG]-<size>[KMG]:<state>``,
 where ``<size>`` is the THP size (must be a power of 2 of PAGE_SIZE and
 supported anonymous THP) and ``<state>`` is one of ``always``, ``madvise``,
 ``never`` or ``inherit``.
@@ -340891,6 +340891,15 @@ function {
|
||||
return_type_id: 0x6720d32f
|
||||
parameter_id: 0x18af3144
|
||||
}
|
||||
function {
|
||||
id: 0x9bab3090
|
||||
return_type_id: 0x6720d32f
|
||||
parameter_id: 0x18bd6530
|
||||
parameter_id: 0x0c2e195c
|
||||
parameter_id: 0xd0b3a203
|
||||
parameter_id: 0x06835e9c
|
||||
parameter_id: 0x11cfee5a
|
||||
}
|
||||
function {
|
||||
id: 0x9bab32de
|
||||
return_type_id: 0x6720d32f
|
||||
@@ -345833,6 +345842,13 @@ function {
|
||||
parameter_id: 0x18bd6530
|
||||
parameter_id: 0x6d7f5ff6
|
||||
}
|
||||
function {
|
||||
id: 0x9d90f040
|
||||
return_type_id: 0x6720d32f
|
||||
parameter_id: 0x045d9eb9
|
||||
parameter_id: 0x4585663f
|
||||
parameter_id: 0x4585663f
|
||||
}
|
||||
function {
|
||||
id: 0x9d917bd1
|
||||
return_type_id: 0x6720d32f
|
||||
@@ -349069,6 +349085,15 @@ function {
|
||||
parameter_id: 0x0e56cd62
|
||||
parameter_id: 0x6720d32f
|
||||
}
|
||||
function {
|
||||
id: 0x9f8c5a8c
|
||||
return_type_id: 0x6720d32f
|
||||
parameter_id: 0x0a134144
|
||||
parameter_id: 0x21082bfc
|
||||
parameter_id: 0x33756485
|
||||
parameter_id: 0x27162aac
|
||||
parameter_id: 0x2170d06d
|
||||
}
|
||||
function {
|
||||
id: 0x9f8d0629
|
||||
return_type_id: 0x6720d32f
|
||||
@@ -350845,6 +350870,15 @@ function {
|
||||
return_type_id: 0x12209d55
|
||||
parameter_id: 0x06835e9c
|
||||
}
|
||||
function {
|
||||
id: 0xb799ab59
|
||||
return_type_id: 0x06835e9c
|
||||
parameter_id: 0x27162aac
|
||||
parameter_id: 0xf1a6dfed
|
||||
parameter_id: 0x0a134144
|
||||
parameter_id: 0x33756485
|
||||
parameter_id: 0x0c225e5c
|
||||
}
|
||||
function {
|
||||
id: 0xb7d21421
|
||||
return_type_id: 0x06835e9c
|
||||
@@ -353208,6 +353242,13 @@ function {
|
||||
parameter_id: 0x4585663f
|
||||
parameter_id: 0x4585663f
|
||||
}
|
||||
function {
|
||||
id: 0xef327025
|
||||
return_type_id: 0x32bee099
|
||||
parameter_id: 0x21082bfc
|
||||
parameter_id: 0x33756485
|
||||
parameter_id: 0x21082bfc
|
||||
}
|
||||
function {
|
||||
id: 0xef356aa2
|
||||
return_type_id: 0x2e8d5f05
|
||||
@@ -358063,6 +358104,15 @@ elf_symbol {
|
||||
type_id: 0x5659e8fb
|
||||
full_name: "__pskb_pull_tail"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x3a9d1f63
|
||||
name: "__pte_offset_map"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0x8ac1bb00
|
||||
type_id: 0xef327025
|
||||
full_name: "__pte_offset_map"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x541b27fa
|
||||
name: "__pte_offset_map_lock"
|
||||
@@ -360189,6 +360239,15 @@ elf_symbol {
|
||||
type_id: 0x9bdbdcc4
|
||||
full_name: "__traceiter_android_rvh_setscheduler"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x1228e7e9
|
||||
name: "__traceiter_android_rvh_setscheduler_prio"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0x381769d0
|
||||
type_id: 0x9bdbdcc4
|
||||
full_name: "__traceiter_android_rvh_setscheduler_prio"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x73c83ef4
|
||||
name: "__traceiter_android_rvh_shmem_get_folio"
|
||||
@@ -361863,6 +361922,15 @@ elf_symbol {
|
||||
type_id: 0x9bc0ce91
|
||||
full_name: "__traceiter_android_vh_filemap_fault_end"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x980c4b2d
|
||||
name: "__traceiter_android_vh_filemap_fault_pre_folio_locked"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0x907ba169
|
||||
type_id: 0x9b2a7922
|
||||
full_name: "__traceiter_android_vh_filemap_fault_pre_folio_locked"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x734973d4
|
||||
name: "__traceiter_android_vh_filemap_fault_start"
|
||||
@@ -361872,6 +361940,15 @@ elf_symbol {
|
||||
type_id: 0x9bc0ce91
|
||||
full_name: "__traceiter_android_vh_filemap_fault_start"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xddad3a11
|
||||
name: "__traceiter_android_vh_filemap_folio_mapped"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0xf97a448f
|
||||
type_id: 0x9b2a7922
|
||||
full_name: "__traceiter_android_vh_filemap_folio_mapped"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x49c69e22
|
||||
name: "__traceiter_android_vh_filemap_get_folio"
|
||||
@@ -361962,6 +362039,24 @@ elf_symbol {
|
||||
type_id: 0x9bccd612
|
||||
full_name: "__traceiter_android_vh_flush_wq_wait_start"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x5dd57eff
|
||||
name: "__traceiter_android_vh_folio_add_lru_folio_activate"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0xa4608c4d
|
||||
type_id: 0x9b2e0ad9
|
||||
full_name: "__traceiter_android_vh_folio_add_lru_folio_activate"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xef70c295
|
||||
name: "__traceiter_android_vh_folio_remove_rmap_ptes"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0xcab0a328
|
||||
type_id: 0x9b2a7922
|
||||
full_name: "__traceiter_android_vh_folio_remove_rmap_ptes"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x1fca37bf
|
||||
name: "__traceiter_android_vh_folio_trylock_clear"
|
||||
@@ -362007,6 +362102,15 @@ elf_symbol {
|
||||
type_id: 0x9bb6ab04
|
||||
full_name: "__traceiter_android_vh_free_one_page_bypass"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xb3cdc04a
|
||||
name: "__traceiter_android_vh_free_one_page_flag_check"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0x04792578
|
||||
type_id: 0x9bb68fe1
|
||||
full_name: "__traceiter_android_vh_free_one_page_flag_check"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xcbee3d67
|
||||
name: "__traceiter_android_vh_free_pages_ok_bypass"
|
||||
@@ -362826,6 +362930,15 @@ elf_symbol {
|
||||
type_id: 0x9bcd4ff7
|
||||
full_name: "__traceiter_android_vh_mmput"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xb047209a
|
||||
name: "__traceiter_android_vh_mmput_mm"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0x4487b11a
|
||||
type_id: 0x9bc3617d
|
||||
full_name: "__traceiter_android_vh_mmput_mm"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xa77effd1
|
||||
name: "__traceiter_android_vh_modify_scan_control"
|
||||
@@ -363186,6 +363299,15 @@ elf_symbol {
|
||||
type_id: 0x9b26096d
|
||||
full_name: "__traceiter_android_vh_page_should_be_protected"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x1fc96009
|
||||
name: "__traceiter_android_vh_pageset_update"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0xf5c57c7a
|
||||
type_id: 0x9bb71cb9
|
||||
full_name: "__traceiter_android_vh_pageset_update"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x3c5e668a
|
||||
name: "__traceiter_android_vh_pagetypeinfo_show"
|
||||
@@ -364221,6 +364343,15 @@ elf_symbol {
|
||||
type_id: 0x9be885da
|
||||
full_name: "__traceiter_android_vh_split_large_folio_bypass"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x62172596
|
||||
name: "__traceiter_android_vh_swap_readpage_bdev_sync"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0x9dd13ec3
|
||||
type_id: 0x9bab3090
|
||||
full_name: "__traceiter_android_vh_swap_readpage_bdev_sync"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xb993d4e0
|
||||
name: "__traceiter_android_vh_swap_writepage"
|
||||
@@ -367173,6 +367304,15 @@ elf_symbol {
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_rvh_setscheduler"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x8a4070f7
|
||||
name: "__tracepoint_android_rvh_setscheduler_prio"
|
||||
is_defined: true
|
||||
symbol_type: OBJECT
|
||||
crc: 0xff3dd73f
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_rvh_setscheduler_prio"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x00b7ed82
|
||||
name: "__tracepoint_android_rvh_shmem_get_folio"
|
||||
@@ -368847,6 +368987,15 @@ elf_symbol {
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_filemap_fault_end"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xf045a23f
|
||||
name: "__tracepoint_android_vh_filemap_fault_pre_folio_locked"
|
||||
is_defined: true
|
||||
symbol_type: OBJECT
|
||||
crc: 0xabdab21f
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_filemap_fault_pre_folio_locked"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xdac9a862
|
||||
name: "__tracepoint_android_vh_filemap_fault_start"
|
||||
@@ -368856,6 +369005,15 @@ elf_symbol {
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_filemap_fault_start"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x9eaf9eab
|
||||
name: "__tracepoint_android_vh_filemap_folio_mapped"
|
||||
is_defined: true
|
||||
symbol_type: OBJECT
|
||||
crc: 0x2ba956e2
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_filemap_folio_mapped"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x6d970e8c
|
||||
name: "__tracepoint_android_vh_filemap_get_folio"
|
||||
@@ -368946,6 +369104,24 @@ elf_symbol {
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_flush_wq_wait_start"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x5027d955
|
||||
name: "__tracepoint_android_vh_folio_add_lru_folio_activate"
|
||||
is_defined: true
|
||||
symbol_type: OBJECT
|
||||
crc: 0x829c52db
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_folio_add_lru_folio_activate"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xb2684007
|
||||
name: "__tracepoint_android_vh_folio_remove_rmap_ptes"
|
||||
is_defined: true
|
||||
symbol_type: OBJECT
|
||||
crc: 0x44c3eae6
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_folio_remove_rmap_ptes"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xa3ede5d5
|
||||
name: "__tracepoint_android_vh_folio_trylock_clear"
|
||||
@@ -368991,6 +369167,15 @@ elf_symbol {
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_free_one_page_bypass"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x46b5c0ec
|
||||
name: "__tracepoint_android_vh_free_one_page_flag_check"
|
||||
is_defined: true
|
||||
symbol_type: OBJECT
|
||||
crc: 0x9f05dcf1
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_free_one_page_flag_check"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x770fd919
|
||||
name: "__tracepoint_android_vh_free_pages_ok_bypass"
|
||||
@@ -369810,6 +369995,15 @@ elf_symbol {
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_mmput"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xcd372e94
|
||||
name: "__tracepoint_android_vh_mmput_mm"
|
||||
is_defined: true
|
||||
symbol_type: OBJECT
|
||||
crc: 0x21144ff6
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_mmput_mm"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xe2ad11db
|
||||
name: "__tracepoint_android_vh_modify_scan_control"
|
||||
@@ -370170,6 +370364,15 @@ elf_symbol {
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_page_should_be_protected"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x94cb1cab
|
||||
name: "__tracepoint_android_vh_pageset_update"
|
||||
is_defined: true
|
||||
symbol_type: OBJECT
|
||||
crc: 0xb9bf3d10
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_pageset_update"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x2f61d5a4
|
||||
name: "__tracepoint_android_vh_pagetypeinfo_show"
|
||||
@@ -371205,6 +371408,15 @@ elf_symbol {
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_split_large_folio_bypass"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xefa20960
|
||||
name: "__tracepoint_android_vh_swap_readpage_bdev_sync"
|
||||
is_defined: true
|
||||
symbol_type: OBJECT
|
||||
crc: 0xf294e790
|
||||
type_id: 0x18ccbd2c
|
||||
full_name: "__tracepoint_android_vh_swap_readpage_bdev_sync"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xd5f8f162
|
||||
name: "__tracepoint_android_vh_swap_writepage"
|
||||
@@ -378016,6 +378228,15 @@ elf_symbol {
|
||||
type_id: 0x9d80e32f
|
||||
full_name: "clk_save_context"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x7a8f92d4
|
||||
name: "clk_set_duty_cycle"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0x665e92a0
|
||||
type_id: 0x9d90f040
|
||||
full_name: "clk_set_duty_cycle"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xb8a759f7
|
||||
name: "clk_set_max_rate"
|
||||
@@ -387128,6 +387349,15 @@ elf_symbol {
|
||||
type_id: 0x94751139
|
||||
full_name: "drm_atomic_helper_swap_state"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x09621b81
|
||||
name: "drm_atomic_helper_unprepare_planes"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0x1cfc8680
|
||||
type_id: 0x1edd7ece
|
||||
full_name: "drm_atomic_helper_unprepare_planes"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xace15cad
|
||||
name: "drm_atomic_helper_update_legacy_modeset_state"
|
||||
@@ -413947,6 +414177,15 @@ elf_symbol {
|
||||
type_id: 0x6599d98d
|
||||
full_name: "read_sanitised_ftr_reg"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x04f125b0
|
||||
name: "read_swap_cache_async"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0xdb73214a
|
||||
type_id: 0xb799ab59
|
||||
full_name: "read_swap_cache_async"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x68b1776b
|
||||
name: "readahead_expand"
|
||||
@@ -428480,6 +428719,15 @@ elf_symbol {
|
||||
type_id: 0x13e0bf07
|
||||
full_name: "unregister_wide_hw_breakpoint"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x7a4af1ff
|
||||
name: "unuse_swap_pte"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0xe6c4ab11
|
||||
type_id: 0x9f8c5a8c
|
||||
full_name: "unuse_swap_pte"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0xaa01498e
|
||||
name: "up"
|
||||
@@ -432333,6 +432581,15 @@ elf_symbol {
|
||||
type_id: 0x9ea858b8
|
||||
full_name: "vfs_create"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x3d9a7462
|
||||
name: "vfs_fadvise"
|
||||
is_defined: true
|
||||
symbol_type: FUNCTION
|
||||
crc: 0xa875d8db
|
||||
type_id: 0x9b2c93fd
|
||||
full_name: "vfs_fadvise"
|
||||
}
|
||||
elf_symbol {
|
||||
id: 0x39607d61
|
||||
name: "vfs_fsync"
|
||||
@@ -435702,6 +435959,7 @@ interface {
|
||||
symbol_id: 0xc37b6db0
|
||||
symbol_id: 0xa676022e
|
||||
symbol_id: 0x577da3d6
|
||||
symbol_id: 0x3a9d1f63
|
||||
symbol_id: 0x541b27fa
|
||||
symbol_id: 0xe573a8ef
|
||||
symbol_id: 0xf00cbe99
|
||||
@@ -435938,6 +436196,7 @@ interface {
|
||||
symbol_id: 0x9b0cc890
|
||||
symbol_id: 0x559e0725
|
||||
symbol_id: 0xa01b20ce
|
||||
symbol_id: 0x1228e7e9
|
||||
symbol_id: 0x73c83ef4
|
||||
symbol_id: 0x46515de8
|
||||
symbol_id: 0x3b650ee3
|
||||
@@ -436124,7 +436383,9 @@ interface {
|
||||
symbol_id: 0x93a4717b
|
||||
symbol_id: 0x6091a763
|
||||
symbol_id: 0x19fc04e0
|
||||
symbol_id: 0x980c4b2d
|
||||
symbol_id: 0x734973d4
|
||||
symbol_id: 0xddad3a11
|
||||
symbol_id: 0x49c69e22
|
||||
symbol_id: 0xb7d91f76
|
||||
symbol_id: 0x4eda1196
|
||||
@@ -436135,11 +436396,14 @@ interface {
|
||||
symbol_id: 0xd44631ef
|
||||
symbol_id: 0xf110e9f6
|
||||
symbol_id: 0x5f626e29
|
||||
symbol_id: 0x5dd57eff
|
||||
symbol_id: 0xef70c295
|
||||
symbol_id: 0x1fca37bf
|
||||
symbol_id: 0xe6702595
|
||||
symbol_id: 0xa51a0f0c
|
||||
symbol_id: 0x9e91661b
|
||||
symbol_id: 0xc189c2a7
|
||||
symbol_id: 0xb3cdc04a
|
||||
symbol_id: 0xcbee3d67
|
||||
symbol_id: 0x26a07b7d
|
||||
symbol_id: 0x02fa90e7
|
||||
@@ -436231,6 +436495,7 @@ interface {
|
||||
symbol_id: 0xab76d6cc
|
||||
symbol_id: 0xc168df93
|
||||
symbol_id: 0x5612c9d1
|
||||
symbol_id: 0xb047209a
|
||||
symbol_id: 0xa77effd1
|
||||
symbol_id: 0x306a6019
|
||||
symbol_id: 0xb1c564a3
|
||||
@@ -436271,6 +436536,7 @@ interface {
|
||||
symbol_id: 0x03508ed9
|
||||
symbol_id: 0x3246acbb
|
||||
symbol_id: 0xb4d5ffdc
|
||||
symbol_id: 0x1fc96009
|
||||
symbol_id: 0x3c5e668a
|
||||
symbol_id: 0x13b0736e
|
||||
symbol_id: 0xc72f2012
|
||||
@@ -436386,6 +436652,7 @@ interface {
|
||||
symbol_id: 0xa082ad86
|
||||
symbol_id: 0x57cb67b6
|
||||
symbol_id: 0x7068c1fb
|
||||
symbol_id: 0x62172596
|
||||
symbol_id: 0xb993d4e0
|
||||
symbol_id: 0x445e3749
|
||||
symbol_id: 0x3f14ea30
|
||||
@@ -436714,6 +436981,7 @@ interface {
|
||||
symbol_id: 0x42fff08e
|
||||
symbol_id: 0x74f29f73
|
||||
symbol_id: 0xe48123a4
|
||||
symbol_id: 0x8a4070f7
|
||||
symbol_id: 0x00b7ed82
|
||||
symbol_id: 0xe8cacf26
|
||||
symbol_id: 0x5380a8d5
|
||||
@@ -436900,7 +437168,9 @@ interface {
|
||||
symbol_id: 0x50a83025
|
||||
symbol_id: 0x0c03d499
|
||||
symbol_id: 0x197d6be2
|
||||
symbol_id: 0xf045a23f
|
||||
symbol_id: 0xdac9a862
|
||||
symbol_id: 0x9eaf9eab
|
||||
symbol_id: 0x6d970e8c
|
||||
symbol_id: 0xb34d9200
|
||||
symbol_id: 0x223c9b64
|
||||
@@ -436911,11 +437181,14 @@ interface {
|
||||
symbol_id: 0xb548aa95
|
||||
symbol_id: 0xbb05ffe8
|
||||
symbol_id: 0x0a50cb7b
|
||||
symbol_id: 0x5027d955
|
||||
symbol_id: 0xb2684007
|
||||
symbol_id: 0xa3ede5d5
|
||||
symbol_id: 0x539bf337
|
||||
symbol_id: 0x4f752d52
|
||||
symbol_id: 0x901d0e89
|
||||
symbol_id: 0x04a824b5
|
||||
symbol_id: 0x46b5c0ec
|
||||
symbol_id: 0x770fd919
|
||||
symbol_id: 0x9afee12f
|
||||
symbol_id: 0x55b68bc5
|
||||
@@ -437007,6 +437280,7 @@ interface {
|
||||
symbol_id: 0x0403b7c6
|
||||
symbol_id: 0x3c1c2029
|
||||
symbol_id: 0x41c8d09b
|
||||
symbol_id: 0xcd372e94
|
||||
symbol_id: 0xe2ad11db
|
||||
symbol_id: 0x65023f2f
|
||||
symbol_id: 0xd5097f8d
|
||||
@@ -437047,6 +437321,7 @@ interface {
|
||||
symbol_id: 0x988bfcb7
|
||||
symbol_id: 0x4a5e6e41
|
||||
symbol_id: 0x352038ba
|
||||
symbol_id: 0x94cb1cab
|
||||
symbol_id: 0x2f61d5a4
|
||||
symbol_id: 0xa4c454d8
|
||||
symbol_id: 0x7d42b7c8
|
||||
@@ -437162,6 +437437,7 @@ interface {
|
||||
symbol_id: 0x227ce62c
|
||||
symbol_id: 0xb11763a0
|
||||
symbol_id: 0x02bb68bd
|
||||
symbol_id: 0xefa20960
|
||||
symbol_id: 0xd5f8f162
|
||||
symbol_id: 0x052557d7
|
||||
symbol_id: 0x6fb8113a
|
||||
@@ -437919,6 +438195,7 @@ interface {
|
||||
symbol_id: 0xffe637ce
|
||||
symbol_id: 0xd84adb21
|
||||
symbol_id: 0xed719736
|
||||
symbol_id: 0x7a8f92d4
|
||||
symbol_id: 0xb8a759f7
|
||||
symbol_id: 0xa7901a1d
|
||||
symbol_id: 0x2b1e3d59
|
||||
@@ -438928,6 +439205,7 @@ interface {
|
||||
symbol_id: 0x5f99d405
|
||||
symbol_id: 0x308067a6
|
||||
symbol_id: 0xe375001c
|
||||
symbol_id: 0x09621b81
|
||||
symbol_id: 0xace15cad
|
||||
symbol_id: 0x38dc8db4
|
||||
symbol_id: 0xf8c5a501
|
||||
@@ -441902,6 +442180,7 @@ interface {
|
||||
symbol_id: 0x5b227eda
|
||||
symbol_id: 0x5c3d4ca3
|
||||
symbol_id: 0xfdbe6bc0
|
||||
symbol_id: 0x04f125b0
|
||||
symbol_id: 0x68b1776b
|
||||
symbol_id: 0x2886690b
|
||||
symbol_id: 0x46082c90
|
||||
@@ -443516,6 +443795,7 @@ interface {
|
||||
symbol_id: 0x75699b4c
|
||||
symbol_id: 0xc9d63690
|
||||
symbol_id: 0xe5d6d580
|
||||
symbol_id: 0x7a4af1ff
|
||||
symbol_id: 0xaa01498e
|
||||
symbol_id: 0xe109b3fc
|
||||
symbol_id: 0xb9af2013
|
||||
@@ -443944,6 +444224,7 @@ interface {
|
||||
symbol_id: 0x9181a5f3
|
||||
symbol_id: 0xa7ab6134
|
||||
symbol_id: 0x7b568874
|
||||
symbol_id: 0x3d9a7462
|
||||
symbol_id: 0x39607d61
|
||||
symbol_id: 0x1499f6a1
|
||||
symbol_id: 0xfd383511
|
||||
|
@@ -126,3 +126,9 @@ type 'struct io_ring_ctx' changed
   28 members ('struct wait_queue_head poll_wq' .. 'struct page** sqe_pages') changed
     offset changed by -64
 
+1 function symbol(s) removed
+  'int __traceiter_android_vh_filemap_fault_before_folio_locked(void*, struct folio*)'
+
+1 variable symbol(s) removed
+  'struct tracepoint __tracepoint_android_vh_filemap_fault_before_folio_locked'
+
@@ -796,7 +796,15 @@
 trace_event_raw_init
 trace_event_reg
 trace_handle_return
+__traceiter_rwmmio_post_read
+__traceiter_rwmmio_post_write
+__traceiter_rwmmio_read
+__traceiter_rwmmio_write
 __traceiter_sk_data_ready
+__tracepoint_rwmmio_post_read
+__tracepoint_rwmmio_post_write
+__tracepoint_rwmmio_read
+__tracepoint_rwmmio_write
 __tracepoint_sk_data_ready
 trace_raw_output_prep
 __trace_trigger_soft_disabled
@@ -332,6 +332,10 @@
 __traceiter_android_vh_should_skip_zone
 __traceiter_android_vh_update_unmapped_area_info
 __traceiter_android_vh_reuse_whole_anon_folio
+__tracepoint_android_vh_folio_add_lru_folio_activate
+__tracepoint_android_vh_filemap_fault_pre_folio_locked
+__tracepoint_android_vh_filemap_folio_mapped
+__tracepoint_android_vh_folio_remove_rmap_ptes
 __traceiter_block_bio_queue
 __traceiter_block_getrq
 __traceiter_block_rq_complete
@@ -497,6 +501,10 @@
 __tracepoint_android_vh_should_skip_zone
 __tracepoint_android_vh_update_unmapped_area_info
 __tracepoint_android_vh_reuse_whole_anon_folio
+__traceiter_android_vh_folio_add_lru_folio_activate
+__traceiter_android_vh_filemap_fault_pre_folio_locked
+__traceiter_android_vh_filemap_folio_mapped
+__traceiter_android_vh_folio_remove_rmap_ptes
 __tracepoint_block_bio_queue
 __tracepoint_block_getrq
 __tracepoint_block_rq_complete
@@ -57,6 +57,8 @@
|
||||
balance_push_callback
|
||||
base64_decode
|
||||
bcmp
|
||||
bdev_end_io_acct
|
||||
bdev_start_io_acct
|
||||
bin2hex
|
||||
__bio_add_page
|
||||
bio_add_page
|
||||
@@ -536,6 +538,7 @@
|
||||
devm_watchdog_register_device
|
||||
_dev_notice
|
||||
dev_pm_clear_wake_irq
|
||||
dev_pm_domain_attach
|
||||
dev_pm_domain_attach_by_id
|
||||
dev_pm_domain_attach_by_name
|
||||
dev_pm_domain_detach
|
||||
@@ -712,19 +715,23 @@
|
||||
drm_atomic_helper_bridge_destroy_state
|
||||
__drm_atomic_helper_bridge_duplicate_state
|
||||
drm_atomic_helper_bridge_duplicate_state
|
||||
drm_atomic_helper_bridge_propagate_bus_fmt
|
||||
__drm_atomic_helper_bridge_reset
|
||||
drm_atomic_helper_bridge_reset
|
||||
drm_atomic_helper_calc_timestamping_constants
|
||||
drm_atomic_helper_check
|
||||
drm_atomic_helper_check_modeset
|
||||
drm_atomic_helper_check_planes
|
||||
drm_atomic_helper_check_plane_state
|
||||
drm_atomic_helper_cleanup_planes
|
||||
drm_atomic_helper_commit
|
||||
drm_atomic_helper_commit_cleanup_done
|
||||
drm_atomic_helper_commit_duplicated_state
|
||||
drm_atomic_helper_commit_hw_done
|
||||
drm_atomic_helper_commit_modeset_enables
|
||||
drm_atomic_helper_commit_planes
|
||||
drm_atomic_helper_commit_tail
|
||||
drm_atomic_helper_commit_tail_rpm
|
||||
__drm_atomic_helper_connector_destroy_state
|
||||
drm_atomic_helper_connector_destroy_state
|
||||
__drm_atomic_helper_connector_duplicate_state
|
||||
@@ -733,7 +740,9 @@
|
||||
drm_atomic_helper_connector_reset
|
||||
__drm_atomic_helper_crtc_destroy_state
|
||||
__drm_atomic_helper_crtc_duplicate_state
|
||||
drm_atomic_helper_crtc_duplicate_state
|
||||
__drm_atomic_helper_crtc_reset
|
||||
drm_atomic_helper_crtc_reset
|
||||
drm_atomic_helper_dirtyfb
|
||||
drm_atomic_helper_disable_plane
|
||||
drm_atomic_helper_disable_planes_on_crtc
|
||||
@@ -750,6 +759,7 @@
|
||||
drm_atomic_helper_setup_commit
|
||||
drm_atomic_helper_shutdown
|
||||
drm_atomic_helper_swap_state
|
||||
drm_atomic_helper_unprepare_planes
|
||||
drm_atomic_helper_update_legacy_modeset_state
|
||||
drm_atomic_helper_update_plane
|
||||
drm_atomic_helper_wait_for_dependencies
|
||||
@@ -887,6 +897,7 @@
|
||||
drm_modeset_unlock
|
||||
drm_mode_sort
|
||||
drm_mode_vrefresh
|
||||
__drmm_universal_plane_alloc
|
||||
drm_object_attach_property
|
||||
drm_object_property_set_value
|
||||
drm_of_find_panel_or_bridge
|
||||
@@ -1337,6 +1348,7 @@
|
||||
__ipv6_addr_type
|
||||
__irq_alloc_descs
|
||||
__irq_apply_affinity_hint
|
||||
irq_check_status_bit
|
||||
irq_create_mapping_affinity
|
||||
irq_create_of_mapping
|
||||
irq_dispose_mapping
|
||||
@@ -1348,7 +1360,9 @@
|
||||
irq_domain_xlate_onecell
|
||||
irq_domain_xlate_twocell
|
||||
irq_force_affinity
|
||||
irq_get_irqchip_state
|
||||
irq_get_irq_data
|
||||
irq_get_percpu_devid_partition
|
||||
irq_modify_status
|
||||
irq_of_parse_and_map
|
||||
__irq_resolve_mapping
|
||||
@@ -1357,10 +1371,12 @@
|
||||
irq_set_chained_handler_and_data
|
||||
irq_set_chip_and_handler_name
|
||||
irq_set_chip_data
|
||||
irq_set_irqchip_state
|
||||
irq_set_irq_type
|
||||
irq_set_irq_wake
|
||||
irq_to_desc
|
||||
irq_work_queue
|
||||
irq_work_run
|
||||
irq_work_sync
|
||||
is_vmalloc_addr
|
||||
jiffies
|
||||
@@ -1710,6 +1726,7 @@
|
||||
of_get_named_gpio
|
||||
of_get_next_available_child
|
||||
of_get_next_child
|
||||
of_get_next_cpu_node
|
||||
of_get_next_parent
|
||||
of_get_parent
|
||||
of_get_property
|
||||
@@ -1850,6 +1867,7 @@
|
||||
perf_aux_output_begin
|
||||
perf_aux_output_end
|
||||
perf_aux_output_flag
|
||||
perf_aux_output_skip
|
||||
perf_event_addr_filters_sync
|
||||
perf_event_create_kernel_counter
|
||||
perf_event_disable
|
||||
@@ -1886,6 +1904,7 @@
|
||||
pinctrl_gpio_direction_output
|
||||
pinctrl_lookup_state
|
||||
pinctrl_pm_select_default_state
|
||||
pinctrl_pm_select_idle_state
|
||||
pinctrl_pm_select_sleep_state
|
||||
pinctrl_remove_gpio_range
|
||||
pinctrl_select_state
|
||||
@@ -2100,6 +2119,7 @@
|
||||
regmap_async_complete
|
||||
regmap_bulk_read
|
||||
regmap_bulk_write
|
||||
regmap_check_range_table
|
||||
regmap_field_read
|
||||
regmap_field_update_bits_base
|
||||
regmap_get_device
|
||||
@@ -2150,6 +2170,7 @@
|
||||
remove_proc_subtree
|
||||
remove_resource
|
||||
remove_wait_queue
|
||||
report_iommu_fault
|
||||
request_firmware
|
||||
request_firmware_direct
|
||||
request_firmware_nowait
|
||||
@@ -2271,6 +2292,7 @@
|
||||
__sg_page_iter_next
|
||||
__sg_page_iter_start
|
||||
shmem_file_setup
|
||||
__show_mem
|
||||
si_meminfo
|
||||
simple_attr_open
|
||||
simple_attr_read
|
||||
@@ -2584,6 +2606,7 @@
|
||||
__thermal_zone_get_trip
|
||||
thermal_zone_get_trip
|
||||
thermal_zone_get_zone_by_name
|
||||
this_cpu_has_cap
|
||||
thread_group_cputime_adjusted
|
||||
tick_nohz_get_idle_calls_cpu
|
||||
time64_to_tm
|
||||
@@ -2618,6 +2641,7 @@
|
||||
__traceiter_android_rvh_enqueue_task
|
||||
__traceiter_android_rvh_enqueue_task_fair
|
||||
__traceiter_android_rvh_find_lowest_rq
|
||||
__traceiter_android_rvh_hw_protection_shutdown
|
||||
__traceiter_android_rvh_iommu_alloc_insert_iova
|
||||
__traceiter_android_rvh_iommu_iovad_init_alloc_algo
|
||||
__traceiter_android_rvh_iommu_limit_align_shift
|
||||
@@ -2640,6 +2664,7 @@
|
||||
__traceiter_android_rvh_set_cpus_allowed_by_task
|
||||
__traceiter_android_rvh_set_iowait
|
||||
__traceiter_android_rvh_setscheduler
|
||||
__traceiter_android_rvh_setscheduler_prio
|
||||
__traceiter_android_rvh_set_task_cpu
|
||||
__traceiter_android_rvh_set_user_nice_locked
|
||||
__traceiter_android_rvh_tick_entry
|
||||
@@ -2651,6 +2676,7 @@
|
||||
__traceiter_android_rvh_update_load_avg
|
||||
__traceiter_android_rvh_update_misfit_status
|
||||
__traceiter_android_rvh_update_rt_rq_load_avg
|
||||
__traceiter_android_rvh_usb_dev_suspend
|
||||
__traceiter_android_rvh_util_est_update
|
||||
__traceiter_android_rvh_util_fits_cpu
|
||||
__traceiter_android_rvh_vmscan_kswapd_done
|
||||
@@ -2668,9 +2694,10 @@
|
||||
__traceiter_android_vh_dup_task_struct
|
||||
__traceiter_android_vh_early_resume_begin
|
||||
__traceiter_android_vh_enable_thermal_genl_check
|
||||
__traceiter_android_vh_filemap_get_folio
|
||||
__traceiter_android_vh_free_pages_prepare_init
|
||||
__traceiter_android_vh_ep_create_wakeup_source
|
||||
__traceiter_android_vh_filemap_get_folio
|
||||
__traceiter_android_vh_free_one_page_flag_check
|
||||
__traceiter_android_vh_free_pages_prepare_init
|
||||
__traceiter_android_vh_ipi_stop
|
||||
__traceiter_android_vh_mm_compaction_begin
|
||||
__traceiter_android_vh_mm_compaction_end
|
||||
@@ -2684,9 +2711,11 @@
|
||||
__traceiter_android_vh_setscheduler_uclamp
|
||||
__traceiter_android_vh_set_task_comm
|
||||
__traceiter_android_vh_si_meminfo_adjust
|
||||
__traceiter_android_vh_swap_readpage_bdev_sync
|
||||
__traceiter_android_vh_swap_writepage
|
||||
__traceiter_android_vh_sysrq_crash
|
||||
__traceiter_android_vh_tune_swappiness
|
||||
__traceiter_android_vh_timerfd_create
|
||||
__traceiter_android_vh_tune_swappiness
|
||||
__traceiter_android_vh_typec_store_partner_src_caps
|
||||
__traceiter_android_vh_typec_tcpm_log
|
||||
__traceiter_android_vh_typec_tcpm_modify_src_caps
|
||||
@@ -2700,6 +2729,7 @@
|
||||
__traceiter_android_vh_ufs_send_uic_command
|
||||
__traceiter_android_vh_ufs_update_sdev
|
||||
__traceiter_android_vh_ufs_update_sysfs
|
||||
__traceiter_android_vh_usb_dev_resume
|
||||
__traceiter_android_vh_use_amu_fie
|
||||
__traceiter_clock_set_rate
|
||||
__traceiter_cma_alloc_finish
|
||||
@@ -2720,6 +2750,10 @@
|
||||
__traceiter_pelt_irq_tp
|
||||
__traceiter_pelt_rt_tp
|
||||
__traceiter_pelt_se_tp
|
||||
__traceiter_rwmmio_post_read
|
||||
__traceiter_rwmmio_post_write
|
||||
__traceiter_rwmmio_read
|
||||
__traceiter_rwmmio_write
|
||||
__traceiter_sched_cpu_capacity_tp
|
||||
__traceiter_sched_overutilized_tp
|
||||
__traceiter_sched_switch
|
||||
@@ -2745,6 +2779,7 @@
|
||||
__tracepoint_android_rvh_enqueue_task
|
||||
__tracepoint_android_rvh_enqueue_task_fair
|
||||
__tracepoint_android_rvh_find_lowest_rq
|
||||
__tracepoint_android_rvh_hw_protection_shutdown
|
||||
__tracepoint_android_rvh_iommu_alloc_insert_iova
|
||||
__tracepoint_android_rvh_iommu_iovad_init_alloc_algo
|
||||
__tracepoint_android_rvh_iommu_limit_align_shift
|
||||
@@ -2767,6 +2802,7 @@
|
||||
__tracepoint_android_rvh_set_cpus_allowed_by_task
|
||||
__tracepoint_android_rvh_set_iowait
|
||||
__tracepoint_android_rvh_setscheduler
|
||||
__tracepoint_android_rvh_setscheduler_prio
|
||||
__tracepoint_android_rvh_set_task_cpu
|
||||
__tracepoint_android_rvh_set_user_nice_locked
|
||||
__tracepoint_android_rvh_tick_entry
|
||||
@@ -2778,6 +2814,7 @@
|
||||
__tracepoint_android_rvh_update_load_avg
|
||||
__tracepoint_android_rvh_update_misfit_status
|
||||
__tracepoint_android_rvh_update_rt_rq_load_avg
|
||||
__tracepoint_android_rvh_usb_dev_suspend
|
||||
__tracepoint_android_rvh_util_est_update
|
||||
__tracepoint_android_rvh_util_fits_cpu
|
||||
__tracepoint_android_rvh_vmscan_kswapd_done
|
||||
@@ -2795,9 +2832,10 @@
|
||||
__tracepoint_android_vh_dup_task_struct
|
||||
__tracepoint_android_vh_early_resume_begin
|
||||
__tracepoint_android_vh_enable_thermal_genl_check
|
||||
__tracepoint_android_vh_filemap_get_folio
|
||||
__tracepoint_android_vh_free_pages_prepare_init
|
||||
__tracepoint_android_vh_ep_create_wakeup_source
|
||||
__tracepoint_android_vh_filemap_get_folio
|
||||
__tracepoint_android_vh_free_one_page_flag_check
|
||||
__tracepoint_android_vh_free_pages_prepare_init
|
||||
__tracepoint_android_vh_ipi_stop
|
||||
__tracepoint_android_vh_mm_compaction_begin
|
||||
__tracepoint_android_vh_mm_compaction_end
|
||||
@@ -2811,9 +2849,11 @@
|
||||
__tracepoint_android_vh_setscheduler_uclamp
|
||||
__tracepoint_android_vh_set_task_comm
|
||||
__tracepoint_android_vh_si_meminfo_adjust
|
||||
__tracepoint_android_vh_swap_readpage_bdev_sync
|
||||
__tracepoint_android_vh_swap_writepage
|
||||
__tracepoint_android_vh_sysrq_crash
|
||||
__tracepoint_android_vh_tune_swappiness
|
||||
__tracepoint_android_vh_timerfd_create
|
||||
__tracepoint_android_vh_tune_swappiness
|
||||
__tracepoint_android_vh_typec_store_partner_src_caps
|
||||
__tracepoint_android_vh_typec_tcpm_log
|
||||
__tracepoint_android_vh_typec_tcpm_modify_src_caps
|
||||
@@ -2827,6 +2867,7 @@
|
||||
__tracepoint_android_vh_ufs_send_uic_command
|
||||
__tracepoint_android_vh_ufs_update_sdev
|
||||
__tracepoint_android_vh_ufs_update_sysfs
|
||||
__tracepoint_android_vh_usb_dev_resume
|
||||
__tracepoint_android_vh_use_amu_fie
|
||||
__tracepoint_clock_set_rate
|
||||
__tracepoint_cma_alloc_finish
|
||||
@@ -2849,6 +2890,10 @@
|
||||
__tracepoint_pelt_se_tp
|
||||
tracepoint_probe_register
|
||||
tracepoint_probe_unregister
|
||||
__tracepoint_rwmmio_post_read
|
||||
__tracepoint_rwmmio_post_write
|
||||
__tracepoint_rwmmio_read
|
||||
__tracepoint_rwmmio_write
|
||||
__tracepoint_sched_cpu_capacity_tp
|
||||
__tracepoint_sched_overutilized_tp
|
||||
__tracepoint_sched_switch
|
||||
|
@@ -1235,6 +1235,14 @@
 memparse
 register_virtio_device
 synchronize_irq
+__traceiter_rwmmio_post_read
+__traceiter_rwmmio_post_write
+__traceiter_rwmmio_read
+__traceiter_rwmmio_write
+__tracepoint_rwmmio_post_read
+__tracepoint_rwmmio_post_write
+__tracepoint_rwmmio_read
+__tracepoint_rwmmio_write
 unregister_virtio_device
 virtio_config_changed
 virtio_device_freeze
@@ -13,6 +13,7 @@
 blk_stat_enable_accounting
 __brelse
 class_find_device
+clk_set_duty_cycle
 cpufreq_unregister_notifier
 crypto_register_alg
 crypto_unregister_alg
@@ -524,8 +524,19 @@ __traceiter_android_vh_gzvm_handle_demand_page_post
 __tracepoint_android_vh_gzvm_handle_demand_page_pre
 __tracepoint_android_vh_gzvm_handle_demand_page_post
 
+# required by pte preread
+read_swap_cache_async
+unuse_swap_pte
+__pte_offset_map
+vfs_fadvise
+
 # required by launch_boost driver
+__traceiter_android_vh_mmput_mm
+__tracepoint_android_vh_mmput_mm
 __traceiter_android_vh_filemap_read
 __tracepoint_android_vh_filemap_read
 __traceiter_android_vh_filemap_map_pages_range
 __tracepoint_android_vh_filemap_map_pages_range
+
+__traceiter_android_vh_pageset_update
+__tracepoint_android_vh_pageset_update
@@ -133,6 +133,7 @@ CONFIG_ANON_VMA_NAME=y
 CONFIG_USERFAULTFD=y
 CONFIG_LRU_GEN=y
 CONFIG_LRU_GEN_ENABLED=y
+CONFIG_MEMFD_ASHMEM_SHIM=y
 CONFIG_DAMON=y
 CONFIG_DAMON_VADDR=y
 CONFIG_DAMON_SYSFS=y
arch/arm64/configs/microdroid_minimal.fragment (new file, empty)
@@ -21,7 +21,8 @@
 
 void __init early_init_devtree(void *params)
 {
-	__be32 *dtb = (u32 *)__dtb_start;
+	__be32 __maybe_unused *dtb = (u32 *)__dtb_start;
+
 #if defined(CONFIG_NIOS2_DTB_AT_PHYS_ADDR)
 	if (be32_to_cpup((__be32 *)CONFIG_NIOS2_DTB_PHYS_ADDR) ==
 			OF_DT_HEADER) {
@@ -30,8 +31,11 @@ void __init early_init_devtree(void *params)
 		return;
 	}
 #endif
+
+#ifdef CONFIG_NIOS2_DTB_SOURCE_BOOL
 	if (be32_to_cpu((__be32) *dtb) == OF_DT_HEADER)
 		params = (void *)__dtb_start;
+#endif
 
 	early_init_dt_scan(params, __pa(params));
 }
@@ -124,6 +124,7 @@ CONFIG_ANON_VMA_NAME=y
 CONFIG_USERFAULTFD=y
 CONFIG_LRU_GEN=y
 CONFIG_LRU_GEN_ENABLED=y
+CONFIG_MEMFD_ASHMEM_SHIM=y
 CONFIG_DAMON=y
 CONFIG_DAMON_VADDR=y
 CONFIG_DAMON_SYSFS=y
@@ -1793,6 +1793,8 @@ static __init int pt_init(void)
 
 	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
 		pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;
+	else
+		pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_PREFER_LARGE;
 
 	pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
 	pt_pmu.pmu.attr_groups = pt_attr_groups;
@@ -827,8 +827,8 @@ static void binder_transaction_priority(struct binder_thread *thread,
 		desired.sched_policy = SCHED_NORMAL;
 	}
 
-	if (node_prio.prio < t->priority.prio ||
-	    (node_prio.prio == t->priority.prio &&
+	if (node_prio.prio < desired.prio ||
+	    (node_prio.prio == desired.prio &&
 	     node_prio.sched_policy == SCHED_FIFO)) {
 		/*
 		 * In case the minimum priority on the node is
@@ -550,6 +550,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages_prepare_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages_ok_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_unref_page_list_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages_prepare_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_one_page_flag_check);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_post_alloc_hook);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_new_page);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_split_large_folio_bypass);
@@ -620,6 +621,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_group_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_migration_target_bypass);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swap_writepage);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swap_readpage_bdev_sync);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dpm_wait_start);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dpm_wait_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sync_irq_wait_start);
@@ -669,3 +671,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_kswapd_shrink_node);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_keep_reclaimed_folio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_clear_reclaimed_folio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_evict_folios_bypass);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_folio_add_lru_folio_activate);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_filemap_fault_pre_folio_locked);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_filemap_folio_mapped);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_folio_remove_rmap_ptes);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pageset_update);
@@ -195,6 +195,7 @@ int dev_pm_domain_attach_list(struct device *dev,
 	struct device *pd_dev = NULL;
 	int ret, i, num_pds = 0;
 	bool by_id = true;
+	size_t size;
 	u32 pd_flags = data ? data->pd_flags : 0;
 	u32 link_flags = pd_flags & PD_FLAG_NO_DEV_LINK ? 0 :
 			 DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME;
@@ -217,19 +218,17 @@ int dev_pm_domain_attach_list(struct device *dev,
 	if (num_pds <= 0)
 		return 0;
 
-	pds = devm_kzalloc(dev, sizeof(*pds), GFP_KERNEL);
+	pds = kzalloc(sizeof(*pds), GFP_KERNEL);
 	if (!pds)
 		return -ENOMEM;
 
-	pds->pd_devs = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_devs),
-				    GFP_KERNEL);
-	if (!pds->pd_devs)
-		return -ENOMEM;
-
-	pds->pd_links = devm_kcalloc(dev, num_pds, sizeof(*pds->pd_links),
-				     GFP_KERNEL);
-	if (!pds->pd_links)
-		return -ENOMEM;
+	size = sizeof(*pds->pd_devs) + sizeof(*pds->pd_links);
+	pds->pd_devs = kcalloc(num_pds, size, GFP_KERNEL);
+	if (!pds->pd_devs) {
+		ret = -ENOMEM;
+		goto free_pds;
+	}
+	pds->pd_links = (void *)(pds->pd_devs + num_pds);
 
 	if (link_flags && pd_flags & PD_FLAG_DEV_LINK_ON)
 		link_flags |= DL_FLAG_RPM_ACTIVE;
@@ -272,6 +271,9 @@ err_attach:
 			device_link_del(pds->pd_links[i]);
 		dev_pm_domain_detach(pds->pd_devs[i], true);
 	}
+	kfree(pds->pd_devs);
+free_pds:
+	kfree(pds);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_domain_attach_list);
@@ -318,6 +320,9 @@ void dev_pm_domain_detach_list(struct dev_pm_domain_list *list)
 			device_link_del(list->pd_links[i]);
 		dev_pm_domain_detach(list->pd_devs[i], true);
 	}
+
+	kfree(list->pd_devs);
+	kfree(list);
 }
 EXPORT_SYMBOL_GPL(dev_pm_domain_detach_list);
 
@@ -313,8 +313,9 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
 	count++;
 
 	dma_resv_list_set(fobj, i, fence, usage);
-	/* pointer update must be visible before we extend the num_fences */
-	smp_store_mb(fobj->num_fences, count);
+	/* fence update must be visible before we extend the num_fences */
+	smp_wmb();
+	fobj->num_fences = count;
 }
 EXPORT_SYMBOL(dma_resv_add_fence);
 
@@ -760,7 +760,7 @@ static void ffa_notification_info_get(void)
 			      }, &ret);
 
 	if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
-		if (ret.a2 != FFA_RET_NO_DATA)
+		if ((s32)ret.a2 != FFA_RET_NO_DATA)
 			pr_err("Notification Info fetch failed: 0x%lx (0x%lx)",
 			       ret.a0, ret.a2);
 		return;
@@ -796,7 +796,7 @@ static void ffa_notification_info_get(void)
 		}
 
 		/* Per vCPU Notification */
-		for (idx = 0; idx < ids_count[list]; idx++) {
+		for (idx = 1; idx < ids_count[list]; idx++) {
 			if (ids_processed >= max_ids - 1)
 				break;
 
@@ -456,12 +456,17 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, enum cs_mode cs_mode,
 static int catu_enable(struct coresight_device *csdev, enum cs_mode mode,
 		       void *data)
 {
-	int rc;
+	int rc = 0;
 	struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
 
-	CS_UNLOCK(catu_drvdata->base);
-	rc = catu_enable_hw(catu_drvdata, mode, data);
-	CS_LOCK(catu_drvdata->base);
+	guard(raw_spinlock_irqsave)(&catu_drvdata->spinlock);
+	if (atomic_read(&csdev->refcnt) == 0) {
+		CS_UNLOCK(catu_drvdata->base);
+		rc = catu_enable_hw(catu_drvdata, mode, data);
+		CS_LOCK(catu_drvdata->base);
+	}
+	if (!rc)
+		atomic_inc(&csdev->refcnt);
 	return rc;
 }
 
@@ -484,12 +489,15 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
 
 static int catu_disable(struct coresight_device *csdev, void *__unused)
 {
-	int rc;
+	int rc = 0;
 	struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
 
-	CS_UNLOCK(catu_drvdata->base);
-	rc = catu_disable_hw(catu_drvdata);
-	CS_LOCK(catu_drvdata->base);
+	guard(raw_spinlock_irqsave)(&catu_drvdata->spinlock);
+	if (atomic_dec_return(&csdev->refcnt) == 0) {
+		CS_UNLOCK(catu_drvdata->base);
+		rc = catu_disable_hw(catu_drvdata);
+		CS_LOCK(catu_drvdata->base);
+	}
 	return rc;
 }
 
@@ -556,6 +564,7 @@ static int catu_probe(struct amba_device *adev, const struct amba_id *id)
 	dev->platform_data = pdata;
 
 	drvdata->base = base;
+	raw_spin_lock_init(&drvdata->spinlock);
 	catu_desc.access = CSDEV_ACCESS_IOMEM(base);
 	catu_desc.pdata = pdata;
 	catu_desc.dev = dev;
@@ -64,6 +64,7 @@ struct catu_drvdata {
 	void __iomem *base;
 	struct coresight_device *csdev;
 	int irq;
+	raw_spinlock_t spinlock;
 };
 
 #define CATU_REG32(name, offset) \
@@ -574,7 +574,7 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
 		/* Enable all helpers adjacent to the path first */
 		ret = coresight_enable_helpers(csdev, mode, sink_data);
 		if (ret)
-			goto err;
+			goto err_disable_path;
 		/*
 		 * ETF devices are tricky... They can be a link or a sink,
 		 * depending on how they are configured. If an ETF has been
@@ -595,8 +595,10 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
 			 * that need disabling. Disabling the path here
 			 * would mean we could disrupt an existing session.
 			 */
-			if (ret)
+			if (ret) {
+				coresight_disable_helpers(csdev);
 				goto out;
+			}
 			break;
 		case CORESIGHT_DEV_TYPE_SOURCE:
 			/* sources are enabled from either sysFS or Perf */
@@ -606,16 +608,19 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
 			child = list_next_entry(nd, link)->csdev;
 			ret = coresight_enable_link(csdev, parent, child);
 			if (ret)
-				goto err;
+				goto err_disable_helpers;
 			break;
 		default:
-			goto err;
+			ret = -EINVAL;
+			goto err_disable_helpers;
 		}
 	}
 
 out:
 	return ret;
-err:
+err_disable_helpers:
+	coresight_disable_helpers(csdev);
+err_disable_path:
 	coresight_disable_path_from(path, nd);
 	goto out;
 }
@@ -57,7 +57,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(ufshcd_command);
 /* UIC command timeout, unit: ms */
 enum {
 	UIC_CMD_TIMEOUT_DEFAULT	= 500,
-	UIC_CMD_TIMEOUT_MAX	= 2000,
+	UIC_CMD_TIMEOUT_MAX	= 5000,
 };
 /* NOP OUT retries waiting for NOP IN response */
 #define NOP_OUT_RETRIES    10
@@ -67,7 +67,11 @@ enum {
 /* Query request retries */
 #define QUERY_REQ_RETRIES 3
 /* Query request timeout */
-#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+enum {
+	QUERY_REQ_TIMEOUT_MIN     = 1,
+	QUERY_REQ_TIMEOUT_DEFAULT = 1500,
+	QUERY_REQ_TIMEOUT_MAX     = 30000
+};
 
 /* Advanced RPMB request timeout */
 #define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
@@ -134,7 +138,24 @@ static const struct kernel_param_ops uic_cmd_timeout_ops = {
 
 module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
 MODULE_PARM_DESC(uic_cmd_timeout,
-		 "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
+		 "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 5 seconds inclusively");
+
+static unsigned int dev_cmd_timeout = QUERY_REQ_TIMEOUT_DEFAULT;
+
+static int dev_cmd_timeout_set(const char *val, const struct kernel_param *kp)
+{
+	return param_set_uint_minmax(val, kp, QUERY_REQ_TIMEOUT_MIN,
+				     QUERY_REQ_TIMEOUT_MAX);
+}
+
+static const struct kernel_param_ops dev_cmd_timeout_ops = {
+	.set = dev_cmd_timeout_set,
+	.get = param_get_uint,
+};
+
+module_param_cb(dev_cmd_timeout, &dev_cmd_timeout_ops, &dev_cmd_timeout, 0644);
+MODULE_PARM_DESC(dev_cmd_timeout,
+		 "UFS Device command timeout in milliseconds. Defaults to 1.5s. Supported values range from 1ms to 30 seconds inclusively");
 
 #define ufshcd_toggle_vreg(_dev, _vreg, _on)	\
 	({	\
@@ -3266,7 +3287,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	struct ufs_query_req *request = NULL;
 	struct ufs_query_res *response = NULL;
 	int err, selector = 0;
-	int timeout = QUERY_REQ_TIMEOUT;
+	int timeout = dev_cmd_timeout;
 
 	BUG_ON(!hba);
 
@@ -3366,7 +3387,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 		goto out_unlock;
 	}
 
-	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
 
 	if (err) {
 		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -3466,7 +3487,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 		goto out_unlock;
 	}
 
-	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
 
 	if (err) {
 		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
@@ -7213,7 +7234,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
 	 * read the response directly ignoring all errors.
 	 */
-	ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
+	ufshcd_wait_for_dev_cmd(hba, lrbp, dev_cmd_timeout);
 
 	/* just copy the upiu response as it is */
 	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
@@ -8651,7 +8672,7 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
 
 	put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
 
-	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, dev_cmd_timeout);
 
 	if (err)
 		dev_err(hba->dev, "%s: failed to set timestamp %d\n",
@@ -237,7 +237,7 @@ static int exfat_search_empty_slot(struct super_block *sb,
 			dentry = 0;
 		}
 
-		while (dentry + num_entries < total_entries &&
+		while (dentry + num_entries <= total_entries &&
 		       clu.dir != EXFAT_EOF_CLUSTER) {
 			i = dentry & (dentries_per_clu - 1);
 
@@ -4218,7 +4218,13 @@ static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	map.m_next_pgofs = &next_pgofs;
 	map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
 						inode->i_write_hint);
-	if (flags & IOMAP_WRITE)
+
+	/*
+	 * If the blocks being overwritten are already allocated,
+	 * f2fs_map_lock and f2fs_balance_fs are not necessary.
+	 */
+	if ((flags & IOMAP_WRITE) &&
+			!f2fs_overwrite_io(inode, offset, length))
 		map.m_may_create = true;
 
 	err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
@@ -409,7 +409,8 @@ start_find_entry:
|
||||
|
||||
out:
|
||||
#if IS_ENABLED(CONFIG_UNICODE)
|
||||
if (IS_CASEFOLDED(dir) && !de && use_hash) {
|
||||
if (!sb_no_casefold_compat_fallback(dir->i_sb) &&
|
||||
IS_CASEFOLDED(dir) && !de && use_hash) {
|
||||
use_hash = false;
|
||||
goto start_find_entry;
|
||||
}
|
||||
|
@@ -814,6 +814,7 @@ enum {
|
||||
FI_ATOMIC_DIRTIED, /* indicate atomic file is dirtied */
|
||||
FI_ATOMIC_REPLACE, /* indicate atomic replace */
|
||||
FI_OPENED_FILE, /* indicate file has been opened */
|
||||
FI_DONATE_FINISHED, /* indicate page donation of file has been finished */
|
||||
FI_MAX, /* max flag, never be used */
|
||||
};
|
||||
|
||||
|
@@ -2473,19 +2473,20 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void f2fs_keep_noreuse_range(struct inode *inode,
|
||||
static int f2fs_keep_noreuse_range(struct inode *inode,
|
||||
loff_t offset, loff_t len)
|
||||
{
|
||||
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
|
||||
u64 max_bytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
|
||||
u64 start, end;
|
||||
int ret = 0;
|
||||
|
||||
if (!S_ISREG(inode->i_mode))
|
||||
return;
|
||||
return 0;
|
||||
|
||||
if (offset >= max_bytes || len > max_bytes ||
|
||||
(offset + len) > max_bytes)
|
||||
return;
|
||||
return 0;
|
||||
|
||||
start = offset >> PAGE_SHIFT;
|
||||
end = DIV_ROUND_UP(offset + len, PAGE_SIZE);
|
||||
@@ -2493,7 +2494,7 @@ static void f2fs_keep_noreuse_range(struct inode *inode,
|
||||
inode_lock(inode);
|
||||
if (f2fs_is_atomic_file(inode)) {
|
||||
inode_unlock(inode);
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
spin_lock(&sbi->inode_lock[DONATE_INODE]);
|
||||
@@ -2502,7 +2503,12 @@ static void f2fs_keep_noreuse_range(struct inode *inode,
|
||||
if (!list_empty(&F2FS_I(inode)->gdonate_list)) {
|
||||
list_del_init(&F2FS_I(inode)->gdonate_list);
|
||||
sbi->donate_files--;
|
||||
}
|
||||
if (is_inode_flag_set(inode, FI_DONATE_FINISHED))
|
||||
ret = -EALREADY;
|
||||
else
|
||||
set_inode_flag(inode, FI_DONATE_FINISHED);
|
||||
} else
|
||||
ret = -ENOENT;
|
||||
} else {
|
||||
if (list_empty(&F2FS_I(inode)->gdonate_list)) {
|
||||
list_add_tail(&F2FS_I(inode)->gdonate_list,
|
||||
@@ -2514,9 +2520,12 @@ static void f2fs_keep_noreuse_range(struct inode *inode,
|
||||
}
|
||||
F2FS_I(inode)->donate_start = start;
|
||||
F2FS_I(inode)->donate_end = end - 1;
|
||||
clear_inode_flag(inode, FI_DONATE_FINISHED);
|
||||
}
|
||||
spin_unlock(&sbi->inode_lock[DONATE_INODE]);
|
||||
inode_unlock(inode);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
|
||||
@@ -5250,8 +5259,8 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
			f2fs_compressed_file(inode)))
		f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
	else if (advice == POSIX_FADV_NOREUSE)
		f2fs_keep_noreuse_range(inode, offset, len);
	return 0;
		err = f2fs_keep_noreuse_range(inode, offset, len);
	return err;
}

#ifdef CONFIG_COMPAT
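Illustrative userspace counterpart (not part of the patch): with f2fs_keep_noreuse_range() now returning an int, a failure in the memory-donation path can surface through posix_fadvise(POSIX_FADV_NOREUSE) instead of being silently dropped. A minimal sketch:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		int fd, err;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* Whole file; posix_fadvise() returns the error number directly. */
		err = posix_fadvise(fd, 0, 0, POSIX_FADV_NOREUSE);
		if (err)
			fprintf(stderr, "fadvise: %s\n", strerror(err));
		close(fd);
		return 0;
	}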
@@ -184,10 +184,17 @@ static unsigned int do_reclaim_caches(struct f2fs_sb_info *sbi,
|
||||
if (!inode)
|
||||
continue;
|
||||
|
||||
len = fi->donate_end - fi->donate_start + 1;
|
||||
npages = npages < len ? 0 : npages - len;
|
||||
invalidate_inode_pages2_range(inode->i_mapping,
|
||||
inode_lock(inode);
|
||||
if (!is_inode_flag_set(inode, FI_DONATE_FINISHED)) {
|
||||
len = fi->donate_end - fi->donate_start + 1;
|
||||
npages = npages < len ? 0 : npages - len;
|
||||
|
||||
invalidate_inode_pages2_range(inode->i_mapping,
|
||||
fi->donate_start, fi->donate_end);
|
||||
set_inode_flag(inode, FI_DONATE_FINISHED);
|
||||
}
|
||||
inode_unlock(inode);
|
||||
|
||||
iput(inode);
|
||||
cond_resched();
|
||||
}
|
||||
|
@@ -274,6 +274,13 @@ static ssize_t encoding_show(struct f2fs_attr *a,
|
||||
return sysfs_emit(buf, "(none)\n");
|
||||
}
|
||||
|
||||
static ssize_t encoding_flags_show(struct f2fs_attr *a,
|
||||
struct f2fs_sb_info *sbi, char *buf)
|
||||
{
|
||||
return sysfs_emit(buf, "%x\n",
|
||||
le16_to_cpu(F2FS_RAW_SUPER(sbi)->s_encoding_flags));
|
||||
}
|
||||
|
||||
static ssize_t mounted_time_sec_show(struct f2fs_attr *a,
|
||||
struct f2fs_sb_info *sbi, char *buf)
|
||||
{
|
||||
@@ -1158,6 +1165,7 @@ F2FS_GENERAL_RO_ATTR(features);
|
||||
F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
|
||||
F2FS_GENERAL_RO_ATTR(unusable);
|
||||
F2FS_GENERAL_RO_ATTR(encoding);
|
||||
F2FS_GENERAL_RO_ATTR(encoding_flags);
|
||||
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
|
||||
F2FS_GENERAL_RO_ATTR(main_blkaddr);
|
||||
F2FS_GENERAL_RO_ATTR(pending_discard);
|
||||
@@ -1199,6 +1207,9 @@ F2FS_FEATURE_RO_ATTR(readonly);
|
||||
F2FS_FEATURE_RO_ATTR(compression);
|
||||
#endif
|
||||
F2FS_FEATURE_RO_ATTR(pin_file);
|
||||
#ifdef CONFIG_UNICODE
|
||||
F2FS_FEATURE_RO_ATTR(linear_lookup);
|
||||
#endif
|
||||
|
||||
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
|
||||
static struct attribute *f2fs_attrs[] = {
|
||||
@@ -1270,6 +1281,7 @@ static struct attribute *f2fs_attrs[] = {
|
||||
ATTR_LIST(reserved_blocks),
|
||||
ATTR_LIST(current_reserved_blocks),
|
||||
ATTR_LIST(encoding),
|
||||
ATTR_LIST(encoding_flags),
|
||||
ATTR_LIST(mounted_time_sec),
|
||||
#ifdef CONFIG_F2FS_STAT_FS
|
||||
ATTR_LIST(cp_foreground_calls),
|
||||
@@ -1347,6 +1359,9 @@ static struct attribute *f2fs_feat_attrs[] = {
|
||||
BASE_ATTR_LIST(compression),
|
||||
#endif
|
||||
BASE_ATTR_LIST(pin_file),
|
||||
#ifdef CONFIG_UNICODE
|
||||
BASE_ATTR_LIST(linear_lookup),
|
||||
#endif
|
||||
NULL,
|
||||
};
|
||||
ATTRIBUTE_GROUPS(f2fs_feat);
|
||||
|
@@ -65,6 +65,7 @@
|
||||
#include <linux/namei.h>
|
||||
#include <linux/mnt_namespace.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/pgsize_migration.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/kallsyms.h>
|
||||
@@ -2476,7 +2477,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
|
||||
}
|
||||
|
||||
p->start = vma->vm_start;
|
||||
p->end = vma->vm_end;
|
||||
p->end = VMA_PAD_START(vma);
|
||||
p->mode = vma->vm_file->f_mode;
|
||||
}
|
||||
mmap_read_unlock(mm);
|
||||
|
@@ -723,6 +723,34 @@ void dup_userfaultfd_complete(struct list_head *fcs)
|
||||
}
|
||||
}
|
||||
|
||||
void dup_userfaultfd_fail(struct list_head *fcs)
|
||||
{
|
||||
struct userfaultfd_fork_ctx *fctx, *n;
|
||||
|
||||
/*
|
||||
* An error has occurred on fork, we will tear memory down, but have
|
||||
* allocated memory for fctx's and raised reference counts for both the
|
||||
* original and child contexts (and on the mm for each as a result).
|
||||
*
|
||||
* These would ordinarily be taken care of by a user handling the event,
|
||||
* but we are no longer doing so, so manually clean up here.
|
||||
*
|
||||
* mm tear down will take care of cleaning up VMA contexts.
|
||||
*/
|
||||
list_for_each_entry_safe(fctx, n, fcs, list) {
|
||||
struct userfaultfd_ctx *octx = fctx->orig;
|
||||
struct userfaultfd_ctx *ctx = fctx->new;
|
||||
|
||||
atomic_dec(&octx->mmap_changing);
|
||||
VM_BUG_ON(atomic_read(&octx->mmap_changing) < 0);
|
||||
userfaultfd_ctx_put(octx);
|
||||
userfaultfd_ctx_put(ctx);
|
||||
|
||||
list_del(&fctx->list);
|
||||
kfree(fctx);
|
||||
}
|
||||
}
|
||||
|
||||
void mremap_userfaultfd_prep(struct vm_area_struct *vma,
|
||||
struct vm_userfaultfd_ctx *vm_ctx)
|
||||
{
|
||||
@@ -2307,6 +2335,9 @@ static int __init userfaultfd_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (__PAGE_SIZE != PAGE_SIZE)
|
||||
return 0;
|
||||
|
||||
ret = misc_register(&userfaultfd_misc);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@@ -74,6 +74,7 @@
|
||||
#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
|
||||
#include <linux/tracepoint-defs.h>
|
||||
|
||||
#define rwmmio_tracepoint_enabled(tracepoint) tracepoint_enabled(tracepoint)
|
||||
DECLARE_TRACEPOINT(rwmmio_write);
|
||||
DECLARE_TRACEPOINT(rwmmio_post_write);
|
||||
DECLARE_TRACEPOINT(rwmmio_read);
|
||||
@@ -90,6 +91,7 @@ void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
|
||||
|
||||
#else
|
||||
|
||||
#define rwmmio_tracepoint_enabled(tracepoint) false
|
||||
static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
|
||||
unsigned long caller_addr, unsigned long caller_addr0) {}
|
||||
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
|
||||
@@ -188,11 +190,13 @@ static inline u8 readb(const volatile void __iomem *addr)
|
||||
{
|
||||
u8 val;
|
||||
|
||||
log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_read))
|
||||
log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
|
||||
__io_br();
|
||||
val = __raw_readb(addr);
|
||||
__io_ar(val);
|
||||
log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_read))
|
||||
log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
return val;
|
||||
}
|
||||
#endif
|
||||
@@ -203,11 +207,13 @@ static inline u16 readw(const volatile void __iomem *addr)
|
||||
{
|
||||
u16 val;
|
||||
|
||||
log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_read))
|
||||
log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
|
||||
__io_br();
|
||||
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
|
||||
__io_ar(val);
|
||||
log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_read))
|
||||
log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
return val;
|
||||
}
|
||||
#endif
|
||||
@@ -218,11 +224,13 @@ static inline u32 readl(const volatile void __iomem *addr)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_read))
|
||||
log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
|
||||
__io_br();
|
||||
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
|
||||
__io_ar(val);
|
||||
log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_read))
|
||||
log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
return val;
|
||||
}
|
||||
#endif
|
||||
@@ -234,11 +242,13 @@ static inline u64 readq(const volatile void __iomem *addr)
|
||||
{
|
||||
u64 val;
|
||||
|
||||
log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_read))
|
||||
log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
|
||||
__io_br();
|
||||
val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
|
||||
__io_ar(val);
|
||||
log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_read))
|
||||
log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
return val;
|
||||
}
|
||||
#endif
|
||||
@@ -248,11 +258,13 @@ static inline u64 readq(const volatile void __iomem *addr)
|
||||
#define writeb writeb
|
||||
static inline void writeb(u8 value, volatile void __iomem *addr)
|
||||
{
|
||||
log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_write))
|
||||
log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
__io_bw();
|
||||
__raw_writeb(value, addr);
|
||||
__io_aw();
|
||||
log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_write))
|
||||
log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -260,11 +272,13 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
|
||||
#define writew writew
|
||||
static inline void writew(u16 value, volatile void __iomem *addr)
|
||||
{
|
||||
log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_write))
|
||||
log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
__io_bw();
|
||||
__raw_writew((u16 __force)cpu_to_le16(value), addr);
|
||||
__io_aw();
|
||||
log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_write))
|
||||
log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -272,11 +286,13 @@ static inline void writew(u16 value, volatile void __iomem *addr)
|
||||
#define writel writel
|
||||
static inline void writel(u32 value, volatile void __iomem *addr)
|
||||
{
|
||||
log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_write))
|
||||
log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
__io_bw();
|
||||
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
|
||||
__io_aw();
|
||||
log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_write))
|
||||
log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -285,11 +301,13 @@ static inline void writel(u32 value, volatile void __iomem *addr)
|
||||
#define writeq writeq
|
||||
static inline void writeq(u64 value, volatile void __iomem *addr)
|
||||
{
|
||||
log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_write))
|
||||
log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
__io_bw();
|
||||
__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
|
||||
__io_aw();
|
||||
log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_write))
|
||||
log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_64BIT */
|
||||
@@ -305,9 +323,11 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
|
||||
{
|
||||
u8 val;
|
||||
|
||||
log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_read))
|
||||
log_read_mmio(8, addr, _THIS_IP_, _RET_IP_);
|
||||
val = __raw_readb(addr);
|
||||
log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_read))
|
||||
log_post_read_mmio(val, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
return val;
|
||||
}
|
||||
#endif
|
||||
@@ -318,9 +338,11 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
|
||||
{
|
||||
u16 val;
|
||||
|
||||
log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_read))
|
||||
log_read_mmio(16, addr, _THIS_IP_, _RET_IP_);
|
||||
val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
|
||||
log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_read))
|
||||
log_post_read_mmio(val, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
return val;
|
||||
}
|
||||
#endif
|
||||
@@ -331,9 +353,11 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_read))
|
||||
log_read_mmio(32, addr, _THIS_IP_, _RET_IP_);
|
||||
val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
|
||||
log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_read))
|
||||
log_post_read_mmio(val, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
return val;
|
||||
}
|
||||
#endif
|
||||
@@ -344,9 +368,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
|
||||
{
|
||||
u64 val;
|
||||
|
||||
log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_read))
|
||||
log_read_mmio(64, addr, _THIS_IP_, _RET_IP_);
|
||||
val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
|
||||
log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_read))
|
||||
log_post_read_mmio(val, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
return val;
|
||||
}
|
||||
#endif
|
||||
@@ -355,9 +381,11 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
|
||||
#define writeb_relaxed writeb_relaxed
|
||||
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
|
||||
{
|
||||
log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_write))
|
||||
log_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
__raw_writeb(value, addr);
|
||||
log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_write))
|
||||
log_post_write_mmio(value, 8, addr, _THIS_IP_, _RET_IP_);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -365,9 +393,11 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
|
||||
#define writew_relaxed writew_relaxed
|
||||
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
|
||||
{
|
||||
log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_write))
|
||||
log_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
__raw_writew((u16 __force)cpu_to_le16(value), addr);
|
||||
log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_write))
|
||||
log_post_write_mmio(value, 16, addr, _THIS_IP_, _RET_IP_);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -375,9 +405,11 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
|
||||
#define writel_relaxed writel_relaxed
|
||||
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
|
||||
{
|
||||
log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_write))
|
||||
log_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
__raw_writel((u32 __force)__cpu_to_le32(value), addr);
|
||||
log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_write))
|
||||
log_post_write_mmio(value, 32, addr, _THIS_IP_, _RET_IP_);
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -385,9 +417,11 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
|
||||
#define writeq_relaxed writeq_relaxed
|
||||
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
|
||||
{
|
||||
log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_write))
|
||||
log_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
|
||||
log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
if (rwmmio_tracepoint_enabled(rwmmio_post_write))
|
||||
log_post_write_mmio(value, 64, addr, _THIS_IP_, _RET_IP_);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
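The gating pattern above is generic. As a hedged sketch with hypothetical names (not from this patch), any hot path can guard an out-of-line logging helper behind a tracepoint static key the same way, so the fast path costs only a patched branch when the tracepoint is off:

	#include <linux/tracepoint-defs.h>

	/* "my_evt" and "my_slow_log" are hypothetical. */
	DECLARE_TRACEPOINT(my_evt);

	void my_slow_log(unsigned long val);	/* emits the my_evt tracepoint */

	static inline void my_fast_path(unsigned long val)
	{
		if (tracepoint_enabled(my_evt))	/* static branch, ~free when off */
			my_slow_log(val);
		/* ... hot-path work continues here ... */
	}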
@@ -33,6 +33,7 @@
|
||||
#define _ANDROID_KABI_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/stringify.h>
|
||||
|
||||
/*
|
||||
* Worker macros, don't use these, use the ones without a leading '_'
|
||||
|
@@ -1129,11 +1129,19 @@ extern int send_sigurg(struct fown_struct *fown);
|
||||
#define SB_NOUSER BIT(31)
|
||||
|
||||
/* These flags relate to encoding and casefolding */
|
||||
#define SB_ENC_STRICT_MODE_FL (1 << 0)
|
||||
#define SB_ENC_STRICT_MODE_FL (1 << 0)
|
||||
#define SB_ENC_NO_COMPAT_FALLBACK_FL (1 << 1)
|
||||
|
||||
#define sb_has_strict_encoding(sb) \
|
||||
(sb->s_encoding_flags & SB_ENC_STRICT_MODE_FL)
|
||||
|
||||
#if IS_ENABLED(CONFIG_UNICODE)
|
||||
#define sb_no_casefold_compat_fallback(sb) \
|
||||
(sb->s_encoding_flags & SB_ENC_NO_COMPAT_FALLBACK_FL)
|
||||
#else
|
||||
#define sb_no_casefold_compat_fallback(sb) (1)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Umount options
|
||||
*/
|
||||
|
@@ -291,6 +291,7 @@ struct perf_event_pmu_context;
|
||||
#define PERF_PMU_CAP_NO_EXCLUDE 0x0040
|
||||
#define PERF_PMU_CAP_AUX_OUTPUT 0x0080
|
||||
#define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100
|
||||
#define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400
|
||||
|
||||
struct perf_output_handle;
|
||||
|
||||
|
@@ -5,6 +5,9 @@
|
||||
extern unsigned long generic_max_swapfile_size(void);
|
||||
unsigned long arch_max_swapfile_size(void);
|
||||
|
||||
extern int unuse_swap_pte(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
unsigned long addr, swp_entry_t entry, struct folio *folio);
|
||||
|
||||
/* Maximum swapfile size supported for the arch (not inclusive). */
|
||||
extern unsigned long swapfile_maximum_size;
|
||||
/* Whether swap migration entry supports storing A/D bits for the arch */
|
||||
|
@@ -233,6 +233,7 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
|
||||
|
||||
extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
|
||||
extern void dup_userfaultfd_complete(struct list_head *);
|
||||
void dup_userfaultfd_fail(struct list_head *);
|
||||
|
||||
extern void mremap_userfaultfd_prep(struct vm_area_struct *,
|
||||
struct vm_userfaultfd_ctx *);
|
||||
@@ -308,6 +309,10 @@ static inline void dup_userfaultfd_complete(struct list_head *l)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void dup_userfaultfd_fail(struct list_head *l)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
|
||||
struct vm_userfaultfd_ctx *ctx)
|
||||
{
|
||||
|
@@ -15,7 +15,7 @@ TRACE_EVENT_CONDITION(damos_before_apply,
|
||||
unsigned int target_idx, struct damon_region *r,
|
||||
unsigned int nr_regions, bool do_trace),
|
||||
|
||||
TP_ARGS(context_idx, target_idx, scheme_idx, r, nr_regions, do_trace),
|
||||
TP_ARGS(context_idx, scheme_idx, target_idx, r, nr_regions, do_trace),
|
||||
|
||||
TP_CONDITION(do_trace),
|
||||
|
||||
|
@@ -458,6 +458,9 @@ DECLARE_HOOK(android_vh_free_unref_page_list_bypass,
|
||||
DECLARE_HOOK(android_vh_free_pages_prepare_init,
|
||||
TP_PROTO(struct page *page, int nr_pages, bool *init),
|
||||
TP_ARGS(page, nr_pages, init));
|
||||
DECLARE_HOOK(android_vh_free_one_page_flag_check,
|
||||
TP_PROTO(unsigned long *flags),
|
||||
TP_ARGS(flags));
|
||||
DECLARE_HOOK(android_vh_post_alloc_hook,
|
||||
TP_PROTO(struct page *page, unsigned int order, bool *init),
|
||||
TP_ARGS(page, order, init));
|
||||
@@ -530,6 +533,10 @@ DECLARE_HOOK(android_vh_migration_target_bypass,
|
||||
DECLARE_HOOK(android_vh_swap_writepage,
|
||||
TP_PROTO(unsigned long *sis_flags, struct page *page),
|
||||
TP_ARGS(sis_flags, page));
|
||||
DECLARE_HOOK(android_vh_swap_readpage_bdev_sync,
|
||||
TP_PROTO(struct block_device *bdev, sector_t sector,
|
||||
struct page *page, bool *read),
|
||||
TP_ARGS(bdev, sector, page, read));
|
||||
DECLARE_HOOK(android_vh_alloc_flags_cma_adjust,
|
||||
TP_PROTO(gfp_t gfp_mask, unsigned int *alloc_flags),
|
||||
TP_ARGS(gfp_mask, alloc_flags));
|
||||
@@ -574,6 +581,21 @@ DECLARE_HOOK(android_vh_alloc_swap_slot_cache,
|
||||
DECLARE_HOOK(android_vh_calculate_totalreserve_pages,
|
||||
TP_PROTO(bool *skip),
|
||||
TP_ARGS(skip));
|
||||
DECLARE_HOOK(android_vh_folio_add_lru_folio_activate,
|
||||
TP_PROTO(struct folio *folio, bool *bypass),
|
||||
TP_ARGS(folio, bypass));
|
||||
DECLARE_HOOK(android_vh_filemap_fault_pre_folio_locked,
|
||||
TP_PROTO(struct folio *folio),
|
||||
TP_ARGS(folio));
|
||||
DECLARE_HOOK(android_vh_filemap_folio_mapped,
|
||||
TP_PROTO(struct folio *folio),
|
||||
TP_ARGS(folio));
|
||||
DECLARE_HOOK(android_vh_folio_remove_rmap_ptes,
|
||||
TP_PROTO(struct folio *folio),
|
||||
TP_ARGS(folio));
|
||||
DECLARE_HOOK(android_vh_pageset_update,
|
||||
TP_PROTO(unsigned long *high, unsigned long *batch),
|
||||
TP_ARGS(high, batch));
|
||||
#endif /* _TRACE_HOOK_MM_H */
|
||||
|
||||
/* This part must be outside protection */
|
||||
|
@@ -84,6 +84,10 @@ DECLARE_RESTRICTED_HOOK(android_rvh_setscheduler,
|
||||
TP_PROTO(struct task_struct *p),
|
||||
TP_ARGS(p), 1);
|
||||
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_setscheduler_prio,
|
||||
TP_PROTO(struct task_struct *p),
|
||||
TP_ARGS(p), 1);
|
||||
|
||||
struct sched_group;
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_find_busiest_group,
|
||||
TP_PROTO(struct sched_group *busiest, struct rq *dst_rq, int *out_balance),
|
||||
@@ -434,6 +438,10 @@ DECLARE_HOOK(android_vh_mmput,
|
||||
TP_PROTO(void *unused),
|
||||
TP_ARGS(unused));
|
||||
|
||||
DECLARE_HOOK(android_vh_mmput_mm,
|
||||
TP_PROTO(struct mm_struct *mm),
|
||||
TP_ARGS(mm));
|
||||
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_attach_entity_load_avg,
|
||||
TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
|
||||
TP_ARGS(cfs_rq, se), 1);
|
||||
|
@@ -183,6 +183,7 @@ config GKI_HIDDEN_RPMSG_CONFIGS
|
||||
config GKI_HIDDEN_GPU_CONFIGS
|
||||
bool "Hidden GPU configuration needed for GKI"
|
||||
select TRACE_GPU_MEM
|
||||
select TEGRA_HOST1X_CONTEXT_BUS
|
||||
help
|
||||
Dummy config option used to enable the hidden GPU config.
|
||||
These are normally selected implicitly when a module
|
||||
|
@@ -673,15 +673,23 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
|
||||
{
|
||||
bool overwrite = !(flags & RING_BUFFER_WRITABLE);
|
||||
int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
|
||||
int ret = -ENOMEM, max_order;
|
||||
bool use_contiguous_pages = event->pmu->capabilities & (
|
||||
PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_AUX_PREFER_LARGE);
|
||||
/*
|
||||
* Initialize max_order to 0 for page allocation. This allocates single
|
||||
* pages to minimize memory fragmentation. This is overridden if the
|
||||
* PMU needs or prefers contiguous pages (use_contiguous_pages = true).
|
||||
*/
|
||||
int max_order = 0;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
if (!has_aux(event))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!overwrite) {
|
||||
/*
|
||||
* Watermark defaults to half the buffer, and so does the
|
||||
* max_order, to aid PMU drivers in double buffering.
|
||||
* Watermark defaults to half the buffer, to aid PMU drivers
|
||||
* in double buffering.
|
||||
*/
|
||||
if (!watermark)
|
||||
watermark = min_t(unsigned long,
|
||||
@@ -689,16 +697,19 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
|
||||
(unsigned long)nr_pages << (PAGE_SHIFT - 1));
|
||||
|
||||
/*
|
||||
* Use aux_watermark as the basis for chunking to
|
||||
* help PMU drivers honor the watermark.
|
||||
* If using contiguous pages, use aux_watermark as the basis
|
||||
* for chunking to help PMU drivers honor the watermark.
|
||||
*/
|
||||
max_order = get_order(watermark);
|
||||
if (use_contiguous_pages)
|
||||
max_order = get_order(watermark);
|
||||
} else {
|
||||
/*
|
||||
* We need to start with the max_order that fits in nr_pages,
|
||||
* not the other way around, hence ilog2() and not get_order.
|
||||
* If using contiguous pages, we need to start with the
|
||||
* max_order that fits in nr_pages, not the other way around,
|
||||
* hence ilog2() and not get_order.
|
||||
*/
|
||||
max_order = ilog2(nr_pages);
|
||||
if (use_contiguous_pages)
|
||||
max_order = ilog2(nr_pages);
|
||||
watermark = 0;
|
||||
}
|
||||
|
||||
|
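For context, a hedged sketch of how a PMU driver might opt back into large contiguous AUX chunks under the new order-0 default; "my_pmu" is hypothetical and the initializer is trimmed to the relevant field:

	#include <linux/perf_event.h>

	/* Hypothetical PMU: request large contiguous AUX pages explicitly. */
	static struct pmu my_pmu = {
		.capabilities	= PERF_PMU_CAP_AUX_PREFER_LARGE,
		/* .event_init, .add, .del, .setup_aux, ... omitted */
	};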
@@ -666,11 +666,8 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
|
||||
LIST_HEAD(uf);
|
||||
VMA_ITERATOR(vmi, mm, 0);
|
||||
|
||||
uprobe_start_dup_mmap();
|
||||
if (mmap_write_lock_killable(oldmm)) {
|
||||
retval = -EINTR;
|
||||
goto fail_uprobe_end;
|
||||
}
|
||||
if (mmap_write_lock_killable(oldmm))
|
||||
return -EINTR;
|
||||
flush_cache_dup_mm(oldmm);
|
||||
uprobe_dup_mmap(oldmm, mm);
|
||||
/*
|
||||
@@ -806,9 +803,10 @@ out:
|
||||
mmap_write_unlock(mm);
|
||||
flush_tlb_mm(oldmm);
|
||||
mmap_write_unlock(oldmm);
|
||||
dup_userfaultfd_complete(&uf);
|
||||
fail_uprobe_end:
|
||||
uprobe_end_dup_mmap();
|
||||
if (!retval)
|
||||
dup_userfaultfd_complete(&uf);
|
||||
else
|
||||
dup_userfaultfd_fail(&uf);
|
||||
return retval;
|
||||
|
||||
fail_nomem_anon_vma_fork:
|
||||
@@ -1404,6 +1402,7 @@ void mmput(struct mm_struct *mm)
|
||||
|
||||
if (atomic_dec_and_test(&mm->mm_users)) {
|
||||
trace_android_vh_mmput(NULL);
|
||||
trace_android_vh_mmput_mm(mm);
|
||||
__mmput(mm);
|
||||
}
|
||||
}
|
||||
@@ -1721,9 +1720,11 @@ static struct mm_struct *dup_mm(struct task_struct *tsk,
|
||||
if (!mm_init(mm, tsk, mm->user_ns))
|
||||
goto fail_nomem;
|
||||
|
||||
uprobe_start_dup_mmap();
|
||||
err = dup_mmap(mm, oldmm);
|
||||
if (err)
|
||||
goto free_pt;
|
||||
uprobe_end_dup_mmap();
|
||||
|
||||
mm->hiwater_rss = get_mm_rss(mm);
|
||||
mm->hiwater_vm = mm->total_vm;
|
||||
@@ -1738,6 +1739,8 @@ free_pt:
|
||||
mm->binfmt = NULL;
|
||||
mm_init_owner(mm, NULL);
|
||||
mmput(mm);
|
||||
if (err)
|
||||
uprobe_end_dup_mmap();
|
||||
|
||||
fail_nomem:
|
||||
return NULL;
|
||||
|
@@ -224,6 +224,19 @@ __irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
|
||||
return IRQ_STARTUP_ABORT;
|
||||
return IRQ_STARTUP_MANAGED;
|
||||
}
|
||||
|
||||
void irq_startup_managed(struct irq_desc *desc)
|
||||
{
|
||||
/*
|
||||
* Only start it up when the disable depth is 1, so that a disable,
|
||||
* hotunplug, hotplug sequence does not end up enabling it during
|
||||
* hotplug unconditionally.
|
||||
*/
|
||||
desc->depth--;
|
||||
if (!desc->depth)
|
||||
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
|
||||
}
|
||||
|
||||
#else
|
||||
static __always_inline int
|
||||
__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
|
||||
@@ -276,6 +289,7 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force)
|
||||
ret = __irq_startup(desc);
|
||||
break;
|
||||
case IRQ_STARTUP_ABORT:
|
||||
desc->depth = 1;
|
||||
irqd_set_managed_shutdown(d);
|
||||
return 0;
|
||||
}
|
||||
@@ -308,7 +322,13 @@ void irq_shutdown(struct irq_desc *desc)
|
||||
{
|
||||
if (irqd_is_started(&desc->irq_data)) {
|
||||
clear_irq_resend(desc);
|
||||
desc->depth = 1;
|
||||
/*
|
||||
* Increment disable depth, so that a managed shutdown on
|
||||
* CPU hotunplug preserves the actual disabled state when the
|
||||
* CPU comes back online. See irq_startup_managed().
|
||||
*/
|
||||
desc->depth++;
|
||||
|
||||
if (desc->irq_data.chip->irq_shutdown) {
|
||||
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
|
||||
irq_state_set_disabled(desc);
|
||||
|
@@ -219,7 +219,7 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
|
||||
return;
|
||||
|
||||
if (irqd_is_managed_and_shutdown(data))
|
||||
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
|
||||
irq_startup_managed(desc);
|
||||
|
||||
/*
|
||||
* If the interrupt can only be directed to a single target
|
||||
|
@@ -87,6 +87,7 @@ extern void __enable_irq(struct irq_desc *desc);
|
||||
extern int irq_activate(struct irq_desc *desc);
|
||||
extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
|
||||
extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
|
||||
extern void irq_startup_managed(struct irq_desc *desc);
|
||||
|
||||
extern void irq_shutdown(struct irq_desc *desc);
|
||||
extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
|
||||
|
@@ -2303,14 +2303,15 @@ int vprintk_store(int facility, int level,
|
||||
if (dev_info)
|
||||
memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
|
||||
|
||||
trace_android_rvh_logbuf(prb, &r);
|
||||
trace_android_vh_logbuf(prb, &r);
|
||||
|
||||
/* A message without a trailing newline can be continued. */
|
||||
if (!(flags & LOG_NEWLINE))
|
||||
prb_commit(&e);
|
||||
else
|
||||
prb_final_commit(&e);
|
||||
|
||||
trace_android_rvh_logbuf(prb, &r);
|
||||
trace_android_vh_logbuf(prb, &r);
|
||||
ret = text_len + trunc_msg_len;
|
||||
out:
|
||||
printk_exit_irqrestore(recursion_ptr, irqflags);
|
||||
|
@@ -7192,6 +7192,7 @@ static void __setscheduler_prio(struct task_struct *p, int prio)
|
||||
p->sched_class = &fair_sched_class;
|
||||
|
||||
p->prio = prio;
|
||||
trace_android_rvh_setscheduler_prio(p);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_RT_MUTEXES
|
||||
|
@@ -27,6 +27,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rtmutex_prepare_setprio);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice_locked);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler_prio);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_group);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dump_throttled_rt_tasks);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_jiffies_update);
|
||||
@@ -110,6 +111,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_cgroup_css_free);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_reweight_entity);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_context_switch);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmput);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mmput_mm);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_attach_entity_load_avg);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_detach_entity_load_avg);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg);
|
||||
|
mm/Kconfig

@@ -1316,6 +1316,17 @@ config LOCK_MM_AND_FIND_VMA
	bool
	depends on !STACK_GROWSUP

config MEMFD_ASHMEM_SHIM
	bool "Memfd ashmem ioctl compatibility support"
	depends on MEMFD_CREATE
	help
	  This provides compatibility support for ashmem ioctl commands against
	  memfd file descriptors. This is useful for compatibility on Android
	  for older applications that may use ashmem's ioctl commands on the
	  memfds now passed to them.

	  Unless you are running Android, say N.

source "mm/damon/Kconfig"

endmenu
@@ -138,6 +138,7 @@ obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
|
||||
obj-$(CONFIG_ZONE_DEVICE) += memremap.o
|
||||
obj-$(CONFIG_HMM_MIRROR) += hmm.o
|
||||
obj-$(CONFIG_MEMFD_CREATE) += memfd.o
|
||||
obj-$(CONFIG_MEMFD_ASHMEM_SHIM) += memfd-ashmem-shim.o
|
||||
obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
|
||||
obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
|
||||
obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
|
||||
|
@@ -1693,7 +1693,7 @@ void damon_update_region_access_rate(struct damon_region *r, bool accessed,
|
||||
* aggr_interval, owing to validation of damon_set_attrs().
|
||||
*/
|
||||
if (attrs->sample_interval)
|
||||
len_window = attrs->aggr_interval / attrs->sample_interval;
|
||||
len_window = damon_max_nr_accesses(attrs);
|
||||
r->nr_accesses_bp = damon_moving_sum(r->nr_accesses_bp,
|
||||
r->last_nr_accesses * 10000, len_window,
|
||||
accessed ? 10000 : 0);
|
||||
|
@@ -1926,7 +1926,7 @@ static void damos_tried_regions_init_upd_status(
|
||||
sysfs_regions->upd_timeout_jiffies = jiffies +
|
||||
2 * usecs_to_jiffies(scheme->apply_interval_us ?
|
||||
scheme->apply_interval_us :
|
||||
ctx->attrs.sample_interval);
|
||||
ctx->attrs.aggr_interval);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -3439,6 +3439,8 @@ retry_find:
|
||||
}
|
||||
}
|
||||
|
||||
trace_android_vh_filemap_fault_pre_folio_locked(folio);
|
||||
|
||||
if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))
|
||||
goto out_retry;
|
||||
|
||||
@@ -3740,6 +3742,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
|
||||
nr_pages, &mmap_miss);
|
||||
|
||||
folio_unlock(folio);
|
||||
trace_android_vh_filemap_folio_mapped(folio);
|
||||
folio_put(folio);
|
||||
} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
|
||||
pte_unmap_unlock(vmf->pte, vmf->ptl);
|
||||
|
@@ -470,6 +470,7 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
|
||||
#define K(x) ((x) << (PAGE_SHIFT-10))
|
||||
|
||||
extern char * const zone_names[MAX_NR_ZONES];
|
||||
extern unsigned long free_highatomics[MAX_NR_ZONES];
|
||||
|
||||
/* perform sanity checks on struct pages being allocated or freed */
|
||||
DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled);
|
||||
|
mm/memfd-ashmem-shim-internal.h (new file)
@@ -0,0 +1,52 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
/*
|
||||
* Ashmem compatibility for memfd
|
||||
*
|
||||
* Copyright (c) 2025, Google LLC.
|
||||
* Author: Isaac J. Manjarres <isaacmanjarres@google.com>
|
||||
*/
|
||||
|
||||
#ifndef _MM_MEMFD_ASHMEM_SHIM_INTERNAL_H
|
||||
#define _MM_MEMFD_ASHMEM_SHIM_INTERNAL_H
|
||||
|
||||
#include <linux/compat.h>
|
||||
#include <linux/ioctl.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#define ASHMEM_NAME_LEN 256
|
||||
|
||||
/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
|
||||
#define ASHMEM_NOT_PURGED 0
|
||||
#define ASHMEM_WAS_PURGED 1
|
||||
|
||||
/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
|
||||
#define ASHMEM_IS_UNPINNED 0
|
||||
#define ASHMEM_IS_PINNED 1
|
||||
|
||||
struct ashmem_pin {
|
||||
__u32 offset; /* offset into region, in bytes, page-aligned */
|
||||
__u32 len; /* length forward from offset, in bytes, page-aligned */
|
||||
};
|
||||
|
||||
#define __ASHMEMIOC 0x77
|
||||
|
||||
#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
|
||||
#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
|
||||
#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
|
||||
#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
|
||||
#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
|
||||
#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
|
||||
#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
|
||||
#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
|
||||
#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
|
||||
#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
|
||||
#define ASHMEM_GET_FILE_ID _IOR(__ASHMEMIOC, 11, unsigned long)
|
||||
|
||||
/* support of 32bit userspace on 64bit platforms */
|
||||
#ifdef CONFIG_COMPAT
|
||||
#define COMPAT_ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, compat_size_t)
|
||||
#define COMPAT_ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned int)
|
||||
#endif
|
||||
|
||||
#endif /* _MM_MEMFD_ASHMEM_SHIM_INTERNAL_H */
|
mm/memfd-ashmem-shim.c (new file)
@@ -0,0 +1,213 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
/*
|
||||
* Ashmem compatibility for memfd
|
||||
*
|
||||
* Copyright (c) 2025, Google LLC.
|
||||
* Author: Isaac J. Manjarres <isaacmanjarres@google.com>
|
||||
*/
|
||||
|
||||
#include <asm-generic/mman-common.h>
|
||||
#include <linux/capability.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/memfd.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include "memfd-ashmem-shim.h"
|
||||
#include "memfd-ashmem-shim-internal.h"
|
||||
|
||||
/* memfd file names all start with memfd: */
|
||||
#define MEMFD_PREFIX "memfd:"
|
||||
#define MEMFD_PREFIX_LEN (sizeof(MEMFD_PREFIX) - 1)
|
||||
|
||||
static const char *get_memfd_name(struct file *file)
|
||||
{
|
||||
/* This pointer is always valid, so no need to check if it's NULL. */
|
||||
const char *file_name = file->f_path.dentry->d_name.name;
|
||||
|
||||
if (file_name != strstr(file_name, MEMFD_PREFIX))
|
||||
return NULL;
|
||||
|
||||
return file_name;
|
||||
}
|
||||
|
||||
static long get_name(struct file *file, void __user *name)
|
||||
{
|
||||
const char *file_name = get_memfd_name(file);
|
||||
size_t len;
|
||||
|
||||
if (!file_name)
|
||||
return -EINVAL;
|
||||
|
||||
/* Strip MEMFD_PREFIX to retain compatibility with ashmem driver. */
|
||||
file_name = &file_name[MEMFD_PREFIX_LEN];
|
||||
|
||||
/*
|
||||
* The expectation is that the user provided buffer is ASHMEM_NAME_LEN in size, which is
|
||||
* larger than the maximum size of a name for a memfd buffer, so the name should always fit
|
||||
* within the given buffer.
|
||||
*
|
||||
* However, we should ensure that the string will indeed fit in the user provided buffer.
|
||||
*
|
||||
* Add 1 to the copy size to account for the NUL terminator
|
||||
*/
|
||||
len = strlen(file_name) + 1;
|
||||
if (len > ASHMEM_NAME_LEN)
|
||||
return -EINVAL;
|
||||
|
||||
return copy_to_user(name, file_name, len) ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
static long get_prot_mask(struct file *file)
|
||||
{
|
||||
long prot_mask = PROT_READ | PROT_EXEC;
|
||||
long seals = memfd_fcntl(file, F_GET_SEALS, 0);
|
||||
|
||||
if (seals < 0)
|
||||
return seals;
|
||||
|
||||
/* memfds are readable and executable by default. Only writability can be changed. */
|
||||
if (!(seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)))
|
||||
prot_mask |= PROT_WRITE;
|
||||
|
||||
return prot_mask;
|
||||
}
|
||||
|
||||
static long set_prot_mask(struct file *file, unsigned long prot)
|
||||
{
|
||||
long curr_prot = get_prot_mask(file);
|
||||
long ret = 0;
|
||||
|
||||
if (curr_prot < 0)
|
||||
return curr_prot;
|
||||
|
||||
/*
|
||||
* memfds are always readable and executable; there is no way to remove either mapping
|
||||
* permission, nor is there a known usecase that requires it.
|
||||
*
|
||||
* Attempting to remove either of these mapping permissions will return successfully, but
|
||||
* will be a nop, as the buffer will still be mappable with these permissions.
|
||||
*/
|
||||
prot |= PROT_READ | PROT_EXEC;
|
||||
|
||||
/* Only allow permissions to be removed. */
|
||||
if ((curr_prot & prot) != prot)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Removing PROT_WRITE:
|
||||
*
|
||||
* We could prevent any other mappings from having write permissions by adding the
|
||||
* F_SEAL_WRITE mapping. However, that would conflict with known usecases where it is
|
||||
* desirable to maintain an existing writable mapping, but forbid future writable mappings.
|
||||
*
|
||||
* To support those usecases, we use F_SEAL_FUTURE_WRITE.
|
||||
*/
|
||||
if (!(prot & PROT_WRITE))
|
||||
ret = memfd_fcntl(file, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* memfd_ashmem_shim_ioctl - ioctl handler for ashmem commands
|
||||
* @file: The shmem file.
|
||||
* @cmd: The ioctl command.
|
||||
* @arg: The argument for the ioctl command.
|
||||
*
|
||||
* The purpose of this handler is to allow old applications to continue working
|
||||
* on newer kernels by allowing them to invoke ashmem ioctl commands on memfds.
|
||||
*
|
||||
* The ioctl handler attempts to retain as much compatibility with the ashmem
|
||||
* driver as possible.
|
||||
*/
|
||||
long memfd_ashmem_shim_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
long ret = -ENOTTY;
|
||||
unsigned long inode_nr;
|
||||
|
||||
switch (cmd) {
|
||||
/*
|
||||
* Older applications won't create memfds and try to use ASHMEM_SET_NAME/ASHMEM_SET_SIZE on
|
||||
* them intentionally.
|
||||
*
|
||||
* Instead, we can end up in this scenario if an old application receives a memfd that was
|
||||
* created by another process.
|
||||
*
|
||||
* However, the current process shouldn't expect to be able to reliably [re]name/size a
|
||||
* buffer that was shared with it, since the process that shared that buffer with it, or
|
||||
* any other process that references the buffer could have already mapped it.
|
||||
*
|
||||
* Additionally in the case of ASHMEM_SET_SIZE, when processes create memfds that are going
|
||||
* to be shared with other processes in Android, they also specify the size of the memory
|
||||
* region and seal the file against any size changes. Therefore, ASHMEM_SET_SIZE should not
|
||||
* be supported anyway.
|
||||
*
|
||||
* Therefore, it is reasonable to return -EINVAL here, as if the buffer was already mapped.
|
||||
*/
|
||||
case ASHMEM_SET_NAME:
|
||||
case ASHMEM_SET_SIZE:
|
||||
ret = -EINVAL;
|
||||
break;
|
||||
case ASHMEM_GET_NAME:
|
||||
ret = get_name(file, (void __user *)arg);
|
||||
break;
|
||||
case ASHMEM_GET_SIZE:
|
||||
ret = i_size_read(file_inode(file));
|
||||
break;
|
||||
case ASHMEM_SET_PROT_MASK:
|
||||
ret = set_prot_mask(file, arg);
|
||||
break;
|
||||
case ASHMEM_GET_PROT_MASK:
|
||||
ret = get_prot_mask(file);
|
||||
break;
|
||||
/*
|
||||
* Unpinning ashmem buffers was deprecated with the release of Android 10,
|
||||
* as it did not yield any remarkable benefits. Therefore, ignore pinning
|
||||
* related requests.
|
||||
*
|
||||
* This makes it so that memory is always "pinned" or never entirely freed
|
||||
* until all references to the ashmem buffer are dropped. The memory occupied
|
||||
* by the buffer is still subject to being reclaimed (swapped out) under memory
|
||||
* pressure, but that is not the same as being freed.
|
||||
*
|
||||
* This makes it so that:
|
||||
*
|
||||
* 1. Memory is always pinned and therefore never purged.
|
||||
* 2. Requests to unpin memory (make it a candidate for being freed) are ignored.
|
||||
*/
|
||||
case ASHMEM_PIN:
|
||||
ret = ASHMEM_NOT_PURGED;
|
||||
break;
|
||||
case ASHMEM_UNPIN:
|
||||
ret = 0;
|
||||
break;
|
||||
case ASHMEM_GET_PIN_STATUS:
|
||||
ret = ASHMEM_IS_PINNED;
|
||||
break;
|
||||
case ASHMEM_PURGE_ALL_CACHES:
|
||||
ret = capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
|
||||
break;
|
||||
case ASHMEM_GET_FILE_ID:
|
||||
inode_nr = file_inode(file)->i_ino;
|
||||
if (copy_to_user((void __user *)arg, &inode_nr, sizeof(inode_nr)))
|
||||
ret = -EFAULT;
|
||||
else
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
long memfd_ashmem_shim_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
if (cmd == COMPAT_ASHMEM_SET_SIZE)
|
||||
cmd = ASHMEM_SET_SIZE;
|
||||
else if (cmd == COMPAT_ASHMEM_SET_PROT_MASK)
|
||||
cmd = ASHMEM_SET_PROT_MASK;
|
||||
|
||||
return memfd_ashmem_shim_ioctl(file, cmd, arg);
|
||||
}
|
||||
#endif
|
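A hedged userspace sketch (not part of the patch) of what the shim enables: legacy ashmem-style ioctls issued directly against a memfd. The ioctl encodings are copied from the internal header above; the program assumes CONFIG_MEMFD_ASHMEM_SHIM is enabled and glibc >= 2.27 for memfd_create().

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	/* Encodings mirror mm/memfd-ashmem-shim-internal.h. */
	#define __ASHMEMIOC		0x77
	#define ASHMEM_GET_SIZE		_IO(__ASHMEMIOC, 4)
	#define ASHMEM_GET_PIN_STATUS	_IO(__ASHMEMIOC, 9)

	int main(void)
	{
		int fd = memfd_create("legacy-buffer", MFD_ALLOW_SEALING);

		if (fd < 0 || ftruncate(fd, 4096) < 0) {
			perror("memfd setup");
			return 1;
		}
		/* With the shim these succeed instead of failing with ENOTTY. */
		printf("size: %d\n", ioctl(fd, ASHMEM_GET_SIZE));
		printf("pin status: %d\n", ioctl(fd, ASHMEM_GET_PIN_STATUS));
		close(fd);
		return 0;
	}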
mm/memfd-ashmem-shim.h (new file)
@@ -0,0 +1,21 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __MM_MEMFD_ASHMEM_SHIM_H
|
||||
#define __MM_MEMFD_ASHMEM_SHIM_H
|
||||
|
||||
/*
|
||||
* mm/memfd-ashmem-shim.h
|
||||
*
|
||||
* Ashmem compatibility for memfd
|
||||
*
|
||||
* Copyright (c) 2025, Google LLC.
|
||||
* Author: Isaac J. Manjarres <isaacmanjarres@google.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/fs.h>
|
||||
|
||||
long memfd_ashmem_shim_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
|
||||
#ifdef CONFIG_COMPAT
|
||||
long memfd_ashmem_shim_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
|
||||
#endif
|
||||
#endif /* __MM_MEMFD_ASHMEM_SHIM_H */
|
@@ -1146,7 +1146,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
|
||||
int rc = -EAGAIN;
|
||||
int old_page_state = 0;
|
||||
struct anon_vma *anon_vma = NULL;
|
||||
bool is_lru = !__folio_test_movable(src);
|
||||
bool is_lru = data_race(!__folio_test_movable(src));
|
||||
bool locked = false;
|
||||
bool dst_locked = false;
|
||||
|
||||
|
@@ -908,6 +908,7 @@ static inline void __free_one_page(struct page *page,
|
||||
bool to_tail;
|
||||
int max_order = zone_max_order(zone);
|
||||
bool bypass = false;
|
||||
unsigned long check_flags;
|
||||
|
||||
trace_android_vh_free_one_page_bypass(page, zone, order,
|
||||
migratetype, (int)fpi_flags, &bypass);
|
||||
@@ -916,7 +917,9 @@ static inline void __free_one_page(struct page *page,
|
||||
return;
|
||||
|
||||
VM_BUG_ON(!zone_is_initialized(zone));
|
||||
VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
|
||||
check_flags = PAGE_FLAGS_CHECK_AT_PREP;
|
||||
trace_android_vh_free_one_page_flag_check(&check_flags);
|
||||
VM_BUG_ON_PAGE(page->flags & check_flags, page);
|
||||
|
||||
VM_BUG_ON(migratetype == -1);
|
||||
|
||||
@@ -5818,6 +5821,7 @@ static int zone_highsize(struct zone *zone, int batch, int cpu_online)
|
||||
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
|
||||
unsigned long batch)
|
||||
{
|
||||
trace_android_vh_pageset_update(&high, &batch);
|
||||
WRITE_ONCE(pcp->batch, batch);
|
||||
WRITE_ONCE(pcp->high, high);
|
||||
}
|
||||
|
@@ -469,6 +469,15 @@ static void swap_readpage_bdev_sync(struct folio *folio,
|
||||
{
|
||||
struct bio_vec bv;
|
||||
struct bio bio;
|
||||
bool read = false;
|
||||
|
||||
trace_android_vh_swap_readpage_bdev_sync(sis->bdev,
|
||||
swap_page_sector(&folio->page) + get_start_sect(sis->bdev),
|
||||
&folio->page, &read);
|
||||
if (read) {
|
||||
count_vm_events(PSWPIN, folio_nr_pages(folio));
|
||||
return;
|
||||
}
|
||||
|
||||
bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
|
||||
bio.bi_iter.bi_sector = swap_page_sector(&folio->page);
|
||||
|
@@ -305,6 +305,7 @@ nomap:
|
||||
rcu_read_unlock();
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__pte_offset_map);
|
||||
|
||||
pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
|
||||
unsigned long addr, spinlock_t **ptlp)
|
||||
|
@@ -1629,6 +1629,8 @@ void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
|
||||
int nr_pages, struct vm_area_struct *vma)
|
||||
{
|
||||
__folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
|
||||
|
||||
trace_android_vh_folio_remove_rmap_ptes(folio);
|
||||
}
|
||||
|
||||
/**
|
||||
|
mm/shmem.c
@@ -89,6 +89,10 @@ static struct vfsmount *shm_mnt;
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
#ifdef CONFIG_MEMFD_ASHMEM_SHIM
|
||||
#include "memfd-ashmem-shim.h"
|
||||
#endif
|
||||
|
||||
#define BLOCKS_PER_PAGE (PAGE_SIZE/512)
|
||||
#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
|
||||
|
||||
@@ -4506,6 +4510,12 @@ static const struct file_operations shmem_file_operations = {
|
||||
.splice_write = iter_file_splice_write,
|
||||
.fallocate = shmem_fallocate,
|
||||
#endif
|
||||
#ifdef CONFIG_MEMFD_ASHMEM_SHIM
|
||||
.unlocked_ioctl = memfd_ashmem_shim_ioctl,
|
||||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = memfd_ashmem_shim_compat_ioctl,
|
||||
#endif
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct inode_operations shmem_inode_operations = {
|
||||
|
@@ -320,6 +320,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
|
||||
" low:%lukB"
|
||||
" high:%lukB"
|
||||
" reserved_highatomic:%luKB"
|
||||
" free_highatomic:%luKB"
|
||||
" active_anon:%lukB"
|
||||
" inactive_anon:%lukB"
|
||||
" active_file:%lukB"
|
||||
@@ -341,6 +342,7 @@ static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_z
|
||||
K(low_wmark_pages(zone)),
|
||||
K(high_wmark_pages(zone)),
|
||||
K(zone->nr_reserved_highatomic),
|
||||
K(free_highatomics[zone_idx(zone)]),
|
||||
K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
|
||||
K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
|
||||
K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
|
||||
|
@@ -504,8 +504,13 @@ void folio_add_lru(struct folio *folio)
|
||||
|
||||
/* see the comment in lru_gen_add_folio() */
|
||||
if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
|
||||
lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
|
||||
folio_set_active(folio);
|
||||
lru_gen_in_fault() && !(current->flags & PF_MEMALLOC)) {
|
||||
bool bypass = false;
|
||||
|
||||
trace_android_vh_folio_add_lru_folio_activate(folio, &bypass);
|
||||
if (!bypass)
|
||||
folio_set_active(folio);
|
||||
}
|
||||
|
||||
folio_get(folio);
|
||||
local_lock(&cpu_fbatches.lock);
|
||||
|
@@ -549,6 +549,8 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
|
||||
return retpage;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(read_swap_cache_async);
|
||||
|
||||
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
|
||||
unsigned long offset,
|
||||
int hits,
|
||||
|
@@ -690,13 +690,16 @@ static bool cluster_scan_range(struct swap_info_struct *si,
|
||||
return true;
|
||||
}
|
||||
|
||||
static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
|
||||
static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
|
||||
unsigned int start, unsigned char usage,
|
||||
unsigned int order)
|
||||
{
|
||||
struct swap_info_ext *sie = to_swap_info_ext(si);
|
||||
unsigned int nr_pages = 1 << order;
|
||||
|
||||
if (!(si->flags & SWP_WRITEOK))
|
||||
return false;
|
||||
|
||||
if (cluster_is_free(ci)) {
|
||||
if (nr_pages < SWAPFILE_CLUSTER) {
|
||||
list_move_tail(&ci->list, &si->nonfull_clusters[order]);
|
||||
@@ -717,6 +720,8 @@ static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster
|
||||
list_move_tail(&ci->list, &sie->full_clusters);
|
||||
ci->flags = CLUSTER_FLAG_FULL;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset,
|
||||
@@ -740,7 +745,10 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigne
|
||||
|
||||
while (offset <= end) {
|
||||
if (cluster_scan_range(si, ci, offset, nr_pages)) {
|
||||
cluster_alloc_range(si, ci, offset, usage, order);
|
||||
if (!cluster_alloc_range(si, ci, offset, usage, order)) {
|
||||
offset = SWAP_NEXT_INVALID;
|
||||
goto done;
|
||||
}
|
||||
*foundp = offset;
|
||||
if (ci->count == SWAPFILE_CLUSTER) {
|
||||
offset = SWAP_NEXT_INVALID;
|
||||
@@ -836,7 +844,11 @@ new_cluster:
|
||||
if (!list_empty(&si->free_clusters)) {
|
||||
ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
|
||||
offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage);
|
||||
VM_BUG_ON(!found);
|
||||
/*
|
||||
* Either we didn't touch the cluster due to swapoff,
|
||||
* or the allocation must success.
|
||||
*/
|
||||
VM_BUG_ON((si->flags & SWP_WRITEOK) && !found);
|
||||
goto done;
|
||||
}
|
||||
|
||||
@@ -1060,6 +1072,8 @@ static int cluster_alloc_swap(struct swap_info_struct *si,
|
||||
|
||||
VM_BUG_ON(!si->cluster_info);
|
||||
|
||||
si->flags += SWP_SCANNING;
|
||||
|
||||
while (n_ret < nr) {
|
||||
unsigned long offset = cluster_alloc_swap_entry(si, order, usage);
|
||||
|
||||
@@ -1068,6 +1082,8 @@ static int cluster_alloc_swap(struct swap_info_struct *si,
|
||||
slots[n_ret++] = swp_entry(si->type, offset);
|
||||
}
|
||||
|
||||
si->flags -= SWP_SCANNING;
|
||||
|
||||
return n_ret;
|
||||
}
|
||||
|
||||
@@ -2188,6 +2204,13 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int unuse_swap_pte(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
unsigned long addr, swp_entry_t entry, struct folio *folio)
|
||||
{
|
||||
return unuse_pte(vma, pmd, addr, entry, folio);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(unuse_swap_pte);
|
||||
|
||||
static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
|
||||
unsigned long addr, unsigned long end,
|
||||
unsigned int type)
|
||||
|
@@ -1057,8 +1057,13 @@ static int move_present_pte(struct mm_struct *mm,
|
||||
WRITE_ONCE(src_folio->index, linear_page_index(dst_vma, dst_addr));
|
||||
|
||||
orig_dst_pte = mk_pte(&src_folio->page, dst_vma->vm_page_prot);
|
||||
/* Follow mremap() behavior and treat the entry dirty after the move */
|
||||
orig_dst_pte = pte_mkwrite(pte_mkdirty(orig_dst_pte), dst_vma);
|
||||
/* Set soft dirty bit so userspace can notice the pte was moved */
|
||||
#ifdef CONFIG_MEM_SOFT_DIRTY
|
||||
orig_dst_pte = pte_mksoft_dirty(orig_dst_pte);
|
||||
#endif
|
||||
if (pte_dirty(orig_src_pte))
|
||||
orig_dst_pte = pte_mkdirty(orig_dst_pte);
|
||||
orig_dst_pte = pte_mkwrite(orig_dst_pte, dst_vma);
|
||||
|
||||
set_pte_at(mm, dst_addr, dst_pte, orig_dst_pte);
|
||||
out:
|
||||
@@ -1092,6 +1097,9 @@ static int move_swap_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma,
|
||||
}
|
||||
|
||||
orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
|
||||
#ifdef CONFIG_MEM_SOFT_DIRTY
|
||||
orig_src_pte = pte_swp_mksoft_dirty(orig_src_pte);
|
||||
#endif
|
||||
set_pte_at(mm, dst_addr, dst_pte, orig_src_pte);
|
||||
double_pt_unlock(dst_ptl, src_ptl);
|
||||
|
||||
|
mm/vmscan.c
@@ -5053,6 +5053,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
|
||||
int tier_idx)
|
||||
{
|
||||
bool success;
|
||||
bool dirty, writeback;
|
||||
int gen = folio_lru_gen(folio);
|
||||
int type = folio_is_file_lru(folio);
|
||||
int zone = folio_zonenum(folio);
|
||||
@@ -5107,9 +5108,17 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c
|
||||
return true;
|
||||
}
|
||||
|
||||
dirty = folio_test_dirty(folio);
|
||||
writeback = folio_test_writeback(folio);
|
||||
if (type == LRU_GEN_FILE && dirty) {
|
||||
sc->nr.file_taken += delta;
|
||||
if (!writeback)
|
||||
sc->nr.unqueued_dirty += delta;
|
||||
}
|
||||
|
||||
/* waiting for writeback */
|
||||
if (folio_test_locked(folio) || folio_test_writeback(folio) ||
|
||||
(type == LRU_GEN_FILE && folio_test_dirty(folio))) {
|
||||
if (folio_test_locked(folio) || writeback ||
|
||||
(type == LRU_GEN_FILE && dirty)) {
|
||||
gen = folio_inc_gen(lruvec, folio, true);
|
||||
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
|
||||
return true;
|
||||
@@ -5226,7 +5235,8 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc,
|
||||
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH,
|
||||
scanned, skipped, isolated,
|
||||
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
|
||||
|
||||
if (type == LRU_GEN_FILE)
|
||||
sc->nr.file_taken += isolated;
|
||||
/*
|
||||
* There might not be eligible folios due to reclaim_idx. Check the
|
||||
* remaining to prevent livelock if it's not making progress.
|
||||
@@ -5355,6 +5365,7 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap
|
||||
return scanned;
|
||||
retry:
|
||||
reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
|
||||
sc->nr.unqueued_dirty += stat.nr_unqueued_dirty;
|
||||
sc->nr_reclaimed += reclaimed;
|
||||
trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
|
||||
scanned, reclaimed, &stat, sc->priority,
|
||||
@@ -5583,6 +5594,13 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
/*
|
||||
* If too many file cache in the coldest generation can't be evicted
|
||||
* due to being dirty, wake up the flusher.
|
||||
*/
|
||||
if (sc->nr.unqueued_dirty && sc->nr.unqueued_dirty == sc->nr.file_taken)
|
||||
wakeup_flusher_threads(WB_REASON_VMSCAN);
|
||||
|
||||
/* whether this lruvec should be rotated */
|
||||
return nr_to_scan < 0;
|
||||
}
|
||||
@@ -6727,6 +6745,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
|
||||
|
||||
trace_android_vh_shrink_node(pgdat, sc->target_mem_cgroup);
|
||||
if (lru_gen_enabled() && root_reclaim(sc)) {
|
||||
memset(&sc->nr, 0, sizeof(sc->nr));
|
||||
lru_gen_shrink_node(pgdat, sc);
|
||||
return;
|
||||
}
|
||||
|
@@ -395,7 +395,6 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
|
||||
return 0;
|
||||
|
||||
shinfo = skb_shinfo(strp->anchor);
|
||||
shinfo->frag_list = NULL;
|
||||
|
||||
/* If we don't know the length go max plus page for cipher overhead */
|
||||
need_spc = strp->stm.full_len ?: TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
|
||||
@@ -411,6 +410,8 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
|
||||
page, 0, 0);
|
||||
}
|
||||
|
||||
shinfo->frag_list = NULL;
|
||||
|
||||
strp->copy_mode = 1;
|
||||
strp->stm.offset = 0;
|
||||
|
||||
|
@@ -1186,7 +1186,7 @@ uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size,
|
||||
nr, count, count_verify[src_offs + nr + i]);
|
||||
}
|
||||
}
|
||||
if (step_size > page_size) {
|
||||
if (chunk_size > page_size) {
|
||||
area_src = orig_area_src;
|
||||
area_dst = orig_area_dst;
|
||||
}
|
||||
|
Reference in New Issue
Block a user