Merge android15-6.6 into android15-6.6-lts

This merges the android15-6.6 branch into the -lts branch, catching
it up with the latest changes in there.

It contains the following commits:

* 98aebd3013 Merge tag 'android15-6.6.98_r00' into android15-6.6
* 8dd7c74e66 ANDROID: KVM: arm64: iommu: Add new ops iotlb_sync_map()
* e9aa22409a UPSTREAM: printk: Improve memory usage logging during boot
* db93ab496d ANDROID: Enable support for LZ4 compression for microdroid
* be350eab93 Revert "ANDROID: Make vsock virtio packet buff size configurable"
* d89d5bb9ca ANDROID: BACKPORT: vsock/virtio: Limit size of TX buffers
* 35ae5c9699 FROMGIT: BACKPORT: vsock/virtio: Allocate nonlinear SKBs for handling large transmit buffers
* 080548dfc3 FROMGIT: vsock/virtio: Rename virtio_vsock_skb_rx_put()
* b2fe83cd4d FROMGIT: vhost/vsock: Allocate nonlinear SKBs for handling large receive buffers
* 5044ed745e FROMGIT: vsock/virtio: Move SKB allocation lower-bound check to callers
* 80b36821c2 FROMGIT: BACKPORT: vsock/virtio: Rename virtio_vsock_alloc_skb()
* a2d2066afa FROMGIT: vsock/virtio: Resize receive buffers so that each SKB fits in a 4K page
* 75d7beaade FROMGIT: vsock/virtio: Move length check to callers of virtio_vsock_skb_rx_put()
* 20063c7e48 FROMGIT: vsock/virtio: Validate length in packet header before skb_put()
* b8845cd48c FROMGIT: vhost/vsock: Avoid allocating arbitrarily-sized SKBs
* 45954b1a14 UPSTREAM: vsock/virtio: non-linear skb handling for tap
* 82309a607c UPSTREAM: vsock/virtio: support to send non-linear skb
* 1852eda58f UPSTREAM: vsock/virtio/vhost: read data from non-linear skb
* ba6b79ffee ANDROID: GKI: Update vivo symbol list.
* 4a1101db03 ANDROID: vendor_hook: Added hook for memory reclaim tuning
* 5f135711ba ANDROID: vendor_hook: Added hook for memory allocation tuning
* a2ba3d525d ANDROID: mm: Fix incorrect call to try_to_map_unused_to_zeropage
* 63b3e8c48a BACKPORT: mm/huge_memory: only split PMD mapping when necessary in unmap_folio()
* be16e97d16 ANDROID: Align x86-64 microdroid kernel configs to aarch64 kernel configs
* e4fc14c2f7 ANDROID: Remove miscellaneous kernel support from microdroid
* e9edbc2946 ANDROID: Disable transparent hugepage support on microdroid
* fbececc25c ANDROID: Remove support for ZONE_DMA and ZONE_DMA32
* 43e9f43a25 ANDROID: Remove CPU hotplug support from microdroid
* ae6aa31c0a ANDROID: Remove suspend and power management code for microdroid
* 1e8b933b8d ANDROID: Remove DMA-BUF support from microdroid
* 1cfe1f2af4 ANDROID: GKI: Update symbol list for Pixel Watch
* d8ad35c3f4 ANDROID: Update AutoFDO profile for 6.6.92
* 53889dfa13 ANDROID: Remove GKI ABI related padding from structures for microdroid
* 3bd1e69c2c ANDROID: fuse-bpf: Fix readdir for getdents
* 599c52e842 ANDROID: fuse-bpf: Fix the issue of abnormal lseek system calls
* 0a79a0e659 ANDROID: Disable IOMMU support for microdroid
* 6688be0f3d ANDROID: Disable BTI and enable UNWIND_PATCH_PAC_INTO_SCS for microdroid
* f9927690d8 ANDROID: GKI: Update Nvidia symbol list

Change-Id: I6a91a32b3b8705a410ae8772d48d47cefc238e97
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

File diff suppressed because it is too large.

@@ -213,3 +213,7 @@ type 'struct scm_stat' changed
type 'struct scm_fp_list' changed
member 'bool dead' was added
type 'struct kvm_iommu_ops' changed
member 'u64 android_kabi_reserved1' was removed
member 'union { int(* iotlb_sync_map)(struct kvm_hyp_iommu_domain*, unsigned long, size_t); struct { u64 android_kabi_reserved1; }; union { }; }' was added
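
For reference, the union reported here is exactly what the ANDROID_KABI_USE(1, ...) line in the kvm_iommu_ops hunk further down expands to. A minimal sketch, assuming the stock android_kabi.h helpers:

union {
	int (*iotlb_sync_map)(struct kvm_hyp_iommu_domain *domain,
			      unsigned long iova, size_t size);
	struct {
		u64 android_kabi_reserved1;
	};
};

The new function pointer overlays the old 8-byte reserved field, so the size and member offsets of struct kvm_iommu_ops stay unchanged for the GKI ABI.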


@@ -1,175 +1,416 @@
[abi_symbol_list]
# commonly used symbols
alloc_chrdev_region
__alloc_pages
__alloc_skb
alloc_workqueue
alt_cb_patch_nops
anon_inode_getfile
__arch_copy_from_user
__arch_copy_to_user
arm64_use_ng_mappings
__bitmap_clear
bitmap_find_next_zero_area_off
__bitmap_set
bpf_trace_run1
bpf_trace_run2
bpf_trace_run3
bpf_trace_run4
bpf_trace_run5
bpf_trace_run6
bpf_trace_run8
cancel_delayed_work
cancel_delayed_work_sync
capable
cdev_add
cdev_del
cdev_init
__check_object_size
class_create
class_destroy
clk_disable
clk_enable
clk_get_rate
clk_prepare
clk_set_rate
clk_unprepare
complete
__const_udelay
cpu_number
__cpu_online_mask
__cpu_possible_mask
debugfs_create_bool
debugfs_create_dir
debugfs_create_file
debugfs_create_u32
debugfs_create_u64
debugfs_remove
delayed_work_timer_fn
destroy_workqueue
dev_driver_string
_dev_err
dev_get_by_name
device_create
device_create_file
device_del
device_destroy
device_unregister
_dev_info
devm_clk_get
devm_clk_put
devm_free_irq
devm_gpio_request_one
devm_ioremap
devm_ioremap_resource
devm_kfree
devm_kmalloc
devm_memremap
devm_of_platform_populate
devm_request_threaded_irq
__devm_reset_control_get
dev_set_name
_dev_warn
disable_irq
disable_irq_nosync
dma_alloc_attrs
dma_buf_attach
dma_buf_detach
dma_buf_export
dma_buf_get
dma_buf_map_attachment
dma_buf_put
dma_buf_unmap_attachment
dma_fence_add_callback
dma_fence_array_ops
dma_fence_context_alloc
dma_fence_init
dma_fence_release
dma_fence_remove_callback
dma_fence_signal
dma_fence_signal_timestamp_locked
dma_fence_wait_timeout
dma_free_attrs
dma_map_page_attrs
dma_map_resource
dma_map_sg_attrs
dma_set_coherent_mask
dma_set_mask
dma_sync_single_for_cpu
dma_unmap_resource
dma_unmap_sg_attrs
down_read
down_write
down_write_trylock
enable_irq
__fdget
fd_install
fget
_find_first_zero_bit
_find_next_bit
finish_wait
flush_delayed_work
fortify_panic
fput
free_irq
__free_pages
generic_file_llseek
__get_free_pages
__get_task_comm
get_unused_fd_flags
gpiod_get_raw_value
gpiod_set_raw_value
gpiod_to_irq
gpio_to_desc
hrtimer_cancel
hrtimer_init
hrtimer_start_range_ns
init_net
__init_rwsem
__init_swait_queue_head
init_timer_key
init_wait_entry
__init_waitqueue_head
iommu_get_domain_for_dev
ioremap_prot
iounmap
is_vmalloc_addr
jiffies
jiffies_to_timespec64
jiffies_to_usecs
kasan_flag_enabled
kasprintf
kfree
kimage_voffset
__kmalloc
kmalloc_caches
kmalloc_large
kmalloc_trace
kmem_cache_alloc
kmem_cache_create
kmem_cache_destroy
kmem_cache_free
krealloc
kstrdup
kstrtouint
kthread_create_on_node
kthread_should_stop
kthread_stop
ktime_get
ktime_get_mono_fast_ns
ktime_get_real_ts64
ktime_get_with_offset
kvfree_call_rcu
__list_add_valid_or_report
__list_del_entry_valid_or_report
log_post_read_mmio
log_post_write_mmio
log_read_mmio
log_write_mmio
memcmp
memcpy
__memcpy_fromio
memset
memstart_addr
mod_timer
module_layout
__msecs_to_jiffies
msleep
__mutex_init
mutex_lock
mutex_trylock
mutex_unlock
netlink_unicast
noop_llseek
__num_online_cpus
of_device_get_match_data
of_device_is_available
of_device_is_compatible
of_dma_configure_id
of_find_compatible_node
of_find_device_by_node
of_find_matching_node_and_match
of_find_node_by_name
of_find_node_opts_by_path
of_find_property
of_get_named_gpio
of_get_next_child
of_get_property
of_machine_compatible_match
of_match_device
__of_parse_phandle_with_args
of_property_count_elems_of_size
of_property_match_string
of_property_read_string
of_property_read_string_helper
of_property_read_u32_index
of_property_read_variable_u32_array
of_property_read_variable_u8_array
panic
param_ops_bool
perf_trace_buf_alloc
perf_trace_run_bpf_submit
pfn_is_map_memory
pid_task
platform_device_unregister
__platform_driver_register
platform_driver_unregister
platform_get_irq
platform_get_irq_byname
platform_get_resource
platform_get_resource_byname
__platform_register_drivers
platform_unregister_drivers
__pm_runtime_disable
pm_runtime_enable
pm_runtime_force_suspend
__pm_runtime_idle
__pm_runtime_resume
pm_runtime_set_autosuspend_delay
__pm_runtime_suspend
__pm_runtime_use_autosuspend
preempt_schedule
preempt_schedule_notrace
prepare_to_wait_event
_printk
put_device
__put_task_struct
put_unused_fd
queue_delayed_work_on
queue_work_on
___ratelimit
_raw_spin_lock
_raw_spin_lock_irqsave
_raw_spin_unlock
_raw_spin_unlock_irqrestore
refcount_dec_and_mutex_lock
refcount_warn_saturate
register_chrdev_region
register_pm_notifier
release_firmware
remap_pfn_range
request_threaded_irq
reset_control_assert
reset_control_reset
schedule
schedule_timeout
scnprintf
seq_lseek
seq_printf
seq_puts
seq_read
seq_write
sg_alloc_table
sg_alloc_table_from_pages_segment
sg_free_table
sg_init_table
sg_next
simple_attr_open
simple_attr_read
simple_attr_release
simple_attr_write
single_open
single_release
snprintf
soc_device_match
split_page
sprintf
sscanf
__stack_chk_fail
strchr
strcmp
strcpy
strlen
strncmp
strncpy
strnlen
strscpy
__sw_hweight64
sync_file_create
sync_file_get_fence
sysfs_create_group
sysfs_remove_group
system_cpucaps
system_wq
tegra_bpmp_transfer
tegra_ivc_notified
tegra_ivc_read_advance
tegra_ivc_read_get_next_frame
tegra_ivc_reset
tegra_ivc_write_advance
tegra_ivc_write_get_next_frame
tegra_sku_info
trace_event_buffer_commit
trace_event_buffer_reserve
trace_event_printf
trace_event_raw_init
trace_event_reg
trace_handle_return
__traceiter_rwmmio_post_read
__traceiter_rwmmio_post_write
__traceiter_rwmmio_read
__traceiter_rwmmio_write
__tracepoint_rwmmio_post_read
__tracepoint_rwmmio_post_write
__tracepoint_rwmmio_read
__tracepoint_rwmmio_write
trace_print_hex_seq
trace_raw_output_prep
__trace_trigger_soft_disabled
__udelay
unregister_chrdev_region
up
up_read
up_write
usleep_range_state
vfree
vmalloc
vmap
vsnprintf
vunmap
vzalloc
wait_for_completion
wait_for_completion_interruptible
wait_for_completion_timeout
__wake_up
wake_up_process
__warn_printk
xa_destroy
xa_erase
xa_load
# required by ivc-cdev.ko
device_del
devm_free_irq
noop_llseek
remap_pfn_range
# required by cpuidle-tegra-auto.ko
cpuidle_register
cpuidle_unregister
# required by host1x-emu.ko
nsecs_to_jiffies64
# required by host1x.ko
alloc_iova
__free_iova
free_iova
host1x_context_device_bus_type
iommu_detach_group
iova_cache_get
iova_cache_put
pm_generic_freeze
pm_generic_poweroff
pm_generic_restore
pm_generic_thaw
reset_control_bulk_acquire
reset_control_bulk_release
# required by ivc_ext.ko
dma_sync_single_for_cpu
__memcpy_toio
# required by nvgpu.ko
async_synchronize_cookie
dev_pm_domain_set
pcie_reset_flr
pci_find_host_bridge
pci_ignore_hotplug
tegra_bpmp_get
tegra_fuse_readl
# required by nvmap.ko
arch_invalidate_pmem
# required by nvsciipc.ko
_dev_notice
__fdget
find_get_pid
fput
platform_device_register_full
platform_device_unregister
sprintf
# required by tegra_bpmp.ko
clk_hw_determine_rate_no_reparent
clk_hw_get_name
clk_hw_unregister
debugfs_create_dir
debugfs_create_file
debugfs_remove
dentry_path_raw
devm_clk_hw_register
devm_reset_controller_register
dma_alloc_attrs
dma_free_attrs
_find_next_bit
kmalloc_large
kstrdup
ktime_get
of_clk_add_hw_provider
of_device_get_match_data
of_genpd_add_provider_onecell
__of_parse_phandle_with_args
of_platform_default_populate
pm_genpd_init
pm_genpd_remove
seq_lseek
seq_read
seq_write
single_open_size
single_release
strncpy
tegra_bpmp_free_mrq
tegra_bpmp_mrq_is_supported
tegra_bpmp_mrq_return
tegra_bpmp_request_mrq
tegra_bpmp_transfer
tegra_bpmp_transfer_atomic
tegra_sku_info
# required by tegra_hv.ko
arm64_use_ng_mappings
class_create_file_ns
ioremap_prot
iounmap
irq_get_irq_data
memstart_addr
of_add_property
of_chosen
of_find_compatible_node
of_irq_get
pfn_is_map_memory
tegra_ivc_init
# required by tegra_hv_pm_ctl.ko
__alloc_skb
find_vpid
finish_wait
init_net
init_wait_entry
msleep
__netlink_kernel_create
netlink_unicast
__nlmsg_put
prepare_to_wait_event
register_pm_notifier
schedule
strcmp
wait_for_completion_timeout
# required by tegra_hv_vblk_oops.ko
delayed_work_timer_fn
dma_map_page_attrs
__get_free_pages
is_vmalloc_addr
queue_delayed_work_on
# required by tegra_vblk.ko
blk_execute_rq
@@ -192,41 +433,20 @@
blk_queue_physical_block_size
blk_queue_write_cache
__blk_rq_map_sg
capable
__cpu_possible_mask
del_gendisk
device_add_disk
device_create_file
disable_irq
disk_check_media_change
dma_map_sg_attrs
dma_unmap_sg_attrs
enable_irq
_find_first_zero_bit
jiffies
kasan_flag_enabled
kthread_create_on_cpu
kthread_create_on_node
__list_add_valid_or_report
__list_del_entry_valid_or_report
mod_timer
__num_online_cpus
of_find_node_by_name
put_disk
queue_work_on
_raw_spin_lock_irqsave
_raw_spin_unlock_irqrestore
__register_blkdev
sched_setattr_nocheck
set_capacity
set_disk_ro
sg_init_table
sg_nents
__sw_hweight64
timer_delete
unregister_blkdev
vfree
vzalloc
wait_for_completion
wait_for_completion_interruptible
wake_up_process
# required by tegradisp-drm.ko
drm_edid_override_connector_update
drm_file_get_master
drm_mode_create_dvi_i_properties


@@ -487,6 +487,7 @@
divider_ro_round_rate_parent
divider_round_rate_parent
dma_alloc_attrs
dma_alloc_noncontiguous
dma_alloc_pages
dma_async_device_register
dma_async_device_unregister
@@ -522,6 +523,7 @@
dma_fence_signal_timestamp_locked
dma_fence_wait_timeout
dma_free_attrs
dma_free_noncontiguous
dma_free_pages
dma_get_sgtable_attrs
dma_get_slave_channel
@@ -553,6 +555,8 @@
dma_sync_single_for_device
dma_unmap_page_attrs
dma_unmap_sg_attrs
dma_vmap_noncontiguous
dma_vunmap_noncontiguous
do_trace_netlink_extack
double_rq_lock
do_wait_intr
@@ -632,8 +636,10 @@
drm_crtc_init_with_planes
drm_crtc_send_vblank_event
drm_crtc_set_max_vblank_count
drm_crtc_vblank_get
drm_crtc_vblank_off
drm_crtc_vblank_on
drm_crtc_vblank_put
drm_crtc_vblank_reset
drm_crtc_wait_one_vblank
___drm_dbg
@@ -2440,6 +2446,7 @@
__xa_alloc
__xa_alloc_cyclic
xa_destroy
__xa_erase
xa_erase
xa_find
xa_find_after


@@ -157,7 +157,16 @@
__traceiter_android_vh_lruvec_add_folio
__traceiter_android_vh_lruvec_del_folio
__traceiter_android_vh_mempool_alloc_skip_wait
__traceiter_android_vh_mm_customize_ac
__traceiter_android_vh_mm_customize_file_is_tiny
__traceiter_android_vh_mm_customize_lru_add_dst
__traceiter_android_vh_mm_customize_pgdat_balanced
__traceiter_android_vh_mm_customize_rmqueue
__traceiter_android_vh_mm_customize_suitable_zone
__traceiter_android_vh_mm_customize_zone_pageset
__traceiter_android_vh_mm_customize_zone_max_order
__traceiter_android_vh_mm_free_page
__traceiter_android_vh_mm_isolate_priv_lru
__traceiter_android_vh_mmap_region
__traceiter_android_vh_mutex_init
__traceiter_android_vh_mutex_unlock_slowpath
@@ -291,7 +300,16 @@
__tracepoint_android_vh_lruvec_add_folio
__tracepoint_android_vh_lruvec_del_folio
__tracepoint_android_vh_mempool_alloc_skip_wait
__tracepoint_android_vh_mm_customize_ac
__tracepoint_android_vh_mm_customize_file_is_tiny
__tracepoint_android_vh_mm_customize_lru_add_dst
__tracepoint_android_vh_mm_customize_pgdat_balanced
__tracepoint_android_vh_mm_customize_rmqueue
__tracepoint_android_vh_mm_customize_suitable_zone
__tracepoint_android_vh_mm_customize_zone_pageset
__tracepoint_android_vh_mm_customize_zone_max_order
__tracepoint_android_vh_mm_free_page
__tracepoint_android_vh_mm_isolate_priv_lru
__tracepoint_android_vh_mmap_region
__tracepoint_android_vh_mutex_init
__tracepoint_android_vh_mutex_unlock_slowpath


@@ -5,19 +5,22 @@ optimize kernel builds for specific architectures and kernel versions.
## kernel.afdo
kernel.afdo is an AArch64 kernel profile collected on kernel version 6.6.82 (
SHA b62ea68f41a901d5f07f48bd6f1d3a117d801411, build server ID 13287877) using Pixel 6.
kernel.afdo is an AArch64 kernel profile collected on kernel version 6.6.92 (
SHA fe630a04152399fa0646fa16cabae8dee2901a20, build server ID P100391429) using Pixel 6.
### Performance improvements
| Benchmark | Improvement |
| --------------------- | ----------- |
| Boot time | 2.2% |
| Cold App launch time | 2.7% |
| Binder-rpc | 4.4% |
| Binder-addints | 14.1% |
| Hwbinder | 17.0% |
| Bionic (syscall_mmap) | 1.6% |
| Benchmark | Improvement |
| --------------------- | ------------------------------------------------------------------------ |
| Boot time | 1.5% |
| Cold App launch time  | 3.3% (Only for two apps, most app launch tests are broken b/432087996)   |
| Binder-rpc | 4.4% |
| Binder-addints | 15.4% |
| Hwbinder | 15.2% |
| Bionic (syscall_mmap) | 5.6% |
| Bionic (pthread) | 1.9% |
| Bionic (stdio) | 5.4% |
| Bionic (all) | 2.9% |
Benchmark results were collected on Pixel 6.

Binary file not shown.


@@ -25,14 +25,14 @@ CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=32
CONFIG_PARAVIRT_TIME_ACCOUNTING=y
CONFIG_ARM64_SW_TTBR0_PAN=y
# CONFIG_ARM64_BTI_KERNEL is not set
CONFIG_RANDOMIZE_BASE=y
# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
CONFIG_UNWIND_PATCH_PAC_INTO_SCS=y
CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off cgroup_disable=pressure ioremap_guard panic=-1 bootconfig"
CONFIG_CMDLINE_EXTEND=y
# CONFIG_EFI is not set
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
# CONFIG_SUSPEND is not set
CONFIG_CPU_FREQ=y
CONFIG_ANDROID_V_CPUFREQ_VIRT=y
CONFIG_VIRTUALIZATION=y
@@ -52,8 +52,8 @@ CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
# CONFIG_COMPAT_BRK is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
# CONFIG_ZONE_DMA is not set
# CONFIG_ZONE_DMA32 is not set
CONFIG_ANON_VMA_NAME=y
CONFIG_USERFAULTFD=y
CONFIG_LRU_GEN=y
@@ -70,8 +70,7 @@ CONFIG_PCI_IOV=y
# CONFIG_VGA_ARB is not set
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCI_ENDPOINT=y
CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_FW_CACHE is not set
# CONFIG_FW_LOADER is not set
CONFIG_ARM_SCMI_PROTOCOL=y
# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
CONFIG_ZRAM=y
@@ -106,19 +105,20 @@ CONFIG_POWER_RESET_SYSCON=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y
# CONFIG_HID is not set
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_NVMEM is not set
CONFIG_RTC_DRV_PL030=y
CONFIG_RTC_DRV_PL031=y
CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_SYSFS_STATS=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_STAGING=y
# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_HWSPINLOCK=y
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_ANDROID_KABI_RESERVE is not set
# CONFIG_ANDROID_VENDOR_OEM_DATA is not set
CONFIG_EXT4_FS=y
# CONFIG_EXT4_USE_FOR_EXT2 is not set
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -138,9 +138,9 @@ CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_HCTR2=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_POLYVAL_ARM64_CE=y


@@ -115,6 +115,7 @@ enum __kvm_host_smccc_func {
__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_iova_to_phys,
__KVM_HOST_SMCCC_FUNC___pkvm_host_hvc_pd,
__KVM_HOST_SMCCC_FUNC___pkvm_stage2_snapshot,
__KVM_HOST_SMCCC_FUNC___pkvm_host_iommu_iotlb_sync_map,
/*
* Start of the dynamically registered hypercalls. Start a bit


@@ -54,6 +54,8 @@ void kvm_iommu_iotlb_gather_add_page(struct kvm_hyp_iommu_domain *domain,
void kvm_iommu_host_stage2_idmap(phys_addr_t start, phys_addr_t end,
enum kvm_pgtable_prot prot);
int kvm_iommu_snapshot_host_stage2(struct kvm_hyp_iommu_domain *domain);
int kvm_iommu_iotlb_sync_map(pkvm_handle_t domain_id,
unsigned long iova, size_t size);
#define KVM_IOMMU_PADDR_CACHE_MAX ((size_t)511)
/**
@@ -112,6 +114,7 @@ static inline void kvm_iommu_unlock(struct kvm_hyp_iommu *iommu)
* @map_pages: Map pages in a domain.
* @unmap_pages: Unmap pages from a domain.
* @iova_to_phys: get physical address from IOVA in a domain.
* @iotlb_sync_map: Sync mapping created using @map_pages to the hardware.
*/
struct kvm_iommu_ops {
int (*init)(unsigned long arg);
@@ -138,7 +141,8 @@ struct kvm_iommu_ops {
struct iommu_iotlb_gather *gather,
struct kvm_iommu_paddr_cache *cache);
phys_addr_t (*iova_to_phys)(struct kvm_hyp_iommu_domain *domain, unsigned long iova);
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_USE(1, int (*iotlb_sync_map)(struct kvm_hyp_iommu_domain *domain,
unsigned long iova, size_t size));
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);


@@ -1574,6 +1574,17 @@ static void handle___pkvm_host_iommu_iova_to_phys(struct kvm_cpu_context *host_c
hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
}
static void handle___pkvm_host_iommu_iotlb_sync_map(struct kvm_cpu_context *host_ctxt)
{
unsigned long ret;
DECLARE_REG(pkvm_handle_t, domain, host_ctxt, 1);
DECLARE_REG(unsigned long, iova, host_ctxt, 2);
DECLARE_REG(size_t, size, host_ctxt, 3);
ret = kvm_iommu_iotlb_sync_map(domain, iova, size);
hyp_reqs_smccc_encode(ret, host_ctxt, this_cpu_ptr(&host_hyp_reqs));
}
static void handle___pkvm_iommu_init(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_iommu_ops *, ops, host_ctxt, 1);
@@ -1671,6 +1682,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_host_iommu_iova_to_phys),
HANDLE_FUNC(__pkvm_host_hvc_pd),
HANDLE_FUNC(__pkvm_stage2_snapshot),
HANDLE_FUNC(__pkvm_host_iommu_iotlb_sync_map),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)


@@ -825,3 +825,28 @@ int kvm_iommu_snapshot_host_stage2(struct kvm_hyp_iommu_domain *domain)
return ret;
}
int kvm_iommu_iotlb_sync_map(pkvm_handle_t domain_id,
unsigned long iova, size_t size)
{
struct kvm_hyp_iommu_domain *domain;
int ret;
if (!kvm_iommu_ops || !kvm_iommu_ops->iotlb_sync_map)
return -ENODEV;
if (!size || (iova + size < iova))
return -EINVAL;
if (domain_id == KVM_IOMMU_DOMAIN_IDMAP_ID)
return -EINVAL;
domain = handle_to_domain(domain_id);
if (!domain || domain_get(domain))
return -EINVAL;
ret = kvm_iommu_ops->iotlb_sync_map(domain, iova, size);
domain_put(domain);
return ret;
}
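
A hedged sketch of the matching host-side call path; the wrapper name is hypothetical, only the hypercall ID comes from the hunks above:

static int pkvm_iommu_iotlb_sync_map(pkvm_handle_t domain_id,
				     unsigned long iova, size_t size)
{
	/* Traps to handle___pkvm_host_iommu_iotlb_sync_map() in
	 * hyp-main.c, which unpacks the three registers and calls
	 * kvm_iommu_iotlb_sync_map() at EL2.
	 */
	return kvm_call_hyp_nvhe(__pkvm_host_iommu_iotlb_sync_map,
				 domain_id, iova, size);
}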


@@ -32,9 +32,7 @@ CONFIG_NR_CPUS=32
CONFIG_EFI=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="stack_depot_disable=on cgroup_disable=pressure ioremap_guard panic=-1 bootconfig acpi=noirq"
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
# CONFIG_SUSPEND is not set
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -53,8 +51,7 @@ CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
# CONFIG_COMPAT_BRK is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
# CONFIG_ZONE_DMA is not set
CONFIG_ANON_VMA_NAME=y
CONFIG_USERFAULTFD=y
CONFIG_LRU_GEN=y
@@ -74,8 +71,7 @@ CONFIG_PCI_MSI=y
CONFIG_PCI_IOV=y
# CONFIG_VGA_ARB is not set
CONFIG_PCI_ENDPOINT=y
CONFIG_FW_LOADER_USER_HELPER=y
# CONFIG_FW_CACHE is not set
# CONFIG_FW_LOADER is not set
CONFIG_OF=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -122,11 +118,12 @@ CONFIG_MFD_SYSCON=y
# CONFIG_USB_SUPPORT is not set
CONFIG_EDAC=y
CONFIG_RTC_CLASS=y
CONFIG_DMABUF_HEAPS=y
CONFIG_DMABUF_SYSFS_STATS=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_STAGING=y
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_ANDROID_KABI_RESERVE is not set
# CONFIG_ANDROID_VENDOR_OEM_DATA is not set
CONFIG_LIBNVDIMM=y
CONFIG_EXT4_FS=y
# CONFIG_EXT4_USE_FOR_EXT2 is not set
@@ -200,6 +197,7 @@ CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_CRYPTO_HCTR2=y
CONFIG_CRYPTO_LZO=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_AES_NI_INTEL=y
CONFIG_CRYPTO_POLYVAL_CLMUL_NI=y
CONFIG_CRYPTO_SHA1_SSSE3=y


@@ -682,3 +682,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_folio_remove_rmap_ptes);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pageset_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_xhci_full_reset_on_remove);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mempool_alloc_skip_wait);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_ac);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_rmqueue);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_suitable_zone);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_zone_max_order);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_zone_pageset);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_lru_add_dst);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_isolate_priv_lru);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_pgdat_balanced);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mm_customize_file_is_tiny);


@@ -114,6 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct sk_buff *skb;
unsigned out, in;
size_t nbytes;
u32 offset;
int head;
skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
@@ -156,7 +157,8 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
}
iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
payload_len = skb->len;
offset = VIRTIO_VSOCK_SKB_CB(skb)->offset;
payload_len = skb->len - offset;
hdr = virtio_vsock_hdr(skb);
/* If the packet is greater than the space available in the
@@ -197,8 +199,10 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
break;
}
nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
if (nbytes != payload_len) {
if (skb_copy_datagram_iter(skb,
offset,
&iov_iter,
payload_len)) {
kfree_skb(skb);
vq_err(vq, "Faulted on copying pkt buf\n");
break;
@@ -212,13 +216,13 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
added = true;
skb_pull(skb, payload_len);
VIRTIO_VSOCK_SKB_CB(skb)->offset += payload_len;
total_len += payload_len;
/* If we didn't send all the payload we can requeue the packet
* to send it with the next available buffer.
*/
if (skb->len > 0) {
if (VIRTIO_VSOCK_SKB_CB(skb)->offset < skb->len) {
hdr->flags |= cpu_to_le32(flags_to_restore);
/* We are queueing the same skb to handle
@@ -340,6 +344,10 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
len = iov_length(vq->iov, out);
if (len < VIRTIO_VSOCK_SKB_HEADROOM ||
len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM)
return NULL;
/* len contains both payload and hdr */
skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
if (!skb)
@@ -363,18 +371,15 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
return skb;
/* The pkt is too big or the length in the header is invalid */
if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
payload_len + sizeof(*hdr) > len) {
if (payload_len + sizeof(*hdr) > len) {
kfree_skb(skb);
return NULL;
}
virtio_vsock_skb_rx_put(skb);
virtio_vsock_skb_put(skb, payload_len);
nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
if (nbytes != payload_len) {
vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
payload_len, nbytes);
if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) {
vq_err(vq, "Failed to copy %zu byte payload\n", payload_len);
kfree_skb(skb);
return NULL;
}


@@ -406,23 +406,26 @@ int fuse_lseek_backing(struct fuse_bpf_args *fa, struct file *file, loff_t offse
struct file *backing_file = fuse_file->backing_file;
loff_t ret;
/* TODO: Handle changing of the file handle */
if (offset == 0) {
if (whence == SEEK_CUR) {
flo->offset = file->f_pos;
return flo->offset;
return 0;
}
if (whence == SEEK_SET) {
flo->offset = vfs_setpos(file, 0, 0);
return flo->offset;
return 0;
}
}
inode_lock(file->f_inode);
backing_file->f_pos = file->f_pos;
ret = vfs_llseek(backing_file, fli->offset, fli->whence);
flo->offset = ret;
if (!IS_ERR(ERR_PTR(ret))) {
flo->offset = ret;
ret = 0;
}
inode_unlock(file->f_inode);
return ret;
}
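/* Note on the fix above: fuse_lseek_backing() returns int, so handing the
 * resulting loff_t back as the return value truncated offsets beyond
 * INT_MAX (the selftest below seeks within a 4GiB sparse file). The full
 * 64-bit offset now travels through flo->offset, with 0 returned on
 * success.
 */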
@@ -2363,8 +2366,11 @@ static bool filldir(struct dir_context *ctx, const char *name, int namelen,
return true;
}
static int parse_dirfile(char *buf, size_t nbytes, struct dir_context *ctx)
static int parse_dirfile(char *buf, size_t nbytes, struct dir_context *ctx,
loff_t next_offset)
{
char *buffstart = buf;
while (nbytes >= FUSE_NAME_OFFSET) {
struct fuse_dirent *dirent = (struct fuse_dirent *) buf;
size_t reclen = FUSE_DIRENT_SIZE(dirent);
@@ -2378,12 +2384,18 @@ static int parse_dirfile(char *buf, size_t nbytes, struct dir_context *ctx)
ctx->pos = dirent->off;
if (!dir_emit(ctx, dirent->name, dirent->namelen, dirent->ino,
dirent->type))
break;
dirent->type)) {
// If we can't make any progress, user buffer is too small
if (buf == buffstart)
return -EINVAL;
else
return 0;
}
buf += reclen;
nbytes -= reclen;
}
ctx->pos = next_offset;
return 0;
}
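/* Note on the fix above: parse_dirfile() now receives the backing
 * directory's next offset and only commits it to ctx->pos once the whole
 * buffer has been emitted; fuse_readdir_finalize() then syncs the backing
 * f_pos from ctx->pos instead of unconditionally jumping ahead, so entries
 * that did not fit into the caller's getdents() buffer are re-read rather
 * than skipped. Failing to emit even the first entry means the user buffer
 * cannot hold a single record, hence -EINVAL.
 */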
@@ -2430,13 +2442,12 @@ void *fuse_readdir_finalize(struct fuse_bpf_args *fa,
struct file *backing_dir = ff->backing_file;
int err = 0;
err = parse_dirfile(fa->out_args[1].value, fa->out_args[1].size, ctx);
err = parse_dirfile(fa->out_args[1].value, fa->out_args[1].size, ctx, fro->offset);
*force_again = !!fro->again;
if (*force_again && !*allow_force)
err = -EINVAL;
ctx->pos = fro->offset;
backing_dir->f_pos = fro->offset;
backing_dir->f_pos = ctx->pos;
free_page((unsigned long) fa->out_args[1].value);
return ERR_PTR(err);


@@ -12,6 +12,7 @@
struct virtio_vsock_skb_cb {
bool reply;
bool tap_delivered;
u32 offset;
};
#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
@@ -46,31 +47,50 @@ static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false;
}
static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb)
static inline void virtio_vsock_skb_put(struct sk_buff *skb, u32 len)
{
u32 len;
DEBUG_NET_WARN_ON_ONCE(skb->len);
len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
if (len > 0)
if (skb_is_nonlinear(skb))
skb->len = len;
else
skb_put(skb, len);
}
static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
static inline struct sk_buff *
__virtio_vsock_alloc_skb_with_frags(unsigned int header_len,
unsigned int data_len,
gfp_t mask)
{
struct sk_buff *skb;
int err;
if (size < VIRTIO_VSOCK_SKB_HEADROOM)
return NULL;
skb = alloc_skb(size, mask);
skb = alloc_skb_with_frags(header_len, data_len,
PAGE_ALLOC_COSTLY_ORDER, &err, mask);
if (!skb)
return NULL;
skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
skb->data_len = data_len;
return skb;
}
static inline struct sk_buff *
virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask)
{
return __virtio_vsock_alloc_skb_with_frags(size, 0, mask);
}
static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
{
if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
return virtio_vsock_alloc_linear_skb(size, mask);
size -= VIRTIO_VSOCK_SKB_HEADROOM;
return __virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM,
size, mask);
}
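/* Hedged arithmetic for the cut-over above: with 4KiB pages and
 * PAGE_ALLOC_COSTLY_ORDER == 3, the linear path covers requests up to
 * SKB_WITH_OVERHEAD(32KiB), i.e. 32768 bytes minus the aligned
 * struct skb_shared_info (a few hundred bytes). Anything larger gets a
 * small linear area for the header plus page frags for the payload.
 */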
static inline void
virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb)
{
@@ -110,10 +130,14 @@ static inline size_t virtio_vsock_skb_len(struct sk_buff *skb)
return (size_t)(skb_end_pointer(skb) - skb->head);
}
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
/* Dimension the RX SKB so that the entire thing fits exactly into
* a single 4KiB page. This avoids wasting memory due to alloc_skb()
* rounding up to the next page order and also means that we
* don't leave higher-order pages sitting around in the RX queue.
*/
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE SKB_WITH_OVERHEAD(1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE virtio_transport_max_vsock_pkt_buf_size
extern uint virtio_transport_max_vsock_pkt_buf_size;
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
enum {
VSOCK_VQ_RX = 0, /* for host to guest data */


@@ -615,6 +615,28 @@ DECLARE_HOOK(android_vh_pageset_update,
DECLARE_HOOK(android_vh_mempool_alloc_skip_wait,
TP_PROTO(gfp_t *gfp_flags, bool *skip_wait),
TP_ARGS(gfp_flags, skip_wait));
DECLARE_HOOK(android_vh_mm_customize_ac,
TP_PROTO(gfp_t gfp, unsigned int order, struct zonelist **zonelist,
struct zoneref **preferred_zoneref, enum zone_type *highest_zoneidx,
unsigned int *alloc_flags),
TP_ARGS(gfp, order, zonelist, preferred_zoneref, highest_zoneidx, alloc_flags));
DECLARE_HOOK(android_vh_mm_customize_rmqueue,
TP_PROTO(struct zone *zone, unsigned int order, unsigned int *alloc_flags,
int *migratetype),
TP_ARGS(zone, order, alloc_flags, migratetype));
DECLARE_HOOK(android_vh_mm_customize_suitable_zone,
TP_PROTO(struct zone *zone, gfp_t gfp, int order, enum zone_type highest_zoneidx,
bool *use_this_zone, bool *suitable),
TP_ARGS(zone, gfp, order, highest_zoneidx, use_this_zone, suitable));
DECLARE_HOOK(android_vh_mm_customize_zone_max_order,
TP_PROTO(struct zone *zone, int *max_order),
TP_ARGS(zone, max_order));
DECLARE_HOOK(android_vh_mm_customize_zone_pageset,
TP_PROTO(struct zone *zone, int *new_high, int *new_batch),
TP_ARGS(zone, new_high, new_batch));
DECLARE_HOOK(android_vh_mm_customize_lru_add_dst,
TP_PROTO(struct lruvec *lruvec, struct folio *src, struct folio *dst, bool *added),
TP_ARGS(lruvec, src, dst, added));
#endif /* _TRACE_HOOK_MM_H */
/* This part must be outside protection */
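
These vendor hooks follow the usual Android restricted-tracepoint pattern, so a vendor module attaches through the register_trace_android_vh_*() helpers that DECLARE_HOOK generates. A minimal, hypothetical sketch (the handler and its policy are illustrative only, not part of this change):

#include <trace/hooks/mm.h>

/* Probe signature: a void *data cookie first, then the TP_PROTO args. */
static void my_zone_pageset(void *data, struct zone *zone,
			    int *new_high, int *new_batch)
{
	/* Hypothetical tuning policy: cap the per-cpu batch size. */
	if (*new_batch > 64)
		*new_batch = 64;
}

static int __init my_tuner_init(void)
{
	return register_trace_android_vh_mm_customize_zone_pageset(
			my_zone_pageset, NULL);
}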


@@ -133,6 +133,17 @@ DECLARE_HOOK(android_vh_direct_reclaim_end,
DECLARE_HOOK(android_vh_should_split_folio_to_list,
TP_PROTO(struct folio *folio, bool *should_split_to_list),
TP_ARGS(folio, should_split_to_list));
DECLARE_HOOK(android_vh_mm_isolate_priv_lru,
TP_PROTO(unsigned long nr_to_scan, struct lruvec *lruvec, enum lru_list lru,
struct list_head *dst, int reclaim_idx, bool may_unmap,
unsigned long *nr_scanned, unsigned long *nr_taken),
TP_ARGS(nr_to_scan, lruvec, lru, dst, reclaim_idx, may_unmap, nr_scanned, nr_taken));
DECLARE_HOOK(android_vh_mm_customize_pgdat_balanced,
TP_PROTO(int order, int highest_zoneidx, bool *balanced, bool *customized),
TP_ARGS(order, highest_zoneidx, balanced, customized));
DECLARE_HOOK(android_vh_mm_customize_file_is_tiny,
TP_PROTO(unsigned int may_swap, int order, int highest_zoneidx, bool *file_is_tiny),
TP_ARGS(may_swap, order, highest_zoneidx, file_is_tiny));
#endif /* _TRACE_HOOK_VMSCAN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@@ -1128,6 +1128,17 @@ static unsigned int __init add_to_rb(struct printk_ringbuffer *rb,
static char setup_text_buf[PRINTKRB_RECORD_MAX] __initdata;
static void print_log_buf_usage_stats(void)
{
unsigned int descs_count = log_buf_len >> PRB_AVGBITS;
size_t meta_data_size;
meta_data_size = descs_count * (sizeof(struct prb_desc) + sizeof(struct printk_info));
pr_info("log buffer data + meta data: %u + %zu = %zu bytes\n",
log_buf_len, meta_data_size, log_buf_len + meta_data_size);
}
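/* Worked example with hedged sizes: for a 128KiB log buffer and
 * PRB_AVGBITS == 5, descs_count is 131072 >> 5 = 4096. On a 64-bit
 * kernel, struct prb_desc plus struct printk_info cost roughly 100
 * bytes per record, so the metadata can dwarf the text buffer itself;
 * that is exactly what this statistic makes visible.
 */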
void __init setup_log_buf(int early)
{
struct printk_info *new_infos;
@@ -1157,20 +1168,25 @@ void __init setup_log_buf(int early)
if (!early && !new_log_buf_len)
log_buf_add_cpu();
if (!new_log_buf_len)
if (!new_log_buf_len) {
/* Show the memory stats only once. */
if (!early)
goto out;
return;
}
new_descs_count = new_log_buf_len >> PRB_AVGBITS;
if (new_descs_count == 0) {
pr_err("new_log_buf_len: %lu too small\n", new_log_buf_len);
return;
goto out;
}
new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
if (unlikely(!new_log_buf)) {
pr_err("log_buf_len: %lu text bytes not available\n",
new_log_buf_len);
return;
goto out;
}
new_descs_size = new_descs_count * sizeof(struct prb_desc);
@@ -1233,7 +1249,7 @@ void __init setup_log_buf(int early)
prb_next_seq(&printk_rb_static) - seq);
}
pr_info("log_buf_len: %u bytes\n", log_buf_len);
print_log_buf_usage_stats();
pr_info("early log buf free: %u(%u%%)\n",
free, (free * 100) / __LOG_BUF_LEN);
return;
@@ -1242,6 +1258,8 @@ err_free_descs:
memblock_free(new_descs, new_descs_size);
err_free_log_buf:
memblock_free(new_log_buf, new_log_buf_len);
out:
print_log_buf_usage_stats();
}
static bool __read_mostly ignore_loglevel;


@@ -2886,11 +2886,13 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_folio(struct folio *folio)
{
enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD |
TTU_SYNC;
enum ttu_flags ttu_flags = TTU_RMAP_LOCKED | TTU_SYNC;
VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
if (folio_test_pmd_mappable(folio))
ttu_flags |= TTU_SPLIT_HUGE_PMD;
/*
* Anon pages need migration entries to preserve them, but file
* pages can simply be left unmapped, then faulted back on demand.
@@ -3119,6 +3121,12 @@ static void reset_src_folio(struct folio *src)
static bool lru_add_dst(struct lruvec *lruvec, struct folio *src, struct folio *dst)
{
bool added = false;
trace_android_vh_mm_customize_lru_add_dst(lruvec, src, dst, &added);
if (added)
return true;
if (folio_can_split(src))
return false;
@@ -3338,7 +3346,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
if (nr_dropped)
shmem_uncharge(head->mapping->host, nr_dropped);
remap_page(folio, nr, PageAnon(head) ? RMP_USE_SHARED_ZEROPAGE : 0);
remap_page(folio, nr, (can_split && PageAnon(head)) ? RMP_USE_SHARED_ZEROPAGE : 0);
for (i = 0; i < nr; i++) {
struct page *subpage = folio_dst_page(folio, i);


@@ -869,7 +869,10 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
static int zone_max_order(struct zone *zone)
{
return zone->order && zone_idx(zone) == ZONE_NOMERGE ? zone->order : MAX_ORDER;
int max_order = zone->order && zone_idx(zone) == ZONE_NOMERGE ? zone->order : MAX_ORDER;
trace_android_vh_mm_customize_zone_max_order(zone, &max_order);
return max_order;
}
/*
@@ -3087,6 +3090,8 @@ struct page *rmqueue(struct zone *preferred_zone,
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
trace_android_vh_mm_customize_rmqueue(zone, order, &alloc_flags, &migratetype);
if (likely(pcp_allowed_order(order))) {
page = rmqueue_pcplist(preferred_zone, zone, order,
migratetype, alloc_flags);
@@ -3546,12 +3551,25 @@ retry:
z = ac->preferred_zoneref;
for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
ac->nodemask) {
bool use_this_zone = false;
bool suitable = true;
struct page *page;
unsigned long mark;
if (!zone_is_suitable(zone, order))
continue;
trace_android_vh_mm_customize_suitable_zone(zone, gfp_mask, order, ac->highest_zoneidx,
&use_this_zone, &suitable);
if (!suitable)
continue;
if (use_this_zone)
goto try_this_zone;
/*
* This hook is deprecated by trace_android_vh_mm_customize_suitable_zone.
*/
trace_android_vh_should_skip_zone(zone, gfp_mask, order,
ac->migratetype, &should_skip_zone);
if (should_skip_zone)
@@ -4993,6 +5011,9 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
&alloc_gfp, &alloc_flags))
return NULL;
trace_android_vh_mm_customize_ac(gfp, order, &ac.zonelist, &ac.preferred_zoneref,
&ac.highest_zoneidx, &alloc_flags);
trace_android_rvh_try_alloc_pages_gfp(&page, order, gfp, gfp_zone(gfp));
if (page)
goto out;
@@ -5953,6 +5974,8 @@ static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
zone->pageset_batch == new_batch)
return;
trace_android_vh_mm_customize_zone_pageset(zone, &new_high, &new_batch);
zone->pageset_high = new_high;
zone->pageset_batch = new_batch;


@@ -2390,6 +2390,12 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan,
unsigned long skipped = 0;
unsigned long scan, total_scan, nr_pages;
LIST_HEAD(folios_skipped);
unsigned long nr_scanned_before = *nr_scanned;
trace_android_vh_mm_isolate_priv_lru(nr_to_scan, lruvec, lru, dst, sc->reclaim_idx,
sc->may_unmap, nr_scanned, &nr_taken);
if (*nr_scanned != nr_scanned_before)
return nr_taken;
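/* Protocol implied above: a vendor handler signals that it performed the
 * isolation itself by advancing *nr_scanned; in that case 'dst' and
 * nr_taken were filled in by the hook and the stock isolation loop below
 * is skipped. The callers below now initialize nr_scanned to 0 so the
 * before/after comparison starts from a defined value.
 */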
total_scan = 0;
scan = 0;
@@ -2647,7 +2653,7 @@ unsigned long shrink_inactive_list(unsigned long nr_to_scan,
enum lru_list lru)
{
LIST_HEAD(folio_list);
unsigned long nr_scanned;
unsigned long nr_scanned = 0;
unsigned int nr_reclaimed = 0;
unsigned long nr_taken;
struct reclaim_stat stat;
@@ -2770,7 +2776,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
enum lru_list lru)
{
unsigned long nr_taken;
unsigned long nr_scanned;
unsigned long nr_scanned = 0;
unsigned long vm_flags;
LIST_HEAD(l_hold); /* The folios which were snipped off */
LIST_HEAD(l_active);
@@ -3014,6 +3020,15 @@ static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
return inactive * inactive_ratio < active;
}
static void customize_sc_file_is_tiny(struct scan_control *sc)
{
bool file_is_tiny = sc->file_is_tiny;
trace_android_vh_mm_customize_file_is_tiny(sc->may_swap, sc->order,
sc->reclaim_idx, &file_is_tiny);
sc->file_is_tiny = file_is_tiny;
}
enum scan_balance {
SCAN_EQUAL,
SCAN_FRACT,
@@ -3131,6 +3146,8 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
!(sc->may_deactivate & DEACTIVATE_ANON) &&
anon >> sc->priority;
}
customize_sc_file_is_tiny(sc);
}
/*
@@ -7478,8 +7495,15 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
{
int i;
unsigned long mark = -1;
bool customized = false;
bool balanced = false;
struct zone *zone;
trace_android_vh_mm_customize_pgdat_balanced(order, highest_zoneidx,
&balanced, &customized);
if (customized)
return balanced;
/*
* Check watermarks bottom-up as lower zones are more likely to
* meet watermarks.


@@ -63,6 +63,17 @@ struct virtio_vsock {
u32 guest_cid;
bool seqpacket_allow;
/* These fields are used only in tx path in function
* 'virtio_transport_send_pkt_work()', so to save
* stack space in it, place both of them here. Each
* pointer from 'out_sgs' points to the corresponding
* element in 'out_bufs' - this is initialized in
* 'virtio_vsock_probe()'. Both fields are protected
* by 'tx_lock'. +1 is needed for packet header.
*/
struct scatterlist *out_sgs[MAX_SKB_FRAGS + 1];
struct scatterlist out_bufs[MAX_SKB_FRAGS + 1];
};
static u32 virtio_transport_get_local_cid(void)
@@ -100,8 +111,8 @@ virtio_transport_send_pkt_work(struct work_struct *work)
vq = vsock->vqs[VSOCK_VQ_TX];
for (;;) {
struct scatterlist hdr, buf, *sgs[2];
int ret, in_sg = 0, out_sg = 0;
struct scatterlist **sgs;
struct sk_buff *skb;
bool reply;
@@ -110,12 +121,43 @@ virtio_transport_send_pkt_work(struct work_struct *work)
break;
reply = virtio_vsock_skb_reply(skb);
sgs = vsock->out_sgs;
sg_init_one(sgs[out_sg], virtio_vsock_hdr(skb),
sizeof(*virtio_vsock_hdr(skb)));
out_sg++;
sg_init_one(&hdr, virtio_vsock_hdr(skb), sizeof(*virtio_vsock_hdr(skb)));
sgs[out_sg++] = &hdr;
if (skb->len > 0) {
sg_init_one(&buf, skb->data, skb->len);
sgs[out_sg++] = &buf;
if (!skb_is_nonlinear(skb)) {
if (skb->len > 0) {
sg_init_one(sgs[out_sg], skb->data, skb->len);
out_sg++;
}
} else {
struct skb_shared_info *si;
int i;
/* If skb is nonlinear, then its buffer must contain
* only header and nothing more. Data is stored in
* the fragged part.
*/
WARN_ON_ONCE(skb_headroom(skb) != sizeof(*virtio_vsock_hdr(skb)));
si = skb_shinfo(skb);
for (i = 0; i < si->nr_frags; i++) {
skb_frag_t *skb_frag = &si->frags[i];
void *va;
/* We will use 'page_to_virt()' for the userspace page
* here, because virtio or dma-mapping layers will call
* 'virt_to_phys()' later to fill the buffer descriptor.
* We don't touch memory at "virtual" address of this page.
*/
va = page_to_virt(skb_frag->bv_page);
sg_init_one(sgs[out_sg],
va + skb_frag->bv_offset,
skb_frag->bv_len);
out_sg++;
}
}
ret = virtqueue_add_sgs(vq, sgs, out_sg, in_sg, skb, GFP_KERNEL);
@@ -221,7 +263,7 @@ out_rcu:
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE + VIRTIO_VSOCK_SKB_HEADROOM;
int total_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
struct scatterlist pkt, *p;
struct virtqueue *vq;
struct sk_buff *skb;
@@ -230,7 +272,7 @@ static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
vq = vsock->vqs[VSOCK_VQ_RX];
do {
skb = virtio_vsock_alloc_skb(total_len, GFP_KERNEL);
skb = virtio_vsock_alloc_linear_skb(total_len, GFP_KERNEL);
if (!skb)
break;
@@ -497,8 +539,9 @@ static void virtio_transport_rx_work(struct work_struct *work)
do {
virtqueue_disable_cb(vq);
for (;;) {
unsigned int len, payload_len;
struct virtio_vsock_hdr *hdr;
struct sk_buff *skb;
unsigned int len;
if (!virtio_transport_more_replies(vsock)) {
/* Stop rx until the device processes already
@@ -515,13 +558,22 @@ static void virtio_transport_rx_work(struct work_struct *work)
vsock->rx_buf_nr--;
/* Drop short/long packets */
if (unlikely(len < sizeof(struct virtio_vsock_hdr) ||
if (unlikely(len < sizeof(*hdr) ||
len > virtio_vsock_skb_len(skb))) {
kfree_skb(skb);
continue;
}
virtio_vsock_skb_rx_put(skb);
hdr = virtio_vsock_hdr(skb);
payload_len = le32_to_cpu(hdr->len);
if (unlikely(payload_len > len - sizeof(*hdr))) {
kfree_skb(skb);
continue;
}
if (payload_len)
virtio_vsock_skb_put(skb, payload_len);
virtio_transport_deliver_tap_pkt(skb);
virtio_transport_recv_pkt(&virtio_transport, skb);
}
@@ -637,6 +689,7 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
{
struct virtio_vsock *vsock = NULL;
int ret;
int i;
ret = mutex_lock_interruptible(&the_virtio_vsock_mutex);
if (ret)
@@ -679,6 +732,9 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
if (ret < 0)
goto out;
for (i = 0; i < ARRAY_SIZE(vsock->out_sgs); i++)
vsock->out_sgs[i] = &vsock->out_bufs[i];
rcu_assign_pointer(the_virtio_vsock, vsock);
virtio_vsock_vqs_start(vsock);


@@ -29,10 +29,6 @@
static void virtio_transport_cancel_close_work(struct vsock_sock *vsk,
bool cancel_timeout);
uint virtio_transport_max_vsock_pkt_buf_size = 64 * 1024;
module_param(virtio_transport_max_vsock_pkt_buf_size, uint, 0444);
EXPORT_SYMBOL_GPL(virtio_transport_max_vsock_pkt_buf_size);
static const struct virtio_transport *
virtio_transport_get_ops(struct vsock_sock *vsk)
{
@@ -59,7 +55,6 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
const size_t skb_len = VIRTIO_VSOCK_SKB_HEADROOM + len;
struct virtio_vsock_hdr *hdr;
struct sk_buff *skb;
void *payload;
int err;
skb = virtio_vsock_alloc_skb(skb_len, GFP_KERNEL);
@@ -79,8 +74,8 @@ virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *info,
hdr->fwd_cnt = cpu_to_le32(0);
if (info->msg && len > 0) {
payload = skb_put(skb, len);
err = memcpy_from_msg(payload, info->msg, len);
virtio_vsock_skb_put(skb, len);
err = skb_copy_datagram_from_iter(skb, 0, &info->msg->msg_iter, len);
if (err)
goto out;
@@ -115,6 +110,27 @@ out:
return NULL;
}
static void virtio_transport_copy_nonlinear_skb(const struct sk_buff *skb,
void *dst,
size_t len)
{
struct iov_iter iov_iter = { 0 };
struct kvec kvec;
size_t to_copy;
kvec.iov_base = dst;
kvec.iov_len = len;
iov_iter.iter_type = ITER_KVEC;
iov_iter.kvec = &kvec;
iov_iter.nr_segs = 1;
to_copy = min_t(size_t, len, skb->len);
skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
&iov_iter, to_copy);
}
/* Packet capture */
static struct sk_buff *virtio_transport_build_skb(void *opaque)
{
@@ -123,7 +139,6 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
struct af_vsockmon_hdr *hdr;
struct sk_buff *skb;
size_t payload_len;
void *payload_buf;
/* A packet could be split to fit the RX buffer, so we can retrieve
* the payload length from the header and the buffer pointer taking
@@ -131,7 +146,6 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
*/
pkt_hdr = virtio_vsock_hdr(pkt);
payload_len = pkt->len;
payload_buf = pkt->data;
skb = alloc_skb(sizeof(*hdr) + sizeof(*pkt_hdr) + payload_len,
GFP_ATOMIC);
@@ -174,7 +188,13 @@ static struct sk_buff *virtio_transport_build_skb(void *opaque)
skb_put_data(skb, pkt_hdr, sizeof(*pkt_hdr));
if (payload_len) {
skb_put_data(skb, payload_buf, payload_len);
if (skb_is_nonlinear(pkt)) {
void *data = skb_put(skb, payload_len);
virtio_transport_copy_nonlinear_skb(pkt, data, payload_len);
} else {
skb_put_data(skb, pkt->data, payload_len);
}
}
return skb;
@@ -206,6 +226,24 @@ static u16 virtio_transport_get_type(struct sock *sk)
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
struct virtio_vsock_pkt_info *info)
{
/* ANDROID:
*
* Older host kernels (including the 5.10-based images used by
* Cuttlefish) only support linear SKBs on the RX path.
* Consequently, if we transmit a VIRTIO_VSOCK_MAX_PKT_BUF_SIZE
* packet, the host allocation can fail and the packet will be
* silently dropped.
*
* As a nasty workaround, limit the entire SKB to ~28KiB, which
* allows for 4KiB of SKB wiggle room whilst keeping the
* allocation below PAGE_ALLOC_COSTLY_ORDER.
*
* This can be removed when all supported host kernels have
* support for non-linear RX buffers introduced by Change-Id
* I4212a8daf9f19b5bbffc06ce93338c823de7bb19.
*/
u32 max_skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE,
SKB_WITH_OVERHEAD(SZ_32K - VIRTIO_VSOCK_SKB_HEADROOM) - SZ_4K);
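/* Hedged back-of-envelope for the "~28KiB" above, assuming a 44-byte
 * struct virtio_vsock_hdr as VIRTIO_VSOCK_SKB_HEADROOM and an aligned
 * skb_shared_info of a few hundred bytes: (32768 - 44) - ~320 - 4096
 * leaves roughly 28300 bytes of payload, keeping the whole allocation
 * within PAGE_ALLOC_COSTLY_ORDER (32KiB).
 */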
u32 src_cid, src_port, dst_cid, dst_port;
const struct virtio_transport *t_ops;
struct virtio_vsock_sock *vvs;
@@ -244,7 +282,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
struct sk_buff *skb;
size_t skb_len;
skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE, rest_len);
skb_len = min_t(u32, max_skb_len, rest_len);
skb = virtio_transport_alloc_skb(info, skb_len,
src_cid, src_port,
@@ -373,9 +411,10 @@ virtio_transport_stream_do_peek(struct vsock_sock *vsk,
spin_unlock_bh(&vvs->rx_lock);
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
* Unlock rx_lock since skb_copy_datagram_iter() may sleep.
*/
err = memcpy_to_msg(msg, skb->data, bytes);
err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
&msg->msg_iter, bytes);
if (err)
goto out;
@@ -421,25 +460,27 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk,
while (total < len && !skb_queue_empty(&vvs->rx_queue)) {
skb = skb_peek(&vvs->rx_queue);
bytes = len - total;
if (bytes > skb->len)
bytes = skb->len;
bytes = min_t(size_t, len - total,
skb->len - VIRTIO_VSOCK_SKB_CB(skb)->offset);
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
* Unlock rx_lock since skb_copy_datagram_iter() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);
err = memcpy_to_msg(msg, skb->data, bytes);
err = skb_copy_datagram_iter(skb,
VIRTIO_VSOCK_SKB_CB(skb)->offset,
&msg->msg_iter, bytes);
if (err)
goto out;
spin_lock_bh(&vvs->rx_lock);
total += bytes;
skb_pull(skb, bytes);
if (skb->len == 0) {
VIRTIO_VSOCK_SKB_CB(skb)->offset += bytes;
if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->offset) {
u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len);
virtio_transport_dec_rx_pkt(vvs, pkt_len);
@@ -508,9 +549,10 @@ virtio_transport_seqpacket_do_peek(struct vsock_sock *vsk,
spin_unlock_bh(&vvs->rx_lock);
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
* Unlock rx_lock since skb_copy_datagram_iter() may sleep.
*/
err = memcpy_to_msg(msg, skb->data, bytes);
err = skb_copy_datagram_iter(skb, VIRTIO_VSOCK_SKB_CB(skb)->offset,
&msg->msg_iter, bytes);
if (err)
return err;
@@ -569,11 +611,13 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
int err;
/* sk_lock is held by caller so no one else can dequeue.
* Unlock rx_lock since memcpy_to_msg() may sleep.
* Unlock rx_lock since skb_copy_datagram_iter() may sleep.
*/
spin_unlock_bh(&vvs->rx_lock);
err = memcpy_to_msg(msg, skb->data, bytes_to_copy);
err = skb_copy_datagram_iter(skb, 0,
&msg->msg_iter,
bytes_to_copy);
if (err) {
/* Copy of message failed. Rest of
* fragments will be freed without copy.


@@ -255,7 +255,7 @@ static int bpf_test_partial(const char *mount_dir)
TEST(src_fd = open(ft_src, O_DIRECTORY | O_RDONLY | O_CLOEXEC),
src_fd != -1);
TESTEQUAL(create_file(src_fd, s(test_name), 1, 2), 0);
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_trace",
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_partial",
&bpf_fd, NULL, NULL), 0);
TESTEQUAL(mount_fuse(mount_dir, bpf_fd, src_fd, &fuse_dev), 0);
@@ -363,7 +363,7 @@ static int bpf_test_readdir(const char *mount_dir)
src_fd != -1);
TESTEQUAL(create_file(src_fd, s(names[0]), 1, 2), 0);
TESTEQUAL(create_file(src_fd, s(names[1]), 1, 2), 0);
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_trace",
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_partial",
&bpf_fd, NULL, NULL), 0);
TESTEQUAL(mount_fuse(mount_dir, bpf_fd, src_fd, &fuse_dev), 0);
@@ -1490,6 +1490,8 @@ out:
static int bpf_test_lseek(const char *mount_dir)
{
const char *file = "real";
const char *sparse_file = "sparse";
const off_t sparse_length = 0x100000000u;
const char *test_data = "data";
int result = TEST_FAILURE;
int src_fd = -1;
@@ -1504,6 +1506,12 @@ static int bpf_test_lseek(const char *mount_dir)
TESTEQUAL(write(fd, test_data, strlen(test_data)), strlen(test_data));
TESTSYSCALL(close(fd));
fd = -1;
TEST(fd = openat(src_fd, sparse_file, O_CREAT | O_RDWR | O_CLOEXEC,
0777),
fd != -1);
TESTSYSCALL(ftruncate(fd, sparse_length));
TESTSYSCALL(close(fd));
fd = -1;
TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_trace",
&bpf_fd, NULL, NULL), 0);
TESTEQUAL(mount_fuse(mount_dir, bpf_fd, src_fd, &fuse_dev), 0);
@@ -1518,6 +1526,18 @@ static int bpf_test_lseek(const char *mount_dir)
TESTEQUAL(bpf_test_trace("lseek"), 0);
TESTEQUAL(lseek(fd, 1, SEEK_DATA), 1);
TESTEQUAL(bpf_test_trace("lseek"), 0);
TESTSYSCALL(close(fd));
fd = -1;
TEST(fd = s_open(s_path(s(mount_dir), s(sparse_file)),
O_RDONLY | O_CLOEXEC),
fd != -1);
TESTEQUAL(lseek(fd, -256, SEEK_END), sparse_length - 256);
TESTEQUAL(lseek(fd, 0, SEEK_CUR), sparse_length - 256);
TESTSYSCALL(close(fd));
fd = -1;
result = TEST_SUCCESS;
out:
close(fd);


@@ -28,9 +28,9 @@ int readdir_test(struct fuse_bpf_args *fa)
}
}
SEC("test_trace")
SEC("test_partial")
/* return FUSE_BPF_BACKING to use backing fs, 0 to pass to usermode */
int trace_test(struct fuse_bpf_args *fa)
int partial_test(struct fuse_bpf_args *fa)
{
switch (fa->opcode) {
case FUSE_LOOKUP | FUSE_PREFILTER: {
@@ -329,6 +329,195 @@ int trace_test(struct fuse_bpf_args *fa)
}
}
SEC("test_trace")
/* return FUSE_BPF_BACKING to use backing fs, 0 to pass to usermode */
int trace_test(struct fuse_bpf_args *fa)
{
switch (fa->opcode) {
case FUSE_LOOKUP | FUSE_PREFILTER: {
/* real and partial use backing file */
const char *name = fa->in_args[0].value;
bpf_printk("lookup %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_ACCESS | FUSE_PREFILTER: {
bpf_printk("Access: %d", fa->nodeid);
return FUSE_BPF_BACKING;
}
case FUSE_CREATE | FUSE_PREFILTER:
bpf_printk("Create: %d", fa->nodeid);
return FUSE_BPF_BACKING;
case FUSE_MKNOD | FUSE_PREFILTER: {
const struct fuse_mknod_in *fmi = fa->in_args[0].value;
const char *name = fa->in_args[1].value;
bpf_printk("mknod %s %x %x", name, fmi->rdev | fmi->mode, fmi->umask);
return FUSE_BPF_BACKING;
}
case FUSE_MKDIR | FUSE_PREFILTER: {
const struct fuse_mkdir_in *fmi = fa->in_args[0].value;
const char *name = fa->in_args[1].value;
bpf_printk("mkdir %s %x %x", name, fmi->mode, fmi->umask);
return FUSE_BPF_BACKING;
}
case FUSE_RMDIR | FUSE_PREFILTER: {
const char *name = fa->in_args[0].value;
bpf_printk("rmdir %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_RENAME | FUSE_PREFILTER: {
const char *oldname = fa->in_args[1].value;
const char *newname = fa->in_args[2].value;
bpf_printk("rename from %s", oldname);
bpf_printk("rename to %s", newname);
return FUSE_BPF_BACKING;
}
case FUSE_RENAME2 | FUSE_PREFILTER: {
const struct fuse_rename2_in *fri = fa->in_args[0].value;
uint32_t flags = fri->flags;
const char *oldname = fa->in_args[1].value;
const char *newname = fa->in_args[2].value;
bpf_printk("rename(%x) from %s", flags, oldname);
bpf_printk("rename to %s", newname);
return FUSE_BPF_BACKING;
}
case FUSE_UNLINK | FUSE_PREFILTER: {
const char *name = fa->in_args[0].value;
bpf_printk("unlink %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_LINK | FUSE_PREFILTER: {
const struct fuse_link_in *fli = fa->in_args[0].value;
const char *link_name = fa->in_args[1].value;
bpf_printk("link %d %s", fli->oldnodeid, link_name);
return FUSE_BPF_BACKING;
}
case FUSE_SYMLINK | FUSE_PREFILTER: {
const char *link_name = fa->in_args[0].value;
const char *link_dest = fa->in_args[1].value;
bpf_printk("symlink from %s", link_name);
bpf_printk("symlink to %s", link_dest);
return FUSE_BPF_BACKING;
}
case FUSE_READLINK | FUSE_PREFILTER: {
const char *link_name = fa->in_args[0].value;
bpf_printk("readlink from", link_name);
return FUSE_BPF_BACKING;
}
case FUSE_OPEN | FUSE_PREFILTER: {
bpf_printk("open");
return FUSE_BPF_BACKING;
}
case FUSE_OPEN | FUSE_POSTFILTER:
bpf_printk("open postfilter");
return FUSE_BPF_USER_FILTER;
case FUSE_READ | FUSE_PREFILTER: {
const struct fuse_read_in *fri = fa->in_args[0].value;
bpf_printk("read %llu", fri->offset);
return FUSE_BPF_BACKING;
}
case FUSE_GETATTR | FUSE_PREFILTER: {
bpf_printk("getattr");
return FUSE_BPF_BACKING;
}
case FUSE_SETATTR | FUSE_PREFILTER: {
bpf_printk("setattr");
return FUSE_BPF_BACKING;
}
case FUSE_OPENDIR | FUSE_PREFILTER: {
bpf_printk("opendir");
return FUSE_BPF_BACKING;
}
case FUSE_READDIR | FUSE_PREFILTER: {
bpf_printk("readdir");
return FUSE_BPF_BACKING;
}
case FUSE_FLUSH | FUSE_PREFILTER: {
bpf_printk("Flush");
return FUSE_BPF_BACKING;
}
case FUSE_GETXATTR | FUSE_PREFILTER: {
const char *name = fa->in_args[1].value;
bpf_printk("getxattr %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_LISTXATTR | FUSE_PREFILTER: {
const char *name = fa->in_args[1].value;
bpf_printk("listxattr %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_SETXATTR | FUSE_PREFILTER: {
const char *name = fa->in_args[1].value;
unsigned int size = fa->in_args[2].size;
bpf_printk("setxattr %s %u", name, size);
return FUSE_BPF_BACKING;
}
case FUSE_REMOVEXATTR | FUSE_PREFILTER: {
const char *name = fa->in_args[0].value;
bpf_printk("removexattr %s", name);
return FUSE_BPF_BACKING;
}
case FUSE_CANONICAL_PATH | FUSE_PREFILTER: {
bpf_printk("canonical_path");
return FUSE_BPF_BACKING;
}
case FUSE_STATFS | FUSE_PREFILTER: {
bpf_printk("statfs");
return FUSE_BPF_BACKING;
}
case FUSE_LSEEK | FUSE_PREFILTER: {
const struct fuse_lseek_in *fli = fa->in_args[0].value;
bpf_printk("lseek type:%d, offset:%lld", fli->whence, fli->offset);
return FUSE_BPF_BACKING;
}
default:
bpf_printk("Unknown opcode %d", fa->opcode);
return FUSE_BPF_BACKING;
}
}
SEC("test_hidden")
int trace_hidden(struct fuse_bpf_args *fa)
{