From 2bc7bc937c6eefe2d31064b3450e442d4dcb04c4 Mon Sep 17 00:00:00 2001
From: Sandeep Dhavale <dhavale@google.com>
Date: Tue, 6 May 2025 15:57:41 -0700
Subject: [PATCH] BACKPORT: erofs: lazily initialize per-CPU workers and CPU
 hotplug hooks

Currently, when EROFS is built with per-CPU workers, the workers are
started and CPU hotplug hooks are registered during module
initialization. This leads to unnecessary worker start/stop cycles
during CPU hotplug events, particularly on Android devices that
frequently suspend and resume.

This change defers the initialization of per-CPU workers and the
registration of CPU hotplug hooks until the first EROFS mount. This
ensures that these resources are only allocated and managed when EROFS
is actually in use.

The teardown of per-CPU workers and unregistration of CPU hotplug
hooks still occurs during z_erofs_exit_zip_subsystem(), but only if
they were initialized.

Bug: 382400420
Signed-off-by: Sandeep Dhavale <dhavale@google.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Link: https://lore.kernel.org/r/20250506225743.308517-1-dhavale@google.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
(cherry picked from commit 12bf25d1659b1ec55e44fad2485155707062df79)
[dhavale: resolved conflict in z_erofs_init_zip_subsystem();
 erofs_init_managed_cache() was renamed upstream as z_erofs_init_super(),
 so the changes are applied to erofs_init_managed_cache()]
Change-Id: I895b5350e357c2165d44f85cf7a3811e0e48f1b0
---
 fs/erofs/zdata.c | 76 +++++++++++++++++++++++++++++++++---------------
 1 file changed, 53 insertions(+), 23 deletions(-)

diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 1d9b2a61ca63..b0f8b18ff491 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -336,6 +336,7 @@ static struct workqueue_struct *z_erofs_workqueue __read_mostly;
 
 #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
 static struct kthread_worker __rcu **z_erofs_pcpu_workers;
+static atomic_t erofs_percpu_workers_initialized = ATOMIC_INIT(0);
 
 static void erofs_destroy_percpu_workers(void)
 {
@@ -381,12 +382,8 @@ static int erofs_init_percpu_workers(void)
 	}
 	return 0;
 }
-#else
-static inline void erofs_destroy_percpu_workers(void) {}
-static inline int erofs_init_percpu_workers(void) { return 0; }
-#endif
 
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_EROFS_FS_PCPU_KTHREAD)
+#ifdef CONFIG_HOTPLUG_CPU
 static DEFINE_SPINLOCK(z_erofs_pcpu_worker_lock);
 static enum cpuhp_state erofs_cpuhp_state;
 
@@ -443,15 +440,53 @@ static void erofs_cpu_hotplug_destroy(void)
 	if (erofs_cpuhp_state)
 		cpuhp_remove_state_nocalls(erofs_cpuhp_state);
 }
-#else /* !CONFIG_HOTPLUG_CPU || !CONFIG_EROFS_FS_PCPU_KTHREAD */
+#else /* !CONFIG_HOTPLUG_CPU */
 static inline int erofs_cpu_hotplug_init(void) { return 0; }
 static inline void erofs_cpu_hotplug_destroy(void) {}
-#endif
+#endif/* CONFIG_HOTPLUG_CPU */
+
+static int z_erofs_init_pcpu_workers(struct super_block *sb)
+{
+	int err;
+
+	if (atomic_xchg(&erofs_percpu_workers_initialized, 1))
+		return 0;
+
+	err = erofs_init_percpu_workers();
+	if (err) {
+		erofs_err(sb, "per-cpu workers: failed to allocate.");
+		goto err_init_percpu_workers;
+	}
+
+	err = erofs_cpu_hotplug_init();
+	if (err < 0) {
+		erofs_err(sb, "per-cpu workers: failed CPU hotplug init.");
+		goto err_cpuhp_init;
+	}
+	erofs_info(sb, "initialized per-cpu workers successfully.");
+	return err;
+
+err_cpuhp_init:
+	erofs_destroy_percpu_workers();
+err_init_percpu_workers:
+	atomic_set(&erofs_percpu_workers_initialized, 0);
+	return err;
+}
+
+static void z_erofs_destroy_pcpu_workers(void)
+{
+	if (!atomic_xchg(&erofs_percpu_workers_initialized, 0))
+		return;
+	erofs_cpu_hotplug_destroy();
+	erofs_destroy_percpu_workers();
+}
+#else /* !CONFIG_EROFS_FS_PCPU_KTHREAD */
+static inline int z_erofs_init_pcpu_workers(struct super_block *sb) { return 0; }
+static inline void z_erofs_destroy_pcpu_workers(void) {}
+#endif/* CONFIG_EROFS_FS_PCPU_KTHREAD */
 
 void z_erofs_exit_zip_subsystem(void)
 {
-	erofs_cpu_hotplug_destroy();
-	erofs_destroy_percpu_workers();
+	z_erofs_destroy_pcpu_workers();
 	destroy_workqueue(z_erofs_workqueue);
 	z_erofs_destroy_pcluster_pool();
 }
@@ -467,23 +502,12 @@ int __init z_erofs_init_zip_subsystem(void)
 					    WQ_UNBOUND | WQ_HIGHPRI, num_possible_cpus());
 	if (!z_erofs_workqueue) {
 		err = -ENOMEM;
-		goto out_error_workqueue_init;
+		goto out_err_workqueue_init;
 	}
 
-	err = erofs_init_percpu_workers();
-	if (err)
-		goto out_error_pcpu_worker;
-
-	err = erofs_cpu_hotplug_init();
-	if (err < 0)
-		goto out_error_cpuhp_init;
 	return err;
 
-out_error_cpuhp_init:
-	erofs_destroy_percpu_workers();
-out_error_pcpu_worker:
-	destroy_workqueue(z_erofs_workqueue);
-out_error_workqueue_init:
+out_err_workqueue_init:
 	z_erofs_destroy_pcluster_pool();
 out_error_pcluster_pool:
 	return err;
@@ -711,8 +735,14 @@ static const struct address_space_operations z_erofs_cache_aops = {
 
 int erofs_init_managed_cache(struct super_block *sb)
 {
-	struct inode *const inode = new_inode(sb);
+	struct inode *inode;
+	int err;
 
+	err = z_erofs_init_pcpu_workers(sb);
+	if (err)
+		return err;
+
+	inode = new_inode(sb);
 	if (!inode)
 		return -ENOMEM;
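
Editor's note: the core of this patch is a one-shot, race-safe lazy-init
gate built on atomic_xchg(). Below is a minimal user-space sketch of that
pattern, assuming C11 <stdatomic.h>; lazy_init(), lazy_destroy(),
expensive_setup() and expensive_teardown() are hypothetical stand-ins for
z_erofs_init_pcpu_workers(), z_erofs_destroy_pcpu_workers() and the
worker/hotplug setup they wrap, not kernel APIs.

/* Sketch of the atomic_xchg()-based one-shot init/teardown pattern.
 * The kernel code uses atomic_xchg() on an atomic_t; here we use the
 * equivalent C11 atomic_exchange() on an atomic_int. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int initialized;	/* mirrors erofs_percpu_workers_initialized */

static int expensive_setup(void) { puts("setup"); return 0; }
static void expensive_teardown(void) { puts("teardown"); }

/* First caller flips 0 -> 1 and performs setup; concurrent or later
 * callers see the old value 1 and return immediately, so setup runs at
 * most once per init cycle. */
static int lazy_init(void)
{
	int err;

	if (atomic_exchange(&initialized, 1))
		return 0;	/* already initialized (or another caller won) */

	err = expensive_setup();
	if (err)
		atomic_store(&initialized, 0);	/* re-arm so a later call can retry */
	return err;
}

/* Tears down only if init actually ran; flipping 1 -> 0 also re-arms
 * lazy_init() for the next use. */
static void lazy_destroy(void)
{
	if (!atomic_exchange(&initialized, 0))
		return;
	expensive_teardown();
}

int main(void)
{
	lazy_init();	/* runs setup */
	lazy_init();	/* no-op: gate already set */
	lazy_destroy();	/* runs teardown */
	lazy_destroy();	/* no-op: gate already cleared */
	return 0;
}

The same exchange that disarms teardown also re-arms init, which is why
the patch can call z_erofs_init_pcpu_workers() on every mount and
z_erofs_destroy_pcpu_workers() unconditionally at module exit.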