ANDROID: fixup task_struct to avoid ABI breakage

Reuse task_struct.worker_private to store task_dma_buf_info pointer and
avoid adding new task_struct members that would lead to ABI breakage.
This aliasing works because task_struct.worker_private is used only for
kthreads and io_workers, while task_dma_buf_info is used only for user tasks.

Bug: 424648392
Change-Id: I2caa708d8a729095b308932c1b35c3157835639b
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
This commit is contained in:
Suren Baghdasaryan
2025-07-03 20:22:25 +00:00
parent 0bf76c5311
commit 59af12872d
6 changed files with 180 additions and 90 deletions

View File

@@ -168,11 +168,21 @@ static struct file_system_type dma_buf_fs_type = {
static struct task_dma_buf_record *find_task_dmabuf_record(
struct task_struct *task, struct dma_buf *dmabuf)
{
struct task_dma_buf_info *dmabuf_info = get_task_dma_buf_info(task);
struct task_dma_buf_record *rec;
lockdep_assert_held(&task->dmabuf_info->lock);
if (!dmabuf_info)
return NULL;
list_for_each_entry(rec, &task->dmabuf_info->dmabufs, node)
if (IS_ERR(dmabuf_info)) {
pr_err("%s dmabuf accounting record is missing, error %ld\n",
__func__, PTR_ERR(dmabuf_info));
return NULL;
}
lockdep_assert_held(&dmabuf_info->lock);
list_for_each_entry(rec, &dmabuf_info->dmabufs, node)
if (dmabuf == rec->dmabuf)
return rec;
@@ -181,26 +191,36 @@ static struct task_dma_buf_record *find_task_dmabuf_record(
static int new_task_dmabuf_record(struct task_struct *task, struct dma_buf *dmabuf)
{
struct task_dma_buf_info *dmabuf_info = get_task_dma_buf_info(task);
struct task_dma_buf_record *rec;
lockdep_assert_held(&task->dmabuf_info->lock);
if (!dmabuf_info)
return 0;
if (IS_ERR(dmabuf_info)) {
pr_err("%s dmabuf accounting record is missing, error %ld\n",
__func__, PTR_ERR(dmabuf_info));
return PTR_ERR(dmabuf_info);
}
lockdep_assert_held(&dmabuf_info->lock);
rec = kmalloc(sizeof(*rec), GFP_KERNEL);
if (!rec)
return -ENOMEM;
task->dmabuf_info->rss += dmabuf->size;
dmabuf_info->rss += dmabuf->size;
/*
* task->dmabuf_info->lock protects against concurrent writers, so no
* dmabuf_info->lock protects against concurrent writers, so no
* worries about stale rss_hwm between the read and write, and we don't
* need to cmpxchg here.
*/
if (task->dmabuf_info->rss > task->dmabuf_info->rss_hwm)
task->dmabuf_info->rss_hwm = task->dmabuf_info->rss;
if (dmabuf_info->rss > dmabuf_info->rss_hwm)
dmabuf_info->rss_hwm = dmabuf_info->rss;
rec->dmabuf = dmabuf;
rec->refcnt = 1;
list_add(&rec->node, &task->dmabuf_info->dmabufs);
list_add(&rec->node, &dmabuf_info->dmabufs);
atomic64_inc(&dmabuf->num_unique_refs);
@@ -222,24 +242,30 @@ static int new_task_dmabuf_record(struct task_struct *task, struct dma_buf *dmab
*/
int dma_buf_account_task(struct dma_buf *dmabuf, struct task_struct *task)
{
struct task_dma_buf_info *dmabuf_info;
struct task_dma_buf_record *rec;
int ret = 0;
if (!dmabuf || !task)
return -EINVAL;
if (!task->dmabuf_info) {
pr_err("%s dmabuf accounting record was not allocated\n", __func__);
return -ENOMEM;
dmabuf_info = get_task_dma_buf_info(task);
if (!dmabuf_info)
return 0;
if (IS_ERR(dmabuf_info)) {
pr_err("%s dmabuf accounting record is missing, error %ld\n",
__func__, PTR_ERR(dmabuf_info));
return PTR_ERR(dmabuf_info);
}
spin_lock(&task->dmabuf_info->lock);
spin_lock(&dmabuf_info->lock);
rec = find_task_dmabuf_record(task, dmabuf);
if (!rec)
ret = new_task_dmabuf_record(task, dmabuf);
else
++rec->refcnt;
spin_unlock(&task->dmabuf_info->lock);
spin_unlock(&dmabuf_info->lock);
return ret;
}
@@ -260,17 +286,22 @@ int dma_buf_account_task(struct dma_buf *dmabuf, struct task_struct *task)
*/
void dma_buf_unaccount_task(struct dma_buf *dmabuf, struct task_struct *task)
{
struct task_dma_buf_info *dmabuf_info = get_task_dma_buf_info(task);
struct task_dma_buf_record *rec;
if (!dmabuf || !task)
return;
if (!task->dmabuf_info) {
pr_err("%s dmabuf accounting record was not allocated\n", __func__);
return;
if (!dmabuf_info)
return;
if (IS_ERR(dmabuf_info)) {
pr_err("%s dmabuf accounting record is missing, error %ld\n",
__func__, PTR_ERR(dmabuf_info));
return;
}
spin_lock(&task->dmabuf_info->lock);
spin_lock(&dmabuf_info->lock);
rec = find_task_dmabuf_record(task, dmabuf);
if (!rec) { /* Failed fd_install? */
pr_err("dmabuf not found in task list\n");
@@ -280,11 +311,11 @@ void dma_buf_unaccount_task(struct dma_buf *dmabuf, struct task_struct *task)
if (--rec->refcnt == 0) {
list_del(&rec->node);
kfree(rec);
task->dmabuf_info->rss -= dmabuf->size;
dmabuf_info->rss -= dmabuf->size;
atomic64_dec(&dmabuf->num_unique_refs);
}
err:
spin_unlock(&task->dmabuf_info->lock);
spin_unlock(&dmabuf_info->lock);
}
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)

View File

@@ -3309,21 +3309,27 @@ static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns,
static int proc_dmabuf_rss_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
if (!task->dmabuf_info) {
pr_err("%s dmabuf accounting record was not allocated\n", __func__);
return -ENOMEM;
struct task_dma_buf_info *dmabuf_info = get_task_dma_buf_info(task);
if (!dmabuf_info) {
seq_puts(m, "0\n");
return 0;
}
if (!(task->flags & PF_KTHREAD))
seq_printf(m, "%lld\n", READ_ONCE(task->dmabuf_info->rss));
else
seq_puts(m, "0\n");
if (IS_ERR(dmabuf_info)) {
pr_err("%s dmabuf accounting record is missing, error %ld\n",
__func__, PTR_ERR(dmabuf_info));
return PTR_ERR(dmabuf_info);
}
seq_printf(m, "%lld\n", READ_ONCE(dmabuf_info->rss));
return 0;
}
static int proc_dmabuf_rss_hwm_show(struct seq_file *m, void *v)
{
struct task_dma_buf_info *dmabuf_info;
struct inode *inode = m->private;
struct task_struct *task;
int ret = 0;
@@ -3332,16 +3338,20 @@ static int proc_dmabuf_rss_hwm_show(struct seq_file *m, void *v)
if (!task)
return -ESRCH;
if (!task->dmabuf_info) {
pr_err("%s dmabuf accounting record was not allocated\n", __func__);
ret = -ENOMEM;
dmabuf_info = get_task_dma_buf_info(task);
if (!dmabuf_info) {
seq_puts(m, "0\n");
goto out;
}
if (!(task->flags & PF_KTHREAD))
seq_printf(m, "%lld\n", READ_ONCE(task->dmabuf_info->rss_hwm));
else
seq_puts(m, "0\n");
if (IS_ERR(dmabuf_info)) {
pr_err("%s dmabuf accounting record is missing, error %ld\n",
__func__, PTR_ERR(dmabuf_info));
ret = PTR_ERR(dmabuf_info);
goto out;
}
seq_printf(m, "%lld\n", READ_ONCE(dmabuf_info->rss_hwm));
out:
put_task_struct(task);
@@ -3358,6 +3368,7 @@ static ssize_t
proc_dmabuf_rss_hwm_write(struct file *file, const char __user *buf,
size_t count, loff_t *offset)
{
struct task_dma_buf_info *dmabuf_info;
struct inode *inode = file_inode(file);
struct task_struct *task;
unsigned long long val;
@@ -3374,15 +3385,22 @@ proc_dmabuf_rss_hwm_write(struct file *file, const char __user *buf,
if (!task)
return -ESRCH;
if (!task->dmabuf_info) {
pr_err("%s dmabuf accounting record was not allocated\n", __func__);
ret = -ENOMEM;
dmabuf_info = get_task_dma_buf_info(task);
if (!dmabuf_info) {
ret = -EINVAL;
goto out;
}
spin_lock(&task->dmabuf_info->lock);
task->dmabuf_info->rss_hwm = task->dmabuf_info->rss;
spin_unlock(&task->dmabuf_info->lock);
if (IS_ERR(dmabuf_info)) {
pr_err("%s dmabuf accounting record is missing, error %ld\n",
__func__, PTR_ERR(dmabuf_info));
ret = PTR_ERR(dmabuf_info);
goto out;
}
spin_lock(&dmabuf_info->lock);
dmabuf_info->rss_hwm = dmabuf_info->rss;
spin_unlock(&dmabuf_info->lock);
out:
put_task_struct(task);
@@ -3401,33 +3419,37 @@ static const struct file_operations proc_dmabuf_rss_hwm_operations = {
static int proc_dmabuf_pss_show(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
struct task_dma_buf_info *dmabuf_info;
struct task_dma_buf_record *rec;
u64 pss = 0;
if (!task->dmabuf_info) {
pr_err("%s dmabuf accounting record was not allocated\n", __func__);
return -ENOMEM;
}
if (!(task->flags & PF_KTHREAD)) {
spin_lock(&task->dmabuf_info->lock);
list_for_each_entry(rec, &task->dmabuf_info->dmabufs, node) {
s64 refs = atomic64_read(&rec->dmabuf->num_unique_refs);
if (refs <= 0) {
pr_err("dmabuf has <= refs %lld\n", refs);
continue;
}
pss += rec->dmabuf->size / (size_t)refs;
}
spin_unlock(&task->dmabuf_info->lock);
seq_printf(m, "%llu\n", pss);
} else {
dmabuf_info = get_task_dma_buf_info(task);
if (!dmabuf_info) {
seq_puts(m, "0\n");
return 0;
}
if (IS_ERR(dmabuf_info)) {
pr_err("%s dmabuf accounting record is missing, error %ld\n",
__func__, PTR_ERR(dmabuf_info));
return PTR_ERR(dmabuf_info);
}
spin_lock(&dmabuf_info->lock);
list_for_each_entry(rec, &dmabuf_info->dmabufs, node) {
s64 refs = atomic64_read(&rec->dmabuf->num_unique_refs);
if (refs <= 0) {
pr_err("dmabuf has <= refs %lld\n", refs);
continue;
}
pss += rec->dmabuf->size / (size_t)refs;
}
spin_unlock(&dmabuf_info->lock);
seq_printf(m, "%llu\n", pss);
return 0;
}
#endif

View File

@@ -690,6 +690,28 @@ struct task_dma_buf_info {
struct list_head dmabufs;
};
static inline bool task_has_dma_buf_info(struct task_struct *task)
{
return (task->flags & (PF_KTHREAD | PF_IO_WORKER)) == 0;
}
extern struct task_struct init_task;
static inline
struct task_dma_buf_info *get_task_dma_buf_info(struct task_struct *task)
{
if (!task)
return ERR_PTR(-EINVAL);
if (!task_has_dma_buf_info(task))
return NULL;
if (!task->worker_private)
return ERR_PTR(-ENOMEM);
return (struct task_dma_buf_info *)task->worker_private;
}
/**
* DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters
* @name: export-info name

View File

@@ -1003,6 +1003,7 @@ struct task_struct {
int __user *clear_child_tid;
/* PF_KTHREAD | PF_IO_WORKER */
/* Otherwise used as task_dma_buf_info pointer */
void *worker_private;
u64 utime;
@@ -1517,9 +1518,6 @@ struct task_struct {
*/
struct callback_head l1d_flush_kill;
#endif
struct task_dma_buf_info *dmabuf_info;
ANDROID_KABI_RESERVE(1);
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);

View File

@@ -214,7 +214,7 @@ struct task_struct init_task
.android_vendor_data1 = {0, },
.android_oem_data1 = {0, },
#endif
.dmabuf_info = NULL,
.worker_private = NULL,
};
EXPORT_SYMBOL(init_task);

View File

@@ -997,21 +997,27 @@ static inline void put_signal_struct(struct signal_struct *sig)
static void put_dmabuf_info(struct task_struct *tsk)
{
if (!tsk->dmabuf_info) {
pr_err("%s dmabuf accounting record was not allocated\n", __func__);
struct task_dma_buf_info *dmabuf_info = get_task_dma_buf_info(tsk);
if (!dmabuf_info)
return;
if (IS_ERR(dmabuf_info)) {
pr_err("%s dmabuf accounting record is missing, error %ld\n",
__func__, PTR_ERR(dmabuf_info));
return;
}
if (!refcount_dec_and_test(&tsk->dmabuf_info->refcnt))
if (!refcount_dec_and_test(&dmabuf_info->refcnt))
return;
if (READ_ONCE(tsk->dmabuf_info->rss))
if (READ_ONCE(dmabuf_info->rss))
pr_err("%s destroying task with non-zero dmabuf rss\n", __func__);
if (!list_empty(&tsk->dmabuf_info->dmabufs))
if (!list_empty(&dmabuf_info->dmabufs))
pr_err("%s destroying task with non-empty dmabuf list\n", __func__);
kfree(tsk->dmabuf_info);
kfree(dmabuf_info);
}
void __put_task_struct(struct task_struct *tsk)
@@ -2291,55 +2297,66 @@ static void rv_task_fork(struct task_struct *p)
static int copy_dmabuf_info(u64 clone_flags, struct task_struct *p)
{
struct task_dma_buf_info *new_dmabuf_info;
struct task_dma_buf_info *dmabuf_info;
struct task_dma_buf_record *rec, *copy;
if (current->dmabuf_info && (clone_flags & (CLONE_VM | CLONE_FILES))
if (!task_has_dma_buf_info(p))
return 0; /* Task is not supposed to have dmabuf_info */
dmabuf_info = get_task_dma_buf_info(current);
/* Original might not have dmabuf_info and that's fine */
if (IS_ERR(dmabuf_info))
dmabuf_info = NULL;
if (dmabuf_info && (clone_flags & (CLONE_VM | CLONE_FILES))
== (CLONE_VM | CLONE_FILES)) {
/*
* Both MM and FD references to dmabufs are shared with the parent, so
* we can share a RSS counter with the parent.
*/
refcount_inc(&current->dmabuf_info->refcnt);
p->dmabuf_info = current->dmabuf_info;
refcount_inc(&dmabuf_info->refcnt);
p->worker_private = dmabuf_info;
return 0;
}
p->dmabuf_info = kmalloc(sizeof(*p->dmabuf_info), GFP_KERNEL);
if (!p->dmabuf_info)
new_dmabuf_info = kmalloc(sizeof(*new_dmabuf_info), GFP_KERNEL);
if (!new_dmabuf_info)
return -ENOMEM;
refcount_set(&p->dmabuf_info->refcnt, 1);
spin_lock_init(&p->dmabuf_info->lock);
INIT_LIST_HEAD(&p->dmabuf_info->dmabufs);
if (current->dmabuf_info) {
spin_lock(&current->dmabuf_info->lock);
p->dmabuf_info->rss = current->dmabuf_info->rss;
p->dmabuf_info->rss_hwm = current->dmabuf_info->rss;
list_for_each_entry(rec, &current->dmabuf_info->dmabufs, node) {
refcount_set(&new_dmabuf_info->refcnt, 1);
spin_lock_init(&new_dmabuf_info->lock);
INIT_LIST_HEAD(&new_dmabuf_info->dmabufs);
if (dmabuf_info) {
spin_lock(&dmabuf_info->lock);
new_dmabuf_info->rss = dmabuf_info->rss;
new_dmabuf_info->rss_hwm = dmabuf_info->rss;
list_for_each_entry(rec, &dmabuf_info->dmabufs, node) {
copy = kmalloc(sizeof(*copy), GFP_KERNEL);
if (!copy) {
spin_unlock(&current->dmabuf_info->lock);
spin_unlock(&dmabuf_info->lock);
goto err_list_copy;
}
copy->dmabuf = rec->dmabuf;
copy->refcnt = rec->refcnt;
list_add(&copy->node, &p->dmabuf_info->dmabufs);
list_add(&copy->node, &new_dmabuf_info->dmabufs);
}
spin_unlock(&current->dmabuf_info->lock);
spin_unlock(&dmabuf_info->lock);
} else {
p->dmabuf_info->rss = 0;
p->dmabuf_info->rss_hwm = 0;
new_dmabuf_info->rss = 0;
new_dmabuf_info->rss_hwm = 0;
}
p->worker_private = new_dmabuf_info;
return 0;
err_list_copy:
list_for_each_entry_safe(rec, copy, &p->dmabuf_info->dmabufs, node) {
list_for_each_entry_safe(rec, copy, &new_dmabuf_info->dmabufs, node) {
list_del(&rec->node);
kfree(rec);
}
kfree(p->dmabuf_info);
kfree(new_dmabuf_info);
return -ENOMEM;
}