cgroups: fix probable race with put_css_set[_taskexit] and find_css_set
put_css_set_taskexit may be called while find_css_set is running on another
cpu, and the following race can occur:
put_css_set_taskexit side                 find_css_set side
                                          |
atomic_dec_and_test(&kref->refcount)      |
    /* kref->refcount = 0 */              |
....................................................................
                                          |  read_lock(&css_set_lock)
                                          |  find_existing_css_set
                                          |  get_css_set
                                          |  read_unlock(&css_set_lock);
....................................................................
__release_css_set                         |
....................................................................
                                          |  /* use a released css_set */
                                          |
[put_css_set has the same problem, but in the current code every
put_css_set call is made inside the cgroup_mutex critical section, just as
find_css_set is.]
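[A minimal sketch of one way to close this window, consistent with the
refcounting change visible in the diff below (css_set's kref replaced by a
bare atomic_t refcount): drop the last reference only while holding
css_set_lock for writing, so find_existing_css_set(), which runs under
read_lock(&css_set_lock), either sees the set still referenced or no longer
finds it. The names follow kernel/cgroup.c, but unlink_css_set() and the
final teardown are illustrative stand-ins, not the exact patch:

/*
 * Sketch of a race-free put: the fast path drops a reference only when it
 * is provably not the last one; the last reference is dropped with
 * css_set_lock held for writing.
 */
static void __put_css_set(struct css_set *cg, int taskexit)
{
	/* Not the last reference: decrement and return without the lock. */
	if (atomic_add_unless(&cg->refcount, -1, 1))
		return;

	write_lock(&css_set_lock);
	/* Re-check: another cpu may have taken a reference meanwhile. */
	if (!atomic_dec_and_test(&cg->refcount)) {
		write_unlock(&css_set_lock);
		return;
	}

	/* Really the last reference: unhash while still holding the lock. */
	unlink_css_set(cg);		/* illustrative helper name */
	write_unlock(&css_set_lock);

	/* ... notify_on_release handling and kfree(cg) as before ... */
}
]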
[akpm@linux-foundation.org: repair comments]
[menage@google.com: eliminate race in css_set refcounting]
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 146aa1bd05 (parent 248736c2a5), committed by Linus Torvalds
@@ -57,7 +57,7 @@ static u64 current_css_set_refcount_read(struct cgroup *cont,
 	u64 count;
 
 	rcu_read_lock();
-	count = atomic_read(&current->cgroups->ref.refcount);
+	count = atomic_read(&current->cgroups->refcount);
 	rcu_read_unlock();
 	return count;
 }
@@ -90,7 +90,7 @@ static struct cftype files[] = {
 	{
 		.name = "releasable",
 		.read_u64 = releasable_read,
-	}
+	},
 };
 
 static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)