mm: slab/slub: use page->list consistently instead of page->lru
'struct page' has two list_head fields: 'lru' and 'list'. Conveniently, they are unioned together. This means that code can use them interchangeably, which gets horribly confusing like with this nugget from slab.c: > list_del(&page->lru); > if (page->active == cachep->num) > list_add(&page->list, &n->slabs_full); This patch makes the slab and slub code use page->lru universally instead of mixing ->list and ->lru. So, the new rule is: page->lru is what you use if you want to keep your page on a list. Don't like the fact that it's not called ->list? Too bad. Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Acked-by: Christoph Lameter <cl@linux.com> Acked-by: David Rientjes <rientjes@google.com> Cc: Pekka Enberg <penberg@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Pekka Enberg <penberg@kernel.org>
This commit is contained in:
committed by
Pekka Enberg
parent
5f0985bb11
commit
34bf6ef94a
@@ -124,6 +124,8 @@ struct page {
|
|||||||
union {
|
union {
|
||||||
struct list_head lru; /* Pageout list, eg. active_list
|
struct list_head lru; /* Pageout list, eg. active_list
|
||||||
* protected by zone->lru_lock !
|
* protected by zone->lru_lock !
|
||||||
|
* Can be used as a generic list
|
||||||
|
* by the page owner.
|
||||||
*/
|
*/
|
||||||
struct { /* slub per cpu partial pages */
|
struct { /* slub per cpu partial pages */
|
||||||
struct page *next; /* Next partial slab */
|
struct page *next; /* Next partial slab */
|
||||||
@@ -136,7 +138,6 @@ struct page {
|
|||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
|
|
||||||
struct list_head list; /* slobs list of pages */
|
|
||||||
struct slab *slab_page; /* slab fields */
|
struct slab *slab_page; /* slab fields */
|
||||||
struct rcu_head rcu_head; /* Used by SLAB
|
struct rcu_head rcu_head; /* Used by SLAB
|
||||||
* when destroying via RCU
|
* when destroying via RCU
|
||||||
|
|||||||
@@ -2922,9 +2922,9 @@ retry:
|
|||||||
/* move slabp to correct slabp list: */
|
/* move slabp to correct slabp list: */
|
||||||
list_del(&page->lru);
|
list_del(&page->lru);
|
||||||
if (page->active == cachep->num)
|
if (page->active == cachep->num)
|
||||||
list_add(&page->list, &n->slabs_full);
|
list_add(&page->lru, &n->slabs_full);
|
||||||
else
|
else
|
||||||
list_add(&page->list, &n->slabs_partial);
|
list_add(&page->lru, &n->slabs_partial);
|
||||||
}
|
}
|
||||||
|
|
||||||
must_grow:
|
must_grow:
|
||||||
|
|||||||
10
mm/slob.c
10
mm/slob.c
@@ -111,13 +111,13 @@ static inline int slob_page_free(struct page *sp)
|
|||||||
|
|
||||||
static void set_slob_page_free(struct page *sp, struct list_head *list)
|
static void set_slob_page_free(struct page *sp, struct list_head *list)
|
||||||
{
|
{
|
||||||
list_add(&sp->list, list);
|
list_add(&sp->lru, list);
|
||||||
__SetPageSlobFree(sp);
|
__SetPageSlobFree(sp);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void clear_slob_page_free(struct page *sp)
|
static inline void clear_slob_page_free(struct page *sp)
|
||||||
{
|
{
|
||||||
list_del(&sp->list);
|
list_del(&sp->lru);
|
||||||
__ClearPageSlobFree(sp);
|
__ClearPageSlobFree(sp);
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
|
|||||||
|
|
||||||
spin_lock_irqsave(&slob_lock, flags);
|
spin_lock_irqsave(&slob_lock, flags);
|
||||||
/* Iterate through each partially free page, try to find room */
|
/* Iterate through each partially free page, try to find room */
|
||||||
list_for_each_entry(sp, slob_list, list) {
|
list_for_each_entry(sp, slob_list, lru) {
|
||||||
#ifdef CONFIG_NUMA
|
#ifdef CONFIG_NUMA
|
||||||
/*
|
/*
|
||||||
* If there's a node specification, search for a partial
|
* If there's a node specification, search for a partial
|
||||||
@@ -296,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
|
|||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* Attempt to alloc */
|
/* Attempt to alloc */
|
||||||
prev = sp->list.prev;
|
prev = sp->lru.prev;
|
||||||
b = slob_page_alloc(sp, size, align);
|
b = slob_page_alloc(sp, size, align);
|
||||||
if (!b)
|
if (!b)
|
||||||
continue;
|
continue;
|
||||||
@@ -322,7 +322,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
|
|||||||
spin_lock_irqsave(&slob_lock, flags);
|
spin_lock_irqsave(&slob_lock, flags);
|
||||||
sp->units = SLOB_UNITS(PAGE_SIZE);
|
sp->units = SLOB_UNITS(PAGE_SIZE);
|
||||||
sp->freelist = b;
|
sp->freelist = b;
|
||||||
INIT_LIST_HEAD(&sp->list);
|
INIT_LIST_HEAD(&sp->lru);
|
||||||
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
|
set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
|
||||||
set_slob_page_free(sp, slob_list);
|
set_slob_page_free(sp, slob_list);
|
||||||
b = slob_page_alloc(sp, size, align);
|
b = slob_page_alloc(sp, size, align);
|
||||||
|
|||||||
Reference in New Issue
Block a user