Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-fscache
* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-fscache: (31 commits)
  FS-Cache: Provide nop fscache_stat_d() if CONFIG_FSCACHE_STATS=n
  SLOW_WORK: Fix GFS2 to #include <linux/module.h> before using THIS_MODULE
  SLOW_WORK: Fix CIFS to pass THIS_MODULE to slow_work_register_user()
  CacheFiles: Don't log lookup/create failing with ENOBUFS
  CacheFiles: Catch an overly long wait for an old active object
  CacheFiles: Better showing of debugging information in active object problems
  CacheFiles: Mark parent directory locks as I_MUTEX_PARENT to keep lockdep happy
  CacheFiles: Handle truncate unlocking the page we're reading
  CacheFiles: Don't write a full page if there's only a partial page to cache
  FS-Cache: Actually requeue an object when requested
  FS-Cache: Start processing an object's operations on that object's death
  FS-Cache: Make sure FSCACHE_COOKIE_LOOKING_UP cleared on lookup failure
  FS-Cache: Add a retirement stat counter
  FS-Cache: Handle pages pending storage that get evicted under OOM conditions
  FS-Cache: Handle read request vs lookup, creation or other cache failure
  FS-Cache: Don't delete pending pages from the page-store tracking tree
  FS-Cache: Fix lock misorder in fscache_write_op()
  FS-Cache: The object-available state can't rely on the cookie to be available
  FS-Cache: Permit cache retrieval ops to be interrupted in the initial wait phase
  FS-Cache: Use radix tree preload correctly in tracking of pages to be stored
  ...
@@ -91,6 +91,8 @@ struct fscache_operation {
#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
#define FSCACHE_OP_DEAD 6 /* op is now dead */
#define FSCACHE_OP_DEC_READ_CNT 7 /* decrement object->n_reads on destruction */
#define FSCACHE_OP_KEEP_FLAGS 0xc0 /* flags to keep when repurposing an op */

	atomic_t usage;
	unsigned debug_id; /* debugging ID */
@@ -102,6 +104,16 @@ struct fscache_operation {

	/* operation releaser */
	fscache_operation_release_t release;

#ifdef CONFIG_SLOW_WORK_PROC
	const char *name; /* operation name */
	const char *state; /* operation state */
#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
#define fscache_set_op_state(OP, S) do { (OP)->state = (S); } while(0)
#else
#define fscache_set_op_name(OP, N) do { } while(0)
#define fscache_set_op_state(OP, S) do { } while(0)
#endif
};

extern atomic_t fscache_op_debug_id;
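The name/state fields and their setter macros only exist when CONFIG_SLOW_WORK_PROC is enabled; otherwise the setters compile to nothing, so annotation costs nothing in production builds. A minimal standalone sketch of that pattern (plain C, not kernel code; the TRACE_OPS define stands in for the kernel config system and all names are illustrative):

/*
 * Build with -DTRACE_OPS to compile the tracing strings in;
 * without it the setter macros expand to empty statements.
 */
#include <stdio.h>

struct op {
	int debug_id;
#ifdef TRACE_OPS
	const char *name;	/* operation name */
	const char *state;	/* operation state */
#endif
};

#ifdef TRACE_OPS
#define set_op_name(op, n)  do { (op)->name  = (n); } while (0)
#define set_op_state(op, s) do { (op)->state = (s); } while (0)
#else
#define set_op_name(op, n)  do { } while (0)
#define set_op_state(op, s) do { } while (0)
#endif

int main(void)
{
	struct op op = { .debug_id = 1 };

	set_op_name(&op, "retrieval");
	set_op_state(&op, "Init");
#ifdef TRACE_OPS
	printf("op %d: %s/%s\n", op.debug_id, op.name, op.state);
#else
	printf("op %d: tracing compiled out\n", op.debug_id);
#endif
	return 0;
}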
@@ -125,6 +137,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
	fscache_set_op_state(op, "Init");
}

/**
@@ -221,8 +234,10 @@ struct fscache_cache_ops {
	struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
					       struct fscache_cookie *cookie);

	/* look up the object for a cookie */
	void (*lookup_object)(struct fscache_object *object);
	/* look up the object for a cookie
	 * - return -ETIMEDOUT to be requeued
	 */
	int (*lookup_object)(struct fscache_object *object);

	/* finished looking up */
	void (*lookup_complete)(struct fscache_object *object);
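The signature change from void to int is the point of this hunk: a cache backend can now return -ETIMEDOUT from lookup_object() to ask to be requeued instead of tying up the executing thread. A hedged userspace sketch of that contract (not the kernel dispatcher; the retry counter is purely illustrative):

#include <errno.h>
#include <stdio.h>

static int attempts;

/* stand-in for a cache backend's lookup op */
static int lookup_object(void *object)
{
	(void)object;
	if (++attempts < 3)
		return -ETIMEDOUT;	/* not ready yet: ask to be requeued */
	return 0;			/* lookup complete */
}

int main(void)
{
	int ret;

	/* stand-in for the dispatcher requeueing on -ETIMEDOUT */
	while ((ret = lookup_object(NULL)) == -ETIMEDOUT)
		printf("lookup timed out, requeueing (attempt %d)\n", attempts);

	printf("lookup finished after %d attempts, ret=%d\n", attempts, ret);
	return 0;
}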
@@ -297,12 +312,14 @@ struct fscache_cookie {
	atomic_t usage; /* number of users of this cookie */
	atomic_t n_children; /* number of children of this cookie */
	spinlock_t lock;
	spinlock_t stores_lock; /* lock on page store tree */
	struct hlist_head backing_objects; /* object(s) backing this file/index */
	const struct fscache_cookie_def *def; /* definition */
	struct fscache_cookie *parent; /* parent of this entry */
	void *netfs_data; /* back pointer to netfs */
	struct radix_tree_root stores; /* pages to be stored on this cookie */
#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */

	unsigned long flags;
#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
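The stores radix tree now distinguishes pages that are merely queued for write-out (PENDING tag) from pages actively being written to the cache (STORING tag), which is what lets eviction cancel the former but not the latter. A toy model of the tag transition, using plain bitmasks in place of the kernel radix tree (all names illustrative):

#include <stdio.h>

static unsigned pending;	/* FSCACHE_COOKIE_PENDING_TAG analogue */
static unsigned storing;	/* FSCACHE_COOKIE_STORING_TAG analogue */

static void queue_store(unsigned page)
{
	pending |= 1u << page;		/* waiting to be written */
}

static void begin_store(unsigned page)
{
	pending &= ~(1u << page);	/* no longer merely pending... */
	storing |= 1u << page;		/* ...now actively being written */
}

int main(void)
{
	queue_store(2);
	queue_store(5);
	begin_store(2);
	/* a page with neither bit set has no storage request on it */
	printf("pending=%#x storing=%#x\n", pending, storing);
	return 0;
}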
@@ -337,6 +354,7 @@ struct fscache_object {
	FSCACHE_OBJECT_RECYCLING, /* retiring object */
	FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
	FSCACHE_OBJECT_DEAD, /* object is now dead */
	FSCACHE_OBJECT__NSTATES
	} state;

	int debug_id; /* debugging ID */
@@ -345,6 +363,7 @@ struct fscache_object {
	int n_obj_ops; /* number of object ops outstanding on object */
	int n_in_progress; /* number of ops in progress */
	int n_exclusive; /* number of exclusive ops queued */
	atomic_t n_reads; /* number of read ops in progress */
	spinlock_t lock; /* state and operations lock */

	unsigned long lookup_jif; /* time at which lookup started */
@@ -358,6 +377,7 @@ struct fscache_object {
#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
#define FSCACHE_OBJECT_EVENTS_MASK 0x7f /* mask of all events */

	unsigned long flags;
#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
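The FSCACHE_OBJECT_EV_* values are bit numbers: an event is raised by setting the corresponding bit, and the object only reacts to the bits selected in its event_mask, with the 0x7f mask covering bits 0-6. A small standalone illustration of that filtering (abbreviated, illustrative names):

#include <stdio.h>

#define EV_RELEASE	4
#define EV_RETIRE	5
#define EV_WITHDRAW	6
#define EVENTS_MASK	0x7f	/* bits 0-6 */

int main(void)
{
	unsigned long events = 0;
	unsigned long event_mask = (1UL << EV_RELEASE) | (1UL << EV_WITHDRAW);

	events |= 1UL << EV_RETIRE;	/* raised but not in the mask */
	events |= 1UL << EV_WITHDRAW;	/* raised and selected */

	printf("actionable events: %#lx\n", events & event_mask & EVENTS_MASK);
	return 0;
}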
@@ -373,7 +393,11 @@ struct fscache_object {
	struct list_head dependents; /* FIFO of dependent objects */
	struct list_head dep_link; /* link in parent's dependents list */
	struct list_head pending_ops; /* unstarted operations on this object */
#ifdef CONFIG_FSCACHE_OBJECT_LIST
	struct rb_node objlist_link; /* link in global object list */
#endif
	pgoff_t store_limit; /* current storage limit */
	loff_t store_limit_l; /* current storage limit */
};

extern const char *fscache_object_states[];
@@ -383,6 +407,10 @@ extern const char *fscache_object_states[];
	(obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
	(obj)->state < FSCACHE_OBJECT_DYING)

#define fscache_object_is_dead(obj) \
	(test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
	 (obj)->state >= FSCACHE_OBJECT_DYING)

extern const struct slow_work_ops fscache_object_slow_work_ops;

/**
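Both predicates rely on the state enum being declared in lifecycle order, so "alive" and "dead" become range comparisons; the new fscache_object_is_dead() additionally requires that the cache has flagged an I/O error. A compressed sketch of that ordered-enum trick (state names abbreviated, not the kernel definitions):

#include <stdbool.h>
#include <stdio.h>

enum obj_state {
	OBJ_AVAILABLE,	/* usable from here... */
	OBJ_ACTIVE,
	OBJ_DYING,	/* ...and on the way out from here */
	OBJ_RECYCLING,
	OBJ_DEAD,
};

static bool cache_ioerror;	/* analogue of FSCACHE_IOERROR on the cache */

static bool object_is_active(enum obj_state s)
{
	return s >= OBJ_AVAILABLE && s < OBJ_DYING;
}

static bool object_is_dead(enum obj_state s)
{
	/* only counts as dead-for-error if the cache saw an I/O error */
	return cache_ioerror && s >= OBJ_DYING;
}

int main(void)
{
	cache_ioerror = true;
	printf("ACTIVE: active=%d dead=%d\n",
	       object_is_active(OBJ_ACTIVE), object_is_dead(OBJ_ACTIVE));
	printf("RECYCLING: active=%d dead=%d\n",
	       object_is_active(OBJ_RECYCLING), object_is_dead(OBJ_RECYCLING));
	return 0;
}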
@@ -414,6 +442,7 @@ void fscache_object_init(struct fscache_object *object,
	object->events = object->event_mask = 0;
	object->flags = 0;
	object->store_limit = 0;
	object->store_limit_l = 0;
	object->cache = cache;
	object->cookie = cookie;
	object->parent = NULL;
@@ -422,6 +451,12 @@ void fscache_object_init(struct fscache_object *object,
extern void fscache_object_lookup_negative(struct fscache_object *object);
extern void fscache_obtained_object(struct fscache_object *object);

#ifdef CONFIG_FSCACHE_OBJECT_LIST
extern void fscache_object_destroy(struct fscache_object *object);
#else
#define fscache_object_destroy(object) do {} while(0)
#endif

/**
 * fscache_object_destroyed - Note destruction of an object in a cache
 * @cache: The cache from which the object came
@@ -460,6 +495,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object)
static inline
void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
{
	object->store_limit_l = i_size;
	object->store_limit = i_size >> PAGE_SHIFT;
	if (i_size & ~PAGE_MASK)
		object->store_limit++;
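The helper stores the limit twice: byte-accurate in store_limit_l and rounded up to whole pages in store_limit, where `i_size & ~PAGE_MASK` picks out the sub-page remainder. The arithmetic can be checked in isolation (a userspace sketch assuming 4096-byte pages):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long store_limit(unsigned long long i_size)
{
	unsigned long limit = i_size >> PAGE_SHIFT;

	if (i_size & ~PAGE_MASK)	/* partial final page? */
		limit++;
	return limit;
}

int main(void)
{
	printf("%lu\n", store_limit(0));	/* 0 pages */
	printf("%lu\n", store_limit(4096));	/* 1 page  */
	printf("%lu\n", store_limit(4097));	/* 2 pages */
	return 0;
}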
@@ -202,6 +202,8 @@ extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
					 gfp_t);

/**
 * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -615,4 +617,29 @@ void fscache_wait_on_page_write(struct fscache_cookie *cookie,
	__fscache_wait_on_page_write(cookie, page);
}

/**
 * fscache_maybe_release_page - Consider releasing a page, cancelling a store
 * @cookie: The cookie representing the cache object
 * @page: The netfs page that is being cached.
 * @gfp: The gfp flags passed to releasepage()
 *
 * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
 * releasepage() call. A storage request on the page may be cancelled if it is
 * not currently being processed.
 *
 * The function returns true if the page no longer has a storage request on it,
 * and false if a storage request is left in place. If true is returned, the
 * page will have been passed to fscache_uncache_page(). If false is returned
 * the page cannot be freed yet.
 */
static inline
bool fscache_maybe_release_page(struct fscache_cookie *cookie,
				struct page *page,
				gfp_t gfp)
{
	if (fscache_cookie_valid(cookie) && PageFsCache(page))
		return __fscache_maybe_release_page(cookie, page, gfp);
	return false;
}

#endif /* _LINUX_FSCACHE_H */
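The wrapper keeps the common case inline: only a valid cookie with a page actually marked for caching reaches the out-of-line __fscache_maybe_release_page(), which may cancel a queued store. A userspace model of that guard-then-slow-path shape (stub types standing in for the kernel ones, and the slow path simplified to a pending-store check):

#include <stdbool.h>
#include <stdio.h>

struct cookie { bool valid; };
struct page   { bool fscache; bool store_pending; };

/* stand-in for the out-of-line __fscache_maybe_release_page() */
static bool slow_maybe_release(struct cookie *c, struct page *p)
{
	(void)c;
	if (p->store_pending)
		return false;		/* store in flight: page can't go yet */
	p->fscache = false;		/* analogue of fscache_uncache_page() */
	return true;
}

static bool maybe_release_page(struct cookie *c, struct page *p)
{
	if (c && c->valid && p->fscache)
		return slow_maybe_release(c, p);	/* slow path */
	return false;				/* mirrors the wrapper above */
}

int main(void)
{
	struct cookie c = { .valid = true };
	struct page p = { .fscache = true, .store_pending = false };

	printf("releasable: %d\n", maybe_release_page(&c, &p));
	return 0;
}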
@@ -17,13 +17,20 @@
#ifdef CONFIG_SLOW_WORK

#include <linux/sysctl.h>
#include <linux/timer.h>

struct slow_work;
#ifdef CONFIG_SLOW_WORK_PROC
struct seq_file;
#endif

/*
 * The operations used to support slow work items
 */
struct slow_work_ops {
	/* owner */
	struct module *owner;

	/* get a ref on a work item
	 * - return 0 if successful, -ve if not
	 */
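A slow-work item does not carry its own reference count; the queue pins the item through the get_ref()/put_ref() operations in its ops table, and get_ref() may fail. A standalone sketch of that ops-table pattern as it appears in the two hunks around this point (the module owner is modelled as a plain string; all names illustrative):

#include <stdio.h>

struct slow_work;

struct slow_work_ops {
	const char *owner;			/* module owner analogue */
	int  (*get_ref)(struct slow_work *work);	/* 0 on success, -ve on failure */
	void (*put_ref)(struct slow_work *work);
	void (*execute)(struct slow_work *work);
};

struct slow_work {
	const struct slow_work_ops *ops;
	int refs;
};

static int my_get_ref(struct slow_work *w)  { w->refs++; return 0; }
static void my_put_ref(struct slow_work *w) { w->refs--; }

static void my_execute(struct slow_work *w)
{
	printf("executing, refs held: %d\n", w->refs);
}

static const struct slow_work_ops my_ops = {
	.owner   = "my_module",
	.get_ref = my_get_ref,
	.put_ref = my_put_ref,
	.execute = my_execute,
};

int main(void)
{
	struct slow_work work = { .ops = &my_ops };

	if (work.ops->get_ref(&work) == 0) {	/* pin before executing */
		work.ops->execute(&work);
		work.ops->put_ref(&work);
	}
	return 0;
}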
@@ -34,6 +41,11 @@ struct slow_work_ops {

	/* execute a work item */
	void (*execute)(struct slow_work *work);

#ifdef CONFIG_SLOW_WORK_PROC
	/* describe a work item for /proc */
	void (*desc)(struct slow_work *work, struct seq_file *m);
#endif
};

/*
@@ -42,13 +54,24 @@ struct slow_work_ops {
 * queued
 */
struct slow_work {
	struct module *owner; /* the owning module */
	unsigned long flags;
#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
#define SLOW_WORK_EXECUTING 1 /* item currently executing */
#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */
#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */
	const struct slow_work_ops *ops; /* operations table for this item */
	struct list_head link; /* link in queue */
#ifdef CONFIG_SLOW_WORK_PROC
	struct timespec mark; /* jiffies at which queued or exec begun */
#endif
};

struct delayed_slow_work {
	struct slow_work work;
	struct timer_list timer;
};

/**
@@ -66,6 +89,20 @@ static inline void slow_work_init(struct slow_work *work,
	INIT_LIST_HEAD(&work->link);
}

/**
 * delayed_slow_work_init - Initialise a delayed slow work item
 * @dwork: The work item to initialise
 * @ops: The operations to use to handle the slow work item
 *
 * Initialise a delayed slow work item.
 */
static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
					  const struct slow_work_ops *ops)
{
	init_timer(&dwork->timer);
	slow_work_init(&dwork->work, ops);
}

/**
 * vslow_work_init - Initialise a very slow work item
 * @work: The work item to initialise
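struct delayed_slow_work simply wraps a plain work item together with a timer, so code that is handed the embedded struct slow_work can climb back to the wrapper with the usual container_of() arithmetic. A self-contained illustration of that embedding (timers and flags stubbed out; not the kernel implementation):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct slow_work  { int flags; };
struct timer_list { long expires; };

struct delayed_slow_work {
	struct slow_work work;
	struct timer_list timer;
};

static void timer_fired(struct slow_work *work)
{
	/* recover the delayed wrapper from the embedded work item */
	struct delayed_slow_work *dwork =
		container_of(work, struct delayed_slow_work, work);

	printf("timer expires field: %ld\n", dwork->timer.expires);
}

int main(void)
{
	struct delayed_slow_work dwork = { .timer = { .expires = 250 } };

	timer_fired(&dwork.work);
	return 0;
}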
@@ -83,9 +120,40 @@ static inline void vslow_work_init(struct slow_work *work,
	INIT_LIST_HEAD(&work->link);
}

/**
 * slow_work_is_queued - Determine if a slow work item is on the work queue
 * @work: The work item to test
 *
 * Determine if the specified slow-work item is on the work queue. This
 * returns true if it is actually on the queue.
 *
 * If the item is executing and has been marked for requeue when execution
 * finishes, then false will be returned.
 *
 * Anyone wishing to wait for completion of execution can wait on the
 * SLOW_WORK_EXECUTING bit.
 */
static inline bool slow_work_is_queued(struct slow_work *work)
{
	unsigned long flags = work->flags;
	return (flags & (1UL << SLOW_WORK_PENDING)) &&
		!(flags & (1UL << SLOW_WORK_EXECUTING));
}

extern int slow_work_enqueue(struct slow_work *work);
extern int slow_work_register_user(void);
extern void slow_work_unregister_user(void);
extern void slow_work_cancel(struct slow_work *work);
extern int slow_work_register_user(struct module *owner);
extern void slow_work_unregister_user(struct module *owner);

extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
				     unsigned long delay);

static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
{
	slow_work_cancel(&dwork->work);
}

extern bool slow_work_sleep_till_thread_needed(struct slow_work *work,
					       signed long *_timeout);

#ifdef CONFIG_SYSCTL
extern ctl_table slow_work_sysctls[];
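The SLOW_WORK_* flag constants are bit numbers, not masks, which is why slow_work_is_queued() above shifts each one into place before testing: an item counts as queued only while it is pending and not already executing. A quick userspace check of that rule (illustrative code, not the kernel header):

#include <stdbool.h>
#include <stdio.h>

#define SLOW_WORK_PENDING	0	/* bit numbers, as in the header */
#define SLOW_WORK_EXECUTING	1

static bool is_queued(unsigned long flags)
{
	return (flags & (1UL << SLOW_WORK_PENDING)) &&
	       !(flags & (1UL << SLOW_WORK_EXECUTING));
}

int main(void)
{
	unsigned long flags = 1UL << SLOW_WORK_PENDING;

	printf("pending only: queued=%d\n", is_queued(flags));

	flags |= 1UL << SLOW_WORK_EXECUTING;	/* picked up by a thread */
	printf("pending+executing: queued=%d\n", is_queued(flags));
	return 0;
}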