Merge branches 'at91', 'dcache', 'ftrace', 'hwbpt', 'misc', 'mmci', 's3c', 'st-ux' and 'unwind' into devel

921 changed files with 14129 additions and 5741 deletions

@@ -1791,19 +1791,20 @@ out:
}
/**
* cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
* cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
* @from: attach to all cgroups of a given task
* @tsk: the task to be attached
*/
int cgroup_attach_task_current_cg(struct task_struct *tsk)
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
struct cgroupfs_root *root;
struct cgroup *cur_cg;
int retval = 0;
cgroup_lock();
for_each_active_root(root) {
cur_cg = task_cgroup_from_root(current, root);
retval = cgroup_attach_task(cur_cg, tsk);
struct cgroup *from_cg = task_cgroup_from_root(from, root);
retval = cgroup_attach_task(from_cg, tsk);
if (retval)
break;
}
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
/*
* Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
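Editor's note: callers that only need the old single-argument behaviour can get it back by passing 'current' as the source task. A minimal sketch of such a compatibility wrapper (illustrative; not necessarily the exact definition kept in the headers):

static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
{
        /* old semantics: attach tsk to all cgroups of the calling task */
        return cgroup_attach_task_all(current, tsk);
}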

@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
return 0;
}
/*
* Allocate user-space memory for the duration of a single system call,
* in order to marshall parameters inside a compat thunk.
*/
void __user *compat_alloc_user_space(unsigned long len)
{
void __user *ptr;
/* If len would occupy more than half of the entire compat space... */
if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
return NULL;
ptr = arch_compat_alloc_user_space(len);
if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
return NULL;
return ptr;
}
EXPORT_SYMBOL_GPL(compat_alloc_user_space);
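Editor's note: a typical caller copies a 32-bit structure in, widens it into a scratch area obtained from compat_alloc_user_space(), and hands that to the native handler. A hedged sketch of that pattern; struct foo32, struct foo and do_foo() are hypothetical stand-ins:

struct foo32 { u32 a; u32 b; };         /* 32-bit userspace layout (hypothetical) */
struct foo   { u64 a; u64 b; };         /* native layout (hypothetical) */

static long compat_do_foo(struct foo32 __user *ufoo32)
{
        struct foo32 f32;
        struct foo __user *ufoo;

        if (copy_from_user(&f32, ufoo32, sizeof(f32)))
                return -EFAULT;

        /* Scratch user memory valid only for the duration of this syscall. */
        ufoo = compat_alloc_user_space(sizeof(*ufoo));
        if (!ufoo)
                return -EFAULT;
        if (put_user((u64)f32.a, &ufoo->a) ||
            put_user((u64)f32.b, &ufoo->b))
                return -EFAULT;

        return do_foo(ufoo);            /* hypothetical native implementation */
}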

@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
int i, bpno;
kdb_bp_t *bp, *bp_check;
int diag;
int free;
char *symname = NULL;
long offset = 0ul;
int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
/*
* Find an empty bp structure to allocate
*/
free = KDB_MAXBPT;
for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
if (bp->bp_free)
break;

@@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (IS_ERR(pol))
goto fail_nomem_policy;
vma_set_policy(tmp, pol);
tmp->vm_mm = mm;
if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &= ~VM_LOCKED;
tmp->vm_mm = mm;
tmp->vm_next = tmp->vm_prev = NULL;
file = tmp->vm_file;
if (file) {

@@ -33,10 +33,11 @@
* @children: child nodes
* @all: list head for list of all nodes
* @parent: parent node
* @info: associated profiling data structure if not a directory
* @ghost: when an object file containing profiling data is unloaded we keep a
* copy of the profiling data here to allow collecting coverage data
* for cleanup code. Such a node is called a "ghost".
* @loaded_info: array of pointers to profiling data sets for loaded object
* files.
* @num_loaded: number of profiling data sets for loaded object files.
* @unloaded_info: accumulated copy of profiling data sets for unloaded
* object files. Used only when gcov_persist=1.
* @dentry: main debugfs entry, either a directory or data file
* @links: associated symbolic links
* @name: data file basename
@@ -51,10 +52,11 @@ struct gcov_node {
struct list_head children;
struct list_head all;
struct gcov_node *parent;
struct gcov_info *info;
struct gcov_info *ghost;
struct gcov_info **loaded_info;
struct gcov_info *unloaded_info;
struct dentry *dentry;
struct dentry **links;
int num_loaded;
char name[0];
};
@@ -136,16 +138,37 @@ static const struct seq_operations gcov_seq_ops = {
};
/*
* Return the profiling data set for a given node. This can either be the
* original profiling data structure or a duplicate (also called "ghost")
* in case the associated object file has been unloaded.
* Return a profiling data set associated with the given node. This is
* either a data set for a loaded object file or a data set copy in case
* all associated object files have been unloaded.
*/
static struct gcov_info *get_node_info(struct gcov_node *node)
{
if (node->info)
return node->info;
if (node->num_loaded > 0)
return node->loaded_info[0];
return node->ghost;
return node->unloaded_info;
}
/*
* Return a newly allocated profiling data set which contains the sum of
* all profiling data associated with the given node.
*/
static struct gcov_info *get_accumulated_info(struct gcov_node *node)
{
struct gcov_info *info;
int i = 0;
if (node->unloaded_info)
info = gcov_info_dup(node->unloaded_info);
else
info = gcov_info_dup(node->loaded_info[i++]);
if (!info)
return NULL;
for (; i < node->num_loaded; i++)
gcov_info_add(info, node->loaded_info[i]);
return info;
}
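Editor's note: the accumulation rule above is "duplicate the first data set, then add the rest into it". A self-contained userspace analogue, with a toy counter structure standing in for struct gcov_info (all names hypothetical):

#include <stdlib.h>
#include <string.h>

#define NCTR 4

struct info { unsigned long ctr[NCTR]; };       /* stand-in for struct gcov_info */

static struct info *info_dup(const struct info *src)
{
        struct info *dst = malloc(sizeof(*dst));
        if (dst)
                memcpy(dst, src, sizeof(*dst));
        return dst;
}

static void info_add(struct info *dst, const struct info *src)
{
        for (int i = 0; i < NCTR; i++)
                dst->ctr[i] += src->ctr[i];
}

/* Mirror of get_accumulated_info(): start from the unloaded copy if there
 * is one, otherwise from the first loaded set, then sum in the rest. */
static struct info *accumulate(struct info *unloaded,
                               struct info **loaded, int num_loaded)
{
        struct info *sum;
        int i = 0;

        if (unloaded)
                sum = info_dup(unloaded);
        else
                sum = info_dup(loaded[i++]);
        if (!sum)
                return NULL;
        for (; i < num_loaded; i++)
                info_add(sum, loaded[i]);
        return sum;
}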
/*
@@ -163,9 +186,10 @@ static int gcov_seq_open(struct inode *inode, struct file *file)
mutex_lock(&node_lock);
/*
* Read from a profiling data copy to minimize reference tracking
* complexity and concurrent access.
* complexity and concurrent access and to keep accumulating multiple
* profiling data sets associated with one node simple.
*/
info = gcov_info_dup(get_node_info(node));
info = get_accumulated_info(node);
if (!info)
goto out_unlock;
iter = gcov_iter_new(info);
@@ -225,12 +249,25 @@ static struct gcov_node *get_node_by_name(const char *name)
return NULL;
}
/*
* Reset all profiling data associated with the specified node.
*/
static void reset_node(struct gcov_node *node)
{
int i;
if (node->unloaded_info)
gcov_info_reset(node->unloaded_info);
for (i = 0; i < node->num_loaded; i++)
gcov_info_reset(node->loaded_info[i]);
}
static void remove_node(struct gcov_node *node);
/*
* write() implementation for gcov data files. Reset profiling data for the
* associated file. If the object file has been unloaded (i.e. this is
* a "ghost" node), remove the debug fs node as well.
* corresponding file. If all associated object files have been unloaded,
* remove the debug fs node as well.
*/
static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -245,10 +282,10 @@ static ssize_t gcov_seq_write(struct file *file, const char __user *addr,
node = get_node_by_name(info->filename);
if (node) {
/* Reset counts or remove node for unloaded modules. */
if (node->ghost)
if (node->num_loaded == 0)
remove_node(node);
else
gcov_info_reset(node->info);
reset_node(node);
}
/* Reset counts for open file. */
gcov_info_reset(info);
@@ -378,7 +415,10 @@ static void init_node(struct gcov_node *node, struct gcov_info *info,
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->all);
node->info = info;
if (node->loaded_info) {
node->loaded_info[0] = info;
node->num_loaded = 1;
}
node->parent = parent;
if (name)
strcpy(node->name, name);
@@ -394,9 +434,13 @@ static struct gcov_node *new_node(struct gcov_node *parent,
struct gcov_node *node;
node = kzalloc(sizeof(struct gcov_node) + strlen(name) + 1, GFP_KERNEL);
if (!node) {
pr_warning("out of memory\n");
return NULL;
if (!node)
goto err_nomem;
if (info) {
node->loaded_info = kcalloc(1, sizeof(struct gcov_info *),
GFP_KERNEL);
if (!node->loaded_info)
goto err_nomem;
}
init_node(node, info, name, parent);
/* Differentiate between gcov data file nodes and directory nodes. */
@@ -416,6 +460,11 @@ static struct gcov_node *new_node(struct gcov_node *parent,
list_add(&node->all, &all_head);
return node;
err_nomem:
kfree(node);
pr_warning("out of memory\n");
return NULL;
}
/* Remove symbolic links associated with node. */
@@ -441,8 +490,9 @@ static void release_node(struct gcov_node *node)
list_del(&node->all);
debugfs_remove(node->dentry);
remove_links(node);
if (node->ghost)
gcov_info_free(node->ghost);
kfree(node->loaded_info);
if (node->unloaded_info)
gcov_info_free(node->unloaded_info);
kfree(node);
}
@@ -477,7 +527,7 @@ static struct gcov_node *get_child_by_name(struct gcov_node *parent,
/*
* write() implementation for reset file. Reset all profiling data to zero
* and remove ghost nodes.
* and remove nodes for which all associated object files are unloaded.
*/
static ssize_t reset_write(struct file *file, const char __user *addr,
size_t len, loff_t *pos)
@@ -487,8 +537,8 @@ static ssize_t reset_write(struct file *file, const char __user *addr,
mutex_lock(&node_lock);
restart:
list_for_each_entry(node, &all_head, all) {
if (node->info)
gcov_info_reset(node->info);
if (node->num_loaded > 0)
reset_node(node);
else if (list_empty(&node->children)) {
remove_node(node);
/* Several nodes may have gone - restart loop. */
@@ -564,37 +614,115 @@ err_remove:
}
/*
* The profiling data set associated with this node is being unloaded. Store a
* copy of the profiling data and turn this node into a "ghost".
* Associate a profiling data set with an existing node. Needs to be called
* with node_lock held.
*/
static int ghost_node(struct gcov_node *node)
static void add_info(struct gcov_node *node, struct gcov_info *info)
{
node->ghost = gcov_info_dup(node->info);
if (!node->ghost) {
pr_warning("could not save data for '%s' (out of memory)\n",
node->info->filename);
return -ENOMEM;
}
node->info = NULL;
struct gcov_info **loaded_info;
int num = node->num_loaded;
return 0;
/*
* Prepare new array. This is done first to simplify cleanup in
* case the new data set is incompatible, the node only contains
* unloaded data sets and there's not enough memory for the array.
*/
loaded_info = kcalloc(num + 1, sizeof(struct gcov_info *), GFP_KERNEL);
if (!loaded_info) {
pr_warning("could not add '%s' (out of memory)\n",
info->filename);
return;
}
memcpy(loaded_info, node->loaded_info,
num * sizeof(struct gcov_info *));
loaded_info[num] = info;
/* Check if the new data set is compatible. */
if (num == 0) {
/*
* A module was unloaded, modified and reloaded. The new
* data set replaces the copy of the last one.
*/
if (!gcov_info_is_compatible(node->unloaded_info, info)) {
pr_warning("discarding saved data for %s "
"(incompatible version)\n", info->filename);
gcov_info_free(node->unloaded_info);
node->unloaded_info = NULL;
}
} else {
/*
* Two different versions of the same object file are loaded.
* The initial one takes precedence.
*/
if (!gcov_info_is_compatible(node->loaded_info[0], info)) {
pr_warning("could not add '%s' (incompatible "
"version)\n", info->filename);
kfree(loaded_info);
return;
}
}
/* Overwrite previous array. */
kfree(node->loaded_info);
node->loaded_info = loaded_info;
node->num_loaded = num + 1;
}
/*
* Profiling data for this node has been loaded again. Add profiling data
* from previous instantiation and turn this node into a regular node.
* Return the index of a profiling data set associated with a node.
*/
static void revive_node(struct gcov_node *node, struct gcov_info *info)
static int get_info_index(struct gcov_node *node, struct gcov_info *info)
{
if (gcov_info_is_compatible(node->ghost, info))
gcov_info_add(info, node->ghost);
else {
pr_warning("discarding saved data for '%s' (version changed)\n",
info->filename);
int i;
for (i = 0; i < node->num_loaded; i++) {
if (node->loaded_info[i] == info)
return i;
}
gcov_info_free(node->ghost);
node->ghost = NULL;
node->info = info;
return -ENOENT;
}
/*
* Save the data of a profiling data set which is being unloaded.
*/
static void save_info(struct gcov_node *node, struct gcov_info *info)
{
if (node->unloaded_info)
gcov_info_add(node->unloaded_info, info);
else {
node->unloaded_info = gcov_info_dup(info);
if (!node->unloaded_info) {
pr_warning("could not save data for '%s' "
"(out of memory)\n", info->filename);
}
}
}
/*
* Disassociate a profiling data set from a node. Needs to be called with
* node_lock held.
*/
static void remove_info(struct gcov_node *node, struct gcov_info *info)
{
int i;
i = get_info_index(node, info);
if (i < 0) {
pr_warning("could not remove '%s' (not found)\n",
info->filename);
return;
}
if (gcov_persist)
save_info(node, info);
/* Shrink array. */
node->loaded_info[i] = node->loaded_info[node->num_loaded - 1];
node->num_loaded--;
if (node->num_loaded > 0)
return;
/* Last loaded data set was removed. */
kfree(node->loaded_info);
node->loaded_info = NULL;
node->num_loaded = 0;
if (!node->unloaded_info)
remove_node(node);
}
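Editor's note: remove_info() shrinks loaded_info without preserving order; the removed slot is simply overwritten with the last element. The order of loaded data sets is irrelevant here, which is what makes the O(1) swap-with-last removal acceptable. The idiom in isolation (userspace sketch, names hypothetical):

/* Remove entry 'i' from an unordered pointer array of length *n in O(1)
 * by moving the last element into its slot. */
static void array_remove_unordered(void **arr, int *n, int i)
{
        arr[i] = arr[*n - 1];
        (*n)--;
}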
/*
@@ -609,30 +737,18 @@ void gcov_event(enum gcov_action action, struct gcov_info *info)
node = get_node_by_name(info->filename);
switch (action) {
case GCOV_ADD:
/* Add new node or revive ghost. */
if (!node) {
if (node)
add_info(node, info);
else
add_node(info);
break;
}
if (gcov_persist)
revive_node(node, info);
else {
pr_warning("could not add '%s' (already exists)\n",
info->filename);
}
break;
case GCOV_REMOVE:
/* Remove node or turn into ghost. */
if (!node) {
if (node)
remove_info(node, info);
else {
pr_warning("could not remove '%s' (not found)\n",
info->filename);
break;
}
if (gcov_persist) {
if (!ghost_node(node))
break;
}
remove_node(node);
break;
}
mutex_unlock(&node_lock);

@@ -143,10 +143,9 @@ int groups_search(const struct group_info *group_info, gid_t grp)
right = group_info->ngroups;
while (left < right) {
unsigned int mid = (left+right)/2;
int cmp = grp - GROUP_AT(group_info, mid);
if (cmp > 0)
if (grp > GROUP_AT(group_info, mid))
left = mid + 1;
else if (cmp < 0)
else if (grp < GROUP_AT(group_info, mid))
right = mid;
else
return 1;
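Editor's note: the old code derived an int from the difference of two gid_t values; gid_t is unsigned, so that difference can wrap and give the wrong sign, which is why the fix compares directly with < and >. A self-contained version of the same binary search over a sorted array:

#include <stddef.h>

typedef unsigned int gid_demo_t;        /* stand-in for gid_t */

/* Binary search over a sorted array, comparing directly instead of testing
 * the sign of an unsigned difference. Returns 1 if grp is present. */
static int groups_search_demo(const gid_demo_t *groups, size_t ngroups,
                              gid_demo_t grp)
{
        size_t left = 0, right = ngroups;

        while (left < right) {
                size_t mid = (left + right) / 2;

                if (grp > groups[mid])
                        left = mid + 1;
                else if (grp < groups[mid])
                        right = mid;
                else
                        return 1;
        }
        return 0;
}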

@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
*/
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
struct hrtimer_clock_base *base;
unsigned long flags;
ktime_t rem;
base = lock_hrtimer_base(timer, &flags);
lock_hrtimer_base(timer, &flags);
rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);

@@ -433,7 +433,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
perf_overflow_handler_t triggered,
struct task_struct *tsk)
{
return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);

@@ -36,15 +36,6 @@
# include <asm/mutex.h>
#endif
/***
* mutex_init - initialize the mutex
* @lock: the mutex to be initialized
* @key: the lock_class_key for the class; used by mutex lock debugging
*
* Initialize the mutex to unlocked state.
*
* It is not allowed to initialize an already locked mutex.
*/
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
/***
/**
* mutex_lock - acquire the mutex
* @lock: the mutex to be acquired
*
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
/***
/**
* mutex_unlock - release the mutex
* @lock: the mutex to be released
*
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
/***
* mutex_lock_interruptible - acquire the mutex, interruptable
/**
* mutex_lock_interruptible - acquire the mutex, interruptible
* @lock: the mutex to be acquired
*
* Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
return prev == 1;
}
/***
* mutex_trylock - try acquire the mutex, without waiting
/**
* mutex_trylock - try to acquire the mutex, without waiting
* @lock: the mutex to be acquired
*
* Try to acquire the mutex atomically. Returns 1 if the mutex
* has been acquired successfully, and 0 on contention.
*
* NOTE: this function follows the spin_trylock() convention, so
* it is negated to the down_trylock() return values! Be careful
* it is negated from the down_trylock() return values! Be careful
* about this when converting semaphore users to mutexes.
*
* This function must not be used in interrupt context. The

@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
}
}
static inline int
event_filter_match(struct perf_event *event)
{
return event->cpu == -1 || event->cpu == smp_processor_id();
}
static void
event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
u64 delta;
/*
* An event which could not be activated because of
* filter mismatch still needs to have its timings
* maintained, otherwise bogus information is returned
* via read() for time_enabled, time_running:
*/
if (event->state == PERF_EVENT_STATE_INACTIVE
&& !event_filter_match(event)) {
delta = ctx->time - event->tstamp_stopped;
event->tstamp_running += delta;
event->tstamp_stopped = ctx->time;
}
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
struct perf_event_context *ctx)
{
struct perf_event *event;
if (group_event->state != PERF_EVENT_STATE_ACTIVE)
return;
int state = group_event->state;
event_sched_out(group_event, cpuctx, ctx);
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
list_for_each_entry(event, &group_event->sibling_list, group_entry)
event_sched_out(event, cpuctx, ctx);
if (group_event->attr.exclusive)
if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
@@ -5743,15 +5761,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
switch (action) {
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
case CPU_DOWN_FAILED:
perf_event_init_cpu(cpu);
break;
case CPU_UP_CANCELED:
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
perf_event_exit_cpu(cpu);
break;
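Editor's note: masking out CPU_TASKS_FROZEN lets one case label cover both the normal and the _FROZEN (suspend/resume) variant of each hotplug notification, so the callback no longer needs to enumerate the *_FROZEN cases. A hedged sketch of the same idiom in a hypothetical notifier:

static int my_cpu_notify(struct notifier_block *nb, unsigned long action,
                         void *hcpu)
{
        switch (action & ~CPU_TASKS_FROZEN) {   /* fold *_FROZEN into base case */
        case CPU_UP_PREPARE:
        case CPU_DOWN_FAILED:
                /* (re)initialize per-cpu state for cpu (long)hcpu */
                break;
        case CPU_UP_CANCELED:
        case CPU_DOWN_PREPARE:
                /* tear down per-cpu state */
                break;
        }
        return NOTIFY_OK;
}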

@@ -389,10 +389,12 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
} else if (count == 11) { /* len('0x12345678/0') */
if (copy_from_user(ascii_value, buf, 11))
return -EFAULT;
if (strlen(ascii_value) != 10)
return -EINVAL;
x = sscanf(ascii_value, "%x", &value);
if (x != 1)
return -EINVAL;
pr_debug(KERN_ERR "%s, %d, 0x%x\n", ascii_value, x, value);
pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
} else
return -EINVAL;
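Editor's note: the added strlen() check makes sure the copied buffer really holds the full fixed-width "0x12345678" token (10 characters) before sscanf() is asked to parse it. A runnable userspace sketch of the same validation (parse_hex32() is hypothetical):

#include <stdio.h>
#include <string.h>

/* Parse a fixed-width "0x12345678" token; returns 0 on success. */
static int parse_hex32(const char *ascii_value, unsigned int *value)
{
        if (strlen(ascii_value) != 10)
                return -1;
        if (sscanf(ascii_value, "%x", value) != 1)
                return -1;
        return 0;
}

int main(void)
{
        unsigned int v;

        if (parse_hex32("0x12345678", &v) == 0)
                printf("parsed 0x%x\n", v);
        if (parse_hex32("0x12", &v) != 0)
                printf("short input rejected\n");
        return 0;
}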

@@ -338,7 +338,6 @@ int hibernation_snapshot(int platform_mode)
goto Close;
suspend_console();
hibernation_freeze_swap();
saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
error = dpm_suspend_start(PMSG_FREEZE);
if (error)

@@ -1086,7 +1086,6 @@ void swsusp_free(void)
buffer = NULL;
alloc_normal = 0;
alloc_highmem = 0;
hibernation_thaw_swap();
}
/* Helper functions used for the shrinking of memory. */
@@ -1122,9 +1121,19 @@ static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
return nr_alloc;
}
static unsigned long preallocate_image_memory(unsigned long nr_pages)
static unsigned long preallocate_image_memory(unsigned long nr_pages,
unsigned long avail_normal)
{
return preallocate_image_pages(nr_pages, GFP_IMAGE);
unsigned long alloc;
if (avail_normal <= alloc_normal)
return 0;
alloc = avail_normal - alloc_normal;
if (nr_pages < alloc)
alloc = nr_pages;
return preallocate_image_pages(alloc, GFP_IMAGE);
}
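Editor's note: the reworked helper caps each request at what is still available in the normal zone: nothing once alloc_normal has reached avail_normal, otherwise at most avail_normal - alloc_normal pages. The clamp in isolation (illustrative; names mirror the kernel code but the function is hypothetical):

static unsigned long clamp_to_avail_normal(unsigned long nr_pages,
                                           unsigned long avail_normal,
                                           unsigned long alloc_normal)
{
        unsigned long alloc;

        if (avail_normal <= alloc_normal)
                return 0;               /* normal-zone budget exhausted */
        alloc = avail_normal - alloc_normal;
        return nr_pages < alloc ? nr_pages : alloc;
}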
#ifdef CONFIG_HIGHMEM
@@ -1170,15 +1179,22 @@ static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
*/
static void free_unnecessary_pages(void)
{
unsigned long save_highmem, to_free_normal, to_free_highmem;
unsigned long save, to_free_normal, to_free_highmem;
to_free_normal = alloc_normal - count_data_pages();
save_highmem = count_highmem_pages();
if (alloc_highmem > save_highmem) {
to_free_highmem = alloc_highmem - save_highmem;
save = count_data_pages();
if (alloc_normal >= save) {
to_free_normal = alloc_normal - save;
save = 0;
} else {
to_free_normal = 0;
save -= alloc_normal;
}
save += count_highmem_pages();
if (alloc_highmem >= save) {
to_free_highmem = alloc_highmem - save;
} else {
to_free_highmem = 0;
to_free_normal -= save_highmem - alloc_highmem;
to_free_normal -= save - alloc_highmem;
}
memory_bm_position_reset(&copy_bm);
@@ -1259,7 +1275,7 @@ int hibernate_preallocate_memory(void)
{
struct zone *zone;
unsigned long saveable, size, max_size, count, highmem, pages = 0;
unsigned long alloc, save_highmem, pages_highmem;
unsigned long alloc, save_highmem, pages_highmem, avail_normal;
struct timeval start, stop;
int error;
@@ -1296,6 +1312,7 @@ int hibernate_preallocate_memory(void)
else
count += zone_page_state(zone, NR_FREE_PAGES);
}
avail_normal = count;
count += highmem;
count -= totalreserve_pages;
@@ -1310,12 +1327,21 @@ int hibernate_preallocate_memory(void)
*/
if (size >= saveable) {
pages = preallocate_image_highmem(save_highmem);
pages += preallocate_image_memory(saveable - pages);
pages += preallocate_image_memory(saveable - pages, avail_normal);
goto out;
}
/* Estimate the minimum size of the image. */
pages = minimum_image_size(saveable);
/*
* To avoid excessive pressure on the normal zone, leave room in it to
* accommodate an image of the minimum size (unless it's already too
* small, in which case don't preallocate pages from it at all).
*/
if (avail_normal > pages)
avail_normal -= pages;
else
avail_normal = 0;
if (size < pages)
size = min_t(unsigned long, pages, max_size);
@@ -1336,16 +1362,34 @@ int hibernate_preallocate_memory(void)
*/
pages_highmem = preallocate_image_highmem(highmem / 2);
alloc = (count - max_size) - pages_highmem;
pages = preallocate_image_memory(alloc);
if (pages < alloc)
goto err_out;
size = max_size - size;
alloc = size;
size = preallocate_highmem_fraction(size, highmem, count);
pages_highmem += size;
alloc -= size;
pages += preallocate_image_memory(alloc);
pages += pages_highmem;
pages = preallocate_image_memory(alloc, avail_normal);
if (pages < alloc) {
/* We have exhausted non-highmem pages, try highmem. */
alloc -= pages;
pages += pages_highmem;
pages_highmem = preallocate_image_highmem(alloc);
if (pages_highmem < alloc)
goto err_out;
pages += pages_highmem;
/*
* size is the desired number of saveable pages to leave in
* memory, so try to preallocate (all memory - size) pages.
*/
alloc = (count - pages) - size;
pages += preallocate_image_highmem(alloc);
} else {
/*
* There are approximately max_size saveable pages at this point
* and we want to reduce this number down to size.
*/
alloc = max_size - size;
size = preallocate_highmem_fraction(alloc, highmem, count);
pages_highmem += size;
alloc -= size;
size = preallocate_image_memory(alloc, avail_normal);
pages_highmem += preallocate_image_highmem(alloc - size);
pages += pages_highmem + size;
}
/*
* We only need as many page frames for the image as there are saveable

@@ -136,10 +136,10 @@ sector_t alloc_swapdev_block(int swap)
{
unsigned long offset;
offset = swp_offset(get_swap_for_hibernation(swap));
offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
if (swsusp_extents_insert(offset))
swap_free_for_hibernation(swp_entry(swap, offset));
swap_free(swp_entry(swap, offset));
else
return swapdev_block(swap, offset);
}
@@ -163,7 +163,7 @@ void free_all_swap_pages(int swap)
ext = container_of(node, struct swsusp_extent, node);
rb_erase(node, &swsusp_extents);
for (offset = ext->start; offset <= ext->end; offset++)
swap_free_for_hibernation(swp_entry(swap, offset));
swap_free(swp_entry(swap, offset));
kfree(ext);
}

@@ -1294,6 +1294,10 @@ static void resched_task(struct task_struct *p)
static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}
static void sched_avg_update(struct rq *rq)
{
}
#endif /* CONFIG_SMP */
#if BITS_PER_LONG == 32
@@ -3182,6 +3186,8 @@ static void update_cpu_load(struct rq *this_rq)
this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
}
sched_avg_update(this_rq);
}
static void update_cpu_load_active(struct rq *this_rq)
@@ -3507,9 +3513,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
if (total) {
u64 temp;
u64 temp = rtime;
temp = (u64)(rtime * utime);
temp *= utime;
do_div(temp, total);
utime = (cputime_t)temp;
} else
@@ -3540,9 +3546,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
if (total) {
u64 temp;
u64 temp = rtime;
temp = (u64)(rtime * cputime.utime);
temp *= cputime.utime;
do_div(temp, total);
utime = (cputime_t)temp;
} else
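Editor's note: the old code multiplied rtime * utime in cputime_t (commonly an unsigned long, 32 bits on 32-bit kernels) and only then cast to u64, so the product could wrap before the cast; assigning rtime to a u64 first keeps the multiplication in 64 bits. A standalone demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t rtime = 500000, utime = 400000, total = 900000;

        /* 32-bit multiply wraps before the widening cast ... */
        uint64_t wrapped = (uint64_t)(rtime * utime);
        /* ... widening one operand first keeps the full 64-bit product. */
        uint64_t full = (uint64_t)rtime * utime;

        printf("wrapped scaling: %llu\n", (unsigned long long)(wrapped / total));
        printf("correct scaling: %llu\n", (unsigned long long)(full / total));
        return 0;
}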

@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
* Minimal preemption granularity for CPU-bound tasks:
* (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
unsigned int sysctl_sched_min_granularity = 2000000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
/*
* is kept at sysctl_sched_latency / sysctl_sched_min_granularity
*/
static unsigned int sched_nr_latency = 3;
static unsigned int sched_nr_latency = 8;
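Editor's note: assuming the default sysctl_sched_latency of 6 ms in this series, the new value follows from the stated ratio:

sched_nr_latency = sysctl_sched_latency / sysctl_sched_min_granularity
                 = 6,000,000 ns / 750,000 ns = 8

which matches the old pairing of 6,000,000 / 2,000,000 = 3 before the granularity change.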
/*
* After fork, child runs first. If set to 0 (default) then
@@ -1313,7 +1313,7 @@ static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int load_idx)
{
struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
struct sched_group *idlest = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
if (local_group) {
this_load = avg_load;
this = group;
} else if (avg_load < min_load) {
min_load = avg_load;
idlest = group;
@@ -2268,8 +2267,6 @@ unsigned long scale_rt_power(int cpu)
struct rq *rq = cpu_rq(cpu);
u64 total, available;
sched_avg_update(rq);
total = sched_avg_period() + (rq->clock - rq->age_stamp);
available = total - rq->rt_avg;
@@ -3633,7 +3630,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
if (time_before(now, nohz.next_balance))
return 0;
if (!rq->nr_running)
if (rq->idle_at_tick)
return 0;
first_pick_cpu = atomic_read(&nohz.first_pick_cpu);

@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
pgid = pid;
if (pgid < 0)
return -EINVAL;
rcu_read_lock();
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
return err;
}

@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
{
sysctl_set_parent(NULL, root_table);
#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
{
int err;
err = sysctl_check_table(current->nsproxy, root_table);
}
sysctl_check_table(current->nsproxy, root_table);
#endif
return 0;
}

@@ -121,7 +121,7 @@ if FTRACE
config FUNCTION_TRACER
bool "Kernel Function Tracer"
depends on HAVE_FUNCTION_TRACER
select FRAME_POINTER
select FRAME_POINTER if (!ARM_UNWIND)
select KALLSYMS
select GENERIC_TRACER
select CONTEXT_SWITCH_TRACER

@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
{
struct ftrace_profile *rec = v;
char str[KSYM_SYMBOL_LEN];
int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static DEFINE_MUTEX(mutex);
static struct trace_seq s;
unsigned long long avg;
unsigned long long stddev;
#endif
mutex_lock(&ftrace_profile_lock);
/* we raced with function_profile_reset() */
if (unlikely(rec->counter == 0)) {
ret = -EBUSY;
goto out;
}
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
do_div(stddev, (rec->counter - 1) * 1000);
}
mutex_lock(&mutex);
trace_seq_init(&s);
trace_print_graph_duration(rec->time, &s);
trace_seq_puts(&s, " ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
trace_seq_puts(&s, " ");
trace_print_graph_duration(stddev, &s);
trace_print_seq(m, &s);
mutex_unlock(&mutex);
#endif
seq_putc(m, '\n');
out:
mutex_unlock(&ftrace_profile_lock);
return 0;
return ret;
}
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -1503,6 +1510,8 @@ static void *t_start(struct seq_file *m, loff_t *pos)
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
/* reset in case of seek/pread */
iter->flags &= ~FTRACE_ITER_HASH;
return iter;
}
@@ -2409,7 +2418,7 @@ static const struct file_operations ftrace_filter_fops = {
.open = ftrace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
.llseek = ftrace_regex_lseek,
.llseek = no_llseek,
.release = ftrace_filter_release,
};

@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
struct ring_buffer *buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
unsigned length;
cpu_buffer = iter->cpu_buffer;
buffer = cpu_buffer->buffer;
/*
* Check if we are at the end of the buffer.

@@ -91,6 +91,8 @@ int perf_trace_init(struct perf_event *p_event)
tp_event->class && tp_event->class->reg &&
try_module_get(tp_event->mod)) {
ret = perf_trace_event_init(tp_event, p_event);
if (ret)
module_put(tp_event->mod);
break;
}
}
@@ -146,6 +148,7 @@ void perf_trace_destroy(struct perf_event *p_event)
}
}
out:
module_put(tp_event->mod);
mutex_unlock(&event_mutex);
}
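Editor's note: the fix restores the usual pairing rule for module references: a reference taken with try_module_get() must be dropped on every failure path of the setup it protects, and again at teardown. The pattern in isolation (a sketch; init_thing(), thing_init() and thing_destroy() are hypothetical):

static int thing_init(struct module *mod)
{
        int ret = -ENODEV;

        if (try_module_get(mod)) {
                ret = init_thing(mod);          /* setup guarded by the reference */
                if (ret)
                        module_put(mod);        /* error path: drop it again */
        }
        return ret;
}

static void thing_destroy(struct module *mod)
{
        module_put(mod);                        /* matches the successful try_module_get() */
}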

@@ -514,8 +514,8 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs);
static int kretprobe_dispatcher(struct kretprobe_instance *ri,
struct pt_regs *regs);
/* Check the name is good for event/group */
static int check_event_name(const char *name)
/* Check the name is good for event/group/fields */
static int is_good_name(const char *name)
{
if (!isalpha(*name) && *name != '_')
return 0;
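Editor's note: the renamed helper enforces the usual identifier rule, now also applied to field names: the first character must be a letter or '_', the rest letters, digits or '_'. A self-contained userspace version of that rule (the body beyond the two lines shown above is reconstructed and should be read as illustrative):

#include <ctype.h>

static int is_good_name_demo(const char *name)
{
        if (!isalpha((unsigned char)*name) && *name != '_')
                return 0;
        while (*++name != '\0') {
                if (!isalpha((unsigned char)*name) &&
                    !isdigit((unsigned char)*name) && *name != '_')
                        return 0;
        }
        return 1;
}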
@@ -557,7 +557,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
else
tp->rp.kp.pre_handler = kprobe_dispatcher;
if (!event || !check_event_name(event)) {
if (!event || !is_good_name(event)) {
ret = -EINVAL;
goto error;
}
@@ -567,7 +567,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
if (!tp->call.name)
goto error;
if (!group || !check_event_name(group)) {
if (!group || !is_good_name(group)) {
ret = -EINVAL;
goto error;
}
@@ -883,7 +883,7 @@ static int create_trace_probe(int argc, char **argv)
int i, ret = 0;
int is_return = 0, is_delete = 0;
char *symbol = NULL, *event = NULL, *group = NULL;
char *arg, *tmp;
char *arg;
unsigned long offset = 0;
void *addr = NULL;
char buf[MAX_EVENT_NAME_LEN];
@@ -992,26 +992,36 @@ static int create_trace_probe(int argc, char **argv)
/* parse arguments */
ret = 0;
for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
/* Increment count for freeing args in error case */
tp->nr_args++;
/* Parse argument name */
arg = strchr(argv[i], '=');
if (arg)
if (arg) {
*arg++ = '\0';
else
tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
} else {
arg = argv[i];
/* If argument name is omitted, set "argN" */
snprintf(buf, MAX_EVENT_NAME_LEN, "arg%d", i + 1);
tp->args[i].name = kstrdup(buf, GFP_KERNEL);
}
tp->args[i].name = kstrdup(argv[i], GFP_KERNEL);
if (!tp->args[i].name) {
pr_info("Failed to allocate argument%d name '%s'.\n",
i, argv[i]);
pr_info("Failed to allocate argument[%d] name.\n", i);
ret = -ENOMEM;
goto error;
}
tmp = strchr(tp->args[i].name, ':');
if (tmp)
*tmp = '_'; /* convert : to _ */
if (!is_good_name(tp->args[i].name)) {
pr_info("Invalid argument[%d] name: %s\n",
i, tp->args[i].name);
ret = -EINVAL;
goto error;
}
if (conflict_field_name(tp->args[i].name, tp->args, i)) {
pr_info("Argument%d name '%s' conflicts with "
pr_info("Argument[%d] name '%s' conflicts with "
"another field.\n", i, argv[i]);
ret = -EINVAL;
goto error;
@@ -1020,12 +1030,9 @@ static int create_trace_probe(int argc, char **argv)
/* Parse fetch argument */
ret = parse_probe_arg(arg, tp, &tp->args[i], is_return);
if (ret) {
pr_info("Parse error at argument%d. (%d)\n", i, ret);
kfree(tp->args[i].name);
pr_info("Parse error at argument[%d]. (%d)\n", i, ret);
goto error;
}
tp->nr_args++;
}
ret = register_trace_probe(tp);

@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
void touch_softlockup_watchdog(void)
{
__get_cpu_var(watchdog_touch_ts) = 0;
__raw_get_cpu_var(watchdog_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
__get_cpu_var(watchdog_nmi_touch) = true;
if (watchdog_enabled) {
unsigned cpu;
for_each_present_cpu(cpu) {
if (per_cpu(watchdog_nmi_touch, cpu) != true)
per_cpu(watchdog_nmi_touch, cpu) = true;
}
}
touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -433,6 +440,9 @@ static int watchdog_enable(int cpu)
wake_up_process(p);
}
/* if any cpu succeeds, watchdog is considered enabled for the system */
watchdog_enabled = 1;
return 0;
}
@@ -455,9 +465,6 @@ static void watchdog_disable(int cpu)
per_cpu(softlockup_watchdog, cpu) = NULL;
kthread_stop(p);
}
/* if any cpu succeeds, watchdog is considered enabled for the system */
watchdog_enabled = 1;
}
static void watchdog_enable_all_cpus(void)

@@ -1,19 +1,26 @@
/*
* linux/kernel/workqueue.c
* kernel/workqueue.c - generic async execution with shared worker pool
*
* Generic mechanism for defining kernel helper threads for running
* arbitrary tasks in process context.
* Copyright (C) 2002 Ingo Molnar
*
* Started by Ingo Molnar, Copyright (C) 2002
*
* Derived from the taskqueue/keventd code by:
*
* David Woodhouse <dwmw2@infradead.org>
* Andrew Morton
* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
* Derived from the taskqueue/keventd code by:
* David Woodhouse <dwmw2@infradead.org>
* Andrew Morton
* Kai Petzke <wpp@marie.physik.tu-berlin.de>
* Theodore Ts'o <tytso@mit.edu>
*
* Made to use alloc_percpu by Christoph Lameter.
*
* Copyright (C) 2010 SUSE Linux Products GmbH
* Copyright (C) 2010 Tejun Heo <tj@kernel.org>
*
* This is the generic async execution mechanism. Work items are
* executed in process context. The worker pool is shared and
* automatically managed. There is one worker pool for each CPU and
* one extra for works which are better served by workers which are
* not bound to any specific CPU.
*
* Please read Documentation/workqueue.txt for details.
*/
#include <linux/module.h>
@@ -90,7 +97,8 @@ enum {
/*
* Structure fields follow one of the following exclusion rules.
*
* I: Set during initialization and read-only afterwards.
* I: Modifiable by initialization/destruction paths and read-only for
* everyone else.
*
* P: Preemption protected. Disabling preemption is enough and should
* only be modified and accessed from the local cpu.
@@ -198,7 +206,7 @@ typedef cpumask_var_t mayday_mask_t;
cpumask_test_and_set_cpu((cpu), (mask))
#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
#define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp))
#define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask) free_cpumask_var((mask))
#else
typedef unsigned long mayday_mask_t;
@@ -943,10 +951,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
struct global_cwq *gcwq;
struct cpu_workqueue_struct *cwq;
struct list_head *worklist;
unsigned int work_flags;
unsigned long flags;
debug_work_activate(work);
if (WARN_ON_ONCE(wq->flags & WQ_DYING))
return;
/* determine gcwq to use */
if (!(wq->flags & WQ_UNBOUND)) {
struct global_cwq *last_gcwq;
@@ -989,14 +1001,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
BUG_ON(!list_empty(&work->entry));
cwq->nr_in_flight[cwq->work_color]++;
work_flags = work_color_to_flags(cwq->work_color);
if (likely(cwq->nr_active < cwq->max_active)) {
cwq->nr_active++;
worklist = gcwq_determine_ins_pos(gcwq, cwq);
} else
} else {
work_flags |= WORK_STRUCT_DELAYED;
worklist = &cwq->delayed_works;
}
insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
insert_work(cwq, work, worklist, work_flags);
spin_unlock_irqrestore(&gcwq->lock, flags);
}
@@ -1215,6 +1230,7 @@ static void worker_leave_idle(struct worker *worker)
* bound), %false if offline.
*/
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
struct global_cwq *gcwq = worker->gcwq;
struct task_struct *task = worker->task;
@@ -1488,6 +1504,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
* otherwise.
*/
static bool maybe_create_worker(struct global_cwq *gcwq)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
if (!need_to_create_worker(gcwq))
return false;
@@ -1662,6 +1680,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
move_linked_works(work, pos, NULL);
__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
cwq->nr_active++;
}
@@ -1669,6 +1688,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
* cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
* @cwq: cwq of interest
* @color: color of work which left the queue
* @delayed: for a delayed work
*
* A work either has completed or is removed from pending queue,
* decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1696,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
bool delayed)
{
/* ignore uncolored works */
if (color == WORK_NO_COLOR)
return;
cwq->nr_in_flight[color]--;
cwq->nr_active--;
if (!list_empty(&cwq->delayed_works)) {
/* one down, submit a delayed one */
if (cwq->nr_active < cwq->max_active)
cwq_activate_first_delayed(cwq);
if (!delayed) {
cwq->nr_active--;
if (!list_empty(&cwq->delayed_works)) {
/* one down, submit a delayed one */
if (cwq->nr_active < cwq->max_active)
cwq_activate_first_delayed(cwq);
}
}
/* is flush in progress and are we at the flushing tip? */
@@ -1725,6 +1748,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
* spin_lock_irq(gcwq->lock) which is released and regrabbed.
*/
static void process_one_work(struct worker *worker, struct work_struct *work)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1848,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
hlist_del_init(&worker->hentry);
worker->current_work = NULL;
worker->current_cwq = NULL;
cwq_dec_nr_in_flight(cwq, work_color);
cwq_dec_nr_in_flight(cwq, work_color, false);
}
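Editor's note: the extra 'delayed' argument keeps nr_active consistent. A work item parked on delayed_works because the workqueue was already at max_active never incremented nr_active, so retiring or cancelling it must not decrement the counter either. The bookkeeping in isolation (a userspace sketch with toy types; all names hypothetical):

#include <stdbool.h>

struct cwq_demo {
        int nr_active;          /* works currently counted against max_active */
        int max_active;
        int nr_delayed;         /* works parked on the delayed list */
};

/* Queueing: only works below the max_active limit become active. */
static bool queue_demo(struct cwq_demo *cwq)
{
        if (cwq->nr_active < cwq->max_active) {
                cwq->nr_active++;
                return true;            /* queued as active work */
        }
        cwq->nr_delayed++;
        return false;                   /* queued as delayed work */
}

/* Retiring a work: only active works release an nr_active slot, and a
 * freed slot immediately activates one delayed work if any is waiting. */
static void dec_nr_in_flight_demo(struct cwq_demo *cwq, bool delayed)
{
        if (delayed)
                return;                 /* never held a slot, nothing to release */
        cwq->nr_active--;
        if (cwq->nr_delayed > 0 && cwq->nr_active < cwq->max_active) {
                cwq->nr_delayed--;
                cwq->nr_active++;       /* promote one delayed work */
        }
}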
/**
@@ -2388,7 +2413,8 @@ static int try_to_grab_pending(struct work_struct *work)
debug_work_deactivate(work);
list_del_init(&work->entry);
cwq_dec_nr_in_flight(get_work_cwq(work),
get_work_color(work));
get_work_color(work),
*work_data_bits(work) & WORK_STRUCT_DELAYED);
ret = 1;
}
}
@@ -2791,7 +2817,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
if (IS_ERR(rescuer->task))
goto err;
wq->rescuer = rescuer;
rescuer->task->flags |= PF_THREAD_BOUND;
wake_up_process(rescuer->task);
}
@@ -2833,6 +2858,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
{
unsigned int cpu;
wq->flags |= WQ_DYING;
flush_workqueue(wq);
/*
@@ -2857,6 +2883,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
if (wq->flags & WQ_RESCUER) {
kthread_stop(wq->rescuer->task);
free_mayday_mask(wq->mayday_mask);
kfree(wq->rescuer);
}
free_cwqs(wq);
@@ -3239,6 +3266,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
* multiple times. To be used by cpu_callback.
*/
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
if (!(gcwq->trustee_state == state ||
gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3574,7 @@ static int __init init_workqueues(void)
spin_lock_init(&gcwq->lock);
INIT_LIST_HEAD(&gcwq->worklist);
gcwq->cpu = cpu;
if (cpu == WORK_CPU_UNBOUND)
gcwq->flags |= GCWQ_DISASSOCIATED;
gcwq->flags |= GCWQ_DISASSOCIATED;
INIT_LIST_HEAD(&gcwq->idle_list);
for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3598,8 @@ static int __init init_workqueues(void)
struct global_cwq *gcwq = get_gcwq(cpu);
struct worker *worker;
if (cpu != WORK_CPU_UNBOUND)
gcwq->flags &= ~GCWQ_DISASSOCIATED;
worker = create_worker(gcwq, true);
BUG_ON(!worker);
spin_lock_irq(&gcwq->lock);