Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c          |   2
-rw-r--r--  kernel/compat.c          |   3
-rw-r--r--  kernel/cpu.c             |   4
-rw-r--r--  kernel/exit.c            |  66
-rw-r--r--  kernel/fork.c            |   2
-rw-r--r--  kernel/futex.c           |  17
-rw-r--r--  kernel/hrtimer.c         | 177
-rw-r--r--  kernel/kthread.c         |   2
-rw-r--r--  kernel/marker.c          |   2
-rw-r--r--  kernel/pid.c             |  41
-rw-r--r--  kernel/pid_namespace.c   |   2
-rw-r--r--  kernel/posix-timers.c    |   6
-rw-r--r--  kernel/printk.c          |  96
-rw-r--r--  kernel/ptrace.c          |  13
-rw-r--r--  kernel/signal.c          | 646
-rw-r--r--  kernel/sys.c             |  79
-rw-r--r--  kernel/taskstats.c       |   6
-rw-r--r--  kernel/timer.c           | 153
-rw-r--r--  kernel/user.c            |  18
-rw-r--r--  kernel/workqueue.c       |   2
20 files changed, 786 insertions, 551 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index b9d467d83fc..fbc6fc8949b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -575,7 +575,7 @@ static struct inode_operations cgroup_dir_inode_operations;
static struct file_operations proc_cgroupstats_operations;
static struct backing_dev_info cgroup_backing_dev_info = {
- .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+ .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
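Note: BDI_CAP_NO_ACCT_AND_WRITEBACK is a convenience mask rather than a new capability; it is not defined in this diff, but judging from the two flags it replaces it presumably expands in include/linux/backing-dev.h to something like:

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY)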
diff --git a/kernel/compat.c b/kernel/compat.c
index e1ef04870c2..4a856a3643b 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -898,7 +898,7 @@ asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat
current->state = TASK_INTERRUPTIBLE;
schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
+ set_restore_sigmask();
return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */
@@ -1080,4 +1080,3 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
return 0;
}
-
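Here, and in the matching sys_rt_sigsuspend() hunk in kernel/signal.c further down, the open-coded set_thread_flag(TIF_RESTORE_SIGMASK) becomes set_restore_sigmask(). The helper is not part of this diff; presumably it is a one-line wrapper in include/linux/thread_info.h of roughly this shape:

static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}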
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a98f6ab16ec..c77bc3a1c72 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -215,7 +215,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
hcpu, nr_calls, NULL);
printk("%s: attempt to take down CPU %u failed\n",
- __FUNCTION__, cpu);
+ __func__, cpu);
err = -EINVAL;
goto out_release;
}
@@ -295,7 +295,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
if (ret == NOTIFY_BAD) {
nr_calls--;
printk("%s: attempt to bring up CPU %u failed\n",
- __FUNCTION__, cpu);
+ __func__, cpu);
ret = -EINVAL;
goto out_notify;
}
diff --git a/kernel/exit.c b/kernel/exit.c
index ae0f2c4e452..d3ad54677f9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -52,6 +52,11 @@
static void exit_mm(struct task_struct * tsk);
+static inline int task_detached(struct task_struct *p)
+{
+ return p->exit_signal == -1;
+}
+
static void __unhash_process(struct task_struct *p)
{
nr_threads--;
@@ -160,7 +165,7 @@ repeat:
zap_leader = 0;
leader = p->group_leader;
if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
- BUG_ON(leader->exit_signal == -1);
+ BUG_ON(task_detached(leader));
do_notify_parent(leader, leader->exit_signal);
/*
* If we were the last child thread and the leader has
@@ -170,7 +175,7 @@ repeat:
* do_notify_parent() will have marked it self-reaping in
* that case.
*/
- zap_leader = (leader->exit_signal == -1);
+ zap_leader = task_detached(leader);
}
write_unlock_irq(&tasklist_lock);
@@ -329,13 +334,11 @@ void __set_special_pids(struct pid *pid)
pid_t nr = pid_nr(pid);
if (task_session(curr) != pid) {
- detach_pid(curr, PIDTYPE_SID);
- attach_pid(curr, PIDTYPE_SID, pid);
+ change_pid(curr, PIDTYPE_SID, pid);
set_task_session(curr, nr);
}
if (task_pgrp(curr) != pid) {
- detach_pid(curr, PIDTYPE_PGID);
- attach_pid(curr, PIDTYPE_PGID, pid);
+ change_pid(curr, PIDTYPE_PGID, pid);
set_task_pgrp(curr, nr);
}
}
@@ -693,7 +696,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
if (unlikely(traced)) {
/* Preserve ptrace links if someone else is tracing this child. */
list_del_init(&p->ptrace_list);
- if (p->parent != p->real_parent)
+ if (ptrace_reparented(p))
list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
} else {
/* If this child is being traced, then we're the one tracing it
@@ -717,18 +720,18 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
/* If this is a threaded reparent there is no need to
* notify anyone anything has happened.
*/
- if (p->real_parent->group_leader == father->group_leader)
+ if (same_thread_group(p->real_parent, father))
return;
/* We don't want people slaying init. */
- if (p->exit_signal != -1)
+ if (!task_detached(p))
p->exit_signal = SIGCHLD;
/* If we'd notified the old parent about this child's death,
* also notify the new parent.
*/
if (!traced && p->exit_state == EXIT_ZOMBIE &&
- p->exit_signal != -1 && thread_group_empty(p))
+ !task_detached(p) && thread_group_empty(p))
do_notify_parent(p, p->exit_signal);
kill_orphaned_pgrp(p, father);
@@ -781,18 +784,18 @@ static void forget_original_parent(struct task_struct *father)
} else {
/* reparent ptraced task to its real parent */
__ptrace_unlink (p);
- if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
+ if (p->exit_state == EXIT_ZOMBIE && !task_detached(p) &&
thread_group_empty(p))
do_notify_parent(p, p->exit_signal);
}
/*
- * if the ptraced child is a zombie with exit_signal == -1
- * we must collect it before we exit, or it will remain
- * zombie forever since we prevented it from self-reap itself
- * while it was being traced by us, to be able to see it in wait4.
+ * If the ptraced child is a detached zombie we must collect
+ * it before we exit, or it will remain a zombie forever, since
+ * we prevented it from reaping itself while it was being
+ * traced by us, so that we could see it in wait4.
*/
- if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
+ if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && task_detached(p)))
list_add(&p->ptrace_list, &ptrace_dead);
}
@@ -849,29 +852,30 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
* we have changed execution domain as these two values started
* the same after a fork.
*/
- if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
+ if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
(tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
- tsk->self_exec_id != tsk->parent_exec_id)
- && !capable(CAP_KILL))
+ tsk->self_exec_id != tsk->parent_exec_id) &&
+ !capable(CAP_KILL))
tsk->exit_signal = SIGCHLD;
-
/* If something other than our normal parent is ptracing us, then
* send it a SIGCHLD instead of honoring exit_signal. exit_signal
* only has special meaning to our real parent.
*/
- if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
- int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
+ if (!task_detached(tsk) && thread_group_empty(tsk)) {
+ int signal = ptrace_reparented(tsk) ?
+ SIGCHLD : tsk->exit_signal;
do_notify_parent(tsk, signal);
} else if (tsk->ptrace) {
do_notify_parent(tsk, SIGCHLD);
}
state = EXIT_ZOMBIE;
- if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
+ if (task_detached(tsk) && likely(!tsk->ptrace))
state = EXIT_DEAD;
tsk->exit_state = state;
+ /* mt-exec, de_thread() is waiting for us */
if (thread_group_leader(tsk) &&
tsk->signal->notify_count < 0 &&
tsk->signal->group_exit_task)
@@ -1115,12 +1119,13 @@ asmlinkage long sys_exit(int error_code)
NORET_TYPE void
do_group_exit(int exit_code)
{
+ struct signal_struct *sig = current->signal;
+
BUG_ON(exit_code & 0x80); /* core dumps don't get here */
- if (current->signal->flags & SIGNAL_GROUP_EXIT)
- exit_code = current->signal->group_exit_code;
+ if (signal_group_exit(sig))
+ exit_code = sig->group_exit_code;
else if (!thread_group_empty(current)) {
- struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
if (signal_group_exit(sig))
@@ -1172,7 +1177,7 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options,
* Do not consider detached threads that are
* not ptraced:
*/
- if (p->exit_signal == -1 && !p->ptrace)
+ if (task_detached(p) && !p->ptrace)
return 0;
/* Wait for all children (clone and not) if __WALL is set;
@@ -1262,8 +1267,7 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
return 0;
}
- /* traced means p->ptrace, but not vice versa */
- traced = (p->real_parent != p->parent);
+ traced = ptrace_reparented(p);
if (likely(!traced)) {
struct signal_struct *psig;
@@ -1364,9 +1368,9 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
* If it's still not detached after that, don't release
* it now.
*/
- if (p->exit_signal != -1) {
+ if (!task_detached(p)) {
do_notify_parent(p, p->exit_signal);
- if (p->exit_signal != -1) {
+ if (!task_detached(p)) {
p->exit_state = EXIT_ZOMBIE;
p = NULL;
}
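The exit.c changes replace two recurring open-coded tests with predicates: task_detached(), added at the top of this file, and ptrace_reparented(), which is not defined in this diff (presumably it lives in include/linux/ptrace.h). Judging from the expressions it replaces above, the latter is simply:

static inline int ptrace_reparented(struct task_struct *child)
{
	return child->real_parent != child->parent;
}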
diff --git a/kernel/fork.c b/kernel/fork.c
index 068ffe00752..2bb675af4de 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -892,7 +892,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->group_exit_code = 0;
sig->group_exit_task = NULL;
sig->group_stop_count = 0;
- sig->curr_target = NULL;
+ sig->curr_target = tsk;
init_sigpending(&sig->shared_pending);
INIT_LIST_HEAD(&sig->posix_timers);
diff --git a/kernel/futex.c b/kernel/futex.c
index e43945e995f..98092c9817f 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1266,11 +1266,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
if (!abs_time)
schedule();
else {
- hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
hrtimer_init_sleeper(&t, current);
t.timer.expires = *abs_time;
- hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);
+ hrtimer_start(&t.timer, t.timer.expires,
+ HRTIMER_MODE_ABS);
if (!hrtimer_active(&t.timer))
t.task = NULL;
@@ -1286,6 +1288,8 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
/* Flag if a timeout occurred */
rem = (t.task == NULL);
+
+ destroy_hrtimer_on_stack(&t.timer);
}
}
__set_current_state(TASK_RUNNING);
@@ -1367,7 +1371,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
if (time) {
to = &timeout;
- hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
+ HRTIMER_MODE_ABS);
hrtimer_init_sleeper(to, current);
to->timer.expires = *time;
}
@@ -1581,6 +1586,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
unqueue_me_pi(&q);
futex_unlock_mm(fshared);
+ if (to)
+ destroy_hrtimer_on_stack(&to->timer);
return ret != -EINTR ? ret : -ERESTARTNOINTR;
out_unlock_release_sem:
@@ -1588,6 +1595,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
out_release_sem:
futex_unlock_mm(fshared);
+ if (to)
+ destroy_hrtimer_on_stack(&to->timer);
return ret;
uaddr_faulted:
@@ -1615,6 +1624,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
if (!ret && (uval != -EFAULT))
goto retry;
+ if (to)
+ destroy_hrtimer_on_stack(&to->timer);
return ret;
}
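Every change in this file follows one pattern: a timer living in a stack frame is set up with hrtimer_init_on_stack() and must be released with destroy_hrtimer_on_stack() on every exit path, so the debugobjects code added in kernel/hrtimer.c below can track it. A minimal sketch of the lifecycle, modeled on futex_wait() above (the function name and return convention are illustrative):

static int example_wait(ktime_t *abs_time)
{
	struct hrtimer_sleeper t;
	int timed_out;

	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	hrtimer_init_sleeper(&t, current);
	t.timer.expires = *abs_time;

	set_current_state(TASK_INTERRUPTIBLE);
	hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);
	if (t.task)
		schedule();		/* the sleeper wakes us on expiry */
	__set_current_state(TASK_RUNNING);
	hrtimer_cancel(&t.timer);

	timed_out = (t.task == NULL);	/* sleeper clears ->task when it fires */
	destroy_hrtimer_on_stack(&t.timer);	/* pairs with _init_on_stack */
	return timed_out;
}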
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index dea4c9124ac..9af1d6a8095 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -43,6 +43,7 @@
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
+#include <linux/debugobjects.h>
#include <asm/uaccess.h>
@@ -342,6 +343,115 @@ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
return res;
}
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+
+static struct debug_obj_descr hrtimer_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
+{
+ struct hrtimer *timer = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ hrtimer_cancel(timer);
+ debug_object_init(timer, &hrtimer_debug_descr);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
+{
+ switch (state) {
+
+ case ODEBUG_STATE_NOTAVAILABLE:
+ WARN_ON_ONCE(1);
+ return 0;
+
+ case ODEBUG_STATE_ACTIVE:
+ WARN_ON(1);
+
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct hrtimer *timer = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ hrtimer_cancel(timer);
+ debug_object_free(timer, &hrtimer_debug_descr);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static struct debug_obj_descr hrtimer_debug_descr = {
+ .name = "hrtimer",
+ .fixup_init = hrtimer_fixup_init,
+ .fixup_activate = hrtimer_fixup_activate,
+ .fixup_free = hrtimer_fixup_free,
+};
+
+static inline void debug_hrtimer_init(struct hrtimer *timer)
+{
+ debug_object_init(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_activate(struct hrtimer *timer)
+{
+ debug_object_activate(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
+{
+ debug_object_deactivate(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_free(struct hrtimer *timer)
+{
+ debug_object_free(timer, &hrtimer_debug_descr);
+}
+
+static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode);
+
+void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
+{
+ debug_object_init_on_stack(timer, &hrtimer_debug_descr);
+ __hrtimer_init(timer, clock_id, mode);
+}
+
+void destroy_hrtimer_on_stack(struct hrtimer *timer)
+{
+ debug_object_free(timer, &hrtimer_debug_descr);
+}
+
+#else
+static inline void debug_hrtimer_init(struct hrtimer *timer) { }
+static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
+static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
+#endif
+
/*
* Check, whether the timer is on the callback pending list
*/
@@ -567,6 +677,7 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
/* Timer is expired, act upon the callback mode */
switch(timer->cb_mode) {
case HRTIMER_CB_IRQSAFE_NO_RESTART:
+ debug_hrtimer_deactivate(timer);
/*
* We can call the callback from here. No restart
* happens, so no danger of recursion
@@ -581,6 +692,7 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
* the tick timer in the softirq ! The calling site
* takes care of this.
*/
+ debug_hrtimer_deactivate(timer);
return 1;
case HRTIMER_CB_IRQSAFE:
case HRTIMER_CB_SOFTIRQ:
@@ -735,6 +847,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
struct hrtimer *entry;
int leftmost = 1;
+ debug_hrtimer_activate(timer);
+
/*
* Find the right place in the rbtree:
*/
@@ -831,6 +945,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
* reprogramming happens in the interrupt handler. This is a
* rare case and less expensive than a smp call.
*/
+ debug_hrtimer_deactivate(timer);
timer_stats_hrtimer_clear_start_info(timer);
reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
@@ -878,6 +993,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
tim = ktime_add_safe(tim, base->resolution);
#endif
}
+
timer->expires = tim;
timer_stats_hrtimer_set_start_info(timer);
@@ -1011,14 +1127,8 @@ ktime_t hrtimer_get_next_event(void)
}
#endif
-/**
- * hrtimer_init - initialize a timer to the given clock
- * @timer: the timer to be initialized
- * @clock_id: the clock to be used
- * @mode: timer mode abs/rel
- */
-void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
- enum hrtimer_mode mode)
+static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
{
struct hrtimer_cpu_base *cpu_base;
@@ -1039,6 +1149,19 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
+
+/**
+ * hrtimer_init - initialize a timer to the given clock
+ * @timer: the timer to be initialized
+ * @clock_id: the clock to be used
+ * @mode: timer mode abs/rel
+ */
+void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
+{
+ debug_hrtimer_init(timer);
+ __hrtimer_init(timer, clock_id, mode);
+}
EXPORT_SYMBOL_GPL(hrtimer_init);
/**
@@ -1072,6 +1195,7 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
timer = list_entry(cpu_base->cb_pending.next,
struct hrtimer, cb_entry);
+ debug_hrtimer_deactivate(timer);
timer_stats_account_hrtimer(timer);
fn = timer->function;
@@ -1120,6 +1244,7 @@ static void __run_hrtimer(struct hrtimer *timer)
enum hrtimer_restart (*fn)(struct hrtimer *);
int restart;
+ debug_hrtimer_deactivate(timer);
__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
timer_stats_account_hrtimer(timer);
@@ -1378,22 +1503,27 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
struct hrtimer_sleeper t;
struct timespec __user *rmtp;
+ int ret = 0;
- hrtimer_init(&t.timer, restart->nanosleep.index, HRTIMER_MODE_ABS);
+ hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
+ HRTIMER_MODE_ABS);
t.timer.expires.tv64 = restart->nanosleep.expires;
if (do_nanosleep(&t, HRTIMER_MODE_ABS))
- return 0;
+ goto out;
rmtp = restart->nanosleep.rmtp;
if (rmtp) {
- int ret = update_rmtp(&t.timer, rmtp);
+ ret = update_rmtp(&t.timer, rmtp);
if (ret <= 0)
- return ret;
+ goto out;
}
/* The other values in restart are already filled in */
- return -ERESTART_RESTARTBLOCK;
+ ret = -ERESTART_RESTARTBLOCK;
+out:
+ destroy_hrtimer_on_stack(&t.timer);
+ return ret;
}
long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
@@ -1401,20 +1531,23 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
{
struct restart_block *restart;
struct hrtimer_sleeper t;
+ int ret = 0;
- hrtimer_init(&t.timer, clockid, mode);
+ hrtimer_init_on_stack(&t.timer, clockid, mode);
t.timer.expires = timespec_to_ktime(*rqtp);
if (do_nanosleep(&t, mode))
- return 0;
+ goto out;
/* Absolute timers do not update the rmtp value and restart: */
- if (mode == HRTIMER_MODE_ABS)
- return -ERESTARTNOHAND;
+ if (mode == HRTIMER_MODE_ABS) {
+ ret = -ERESTARTNOHAND;
+ goto out;
+ }
if (rmtp) {
- int ret = update_rmtp(&t.timer, rmtp);
+ ret = update_rmtp(&t.timer, rmtp);
if (ret <= 0)
- return ret;
+ goto out;
}
restart = &current_thread_info()->restart_block;
@@ -1423,7 +1556,10 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
restart->nanosleep.rmtp = rmtp;
restart->nanosleep.expires = t.timer.expires.tv64;
- return -ERESTART_RESTARTBLOCK;
+ ret = -ERESTART_RESTARTBLOCK;
+out:
+ destroy_hrtimer_on_stack(&t.timer);
+ return ret;
}
asmlinkage long
@@ -1468,6 +1604,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
while ((node = rb_first(&old_base->active))) {
timer = rb_entry(node, struct hrtimer, node);
BUG_ON(hrtimer_callback_running(timer));
+ debug_hrtimer_deactivate(timer);
__remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
timer->base = new_base;
/*
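The fixup callbacks above encode the debugobjects contract: return 1 when the bad state could be repaired (an active timer is cancelled before it is re-initialized or freed), 0 to let the core emit its warning. For timer users the visible rule is the split init API (a sketch; both timers here are illustrative):

static struct hrtimer heap_timer;	/* static or heap storage: plain init */

static void example_frame(void)
{
	struct hrtimer stack_timer;	/* automatic storage: annotated init */

	hrtimer_init(&heap_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init_on_stack(&stack_timer, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL);
	/* ... arm the timers, sleep, cancel ... */
	destroy_hrtimer_on_stack(&stack_timer);	/* before the frame dies */
}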
diff --git a/kernel/kthread.c b/kernel/kthread.c
index ac72eea4833..bd1b9ea024e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -98,7 +98,7 @@ static void create_kthread(struct kthread_create_info *create)
struct sched_param param = { .sched_priority = 0 };
wait_for_completion(&create->started);
read_lock(&tasklist_lock);
- create->result = find_task_by_pid(pid);
+ create->result = find_task_by_pid_ns(pid, &init_pid_ns);
read_unlock(&tasklist_lock);
/*
* root may have changed our (kthreadd's) priority or CPU mask.
diff --git a/kernel/marker.c b/kernel/marker.c
index 139260e5460..b5a9fe1d50d 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -29,7 +29,7 @@ extern struct marker __start___markers[];
extern struct marker __stop___markers[];
/* Set to 1 to enable marker debug output */
-const int marker_debug;
+static const int marker_debug;
/*
* markers_mutex nests inside module_mutex. Markers mutex protects the builtin
diff --git a/kernel/pid.c b/kernel/pid.c
index 477691576b3..20d59fa2d49 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -111,10 +111,11 @@ EXPORT_SYMBOL(is_container_init);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
-static void free_pidmap(struct pid_namespace *pid_ns, int pid)
+static void free_pidmap(struct upid *upid)
{
- struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
- int offset = pid & BITS_PER_PAGE_MASK;
+ int nr = upid->nr;
+ struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
+ int offset = nr & BITS_PER_PAGE_MASK;
clear_bit(offset, map->page);
atomic_inc(&map->nr_free);
@@ -232,7 +233,7 @@ void free_pid(struct pid *pid)
spin_unlock_irqrestore(&pidmap_lock, flags);
for (i = 0; i <= pid->level; i++)
- free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
+ free_pidmap(pid->numbers + i);
call_rcu(&pid->rcu, delayed_put_pid);
}
@@ -278,8 +279,8 @@ out:
return pid;
out_free:
- for (i++; i <= ns->level; i++)
- free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
+ while (++i <= ns->level)
+ free_pidmap(pid->numbers + i);
kmem_cache_free(ns->pid_cachep, pid);
pid = NULL;
@@ -316,7 +317,7 @@ EXPORT_SYMBOL_GPL(find_pid);
/*
* attach_pid() must be called with the tasklist_lock write-held.
*/
-int attach_pid(struct task_struct *task, enum pid_type type,
+void attach_pid(struct task_struct *task, enum pid_type type,
struct pid *pid)
{
struct pid_link *link;
@@ -324,11 +325,10 @@ int attach_pid(struct task_struct *task, enum pid_type type,
link = &task->pids[type];
link->pid = pid;
hlist_add_head_rcu(&link->node, &pid->tasks[type]);
-
- return 0;
}
-void detach_pid(struct task_struct *task, enum pid_type type)
+static void __change_pid(struct task_struct *task, enum pid_type type,
+ struct pid *new)
{
struct pid_link *link;
struct pid *pid;
@@ -338,7 +338,7 @@ void detach_pid(struct task_struct *task, enum pid_type type)
pid = link->pid;
hlist_del_rcu(&link->node);
- link->pid = NULL;
+ link->pid = new;
for (tmp = PIDTYPE_MAX; --tmp >= 0; )
if (!hlist_empty(&pid->tasks[tmp]))
@@ -347,13 +347,24 @@ void detach_pid(struct task_struct *task, enum pid_type type)
free_pid(pid);
}
+void detach_pid(struct task_struct *task, enum pid_type type)
+{
+ __change_pid(task, type, NULL);
+}
+
+void change_pid(struct task_struct *task, enum pid_type type,
+ struct pid *pid)
+{
+ __change_pid(task, type, pid);
+ attach_pid(task, type, pid);
+}
+
/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
enum pid_type type)
{
new->pids[type].pid = old->pids[type].pid;
hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
- old->pids[type].pid = NULL;
}
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
@@ -380,12 +391,6 @@ struct task_struct *find_task_by_pid_type_ns(int type, int nr,
EXPORT_SYMBOL(find_task_by_pid_type_ns);
-struct task_struct *find_task_by_pid(pid_t nr)
-{
- return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
-}
-EXPORT_SYMBOL(find_task_by_pid);
-
struct task_struct *find_task_by_vpid(pid_t vnr)
{
return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
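change_pid() packages the detach/attach pair that callers used to open-code; because __change_pid() stores the new pid in the link instead of NULL, there is no window in which the task has no pid link of that type. The caller-side rewrite, as seen in kernel/exit.c above and kernel/sys.c below:

	/* before: two steps, link briefly NULL in between */
	detach_pid(p, PIDTYPE_PGID);
	attach_pid(p, PIDTYPE_PGID, pgrp);

	/* after: one step */
	change_pid(p, PIDTYPE_PGID, pgrp);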
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 5ca37fa50be..98702b4b885 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -66,7 +66,7 @@ err_alloc:
return NULL;
}
-static struct pid_namespace *create_pid_namespace(int level)
+static struct pid_namespace *create_pid_namespace(unsigned int level)
{
struct pid_namespace *ns;
int i;
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 8476956ffd9..dbd8398ddb0 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -310,8 +310,7 @@ int posix_timer_event(struct k_itimer *timr,int si_private)
if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
struct task_struct *leader;
- int ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
- timr->it_process);
+ int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
if (likely(ret >= 0))
return ret;
@@ -322,8 +321,7 @@ int posix_timer_event(struct k_itimer *timr,int si_private)
timr->it_process = leader;
}
- return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
- timr->it_process);
+ return send_sigqueue(timr->sigq, timr->it_process, 1);
}
EXPORT_SYMBOL_GPL(posix_timer_event);
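send_sigqueue() and send_group_sigqueue() are folded into a single function in kernel/signal.c below; the signal number now comes from q->info.si_signo, and a group flag selects the pending queue:

	ret = send_sigqueue(timr->sigq, timr->it_process, 0);	/* one thread */
	ret = send_sigqueue(timr->sigq, timr->it_process, 1);	/* whole group */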
diff --git a/kernel/printk.c b/kernel/printk.c
index d3f9c0f788b..8fb01c32aa3 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -111,6 +111,9 @@ struct console_cmdline
char name[8]; /* Name of the driver */
int index; /* Minor dev. to use */
char *options; /* Options for the driver */
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+ char *brl_options; /* Options for braille driver */
+#endif
};
#define MAX_CMDLINECONSOLES 8
@@ -808,15 +811,60 @@ static void call_console_drivers(unsigned start, unsigned end)
#endif
+static int __add_preferred_console(char *name, int idx, char *options,
+ char *brl_options)
+{
+ struct console_cmdline *c;
+ int i;
+
+ /*
+ * See if this tty is not yet registered, and
+ * if we have a slot free.
+ */
+ for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
+ if (strcmp(console_cmdline[i].name, name) == 0 &&
+ console_cmdline[i].index == idx) {
+ if (!brl_options)
+ selected_console = i;
+ return 0;
+ }
+ if (i == MAX_CMDLINECONSOLES)
+ return -E2BIG;
+ if (!brl_options)
+ selected_console = i;
+ c = &console_cmdline[i];
+ strlcpy(c->name, name, sizeof(c->name));
+ c->options = options;
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+ c->brl_options = brl_options;
+#endif
+ c->index = idx;
+ return 0;
+}
/*
* Set up a list of consoles. Called from init/main.c
*/
static int __init console_setup(char *str)
{
char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */
- char *s, *options;
+ char *s, *options, *brl_options = NULL;
int idx;
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+ if (!memcmp(str, "brl,", 4)) {
+ brl_options = "";
+ str += 4;
+ } else if (!memcmp(str, "brl=", 4)) {
+ brl_options = str + 4;
+ str = strchr(brl_options, ',');
+ if (!str) {
+ printk(KERN_ERR "need port name after brl=\n");
+ return 1;
+ }
+ *(str++) = 0;
+ }
+#endif
+
/*
* Decode str into name, index, options.
*/
@@ -841,7 +889,7 @@ static int __init console_setup(char *str)
idx = simple_strtoul(s, NULL, 10);
*s = 0;
- add_preferred_console(buf, idx, options);
+ __add_preferred_console(buf, idx, options, brl_options);
return 1;
}
__setup("console=", console_setup);
@@ -861,28 +909,7 @@ __setup("console=", console_setup);
*/
int add_preferred_console(char *name, int idx, char *options)
{
- struct console_cmdline *c;
- int i;
-
- /*
- * See if this tty is not yet registered, and
- * if we have a slot free.
- */
- for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
- if (strcmp(console_cmdline[i].name, name) == 0 &&
- console_cmdline[i].index == idx) {
- selected_console = i;
- return 0;
- }
- if (i == MAX_CMDLINECONSOLES)
- return -E2BIG;
- selected_console = i;
- c = &console_cmdline[i];
- memcpy(c->name, name, sizeof(c->name));
- c->name[sizeof(c->name) - 1] = 0;
- c->options = options;
- c->index = idx;
- return 0;
+ return __add_preferred_console(name, idx, options, NULL);
}
int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options)
@@ -894,7 +921,7 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
if (strcmp(console_cmdline[i].name, name) == 0 &&
console_cmdline[i].index == idx) {
c = &console_cmdline[i];
- memcpy(c->name, name_new, sizeof(c->name));
+ strlcpy(c->name, name_new, sizeof(c->name));
c->name[sizeof(c->name) - 1] = 0;
c->options = options;
c->index = idx_new;
@@ -1163,6 +1190,16 @@ void register_console(struct console *console)
continue;
if (console->index < 0)
console->index = console_cmdline[i].index;
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+ if (console_cmdline[i].brl_options) {
+ console->flags |= CON_BRL;
+ braille_register_console(console,
+ console_cmdline[i].index,
+ console_cmdline[i].options,
+ console_cmdline[i].brl_options);
+ return;
+ }
+#endif
if (console->setup &&
console->setup(console, console_cmdline[i].options) != 0)
break;
@@ -1221,6 +1258,11 @@ int unregister_console(struct console *console)
struct console *a, *b;
int res = 1;
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+ if (console->flags & CON_BRL)
+ return braille_unregister_console(console);
+#endif
+
acquire_console_sem();
if (console_drivers == console) {
console_drivers=console->next;
@@ -1272,8 +1314,8 @@ late_initcall(disable_boot_consoles);
*/
void tty_write_message(struct tty_struct *tty, char *msg)
{
- if (tty && tty->driver->write)
- tty->driver->write(tty, msg, strlen(msg));
+ if (tty && tty->ops->write)
+ tty->ops->write(tty, msg, strlen(msg));
return;
}
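Reading the new console_setup() parser, the accepted forms on the kernel command line appear to be (device names illustrative):

	console=ttyS0,9600        an ordinary console, as before
	console=brl,ttyS0         braille console on ttyS0, default driver options
	console=brl=opts,ttyS0    braille console with driver options "opts"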
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index dac4b4e5729..dcc199c43a1 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -73,7 +73,7 @@ void __ptrace_unlink(struct task_struct *child)
BUG_ON(!child->ptrace);
child->ptrace = 0;
- if (!list_empty(&child->ptrace_list)) {
+ if (ptrace_reparented(child)) {
list_del_init(&child->ptrace_list);
remove_parent(child);
child->parent = child->real_parent;
@@ -168,8 +168,6 @@ int ptrace_attach(struct task_struct *task)
audit_ptrace(task);
retval = -EPERM;
- if (task->pid <= 1)
- goto out;
if (same_thread_group(task, current))
goto out;
@@ -208,8 +206,7 @@ repeat:
__ptrace_link(task, current);
- force_sig_specific(SIGSTOP, task);
-
+ send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
bad:
write_unlock_irqrestore(&tasklist_lock, flags);
task_unlock(task);
@@ -522,12 +519,6 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
{
struct task_struct *child;
- /*
- * Tracing init is not allowed.
- */
- if (pid == 1)
- return ERR_PTR(-EPERM);
-
read_lock(&tasklist_lock);
child = find_task_by_vpid(pid);
if (child)
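The hard-coded pid-1 checks can go because init's protection moves into the signal code: get_signal_to_deliver() in kernel/signal.c below replaces its old is_global_init() test with a check on SIGNAL_UNKILLABLE, skipping any unhandled signal for such a task:

	if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
	    !signal_group_exit(signal))
		continue;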
diff --git a/kernel/signal.c b/kernel/signal.c
index 64ad0ed1599..72bb4f51f96 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -39,11 +39,19 @@
static struct kmem_cache *sigqueue_cachep;
+static int __sig_ignored(struct task_struct *t, int sig)
+{
+ void __user *handler;
+
+ /* Is it explicitly or implicitly ignored? */
+
+ handler = t->sighand->action[sig - 1].sa.sa_handler;
+ return handler == SIG_IGN ||
+ (handler == SIG_DFL && sig_kernel_ignore(sig));
+}
static int sig_ignored(struct task_struct *t, int sig)
{
- void __user * handler;
-
/*
* Tracers always want to know about signals..
*/
@@ -58,10 +66,7 @@ static int sig_ignored(struct task_struct *t, int sig)
if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
return 0;
- /* Is it explicitly or implicitly ignored? */
- handler = t->sighand->action[sig-1].sa.sa_handler;
- return handler == SIG_IGN ||
- (handler == SIG_DFL && sig_kernel_ignore(sig));
+ return __sig_ignored(t, sig);
}
/*
@@ -372,7 +377,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
*/
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
- int signr = 0;
+ int signr;
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
@@ -405,8 +410,12 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
}
}
}
+
recalc_sigpending();
- if (signr && unlikely(sig_kernel_stop(signr))) {
+ if (!signr)
+ return 0;
+
+ if (unlikely(sig_kernel_stop(signr))) {
/*
* Set a marker that we have dequeued a stop signal. Our
* caller might release the siglock and then the pending
@@ -422,9 +431,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
}
- if (signr &&
- ((info->si_code & __SI_MASK) == __SI_TIMER) &&
- info->si_sys_private) {
+ if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
/*
* Release the siglock to ensure proper locking order
* of timer locks outside of siglocks. Note, we leave
@@ -526,21 +533,34 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
static int check_kill_permission(int sig, struct siginfo *info,
struct task_struct *t)
{
- int error = -EINVAL;
+ struct pid *sid;
+ int error;
+
if (!valid_signal(sig))
- return error;
+ return -EINVAL;
- if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
- error = audit_signal_info(sig, t); /* Let audit system see the signal */
- if (error)
- return error;
- error = -EPERM;
- if (((sig != SIGCONT) ||
- (task_session_nr(current) != task_session_nr(t)))
- && (current->euid ^ t->suid) && (current->euid ^ t->uid)
- && (current->uid ^ t->suid) && (current->uid ^ t->uid)
- && !capable(CAP_KILL))
+ if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+ return 0;
+
+ error = audit_signal_info(sig, t); /* Let audit system see the signal */
+ if (error)
return error;
+
+ if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
+ (current->uid ^ t->suid) && (current->uid ^ t->uid) &&
+ !capable(CAP_KILL)) {
+ switch (sig) {
+ case SIGCONT:
+ sid = task_session(t);
+ /*
+ * We don't return the error if sid == NULL. The
+ * task was unhashed, the caller must notice this.
+ */
+ if (!sid || sid == task_session(current))
+ break;
+ default:
+ return -EPERM;
+ }
}
return security_task_kill(t, info, sig, 0);
@@ -550,62 +570,44 @@ static int check_kill_permission(int sig, struct siginfo *info,
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
/*
- * Handle magic process-wide effects of stop/continue signals.
- * Unlike the signal actions, these happen immediately at signal-generation
+ * Handle magic process-wide effects of stop/continue signals. Unlike
+ * the signal actions, these happen immediately at signal-generation
* time regardless of blocking, ignoring, or handling. This does the
* actual continuing for SIGCONT, but not the actual stopping for stop
- * signals. The process stop is done as a signal action for SIG_DFL.
+ * signals. The process stop is done as a signal action for SIG_DFL.
+ *
+ * Returns true if the signal should be actually delivered, otherwise
+ * it should be dropped.
*/
-static void handle_stop_signal(int sig, struct task_struct *p)
+static int prepare_signal(int sig, struct task_struct *p)
{
+ struct signal_struct *signal = p->signal;
struct task_struct *t;
- if (p->signal->flags & SIGNAL_GROUP_EXIT)
+ if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
/*
- * The process is in the middle of dying already.
+ * The process is in the middle of dying, nothing to do.
*/
- return;
-
- if (sig_kernel_stop(sig)) {
+ } else if (sig_kernel_stop(sig)) {
/*
* This is a stop signal. Remove SIGCONT from all queues.
*/
- rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
+ rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
t = p;
do {
rm_from_queue(sigmask(SIGCONT), &t->pending);
- t = next_thread(t);
- } while (t != p);
+ } while_each_thread(p, t);
} else if (sig == SIGCONT) {
+ unsigned int why;
/*
* Remove all stop signals from all queues,
* and wake all threads.
*/
- if (unlikely(p->signal->group_stop_count > 0)) {
- /*
- * There was a group stop in progress. We'll
- * pretend it finished before we got here. We are
- * obliged to report it to the parent: if the
- * SIGSTOP happened "after" this SIGCONT, then it
- * would have cleared this pending SIGCONT. If it
- * happened "before" this SIGCONT, then the parent
- * got the SIGCHLD about the stop finishing before
- * the continue happened. We do the notification
- * now, and it's as if the stop had finished and
- * the SIGCHLD was pending on entry to this kill.
- */
- p->signal->group_stop_count = 0;
- p->signal->flags = SIGNAL_STOP_CONTINUED;
- spin_unlock(&p->sighand->siglock);
- do_notify_parent_cldstop(p, CLD_STOPPED);
- spin_lock(&p->sighand->siglock);
- }
- rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
+ rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
t = p;
do {
unsigned int state;
rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-
/*
* If there is a handler for SIGCONT, we must make
* sure that no thread returns to user mode before
@@ -615,7 +617,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
* running the handler. With the TIF_SIGPENDING
* flag set, the thread will pause and acquire the
* siglock that we hold now and until we've queued
- * the pending signal.
+ * the pending signal.
*
* Wake up the stopped thread _after_ setting
* TIF_SIGPENDING
@@ -626,49 +628,163 @@ static void handle_stop_signal(int sig, struct task_struct *p)
state |= TASK_INTERRUPTIBLE;
}
wake_up_state(t, state);
+ } while_each_thread(p, t);
- t = next_thread(t);
- } while (t != p);
+ /*
+ * Notify the parent with CLD_CONTINUED if we were stopped.
+ *
+ * If we were in the middle of a group stop, we pretend it
+ * was already finished, and then continued. Since SIGCHLD
+ * doesn't queue we report only CLD_STOPPED, as if the next
+ * CLD_CONTINUED was dropped.
+ */
+ why = 0;
+ if (signal->flags & SIGNAL_STOP_STOPPED)
+ why |= SIGNAL_CLD_CONTINUED;
+ else if (signal->group_stop_count)
+ why |= SIGNAL_CLD_STOPPED;
- if (p->signal->flags & SIGNAL_STOP_STOPPED) {
+ if (why) {
/*
- * We were in fact stopped, and are now continued.
- * Notify the parent with CLD_CONTINUED.
+ * The first thread which returns from finish_stop()
+ * will take ->siglock, notice SIGNAL_CLD_MASK, and
+ * notify its parent. See get_signal_to_deliver().
*/
- p->signal->flags = SIGNAL_STOP_CONTINUED;
- p->signal->group_exit_code = 0;
- spin_unlock(&p->sighand->siglock);
- do_notify_parent_cldstop(p, CLD_CONTINUED);
- spin_lock(&p->sighand->siglock);
+ signal->flags = why | SIGNAL_STOP_CONTINUED;
+ signal->group_stop_count = 0;
+ signal->group_exit_code = 0;
} else {
/*
* We are not stopped, but there could be a stop
* signal in the middle of being processed after
* being removed from the queue. Clear that too.
*/
- p->signal->flags = 0;
+ signal->flags &= ~SIGNAL_STOP_DEQUEUED;
}
- } else if (sig == SIGKILL) {
+ }
+
+ return !sig_ignored(p, sig);
+}
+
+/*
+ * Test if P wants to take SIG. After we've checked all threads with this,
+ * it's equivalent to finding no threads not blocking SIG. Any threads not
+ * blocking SIG were ruled out because they are not running and already
+ * have pending signals. Such threads will dequeue from the shared queue
+ * as soon as they're available, so putting the signal on the shared queue
+ * will be equivalent to sending it to one such thread.
+ */
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+ if (sigismember(&p->blocked, sig))
+ return 0;
+ if (p->flags & PF_EXITING)
+ return 0;
+ if (sig == SIGKILL)
+ return 1;
+ if (task_is_stopped_or_traced(p))
+ return 0;
+ return task_curr(p) || !signal_pending(p);
+}
+
+static void complete_signal(int sig, struct task_struct *p, int group)
+{
+ struct signal_struct *signal = p->signal;
+ struct task_struct *t;
+
+ /*
+ * Now find a thread we can wake up to take the signal off the queue.
+ *
+ * If the main thread wants the signal, it gets first crack.
+ * Probably the least surprising to the average bear.
+ */
+ if (wants_signal(sig, p))
+ t = p;
+ else if (!group || thread_group_empty(p))
+ /*
+ * There is just one thread and it does not need to be woken.
+ * It will dequeue unblocked signals before it runs again.
+ */
+ return;
+ else {
/*
- * Make sure that any pending stop signal already dequeued
- * is undone by the wakeup for SIGKILL.
+ * Otherwise try to find a suitable thread.
*/
- p->signal->flags = 0;
+ t = signal->curr_target;
+ while (!wants_signal(sig, t)) {
+ t = next_thread(t);
+ if (t == signal->curr_target)
+ /*
+ * No thread needs to be woken.
+ * Any eligible threads will see
+ * the signal in the queue soon.
+ */
+ return;
+ }
+ signal->curr_target = t;
}
+
+ /*
+ * Found a killable thread. If the signal will be fatal,
+ * then start taking the whole group down immediately.
+ */
+ if (sig_fatal(p, sig) &&
+ !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
+ !sigismember(&t->real_blocked, sig) &&
+ (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
+ /*
+ * This signal will be fatal to the whole group.
+ */
+ if (!sig_kernel_coredump(sig)) {
+ /*
+ * Start a group exit and wake everybody up.
+ * This way we don't have other threads
+ * running and doing things after a slower
+ * thread has the fatal signal pending.
+ */
+ signal->flags = SIGNAL_GROUP_EXIT;
+ signal->group_exit_code = sig;
+ signal->group_stop_count = 0;
+ t = p;
+ do {
+ sigaddset(&t->pending.signal, SIGKILL);
+ signal_wake_up(t, 1);
+ } while_each_thread(p, t);
+ return;
+ }
+ }
+
+ /*
+ * The signal is already in the shared-pending queue.
+ * Tell the chosen thread to wake up and dequeue it.
+ */
+ signal_wake_up(t, sig == SIGKILL);
+ return;
+}
+
+static inline int legacy_queue(struct sigpending *signals, int sig)
+{
+ return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
- struct sigpending *signals)
+ int group)
{
- struct sigqueue * q = NULL;
- int ret = 0;
+ struct sigpending *pending;
+ struct sigqueue *q;
+
+ assert_spin_locked(&t->sighand->siglock);
+ if (!prepare_signal(sig, t))
+ return 0;
+ pending = group ? &t->signal->shared_pending : &t->pending;
/*
- * Deliver the signal to listening signalfds. This must be called
- * with the sighand lock held.
+ * Short-circuit ignored signals and support queuing
+ * exactly one non-rt signal, so that we can get more
+ * detailed information about the cause of the signal.
*/
- signalfd_notify(t, sig);
-
+ if (legacy_queue(pending, sig))
+ return 0;
/*
* fast-pathed signals for kernel-internal things like SIGSTOP
* or SIGKILL.
@@ -688,7 +804,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
(is_si_special(info) ||
info->si_code >= 0)));
if (q) {
- list_add_tail(&q->list, &signals->list);
+ list_add_tail(&q->list, &pending->list);
switch ((unsigned long) info) {
case (unsigned long) SEND_SIG_NOINFO:
q->info.si_signo = sig;
@@ -718,13 +834,12 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
}
out_set:
- sigaddset(&signals->signal, sig);
- return ret;
+ signalfd_notify(t, sig);
+ sigaddset(&pending->signal, sig);
+ complete_signal(sig, t, group);
+ return 0;
}
-#define LEGACY_QUEUE(sigptr, sig) \
- (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
-
int print_fatal_signals;
static void print_fatal_signal(struct pt_regs *regs, int signr)
@@ -757,29 +872,16 @@ static int __init setup_print_fatal_signals(char *str)
__setup("print-fatal-signals=", setup_print_fatal_signals);
+int
+__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+{
+ return send_signal(sig, info, p, 1);
+}
+
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
- int ret = 0;
-
- BUG_ON(!irqs_disabled());
- assert_spin_locked(&t->sighand->siglock);
-
- /* Short-circuit ignored signals. */
- if (sig_ignored(t, sig))
- goto out;
-
- /* Support queueing exactly one non-rt signal, so that we
- can get more detailed information about the cause of
- the signal. */
- if (LEGACY_QUEUE(&t->pending, sig))
- goto out;
-
- ret = send_signal(sig, info, t, &t->pending);
- if (!ret && !sigismember(&t->blocked, sig))
- signal_wake_up(t, sig == SIGKILL);
-out:
- return ret;
+ return send_signal(sig, info, t, 0);
}
/*
@@ -790,7 +892,8 @@ out:
* since we do not want to have a signal handler that was blocked
* be invoked when user space had explicitly blocked it.
*
- * We don't want to have recursive SIGSEGV's etc, for example.
+ * We don't want to have recursive SIGSEGV's etc, for example,
+ * that is why we also clear SIGNAL_UNKILLABLE.
*/
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
@@ -810,6 +913,8 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
recalc_sigpending_and_wake(t);
}
}
+ if (action->sa.sa_handler == SIG_DFL)
+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
ret = specific_send_sig_info(sig, info, t);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
@@ -823,134 +928,6 @@ force_sig_specific(int sig, struct task_struct *t)
}
/*
- * Test if P wants to take SIG. After we've checked all threads with this,
- * it's equivalent to finding no threads not blocking SIG. Any threads not
- * blocking SIG were ruled out because they are not running and already
- * have pending signals. Such threads will dequeue from the shared queue
- * as soon as they're available, so putting the signal on the shared queue
- * will be equivalent to sending it to one such thread.
- */
-static inline int wants_signal(int sig, struct task_struct *p)
-{
- if (sigismember(&p->blocked, sig))
- return 0;
- if (p->flags & PF_EXITING)
- return 0;
- if (sig == SIGKILL)
- return 1;
- if (task_is_stopped_or_traced(p))
- return 0;
- return task_curr(p) || !signal_pending(p);
-}
-
-static void
-__group_complete_signal(int sig, struct task_struct *p)
-{
- struct task_struct *t;
-
- /*
- * Now find a thread we can wake up to take the signal off the queue.
- *
- * If the main thread wants the signal, it gets first crack.
- * Probably the least surprising to the average bear.
- */
- if (wants_signal(sig, p))
- t = p;
- else if (thread_group_empty(p))
- /*
- * There is just one thread and it does not need to be woken.
- * It will dequeue unblocked signals before it runs again.
- */
- return;
- else {
- /*
- * Otherwise try to find a suitable thread.
- */
- t = p->signal->curr_target;
- if (t == NULL)
- /* restart balancing at this thread */
- t = p->signal->curr_target = p;
-
- while (!wants_signal(sig, t)) {
- t = next_thread(t);
- if (t == p->signal->curr_target)
- /*
- * No thread needs to be woken.
- * Any eligible threads will see
- * the signal in the queue soon.
- */
- return;
- }
- p->signal->curr_target = t;
- }
-
- /*
- * Found a killable thread. If the signal will be fatal,
- * then start taking the whole group down immediately.
- */
- if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
- !sigismember(&t->real_blocked, sig) &&
- (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
- /*
- * This signal will be fatal to the whole group.
- */
- if (!sig_kernel_coredump(sig)) {
- /*
- * Start a group exit and wake everybody up.
- * This way we don't have other threads
- * running and doing things after a slower
- * thread has the fatal signal pending.
- */
- p->signal->flags = SIGNAL_GROUP_EXIT;
- p->signal->group_exit_code = sig;
- p->signal->group_stop_count = 0;
- t = p;
- do {
- sigaddset(&t->pending.signal, SIGKILL);
- signal_wake_up(t, 1);
- } while_each_thread(p, t);
- return;
- }
- }
-
- /*
- * The signal is already in the shared-pending queue.
- * Tell the chosen thread to wake up and dequeue it.
- */
- signal_wake_up(t, sig == SIGKILL);
- return;
-}
-
-int
-__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
-{
- int ret = 0;
-
- assert_spin_locked(&p->sighand->siglock);
- handle_stop_signal(sig, p);
-
- /* Short-circuit ignored signals. */
- if (sig_ignored(p, sig))
- return ret;
-
- if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
- /* This is a non-RT signal and we already have one queued. */
- return ret;
-
- /*
- * Put this signal on the shared-pending queue, or fail with EAGAIN.
- * We always use the shared queue for process-wide signals,
- * to avoid several races.
- */
- ret = send_signal(sig, info, p, &p->signal->shared_pending);
- if (unlikely(ret))
- return ret;
-
- __group_complete_signal(sig, p);
- return 0;
-}
-
-/*
* Nuke all other threads in the group.
*/
void zap_other_threads(struct task_struct *p)
@@ -978,13 +955,11 @@ int __fatal_signal_pending(struct task_struct *tsk)
}
EXPORT_SYMBOL(__fatal_signal_pending);
-/*
- * Must be called under rcu_read_lock() or with tasklist_lock read-held.
- */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
struct sighand_struct *sighand;
+ rcu_read_lock();
for (;;) {
sighand = rcu_dereference(tsk->sighand);
if (unlikely(sighand == NULL))
@@ -995,6 +970,7 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
break;
spin_unlock_irqrestore(&sighand->siglock, *flags);
}
+ rcu_read_unlock();
return sighand;
}
@@ -1043,9 +1019,6 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
struct task_struct *p;
rcu_read_lock();
- if (unlikely(sig_needs_tasklist(sig)))
- read_lock(&tasklist_lock);
-
retry:
p = pid_task(pid, PIDTYPE_PID);
if (p) {
@@ -1059,10 +1032,8 @@ retry:
*/
goto retry;
}
-
- if (unlikely(sig_needs_tasklist(sig)))
- read_unlock(&tasklist_lock);
rcu_read_unlock();
+
return error;
}
@@ -1159,8 +1130,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
*/
/*
- * These two are the most common entry points. They send a signal
- * just to the specific thread.
+ * The caller must ensure the task can't exit.
*/
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
@@ -1175,17 +1145,9 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
if (!valid_signal(sig))
return -EINVAL;
- /*
- * We need the tasklist lock even for the specific
- * thread case (when we don't need to follow the group
- * lists) in order to avoid races with "p->sighand"
- * going away or changing from under us.
- */
- read_lock(&tasklist_lock);
spin_lock_irqsave(&p->sighand->siglock, flags);
ret = specific_send_sig_info(sig, info, p);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
- read_unlock(&tasklist_lock);
return ret;
}
@@ -1291,28 +1253,24 @@ void sigqueue_free(struct sigqueue *q)
__sigqueue_free(q);
}
-int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
+ int sig = q->info.si_signo;
+ struct sigpending *pending;
unsigned long flags;
- int ret = 0;
+ int ret;
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
- /*
- * The rcu based delayed sighand destroy makes it possible to
- * run this without tasklist lock held. The task struct itself
- * cannot go away as create_timer did get_task_struct().
- *
- * We return -1, when the task is marked exiting, so
- * posix_timer_event can redirect it to the group leader
- */
- rcu_read_lock();
+ ret = -1;
+ if (!likely(lock_task_sighand(t, &flags)))
+ goto ret;
- if (!likely(lock_task_sighand(p, &flags))) {
- ret = -1;
- goto out_err;
- }
+ ret = 1; /* the signal is ignored */
+ if (!prepare_signal(sig, t))
+ goto out;
+ ret = 0;
if (unlikely(!list_empty(&q->list))) {
/*
* If an SI_TIMER entry is already queued, just increment
@@ -1322,77 +1280,15 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
q->info.si_overrun++;
goto out;
}
- /* Short-circuit ignored signals. */
- if (sig_ignored(p, sig)) {
- ret = 1;
- goto out;
- }
- /*
- * Deliver the signal to listening signalfds. This must be called
- * with the sighand lock held.
- */
- signalfd_notify(p, sig);
-
- list_add_tail(&q->list, &p->pending.list);
- sigaddset(&p->pending.signal, sig);
- if (!sigismember(&p->blocked, sig))
- signal_wake_up(p, sig == SIGKILL);
-
-out:
- unlock_task_sighand(p, &flags);
-out_err:
- rcu_read_unlock();
-
- return ret;
-}
-
-int
-send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
-{
- unsigned long flags;
- int ret = 0;
-
- BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-
- read_lock(&tasklist_lock);
- /* Since it_lock is held, p->sighand cannot be NULL. */
- spin_lock_irqsave(&p->sighand->siglock, flags);
- handle_stop_signal(sig, p);
-
- /* Short-circuit ignored signals. */
- if (sig_ignored(p, sig)) {
- ret = 1;
- goto out;
- }
- if (unlikely(!list_empty(&q->list))) {
- /*
- * If an SI_TIMER entry is already queue just increment
- * the overrun count. Other uses should not try to
- * send the signal multiple times.
- */
- BUG_ON(q->info.si_code != SI_TIMER);
- q->info.si_overrun++;
- goto out;
- }
- /*
- * Deliver the signal to listening signalfds. This must be called
- * with the sighand lock held.
- */
- signalfd_notify(p, sig);
-
- /*
- * Put this signal on the shared-pending queue.
- * We always use the shared queue for process-wide signals,
- * to avoid several races.
- */
- list_add_tail(&q->list, &p->signal->shared_pending.list);
- sigaddset(&p->signal->shared_pending.signal, sig);
-
- __group_complete_signal(sig, p);
+ signalfd_notify(t, sig);
+ pending = group ? &t->signal->shared_pending : &t->pending;
+ list_add_tail(&q->list, &pending->list);
+ sigaddset(&pending->signal, sig);
+ complete_signal(sig, t, group);
out:
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- read_unlock(&tasklist_lock);
+ unlock_task_sighand(t, &flags);
+ret:
return ret;
}
@@ -1723,8 +1619,9 @@ static int do_signal_stop(int signr)
} else {
struct task_struct *t;
- if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
- unlikely(sig->group_exit_task))
+ if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
+ != SIGNAL_STOP_DEQUEUED) ||
+ unlikely(signal_group_exit(sig)))
return 0;
/*
* There is no group stop already in progress.
@@ -1799,8 +1696,9 @@ static int ptrace_signal(int signr, siginfo_t *info,
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
struct pt_regs *regs, void *cookie)
{
- sigset_t *mask = &current->blocked;
- int signr = 0;
+ struct sighand_struct *sighand = current->sighand;
+ struct signal_struct *signal = current->signal;
+ int signr;
relock:
/*
@@ -1811,16 +1709,32 @@ relock:
*/
try_to_freeze();
- spin_lock_irq(&current->sighand->siglock);
+ spin_lock_irq(&sighand->siglock);
+ /*
+ * Every stopped thread goes here after wakeup. Check to see if
+ * we should notify the parent, prepare_signal(SIGCONT) encodes
+ * the CLD_ si_code into SIGNAL_CLD_MASK bits.
+ */
+ if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
+ int why = (signal->flags & SIGNAL_STOP_CONTINUED)
+ ? CLD_CONTINUED : CLD_STOPPED;
+ signal->flags &= ~SIGNAL_CLD_MASK;
+ spin_unlock_irq(&sighand->siglock);
+
+ read_lock(&tasklist_lock);
+ do_notify_parent_cldstop(current->group_leader, why);
+ read_unlock(&tasklist_lock);
+ goto relock;
+ }
+
for (;;) {
struct k_sigaction *ka;
- if (unlikely(current->signal->group_stop_count > 0) &&
+ if (unlikely(signal->group_stop_count > 0) &&
do_signal_stop(0))
goto relock;
- signr = dequeue_signal(current, mask, info);
-
+ signr = dequeue_signal(current, &current->blocked, info);
if (!signr)
break; /* will return 0 */
@@ -1830,7 +1744,7 @@ relock:
continue;
}
- ka = &current->sighand->action[signr-1];
+ ka = &sighand->action[signr-1];
if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
continue;
if (ka->sa.sa_handler != SIG_DFL) {
@@ -1852,7 +1766,8 @@ relock:
/*
* Global init gets no signals it doesn't want.
*/
- if (is_global_init(current))
+ if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
+ !signal_group_exit(signal))
continue;
if (sig_kernel_stop(signr)) {
@@ -1867,14 +1782,14 @@ relock:
* We need to check for that and bail out if necessary.
*/
if (signr != SIGSTOP) {
- spin_unlock_irq(&current->sighand->siglock);
+ spin_unlock_irq(&sighand->siglock);
/* signals can be posted during this window */
if (is_current_pgrp_orphaned())
goto relock;
- spin_lock_irq(&current->sighand->siglock);
+ spin_lock_irq(&sighand->siglock);
}
if (likely(do_signal_stop(signr))) {
@@ -1889,15 +1804,16 @@ relock:
continue;
}
- spin_unlock_irq(&current->sighand->siglock);
+ spin_unlock_irq(&sighand->siglock);
/*
* Anything else is fatal, maybe with a core dump.
*/
current->flags |= PF_SIGNALED;
- if ((signr != SIGKILL) && print_fatal_signals)
- print_fatal_signal(regs, signr);
+
if (sig_kernel_coredump(signr)) {
+ if (print_fatal_signals)
+ print_fatal_signal(regs, signr);
/*
* If it was able to dump core, this kills all
* other threads in the group and synchronizes with
@@ -1915,7 +1831,7 @@ relock:
do_group_exit(signr);
/* NOTREACHED */
}
- spin_unlock_irq(&current->sighand->siglock);
+ spin_unlock_irq(&sighand->siglock);
return signr;
}
@@ -2259,6 +2175,7 @@ static int do_tkill(int tgid, int pid, int sig)
int error;
struct siginfo info;
struct task_struct *p;
+ unsigned long flags;
error = -ESRCH;
info.si_signo = sig;
@@ -2267,22 +2184,24 @@ static int do_tkill(int tgid, int pid, int sig)
info.si_pid = task_tgid_vnr(current);
info.si_uid = current->uid;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_task_by_vpid(pid);
if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
error = check_kill_permission(sig, &info, p);
/*
* The null signal is a permissions and process existence
* probe. No signal is actually delivered.
+ *
+ * If lock_task_sighand() fails we pretend the task dies
+ * after receiving the signal. The window is tiny, and the
+ * signal is private anyway.
*/
- if (!error && sig && p->sighand) {
- spin_lock_irq(&p->sighand->siglock);
- handle_stop_signal(sig, p);
+ if (!error && sig && lock_task_sighand(p, &flags)) {
error = specific_send_sig_info(sig, &info, p);
- spin_unlock_irq(&p->sighand->siglock);
+ unlock_task_sighand(p, &flags);
}
}
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return error;
}
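
[editor's note: as the comment above says, sig == 0 is a pure permission and existence probe. A hedged userspace sketch of that probe via the raw tgkill syscall (Linux-specific; error handling kept minimal; passing the pid as both tgid and tid works for a thread-group leader).]

#define _GNU_SOURCE
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(int argc, char **argv)
{
	pid_t tid;

	if (argc != 2)
		return 1;
	tid = (pid_t)atoi(argv[1]);
	/* sig 0: existence/permission check only, nothing is delivered */
	if (syscall(SYS_tgkill, tid, tid, 0) == 0)
		printf("thread %d exists and is signalable\n", (int)tid);
	else
		perror("tgkill");
	return 0;
}
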
@@ -2339,13 +2258,14 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
+ struct task_struct *t = current;
struct k_sigaction *k;
sigset_t mask;
if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
return -EINVAL;
- k = &current->sighand->action[sig-1];
+ k = &t->sighand->action[sig-1];
spin_lock_irq(&current->sighand->siglock);
if (oact)
@@ -2366,9 +2286,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
* (for example, SIGCHLD), shall cause the pending signal to
* be discarded, whether or not it is blocked"
*/
- if (act->sa.sa_handler == SIG_IGN ||
- (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
- struct task_struct *t = current;
+ if (__sig_ignored(t, sig)) {
sigemptyset(&mask);
sigaddset(&mask, sig);
rm_from_queue_full(&mask, &t->signal->shared_pending);
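
[editor's note: the POSIX rule quoted above is easy to observe from userspace; a minimal demo, no assumptions beyond POSIX signals. Setting SIG_IGN while a blocked SIGUSR1 is pending discards it, which is exactly what the rm_from_queue_full() calls implement.]

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pend;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);				/* SIGUSR1 is now pending */

	sigpending(&pend);
	printf("pending before SIG_IGN: %d\n", sigismember(&pend, SIGUSR1));

	signal(SIGUSR1, SIG_IGN);		/* discards the pending signal */
	sigpending(&pend);
	printf("pending after SIG_IGN:  %d\n", sigismember(&pend, SIGUSR1));
	return 0;
}
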
@@ -2623,7 +2541,7 @@ asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
current->state = TASK_INTERRUPTIBLE;
schedule();
- set_thread_flag(TIF_RESTORE_SIGMASK);
+ set_restore_sigmask();
return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
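
[editor's note: set_restore_sigmask() defers restoring the caller's original mask until after any handler runs; the userspace-visible contract is just sigsuspend(2), which atomically swaps in a temporary mask, sleeps until a handler runs, and returns -1 with EINTR with the old mask back in place. A small sketch:]

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_usr1(int sig) { }	/* only needs to interrupt us */

int main(void)
{
	sigset_t block, none;
	struct sigaction sa = { .sa_handler = on_usr1 };

	sigaction(SIGUSR1, &sa, NULL);
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);	/* normally keep it blocked */

	if (fork() == 0) {			/* child pokes us in a second */
		sleep(1);
		kill(getppid(), SIGUSR1);
		_exit(0);
	}

	sigemptyset(&none);
	if (sigsuspend(&none) == -1 && errno == EINTR)
		puts("woken by handler; original mask restored");
	return 0;
}
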
diff --git a/kernel/sys.c b/kernel/sys.c
index e423d0d9e6f..895d2d4c949 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -978,8 +978,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
goto out;
if (task_pgrp(p) != pgrp) {
- detach_pid(p, PIDTYPE_PGID);
- attach_pid(p, PIDTYPE_PGID, pgrp);
+ change_pid(p, PIDTYPE_PGID, pgrp);
set_task_pgrp(p, pid_nr(pgrp));
}
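
[editor's note: change_pid() just moves the task to its new pgrp hash entry; from userspace the operation is setpgid(2). A minimal demo, where pid 0 means "the caller" and pgid 0 means "same as pid":]

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	printf("pgid before: %d\n", (int)getpgrp());
	if (setpgid(0, 0) != 0)		/* become leader of a new group */
		perror("setpgid");
	printf("pgid after:  %d (pid is %d)\n", (int)getpgrp(), (int)getpid());
	return 0;
}
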
@@ -992,54 +991,67 @@ out:
asmlinkage long sys_getpgid(pid_t pid)
{
+ struct task_struct *p;
+ struct pid *grp;
+ int retval;
+
+ rcu_read_lock();
if (!pid)
- return task_pgrp_vnr(current);
+ grp = task_pgrp(current);
else {
- int retval;
- struct task_struct *p;
-
- read_lock(&tasklist_lock);
- p = find_task_by_vpid(pid);
retval = -ESRCH;
- if (p) {
- retval = security_task_getpgid(p);
- if (!retval)
- retval = task_pgrp_vnr(p);
- }
- read_unlock(&tasklist_lock);
- return retval;
+ p = find_task_by_vpid(pid);
+ if (!p)
+ goto out;
+ grp = task_pgrp(p);
+ if (!grp)
+ goto out;
+
+ retval = security_task_getpgid(p);
+ if (retval)
+ goto out;
}
+ retval = pid_vnr(grp);
+out:
+ rcu_read_unlock();
+ return retval;
}
#ifdef __ARCH_WANT_SYS_GETPGRP
asmlinkage long sys_getpgrp(void)
{
- /* SMP - assuming writes are word atomic this is fine */
- return task_pgrp_vnr(current);
+ return sys_getpgid(0);
}
#endif
asmlinkage long sys_getsid(pid_t pid)
{
+ struct task_struct *p;
+ struct pid *sid;
+ int retval;
+
+ rcu_read_lock();
if (!pid)
- return task_session_vnr(current);
+ sid = task_session(current);
else {
- int retval;
- struct task_struct *p;
-
- rcu_read_lock();
- p = find_task_by_vpid(pid);
retval = -ESRCH;
- if (p) {
- retval = security_task_getsid(p);
- if (!retval)
- retval = task_session_vnr(p);
- }
- rcu_read_unlock();
- return retval;
+ p = find_task_by_vpid(pid);
+ if (!p)
+ goto out;
+ sid = task_session(p);
+ if (!sid)
+ goto out;
+
+ retval = security_task_getsid(p);
+ if (retval)
+ goto out;
}
+ retval = pid_vnr(sid);
+out:
+ rcu_read_unlock();
+ return retval;
}
asmlinkage long sys_setsid(void)
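
[editor's note: both syscalls above treat pid == 0 as "the calling process", which is exactly how sys_getpgrp() is now implemented in terms of sys_getpgid(0). From userspace:]

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* pid 0 means the calling process for both calls */
	printf("pgid: %d\n", (int)getpgid(0));
	printf("sid:  %d\n", (int)getsid(0));
	return 0;
}
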
@@ -1572,11 +1584,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
goto out;
}
- rcu_read_lock();
- if (!lock_task_sighand(p, &flags)) {
- rcu_read_unlock();
+ if (!lock_task_sighand(p, &flags))
return;
- }
switch (who) {
case RUSAGE_BOTH:
@@ -1612,9 +1621,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
default:
BUG();
}
-
unlock_task_sighand(p, &flags);
- rcu_read_unlock();
out:
cputime_to_timeval(utime, &r->ru_utime);
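
[editor's note: k_getrusage() backs getrusage(2), and lock_task_sighand() takes its own RCU read lock, which is why the explicit rcu_read_lock()/unlock() pair above becomes redundant. A short userspace check of the utime/stime fields the function fills in:]

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_SELF, &ru) == 0)
		printf("user %ld.%06lds, sys %ld.%06lds\n",
		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
		       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec);
	return 0;
}
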
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 07e86a82807..4a23517169a 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -183,7 +183,7 @@ static int fill_pid(pid_t pid, struct task_struct *tsk,
if (!tsk) {
rcu_read_lock();
- tsk = find_task_by_pid(pid);
+ tsk = find_task_by_vpid(pid);
if (tsk)
get_task_struct(tsk);
rcu_read_unlock();
@@ -230,7 +230,7 @@ static int fill_tgid(pid_t tgid, struct task_struct *first,
*/
rcu_read_lock();
if (!first)
- first = find_task_by_pid(tgid);
+ first = find_task_by_vpid(tgid);
if (!first || !lock_task_sighand(first, &flags))
goto out;
@@ -547,7 +547,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
if (!stats)
goto err;
- rc = fill_pid(tsk->pid, tsk, stats);
+ rc = fill_pid(-1, tsk, stats);
if (rc < 0)
goto err;
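
[editor's note: fill_pid() and fill_tgid() now both do the namespace-aware lookup under RCU. A hedged kernel-style sketch of that pattern; my_get_task is a hypothetical helper, not part of the patch, and this is kernel context, not a standalone program.]

/* hedged sketch, kernel context assumed */
static struct task_struct *my_get_task(pid_t vpid)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(vpid);	/* pid as seen in current's namespace */
	if (tsk)
		get_task_struct(tsk);	/* pin before leaving the RCU section */
	rcu_read_unlock();
	return tsk;			/* caller must put_task_struct() */
}
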
diff --git a/kernel/timer.c b/kernel/timer.c
index f3d35d4ea42..ceacc662657 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -320,14 +320,130 @@ static void timer_stats_account_timer(struct timer_list *timer)
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
-/**
- * init_timer - initialize a timer.
- * @timer: the timer to be initialized
- *
- * init_timer() must be done to a timer prior calling *any* of the
- * other timer functions.
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+
+static struct debug_obj_descr timer_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
*/
-void init_timer(struct timer_list *timer)
+static int timer_fixup_init(void *addr, enum debug_obj_state state)
+{
+ struct timer_list *timer = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ del_timer_sync(timer);
+ debug_object_init(timer, &timer_debug_descr);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int timer_fixup_activate(void *addr, enum debug_obj_state state)
+{
+ struct timer_list *timer = addr;
+
+ switch (state) {
+
+ case ODEBUG_STATE_NOTAVAILABLE:
+ /*
+ * This is not really a fixup. The timer was
+ * statically initialized. We just make sure that it
+ * is tracked in the object tracker.
+ */
+ if (timer->entry.next == NULL &&
+ timer->entry.prev == TIMER_ENTRY_STATIC) {
+ debug_object_init(timer, &timer_debug_descr);
+ debug_object_activate(timer, &timer_debug_descr);
+ return 0;
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ return 0;
+
+ case ODEBUG_STATE_ACTIVE:
+ WARN_ON(1);
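+ /* fall through: an already-active timer gets no fixup */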
+
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int timer_fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct timer_list *timer = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ del_timer_sync(timer);
+ debug_object_free(timer, &timer_debug_descr);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static struct debug_obj_descr timer_debug_descr = {
+ .name = "timer_list",
+ .fixup_init = timer_fixup_init,
+ .fixup_activate = timer_fixup_activate,
+ .fixup_free = timer_fixup_free,
+};
+
+static inline void debug_timer_init(struct timer_list *timer)
+{
+ debug_object_init(timer, &timer_debug_descr);
+}
+
+static inline void debug_timer_activate(struct timer_list *timer)
+{
+ debug_object_activate(timer, &timer_debug_descr);
+}
+
+static inline void debug_timer_deactivate(struct timer_list *timer)
+{
+ debug_object_deactivate(timer, &timer_debug_descr);
+}
+
+static inline void debug_timer_free(struct timer_list *timer)
+{
+ debug_object_free(timer, &timer_debug_descr);
+}
+
+static void __init_timer(struct timer_list *timer);
+
+void init_timer_on_stack(struct timer_list *timer)
+{
+ debug_object_init_on_stack(timer, &timer_debug_descr);
+ __init_timer(timer);
+}
+EXPORT_SYMBOL_GPL(init_timer_on_stack);
+
+void destroy_timer_on_stack(struct timer_list *timer)
+{
+ debug_object_free(timer, &timer_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
+
+#else
+static inline void debug_timer_init(struct timer_list *timer) { }
+static inline void debug_timer_activate(struct timer_list *timer) { }
+static inline void debug_timer_deactivate(struct timer_list *timer) { }
+#endif
+
+static void __init_timer(struct timer_list *timer)
{
timer->entry.next = NULL;
timer->base = __raw_get_cpu_var(tvec_bases);
@@ -337,6 +453,19 @@ void init_timer(struct timer_list *timer)
memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}
+
+/**
+ * init_timer - initialize a timer.
+ * @timer: the timer to be initialized
+ *
+ * init_timer() must be done to a timer prior to calling *any* of the
+ * other timer functions.
+ */
+void init_timer(struct timer_list *timer)
+{
+ debug_timer_init(timer);
+ __init_timer(timer);
+}
EXPORT_SYMBOL(init_timer);
void init_timer_deferrable(struct timer_list *timer)
@@ -351,6 +480,8 @@ static inline void detach_timer(struct timer_list *timer,
{
struct list_head *entry = &timer->entry;
+ debug_timer_deactivate(timer);
+
__list_del(entry->prev, entry->next);
if (clear_pending)
entry->next = NULL;
@@ -405,6 +536,8 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
ret = 1;
}
+ debug_timer_activate(timer);
+
new_base = __get_cpu_var(tvec_bases);
if (base != new_base) {
@@ -450,6 +583,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
BUG_ON(timer_pending(timer) || !timer->function);
spin_lock_irqsave(&base->lock, flags);
timer_set_base(timer, base);
+ debug_timer_activate(timer);
internal_add_timer(base, timer);
/*
* Check whether the other CPU is idle and needs to be
@@ -1086,11 +1220,14 @@ signed long __sched schedule_timeout(signed long timeout)
expire = timeout + jiffies;
- setup_timer(&timer, process_timeout, (unsigned long)current);
+ setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
__mod_timer(&timer, expire);
schedule();
del_singleshot_timer_sync(&timer);
+ /* Remove the timer from the object tracker */
+ destroy_timer_on_stack(&timer);
+
timeout = expire - jiffies;
out:
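
[editor's note: the schedule_timeout() change above is the canonical pairing for on-stack timers. A hedged sketch of the same lifecycle in a hypothetical caller; my_callback and my_wait_with_timeout are illustrative only, and the extra tracking only does anything under CONFIG_DEBUG_OBJECTS_TIMERS.]

/* hedged sketch, kernel context assumed */
static void my_callback(unsigned long data) { }

static void my_wait_with_timeout(void)
{
	struct timer_list t;

	setup_timer_on_stack(&t, my_callback, 0);
	mod_timer(&t, jiffies + HZ);
	/* ... sleep or poll ... */
	del_timer_sync(&t);
	destroy_timer_on_stack(&t);	/* stack slot is about to vanish */
}
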
diff --git a/kernel/user.c b/kernel/user.c
index aefbbfa3159..865ecf57a09 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -384,7 +384,7 @@ void free_uid(struct user_struct *up)
local_irq_restore(flags);
}
-struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
+struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
struct hlist_head *hashent = uidhashentry(ns, uid);
struct user_struct *up, *new;
@@ -399,26 +399,12 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
spin_unlock_irq(&uidhash_lock);
if (!up) {
- new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
+ new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
if (!new)
goto out_unlock;
new->uid = uid;
atomic_set(&new->__count, 1);
- atomic_set(&new->processes, 0);
- atomic_set(&new->files, 0);
- atomic_set(&new->sigpending, 0);
-#ifdef CONFIG_INOTIFY_USER
- atomic_set(&new->inotify_watches, 0);
- atomic_set(&new->inotify_devs, 0);
-#endif
-#ifdef CONFIG_POSIX_MQUEUE
- new->mq_bytes = 0;
-#endif
- new->locked_shm = 0;
-#ifdef CONFIG_KEYS
- new->uid_keyring = new->session_keyring = NULL;
-#endif
if (sched_create_user(new) < 0)
goto out_free_user;
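
[editor's note: kmem_cache_zalloc() returns zeroed memory, so every field whose initial value is zero needs no explicit store; only genuinely non-zero state is set by hand, which is why the long run of atomic_set(..., 0) calls disappears. A hedged sketch of the general pattern; struct foo and foo_cachep are hypothetical.]

/* hedged sketch, kernel context assumed */
static struct foo *alloc_foo(int id)
{
	struct foo *f = kmem_cache_zalloc(foo_cachep, GFP_KERNEL);

	if (!f)
		return NULL;
	f->id = id;			/* non-zero fields still need init */
	atomic_set(&f->refcnt, 1);	/* refcount starts at 1, not 0 */
	return f;
}
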
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7db251a959c..721093a2256 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -247,7 +247,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
if (cwq->run_depth > 3) {
/* morton gets to eat his hat */
printk("%s: recursion depth exceeded: %d\n",
- __FUNCTION__, cwq->run_depth);
+ __func__, cwq->run_depth);
dump_stack();
}
while (!list_empty(&cwq->worklist)) {
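
[editor's note: the run_depth check above guards against a work item that re-enters its own workqueue. A hedged userspace analogy of the same guard; pure illustration, not kernel code.]

#include <stdio.h>

static int run_depth;

static void run_queue(void)
{
	if (++run_depth > 3)
		fprintf(stderr, "%s: recursion depth exceeded: %d\n",
			__func__, run_depth);
	/* ... drain items; a buggy item might call run_queue() again ... */
	run_depth--;
}

int main(void)
{
	run_queue();
	return 0;
}
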