Diffstat (limited to 'kernel')

-rw-r--r--  kernel/bounds.c          23
-rw-r--r--  kernel/cpuset.c          22
-rw-r--r--  kernel/exit.c             2
-rw-r--r--  kernel/fork.c             8
-rw-r--r--  kernel/kexec.c            3
-rw-r--r--  kernel/kprobes.c        349
-rw-r--r--  kernel/power/console.c   27
-rw-r--r--  kernel/sys.c             27

8 files changed, 349 insertions, 112 deletions
diff --git a/kernel/bounds.c b/kernel/bounds.c
new file mode 100644
index 00000000000..c3c55544db2
--- /dev/null
+++ b/kernel/bounds.c
@@ -0,0 +1,23 @@
+/*
+ * Generate definitions needed by the preprocessor.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#define __GENERATING_BOUNDS_H
+/* Include headers that define the enum constants of interest */
+#include <linux/page-flags.h>
+#include <linux/mmzone.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+void foo(void)
+{
+ /* The enum constants to put into include/linux/bounds.h */
+ DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
+ DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
+ /* End of constants */
+}
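
The asm trick in bounds.c mirrors the long-standing asm-offsets.c mechanism:
each DEFINE() makes the compiler emit a literal "->SYM $value SYM" line into
the generated assembly, which a Kbuild sed rule then turns into a #define.
A sketch of the resulting include/linux/bounds.h, with illustrative values
(the real numbers depend on architecture and configuration):

    /* Generated header sketch -- values here are illustrative only */
    #ifndef __LINUX_BOUNDS_H__
    #define __LINUX_BOUNDS_H__

    #define NR_PAGEFLAGS 22    /* from DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS) */
    #define MAX_NR_ZONES 4     /* from DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES) */

    #endif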
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 8b35fbd8292..024888bb981 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -941,7 +941,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
cs->mems_generation = cpuset_mems_generation++;
mutex_unlock(&callback_mutex);
- cpuset_being_rebound = cs; /* causes mpol_copy() rebind */
+ cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
fudge = 10; /* spare mmarray[] slots */
fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
@@ -992,7 +992,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
* rebind the vma mempolicies of each mm in mmarray[] to their
* new cpuset, and release that mm. The mpol_rebind_mm()
* call takes mmap_sem, which we couldn't take while holding
- * tasklist_lock. Forks can happen again now - the mpol_copy()
+ * tasklist_lock. Forks can happen again now - the mpol_dup()
* cpuset_being_rebound check will catch such forks, and rebind
* their vma mempolicies too. Because we still hold the global
* cgroup_mutex, we know that no other rebind effort will
@@ -1958,22 +1958,14 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
}
/**
- * cpuset_zonelist_valid_mems_allowed - check zonelist vs. curremt mems_allowed
- * @zl: the zonelist to be checked
+ * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
+ * @nodemask: the nodemask to be checked
*
- * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
+ * Are any of the nodes in the nodemask allowed in current->mems_allowed?
*/
-int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
+int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
- int i;
-
- for (i = 0; zl->zones[i]; i++) {
- int nid = zone_to_nid(zl->zones[i]);
-
- if (node_isset(nid, current->mems_allowed))
- return 1;
- }
- return 0;
+ return nodes_intersects(*nodemask, current->mems_allowed);
}
/*
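
The rewritten helper delegates to nodes_intersects(), which tests whether two
node bitmasks share any set bit -- the same answer the removed zonelist walk
computed, minus the per-zone loop. A minimal standalone sketch of that test,
assuming masks that fit in a single word (the kernel version goes through
bitmap_intersects() and handles multi-word masks):

    /* Standalone sketch of the intersection test; single-word masks only. */
    #include <stdio.h>

    static int masks_intersect(unsigned long a, unsigned long b)
    {
            return (a & b) != 0;
    }

    int main(void)
    {
            unsigned long mems_allowed = 0x5;   /* nodes 0 and 2 allowed */
            unsigned long nodemask     = 0x4;   /* asking about node 2   */

            printf("%d\n", masks_intersect(nodemask, mems_allowed));  /* 1 */
            return 0;
    }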
diff --git a/kernel/exit.c b/kernel/exit.c
index 97f609f574b..2a9d98c641a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -967,7 +967,7 @@ NORET_TYPE void do_exit(long code)
proc_exit_connector(tsk);
exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
- mpol_free(tsk->mempolicy);
+ mpol_put(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
#ifdef CONFIG_FUTEX
diff --git a/kernel/fork.c b/kernel/fork.c
index cb46befdd3a..6067e429f28 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -279,7 +279,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (!tmp)
goto fail_nomem;
*tmp = *mpnt;
- pol = mpol_copy(vma_policy(mpnt));
+ pol = mpol_dup(vma_policy(mpnt));
retval = PTR_ERR(pol);
if (IS_ERR(pol))
goto fail_nomem_policy;
@@ -521,7 +521,7 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
* Allocate a new mm structure and copy contents from the
* mm structure of the passed in task structure.
*/
-static struct mm_struct *dup_mm(struct task_struct *tsk)
+struct mm_struct *dup_mm(struct task_struct *tsk)
{
struct mm_struct *mm, *oldmm = current->mm;
int err;
@@ -1116,7 +1116,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->audit_context = NULL;
cgroup_fork(p);
#ifdef CONFIG_NUMA
- p->mempolicy = mpol_copy(p->mempolicy);
+ p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
@@ -1374,7 +1374,7 @@ bad_fork_cleanup_security:
security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
- mpol_free(p->mempolicy);
+ mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
cgroup_exit(p, cgroup_callbacks_done);
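
The mpol_copy()/mpol_free() to mpol_dup()/mpol_put() renames in exit.c and
fork.c align the mempolicy API with the kernel's usual reference-counting
vocabulary: *_dup() hands the caller its own reference to a duplicate, and
*_put() drops a reference, freeing on the last drop. A sketch of that
pattern with hypothetical names (not the real mempolicy internals):

    /* Illustrative dup/put pair; the type and helpers are hypothetical. */
    struct policy {
            atomic_t refcnt;
            /* ... policy state ... */
    };

    static struct policy *policy_dup(struct policy *old)
    {
            struct policy *new = kmalloc(sizeof(*new), GFP_KERNEL);

            if (!new)
                    return ERR_PTR(-ENOMEM);
            *new = *old;
            atomic_set(&new->refcnt, 1);    /* caller owns this reference */
            return new;
    }

    static void policy_put(struct policy *p)
    {
            if (p && atomic_dec_and_test(&p->refcnt))
                    kfree(p);
    }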
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 6782dce93d0..cb85c79989b 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1405,6 +1405,9 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
VMCOREINFO_NUMBER(NR_FREE_PAGES);
+ VMCOREINFO_NUMBER(PG_lru);
+ VMCOREINFO_NUMBER(PG_private);
+ VMCOREINFO_NUMBER(PG_swapcache);
arch_crash_save_vmcoreinfo();
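
The three new VMCOREINFO_NUMBER() entries export the runtime bit positions of
PG_lru, PG_private and PG_swapcache into the vmcoreinfo ELF note, so a dump
filter such as makedumpfile can classify pages without hard-coding flag
numbers that now vary with the bounds.c-generated layout. From memory of this
kernel era, the macro expands roughly as follows (the exact helper name and
format string may differ):

    /* Approximate expansion; check include/linux/kexec.h for the real one. */
    #define VMCOREINFO_NUMBER(name) \
            vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)

    /* VMCOREINFO_NUMBER(PG_lru) thus emits a line like "NUMBER(PG_lru)=5". */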
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index fcfb580c3af..1e0250cb948 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -72,6 +72,18 @@ DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
+/*
+ * Functions that we want to prohibit kprobes in are normally marked
+ * __kprobes. But there are cases where such functions already belong to
+ * a different section (__sched for preempt_schedule).
+ *
+ * For such cases, we now have a blacklist
+ */
+struct kprobe_blackpoint kprobe_blacklist[] = {
+ {"preempt_schedule",},
+ {NULL} /* Terminator */
+};
+
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
@@ -417,6 +429,21 @@ static inline void free_rp_inst(struct kretprobe *rp)
}
}
+static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
+{
+ unsigned long flags;
+ struct kretprobe_instance *ri;
+ struct hlist_node *pos, *next;
+ /* No race here */
+ spin_lock_irqsave(&kretprobe_lock, flags);
+ hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
+ ri->rp = NULL;
+ hlist_del(&ri->uflist);
+ }
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+ free_rp_inst(rp);
+}
+
/*
* Keep all fields in the kprobe consistent
*/
@@ -492,9 +519,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
static int __kprobes in_kprobes_functions(unsigned long addr)
{
+ struct kprobe_blackpoint *kb;
+
if (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end)
return -EINVAL;
+ /*
+ * If there exists a kprobe_blacklist, verify and
+ * fail any probe registration in the prohibited area
+ */
+ for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+ if (kb->start_addr) {
+ if (addr >= kb->start_addr &&
+ addr < (kb->start_addr + kb->range))
+ return -EINVAL;
+ }
+ }
return 0;
}
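
The kprobe_blackpoint declaration itself lands in include/linux/kprobes.h;
inferred from its uses here (name resolved once at init time, then
start_addr/range checked at registration), it presumably looks like:

    /* Presumed layout, inferred from the call sites in this patch. */
    struct kprobe_blackpoint {
            const char *name;           /* symbol to blacklist         */
            unsigned long start_addr;   /* filled in by init_kprobes() */
            unsigned long range;        /* symbol size from kallsyms   */
    };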
@@ -555,6 +595,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
}
p->nmissed = 0;
+ INIT_LIST_HEAD(&p->list);
mutex_lock(&kprobe_mutex);
old_p = get_kprobe(p->addr);
if (old_p) {
@@ -581,35 +622,28 @@ out:
return ret;
}
-int __kprobes register_kprobe(struct kprobe *p)
-{
- return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
-}
-
-void __kprobes unregister_kprobe(struct kprobe *p)
+/*
+ * Unregister a kprobe without a scheduler synchronization.
+ */
+static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
- struct module *mod;
struct kprobe *old_p, *list_p;
- int cleanup_p;
- mutex_lock(&kprobe_mutex);
old_p = get_kprobe(p->addr);
- if (unlikely(!old_p)) {
- mutex_unlock(&kprobe_mutex);
- return;
- }
+ if (unlikely(!old_p))
+ return -EINVAL;
+
if (p != old_p) {
list_for_each_entry_rcu(list_p, &old_p->list, list)
if (list_p == p)
/* kprobe p is a valid probe */
goto valid_p;
- mutex_unlock(&kprobe_mutex);
- return;
+ return -EINVAL;
}
valid_p:
if (old_p == p ||
(old_p->pre_handler == aggr_pre_handler &&
- p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
+ list_is_singular(&old_p->list))) {
/*
* Only probe on the hash list. Disarm only if kprobes are
* enabled - otherwise, the breakpoint would already have
@@ -618,43 +652,97 @@ valid_p:
if (kprobe_enabled)
arch_disarm_kprobe(p);
hlist_del_rcu(&old_p->hlist);
- cleanup_p = 1;
} else {
+ if (p->break_handler)
+ old_p->break_handler = NULL;
+ if (p->post_handler) {
+ list_for_each_entry_rcu(list_p, &old_p->list, list) {
+ if ((list_p != p) && (list_p->post_handler))
+ goto noclean;
+ }
+ old_p->post_handler = NULL;
+ }
+noclean:
list_del_rcu(&p->list);
- cleanup_p = 0;
}
+ return 0;
+}
- mutex_unlock(&kprobe_mutex);
+static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
+{
+ struct module *mod;
+ struct kprobe *old_p;
- synchronize_sched();
if (p->mod_refcounted) {
mod = module_text_address((unsigned long)p->addr);
if (mod)
module_put(mod);
}
- if (cleanup_p) {
- if (p != old_p) {
- list_del_rcu(&p->list);
+ if (list_empty(&p->list) || list_is_singular(&p->list)) {
+ if (!list_empty(&p->list)) {
+ /* "p" is the last child of an aggr_kprobe */
+ old_p = list_entry(p->list.next, struct kprobe, list);
+ list_del(&p->list);
kfree(old_p);
}
arch_remove_kprobe(p);
- } else {
- mutex_lock(&kprobe_mutex);
- if (p->break_handler)
- old_p->break_handler = NULL;
- if (p->post_handler){
- list_for_each_entry_rcu(list_p, &old_p->list, list){
- if (list_p->post_handler){
- cleanup_p = 2;
- break;
- }
- }
- if (cleanup_p == 0)
- old_p->post_handler = NULL;
+ }
+}
+
+static int __register_kprobes(struct kprobe **kps, int num,
+ unsigned long called_from)
+{
+ int i, ret = 0;
+
+ if (num <= 0)
+ return -EINVAL;
+ for (i = 0; i < num; i++) {
+ ret = __register_kprobe(kps[i], called_from);
+ if (ret < 0 && i > 0) {
+ unregister_kprobes(kps, i);
+ break;
}
- mutex_unlock(&kprobe_mutex);
}
+ return ret;
+}
+
+/*
+ * Registration and unregistration functions for kprobe.
+ */
+int __kprobes register_kprobe(struct kprobe *p)
+{
+ return __register_kprobes(&p, 1,
+ (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kprobe(struct kprobe *p)
+{
+ unregister_kprobes(&p, 1);
+}
+
+int __kprobes register_kprobes(struct kprobe **kps, int num)
+{
+ return __register_kprobes(kps, num,
+ (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kprobes(struct kprobe **kps, int num)
+{
+ int i;
+
+ if (num <= 0)
+ return;
+ mutex_lock(&kprobe_mutex);
+ for (i = 0; i < num; i++)
+ if (__unregister_kprobe_top(kps[i]) < 0)
+ kps[i]->addr = NULL;
+ mutex_unlock(&kprobe_mutex);
+
+ synchronize_sched();
+ for (i = 0; i < num; i++)
+ if (kps[i]->addr)
+ __unregister_kprobe_bottom(kps[i]);
}
static struct notifier_block kprobe_exceptions_nb = {
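
For probe authors, the payoff of the top/bottom split is that a whole batch
of probes is torn down with a single synchronize_sched() instead of one per
probe. A hypothetical module-side use of the new array API (the probe
targets and handler body are illustrative):

    #include <linux/module.h>
    #include <linux/kprobes.h>

    static int my_pre(struct kprobe *p, struct pt_regs *regs)
    {
            return 0;       /* let the probed instruction execute */
    }

    static struct kprobe kp_fork = { .symbol_name = "do_fork", .pre_handler = my_pre };
    static struct kprobe kp_exit = { .symbol_name = "do_exit", .pre_handler = my_pre };
    static struct kprobe *my_kps[] = { &kp_fork, &kp_exit };

    static int __init my_init(void)
    {
            /* All-or-nothing: on failure, __register_kprobes() unwinds
             * the probes it had already armed. */
            return register_kprobes(my_kps, ARRAY_SIZE(my_kps));
    }

    static void __exit my_exit(void)
    {
            unregister_kprobes(my_kps, ARRAY_SIZE(my_kps));
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");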
@@ -667,24 +755,69 @@ unsigned long __weak arch_deref_entry_point(void *entry)
return (unsigned long)entry;
}
-int __kprobes register_jprobe(struct jprobe *jp)
+static int __register_jprobes(struct jprobe **jps, int num,
+ unsigned long called_from)
{
- unsigned long addr = arch_deref_entry_point(jp->entry);
+ struct jprobe *jp;
+ int ret = 0, i;
- if (!kernel_text_address(addr))
+ if (num <= 0)
return -EINVAL;
+ for (i = 0; i < num; i++) {
+ unsigned long addr;
+ jp = jps[i];
+ addr = arch_deref_entry_point(jp->entry);
+
+ if (!kernel_text_address(addr))
+ ret = -EINVAL;
+ else {
+ /* Todo: Verify probepoint is a function entry point */
+ jp->kp.pre_handler = setjmp_pre_handler;
+ jp->kp.break_handler = longjmp_break_handler;
+ ret = __register_kprobe(&jp->kp, called_from);
+ }
+ if (ret < 0 && i > 0) {
+ unregister_jprobes(jps, i);
+ break;
+ }
+ }
+ return ret;
+}
- /* Todo: Verify probepoint is a function entry point */
- jp->kp.pre_handler = setjmp_pre_handler;
- jp->kp.break_handler = longjmp_break_handler;
-
- return __register_kprobe(&jp->kp,
+int __kprobes register_jprobe(struct jprobe *jp)
+{
+ return __register_jprobes(&jp, 1,
(unsigned long)__builtin_return_address(0));
}
void __kprobes unregister_jprobe(struct jprobe *jp)
{
- unregister_kprobe(&jp->kp);
+ unregister_jprobes(&jp, 1);
+}
+
+int __kprobes register_jprobes(struct jprobe **jps, int num)
+{
+ return __register_jprobes(jps, num,
+ (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_jprobes(struct jprobe **jps, int num)
+{
+ int i;
+
+ if (num <= 0)
+ return;
+ mutex_lock(&kprobe_mutex);
+ for (i = 0; i < num; i++)
+ if (__unregister_kprobe_top(&jps[i]->kp) < 0)
+ jps[i]->kp.addr = NULL;
+ mutex_unlock(&kprobe_mutex);
+
+ synchronize_sched();
+ for (i = 0; i < num; i++) {
+ if (jps[i]->kp.addr)
+ __unregister_kprobe_bottom(&jps[i]->kp);
+ }
}
#ifdef CONFIG_KRETPROBES
@@ -725,7 +858,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
return 0;
}
-int __kprobes register_kretprobe(struct kretprobe *rp)
+static int __kprobes __register_kretprobe(struct kretprobe *rp,
+ unsigned long called_from)
{
int ret = 0;
struct kretprobe_instance *inst;
@@ -771,46 +905,101 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
rp->nmissed = 0;
/* Establish function entry probe point */
- if ((ret = __register_kprobe(&rp->kp,
- (unsigned long)__builtin_return_address(0))) != 0)
+ ret = __register_kprobe(&rp->kp, called_from);
+ if (ret != 0)
free_rp_inst(rp);
return ret;
}
+static int __register_kretprobes(struct kretprobe **rps, int num,
+ unsigned long called_from)
+{
+ int ret = 0, i;
+
+ if (num <= 0)
+ return -EINVAL;
+ for (i = 0; i < num; i++) {
+ ret = __register_kretprobe(rps[i], called_from);
+ if (ret < 0 && i > 0) {
+ unregister_kretprobes(rps, i);
+ break;
+ }
+ }
+ return ret;
+}
+
+int __kprobes register_kretprobe(struct kretprobe *rp)
+{
+ return __register_kretprobes(&rp, 1,
+ (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kretprobe(struct kretprobe *rp)
+{
+ unregister_kretprobes(&rp, 1);
+}
+
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
+{
+ return __register_kretprobes(rps, num,
+ (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+{
+ int i;
+
+ if (num <= 0)
+ return;
+ mutex_lock(&kprobe_mutex);
+ for (i = 0; i < num; i++)
+ if (__unregister_kprobe_top(&rps[i]->kp) < 0)
+ rps[i]->kp.addr = NULL;
+ mutex_unlock(&kprobe_mutex);
+
+ synchronize_sched();
+ for (i = 0; i < num; i++) {
+ if (rps[i]->kp.addr) {
+ __unregister_kprobe_bottom(&rps[i]->kp);
+ cleanup_rp_inst(rps[i]);
+ }
+ }
+}
+
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
}
-static int __kprobes pre_handler_kretprobe(struct kprobe *p,
- struct pt_regs *regs)
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
- return 0;
+ return -ENOSYS;
}
-#endif /* CONFIG_KRETPROBES */
-
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
- unsigned long flags;
- struct kretprobe_instance *ri;
- struct hlist_node *pos, *next;
+}
- unregister_kprobe(&rp->kp);
+void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+{
+}
- /* No race here */
- spin_lock_irqsave(&kretprobe_lock, flags);
- hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
- ri->rp = NULL;
- hlist_del(&ri->uflist);
- }
- spin_unlock_irqrestore(&kretprobe_lock, flags);
- free_rp_inst(rp);
+static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ return 0;
}
+#endif /* CONFIG_KRETPROBES */
+
static int __init init_kprobes(void)
{
int i, err = 0;
+ unsigned long offset = 0, size = 0;
+ char *modname, namebuf[128];
+ const char *symbol_name;
+ void *addr;
+ struct kprobe_blackpoint *kb;
/* FIXME allocate the probe table, currently defined statically */
/* initialize all list heads */
@@ -819,6 +1008,28 @@ static int __init init_kprobes(void)
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
}
+ /*
+ * Lookup and populate the kprobe_blacklist.
+ *
+ * Unlike the kretprobe blacklist, we'll need to determine
+ * the range of addresses that belong to those functions,
+ * since a kprobe need not be at the beginning
+ * of a function.
+ */
+ for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+ kprobe_lookup_name(kb->name, addr);
+ if (!addr)
+ continue;
+
+ kb->start_addr = (unsigned long)addr;
+ symbol_name = kallsyms_lookup(kb->start_addr,
+ &size, &offset, &modname, namebuf);
+ if (!symbol_name)
+ kb->range = 0;
+ else
+ kb->range = size;
+ }
+
if (kretprobe_blacklist_size) {
/* lookup the function address from its name */
for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
@@ -1066,8 +1277,12 @@ module_init(init_kprobes);
EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
+EXPORT_SYMBOL_GPL(register_kprobes);
+EXPORT_SYMBOL_GPL(unregister_kprobes);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
+EXPORT_SYMBOL_GPL(register_jprobes);
+EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(jprobe_return);
#endif
@@ -1075,4 +1290,6 @@ EXPORT_SYMBOL_GPL(jprobe_return);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
+EXPORT_SYMBOL_GPL(register_kretprobes);
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
#endif
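
The kretprobe variants follow the same two-phase shape, with cleanup_rp_inst()
reclaiming the per-task instances only after the batch-wide
synchronize_sched(). A hypothetical caller (target symbol and handler are
illustrative):

    static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
            return 0;
    }

    static struct kretprobe my_krp = {
            .handler        = my_ret,
            .maxactive      = 20,
            .kp.symbol_name = "do_fork",
    };
    static struct kretprobe *my_krps[] = { &my_krp };

    /* register_kretprobes(my_krps, ARRAY_SIZE(my_krps)) arms the batch;
     * unregister_kretprobes() later disarms it all within one RCU grace
     * period. */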
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 89bcf4973ee..b8628be2a46 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -7,17 +7,39 @@
#include <linux/vt_kern.h>
#include <linux/kbd_kern.h>
#include <linux/console.h>
+#include <linux/module.h>
#include "power.h"
#if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
static int orig_fgconsole, orig_kmsg;
+static int disable_vt_switch;
+
+/*
+ * Normally during a suspend, we allocate a new console and switch to it.
+ * When we resume, we switch back to the original console. This switch
+ * can be slow, so on systems where the framebuffer can handle restoration
+ * of video registers anyway, there's little point in doing the console
+ * switch. This function allows you to disable it by passing it '0'.
+ */
+void pm_set_vt_switch(int do_switch)
+{
+ acquire_console_sem();
+ disable_vt_switch = !do_switch;
+ release_console_sem();
+}
+EXPORT_SYMBOL(pm_set_vt_switch);
int pm_prepare_console(void)
{
acquire_console_sem();
+ if (disable_vt_switch) {
+ release_console_sem();
+ return 0;
+ }
+
orig_fgconsole = fg_console;
if (vc_allocate(SUSPEND_CONSOLE)) {
@@ -50,9 +72,12 @@ int pm_prepare_console(void)
void pm_restore_console(void)
{
acquire_console_sem();
+ if (disable_vt_switch) {
+ release_console_sem();
+ return;
+ }
set_console(orig_fgconsole);
release_console_sem();
kmsg_redirect = orig_kmsg;
- return;
}
#endif
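
A driver that can reprogram the video hardware on resume opts out of the
console switch once, typically at probe time. A hypothetical framebuffer
driver call site (the function and its placement are illustrative;
pm_set_vt_switch() is the real new export):

    static int myfb_probe(struct platform_device *pdev)
    {
            /* ... device setup ... */

            /* We restore the video registers ourselves on resume, so skip
             * the slow suspend-console VT switch. */
            pm_set_vt_switch(0);
            return 0;
    }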
diff --git a/kernel/sys.c b/kernel/sys.c
index 6a0cc71ee88..f2a45136695 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1632,10 +1632,9 @@ asmlinkage long sys_umask(int mask)
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
- long error;
+ long uninitialized_var(error);
- error = security_task_prctl(option, arg2, arg3, arg4, arg5);
- if (error)
+ if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
return error;
switch (option) {
@@ -1688,17 +1687,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
error = -EINVAL;
break;
- case PR_GET_KEEPCAPS:
- if (current->keep_capabilities)
- error = 1;
- break;
- case PR_SET_KEEPCAPS:
- if (arg2 != 0 && arg2 != 1) {
- error = -EINVAL;
- break;
- }
- current->keep_capabilities = arg2;
- break;
case PR_SET_NAME: {
struct task_struct *me = current;
unsigned char ncomm[sizeof(me->comm)];
@@ -1732,17 +1720,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
case PR_SET_SECCOMP:
error = prctl_set_seccomp(arg2);
break;
-
- case PR_CAPBSET_READ:
- if (!cap_valid(arg2))
- return -EINVAL;
- return !!cap_raised(current->cap_bset, arg2);
- case PR_CAPBSET_DROP:
-#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
- return cap_prctl_drop(arg2);
-#else
- return -EINVAL;
-#endif
case PR_GET_TSC:
error = GET_TSC_CTL(arg2);
break;
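
The reworked call site reflects a new hook convention: security_task_prctl()
now takes &error and returns non-zero when a security module has fully
handled the option (which is where the removed PR_GET/SET_KEEPCAPS and
PR_CAPBSET_* cases moved), so sys_prctl() returns early with the value the
hook wrote. A sketch of a hook following that convention (the body is
hypothetical; the calling contract is taken from the call site above):

    static int my_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                             unsigned long arg4, unsigned long arg5, long *rc_p)
    {
            if (option == PR_GET_KEEPCAPS) {
                    *rc_p = !!current->keep_capabilities;
                    return 1;   /* handled: sys_prctl() returns *rc_p */
            }
            return 0;           /* not handled: fall through to the switch */
    }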