Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c       52
-rw-r--r--  kernel/hung_task.c          2
-rw-r--r--  kernel/locking/lockdep.c   11
-rw-r--r--  kernel/panic.c             15
-rw-r--r--  kernel/sched/deadline.c     2
-rw-r--r--  kernel/sched/rt.c           2
-rw-r--r--  kernel/time/timer.c         9
-rw-r--r--  kernel/trace/Kconfig        7
-rw-r--r--  kernel/trace/trace.c        4
-rw-r--r--  kernel/watchdog.c           2
-rw-r--r--  kernel/watchdog_hld.c       2
11 files changed, 66 insertions, 42 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 30d94f68c5bd..c3b57fe93951 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5292,20 +5292,16 @@ static int __perf_read_group_add(struct perf_event *leader,
}
static int perf_read_group(struct perf_event *event,
- u64 read_format, char __user *buf)
+ u64 read_format, char __user *buf,
+ u64 *values)
{
struct perf_event *leader = event->group_leader, *child;
struct perf_event_context *ctx = leader->ctx;
int ret;
- u64 *values;
lockdep_assert_held(&ctx->mutex);
- values = kzalloc(event->read_size, GFP_KERNEL);
- if (!values)
- return -ENOMEM;
-
- values[0] = 1 + leader->nr_siblings;
+ *values = 1 + leader->nr_siblings;
/*
* By locking the child_mutex of the leader we effectively
@@ -5323,25 +5319,17 @@ static int perf_read_group(struct perf_event *event,
goto unlock;
}
- mutex_unlock(&leader->child_mutex);
-
ret = event->read_size;
- if (copy_to_user(buf, values, event->read_size))
- ret = -EFAULT;
- goto out;
-
unlock:
mutex_unlock(&leader->child_mutex);
-out:
- kfree(values);
return ret;
}
static int perf_read_one(struct perf_event *event,
- u64 read_format, char __user *buf)
+ u64 read_format, char __user *buf,
+ u64 *values)
{
u64 enabled, running;
- u64 values[4];
int n = 0;
values[n++] = __perf_event_read_value(event, &enabled, &running);
@@ -5352,9 +5340,6 @@ static int perf_read_one(struct perf_event *event,
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
- if (copy_to_user(buf, values, n * sizeof(u64)))
- return -EFAULT;
-
return n * sizeof(u64);
}
@@ -5375,7 +5360,8 @@ static bool is_event_hup(struct perf_event *event)
* Read the performance event - simple non blocking version for now
*/
static ssize_t
-__perf_read(struct perf_event *event, char __user *buf, size_t count)
+__perf_read(struct perf_event *event, char __user *buf,
+ size_t count, u64 *values)
{
u64 read_format = event->attr.read_format;
int ret;
@@ -5393,9 +5379,9 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count)
WARN_ON_ONCE(event->ctx->parent_ctx);
if (read_format & PERF_FORMAT_GROUP)
- ret = perf_read_group(event, read_format, buf);
+ ret = perf_read_group(event, read_format, buf, values);
else
- ret = perf_read_one(event, read_format, buf);
+ ret = perf_read_one(event, read_format, buf, values);
return ret;
}
@@ -5405,16 +5391,31 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct perf_event *event = file->private_data;
struct perf_event_context *ctx;
+ u64 stack_values[8];
+ u64 *values;
int ret;
ret = security_perf_event_read(event);
if (ret)
return ret;
+ if (event->read_size <= sizeof(stack_values))
+ values = memset(stack_values, 0, event->read_size);
+ else
+ values = kzalloc(event->read_size, GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
ctx = perf_event_ctx_lock(event);
- ret = __perf_read(event, buf, count);
+ ret = __perf_read(event, buf, count, values);
perf_event_ctx_unlock(event, ctx);
+ if (ret > 0 && copy_to_user(buf, values, ret))
+ ret = -EFAULT;
+
+ if (values != stack_values)
+ kfree(values);
+
return ret;
}
@@ -11213,7 +11214,8 @@ void perf_pmu_unregister(struct pmu *pmu)
device_del(pmu->dev);
put_device(pmu->dev);
}
- free_pmu_context(pmu);
+ if (!find_pmu_context(pmu->task_ctx_nr))
+ free_pmu_context(pmu);
mutex_unlock(&pmus_lock);
}
EXPORT_SYMBOL_GPL(perf_pmu_unregister);
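
Note: the perf_read() rework above pulls buffer management and the copy_to_user() out of the ctx-locked read paths into the top-level handler, preferring a small on-stack array over kzalloc() for typical read sizes. A minimal sketch of that stack-buffer-with-heap-fallback pattern, with hypothetical helper names (read_scratch()/read_scratch_free() are not from the patch):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

static u64 *read_scratch(u64 *stack_buf, size_t stack_size, size_t need)
{
	/* Small reads reuse the caller's on-stack array, zeroed. */
	if (need <= stack_size)
		return memset(stack_buf, 0, need);

	/* Larger reads fall back to a zeroed heap allocation. */
	return kzalloc(need, GFP_KERNEL);
}

static void read_scratch_free(u64 *buf, u64 *stack_buf)
{
	/* Only the heap fallback needs freeing. */
	if (buf != stack_buf)
		kfree(buf);
}

A caller mirrors perf_read() above: declare u64 stack_values[8], take values = read_scratch(stack_values, sizeof(stack_values), event->read_size), bail out with -ENOMEM on NULL, and pair it with read_scratch_free(values, stack_values) after the copy_to_user().
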
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 9888e2bc8c76..df979c5043e0 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -118,6 +118,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
console_verbose();
hung_task_show_lock = true;
hung_task_call_panic = true;
+ } else {
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
/*
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 2270ec68f10a..dc03fe856729 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5402,11 +5402,14 @@ static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
if (match_held_lock(hlock, lock)) {
/*
- * Grab 16bits of randomness; this is sufficient to not
- * be guessable and still allows some pin nesting in
- * our u32 pin_count.
+ * Grab 6bits of randomness; this is barely sufficient
+ * to not be guessable and still allows some 32 levels
+ * of pin nesting in our u12 pin_count.
*/
- cookie.val = 1 + (prandom_u32() >> 16);
+ cookie.val = 1 + (prandom_u32() >> 26);
+ if (DEBUG_LOCKS_WARN_ON(hlock->pin_count + cookie.val >= 1 << 12))
+ return NIL_COOKIE;
+
hlock->pin_count += cookie.val;
return cookie;
}
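
As a rough reading of the new bounds (not stated in the patch itself): prandom_u32() >> 26 keeps the top six bits, so cookie.val falls in the range 1..64, and with pin_count confined to 12 bits (4096 states) the added DEBUG_LOCKS_WARN_ON only fires once the accumulated cookies approach that ceiling, i.e. after a few dozen nested pins in the worst case.
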
diff --git a/kernel/panic.c b/kernel/panic.c
index cefd7d82366f..494e8e3dfae0 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -224,13 +224,6 @@ void panic(const char *fmt, ...)
buf[len - 1] = '\0';
pr_emerg("Kernel panic - not syncing: %s\n", buf);
-#ifdef CONFIG_DEBUG_BUGVERBOSE
- /*
- * Avoid nested stack-dumping if a panic occurs during oops processing
- */
- if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
- dump_stack();
-#endif
/*
* If kgdb is enabled, give it a chance to run before we stop all
@@ -271,6 +264,14 @@ void panic(const char *fmt, ...)
*/
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ /*
+ * Avoid nested stack-dumping if a panic occurs during oops processing
+ */
+ if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
+ dump_stack();
+#endif
+
kmsg_dump(KMSG_DUMP_PANIC);
/*
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d2c072b0ef01..db884d20ac2b 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -800,7 +800,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
* entity.
*/
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
- printk_deferred_once("sched: DL replenish lagged too much\n");
+ printk_deferred_once(KERN_NOTICE "sched: DL replenish lagged too much\n");
dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
dl_se->runtime = pi_of(dl_se)->dl_runtime;
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b48baaba2fc2..b04c19579f82 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -977,7 +977,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
*/
if (likely(rt_b->rt_runtime)) {
rt_rq->rt_throttled = 1;
- printk_deferred_once("sched: RT throttling activated\n");
+ printk_deferred_once(KERN_NOTICE "sched: RT throttling activated\n");
} else {
/*
* In case we did anyway, make it go away,
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 85f1021ad459..600b3ddbdf08 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -617,7 +617,14 @@ static const struct debug_obj_descr timer_debug_descr;
static void *timer_debug_hint(void *addr)
{
- return ((struct timer_list *) addr)->function;
+ struct timer_list *timer = addr;
+
+ if (timer->function == delayed_work_timer_fn) {
+ struct delayed_work *work = from_timer(work, timer, timer);
+ return work->work.func;
+ }
+
+ return timer->function;
}
static bool timer_is_static_object(void *addr)
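
For reference (this expansion is not part of the patch): from_timer() is a container_of() wrapper, so the timer_debug_hint() change above maps the embedded timer_list back to its enclosing delayed_work in order to report the real work function to debugobjects:

	/* Roughly what from_timer(work, timer, timer) resolves to here. */
	struct delayed_work *work =
		container_of(timer, struct delayed_work, timer);

	return work->work.func;

Without it, every delayed work item gets the same hint, delayed_work_timer_fn, which is rarely useful when chasing a misused timer.
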
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 420ff4bc67fd..a2e963985a29 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -121,6 +121,13 @@ config TRACING
select EVENT_TRACING
select TRACE_CLOCK
+config GLOBAL_TRACE_BUF_SIZE
+ int
+ prompt "Global ftrace buffer size (for trace_printk)" if EXPERT
+ range 0 4194034
+ default 1441792 # 16384 * 88 (sizeof(struct print_entry))
+ depends on TRACING
+
config GENERIC_TRACER
bool
select TRACING
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 88de94da596b..60249f9f8d5c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -797,9 +797,7 @@ int tracing_is_enabled(void)
* to not have to wait for all that output. Anyway this can be
* boot time and run time configurable.
*/
-#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
-
-static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
+static unsigned long trace_buf_size = CONFIG_GLOBAL_TRACE_BUF_SIZE;
/* trace_types holds a link list of available tracers. */
static struct tracer *trace_types __read_mostly;
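
Taken together with the Kconfig hunk above, the old hard-coded TRACE_BUF_SIZE_DEFAULT becomes a build-time choice while keeping the same default, 16384 entries * 88 bytes (sizeof(struct print_entry)) = 1441792. A config fragment that simply pins the previous behaviour might look like:

CONFIG_TRACING=y
CONFIG_GLOBAL_TRACE_BUF_SIZE=1441792
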
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index ad912511a0c0..2e7a449ef9f7 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -442,6 +442,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
if (softlockup_panic)
panic("softlockup: hung tasks");
+ else
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
return HRTIMER_RESTART;
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 247bf0b1582c..cce46cf75d76 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -154,6 +154,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
+ else
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
__this_cpu_write(hard_watchdog_warn, true);
return;