Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c     | 52
-rw-r--r--  kernel/hung_task.c       |  2
-rw-r--r--  kernel/locking/lockdep.c | 11
-rw-r--r--  kernel/panic.c           | 15
-rw-r--r--  kernel/sched/deadline.c  |  2
-rw-r--r--  kernel/sched/rt.c        |  2
-rw-r--r--  kernel/trace/Kconfig     |  7
-rw-r--r--  kernel/trace/trace.c     |  4
-rw-r--r--  kernel/watchdog.c        |  2
-rw-r--r--  kernel/watchdog_hld.c    |  2
10 files changed, 58 insertions(+), 41 deletions(-)
```diff
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d2b354991bf5..6a57f3bc9181 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5272,20 +5272,16 @@ static int __perf_read_group_add(struct perf_event *leader,
 }
 
 static int perf_read_group(struct perf_event *event,
-			   u64 read_format, char __user *buf)
+			   u64 read_format, char __user *buf,
+			   u64 *values)
 {
 	struct perf_event *leader = event->group_leader, *child;
 	struct perf_event_context *ctx = leader->ctx;
 	int ret;
-	u64 *values;
 
 	lockdep_assert_held(&ctx->mutex);
 
-	values = kzalloc(event->read_size, GFP_KERNEL);
-	if (!values)
-		return -ENOMEM;
-
-	values[0] = 1 + leader->nr_siblings;
+	*values = 1 + leader->nr_siblings;
 
 	/*
 	 * By locking the child_mutex of the leader we effectively
@@ -5303,25 +5299,17 @@ static int perf_read_group(struct perf_event *event,
 			goto unlock;
 	}
 
-	mutex_unlock(&leader->child_mutex);
-
 	ret = event->read_size;
-	if (copy_to_user(buf, values, event->read_size))
-		ret = -EFAULT;
-	goto out;
-
 unlock:
 	mutex_unlock(&leader->child_mutex);
-out:
-	kfree(values);
 
 	return ret;
 }
 
 static int perf_read_one(struct perf_event *event,
-			 u64 read_format, char __user *buf)
+			 u64 read_format, char __user *buf,
+			 u64 *values)
 {
 	u64 enabled, running;
-	u64 values[4];
 	int n = 0;
 
 	values[n++] = __perf_event_read_value(event, &enabled, &running);
@@ -5332,9 +5320,6 @@ static int perf_read_one(struct perf_event *event,
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(event);
 
-	if (copy_to_user(buf, values, n * sizeof(u64)))
-		return -EFAULT;
-
 	return n * sizeof(u64);
 }
 
@@ -5355,7 +5340,8 @@ static bool is_event_hup(struct perf_event *event)
  * Read the performance event - simple non blocking version for now
  */
 static ssize_t
-__perf_read(struct perf_event *event, char __user *buf, size_t count)
+__perf_read(struct perf_event *event, char __user *buf,
+	    size_t count, u64 *values)
 {
 	u64 read_format = event->attr.read_format;
 	int ret;
@@ -5373,9 +5359,9 @@ __perf_read(struct perf_event *event, char __user *buf, size_t count)
 	WARN_ON_ONCE(event->ctx->parent_ctx);
 
 	if (read_format & PERF_FORMAT_GROUP)
-		ret = perf_read_group(event, read_format, buf);
+		ret = perf_read_group(event, read_format, buf, values);
 	else
-		ret = perf_read_one(event, read_format, buf);
+		ret = perf_read_one(event, read_format, buf, values);
 
 	return ret;
 }
@@ -5385,16 +5371,31 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
 	struct perf_event *event = file->private_data;
 	struct perf_event_context *ctx;
+	u64 stack_values[8];
+	u64 *values;
 	int ret;
 
 	ret = security_perf_event_read(event);
 	if (ret)
 		return ret;
 
+	if (event->read_size <= sizeof(stack_values))
+		values = memset(stack_values, 0, event->read_size);
+	else
+		values = kzalloc(event->read_size, GFP_KERNEL);
+	if (!values)
+		return -ENOMEM;
+
 	ctx = perf_event_ctx_lock(event);
-	ret = __perf_read(event, buf, count);
+	ret = __perf_read(event, buf, count, values);
 	perf_event_ctx_unlock(event, ctx);
 
+	if (ret > 0 && copy_to_user(buf, values, ret))
+		ret = -EFAULT;
+
+	if (values != stack_values)
+		kfree(values);
+
 	return ret;
 }
 
@@ -11203,7 +11204,8 @@ void perf_pmu_unregister(struct pmu *pmu)
 		device_del(pmu->dev);
 		put_device(pmu->dev);
 	}
-	free_pmu_context(pmu);
+	if (!find_pmu_context(pmu->task_ctx_nr))
+		free_pmu_context(pmu);
 	mutex_unlock(&pmus_lock);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_unregister);
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index cff3ae8c818f..e10a8142bb0e 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -120,6 +120,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 		console_verbose();
 		hung_task_show_lock = true;
 		hung_task_call_panic = true;
+	} else {
+		add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 	}
 
 	/*
```
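A note on the perf_read() rework above: the per-call kzalloc() is hoisted out of perf_read_group()/perf_read_one() into perf_read(), which first tries a 64-byte on-stack array (a single-event read needs at most four u64 values, as the removed `values[4]` shows) and only falls back to the heap for large group reads. A minimal userspace sketch of that small-buffer pattern — `read_values()` and its error handling are illustrative stand-ins, not kernel API:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

/* Stage results in a stack array when they fit, else fall back to the
 * heap; free only the fallback. Mirrors the perf_read() change, with
 * calloc() standing in for kzalloc(). */
static ssize_t read_values(size_t read_size)
{
	uint64_t stack_values[8];	/* 64 bytes, as in perf_read() */
	uint64_t *values;
	ssize_t ret;

	if (read_size <= sizeof(stack_values))
		values = memset(stack_values, 0, read_size);
	else
		values = calloc(1, read_size);
	if (!values)
		return -1;		/* -ENOMEM in the kernel version */

	/* ... fill 'values' and copy it out, as __perf_read() would ... */
	ret = (ssize_t)read_size;

	if (values != stack_values)
		free(values);
	return ret;
}

int main(void)
{
	printf("small read: %zd bytes (stack path)\n", read_values(32));
	printf("large read: %zd bytes (heap path)\n", read_values(4096));
	return 0;
}
```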
```diff
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index f06b91ca6482..adfbcffe1d48 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5428,11 +5428,14 @@ static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
 		if (match_held_lock(hlock, lock)) {
 			/*
-			 * Grab 16bits of randomness; this is sufficient to not
-			 * be guessable and still allows some pin nesting in
-			 * our u32 pin_count.
+			 * Grab 6bits of randomness; this is barely sufficient
+			 * to not be guessable and still allows some 32 levels
+			 * of pin nesting in our u12 pin_count.
 			 */
-			cookie.val = 1 + (sched_clock() & 0xffff);
+			cookie.val = 1 + (sched_clock() & 0x3f);
+			if (DEBUG_LOCKS_WARN_ON(hlock->pin_count + cookie.val >= 1 << 12))
+				return NIL_COOKIE;
+
 			hlock->pin_count += cookie.val;
 			return cookie;
 		}
diff --git a/kernel/panic.c b/kernel/panic.c
index a3308af28a21..6718f8e5a2dd 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -266,13 +266,6 @@ void panic(const char *fmt, ...)
 		buf[len - 1] = '\0';
 
 	pr_emerg("Kernel panic - not syncing: %s\n", buf);
-#ifdef CONFIG_DEBUG_BUGVERBOSE
-	/*
-	 * Avoid nested stack-dumping if a panic occurs during oops processing
-	 */
-	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
-		dump_stack();
-#endif
 
 	/*
 	 * If kgdb is enabled, give it a chance to run before we stop all
@@ -315,6 +308,14 @@ void panic(const char *fmt, ...)
 
 	panic_print_sys_info(false);
 
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+	/*
+	 * Avoid nested stack-dumping if a panic occurs during oops processing
+	 */
+	if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
+		dump_stack();
+#endif
+
 	kmsg_dump(KMSG_DUMP_PANIC);
 
 	/*
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 7bf561262cb8..48302429a7db 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -863,7 +863,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se)
 	 * entity.
 	 */
 	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
-		printk_deferred_once("sched: DL replenish lagged too much\n");
+		printk_deferred_once(KERN_NOTICE "sched: DL replenish lagged too much\n");
 		dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
 		dl_se->runtime = pi_of(dl_se)->dl_runtime;
 	}
```
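For the lockdep hunk above: a pin cookie now carries only six random bits (values 1..64 after the +1), and the new DEBUG_LOCKS_WARN_ON rejects any pin that would overflow the 12-bit pin_count. A standalone sketch of that arithmetic — the `pin()` helper and `fake_clock` are hypothetical, with a plain counter standing in for the held-lock state:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for sched_clock(); any varying 64-bit source works here. */
static uint64_t fake_clock = 0x123456789abcULL;

/* Model of __lock_pin_lock()'s cookie math after the patch:
 * 6 bits of randomness, pin_count capped at 12 bits. Returns the
 * cookie value, or 0 (a NIL cookie) when the pin would overflow. */
static unsigned int pin(unsigned int *pin_count)
{
	unsigned int cookie = 1 + (fake_clock++ & 0x3f);	/* 1..64 */

	if (*pin_count + cookie >= 1U << 12)
		return 0;
	*pin_count += cookie;
	return cookie;
}

int main(void)
{
	unsigned int pin_count = 0, depth = 0;

	while (pin(&pin_count))
		depth++;
	/* At worst about 4096/64 = 64 pins fit; the patch comment's
	 * "some 32 levels" is a conservative bound. */
	printf("nested pins before overflow: %u\n", depth);
	return 0;
}
```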
```diff
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 8c9ed9664840..c7feb90bb42d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1019,7 +1019,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 		 */
 		if (likely(rt_b->rt_runtime)) {
 			rt_rq->rt_throttled = 1;
-			printk_deferred_once("sched: RT throttling activated\n");
+			printk_deferred_once(KERN_NOTICE "sched: RT throttling activated\n");
 		} else {
 			/*
 			 * In case we did anyway, make it go away,
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index ccd6a5ade3e9..5ec967cb2ae9 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -146,6 +146,13 @@ config TRACING
 	select TRACE_CLOCK
 	select TASKS_RCU if PREEMPTION
 
+config GLOBAL_TRACE_BUF_SIZE
+	int
+	prompt "Global ftrace buffer size (for trace_printk)" if EXPERT
+	range 0 4194034
+	default 1441792 # 16384 * 88 (sizeof(struct print_entry))
+	depends on TRACING
+
 config GENERIC_TRACER
 	bool
 	select TRACING
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b8dd54627075..88c630b1db3e 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -813,9 +813,7 @@ int tracing_is_enabled(void)
  * to not have to wait for all that output. Anyway this can be
  * boot time and run time configurable.
  */
-#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */
-
-static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
+static unsigned long		trace_buf_size = CONFIG_GLOBAL_TRACE_BUF_SIZE;
 
 /* trace_types holds a link list of available tracers. */
 static struct tracer		*trace_types __read_mostly;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index ecb0e8346e65..cd9354d18bae 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -442,6 +442,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
+		else
+			add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 	}
 
 	return HRTIMER_RESTART;
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 247bf0b1582c..cce46cf75d76 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -154,6 +154,8 @@ static void watchdog_overflow_callback(struct perf_event *event,
 
 		if (hardlockup_panic)
 			nmi_panic(regs, "Hard LOCKUP");
+		else
+			add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 
 		__this_cpu_write(hard_watchdog_warn, true);
 		return;
```
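One closing note on the tracing change: the GLOBAL_TRACE_BUF_SIZE default simply bakes in the old TRACE_BUF_SIZE_DEFAULT arithmetic — 16384 entries times the 88-byte print_entry cited in the Kconfig comment. A trivial standalone check of that constant (the 88 is hard-coded from the comment, not read from kernel headers):

```c
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned long entries = 16384;	/* trace_printk entries */
	const unsigned long entry_size = 88;	/* sizeof(struct print_entry),
						   per the Kconfig comment */

	assert(entries * entry_size == 1441792UL);
	printf("default trace_printk buffer: %lu bytes (%lu KiB)\n",
	       entries * entry_size, entries * entry_size / 1024);
	return 0;
}
```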
