author     Linus Torvalds <torvalds@linux-foundation.org>   2009-09-23 09:46:15 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-09-23 09:46:15 -0700
commit     31bbb9b58d1e8ebcf2b28c95c2250a9f8e31e397 (patch)
tree       6bb0c0490d66d32eca43e73abb28d8b3ab0e7b91 /kernel
parent     ff830b8e5f999d1ccbd0282a666520f0b557daa4 (diff)
parent     3f0a525ebf4b8ef041a332bbe4a73aee94bb064b (diff)
Merge branch 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  itimers: Add tracepoints for itimer
  hrtimer: Add tracepoint for hrtimers
  timers: Add tracepoints for timer_list timers
  cputime: Optimize jiffies_to_cputime(1)
  itimers: Simplify arm_timer() code a bit
  itimers: Fix periodic tics precision
  itimers: Merge ITIMER_VIRT and ITIMER_PROF

Trivial header file include conflicts in kernel/fork.c
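The series wires itimer, hrtimer and timer_list activity into the generic trace-event infrastructure (CREATE_TRACE_POINTS in kernel/timer.c, below), so the new events appear under the "timer" group of the ftrace events directory. A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug; the enable/trace control files below are the standard ftrace interface, not something added by this merge:

	/*
	 * Sketch (not part of this merge): enable the "timer" trace-event
	 * group added by this series and dump whatever the ring buffer holds.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	#define TRACE_DIR "/sys/kernel/debug/tracing"

	static void write_file(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f) {
			perror(path);
			exit(1);
		}
		fputs(val, f);
		fclose(f);
	}

	int main(void)
	{
		char line[512];
		FILE *trace;

		/* Enable every event under events/timer (timer_*, hrtimer_*, itimer_*). */
		write_file(TRACE_DIR "/events/timer/enable", "1");

		trace = fopen(TRACE_DIR "/trace", "r");
		if (!trace) {
			perror("trace");
			return 1;
		}
		while (fgets(line, sizeof(line), trace))
			fputs(line, stdout);
		fclose(trace);

		write_file(TRACE_DIR "/events/timer/enable", "0");
		return 0;
	}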
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/fork.c             |   9
-rw-r--r--   kernel/hrtimer.c          |  40
-rw-r--r--   kernel/itimer.c           | 169
-rw-r--r--   kernel/posix-cpu-timers.c | 155
-rw-r--r--   kernel/sched.c            |   9
-rw-r--r--   kernel/timer.c            |  32
6 files changed, 245 insertions(+), 169 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 8f45b0ebdda..51ad0b0b726 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -63,6 +63,7 @@
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
+#include <linux/posix-timers.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -805,10 +806,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
thread_group_cputime_init(sig);
/* Expiration times and increments. */
- sig->it_virt_expires = cputime_zero;
- sig->it_virt_incr = cputime_zero;
- sig->it_prof_expires = cputime_zero;
- sig->it_prof_incr = cputime_zero;
+ sig->it[CPUCLOCK_PROF].expires = cputime_zero;
+ sig->it[CPUCLOCK_PROF].incr = cputime_zero;
+ sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
+ sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
/* Cached expiration times. */
sig->cputime_expires.prof_exp = cputime_zero;
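The hunk above replaces the four separate it_{prof,virt}_{expires,incr} fields of signal_struct with an array indexed by CPUCLOCK_PROF/CPUCLOCK_VIRT. The element type is introduced by the companion include/linux/sched.h change, which is outside this kernel/-limited diffstat; inferred from the fields used in kernel/itimer.c and kernel/posix-cpu-timers.c below, it presumably looks roughly like this:

	/*
	 * Inferred sketch of the new per-process CPU itimer state; the real
	 * definition lands in include/linux/sched.h (not shown here).
	 */
	struct cpu_itimer {
		cputime_t expires;	/* next expiry, in cputime units */
		cputime_t incr;		/* reload interval, in cputime units */
		u32 error;		/* accumulated rounding error of expires, in ns */
		u32 incr_error;		/* rounding error of one interval, in ns */
	};

	/*
	 * In signal_struct, the old it_{prof,virt}_{expires,incr} fields become
	 * something like:
	 *
	 *	struct cpu_itimer it[2];	 indexed by CPUCLOCK_PROF / CPUCLOCK_VIRT
	 */

Indexing by clock id is what lets do_getitimer()/do_setitimer() collapse their duplicated ITIMER_VIRTUAL and ITIMER_PROF branches into single helpers, as shown in the kernel/itimer.c diff below.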
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c03f221fee4..e5d98ce50f8 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -48,6 +48,8 @@
#include <asm/uaccess.h>
+#include <trace/events/timer.h>
+
/*
* The timer bases:
*
@@ -442,6 +444,26 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif
+static inline void
+debug_init(struct hrtimer *timer, clockid_t clockid,
+ enum hrtimer_mode mode)
+{
+ debug_hrtimer_init(timer);
+ trace_hrtimer_init(timer, clockid, mode);
+}
+
+static inline void debug_activate(struct hrtimer *timer)
+{
+ debug_hrtimer_activate(timer);
+ trace_hrtimer_start(timer);
+}
+
+static inline void debug_deactivate(struct hrtimer *timer)
+{
+ debug_hrtimer_deactivate(timer);
+ trace_hrtimer_cancel(timer);
+}
+
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
@@ -798,7 +820,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
struct hrtimer *entry;
int leftmost = 1;
- debug_hrtimer_activate(timer);
+ debug_activate(timer);
/*
* Find the right place in the rbtree:
@@ -884,7 +906,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
* reprogramming happens in the interrupt handler. This is a
* rare case and less expensive than a smp call.
*/
- debug_hrtimer_deactivate(timer);
+ debug_deactivate(timer);
timer_stats_hrtimer_clear_start_info(timer);
reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
@@ -1117,7 +1139,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
enum hrtimer_mode mode)
{
- debug_hrtimer_init(timer);
+ debug_init(timer, clock_id, mode);
__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
@@ -1141,7 +1163,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
-static void __run_hrtimer(struct hrtimer *timer)
+static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
struct hrtimer_clock_base *base = timer->base;
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
@@ -1150,7 +1172,7 @@ static void __run_hrtimer(struct hrtimer *timer)
WARN_ON(!irqs_disabled());
- debug_hrtimer_deactivate(timer);
+ debug_deactivate(timer);
__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
timer_stats_account_hrtimer(timer);
fn = timer->function;
@@ -1161,7 +1183,9 @@ static void __run_hrtimer(struct hrtimer *timer)
* the timer base.
*/
spin_unlock(&cpu_base->lock);
+ trace_hrtimer_expire_entry(timer, now);
restart = fn(timer);
+ trace_hrtimer_expire_exit(timer);
spin_lock(&cpu_base->lock);
/*
@@ -1272,7 +1296,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
break;
}
- __run_hrtimer(timer);
+ __run_hrtimer(timer, &basenow);
}
base++;
}
@@ -1394,7 +1418,7 @@ void hrtimer_run_queues(void)
hrtimer_get_expires_tv64(timer))
break;
- __run_hrtimer(timer);
+ __run_hrtimer(timer, &base->softirq_time);
}
spin_unlock(&cpu_base->lock);
}
@@ -1571,7 +1595,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
while ((node = rb_first(&old_base->active))) {
timer = rb_entry(node, struct hrtimer, node);
BUG_ON(hrtimer_callback_running(timer));
- debug_hrtimer_deactivate(timer);
+ debug_deactivate(timer);
/*
* Mark it as STATE_MIGRATE not INACTIVE otherwise the
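With the debug_init/debug_activate/debug_deactivate wrappers above, every hrtimer user now emits hrtimer_init, hrtimer_start, hrtimer_expire_entry/exit and hrtimer_cancel events with no change to callers. A hypothetical module sketch (not part of this merge) that would exercise those paths:

	#include <linux/module.h>
	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer demo_timer;

	static enum hrtimer_restart demo_fn(struct hrtimer *t)
	{
		pr_info("hrtimer demo fired\n");
		return HRTIMER_NORESTART;		/* one-shot: do not re-arm */
	}

	static int __init demo_init(void)
	{
		/* hits trace_hrtimer_init() via debug_init() */
		hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		demo_timer.function = demo_fn;

		/* hits trace_hrtimer_start() via debug_activate(); expiry then
		 * produces hrtimer_expire_entry/exit from __run_hrtimer() */
		hrtimer_start(&demo_timer, ktime_set(1, 0), HRTIMER_MODE_REL);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		hrtimer_cancel(&demo_timer);	/* emits hrtimer_cancel if still queued */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");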
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 58762f7077e..b03451ede52 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -12,6 +12,7 @@
#include <linux/time.h>
#include <linux/posix-timers.h>
#include <linux/hrtimer.h>
+#include <trace/events/timer.h>
#include <asm/uaccess.h>
@@ -41,10 +42,43 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
return ktime_to_timeval(rem);
}
+static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+ struct itimerval *const value)
+{
+ cputime_t cval, cinterval;
+ struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+ spin_lock_irq(&tsk->sighand->siglock);
+
+ cval = it->expires;
+ cinterval = it->incr;
+ if (!cputime_eq(cval, cputime_zero)) {
+ struct task_cputime cputime;
+ cputime_t t;
+
+ thread_group_cputimer(tsk, &cputime);
+ if (clock_id == CPUCLOCK_PROF)
+ t = cputime_add(cputime.utime, cputime.stime);
+ else
+ /* CPUCLOCK_VIRT */
+ t = cputime.utime;
+
+ if (cputime_le(cval, t))
+ /* about to fire */
+ cval = cputime_one_jiffy;
+ else
+ cval = cputime_sub(cval, t);
+ }
+
+ spin_unlock_irq(&tsk->sighand->siglock);
+
+ cputime_to_timeval(cval, &value->it_value);
+ cputime_to_timeval(cinterval, &value->it_interval);
+}
+
int do_getitimer(int which, struct itimerval *value)
{
struct task_struct *tsk = current;
- cputime_t cinterval, cval;
switch (which) {
case ITIMER_REAL:
@@ -55,44 +89,10 @@ int do_getitimer(int which, struct itimerval *value)
spin_unlock_irq(&tsk->sighand->siglock);
break;
case ITIMER_VIRTUAL:
- spin_lock_irq(&tsk->sighand->siglock);
- cval = tsk->signal->it_virt_expires;
- cinterval = tsk->signal->it_virt_incr;
- if (!cputime_eq(cval, cputime_zero)) {
- struct task_cputime cputime;
- cputime_t utime;
-
- thread_group_cputimer(tsk, &cputime);
- utime = cputime.utime;
- if (cputime_le(cval, utime)) { /* about to fire */
- cval = jiffies_to_cputime(1);
- } else {
- cval = cputime_sub(cval, utime);
- }
- }
- spin_unlock_irq(&tsk->sighand->siglock);
- cputime_to_timeval(cval, &value->it_value);
- cputime_to_timeval(cinterval, &value->it_interval);
+ get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
break;
case ITIMER_PROF:
- spin_lock_irq(&tsk->sighand->siglock);
- cval = tsk->signal->it_prof_expires;
- cinterval = tsk->signal->it_prof_incr;
- if (!cputime_eq(cval, cputime_zero)) {
- struct task_cputime times;
- cputime_t ptime;
-
- thread_group_cputimer(tsk, &times);
- ptime = cputime_add(times.utime, times.stime);
- if (cputime_le(cval, ptime)) { /* about to fire */
- cval = jiffies_to_cputime(1);
- } else {
- cval = cputime_sub(cval, ptime);
- }
- }
- spin_unlock_irq(&tsk->sighand->siglock);
- cputime_to_timeval(cval, &value->it_value);
- cputime_to_timeval(cinterval, &value->it_interval);
+ get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
break;
default:
return(-EINVAL);
@@ -123,11 +123,62 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
struct signal_struct *sig =
container_of(timer, struct signal_struct, real_timer);
+ trace_itimer_expire(ITIMER_REAL, sig->leader_pid, 0);
kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid);
return HRTIMER_NORESTART;
}
+static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
+{
+ struct timespec ts;
+ s64 cpu_ns;
+
+ cputime_to_timespec(ct, &ts);
+ cpu_ns = timespec_to_ns(&ts);
+
+ return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
+}
+
+static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+ const struct itimerval *const value,
+ struct itimerval *const ovalue)
+{
+ cputime_t cval, nval, cinterval, ninterval;
+ s64 ns_ninterval, ns_nval;
+ struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+ nval = timeval_to_cputime(&value->it_value);
+ ns_nval = timeval_to_ns(&value->it_value);
+ ninterval = timeval_to_cputime(&value->it_interval);
+ ns_ninterval = timeval_to_ns(&value->it_interval);
+
+ it->incr_error = cputime_sub_ns(ninterval, ns_ninterval);
+ it->error = cputime_sub_ns(nval, ns_nval);
+
+ spin_lock_irq(&tsk->sighand->siglock);
+
+ cval = it->expires;
+ cinterval = it->incr;
+ if (!cputime_eq(cval, cputime_zero) ||
+ !cputime_eq(nval, cputime_zero)) {
+ if (cputime_gt(nval, cputime_zero))
+ nval = cputime_add(nval, cputime_one_jiffy);
+ set_process_cpu_timer(tsk, clock_id, &nval, &cval);
+ }
+ it->expires = nval;
+ it->incr = ninterval;
+ trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
+ ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
+
+ spin_unlock_irq(&tsk->sighand->siglock);
+
+ if (ovalue) {
+ cputime_to_timeval(cval, &ovalue->it_value);
+ cputime_to_timeval(cinterval, &ovalue->it_interval);
+ }
+}
+
/*
* Returns true if the timeval is in canonical form
*/
@@ -139,7 +190,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
struct task_struct *tsk = current;
struct hrtimer *timer;
ktime_t expires;
- cputime_t cval, cinterval, nval, ninterval;
/*
* Validate the timevals in value.
@@ -171,51 +221,14 @@ again:
} else
tsk->signal->it_real_incr.tv64 = 0;
+ trace_itimer_state(ITIMER_REAL, value, 0);
spin_unlock_irq(&tsk->sighand->siglock);
break;
case ITIMER_VIRTUAL:
- nval = timeval_to_cputime(&value->it_value);
- ninterval = timeval_to_cputime(&value->it_interval);
- spin_lock_irq(&tsk->sighand->siglock);
- cval = tsk->signal->it_virt_expires;
- cinterval = tsk->signal->it_virt_incr;
- if (!cputime_eq(cval, cputime_zero) ||
- !cputime_eq(nval, cputime_zero)) {
- if (cputime_gt(nval, cputime_zero))
- nval = cputime_add(nval,
- jiffies_to_cputime(1));
- set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
- &nval, &cval);
- }
- tsk->signal->it_virt_expires = nval;
- tsk->signal->it_virt_incr = ninterval;
- spin_unlock_irq(&tsk->sighand->siglock);
- if (ovalue) {
- cputime_to_timeval(cval, &ovalue->it_value);
- cputime_to_timeval(cinterval, &ovalue->it_interval);
- }
+ set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
break;
case ITIMER_PROF:
- nval = timeval_to_cputime(&value->it_value);
- ninterval = timeval_to_cputime(&value->it_interval);
- spin_lock_irq(&tsk->sighand->siglock);
- cval = tsk->signal->it_prof_expires;
- cinterval = tsk->signal->it_prof_incr;
- if (!cputime_eq(cval, cputime_zero) ||
- !cputime_eq(nval, cputime_zero)) {
- if (cputime_gt(nval, cputime_zero))
- nval = cputime_add(nval,
- jiffies_to_cputime(1));
- set_process_cpu_timer(tsk, CPUCLOCK_PROF,
- &nval, &cval);
- }
- tsk->signal->it_prof_expires = nval;
- tsk->signal->it_prof_incr = ninterval;
- spin_unlock_irq(&tsk->sighand->siglock);
- if (ovalue) {
- cputime_to_timeval(cval, &ovalue->it_value);
- cputime_to_timeval(cinterval, &ovalue->it_interval);
- }
+ set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
break;
default:
return -EINVAL;
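get_cpu_itimer() and set_cpu_itimer() fold the duplicated ITIMER_VIRTUAL and ITIMER_PROF cases into one helper keyed by CPUCLOCK_VIRT/CPUCLOCK_PROF; the userspace-visible setitimer()/getitimer() behaviour is unchanged. A small userspace sketch (not part of the patch) that drives the consolidated path and, with the new tracepoints enabled, produces itimer_state and itimer_expire events:

	#include <signal.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/time.h>

	static volatile sig_atomic_t fired;

	static void on_sigprof(int sig)
	{
		fired = 1;
	}

	int main(void)
	{
		struct sigaction sa;
		struct itimerval it = {
			.it_value    = { .tv_sec = 0, .tv_usec = 100000 }, /* first expiry: 100 ms of CPU time */
			.it_interval = { .tv_sec = 0, .tv_usec = 100000 }, /* then every 100 ms of CPU time */
		};

		memset(&sa, 0, sizeof(sa));
		sa.sa_handler = on_sigprof;
		sigaction(SIGPROF, &sa, NULL);

		/* goes through set_cpu_itimer(tsk, CPUCLOCK_PROF, ...) above;
		 * ITIMER_VIRTUAL works the same way via CPUCLOCK_VIRT */
		setitimer(ITIMER_PROF, &it, NULL);

		while (!fired)
			;	/* burn CPU so the profiling clock advances */

		printf("SIGPROF delivered\n");
		return 0;
	}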
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e33a21cb940..5c9dc228747 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -8,17 +8,18 @@
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
+#include <trace/events/timer.h>
/*
* Called after updating RLIMIT_CPU to set timer expiration if necessary.
*/
void update_rlimit_cpu(unsigned long rlim_new)
{
- cputime_t cputime;
+ cputime_t cputime = secs_to_cputime(rlim_new);
+ struct signal_struct *const sig = current->signal;
- cputime = secs_to_cputime(rlim_new);
- if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
- cputime_gt(current->signal->it_prof_expires, cputime)) {
+ if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
+ cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
spin_lock_irq(&current->sighand->siglock);
set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
spin_unlock_irq(&current->sighand->siglock);
@@ -542,6 +543,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
now);
}
+static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+{
+ return cputime_eq(expires, cputime_zero) ||
+ cputime_gt(expires, new_exp);
+}
+
+static inline int expires_le(cputime_t expires, cputime_t new_exp)
+{
+ return !cputime_eq(expires, cputime_zero) &&
+ cputime_le(expires, new_exp);
+}
/*
* Insert the timer on the appropriate list before any timers that
* expire later. This must be called with the tasklist_lock held
@@ -586,34 +598,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
*/
if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+ union cpu_time_count *exp = &nt->expires;
+
switch (CPUCLOCK_WHICH(timer->it_clock)) {
default:
BUG();
case CPUCLOCK_PROF:
- if (cputime_eq(p->cputime_expires.prof_exp,
- cputime_zero) ||
- cputime_gt(p->cputime_expires.prof_exp,
- nt->expires.cpu))
- p->cputime_expires.prof_exp =
- nt->expires.cpu;
+ if (expires_gt(p->cputime_expires.prof_exp,
+ exp->cpu))
+ p->cputime_expires.prof_exp = exp->cpu;
break;
case CPUCLOCK_VIRT:
- if (cputime_eq(p->cputime_expires.virt_exp,
- cputime_zero) ||
- cputime_gt(p->cputime_expires.virt_exp,
- nt->expires.cpu))
- p->cputime_expires.virt_exp =
- nt->expires.cpu;
+ if (expires_gt(p->cputime_expires.virt_exp,
+ exp->cpu))
+ p->cputime_expires.virt_exp = exp->cpu;
break;
case CPUCLOCK_SCHED:
if (p->cputime_expires.sched_exp == 0 ||
- p->cputime_expires.sched_exp >
- nt->expires.sched)
+ p->cputime_expires.sched_exp > exp->sched)
p->cputime_expires.sched_exp =
- nt->expires.sched;
+ exp->sched;
break;
}
} else {
+ struct signal_struct *const sig = p->signal;
+ union cpu_time_count *exp = &timer->it.cpu.expires;
+
/*
* For a process timer, set the cached expiration time.
*/
@@ -621,30 +631,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
default:
BUG();
case CPUCLOCK_VIRT:
- if (!cputime_eq(p->signal->it_virt_expires,
- cputime_zero) &&
- cputime_lt(p->signal->it_virt_expires,
- timer->it.cpu.expires.cpu))
+ if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
+ exp->cpu))
break;
- p->signal->cputime_expires.virt_exp =
- timer->it.cpu.expires.cpu;
+ sig->cputime_expires.virt_exp = exp->cpu;
break;
case CPUCLOCK_PROF:
- if (!cputime_eq(p->signal->it_prof_expires,
- cputime_zero) &&
- cputime_lt(p->signal->it_prof_expires,
- timer->it.cpu.expires.cpu))
+ if (expires_le(sig->it[CPUCLOCK_PROF].expires,
+ exp->cpu))
break;
- i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
+ i = sig->rlim[RLIMIT_CPU].rlim_cur;
if (i != RLIM_INFINITY &&
- i <= cputime_to_secs(timer->it.cpu.expires.cpu))
+ i <= cputime_to_secs(exp->cpu))
break;
- p->signal->cputime_expires.prof_exp =
- timer->it.cpu.expires.cpu;
+ sig->cputime_expires.prof_exp = exp->cpu;
break;
case CPUCLOCK_SCHED:
- p->signal->cputime_expires.sched_exp =
- timer->it.cpu.expires.sched;
+ sig->cputime_expires.sched_exp = exp->sched;
break;
}
}
@@ -1071,6 +1074,40 @@ static void stop_process_timers(struct task_struct *tsk)
spin_unlock_irqrestore(&cputimer->lock, flags);
}
+static u32 onecputick;
+
+static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
+ cputime_t *expires, cputime_t cur_time, int signo)
+{
+ if (cputime_eq(it->expires, cputime_zero))
+ return;
+
+ if (cputime_ge(cur_time, it->expires)) {
+ if (!cputime_eq(it->incr, cputime_zero)) {
+ it->expires = cputime_add(it->expires, it->incr);
+ it->error += it->incr_error;
+ if (it->error >= onecputick) {
+ it->expires = cputime_sub(it->expires,
+ cputime_one_jiffy);
+ it->error -= onecputick;
+ }
+ } else {
+ it->expires = cputime_zero;
+ }
+
+ trace_itimer_expire(signo == SIGPROF ?
+ ITIMER_PROF : ITIMER_VIRTUAL,
+ tsk->signal->leader_pid, cur_time);
+ __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+ }
+
+ if (!cputime_eq(it->expires, cputime_zero) &&
+ (cputime_eq(*expires, cputime_zero) ||
+ cputime_lt(it->expires, *expires))) {
+ *expires = it->expires;
+ }
+}
+
/*
* Check for any per-thread CPU timers that have fired and move them
* off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -1090,10 +1127,10 @@ static void check_process_timers(struct task_struct *tsk,
* Don't sample the current process CPU clocks if there are no timers.
*/
if (list_empty(&timers[CPUCLOCK_PROF]) &&
- cputime_eq(sig->it_prof_expires, cputime_zero) &&
+ cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
list_empty(&timers[CPUCLOCK_VIRT]) &&
- cputime_eq(sig->it_virt_expires, cputime_zero) &&
+ cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
list_empty(&timers[CPUCLOCK_SCHED])) {
stop_process_timers(tsk);
return;
@@ -1153,38 +1190,11 @@ static void check_process_timers(struct task_struct *tsk,
/*
* Check for the special case process timers.
*/
- if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
- if (cputime_ge(ptime, sig->it_prof_expires)) {
- /* ITIMER_PROF fires and reloads. */
- sig->it_prof_expires = sig->it_prof_incr;
- if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
- sig->it_prof_expires = cputime_add(
- sig->it_prof_expires, ptime);
- }
- __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
- }
- if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
- (cputime_eq(prof_expires, cputime_zero) ||
- cputime_lt(sig->it_prof_expires, prof_expires))) {
- prof_expires = sig->it_prof_expires;
- }
- }
- if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
- if (cputime_ge(utime, sig->it_virt_expires)) {
- /* ITIMER_VIRTUAL fires and reloads. */
- sig->it_virt_expires = sig->it_virt_incr;
- if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
- sig->it_virt_expires = cputime_add(
- sig->it_virt_expires, utime);
- }
- __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
- }
- if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
- (cputime_eq(virt_expires, cputime_zero) ||
- cputime_lt(sig->it_virt_expires, virt_expires))) {
- virt_expires = sig->it_virt_expires;
- }
- }
+ check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
+ SIGPROF);
+ check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
+ SIGVTALRM);
+
if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
unsigned long psecs = cputime_to_secs(ptime);
cputime_t x;
@@ -1457,7 +1467,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
if (!cputime_eq(*oldval, cputime_zero)) {
if (cputime_le(*oldval, now.cpu)) {
/* Just about to fire. */
- *oldval = jiffies_to_cputime(1);
+ *oldval = cputime_one_jiffy;
} else {
*oldval = cputime_sub(*oldval, now.cpu);
}
@@ -1703,10 +1713,15 @@ static __init int init_posix_cpu_timers(void)
.nsleep = thread_cpu_nsleep,
.nsleep_restart = thread_cpu_nsleep_restart,
};
+ struct timespec ts;
register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
+ cputime_to_timespec(cputime_one_jiffy, &ts);
+ onecputick = ts.tv_nsec;
+ WARN_ON(ts.tv_sec != 0);
+
return 0;
}
__initcall(init_posix_cpu_timers);
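The "Fix periodic tics precision" part of the series is the error accounting in set_cpu_itimer() and check_cpu_itimer(): the requested interval is rounded up to whole cputime ticks, the per-interval rounding error (incr_error, in nanoseconds) is accumulated in error, and once it reaches one full tick (onecputick, computed in init_posix_cpu_timers() above) the next expiry is pulled back by cputime_one_jiffy. A standalone model of that arithmetic, assuming HZ=250 (4 ms tick) and a requested 3 ms period:

	#include <stdio.h>

	int main(void)
	{
		const long onecputick = 4000000;	/* assumed: ns per cputime tick at HZ = 250 */
		const long requested  = 3000000;	/* ns per period the user asked for */
		const long incr       = onecputick;	/* interval rounded up to whole ticks */
		const long incr_error = incr - requested;	/* 1 ms rounding error per period */

		long expires = incr;			/* first expiry (initial rounding ignored) */
		long error   = 0;
		int i;

		for (i = 1; i <= 10; i++) {
			printf("fire %2d: actual %8ld ns  ideal %8ld ns  drift %7ld ns\n",
			       i, expires, (long)i * requested,
			       expires - (long)i * requested);

			/* re-arm, mirroring check_cpu_itimer() above */
			expires += incr;
			error   += incr_error;
			if (error >= onecputick) {
				expires -= onecputick;	/* pull the next expiry back by one jiffy */
				error   -= onecputick;
			}
		}
		return 0;
	}

Without the correction the drift column would grow by incr_error on every line; with it, the drift stays bounded by one tick and the long-run period averages out to the requested 3 ms.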
diff --git a/kernel/sched.c b/kernel/sched.c
index 0ac9053c21d..2f76e06bea5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5092,17 +5092,16 @@ void account_idle_time(cputime_t cputime)
*/
void account_process_tick(struct task_struct *p, int user_tick)
{
- cputime_t one_jiffy = jiffies_to_cputime(1);
- cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
struct rq *rq = this_rq();
if (user_tick)
- account_user_time(p, one_jiffy, one_jiffy_scaled);
+ account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
- account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+ account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
one_jiffy_scaled);
else
- account_idle_time(one_jiffy);
+ account_idle_time(cputime_one_jiffy);
}
/*
diff --git a/kernel/timer.c b/kernel/timer.c
index 811e5c39145..5db5a8d2681 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -46,6 +46,9 @@
#include <asm/timex.h>
#include <asm/io.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/timer.h>
+
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
@@ -521,6 +524,25 @@ static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
#endif
+static inline void debug_init(struct timer_list *timer)
+{
+ debug_timer_init(timer);
+ trace_timer_init(timer);
+}
+
+static inline void
+debug_activate(struct timer_list *timer, unsigned long expires)
+{
+ debug_timer_activate(timer);
+ trace_timer_start(timer, expires);
+}
+
+static inline void debug_deactivate(struct timer_list *timer)
+{
+ debug_timer_deactivate(timer);
+ trace_timer_cancel(timer);
+}
+
static void __init_timer(struct timer_list *timer,
const char *name,
struct lock_class_key *key)
@@ -549,7 +571,7 @@ void init_timer_key(struct timer_list *timer,
const char *name,
struct lock_class_key *key)
{
- debug_timer_init(timer);
+ debug_init(timer);
__init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);
@@ -568,7 +590,7 @@ static inline void detach_timer(struct timer_list *timer,
{
struct list_head *entry = &timer->entry;
- debug_timer_deactivate(timer);
+ debug_deactivate(timer);
__list_del(entry->prev, entry->next);
if (clear_pending)
@@ -632,7 +654,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
goto out_unlock;
}
- debug_timer_activate(timer);
+ debug_activate(timer, expires);
new_base = __get_cpu_var(tvec_bases);
@@ -787,7 +809,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
BUG_ON(timer_pending(timer) || !timer->function);
spin_lock_irqsave(&base->lock, flags);
timer_set_base(timer, base);
- debug_timer_activate(timer);
+ debug_activate(timer, timer->expires);
if (time_before(timer->expires, base->next_timer) &&
!tbase_get_deferrable(timer->base))
base->next_timer = timer->expires;
@@ -1000,7 +1022,9 @@ static inline void __run_timers(struct tvec_base *base)
*/
lock_map_acquire(&lockdep_map);
+ trace_timer_expire_entry(timer);
fn(data);
+ trace_timer_expire_exit(timer);
lock_map_release(&lockdep_map);
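kernel/timer.c gets the same wrapper treatment, plus CREATE_TRACE_POINTS to instantiate the whole event group, so classic timer_list timers now emit timer_init, timer_start, timer_expire_entry/exit and timer_cancel events. A hypothetical module sketch (not from this merge) using the 2.6.31-era timer API that would hit those paths:

	#include <linux/module.h>
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list demo_timer;

	static void demo_timer_fn(unsigned long data)
	{
		pr_info("timer_list demo fired, data=%lu\n", data);
	}

	static int __init demo_init(void)
	{
		/* hits trace_timer_init() via init_timer_key() */
		setup_timer(&demo_timer, demo_timer_fn, 42UL);

		/* hits trace_timer_start() via __mod_timer(); expiry produces
		 * timer_expire_entry/exit from __run_timers() */
		mod_timer(&demo_timer, jiffies + HZ);	/* fire roughly one second from now */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		del_timer_sync(&demo_timer);	/* emits timer_cancel if still pending */
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");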